LinuxQuestions.org

LinuxQuestions.org (/questions/)
-   Linux - Hardware (https://www.linuxquestions.org/questions/linux-hardware-18/)
-   -   Format & use disk fails - "used by system". LVM issue(?) (https://www.linuxquestions.org/questions/linux-hardware-18/format-and-use-disk-fails-used-by-system-lvm-issue-4175521420/)

pingu 10-08-2014 03:43 AM

Format & use disk fails - "used by system". LVM issue(?)
 
I have 2 disks, previously used for lvm, that I want to use in a new server. Problem is the partitions can't be formatted, I think it's an lvm issue but not sure.
I have read so much about this but found nothing useful. My system is CentOS 6.5; SELinux is permissive.

Here's what happens:
1. Booting from live-cd:
# dd if=/dev/zero of=/dev/sda bs=10M
2. Boot up normal system. Create one partition on /dev/sda
# mkfs.ext4 /dev/sda1
mke2fs 1.41.12 (17-May-2010)
/dev/sda1 is obviously used by system; will not create filesystem here! (Translated from Swedish.)
Checking filesystem partitions & mounts:
Code:

  # df
  Filesystem    1K-blocks    Used Available Use% Mounted on
  /dev/sdd2      44138980 1837452  40059380  5% /
  tmpfs            8161924      0  8161924  0% /dev/shm
  /dev/sdd1        495844  31704    438540  7% /boot

# cat /etc/mtab
/dev/sdd2 / ext4 rw 0 0
proc /proc proc rw 0 0
sysfs /sys sysfs rw 0 0
devpts /dev/pts devpts rw,gid=5,mode=620 0 0
tmpfs /dev/shm tmpfs rw,rootcontext="system_u:object_r:tmpfs_t:s0" 0 0
/dev/sdd1 /boot ext4 rw 0 0
none /proc/sys/fs/binfmt_misc binfmt_misc rw 0 0
sunrpc /var/lib/nfs/rpc_pipefs rpc_pipefs rw 0 0

# fdisk -l (only showing for /dev/sda here)
Disk /dev/sda: 120,0 GB, 120033041920 byte
255 huvuden, 63 sektorer/spår, 14593 cylindrar
Enheter = cylindrar av 16065 · 512 = 8225280 byte
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Diskidentifierare: 0x00000000

Disk /dev/mapper/1ATA_ST3120827AS_4MS01PC9: 120,0 GB, 120033041920 byte
255 huvuden, 63 sektorer/spår, 14593 cylindrar
Enheter = cylindrar av 16065 · 512 = 8225280 byte
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Diskidentifierare: 0x00000000

This entry "/dev/mapper..." for /dev/sda tells me lvm is still holding the disk.
And reading /etc/lvm/cache/.cache - yes my disks are all there!
I did try to delete this file and change lvm.conf to search in non-existent directory instead of /dev, it didn't change anything.
Code:

# cat /etc/lvm/cache/.cache
# This file is automatically maintained by lvm.

persistent_filter_cache {
        valid_devices=[
                "/dev/disk/by-id/ata-Maxtor_6Y080M0_Y2KMZYLE-part3",
                "/dev/block/1:13",
                "/dev/block/1:14",
                "/dev/ram11",
                "/dev/disk/by-path/pci-0000:00:1f.2-scsi-3:0:0:0-part1",
                "/dev/block/253:2",
                "/dev/disk/by-id/scsi-SATA_Maxtor_6Y080M0_Y2KMZYLE-part3",
                "/dev/disk/by-id/dm-name-1ATA_ST3120827AS_4MS01PC9",
                "/dev/disk/by-id/ata-Maxtor_6Y080M0_Y2KMZYLE-part2",
                "/dev/block/1:12",
                "/dev/ram10",
                "/dev/disk/by-id/dm-uuid-mpath-1ATA_WDC_WD5003ABYX-18WERA0_WD-WMAYP5327559",
                "/dev/block/8:49",
                "/dev/disk/by-uuid/c6770085-e8b0-403c-9757-46168bfc0ee8",
                "/dev/block/253:3",
                "/dev/disk/by-id/scsi-SATA_Maxtor_6Y080M0_Y2KMZYLE-part2",
                "/dev/block/1:10",
                "/dev/ram12",
                "/dev/disk/by-path/pci-0000:00:1f.2-scsi-3:0:0:0-part2",
                "/dev/block/1:6",
                "/dev/mapper/1ATA_WDC_WD5003ABYX-18WERA0_WD-WMAYP5327559p1",
                "/dev/ram6",
                "/dev/block/8:50",
                "/dev/block/253:1",
                "/dev/mapper/1ATA_ST3160812AS_5LS55TSN",
                "/dev/disk/by-id/ata-Maxtor_6Y080M0_Y2KMZYLE-part1",
                "/dev/block/1:11",
                "/dev/ram13",
                "/dev/ram14",
                "/dev/disk/by-path/pci-0000:00:1f.2-scsi-3:0:0:0-part3",
                "/dev/block/1:5",
                "/dev/disk/by-uuid/deae4ddc-73e8-45b1-988c-55e6b0dd20a1",
                "/dev/ram5",
                "/dev/block/8:51",
                "/dev/disk/by-id/dm-name-1ATA_ST3160812AS_5LS55TSN",
                "/dev/block/253:0",
                "/dev/mapper/1ATA_WDC_WD5003ABYX-18WERA0_WD-WMAYP5327559",
                "/dev/disk/by-id/scsi-SATA_Maxtor_6Y080M0_Y2KMZYLE-part1",
                "/dev/block/1:15",
                "/dev/disk/by-id/dm-name-1ATA_WDC_WD5003ABYX-18WERA0_WD-WMAYP5327559",
                "/dev/dm-2",
                "/dev/block/1:1",
                "/dev/mapper/1ATA_ST3120827AS_4MS01PC9",
                "/dev/ram1",
                "/dev/sdd1",
                "/dev/disk/by-id/dm-name-1ATA_WDC_WD5003ABYX-18WERA0_WD-WMAYP5327559p1",
                "/dev/disk/by-uuid/559f67e4-cfe8-4e18-955c-6b8fb71c2bfa",
                "/dev/dm-3",
                "/dev/block/1:0",
                "/dev/ram0",
                "/dev/disk/by-uuid/376d57ea-b934-4a7d-af35-bc7829e5ca49",
                "/dev/disk/by-id/dm-uuid-mpath-1ATA_ST3120827AS_4MS01PC9",
                "/dev/dm-1",
                "/dev/root",
                "/dev/block/1:2",
                "/dev/block/1:8",
                "/dev/block/1:9",
                "/dev/ram2",
                "/dev/ram8",
                "/dev/ram9",
                "/dev/sdd2",
                "/dev/dm-0",
                "/dev/ram15",
                "/dev/block/1:3",
                "/dev/block/1:4",
                "/dev/block/1:7",
                "/dev/disk/by-id/dm-uuid-mpath-1ATA_ST3160812AS_5LS55TSN",
                "/dev/disk/by-id/dm-uuid-part1-mpath-1ATA_WDC_WD5003ABYX-18WERA0_WD-WMAYP5327559",
                "/dev/ram3",
                "/dev/ram4",
                "/dev/ram7",
                "/dev/sdd3"
        ]
}

Various other commands tried:
Code:

# pvcreate /dev/sda1
  Can't open /dev/sda1 exclusively.  Mounted filesystem?
# vgdisplay
  No volume groups found
# pvdisplay
  (blank output)
# pvs -o+pv_used
  (blank output)
# vgs -o +devices
  No volume groups found
# lvs -P -a -o +devices
  PARTIAL MODE. Incomplete logical volumes will be processed.
  No volume groups found
# vgs -a -o +devices -P
  PARTIAL MODE. Incomplete logical volumes will be processed.
  No volume groups found
# vgscan
  Reading all physical volumes.  This may take a while...
  No volume groups found

Code:

# lvm dumpconfig
config {
        checks=1
        abort_on_errors=0
        profile_dir="/etc/lvm/profile"
}
dmeventd {
        mirror_library="libdevmapper-event-lvm2mirror.so"
        snapshot_library="libdevmapper-event-lvm2snapshot.so"
        thin_library="libdevmapper-event-lvm2thin.so"
}
activation {
        checks=0
        udev_sync=1
        udev_rules=1
        verify_udev_operations=0
        retry_deactivation=1
        missing_stripe_filler="error"
        use_linear_target=1
        reserved_stack=64
        reserved_memory=8192
        process_priority=-18
        raid_region_size=512
        readahead="auto"
        raid_fault_policy="warn"
        mirror_log_fault_policy="allocate"
        mirror_image_fault_policy="remove"
        snapshot_autoextend_threshold=100
        snapshot_autoextend_percent=20
        thin_pool_autoextend_threshold=100
        thin_pool_autoextend_percent=20
        use_mlockall=0
        monitoring=1
        polling_interval=15
}
global {
        umask=63
        test=0
        units="h"
        si_unit_consistency=1
        activation=1
        proc="/proc"
        locking_type=1
        wait_for_locks=1
        fallback_to_clustered_locking=1
        fallback_to_local_locking=1
        locking_dir="/var/lock/lvm"
        prioritise_write_locks=1
        abort_on_internal_errors=0
        detect_internal_vg_cache_corruption=0
        metadata_read_only=0
        mirror_segtype_default="mirror"
        raid10_segtype_default="mirror"
        use_lvmetad=0
}
shell {
        history_size=100
}
backup {
        backup=1
        backup_dir="/etc/lvm/backup"
        archive=1
        archive_dir="/etc/lvm/archive"
        retain_min=10
        retain_days=30
}
log {
        verbose=0
        silent=0
        syslog=1
        overwrite=0
        level=0
        indent=1
        command_names=0
        prefix="  "
        debug_classes=["memory", "devices", "activation", "allocation", "lvmetad", "metadata", "cache", "locking"]
}
allocation {
        maximise_cling=1
        mirror_logs_require_separate_pvs=0
        thin_pool_metadata_require_separate_pvs=0
}
devices {
        dir="/dev"
        scan="/dev"
        obtain_device_list_from_udev=0
        preferred_names=["^/dev/mpath/", "^/dev/mapper/mpath", "^/dev/[hs]d"]
        filter="a/.*/"
        cache_dir="/etc/lvm/cache"
        cache_file_prefix=""
        write_cache_state=1
        sysfs_scan=1
        multipath_component_detection=1
        md_component_detection=1
        md_chunk_alignment=1
        data_alignment_detection=1
        data_alignment=0
        data_alignment_offset_detection=1
        ignore_suspended_devices=0
        ignore_lvm_mirrors=0
        disable_after_error_count=0
        require_restorefile_with_uuid=1
        pv_min_size=2048
        issue_discards=0
}


pingu 10-08-2014 04:44 AM

Finally found it!
I was blindly staring at LVM commands, but it's the device mapper that's the issue here.
So solution is:
Code:

# dmsetup info
Then remove partitions & disks:
# dmsetup remove 1ATA_ST3120827AS_4MS01PC9p1
# dmsetup remove 1ATA_ST3120827AS_4MS01PC9



All times are GMT -5. The time now is 08:52 AM.