Server configuration
Install the missing packages
apt-get install sudo
Configure SSHD
PubkeyAuthentication yes
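For reference, a minimal sketch of applying this setting (assumes the default Debian sshd_config path and service name; adapt as needed):
# enable key-based authentication, then reload sshd
sed -i 's/^#\?PubkeyAuthentication.*/PubkeyAuthentication yes/' /etc/ssh/sshd_config
service ssh restart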
Multipathing/storage configuration
Configuring the Fibre Channel cards
Debian / V7000 / Storwize
Find the Fibre Channel cards
# lspci | grep -i fibre
09:00.0 Fibre Channel: QLogic Corp. ISP2432-based 4Gb Fibre Channel to PCI Express HBA (rev 03)
09:00.1 Fibre Channel: QLogic Corp. ISP2432-based 4Gb Fibre Channel to PCI Express HBA (rev 03)
Edit sources.list to add the non-free component
deb http://httpredir.debian.org/debian stretch main non-free
deb-src http://httpredir.debian.org/debian stretch main non-free
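After adding the non-free entries, refresh the package index so the firmware packages become visible:
apt-get update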
Install the packages for the QLogic FC cards
root@:~# apt-get install qla-tools firmware-qlogic
Install the filesystem management packages
root@:~# apt-get install libsysfs2 lsscsi sysfsutils
Install multipathing
root@:~# apt-get install multipath-tools
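If the package does not already enable the daemon at boot (a precaution; the Debian package normally does), enable it explicitly:
systemctl enable multipathd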
Reboot the server
Find the Fibre Channel card and its host port
root@britt:~# systool -c fc_host -v | more
Class = "fc_host"

  Class Device = "host0"
  Class Device path = "/sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/host0/fc_host/host0"
    dev_loss_tmo        = "30"
    fabric_name         = "0x5005076802e5f62d"
    issue_lip           = <store method only>
    max_npiv_vports     = "127"
    node_name           = "0x2000001b328787bc"
    npiv_vports_inuse   = "0"
    port_id             = "0xab0200"
    port_name           = "0x2100001b328787bc"
    port_state          = "Online"
    port_type           = "NPort (fabric via point-to-point)"
    speed               = "4 Gbit"
    supported_classes   = "Class 3"
    supported_speeds    = "1 Gbit, 2 Gbit, 4 Gbit"
    symbolic_name       = "QLE2462 FW:v8.03.00 DVR:v8.07.00.38-k"
    system_hostname     = ""
    tgtid_bind_type     = "wwpn (World Wide Port Name)"
    uevent              =
    vport_create        = <store method only>
    vport_delete        = <store method only>

    Device = "host0"
    Device path = "/sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/host0"
      fw_dump           =
      issue_logo        = <store method only>
      nvram             = "ISP "
      optrom_ctl        = <store method only>
      optrom            =
      reset             = <store method only>
      sfp               = ""
      uevent            = "DEVTYPE=scsi_host"
      vpd               = "▒^"

  Class Device = "host5"
  Class Device path = "/sys/devices/pci0000:00/0000:00:03.0/0000:03:00.1/host5/fc_host/host5"
    dev_loss_tmo        = "30"
    fabric_name         = "0x5005076802e5f62e"
    issue_lip           = <store method only>
    max_npiv_vports     = "127"
    node_name           = "0x2001001b32a787bc"
    npiv_vports_inuse   = "0"
    port_id             = "0xab0200"
    port_name           = "0x2101001b32a787bc"
    port_state          = "Online"
    port_type           = "NPort (fabric via point-to-point)"
    speed               = "4 Gbit"
    supported_classes   = "Class 3"
    supported_speeds    = "1 Gbit, 2 Gbit, 4 Gbit"
    symbolic_name       = "QLE2462 FW:v8.03.00 DVR:v8.07.00.38-k"
    system_hostname     = ""
    tgtid_bind_type     = "wwpn (World Wide Port Name)"
    uevent              =
    vport_create        = <store method only>
    vport_delete        = <store method only>

    Device = "host5"
    Device path = "/sys/devices/pci0000:00/0000:00:03.0/0000:03:00.1/host5"
      fw_dump           =
      issue_logo        = <store method only>
      nvram             = "ISP "
      optrom_ctl        = <store method only>
      optrom            =
      reset             = <store method only>
      sfp               = ""
      uevent            = "DEVTYPE=scsi_host"
      vpd               = "▒^"
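The same attributes can also be read directly from sysfs without systool, for example to confirm that both ports are logged in to the fabric:
cat /sys/class/fc_host/host0/port_state
cat /sys/class/fc_host/host5/port_state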
Find the WWNs of the FC cards on the servers
root@1:~# for i in 0 1 2 3 4 5; do cat /sys/class/scsi_host/host$i/device/fc_host/host$i/port_name; done
0x2100001b328555da
0x2101001b32a555da
root@2:~# for i in 0 1 2 3 4 5; do cat /sys/class/scsi_host/host$i/device/fc_host/host$i/port_name; done
0x2100001b328787bc
0x2101001b32a787bc
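An equivalent sketch that avoids hard-coding the host numbers, using the standard /sys/class/fc_host symlinks:
cat /sys/class/fc_host/host*/port_name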
Configuring the mapping, then multipathing
Before the mapping
root@britt:~# lsscsi
[6:0:0:0]  disk  USB TO I DE/SATA Device  0009  /dev/sda
[7:0:0:0]  disk  SEAGATE  ST31000424SS    0005  -
[7:0:1:0]  disk  SEAGATE  ST31000424SS    0005  -
[7:1:2:0]  disk  LSILOGIC Logical Volume  3000  /dev/sdb
Configure the mapping on the Storwize
List the hosts
IBM_Storwize:FMA-NTE01-V7000:adminsys>lshost
id name    port_count iogrp_count status site_id site_name host_cluster_id host_cluster_name
0  morbraz 2          4           online
1  britt   2          4           online
Map the volumes
IBM_Storwize:FMA-NTE01-V7000:adminsys>mkvdiskhostmap -force -host 0 -scsi 10 4
Virtual Disk to Host map, id [10], successfully created
IBM_Storwize:FMA-NTE01-V7000:adminsys>mkvdiskhostmap -force -host 1 -scsi 10 4
Virtual Disk to Host map, id [10], successfully created
IBM_Storwize:FMA-NTE01-V7000:adminsys>mkvdiskhostmap -force -host 1 -scsi 20 5
Virtual Disk to Host map, id [20], successfully created
IBM_Storwize:FMA-NTE01-V7000:adminsys>mkvdiskhostmap -force -host 0 -scsi 20 5
Virtual Disk to Host map, id [20], successfully created
Verify the mapping
IBM_Storwize:FMA-NTE01-V7000:adminsys>lsvdiskhostmap 5
id name        SCSI_id host_id host_name vdisk_UID                        IO_group_id IO_group_name mapping_type host_cluster_id host_cluster_name
5  Serv12_OCFS 20      0       morbraz   6005076802810CCE8800000000000024 0           io_grp0       private
5  Serv12_OCFS 20      1       britt     6005076802810CCE8800000000000024 0           io_grp0       private
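The mapping can also be checked per host rather than per volume; as a sketch, lshostvdiskmap takes the host id or name:
IBM_Storwize:FMA-NTE01-V7000:adminsys>lshostvdiskmap britt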
Verify the mapping on the server
Rescan the FC cards
root@britt:~# echo "- - -" > /sys/class/scsi_host/host0/scan
root@britt:~# echo "- - -" > /sys/class/scsi_host/host5/scan
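To rescan every SCSI host without naming each one, a loop over sysfs does the same thing:
for h in /sys/class/scsi_host/host*; do echo "- - -" > "$h/scan"; done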
After the mapping
root@britt:~# lsscsi
[5:0:0:10] disk  IBM      2145            0000  /dev/sdc
[5:0:0:20] disk  IBM      2145            0000  /dev/sdd
[6:0:0:0]  disk  USB TO I DE/SATA Device  0009  /dev/sda
[7:0:0:0]  disk  SEAGATE  ST31000424SS    0005  -
[7:0:1:0]  disk  SEAGATE  ST31000424SS    0005  -
[7:1:2:0]  disk  LSILOGIC Logical Volume  3000  /dev/sdb
root@britt:/tmp# multipath -l
36005076802810cce8800000000000024 dm-1 IBM,2145
size=1.0T features='2 queue_if_no_path retain_attached_hw_handler' hwhandler='1 alua' wp=rw
`-+- policy='service-time 0' prio=0 status=active
  `- 5:0:0:20 sdd 8:48 active undef running
36005076802810cce8800000000000023 dm-0 IBM,2145
size=1.0T features='2 queue_if_no_path retain_attached_hw_handler' hwhandler='1 alua' wp=rw
`-+- policy='service-time 0' prio=0 status=active
  `- 5:0:0:10 sdc 8:32 active undef running
Debugging multipath
When you have a problem with multipath and DM (Device Mapper):
multipathd -k
multipathd> show paths
hcil     dev dev_t pri dm_st chk_st dev_st  next_check
3:1:0:0  sda 8:0   1   undef undef  unknown orphan
2:0:0:0  sdb 8:16  10  undef undef  unknown orphan
2:0:0:10 sdc 8:32  50  undef undef  unknown orphan
4:0:0:0  sdd 8:48  50  undef undef  unknown orphan
4:0:0:10 sde 8:64  10  undef undef  unknown orphan
multipathd> quit
To view the effective configuration:
multipathd -k
multipathd> show config
defaults {
    verbosity 2
    polling_interval 5
    max_polling_interval 20
    reassign_maps "no"
[...]
To see the problem:
multipath -v3
Sep 29 18:48:46 | set open fds limit to 1048576/1048576
Sep 29 18:48:46 | loading //lib/multipath/libchecktur.so checker
Sep 29 18:48:46 | checker tur: message table size = 3
Sep 29 18:48:46 | loading //lib/multipath/libprioconst.so prioritizer
Sep 29 18:48:46 | foreign library "nvme" loaded successfully
Sep 29 18:48:46 | sda: udev property ID_WWN whitelisted
Sep 29 18:48:46 | sda: mask = 0x1f
Sep 29 18:48:46 | sda: dev_t = 8:0
[...]
Sep 29 18:48:46 | wwid 3600507630080814ec000000000000006 not in wwids file, skipping sdb
[...]
We found that the WWIDs are not in the wwids file!
/etc/multipath/wwids
# Multipath wwids, Version : 1.0
# NOTE: This file is automatically maintained by multipath and multipathd.
# You should not need to edit this file in normal circumstances.
#
# Valid WWIDs:
/36005076802810cce880000000000002b/
/36005076802810cce880000000000002c/
/36005076802810cce880000000000002d/
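A single missing WWID can also be registered by hand instead of changing the configuration; as a sketch, multipath -a adds the WWID of the given path device (here the sdb reported above) to the wwids file:
multipath -a /dev/sdb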
Automatic registration of new LUNs has to be added, since it is not the default (following the update)! Add to /etc/multipath.conf:
find_multipaths "yes"
root@gueuza:/etc# more /etc/multipath.conf
defaults {
    verbosity 2
    polling_interval 5
    max_polling_interval 20
    reassign_maps "no"
    multipath_dir "//lib/multipath"
    path_selector "service-time 0"
    path_grouping_policy "failover"
    uid_attribute "ID_SERIAL"
[...]
    find_multipaths "yes"
[...]
Restart multipathd
service multipathd restart
Verification
multipath -ll
3600507630080814ec000000000000008 dm-6 IBM,2145
size=10T features='1 queue_if_no_path' hwhandler='1 alua' wp=rw
|-+- policy='service-time 0' prio=50 status=active
| `- 2:0:0:10 sdc 8:32 active ready running
`-+- policy='service-time 0' prio=10 status=enabled
  `- 4:0:0:10 sde 8:64 active ready running
3600507630080814ec000000000000006 dm-5 IBM,2145
size=2.0T features='1 queue_if_no_path' hwhandler='1 alua' wp=rw
|-+- policy='service-time 0' prio=50 status=active
| `- 4:0:0:0 sdd 8:48 active ready running
`-+- policy='service-time 0' prio=10 status=enabled
  `- 2:0:0:0 sdb 8:16 active ready running
Creating the partition and filesystem
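A minimal sketch only, assuming the whole multipath device is formatted as ext4 without a partition table and mounted on an arbitrary mount point; adapt the WWID, filesystem type and mount point to your setup, and note that a volume shared between both hosts (like Serv12_OCFS above) needs a cluster filesystem instead:
# format and mount one of the multipath devices listed by multipath -ll
mkfs.ext4 /dev/mapper/3600507630080814ec000000000000006
mkdir -p /mnt/data
mount /dev/mapper/3600507630080814ec000000000000006 /mnt/data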
Find the network controller cards on the servers
root@:~# lspci | grep -i eth
01:00.0 Ethernet controller: Intel Corporation 82576 Gigabit Network Connection (rev 01)
01:00.1 Ethernet controller: Intel Corporation 82576 Gigabit Network Connection (rev 01)
05:00.0 Ethernet controller: Broadcom Limited NetXtreme BCM5719 Gigabit Ethernet PCIe (rev 01)
05:00.1 Ethernet controller: Broadcom Limited NetXtreme BCM5719 Gigabit Ethernet PCIe (rev 01)
05:00.2 Ethernet controller: Broadcom Limited NetXtreme BCM5719 Gigabit Ethernet PCIe (rev 01)
05:00.3 Ethernet controller: Broadcom Limited NetXtreme BCM5719 Gigabit Ethernet PCIe (rev 01)
Find the MAC addresses of the network cards on the servers
root@:~# ip link
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default qlen 1
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
2: enp5s0f0: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 1000
    link/ether 00:0a:f7:13:12:84 brd ff:ff:ff:ff:ff:ff
3: enp1s0f0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP mode DEFAULT group default qlen 1000
    link/ether 00:a0:d1:ec:fc:60 brd ff:ff:ff:ff:ff:ff
4: enp5s0f1: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 1000
    link/ether 00:0a:f7:13:12:85 brd ff:ff:ff:ff:ff:ff
5: enp5s0f2: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 1000
    link/ether 00:0a:f7:13:12:86 brd ff:ff:ff:ff:ff:ff
6: enp1s0f1: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 1000
    link/ether 00:a0:d1:ec:fc:61 brd ff:ff:ff:ff:ff:ff
7: enp5s0f3: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 1000
    link/ether 00:0a:f7:13:12:87 brd ff:ff:ff:ff:ff:ff
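To match an interface name with its PCI slot from the lspci output above, follow the sysfs device link, for example:
readlink -f /sys/class/net/enp5s0f0/device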
HP DL180 G6: IPMI: http://192.168.1.221 admin/admin