Tag Archives: rhel

Useful Linux Commands for SAN LUN allocation in RHEL

ls -l /dev/disk/by-* | grep lun-31
ls -l /dev/disk/by-* | grep lun-33
ls -l /dev/disk/by-* | grep lun-20
ls -l /dev/disk/by-* | grep lun-10
cat /sys/class/fc_transport/*/node_name
grep 50060160bee045be /sys/class/fc_transport/*/node_name
lsscsi
./inq.LinuxAMD64 -clariion
multipath -ll
df -h
cat /etc/fstsb
cat /etc/fstab
multipath -ll | grep mpathg
ls -ltr /data*
ls -ls /data*
ls -ld /data*
df -h
mount/dev/mapper/mpathg /data10
mount /dev/mapper/mpathg /data10
df -h
cd /data10
ls -ltr
du -hs regcss
rm -rf regcss
df -h
ls -ltr
vi /etc/fstab
cat /etc/fstsb
cat /etc/fstab
cd
mount /data10
umount /data10
mount /data10
df -h
multipath -ll | grep mpathk
multipath -ll | grep mpathl
ls -l /dev/disk/by-* | grep lun-33
echo "0 5 33" > /sys/class/scsi_host/host2/scan
ls -l /dev/disk/by-* | grep lun-33
echo "0 4 33" > /sys/class/scsi_host/host2/scan
ls -l /dev/disk/by-* | grep lun-33
ls -l /dev/disk/by-* | grep lun-31
cat /sys/class/fc_transport/*/node_name
echo "0 5 33" > /sys/class/scsi_host/host1/scan
ls -l /dev/disk/by-* | grep lun-33
cd /proc/scsi
ls
cd scsi
cd sg
ls
cd device
cd devices
cat devices
grep 0x50060160bee045be  /sys/class/fc_transport/*/node_name
echo "0 0 33" > /sys/class/scsi_host/host1/scan
ls -l /dev/disk/by-* | grep lun-33
echo "0 1 33" > /sys/class/scsi_host/host1/scan
ls -l /dev/disk/by-* | grep lun-33
echo "0 3 33" > /sys/class/scsi_host/host1/scan
ls -l /dev/disk/by-* | grep lun-33
echo "0 3 33" > /sys/class/scsi_host/host2/scan
ls -l /dev/disk/by-* | grep lun-33
echo "0 1 33" > /sys/class/scsi_host/host2/scan
ls -l /dev/disk/by-* | grep lun-33
grep 0x5006016b08605821  /sys/class/fc_transport/*/node_name
cat /sys/class/fc_transport/*/node_name
grep 0x5006016088605821  /sys/class/fc_transport/*/node_name
echo "0 2 33" > /sys/class/scsi_host/host2/scan
ls -l /dev/disk/by-* | grep lun-33
echo "0 4 33" > /sys/class/scsi_host/host1/scan
ls -l /dev/disk/by-* | grep lun-33
echo "0 2 33" > /sys/class/scsi_host/host1/scan
ls -l /dev/disk/by-* | grep lun-33
grep 0x50060160bea0597f  /sys/class/fc_transport/*/node_name
echo "0 3 33" > /sys/class/scsi_host/host1/scan
ls -l /dev/disk/by-* | grep lun-33
echo "0 5 33" > /sys/class/scsi_host/host1/scan
ls -l /dev/disk/by-* | grep lun-33
ls -l /dev/disk/by-* | grep lun-34
echo "0 5 34" > /sys/class/scsi_host/host1/scan
ls -l /dev/disk/by-* | grep lun-34
echo "0 2 34" > /sys/class/scsi_host/host2/scan
echo "0 2 34" > /sys/class/scsi_host/host1/scan
echo "0 4 34" > /sys/class/scsi_host/host1/scan
echo "0 4 34" > /sys/class/scsi_host/host2/scan
ls -l /dev/disk/by-* | grep lun-34
cd
mkdir /data11
mkdir /data12
multipath ll
multipath -ll
df -h | grep mpathp
history | grep ext4
mkfs.ext4 -L DATA11 -m 0 -b 2048 /dev/mapper/mpathp
df -h | grep mpathq
mkfs.ext4 -L DATA12 -m 0 -b 2048 /dev/mapper/mpathq
df -h
mkdir /data11
mkdir /data12
mount /dev/mapper/mpathp /data11
mount /dev/mapper/mpathp /data12
umount /data12
umount /data11
mount /dev/mapper/mpathp /data11
mount /dev/mapper/mpathq /data12
df -h
umount /data12
vi /etc/fstsb
vi /etc/fstab
df -h
umount /data11
mount all
mount -all
df -h
cat /etc/fstab
df -h
ls -ld /data*
chown -R orarh11g:dba /data11 /data12
ls -ld /data*
df -h
rm -rf /data12
cat /proc/scsi/scsi | egrep -i 'Host:' | wc -l
ls /sys/class/fc_host
df -h
cat /etc/fstsb
cat /etc/fstab
vi /etc/fstab
df -h
mount all
mount -all
mkdir /data12
mount -all
df -h
chown -R orarh11g:dba  /data12
df -h
cat /sys/class/scsi_host/host*/device/fc_host/host*/node_name
for i in 0 1 2 3 4 5; do cat host$i/device/fc_host/host$i/port_name;  done
for i in 0 1 2 3 4 5 6 7 8 9 10; do cat host$i/device/fc_host/host$i/port_name;  done
cd  /sys/class/scsi_host/
for i in 0 1 2 3 4 5 6 7 8 9 10; do cat host$i/device/fc_host/host$i/port_name;  done
ls /sys/class/fc_host
fdisk -l |egrep '^Disk' |egrep -v 'dm-'
multipath -ll
lspci | grep Fibre
lspci -v -s 05:00.0
ls -l /sys/class/scsi_host
ind /sys/class/pci_bus/0000\:05/device/0000\:05\:00.0/host*/rport-*/target*/*/state | awk -F'/' '{print $11}' | sort
find /sys/class/pci_bus/0000\:05/device/0000\:05\:00.0/host*/rport-*/target*/*/state | awk -F'/' '{print $11}' | sort
find /sys/class/pci_bus/0000\:05/device/0000\:05\:00.1/host*/rport-*/target*/*/state | awk -F'/' '{print $11}' | sort
cat /proc/scsi/scsi | grep scsi2
cat /proc/scsi/scsi | grep scsi1
find   /sys/class/pci_bus/0000\:05/device/0000\:05\:00.0/host*/rport-*/target*/*/block/*/stat | awk -F'/' '{print $11,$13}'
find   /sys/class/pci_bus/0000\:05/device/0000\:05\:00.1/host*/rport-*/target*/*/block/*/stat | awk -F'/' '{print $11,$13}'
udevadm info --query=path --name /dev/sdad
df -h
udevadm info --query=path --name /dev/mapper/mpathq
udevadm info --query=path --name /devices/virtual/block/dm-13
for port in /sys/class/fc_host/host[0-9]/port_name; { echo -n "$port : "; cat $port; }
history
CAILDB-63 scsi_host]#

Setup AWS Cloudwatch Memory and Drive Monitoring on RHEL

Download Scripts

Install Prerequisite Packages

sudo yum install wget unzip perl-core perl-DateTime perl-Sys-Syslog perl-CPAN perl-libwww-perl perl-Crypt-SMIME perl-Crypt-SSLeay

Install LWP Perl Bundles

  1. Launch cpan
    sudo perl -MCPAN -e shell
    
  2. Install Bundle
    install Bundle::LWP6 LWP YAML
    

Install Script

wget http://aws-cloudwatch.s3.amazonaws.com/downloads/CloudWatchMonitoringScripts-1.2.1.zip
unzip CloudWatchMonitoringScripts-1.2.1.zip -d /opt
rm -f CloudWatchMonitoringScripts-1.2.1.zip

Setup Credentials

API Access Key (Option 1)

This is good for testing, but it’s better to use IAM roles covered in Option 2.

  1. Copy awscreds template
    cp /opt/aws-scripts-mon/awscreds.template /opt/aws-scripts-mon/awscreds.conf
    
  2. Add access key id and secret access key
    vim /opt/aws-scripts-mon/awscreds.conf
    
  3. Lock down file access
    chmod 0400 /opt/aws-scripts-mon/awscreds.conf
    

IAM Role (Option 2)

  1. Login to AWS web console
  2. Select Identity & Access Management
  3. Select Roles | Create New Role
  4. Enter Role Name
    1. i.e. ec2-cloudwatch
  5. Select Next Step
  6. Select Amazon EC2
  7. Search for cloudwatch
  8. Select CloudwatchFullAccess
  9. Select Next Step | Create Role
  10. Launch a new instance and assign the ec2-cloudwatch IAM role

You can not add an IAM Role to an existing EC2 Instance; you can only specify a role when you launch a new instance.

https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html?console_help=true

Test

This won’t send data to Cloudwatch.

/opt/aws-scripts-mon/mon-put-instance-data.pl --mem-util --verify --verbose

Example

MemoryUtilization: 31.7258903184253 (Percent)
Using AWS credentials file <./awscreds.conf>
Endpoint: https://monitoring.us-west-2.amazonaws.com
Payload: {"MetricData":[{"Timestamp":1443537153,"Dimensions":[{"Value":"i-12e1fac4","Name":"InstanceId"}],"Value":31.7258903184253,"Unit":"Percent","MetricName":"MemoryUtilization"}],"Namespace":"System/Linux","__type":"com.amazonaws.cloudwatch.v2010_08_01#PutMetricDataInput"}

Verification completed successfully. No actual metrics sent to CloudWatch.

Report to Cloudwatch Test

Test that communication to Cloudwatch works and design the command you’ll want to cron out in the next step.

/opt/aws-scripts-mon/mon-put-instance-data.pl --mem-util --mem-used --mem-avail

After you run this command one point-in-time metric should show up for the instance under Cloudwatch | Linux System

Create Cron Task (as root)

Now that you’ve tested out the command and figured out what you want to report, it’s time to add a Cron task so it runs every X minutes. Usually 5 minutes is good.

  1. Edit cron table
    crontab -e
    
    */5 * * * * /opt/aws-scripts-mon/mon-put-instance-data.pl --mem-util --mem-used --mem-avail --disk-space-util --disk-path=/ --from-cron
    

Create Cron Task (as other user)

You may want to create a user that runs the cron. Here’s an example using a user named cloudwatch

  1. Create user
    useradd cloudwatch
    
  2. Disable user login
    usermod -s /sbin/nologin cloudwatch
    
  3. Set ownership
    chown -R cloudwatch:cloudwatch /opt/aws-scripts-mon
    
  4. Edit cron table
    crontab -e -u cloudwatch
    
  5. Add cron job
    */5 * * * * /opt/aws-scripts-mon/mon-put-instance-data.pl --mem-util --mem-used --mem-avail --swap-used --disk-space-util --disk-path=/ --from-cron
    

Verify Cron Job Ran

One way to verify the cron job ran is to look in the cron log.

less /var/log/cron
tail -f /var/log/cron

References

Monitor Script Arguments

Name Description
--mem-util Collects and sends the MemoryUtilization metrics in percentages. This option reports only memory allocated by applications and the operating system, and excludes memory in cache and buffers.
--mem-used Collects and sends the MemoryUsed metrics, reported in megabytes. This option reports only memory allocated by applications and the operating system, and excludes memory in cache and buffers.
--mem-avail Collects and sends the MemoryAvailable metrics, reported in megabytes. This option reports memory available for use by applications and the operating system.
--swap-util Collects and sends SwapUtilization metrics, reported in percentages.
--swap-used Collects and sends SwapUsed metrics, reported in megabytes.
--disk-path=PATH Selects the disk on which to report. PATH can specify a mount point or any file located on a mount point for the filesystem that needs to be reported. For selecting multiple disks, specify a --disk-path=PATH for each one of them. To select a disk for the filesystems mounted on / and /home, use the following parameters:
--disk-path=/ --disk-path=/home
--disk-space-util Collects and sends the DiskSpaceUtilization metric for the selected disks. The metric is reported in percentages.
--disk-space-used Collects and sends the DiskSpaceUsed metric for the selected disks. The metric is reported by default in gigabytes. Due to reserved disk space in Linux operating systems, disk space used and disk space available might not accurately add up to the amount of total disk space.
--disk-space-avail Collects and sends the DiskSpaceAvailable metric for the selected disks. The metric is reported in gigabytes. Due to reserved disk space in the Linux operating systems, disk space used and disk space available might not accurately add up to the amount of total disk space.
--memory-units=UNITS Specifies units in which to report memory usage. If not specified, memory is reported in megabytes. UNITS may be one of the following: bytes, kilobytes, megabytes, gigabytes.
--disk-space-units=UNITS Specifies units in which to report disk space usage. If not specified, disk space is reported in gigabytes. UNITS may be one of the following: bytes, kilobytes, megabytes, gigabytes.
--aws-credential-file=PATH Provides the location of the file containing AWS credentials. This parameter cannot be used with the --aws-access-key-id and --aws-secret-key parameters.
--aws-access-key-id=VALUE Specifies the AWS access key ID to use to identify the caller. Must be used together with the --aws-secret-key option. Do not use this option with the --aws-credential-file parameter.
--aws-secret-key=VALUE Specifies the AWS secret access key to use to sign the request to CloudWatch. Must be used together with the --aws-access-key-id option. Do not use this option with the --aws-credential-file parameter.
--verify Performs a test run of the script that collects the metrics, prepares a complete HTTP request, but does not actually call CloudWatch to report the data. This option also checks that credentials are provided. When run in verbose mode, this option outputs the metrics that will be sent to CloudWatch.
--from-cron Use this option when calling the script from cron. When this option is used, all diagnostic output is suppressed, but error messages are sent to the local system log of the user account.
--verbose Displays detailed information about what the script is doing.
--help Displays usage information.
--version Displays the version number of the script.

Prepare a RHEL-Based Virtual Machine for Azure

Today we got a project to prepare RHEL VHDs for Azure. I could not find any documentation covering RHEL on Azure, so I decided to write up the steps I followed.

Prerequisites

CentOS Installation Notes

  • The newer VHDX format is not supported in Azure. You can  convert the disk to VHD format using Hyper-V Manager or the convert-vhd cmdlet.
  • When installing the Linux system it is recommended that you use standard partitions rather than LVM (often the default for many installations). This will avoid LVM name conflicts with cloned VMs, particularly if an OS disk ever needs to be attached to another VM for troubleshooting. LVM or RAID may be used on data disks if preferred.
  • NUMA is not supported for larger VM sizes due to a bug in Linux kernel versions below 2.6.37. This issue primarily impacts distributions using the upstream Red Hat 2.6.32 kernel. Manual installation of the Azure Linux agent (waagent) will automatically disable NUMA in the GRUB configuration for the Linux kernel. More information about this can be found in the steps below.
  • Do not configure a swap partition on the OS disk. The Linux agent can be configured to create a swap file on the temporary resource disk. More information about this can be found in the steps below.
  • All of the VHDs must have sizes that are multiples of 1 MB.

RHEL 6.5

  1. In Hyper-V Manager, select the virtual machine.
  2. Click Connect to open a console window for the virtual machine.
  3. Uninstall NetworkManager by running the following command:
    # sudo rpm -e --nodeps NetworkManager

    Note: If the package is not already installed, this command will fail with an error message. This is expected.

  4. Create a file named network in the /etc/sysconfig/ directory that contains the following text:
    NETWORKING=yes
    HOSTNAME=localhost.localdomain
  5. Create a file named ifcfg-eth0 in the /etc/sysconfig/network-scripts/ directory that contains the following text:
    DEVICE=eth0
    ONBOOT=yes
    BOOTPROTO=dhcp
    TYPE=Ethernet
    USERCTL=no
    PEERDNS=yes
    IPV6INIT=no
  6. Move (or remove) udev rules to avoid generating static rules for the Ethernet interface. These rules cause problems when cloning a virtual machine in Microsoft Azure or Hyper-V:
    # sudo mkdir -m 0700 /var/lib/waagent
    # sudo mv /lib/udev/rules.d/75-persistent-net-generator.rules /var/lib/waagent/
    # sudo mv /etc/udev/rules.d/70-persistent-net.rules /var/lib/waagent/
  7. Ensure the network service will start at boot time by running the following command:
    # sudo chkconfig network on
  8. Install the python-pyasn1 package by running the following command:
    # sudo yum install python-pyasn1
  9. If you would like to use the OpenLogic mirrors that are hosted within the Azure datacenters, then replace the /etc/yum.repos.d/CentOS-Base.repo file with the following repositories. This will also add the [openlogic] repository that includes packages for the Azure Linux agent:
    [openlogic]
    name=CentOS-$releasever - openlogic packages for $basearch
    baseurl=http://olcentgbl.trafficmanager.net/openlogic/6/openlogic/$basearch/
    enabled=1
    gpgcheck=0
    
    [base]
    name=CentOS-$releasever - Base
    baseurl=http://olcentgbl.trafficmanager.net/centos/$releasever/os/$basearch/
    gpgcheck=1
    gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-6
    

    Note: The rest of this guide will assume you are using at least the [openlogic] repo, which will be used to install the Azure Linux agent below.

  10. Add the following line to /etc/yum.conf:
    http_caching=packages
  11. Run the following command to clear the current yum metadata:
    # yum clean all
  12. Modify the kernel boot line in your grub configuration to include additional kernel parameters for Azure. To do this open “/boot/grub/menu.lst” in a text editor and ensure that the default kernel includes the following parameters:
    console=ttyS0 earlyprintk=ttyS0 rootdelay=300 numa=off

     

    This will also ensure all console messages are sent to the first serial port, which can assist Azure support with debugging issues. This will disable NUMA due to a bug in the kernel version used by RHEL 6.azure_kernel

    In addition to the above, it is recommended to remove the following parameters:

    rhgb quiet crashkernel=auto

    Graphical and quiet boot are not useful in a cloud environment where we want all the logs to be sent to the serial port.

    The crashkernel option may be left configured if desired, but note that this parameter will reduce the amount of available memory in the VM by 128MB or more, which may be problematic on the smaller VM sizes.

  13. Ensure that the SSH server is installed and configured to start at boot time. This is usually the default.
  14. Disable SWAP  :  comment swap in /etc/fstab
        # blkid | grep swap
      /dev/sda3: UUID="53-e0e3efe22612" TYPE="swap"
      # swapoff /dev/sda3
  15. Install the Azure Linux Agent by running the following command:
    # sudo yum install WALinuxAgent

    Note that installing the WALinuxAgent package will remove the NetworkManager and NetworkManager-gnome packages if they were not already removed as described in step 2.

  16. Do not create swap space on the OS disk. The Azure Linux Agent can automatically configure swap space using the local resource disk that is attached to the VM after provisioning on Azure. Note that the local resource disk is a temporary disk, and might be emptied when the VM is deprovisioned. After installing the Azure Linux Agent (see previous step), modify the following parameters in /etc/waagent.conf appropriately:
    ResourceDisk.Format=y
    ResourceDisk.Filesystem=ext4
    ResourceDisk.MountPoint=/mnt/resource
    ResourceDisk.EnableSwap=y
    ResourceDisk.SwapSizeMB=8192    ## NOTE: set this to whatever you need it to be.
  17. Run the following commands to deprovision the virtual machine and prepare it for provisioning on Azure:
    # sudo waagent -force -deprovision
    # export HISTSIZE=0
    # logout
  18. Click Action -> Shut Down in Hyper-V Manager. Your Linux VHD is now ready to be uploaded to Azure.

Linux: Native Multipath Configuration on RHEL 4

1. The device-mapper-multipath package is not installed as part of a default installation of the operating system. Select the package as part of the OS install or install the rpm later either from the install CD or by downloading the rpm from the Red Hat website.
2. Verify that the required version of uDev has been installed for your environment. To query the uDev version, execute the command:

# rpm -q udev
If required, upgrade the uDev package and then execute the
command to create a new device under the '/dev' directory.

# udevstart
3. Load the dm_multipath kernel module if it is not already loaded.

# modprobe dm_multipath
4. Replace the default /etc/multipath.conf with the following multipath.conf file recommended by EMC for attach to EMC storage.

Follow the instructions in the annotated multipath.conf file for masking internal scsi disks or disks that need to be excluded from multipath control.

## This is the /etc/multipath.conf file recommended for
## EMC storage devices.
##
## OS : RHEL 4 U3
## Arrays : CLARiiON and Symmetrix
##
## The blacklist is the enumeration of all devices that are to be
## excluded from multipath control
devnode_blacklist
{
## Replace the wwid with the output of the command
## 'scsi_id -g -u -s /block/[internal scsi disk name]'
## Enumerate the wwid for all internal scsi disks.
## Optionally, the wwid of VCM database may also be listed here.
##

wwid 35005076718d4224d
devnode "^(ram|raw|loop|fd|md|dm-|sr|scd|st)[0-9]*"
devnode "^hd[a-z][0-9]*"
devnode "^cciss!c[0-9]d[0-9]*[p[0-9]*]"
}
## Use user friendly names, instead of using WWIDs as names.
defaults {
## Use user friendly names, instead of using WWIDs as names.
user_friendly_names yes
}
devices {
## Device attributes requirements for EMC Symmetrix
## are part of the default definitions and do not require separate
## definition.
## Device attributes for EMC CLARiiON
device {
vendor "DGC "
product "*"
path_grouping_policy group_by_prio
getuid_callout "/sbin/scsi_id -g -u -s /block/%n"
prio_callout "/sbin/mpath_prio_emc /dev/%n"
path_checker emc_clariion
path_selector "round-robin 0"
features "1 queue_if_no_path"
no_path_retry 300
hardware_handler "1 emc"
failback immediate
}
}
5. Perform a dry run and evaluate the setup by running the
following command.
# multipath -v2 -d
• With Symmetrix Logical Units, the output will look similar to:
create: SEMC_____SYMMETRIX______490073232000
[size=5 GB][features="0"][hwhandler="0"]
_ round-robin 0
_ 11:0:1:52 sdao 66:128
_ 11:0:2:52 sdaz 67:48
_ 10:0:1:52 sdm 8:192
_ 10:0:2:52 sdx 65:112
create: SEMC_____SYMMETRIX______490073233000
[size=5 GB][features="0"][hwhandler="0"]
_ round-robin 0
_ 11:0:1:53 sdap 66:144
_ 11:0:2:53 sdba 67:64
_ 10:0:1:53 sdn 8:208
_ 10:0:2:53 sdy 65:128
create: SEMC_____SYMMETRIX______490073234000
[size=5 GB][features="0"][hwhandler="0"]
_ round-robin 0
_ 11:0:1:54 sdaq 66:160
_ 11:0:2:54 sdbb 67:80
_ 10:0:1:54 sdo 8:224
_ 10:0:2:54 sdz 65:144
• With CLARiiON Logical Units, the output will look similar to:
create: 360060160b540160171f77f705558da11
[size=10 GB][features="1
queue_if_no_path"][hwhandler="1 emc"]
_ round-robin 0
_ 11:0:0:0 sdad 65:208
_ round-robin 0
_ 10:0:0:0 sdb 8:16
create: 360060160b540160170f77f705558da11
[size=10 GB][features="1
queue_if_no_path"][hwhandler="1 emc"]
_ round-robin 0
_ 11:0:0:1 sdae 65:224
_ round-robin 0
_ 10:0:0:1 sdc 8:32

6. If the listing is appropriate, commit the configuration as follows:
a. Start the required multipath processes.

# /etc/init.d/multipathd start
b. Execute the multipath command.

# multipath -v2
c. Perform an ‘lsmod’ and verify that the processes are running.

• With Symmetrix attach the following modules should be listed:
# lsmod |grep dm
dm_round_robin 4929 1
dm_multipath 22097 2 dm_round_robin
dm_mod 66433 1 dm_multipath

• With CLARiiON attach the following modules will be listed:

dm_emc 7489 1
dm_round_robin 4929 1
dm_multipath 22097 3 dm_emc,dm_round_robin
dm_mod 66433 3 dm_multipath

7. To get a listing of the current setup do:

# multipath -ll
8. Integrate the startup of the appropriate daemons in the boot sequence as follows:

# chkconfig --add multipathd
# chkconfig multipathd on

9. Device-mapper (dm) names and sd device names may not be persistent across reboots. There are two mechanisms to achieve persistence:

• Use of friendly names.
The friendly names are softlinks created in the /dev/mpath/ directory.

• Use of LVM-2 on top of device-mapper Configure logical volumes as necessary. LVM based entries in ‘/etc/fstab’ are supported. In the normal boot process, the device mapper and the multipath daemons start before LVM starts the discovery process.

You may optionally reboot the host to verify that the required processes automatically startup.