This is the mail archive of the cluster-cvs@sourceware.org mailing list for the Cluster Project.


Index Nav: [Date Index] [Subject Index] [Author Index] [Thread Index]
Message Nav: [Date Prev] [Date Next] [Thread Prev] [Thread Next]
Other format: [Raw text]

Cluster Project branch, RHEL4, updated. gfs-kernel_2_6_9_76-23-g36e10fa


This is an automated email from the git hooks/post-receive script. It was
generated because a ref change was pushed to the repository containing
the project "Cluster Project".

http://sources.redhat.com/git/gitweb.cgi?p=cluster.git;a=commitdiff;h=36e10fa435afc59e5a0e658c1b9b7ee2c34f184a

The branch, RHEL4 has been updated
       via  36e10fa435afc59e5a0e658c1b9b7ee2c34f184a (commit)
       via  a0e32d4eea87f15b2e07c8f61ecd53bd2462e73f (commit)
      from  d0aecbf552f27bb2bf9d3c66015dd4d8df559dd8 (commit)

Those revisions listed above that are new to this repository have
not appeared on any other notification email; so we list those
revisions in full, below.

- Log -----------------------------------------------------------------
commit 36e10fa435afc59e5a0e658c1b9b7ee2c34f184a
Author: Jonathan Brassow <jbrassow@redhat.com>
Date:   Wed Apr 2 14:25:22 2008 -0500

    rgmanager/lvm.sh:  Make lvm.sh call into lvm_by_*.sh (RE: bz427377)
    
    lvm_by_*.sh scripts are updated.  Now call them.  This makes
    lvm.sh a wrapper around lvm_by_*.sh scripts

commit a0e32d4eea87f15b2e07c8f61ecd53bd2462e73f
Author: Jonathan Brassow <jbrassow@redhat.com>
Date:   Wed Apr 2 14:19:02 2008 -0500

    rgmanager/lvm.sh:  Updated fix for bz242798
    
    When fixing Bug 427377 (allow multiple LVs / VG),
    I failed to commit the changes to lvm.sh - which
    effectively just became a shell that called functions
    in other files.
    
    Now, I need to update the other files with recent bug
    fixes and then make the change that was necessary for
    bug 427377 - removing the functional contents of lvm.sh.

-----------------------------------------------------------------------

Summary of changes:
 rgmanager/src/resources/lvm.metadata |   16 +-
 rgmanager/src/resources/lvm.sh       |  500 ++++------------------------------
 rgmanager/src/resources/lvm_by_lv.sh |   15 +-
 rgmanager/src/resources/lvm_by_vg.sh |   17 +-
 4 files changed, 94 insertions(+), 454 deletions(-)

diff --git a/rgmanager/src/resources/lvm.metadata b/rgmanager/src/resources/lvm.metadata
index 61a740b..650675b 100755
--- a/rgmanager/src/resources/lvm.metadata
+++ b/rgmanager/src/resources/lvm.metadata
@@ -42,14 +42,14 @@
 	</parameter>
 
 	<parameter name="self_fence">
-            <longdesc lang="en">
-                If set and the clean up of the tags fails, the node will
-                immediately reboot.
-            </longdesc>
-            	<shortdesc lang="en">
-                Fence the node if it is not able to clean up LVM tags
-            </shortdesc>
-            <content type="boolean"/>
+	    <longdesc lang="en">
+		If set and the clean up of the tags fails, the node will
+		immediately reboot.
+	    </longdesc>
+	    <shortdesc lang="en">
+		Fence the node if it is not able to clean up LVM tags
+	    </shortdesc>
+	    <content type="boolean"/>
 	</parameter>
 
 	<parameter name="nfslock" inherit="service%nfslock">
diff --git a/rgmanager/src/resources/lvm.sh b/rgmanager/src/resources/lvm.sh
index f45e50d..82f3383 100755
--- a/rgmanager/src/resources/lvm.sh
+++ b/rgmanager/src/resources/lvm.sh
@@ -21,19 +21,7 @@
 
 #
 # LVM Failover Script.
-#
-# This script correctly handles:
-#  - Relocation
-#  - Fail-over
-#  - Disk failure + Fail-over
-# If you don't know what those mean, ASK!  (jbrassow@redhat.com)
 # NOTE: Changes to /etc/lvm/lvm.conf are required for proper operation.
-#
-# This script should handle (but doesn't right now):
-#  - Operations on VG level.  Make lv_name optional.  This would have
-#    the effect of moving all LVs in a VG, not just one LV
-
-
 
 LC_ALL=C
 LANG=C
@@ -42,466 +30,82 @@ export LC_ALL LANG PATH
 
 . $(dirname $0)/ocf-shellfuncs
 . $(dirname $0)/utils/member_util.sh
+. $(dirname $0)/lvm_by_lv.sh
+. $(dirname $0)/lvm_by_vg.sh
 
 rv=0
 
-meta_data()
-{
-	cat <<EOT
-<?xml version="1.0" ?>
-<resource-agent name="lvm" version="rgmanager 2.0">
-    <version>1.0</version>
-
-    <longdesc lang="en">
-	This defines a LVM volume group that is ...
-    </longdesc>
-
-    <shortdesc lang="en">
-	LVM Failover script
-    </shortdesc>
-
-    <parameters>
-        <parameter name="name" primary="1">
-            <longdesc lang="en">
-                Descriptive name LVM Volume group
-            </longdesc>
-            <shortdesc lang="en">
-                Name
-            </shortdesc>
-	    <content type="string"/>
-        </parameter>
-
-        <parameter name="vg_name" required="1">
-            <longdesc lang="en">
-                If you can see this, your GUI is broken.
-            </longdesc>
-            <shortdesc lang="en">
-                If you can see this, your GUI is broken.
-            </shortdesc>
-	    <content type="string"/>
-        </parameter>
-
-        <parameter name="lv_name" required="1">
-            <longdesc lang="en">
-                If you can see this, your GUI is broken.
-            </longdesc>
-            <shortdesc lang="en">
-                If you can see this, your GUI is broken.
-            </shortdesc>
-	    <content type="string"/>
-        </parameter>
-
-	<parameter name="nfslock" inherit="service%nfslock">
-	    <longdesc lang="en">
-	        If set and unmounting the file system fails, the node will
-		try to kill lockd and issue reclaims across all remaining
-		network interface cards.
-	    </longdesc>
-	    <shortdesc lang="en">
-	        Enable NFS lock workarounds
-	    </shortdesc>
-	    <content type="boolean"/>
-	</parameter>
-
-    </parameters>
-
-    <actions>
-        <action name="start" timeout="5"/>
-	<action name="stop" timeout="5"/>
-
-	<action name="status" timeout="5" interval="1h"/>
-	<action name="monitor" timeout="5" interval="1h"/>
-
-	<action name="meta-data" timeout="5"/>
-	<action name="verify-all" timeout="30"/>
-    </actions>
-
-    <special tag="rgmanager">
-    	<attributes maxinstances="1"/>
-    </special>
-
-</resource-agent>
-EOT
-}
-
-# verify_all
-#
-# Verify the parameters passed in
+################################################################################
+# clvm_check
 #
-verify_all()
-{
-	declare lv_path="$OCF_RESKEY_vg_name/$OCF_RESKEY_lv_name"
-	declare -i ret=0
-
-	# Anything to verify?  Perhaps the names?
-	ocf_log notice "Verifying $lv_path"
-
-	return $ret
-}
-
-vg_status()
-{
-	return $OCF_ERR_GENERIC
-}
-
-vg_activate()
+################################################################################
+function clvm_check
 {
-	return $OCF_ERR_GENERIC
-}
-
-# lvm_exec_resilient
-#
-# Sometimes, devices can come back.  Their metadata will conflict
-# with the good devices that remain.  This function filters out those
-# failed devices when executing the given command
-#
-# Finishing with vgscan resets the cache/filter
-lvm_exec_resilient()
-{
-	declare command=$1
-	declare all_pvs
-
-	ocf_log notice "Making resilient : $command"
-
-	if [ -z $command ]; then
-		ocf_log err "lvm_exec_resilient: Arguments not supplied"
-		return $OCF_ERR_ARGS
+	if [[ $(vgs -o attr --noheadings $1) =~ .....c ]]; then
+		return 1
 	fi
 
-	# pvs will print out only those devices that are valid
-	# If a device dies and comes back, it will not appear
-	# in pvs output (but you will get a Warning).
-	all_pvs=(`pvs --noheadings -o pv_name | grep -v Warning`)
-
-	# Now we use those valid devices in a filter which we set up.
-	# The device will then be activated because there are no
-	# metadata conflicts.
-        command=$command" --config devices{filter=[";
-	for i in ${all_pvs[*]}; do
-		command=$command'"a|'$i'|",'
-	done
-	command=$command"\"r|.*|\"]}"
-
-	ocf_log notice "Resilient command: $command"
-	if ! $command ; then
-		ocf_log err "lvm_exec_resilient failed"
-		vgscan
-		return $OCF_ERR_GENERIC
-	else
-		vgscan
-		return $OCF_SUCCESS
-	fi
+	return 0
 }
 
-# lv_activate_resilient
+################################################################################
+# ha_lvm_proper_setup_check
 #
-# Sometimes, devices can come back.  Their metadata will conflict
-# with the good devices that remain.  We must filter out those
-# failed devices when trying to reactivate
-lv_activate_resilient()
+################################################################################
+function ha_lvm_proper_setup_check
 {
-	declare action=$1
-	declare lv_path=$2
-	declare op="-ay"
-
-	if [ -z $action ] || [ -z $lv_path ]; then
-		ocf_log err "lv_activate_resilient: Arguments not supplied"
-		return $OCF_ERR_ARGS
-	fi
-
-	if [ $action != "start" ]; then
-	        op="-an"
-	fi
-
-	if ! lvm_exec_resilient "lvchange $op $lv_path" ; then
-		ocf_log err "lv_activate_resilient $action failed on $lv_path"
-		return $OCF_ERR_GENERIC
-	else
-		return $OCF_SUCCESS
-	fi
-}
-
-# lv_status
-#
-# Is the LV active?
-lv_status()
-{
-	declare lv_path="$OCF_RESKEY_vg_name/$OCF_RESKEY_lv_name"
-	declare dev="/dev/$lv_path"
-	declare realdev
-	declare owner
-	declare my_name
-
-	#
-	# Check if device is active
-	#
-	if [[ ! $(lvs -o attr --noheadings $lv_path) =~ ....a. ]]; then
-		return $OCF_ERR_GENERIC
-	fi
-
-	if [[ $(vgs -o attr --noheadings $OCF_RESKEY_vg_name) =~ .....c ]]; then
-		ocf_log notice "$OCF_RESKEY_vg_name is a cluster volume.  Ignoring..."
-		return $OCF_SUCCESS
-	fi
-
-	#
-	# Check if all links/device nodes are present
-	#
-        if [ -h "$dev" ]; then
-		realdev=$(readlink -f $dev)
-		if [ $? -ne 0 ]; then
-			ocf_log err "Failed to follow link, $dev"
-			return $OCF_ERR_ARGS
-		fi
-
-		if [ ! -b $realdev ]; then
-			ocf_log err "Device node for $lv_path is not present"
-			return $OCF_ERR_GENERIC
-		fi
-	else
-	        ocf_log err "Symbolic link for $lv_path is not present"
-		return $OCF_ERR_GENERIC
-	fi
-
-	#
-	# Verify that we are the correct owner
-	#
-	owner=`lvs -o tags --noheadings $lv_path`
-	my_name=$(local_node_name)
-	if [ -z $my_name ]; then
-		ocf_log err "Unable to determine local machine name"
-
-		# FIXME: I don't really want to fail on 1st offense
-		return $OCF_SUCCESS
-	fi
-
-	if [ -z $owner ] || [ $my_name != $owner ]; then
-		ocf_log err "WARNING: $lv_path should not be active"
-		ocf_log err "WARNING: $my_name does not own $lv_path"
-		ocf_log err "WARNING: Attempting shutdown of $lv_path"
-
-		lv_activate_resilient "stop" $lv_path
-		return $OCF_ERR_GENERIC
-	fi
-
-	return $OCF_SUCCESS
-}
-
-# lv_activate_and_tag
-lv_activate_and_tag()
-{
-	declare action=$1
-	declare tag=$2
-	declare lv_path=$3
-	typeset self_fence=""
-
-	case ${OCF_RESKEY_self_fence} in
-		"yes")          self_fence=1 ;;
-		1)              self_fence=1 ;;
-		*)              self_fence="" ;;
-	esac
-
-	if [ -z $action ] || [ -z $tag ] || [ -z $lv_path ]; then
-		ocf_log err "Supplied args: 1) $action, 2) $tag, 3) $lv_path"
-		return $OCF_ERR_ARGS
-	fi
-
-	if [ $action == "start" ]; then
-		ocf_log notice "Activating $lv_path"
-		lvchange --addtag $tag $lv_path
-		if [ $? -ne 0 ]; then
-			ocf_log err "Unable to add tag to $lv_path"
-			return $OCF_ERR_GENERIC
-		fi
-
-		if ! lv_activate_resilient $action $lv_path; then
-			ocf_log err "Unable to activate $lv_path"
-			return $OCF_ERR_GENERIC
-		fi
-	else
-		ocf_log notice "Deactivating $lv_path"
-		if ! lv_activate_resilient $action $lv_path; then
-			if [ "$self_fence" ]; then
-				ocf_log err "Unable to deactivate $lv_path REBOOT"
-				sync
-				reboot -fn
-			else
-				ocf_log err "Unable to deactivate $lv_path"
-			fi
-
-			ocf_log err "Unable to deactivate $lv_path"
-			return $OCF_ERR_GENERIC
-		fi
-
-		ocf_log notice "Removing ownership tag ($tag) from $lv_path"
-
-		lvchange --deltag $tag $lv_path
-		if [ $? -ne 0 ]; then
-			ocf_log err "Unable to delete tag from $lv_path"
-			return $OCF_ERR_GENERIC
-		fi
-	fi
-
-	return $OCF_SUCCESS
-}
-
-# lv_activate
-# $1: start/stop only
-#
-# Basically, if we want to [de]activate an LVM volume,
-# we must own it.  That means that our tag must be on it.
-# This requires a change to /etc/lvm/lvm.conf:
-#	volume_list = [ "root_volume", "@my_hostname" ]
-# where "root_volume" is your root volume group and
-# "my_hostname" is $(local_node_name)
-#
-# If there is a node failure, we may wish to "steal" the
-# LV.  For that, we need to check if the node that owns
-# it is still part of the cluster.  We use the tag to
-# determine who owns the volume then query for their
-# liveness.  If they are dead, we can steal.
-lv_activate()
-{
-	declare lv_path="$OCF_RESKEY_vg_name/$OCF_RESKEY_lv_name"
-	declare owner=`lvs -o tags --noheadings $lv_path`
-	declare my_name=$(local_node_name)
-
-	if [ -z $my_name ]; then
-		ocf_log err "Unable to determine cluster node name"
-		return $OCF_ERR_GENERIC
-	fi
-
-	#
-	# FIXME: This code block is repeated below... might be
-	# nice to put it in a function
-	#
-	if [ ! -z $owner ] && [ $owner != $my_name ]; then
-		if is_node_member_clustat $owner ; then
-			ocf_log err "$owner owns $lv_path unable to $1"
-			return $OCF_ERR_GENERIC
-		fi
-		ocf_log notice "Owner of $lv_path is not in the cluster"
-		ocf_log notice "Stealing $lv_path"
-
-		lvchange --deltag $owner $lv_path
-		if [ $? -ne 0 ]; then
-			ocf_log err "Failed to steal $lv_path from $owner"
-			return $OCF_ERR_GENERIC
-		fi
-
-		# Warning --deltag doesn't always result in failure
-		if [ ! -z `lvs -o tags --noheadings $lv_path` ]; then
-			ocf_log err "Failed to steal $lv_path from $owner."
-			return $OCF_ERR_GENERIC
-		fi
-	fi
-
-	if ! lv_activate_and_tag $1 $my_name $lv_path; then
-		ocf_log err "Failed to $1 $lv_path"
-
-		if [ "$1" == "start" ]; then
-			ocf_log notice "Attempting cleanup of $OCF_RESKEY_vg_name"
-
-			if vgreduce --removemissing --config \
-			    "activation { volume_list = \"$OCF_RESKEY_vg_name\" }" \
-			    $OCF_RESKEY_vg_name; then
-				ocf_log notice "$OCF_RESKEY_vg_name now consistent"
-				owner=`lvs -o tags --noheadings $lv_path`
-				if [ ! -z $owner ] && [ $owner != $my_name ]; then
-					if is_node_member_clustat $owner ; then
-						ocf_log err "$owner owns $lv_path unable to $1"
-						return $OCF_ERR_GENERIC
-					fi
-					ocf_log notice "Owner of $lv_path is not in the cluster"
-					ocf_log notice "Stealing $lv_path"
-
-					lvchange --deltag $owner $lv_path
-					if [ $? -ne 0 ]; then
-						ocf_log err "Failed to steal $lv_path from $owner"
-						return $OCF_ERR_GENERIC
-					fi
-
-					# Warning --deltag doesn't always result in failure
-					if [ ! -z `lvs -o tags --noheadings $lv_path` ]; then
-						ocf_log err "Failed to steal $lv_path from $owner."
-						return $OCF_ERR_GENERIC
-					fi
-				fi
-
-				if ! lv_activate_and_tag $1 $my_name $lv_path; then
-					ocf_log err "Failed second attempt to $1 $lv_path"
-					return $OCF_ERR_GENERIC
-				else
-					ocf_log notice "Second attempt to $1 $lv_path successful"
-					return $OCF_SUCCESS
-				fi
-			else
-				ocf_log err "Failed to make $OCF_RESKEY_vg_name consistent"
-				return $OCF_ERR_GENERIC
-			fi
-		else
-			ocf_log err "Failed to $1 $lv_path"
-			return $OCF_ERR_GENERIC
-		fi
-	fi
-	return $OCF_SUCCESS
-}
-
-ha_lvm_proper_setup_check()
-{
-	# First, let's check that they have setup their lvm.conf correctly
+	##
+	# Machine's cluster node name must be present as
+	# a tag in lvm.conf:activation/volume_list
+	##
 	if ! lvm dumpconfig activation/volume_list >& /dev/null ||
 	   ! lvm dumpconfig activation/volume_list | grep $(local_node_name); then
 		ocf_log err "lvm.conf improperly configured for HA LVM."
 		return $OCF_ERR_GENERIC
 	fi
 
+	##
 	# Next, we need to ensure that their initrd has been updated
-	if [ -e /boot/initrd-`uname -r`.img ]; then
-		if [ "$(find /boot/initrd-`uname -r`.img -newer /etc/lvm/lvm.conf)" == "" ]; then
-			ocf_log err "HA LVM requires the initrd image to be newer than lvm.conf"
-			return $OCF_ERR_GENERIC
-		fi
-	else
-		# Best guess...
-		if [ "$(find /boot/*.img -newer /etc/lvm/lvm.conf)" == "" ]; then
-			ocf_log err "HA LVM requires the initrd image to be newer than lvm.conf"
-			return $OCF_ERR_GENERIC
-		fi
+	# If not, the machine could boot and activate the VG outside
+	# the control of rgmanager
+	##
+	# Fixme: we might be able to perform a better check...
+	if [ "$(find /boot/*.img -newer /etc/lvm/lvm.conf)" == "" ]; then
+		ocf_log err "HA LVM requires the initrd image to be newer than lvm.conf"
+		return $OCF_ERR_GENERIC
 	fi
 
 	return $OCF_SUCCESS
 }
 
+################################################################################
+# MAIN
+################################################################################
+
 case $1 in
 start)
-	if [[ $(vgs -o attr --noheadings $OCF_RESKEY_vg_name) =~ .....c ]]; then
+	##
+	# We can safely ignore clustered volume groups (VGs handled by CLVM)
+	##
+	if ! clvm_check $OCF_RESKEY_vg_name; then
 		ocf_log notice "$OCF_RESKEY_vg_name is a cluster volume.  Ignoring..."
 		exit 0
 	fi
 
-	if ! lvs $OCF_RESKEY_vg_name >& /dev/null; then
-		lv_count=0
-	else
-		lv_count=`lvs --noheadings -o name $OCF_RESKEY_vg_name | grep -v _mlog | grep -v _mimage | grep -v nconsistent | wc -l`
-	fi
-	if [ $lv_count -gt 1 ]; then
-		ocf_log err "HA LVM requires Only one logical volume per volume group."
-		ocf_log err "There are currently $lv_count logical volumes in $OCF_RESKEY_vg_name"
-		ocf_log err "Failing HA LVM start of $OCF_RESKEY_vg_name/$OCF_RESKEY_lv_name"
-		exit $OCF_ERR_GENERIC
-	fi
 	ha_lvm_proper_setup_check || exit 1
-		
+
+	rv=0
+
 	if [ -z $OCF_RESKEY_lv_name ]; then
-		vg_activate start || exit 1
+		vg_start || exit 1
 	else
-		lv_activate start || exit 1
+		lv_start || exit 1
 	fi
-	rv=0
 	;;
 
 status|monitor)
+	ocf_log notice "Getting status"
+
 	if [ -z $OCF_RESKEY_lv_name ]; then
 		vg_status || exit 1
 	else
@@ -511,7 +115,10 @@ status|monitor)
 	;;
 		    
 stop)
-	if [[ $(vgs -o attr --noheadings $OCF_RESKEY_vg_name) =~ .....c ]]; then
+	##
+	# We can safely ignore clustered volume groups (VGs handled by CLVM)
+	##
+	if ! clvm_check $OCF_RESKEY_vg_name; then
 		ocf_log notice "$OCF_RESKEY_vg_name is a cluster volume.  Ignoring..."
 		exit 0
 	fi
@@ -521,9 +128,9 @@ stop)
 	fi
 
 	if [ -z $OCF_RESKEY_lv_name ]; then
-		vg_activate stop || exit 1
+		vg_stop || exit 1
 	else
-		lv_activate stop || exit 1
+		lv_stop || exit 1
 	fi
 	rv=0
 	;;
@@ -535,18 +142,25 @@ recover|restart)
 	;;
 
 meta-data)
-	meta_data
+	cat `echo $0 | sed 's/^\(.*\)\.sh$/\1.metadata/'`
 	rv=0
 	;;
 
 verify-all)
-	if [[ $(vgs -o attr --noheadings $OCF_RESKEY_vg_name) =~ .....c ]]; then
+	##
+	# We can safely ignore clustered volume groups (VGs handled by CLVM)
+	##
+	if ! clvm_check $OCF_RESKEY_vg_name; then
 		ocf_log notice "$OCF_RESKEY_vg_name is a cluster volume.  Ignoring..."
 		exit 0
 	fi
 
-	verify_all
-	rv=$?
+	if [ -z $OCF_RESKEY_lv_name ]; then
+		vg_verify || exit 1
+	else
+		lv_verify || exit 1
+	fi
+	rv=0
 	;;
 *)
 	echo "usage: $0 {start|status|monitor|stop|restart|meta-data|verify-all}"
diff --git a/rgmanager/src/resources/lvm_by_lv.sh b/rgmanager/src/resources/lvm_by_lv.sh
index e14540f..6691181 100644
--- a/rgmanager/src/resources/lvm_by_lv.sh
+++ b/rgmanager/src/resources/lvm_by_lv.sh
@@ -173,6 +173,13 @@ lv_activate_and_tag()
 	declare action=$1
 	declare tag=$2
 	declare lv_path=$3
+	typeset self_fence=""
+
+	case ${OCF_RESKEY_self_fence} in
+		"yes")          self_fence=1 ;;
+		1)              self_fence=1 ;;
+		*)              self_fence="" ;;
+	esac
 
 	if [ -z $action ] || [ -z $tag ] || [ -z $lv_path ]; then
 		ocf_log err "Supplied args: 1) $action, 2) $tag, 3) $lv_path"
@@ -194,7 +201,13 @@ lv_activate_and_tag()
 	else
 		ocf_log notice "Deactivating $lv_path"
 		if ! lv_activate_resilient $action $lv_path; then
-			ocf_log err "Unable to deactivate $lv_path"
+			if [ "$self_fence" ]; then
+				ocf_log err "Unable to deactivate $lv_path: REBOOTING"
+				sync
+				reboot -fn
+			else
+				ocf_log err "Unable to deactivate $lv_path"
+			fi
 			return $OCF_ERR_GENERIC
 		fi
 
diff --git a/rgmanager/src/resources/lvm_by_vg.sh b/rgmanager/src/resources/lvm_by_vg.sh
index b1840a1..8fa36d6 100755
--- a/rgmanager/src/resources/lvm_by_vg.sh
+++ b/rgmanager/src/resources/lvm_by_vg.sh
@@ -242,17 +242,30 @@ function vg_stop
 {
 	local a
 	local results
+	typeset self_fence=""
+
+	case ${OCF_RESKEY_self_fence} in
+		"yes")          self_fence=1 ;;
+		1)              self_fence=1 ;;
+		*)              self_fence="" ;;
+	esac
 
 	#  Shut down the volume group
 	#  Do we need to make this resilient?
 	vgchange -an $OCF_RESKEY_vg_name
 
 	#  Make sure all the logical volumes are inactive
-	results=(`lvs -o name,attr --noheadings 2> /dev/null $OCF_RESKEY_vg_name`)
+	results=(`lvs -o name,attr --noheadings $OCF_RESKEY_vg_name 2> /dev/null`)
 	a=0
 	while [ ! -z ${results[$a]} ]; do
 		if [[ ${results[$(($a + 1))]} =~ ....a. ]]; then
-			ocf_log err "Logical volume $OCF_RESKEY_vg_name/${results[$a]} failed to shutdown"
+			if [ "$self_fence" ]; then
+				ocf_log err "Unable to deactivate $lv_path REBOOT"
+				sync
+				reboot -fn
+			else
+				ocf_log err "Logical volume $OCF_RESKEY_vg_name/${results[$a]} failed to shutdown"
+			fi
 			return $OCF_ERR_GENERIC
 		fi
 		a=$(($a + 2))


hooks/post-receive
--
Cluster Project


Index Nav: [Date Index] [Subject Index] [Author Index] [Thread Index]
Message Nav: [Date Prev] [Date Next] [Thread Prev] [Thread Next]