This is the mail archive of the cluster-cvs@sourceware.org mailing list for the cluster project.


Cluster Project branch, RHEL4, updated. gfs-kernel_2_6_9_76-67-g6ccdc71


This is an automated email from the git hooks/post-receive script. It was
generated because a ref change was pushed to the repository containing
the project "Cluster Project".

http://sources.redhat.com/git/gitweb.cgi?p=cluster.git;a=commitdiff;h=6ccdc714ff546423bc44954a8f581152bd212d97

The branch, RHEL4, has been updated
       via  6ccdc714ff546423bc44954a8f581152bd212d97 (commit)
      from  532a3cdbc09cfc0dfd85296ccc613f2753acce22 (commit)

The revisions listed above that are new to this repository have
not appeared in any other notification email, so we list those
revisions in full below.

- Log -----------------------------------------------------------------
commit 6ccdc714ff546423bc44954a8f581152bd212d97
Author: Lon Hohberger <lhh@redhat.com>
Date:   Wed May 28 10:47:37 2008 -0400

    [rgmanager] Fix relocate operation when using gulm.
    
    This fixes a regression in the relocation operation when
    using gulm on the RHEL4/STABLE branches.  This patch
    also cleans up several log messages.

-----------------------------------------------------------------------
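For readers unfamiliar with the gulm case: gulm hands out 64-bit node IDs, so
narrowing one to an int (or printing it with %d) truncates it, which is the
kind of breakage the relocate fix above addresses. The stand-alone sketch
below is illustrative only; the node ID value and file name are invented, and
it is not code from this repository.

/*
 * nodeid-demo.c -- minimal illustration (not rgmanager code) of why a
 * 64-bit gulm node ID must not be stored in an int or printed with %d.
 * Build: cc -Wall -o nodeid-demo nodeid-demo.c
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* A made-up 64-bit gulm-style node ID with non-zero high bits. */
	uint64_t preferred_target = 0x00000001c0a80a0bULL;

	/* Old style: squeezing the ID into an int drops the high bits ... */
	int target_broken = (int)preferred_target;

	/* ... so the logged value no longer identifies the real member
	 * (here it even comes out negative). */
	printf("broken: member #%d\n", target_broken);

	/* Fixed style, as the patch does in rg_state.c: keep the full
	 * width and print it in hex with %llx. */
	uint64_t target_fixed = preferred_target;
	printf("fixed : member #%llx\n", (unsigned long long)target_fixed);

	return 0;
}

With a stock gcc this prints a negative number for the broken case and the
full 64-bit ID for the fixed one, which matches the %d to %llx changes in the
hunks below.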

Summary of changes:
 rgmanager/src/daemons/groups.c     |    4 ++--
 rgmanager/src/daemons/main.c       |    2 +-
 rgmanager/src/daemons/rg_state.c   |   33 ++++++++++++++++-----------------
 rgmanager/src/daemons/service_op.c |    4 ++--
 rgmanager/src/utils/clusvcadm.c    |    4 ++--
 5 files changed, 23 insertions(+), 24 deletions(-)

diff --git a/rgmanager/src/daemons/groups.c b/rgmanager/src/daemons/groups.c
index 5df7ed7..d1fe3db 100644
--- a/rgmanager/src/daemons/groups.c
+++ b/rgmanager/src/daemons/groups.c
@@ -753,8 +753,8 @@ eval_groups(int local, uint64_t nodeid, int nodeStatus)
 	pthread_rwlock_unlock(&resource_lock);
 	cml_free(membership);
 
-	clulog(LOG_DEBUG, "Event (%d:%d:%d) Processed\n", local,
-	       (int)nodeid, nodeStatus);
+	clulog(LOG_DEBUG, "Event (%d:%llx:%d) Processed\n", local,
+	       nodeid, nodeStatus);
 
 	return 0;
 }
diff --git a/rgmanager/src/daemons/main.c b/rgmanager/src/daemons/main.c
index 527c5d7..07fe206 100644
--- a/rgmanager/src/daemons/main.c
+++ b/rgmanager/src/daemons/main.c
@@ -153,7 +153,7 @@ membership_update(void)
 	new_ml = clu_member_list(RG_SERVICE_GROUP);
 	member_list_update(new_ml);
 
-	clulog(LOG_DEBUG, "I am node #%lld\n", my_id());
+	clulog(LOG_DEBUG, "I am node 0x%llx\n", my_id());
 
 	/*
 	 * Handle nodes lost.  Do our local node event first.
diff --git a/rgmanager/src/daemons/rg_state.c b/rgmanager/src/daemons/rg_state.c
index 4509e6a..df0ac1b 100644
--- a/rgmanager/src/daemons/rg_state.c
+++ b/rgmanager/src/daemons/rg_state.c
@@ -164,7 +164,7 @@ svc_report_failure(char *svcName)
 		       c_name(svcName), nodeName);
 	} else {
 		clulog(LOG_ALERT, "#3: Service %s returned failure "
-		       "code.  Last Owner: %d\n",
+		       "code.  Last Owner: %llx\n",
 		       c_name(svcName), (int)svcStatus.rs_last_owner);
 	}
 
@@ -1152,7 +1152,7 @@ svc_start_remote(char *svcName, int request, uint64_t target)
 
 	if ((fd_relo = msg_open(target, RG_PORT, RG_PURPOSE, 2)) < 0) {
 		clulog(LOG_ERR,
-		       "#58: Failed opening connection to member #%d\n",
+		       "#58: Failed opening connection to member #%llx\n",
 		       target);
 		return -1;
 	}
@@ -1164,13 +1164,13 @@ svc_start_remote(char *svcName, int request, uint64_t target)
 	if (msg_send(fd_relo, &msg_relo, sizeof (SmMessageSt)) !=
 	    sizeof (SmMessageSt)) {
 		clulog(LOG_ERR,
-		       "#59: Error sending remote start request to member #%d\n",
+		       "#59: Error sending remote start request to member #%llx\n",
 		       target);
 		msg_close(fd_relo);
 		return -1;
 	}
 
-	clulog(LOG_DEBUG, "Sent remote start request to %d\n", (int)target);
+	clulog(LOG_DEBUG, "Sent remote start request to #%llx\n", target);
 
 	/* Check the response */
 	do {
@@ -1208,7 +1208,7 @@ svc_start_remote(char *svcName, int request, uint64_t target)
 		 * In this case, we don't restart the service, because the 
 		 * service state is actually unknown to us at this time.
 		 */
-		clulog(LOG_ERR, "#60: Mangled reply from member #%d during "
+		clulog(LOG_ERR, "#60: Mangled reply from member #%llx during "
 		       "relocate\n", target);
 		msg_close(fd_relo);
 		return 0;	/* XXX really UNKNOWN */
@@ -1249,7 +1249,7 @@ handle_relocate_req(char *svcName, int request, uint64_t preferred_target,
 {
 	cluster_member_list_t *allowed_nodes = NULL, *backup = NULL;
 	cluster_member_t *m;
-	int target = preferred_target, me = my_id();
+	uint64_t target = preferred_target, me = my_id();
 	int ret, x;
 	rg_state_t svcStatus;
 	
@@ -1258,7 +1258,7 @@ handle_relocate_req(char *svcName, int request, uint64_t preferred_target,
 	    svcStatus.rs_state == RG_STATE_UNINITIALIZED)
 		return RG_EINVAL;
 
-	if (preferred_target > 0) {
+	if (preferred_target != NODE_ID_NONE) {
 		/* TODO: simplify this and don't keep alloc/freeing 
 		   member lists */
 		allowed_nodes = member_list();
@@ -1291,7 +1291,7 @@ handle_relocate_req(char *svcName, int request, uint64_t preferred_target,
 			return RG_EFORWARD;
 	}
 
-	if (preferred_target > 0) {
+	if (preferred_target != NODE_ID_NONE) {
 
 		allowed_nodes = member_list();
 		/*
@@ -1364,7 +1364,7 @@ handle_relocate_req(char *svcName, int request, uint64_t preferred_target,
 		//count_resource_groups(allowed_nodes);
 	}
 
-	if (preferred_target > 0)
+	if (preferred_target != NODE_ID_NONE)
 		memb_mark_down(allowed_nodes, preferred_target);
 	memb_mark_down(allowed_nodes, me);
 
@@ -1393,19 +1393,19 @@ handle_relocate_req(char *svcName, int request, uint64_t preferred_target,
 		default:
 			/* deliberate fallthrough */
 			clulog(LOG_ERR,
-			       "#61: Invalid reply from member %d during"
+			       "#61: Invalid reply from member %llx during"
 			       " relocate operation!\n", target);
 		case RG_NO:
 			/* state uncertain */
 			cml_free(allowed_nodes);
 			clulog(LOG_CRIT, "State Uncertain: svc:%s "
-			       "nid:%d req:%s ret:%d\n", svcName,
+			       "nid:%llx req:%s ret:%d\n", svcName,
 			       target, rg_req_str(request), ret);
 			return 0;
 		case 0:
 			*new_owner = target;
 			clulog(LOG_NOTICE, "Service %s is now running "
-			       "on member %d\n", svcName, (int)target);
+			       "on member %llx\n", svcName, (int)target);
 			cml_free(allowed_nodes);
 			return 0;
 		}
@@ -1478,20 +1478,19 @@ handle_fd_start_req(char *svcName, int request, uint64_t *new_owner)
 			/* state uncertain */
 			cml_free(allowed_nodes);
 			clulog(LOG_DEBUG, "State Uncertain: %s "
-			       "nid:%08x%08x req:%d\n", svcName,
-			       (uint32_t)(target>>32)&0xffffffff,
-			       (uint32_t)(target&0xffffffff), request);
+			       "nid:%llx req:%d\n", svcName,
+			       target, request);
 			return 0;
 		case 0:
 			*new_owner = target;
 			clulog(LOG_NOTICE, "Service %s is now running "
-			       "on member %d\n", c_name(svcName),
+			       "on member %llx\n", c_name(svcName),
 			       (int)target);
 			cml_free(allowed_nodes);
 			return 0;
 		default:
 			clulog(LOG_ERR,
-			       "#61: Invalid reply from member %d during"
+			       "#61: Invalid reply from member %llx during"
 			       " relocate operation!\n", target);
 		}
 	}
diff --git a/rgmanager/src/daemons/service_op.c b/rgmanager/src/daemons/service_op.c
index 6705a5f..3c02688 100644
--- a/rgmanager/src/daemons/service_op.c
+++ b/rgmanager/src/daemons/service_op.c
@@ -158,8 +158,8 @@ service_op_stop(char *svcName, int do_disable, int event_type)
 
 	if ((fd = msg_open(msgtarget, RG_PORT, RG_PURPOSE, 2)) < 0) {
 		clulog(LOG_ERR,
-		       "#58: Failed opening connection to member #%d\n",
-		       msgtarget);
+		       "#58: Failed opening connection to member #0x%x\n",
+		       (unsigned)msgtarget);
 		return -1;
 	}
 
diff --git a/rgmanager/src/utils/clusvcadm.c b/rgmanager/src/utils/clusvcadm.c
index 0701c18..2e227d4 100644
--- a/rgmanager/src/utils/clusvcadm.c
+++ b/rgmanager/src/utils/clusvcadm.c
@@ -416,7 +416,7 @@ main(int argc, char **argv)
 		    msg.sm_data.d_svcOwner != svctarget) {
 			/* Service running somewhere besides where requested */
 	    		printf("Warning: Service %s is running on %s "
-	    			"instead of %s\n", svcname,
+	    			"instead of %s\n", printname?printname:svcname,
 	    			memb_id_to_name(membership,
 				       msg.sm_data.d_svcOwner),
 				memb_id_to_name(membership, svctarget));
@@ -424,7 +424,7 @@ main(int argc, char **argv)
 	    	}
 
 		/* No node specified or service running where requested */
-	    	printf("Service %s is now running on %s\n", svcname, 
+	    	printf("Service %s is now running on %s\n", printname?printname:svcname, 
 			memb_id_to_name(membership, msg.sm_data.d_svcOwner));
 		break;
 	default:


hooks/post-receive
--
Cluster Project

