[Cluster-devel] cluster/rgmanager src/daemons/rg_forward.c src ...
lhh at sourceware.org
Wed Dec 13 18:20:00 UTC 2006
CVSROOT: /cvs/cluster
Module name: cluster
Branch: RHEL4
Changes by: lhh at sourceware.org 2006-12-13 18:19:57
Modified files:
rgmanager/src/daemons: rg_forward.c resrules.c rg_thread.c
reslist.c rg_state.c main.c
rgmanager/init.d: rgmanager
rgmanager/src/clulib: vft.c
rgmanager/src/utils: clusvcadm.c clustat.c
rgmanager/src/resources: ip.sh
rgmanager/include: resgroup.h
Log message:
Fix #193603 (part 2; return error code if rgmanager start fails), #210455, #212110, #216774, #212634/#218112.
Fix a bug in ip.sh that allowed the IP address to be started while the link was down, preventing failover (reported on linux-cluster).
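The user-visible effect of the new RG_ERUN return code is roughly the following; this is a minimal, self-contained sketch, not the actual clusvcadm code, with the constants and messages taken from the resgroup.h and clusvcadm.c hunks below:

#include <stdio.h>

#define RG_ERUN     -7   /* Service is running already */
#define RG_EAGAIN   -6   /* Try again (resource groups locked) */
#define RG_ESUCCESS  0

/* Map an rgmanager reply code to a message and a process exit status. */
static int report(int ret)
{
    switch (ret) {
    case RG_ESUCCESS:
        printf("success\n");
        return 0;
    case RG_ERUN:
        /* New in this commit: "already running" is reported, but the
           caller treats it as success rather than a failure. */
        printf("failed: Service is already running\n");
        return 0;
    case RG_EAGAIN:
        printf("failed: Try again (resource groups locked)\n");
        return 1;
    default:
        printf("failed: unknown reason %d\n", ret);
        return 1;
    }
}

int main(void)
{
    return report(RG_ERUN);
}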
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/rgmanager/src/daemons/rg_forward.c.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.2.2.1&r2=1.2.2.2
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/rgmanager/src/daemons/resrules.c.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.9.2.4&r2=1.9.2.5
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/rgmanager/src/daemons/rg_thread.c.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.7.2.7&r2=1.7.2.8
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/rgmanager/src/daemons/reslist.c.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.6.2.6&r2=1.6.2.7
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/rgmanager/src/daemons/rg_state.c.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.4.2.16&r2=1.4.2.17
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/rgmanager/src/daemons/main.c.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.9.2.20&r2=1.9.2.21
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/rgmanager/init.d/rgmanager.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.3.2.3&r2=1.3.2.4
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/rgmanager/src/clulib/vft.c.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.7.2.6&r2=1.7.2.7
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/rgmanager/src/utils/clusvcadm.c.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.2.2.6&r2=1.2.2.7
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/rgmanager/src/utils/clustat.c.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.5.2.14&r2=1.5.2.15
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/rgmanager/src/resources/ip.sh.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.5.2.15&r2=1.5.2.16
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/rgmanager/include/resgroup.h.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.3.2.7&r2=1.3.2.8
--- cluster/rgmanager/src/daemons/rg_forward.c 2006/01/20 16:25:25 1.2.2.1
+++ cluster/rgmanager/src/daemons/rg_forward.c 2006/12/13 18:19:56 1.2.2.2
@@ -97,7 +97,7 @@
msg_close(fd);
swab_SmMessageSt(&msg);
- send_response(msg.sm_data.d_ret, req);
+ send_response(msg.sm_data.d_ret, req->rr_target, req);
rq_free(req);
--- cluster/rgmanager/src/daemons/resrules.c 2006/10/20 20:57:19 1.9.2.4
+++ cluster/rgmanager/src/daemons/resrules.c 2006/12/13 18:19:56 1.9.2.5
@@ -248,17 +248,36 @@
}
+/**
+ * Store a resource action
+ * @param actsp Action array; may be modified and returned!
+ * @param name Name of the action
+ * @param depth Resource depth (status/monitor; -1 means *ALL LEVELS*
+ * ... this means that only the highest-level check depth
+ * will ever be performed!)
+ * @param timeout Timeout (not used)
+ * @param interval Time interval for status/monitor
+ * @return 0 on success, -1 on failure
+ *
+ */
int
store_action(resource_act_t **actsp, char *name, int depth,
int timeout, int interval)
{
- int x = 0;
+ int x = 0, replace = 0;
resource_act_t *acts = *actsp;
if (!name)
return -1;
+
+ if (depth < 0 && timeout < 0 && interval < 0)
+ return -1;
if (!acts) {
+ /* Can't create with anything < 0 */
+ if (depth < 0 || timeout < 0 || interval < 0)
+ return -1;
+
acts = malloc(sizeof(resource_act_t) * 2);
if (!acts)
return -1;
@@ -274,17 +293,38 @@
for (x = 0; acts[x].ra_name; x++) {
if (!strcmp(acts[x].ra_name, name) &&
- depth == acts[x].ra_depth) {
- printf("Skipping duplicate action/depth %s/%d\n",
- name, depth);
- return -1;
+ (depth == acts[x].ra_depth || depth == -1)) {
+ printf("Replacing action '%s' depth %d: ",
+ name, acts[x].ra_depth);
+ if (timeout >= 0) {
+ printf("timeout: %d->%d ",
+ (int)acts[x].ra_timeout,
+ (int)timeout);
+ acts[x].ra_timeout = timeout;
+ }
+ if (interval >= 0) {
+ printf("interval: %d->%d",
+ (int)acts[x].ra_interval,
+ (int)interval);
+ acts[x].ra_interval = interval;
+ }
+ printf("\n");
+ replace = 1;
}
}
+
+ if (replace)
+ /* If we replaced something, we're done */
+ return 1;
+
+ /* Can't create with anything < 0 */
+ if (depth < 0 || timeout < 0 || interval < 0)
+ return -1;
acts = realloc(acts, sizeof(resource_act_t) * (x+2));
if (!acts)
return -1;
-
+
acts[x].ra_name = name;
acts[x].ra_depth = depth;
acts[x].ra_timeout = timeout;
@@ -297,6 +337,7 @@
}
+
void
_get_actions(xmlDocPtr doc, xmlXPathContextPtr ctx, char *base,
resource_rule_t *rr)
@@ -324,8 +365,8 @@
ret = xpath_get_one(doc, ctx, xpath);
if (ret) {
timeout = expand_time(ret);
- if (interval < 0)
- interval = 0;
+ if (timeout < 0)
+ timeout = 0;
free(ret);
}
@@ -352,9 +393,8 @@
}
if (store_action(&rr->rr_actions, act, depth, timeout,
- interval) < 0)
+ interval) != 0)
free(act);
-
} while (1);
@@ -946,6 +986,14 @@
type = xpath_get_one(doc, ctx, base);
if (!type)
break;
+
+ if (!strcasecmp(type, "action")) {
+ fprintf(stderr,
+ "Error: Resource type '%s' is reserved",
+ type);
+ free(type);
+ break;
+ }
rr = malloc(sizeof(*rr));
if (!rr)
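For reference, the replace-or-append behaviour that store_action() now implements looks roughly like this; a simplified, self-contained sketch using stand-in types, not the rgmanager resource_act_t structures:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for resource_act_t: a malloc()ed array terminated by a
   NULL name. */
struct act {
    char *name;
    int interval;
};

/* Returns 0 if a new entry was appended, 1 if an existing entry was
   updated in place (the caller still owns 'name'), -1 on error. */
static int store(struct act **arr, char *name, int interval)
{
    struct act *a = *arr;
    int x;

    if (!a) {
        a = calloc(2, sizeof(*a));
        if (!a)
            return -1;
        a[0].name = name;
        a[0].interval = interval;
        *arr = a;
        return 0;
    }

    for (x = 0; a[x].name; x++) {
        if (!strcmp(a[x].name, name)) {
            a[x].interval = interval;   /* replace instead of skipping */
            return 1;
        }
    }

    a = realloc(a, sizeof(*a) * (x + 2));
    if (!a)
        return -1;
    a[x].name = name;
    a[x].interval = interval;
    memset(&a[x + 1], 0, sizeof(*a));   /* keep the terminator */
    *arr = a;
    return 0;
}

int main(void)
{
    struct act *acts = NULL;
    char *dup = strdup("status");

    store(&acts, strdup("status"), 30);
    if (store(&acts, dup, 10) == 1) {   /* duplicate name: updated */
        printf("interval is now %d\n", acts[0].interval);
        free(dup);                      /* not stored, caller frees it */
    }
    return 0;
}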
--- cluster/rgmanager/src/daemons/rg_thread.c 2006/11/03 16:26:18 1.7.2.7
+++ cluster/rgmanager/src/daemons/rg_thread.c 2006/12/13 18:19:56 1.7.2.8
@@ -145,7 +145,7 @@
list_remove(list, curr);
dprintf("Removed request %d\n", curr->rr_request);
if (curr->rr_resp_fd != -1) {
- send_response(RG_EABORT, curr);
+ send_response(RG_EABORT, NODE_ID_NONE, curr);
}
rq_free(curr);
}
@@ -412,9 +412,8 @@
if (ret != RG_NONE && rg_initialized() &&
(req->rr_resp_fd >= 0)) {
- send_response(error, req);
+ send_response(error, newowner, req);
}
-
rq_free(req);
}
@@ -611,7 +610,7 @@
case RG_START:
case RG_ENABLE:
send_ret(response_fd, resgroup->rt_name, RG_EDEADLCK,
- request);
+ request, NODE_ID_NONE);
break;
}
fprintf(stderr, "Failed to queue request: Would block\n");
--- cluster/rgmanager/src/daemons/reslist.c 2005/10/17 20:23:52 1.6.2.6
+++ cluster/rgmanager/src/daemons/reslist.c 2006/12/13 18:19:56 1.6.2.7
@@ -523,6 +523,83 @@
}
+/* Copied from resrules.c -- _get_actions */
+void
+_get_actions_ccs(int ccsfd, char *base, resource_t *res)
+{
+ char xpath[256];
+ int idx = 0;
+ char *act, *ret;
+ int interval, timeout, depth;
+
+ do {
+ /* setting these to -1 prevents overwriting with 0 */
+ interval = -1;
+ depth = -1;
+ act = NULL;
+ timeout = -1;
+
+ snprintf(xpath, sizeof(xpath),
+ "%s/action[%d]/@name", base, ++idx);
+
+#ifndef NO_CCS
+ if (ccs_get(ccsfd, xpath, &act) != 0)
+#else
+ if (conf_get(xpath, &act) != 0)
+#endif
+ break;
+
+ snprintf(xpath, sizeof(xpath),
+ "%s/action[%d]/@timeout", base, idx);
+#ifndef NO_CCS
+ if (ccs_get(ccsfd, xpath, &ret) == 0 && ret) {
+#else
+ if (conf_get(xpath, &ret) == 0 && ret) {
+#endif
+ timeout = expand_time(ret);
+ if (timeout < 0)
+ timeout = 0;
+ free(ret);
+ }
+
+ snprintf(xpath, sizeof(xpath),
+ "%s/action[%d]/@interval", base, idx);
+#ifndef NO_CCS
+ if (ccs_get(ccsfd, xpath, &ret) == 0 && ret) {
+#else
+ if (conf_get(xpath, &ret) == 0 && ret) {
+#endif
+ interval = expand_time(ret);
+ if (interval < 0)
+ interval = 0;
+ free(ret);
+ }
+
+ if (!strcmp(act, "status") || !strcmp(act, "monitor")) {
+ snprintf(xpath, sizeof(xpath),
+ "%s/action[%d]/@depth", base, idx);
+#ifndef NO_CCS
+ if (ccs_get(ccsfd, xpath, &ret) == 0 && ret) {
+#else
+ if (conf_get(xpath, &ret) == 0 && ret) {
+#endif
+ depth = atoi(ret);
+ if (depth < 0)
+ depth = 0;
+
+ /* */
+ if (ret[0] == '*')
+ depth = -1;
+ free(ret);
+ }
+ }
+
+ if (store_action(&res->r_actions, act, depth, timeout,
+ interval) != 0)
+ free(act);
+ } while (1);
+}
+
/**
Try to load all the attributes in our rule set. If none are found,
@@ -628,12 +705,12 @@
}
if (!found) {
- //printf("No attributes found for %s\n", base);
destroy_resource(res);
return NULL;
}
res->r_actions = act_dup(rule->rr_actions);
+ _get_actions_ccs(ccsfd, base, res);
return res;
}
@@ -660,7 +737,7 @@
for (resID = 1; ; resID++) {
snprintf(tok, sizeof(tok), RESOURCE_BASE "/%s[%d]",
currule->rr_type, resID);
-
+
newres = load_resource(ccsfd, currule, tok);
if (!newres)
break;
--- cluster/rgmanager/src/daemons/rg_state.c 2006/09/28 20:02:07 1.4.2.16
+++ cluster/rgmanager/src/daemons/rg_state.c 2006/12/13 18:19:56 1.4.2.17
@@ -111,123 +111,6 @@
int
-clu_lock_verbose(char *resource, int dflt_flags, void **lockpp)
-{
- int ret, timed_out = 0;
- struct timeval start, now;
- uint64_t nodeid, *p;
- int flags;
- int conv = 0, err;
- int block = !(dflt_flags & CLK_NOWAIT);
-
- /* Holder not supported for this call */
- dflt_flags &= ~CLK_HOLDER;
-
- flags = dflt_flags;
-
- if (block) {
- gettimeofday(&start, NULL);
- start.tv_sec += 30;
- }
-
- /* Ripped from global.c in magma */
- if (!(dflt_flags & CLK_CONVERT) &&
- (block || ((dflt_flags & CLK_EX) == 0))) {
- /* Acquire NULL lock */
- ret = clu_lock(resource, CLK_NULL, lockpp);
- err = errno;
- if (ret == 0) {
- if ((flags & CLK_EX) == 0) {
- /* User only wanted a NULL lock... */
- return 0;
- }
- /*
- Ok, NULL lock was taken, rest of blocking
- call should be done using lock conversions.
- */
- flags |= CLK_CONVERT;
- conv = 1;
- } else {
- switch(err) {
- case EINVAL:
- /* Oops, null locks don't work on this
- plugin; use normal spam mode */
- break;
- default:
- errno = err;
- return -1;
- }
- }
- }
-
- while (1) {
- if (block) {
- gettimeofday(&now, NULL);
-
- if ((now.tv_sec > start.tv_sec) ||
- ((now.tv_sec == start.tv_sec) &&
- (now.tv_usec >= start.tv_usec))) {
-
- gettimeofday(&start, NULL);
- start.tv_sec += 30;
-
- timed_out = 1;
- flags |= CLK_HOLDER;
- }
- }
-
- *lockpp = NULL;
-
- /* Take the lock (convert if possible). */
- ret = clu_lock(resource, flags | CLK_NOWAIT |
- ((conv && !timed_out) ? CLK_CONVERT : 0),
- lockpp);
- err = errno;
-
- if ((ret != 0) && (err == EAGAIN) && block) {
- if (timed_out) {
- p = (uint64_t *)*lockpp;
- if (p) {
- nodeid = *p;
- clulog(LOG_WARNING, "Node ID:%08x%08x"
- " stuck with lock %s\n",
- (uint32_t)(nodeid>>32&0xffffffff),
- (uint32_t)nodeid&0xffffffff,
- resource);
- free(p);
- } else {
- clulog(LOG_WARNING, "Starving for lock"
- " %s\n", resource);
- }
- flags = dflt_flags;
- timed_out = 0;
- }
- usleep(random()&32767<<1);
- continue;
-
- } else if (ret == 0) {
- /* Success */
- return 0;
- }
-
- break;
- }
-
- /* Fatal error. If we took an automatic NL lock with the hopes of
- converting it, release the lock before returning */
- if (conv == 1 && ret < 0) {
- clu_unlock(resource, *lockpp);
- *lockpp = NULL;
- }
-
- if (ret < 0)
- errno = err;
-
- return ret;
-}
-
-
-int
#ifdef DEBUG
_rg_lock(char *name, void **p)
#else
@@ -237,7 +120,7 @@
char res[256];
snprintf(res, sizeof(res), "usrm::rg=\"%s\"", name);
- return clu_lock_verbose(res, CLK_EX, p);
+ return clu_lock(res, CLK_EX, p);
}
@@ -277,7 +160,7 @@
void
-send_ret(int fd, char *name, int ret, int orig_request)
+send_ret(int fd, char *name, int ret, int orig_request, uint64_t newowner)
{
SmMessageSt msg, *msgp = &msg;
if (fd < 0)
@@ -289,7 +172,11 @@
msgp->sm_data.d_action = orig_request;
strncpy(msgp->sm_data.d_svcName, name,
sizeof(msgp->sm_data.d_svcName));
- msgp->sm_data.d_svcOwner = my_id(); /* XXX Broken */
+ if (newowner == NODE_ID_NONE) {
+ msgp->sm_data.d_svcOwner = my_id(); /* XXX Broken */
+ } else {
+ msgp->sm_data.d_svcOwner = newowner;
+ }
msgp->sm_data.d_ret = ret;
swab_SmMessageSt(msgp);
@@ -301,7 +188,7 @@
void
-send_response(int ret, request_t *req)
+send_response(int ret, uint64_t newowner, request_t *req)
{
SmMessageSt msg, *msgp = &msg;
@@ -314,7 +201,11 @@
msgp->sm_data.d_action = req->rr_orig_request;
strncpy(msgp->sm_data.d_svcName, req->rr_group,
sizeof(msgp->sm_data.d_svcName));
- msgp->sm_data.d_svcOwner = my_id(); /* XXX Broken */
+ if (newowner == NODE_ID_NONE) {
+ msgp->sm_data.d_svcOwner = my_id(); /* XXX Broken */
+ } else {
+ msgp->sm_data.d_svcOwner = newowner;
+ }
msgp->sm_data.d_ret = ret;
swab_SmMessageSt(msgp);
@@ -594,6 +485,7 @@
* 1 = START service - return whatever it returns.
* 2 = DO NOT start service, return 0
* 3 = DO NOT start service, return RG_EAGAIN
+ * 4 = DO NOT start service, return RG_ERUN
*/
int
svc_advise_start(rg_state_t *svcStatus, char *svcName, int req)
@@ -618,7 +510,7 @@
clulog(LOG_DEBUG,
"RG %s is already running locally\n", svcName);
*/
- ret = 2;
+ ret = 4;
break;
}
@@ -630,7 +522,7 @@
svcName,
memb_id_to_name(membership,svcStatus->rs_owner));
*/
- ret = 2;
+ ret = 4;
break;
}
@@ -688,6 +580,7 @@
break;
case RG_STATE_STOPPED:
+ case RG_STATE_ERROR:
/* Don't actually enable if the RG is locked! */
if (rg_locked()) {
ret = 3;
@@ -719,7 +612,6 @@
svcName);
break;
- case RG_STATE_ERROR:
default:
clulog(LOG_ERR,
"#44: Cannot start RG %s: Invalid State %d\n",
@@ -771,6 +663,9 @@
case 3:
rg_unlock(svcName, lockp);
return RG_EAGAIN;
+ case 4:
+ rg_unlock(svcName, lockp);
+ return RG_ERUN;
default:
break;
}
@@ -1404,8 +1299,8 @@
/*
If services are locked, return the error
*/
- if (ret == RG_EAGAIN)
- return RG_EAGAIN;
+ if (ret == RG_EAGAIN || ret == RG_ERUN)
+ return ret;
/*
* If we succeeded, then we're done.
--- cluster/rgmanager/src/daemons/main.c 2006/10/05 17:52:27 1.9.2.20
+++ cluster/rgmanager/src/daemons/main.c 2006/12/13 18:19:56 1.9.2.21
@@ -426,6 +426,7 @@
sizeof (SmMessageSt))
clulog(LOG_ERR, "#40: Error replying to "
"action request.\n");
+ break;
}
/* Queue request */
--- cluster/rgmanager/init.d/rgmanager 2006/09/07 18:39:45 1.3.2.3
+++ cluster/rgmanager/init.d/rgmanager 2006/12/13 18:19:56 1.3.2.4
@@ -96,10 +96,14 @@
[ -z "$RGMGR_OPTS" ] && RGMGR_OPTS="-t 30"
echo -n $"Starting $ID: "
daemon $RGMGRD $RGMGR_OPTS
+ rv=$?
echo
# To be consistent...
- touch /var/lock/subsys/rgmanager
+ if [ $rv -eq 0 ]; then
+ touch /var/lock/subsys/rgmanager
+ fi
+ exit $rv
;;
restart)
--- cluster/rgmanager/src/clulib/vft.c 2006/05/12 21:28:31 1.7.2.6
+++ cluster/rgmanager/src/clulib/vft.c 2006/12/13 18:19:56 1.7.2.7
@@ -44,8 +44,6 @@
#include <signals.h>
-int clu_lock_verbose(char *lockname, int flags, void **lockpp);
-
static int vf_lfds[2];
static int vf_lfd = 0;
static key_node_t *key_list = NULL; /** List of key nodes. */
@@ -1187,7 +1185,7 @@
pthread_mutex_lock(&vf_mutex);
/* Obtain cluster lock on it. */
snprintf(lock_name, sizeof(lock_name), "usrm::vf");
- l = clu_lock_verbose(lock_name, CLK_EX, &lockp);
+ l = clu_lock(lock_name, CLK_EX, &lockp);
if (l < 0) {
clu_unlock(lock_name, lockp);
pthread_mutex_unlock(&vf_mutex);
@@ -1524,7 +1522,7 @@
/* Obtain cluster lock on it. */
pthread_mutex_lock(&vf_mutex);
snprintf(lock_name, sizeof(lock_name), "usrm::vf");
- l = clu_lock_verbose(lock_name, CLK_EX, &lockp);
+ l = clu_lock(lock_name, CLK_EX, &lockp);
if (l < 0) {
clu_unlock(lock_name, lockp);
pthread_mutex_unlock(&vf_mutex);
--- cluster/rgmanager/src/utils/clusvcadm.c 2006/05/12 21:28:31 1.2.2.6
+++ cluster/rgmanager/src/utils/clusvcadm.c 2006/12/13 18:19:56 1.2.2.7
@@ -153,7 +153,7 @@
printf("Resource Group Control Commands:\n");
printf(" %s -v Display version and exit\n",name);
printf(" %s -d <group> Disable <group>\n", name);
-printf(" %s -e <group> Enable <group>\n",
+printf(" %s -e <group> Enable <group> on the local node\n",
name);
printf(" %s -e <group> -m <member> Enable <group>"
" on <member>\n", name);
@@ -317,12 +317,31 @@
fprintf(stderr, "Error receiving reply!\n");
return 1;
}
-
+
/* Decode */
swab_SmMessageSt(&msg);
switch (msg.sm_data.d_ret) {
case SUCCESS:
printf("success\n");
+
+ /* Non-start/relo request: done */
+ if (action != RG_RELOCATE && action != RG_ENABLE)
+ break;
+
+ if (svctarget != NODE_ID_NONE &&
+ msg.sm_data.d_svcOwner != svctarget) {
+ /* Service running somewhere besides where requested */
+ printf("Warning: Service %s is running on %s "
+ "instead of %s\n", svcname,
+ memb_id_to_name(membership,
+ msg.sm_data.d_svcOwner),
+ memb_id_to_name(membership, svctarget));
+ break;
+ }
+
+ /* No node specified or service running where requested */
+ printf("Service %s is now running on %s\n", svcname,
+ memb_id_to_name(membership, msg.sm_data.d_svcOwner));
break;
case RG_EFAIL:
printf("failed\n");
@@ -339,6 +358,10 @@
case RG_EAGAIN:
printf("failed: Try again (resource groups locked)\n");
break;
+ case RG_ERUN:
+ printf("failed: Service is already running\n");
+ return 0;
+ break;
default:
printf("failed: unknown reason %d\n", msg.sm_data.d_ret);
break;
--- cluster/rgmanager/src/utils/clustat.c 2006/09/07 18:39:45 1.5.2.14
+++ cluster/rgmanager/src/utils/clustat.c 2006/12/13 18:19:56 1.5.2.15
@@ -358,14 +358,14 @@
}
-void
+int
txt_rg_states(rg_state_list_t *rgl, cluster_member_list_t *members,
char *svcname, int flags)
{
- int x;
+ int x, ret = 0;
if (!rgl || !members)
- return;
+ return -1;
if (!(flags & RG_VERBOSE)) {
printf(" %-20.20s %-30.30s %-14.14s\n",
@@ -382,18 +382,31 @@
strcmp(rgl->rgl_states[x].rs_name, svcname))
continue;
txt_rg_state(&rgl->rgl_states[x], members, flags);
+ if (svcname) {
+ switch (rgl->rgl_states[x].rs_state) {
+ case RG_STATE_STARTING:
+ case RG_STATE_STARTED:
+ case RG_STATE_STOPPING:
+ break;
+ default:
+ ret = rgl->rgl_states[x].rs_state;
+ }
+ }
}
+
+ return ret;
}
-void
+int
xml_rg_states(rg_state_list_t *rgl, cluster_member_list_t *members,
char *svcname)
{
int x;
+ int ret = 0;
if (!rgl || !members)
- return;
+ return -1;
printf(" <groups>\n");
@@ -401,14 +414,23 @@
if (svcname &&
strcmp(rgl->rgl_states[x].rs_name, svcname))
continue;
- xml_rg_state(&rgl->rgl_states[x], members, 0);
+ if (svcname) {
+ switch (rgl->rgl_states[x].rs_state) {
+ case RG_STATE_STARTING:
+ case RG_STATE_STARTED:
+ case RG_STATE_STOPPING:
+ break;
+ default:
+ ret = rgl->rgl_states[x].rs_state;
+ }
+ }
}
printf(" </groups>\n");
+ return ret;
}
-
void
txt_quorum_state(int qs)
{
@@ -481,14 +503,14 @@
}
-void
+int
txt_member_states(cluster_member_list_t *membership, char *name)
{
- int x;
+ int x, ret = 0;
if (!membership) {
printf("Membership information not available\n");
- return;
+ return -1;
}
printf(" %-40.40s %s\n", "Member Name", "Status");
@@ -498,20 +520,22 @@
if (name && strcmp(membership->cml_members[x].cm_name, name))
continue;
txt_member_state(&membership->cml_members[x]);
+ ret = !(membership->cml_members[x].cm_state & FLAG_UP);
}
printf("\n");
+ return ret;
}
-void
+int
xml_member_states(cluster_member_list_t *membership, char *name)
{
- int x;
+ int x, ret = 0;
if (!membership) {
printf(" <nodes/>\n");
- return;
+ return -1;
}
printf(" <nodes>\n");
@@ -519,16 +543,22 @@
if (name && strcmp(membership->cml_members[x].cm_name, name))
continue;
xml_member_state(&membership->cml_members[x]);
+ if (name)
+ ret = !(membership->cml_members[x].cm_state & FLAG_UP);
}
printf(" </nodes>\n");
+
+ return ret;
}
-void
+int
txt_cluster_status(int qs, cluster_member_list_t *membership,
rg_state_list_t *rgs, char *name, char *svcname,
int flags)
{
+ int ret;
+
if (!svcname && !name) {
txt_quorum_state(qs);
if (!membership || !(qs & QF_GROUPMEMBER)) {
@@ -538,50 +568,43 @@
}
if (!svcname || (name && svcname))
- txt_member_states(membership, name);
+ ret = txt_member_states(membership, name);
+ if (name && !svcname)
+ return ret;
if (!name || (name && svcname))
- txt_rg_states(rgs, membership, svcname, flags);
+ ret = txt_rg_states(rgs, membership, svcname, flags);
+ return ret;
}
-void
+int
xml_cluster_status(int qs, cluster_member_list_t *membership,
rg_state_list_t *rgs, char *name, char *svcname,
int flags)
{
+ int ret1 = 0, ret2 = -1;
+
printf("<?xml version=\"1.0\"?>\n");
printf("<clustat version=\"4.1.1\">\n");
if (!svcname && !name)
xml_quorum_state(qs);
if (!svcname || (name && svcname))
- xml_member_states(membership, name);
+ ret1 = xml_member_states(membership, name);
+
if (rgs &&
(!name || (name && svcname)))
- xml_rg_states(rgs, membership, svcname);
+ ret2 = xml_rg_states(rgs, membership, svcname);
printf("</clustat>\n");
+
+ if (name && ret1)
+ return ret1;
+ if (svcname && ret2)
+ return ret2;
+ return 0;
}
-void
-dump_node(cluster_member_t *node)
-{
- printf("Node %s state %02x\n", node->cm_name, node->cm_state);
-}
-
-
-void
-dump_nodes(cluster_member_list_t *nodes)
-{
- int x;
-
- for (x=0; x<nodes->cml_count; x++) {
- dump_node(&nodes->cml_members[x]);
- }
-}
-
-
-
cluster_member_list_t *
build_member_list(uint64_t *lid)
{
@@ -778,11 +801,13 @@
}
if (xml)
- xml_cluster_status(qs, membership, rgs, member_name,
- rg_name,flags);
+ ret = xml_cluster_status(qs, membership, rgs,
+ member_name, rg_name,
+ flags);
else
- txt_cluster_status(qs, membership, rgs, member_name,
- rg_name,flags);
+ ret = txt_cluster_status(qs, membership, rgs,
+ member_name, rg_name,
+ flags);
if (membership)
cml_free(membership);
--- cluster/rgmanager/src/resources/ip.sh 2006/05/16 20:03:04 1.5.2.15
+++ cluster/rgmanager/src/resources/ip.sh 2006/12/13 18:19:57 1.5.2.16
@@ -884,6 +884,9 @@
exit 0
fi
ip_op ${OCF_RESKEY_family} add ${OCF_RESKEY_address}
+ if [ $? -ne 0 ]; then
+ exit $OCF_ERR_GENERIC
+ fi
if [ $NFS_TRICKS -eq 0 ]; then
if [ "$OCF_RESKEY_nfslock" = "yes" ] || \
--- cluster/rgmanager/include/resgroup.h 2006/05/12 21:28:30 1.3.2.7
+++ cluster/rgmanager/include/resgroup.h 2006/12/13 18:19:57 1.3.2.8
@@ -121,8 +121,8 @@
int rt_enqueue_request(const char *resgroupname, int request, int response_fd,
int max, uint64_t target, int arg0, int arg1);
-void send_response(int ret, request_t *req);
-void send_ret(int fd, char *name, int ret, int req);
+void send_response(int ret, uint64_t owner, request_t *req);
+void send_ret(int fd, char *name, int ret, int req, uint64_t newowner);
/* do this op on all resource groups. The handler for the request
will sort out whether or not it's a valid request given the state */
@@ -156,12 +156,13 @@
cluster_member_list_t *member_list(void);
uint64_t my_id(void);
-#define RG_EAGAIN -6
-#define RG_EDEADLCK -5
-#define RG_ENOSERVICE -4
-#define RG_EFORWARD -3
-#define RG_EABORT -2
-#define RG_EFAIL -1
+#define RG_ERUN -7 /* Service is running already */
+#define RG_EAGAIN -6 /* Try again */
+#define RG_EDEADLCK -5 /* Operation would cause deadlock */
+#define RG_ENOSERVICE -4 /* Service does not exist */
+#define RG_EFORWARD -3 /* Ask current service owner to do this, please */
+#define RG_EABORT -2 /* Request cancelled */
+#define RG_EFAIL -1 /* Generic error */
#define RG_ESUCCESS 0