rpms/lvm2-cluster/devel clvmd-gulm.patch, NONE, 1.1 lvm2-cluster.spec, 1.9, 1.10
fedora-cvs-commits at redhat.com
fedora-cvs-commits at redhat.com
Thu Apr 21 13:35:28 UTC 2005
Update of /cvs/dist/rpms/lvm2-cluster/devel
In directory cvs.devel.redhat.com:/tmp/cvs-serv26631
Modified Files:
lvm2-cluster.spec
Added Files:
clvmd-gulm.patch
Log Message:
sync with RHEL4
clvmd-gulm.patch:
clvmd-gulm.c | 58 +++++++++++++++++++++++++++++++++++++++++-----------------
clvmd.c | 27 +++++++++++++++++++++++----
clvmd.h | 1 +
tcp-comms.c | 2 ++
4 files changed, 67 insertions(+), 21 deletions(-)
--- NEW FILE clvmd-gulm.patch ---
diff -ru LVM2.2.01.09/daemons/clvmd/clvmd.c LVM2.2.01.09.2/daemons/clvmd/clvmd.c
--- LVM2.2.01.09/daemons/clvmd/clvmd.c 2005-04-01 14:01:01.000000000 +0100
+++ LVM2.2.01.09.2/daemons/clvmd/clvmd.c 2005-04-13 17:11:26.000000000 +0100
@@ -280,6 +280,7 @@
child_init_signal(DFAIL_MALLOC);
newfd->fd = local_sock;
+ newfd->removeme = 0;
newfd->type = LOCAL_RENDEZVOUS;
newfd->callback = local_rendezvous_callback;
newfd->next = local_client_head.next;
@@ -346,6 +347,7 @@
newfd->fd = client_fd;
newfd->type = LOCAL_SOCK;
newfd->xid = 0;
+ newfd->removeme = 0;
newfd->callback = local_sock_callback;
newfd->bits.localsock.replies = NULL;
newfd->bits.localsock.expected_replies = 0;
@@ -519,6 +521,20 @@
for (thisfd = &local_client_head; thisfd != NULL;
thisfd = thisfd->next) {
+
+ if (thisfd->removeme) {
+ struct local_client *free_fd;
+ lastfd->next = thisfd->next;
+ free_fd = thisfd;
+ thisfd = lastfd;
+
+ DEBUGLOG("removeme set for fd %d\n", free_fd->fd);
+
+ /* Queue cleanup, this also frees the client struct */
+ add_to_lvmqueue(free_fd, NULL, 0, NULL);
+ break;
+ }
+
if (FD_ISSET(thisfd->fd, &in)) {
struct local_client *newfd;
int ret;
@@ -905,6 +921,7 @@
DEBUGLOG("creating pipe, [%d, %d]\n", comms_pipe[0],
comms_pipe[1]);
newfd->fd = comms_pipe[0];
+ newfd->removeme = 0;
newfd->type = THREAD_PIPE;
newfd->callback = local_pipe_callback;
newfd->next = thisfd->next;
@@ -1061,8 +1078,8 @@
/* Get the node name as we /may/ need it later */
clops->name_from_csid(csid, nodename);
- DEBUGLOG("process_remote_command %d for clientid 0x%x on node %s\n",
- msg->cmd, msg->clientid, nodename);
+ DEBUGLOG("process_remote_command %d for clientid 0x%x XID %d on node %s\n",
+ msg->cmd, msg->clientid, msg->xid, nodename);
/* Is the data to be found in the system LV ? */
if (msg->flags & CLVMD_FLAG_SYSTEMLV) {
@@ -1575,9 +1592,10 @@
static int process_work_item(struct lvm_thread_cmd *cmd)
{
-
/* If msg is NULL then this is a cleanup request */
if (cmd->msg == NULL) {
+ DEBUGLOG("process_work_item: free fd %d\n", cmd->client->fd);
+ close(cmd->client->fd);
cmd_client_cleanup(cmd->client);
free(cmd->client);
return 0;
@@ -1638,7 +1656,8 @@
pthread_mutex_unlock(&lvm_thread_mutex);
process_work_item(cmd);
- free(cmd->msg);
+ if (cmd->msg)
+ free(cmd->msg);
free(cmd);
pthread_mutex_lock(&lvm_thread_mutex);
diff -ru LVM2.2.01.09/daemons/clvmd/clvmd-gulm.c LVM2.2.01.09.2/daemons/clvmd/clvmd-gulm.c
--- LVM2.2.01.09/daemons/clvmd/clvmd-gulm.c 2005-03-07 17:03:44.000000000 +0000
+++ LVM2.2.01.09.2/daemons/clvmd/clvmd-gulm.c 2005-04-13 17:11:26.000000000 +0100
@@ -60,8 +60,9 @@
/* hash list of outstanding lock requests */
static struct hash_table *lock_hash;
-/* Copy of the current core state */
-static uint8_t current_corestate;
+/* Copy of the current quorate state */
+static uint8_t gulm_quorate = 0;
+static enum {INIT_NOTDONE, INIT_DONE, INIT_WAITQUORATE} init_state = INIT_NOTDONE;
/* Number of active nodes */
static int num_nodes;
@@ -312,12 +313,16 @@
if (error)
exit(error);
- current_corestate = corestate;
+ /* Get the current core state (for quorum) */
+ lg_core_corestate(gulm_if);
+
return 0;
}
static void set_node_state(struct node_info *ninfo, char *csid, uint8_t nodestate)
{
+ int oldstate = ninfo->state;
+
if (nodestate == lg_core_Logged_in)
{
/* Don't clobber NODE_CLVMD state */
@@ -339,11 +344,17 @@
if (ninfo->state != NODE_DOWN)
num_nodes--;
ninfo->state = NODE_DOWN;
- tcp_remove_client(csid);
}
}
- DEBUGLOG("set_node_state, '%s' state = %d, num_nodes=%d\n",
- ninfo->name, ninfo->state, num_nodes);
+ /* Gulm doesn't always send node DOWN events, so even if this is a node UP we must
+ * assume (ahem) that it previously went down at some time. So we close
+ * the sockets here to make sure that we don't have any dead connections
+ * to that node.
+ */
+ tcp_remove_client(csid);
+
+ DEBUGLOG("set_node_state, '%s' state = %d (oldstate=%d), num_nodes=%d\n",
+ ninfo->name, ninfo->state, oldstate, num_nodes);
}
static struct node_info *add_or_set_node(char *name, struct in6_addr *ip, uint8_t state)
@@ -400,7 +411,16 @@
char ourcsid[GULM_MAX_CSID_LEN];
DEBUGLOG("Got Nodelist, stop\n");
- clvmd_cluster_init_completed();
+ if (gulm_quorate)
+ {
+ clvmd_cluster_init_completed();
+ init_state = INIT_DONE;
+ }
+ else
+ {
+ if (init_state == INIT_NOTDONE)
+ init_state = INIT_WAITQUORATE;
+ }
/* Mark ourself as up */
_get_our_csid(ourcsid);
@@ -418,10 +438,15 @@
static int core_statechange(void *misc, uint8_t corestate, uint8_t quorate, struct in6_addr *masterip, char *mastername)
{
- DEBUGLOG("CORE Got statechange corestate:%#x mastername:%s\n",
- corestate, mastername);
+ DEBUGLOG("CORE Got statechange. quorate:%d, corestate:%x mastername:%s\n",
+ quorate, corestate, mastername);
- current_corestate = corestate;
+ gulm_quorate = quorate;
+ if (quorate && init_state == INIT_WAITQUORATE)
+ {
+ clvmd_cluster_init_completed();
+ init_state = INIT_DONE;
+ }
return 0;
}
@@ -474,7 +499,7 @@
lock_start_flag = 0;
pthread_mutex_unlock(&lock_start_mutex);
}
-
+
return 0;
}
@@ -615,7 +640,11 @@
}
DEBUGLOG("gulm_add_up_node %s\n", ninfo->name);
+
+ if (ninfo->state == NODE_DOWN)
+ num_nodes++;
ninfo->state = NODE_CLVMD;
+
return;
}
@@ -853,12 +882,7 @@
static int _is_quorate()
{
- if (current_corestate == lg_core_Slave ||
- current_corestate == lg_core_Master ||
- current_corestate == lg_core_Client)
- return 1;
- else
- return 0;
+ return gulm_quorate;
}
/* Get all the cluster node names & IPs from CCS and
diff -ru LVM2.2.01.09/daemons/clvmd/clvmd.h LVM2.2.01.09.2/daemons/clvmd/clvmd.h
--- LVM2.2.01.09/daemons/clvmd/clvmd.h 2005-03-09 14:08:11.000000000 +0000
+++ LVM2.2.01.09.2/daemons/clvmd/clvmd.h 2005-04-13 17:11:26.000000000 +0100
@@ -86,6 +86,7 @@
struct local_client *next;
unsigned short xid;
fd_callback_t callback;
+ uint8_t removeme;
union {
struct localsock_bits localsock;
diff -ru LVM2.2.01.09/daemons/clvmd/tcp-comms.c LVM2.2.01.09.2/daemons/clvmd/tcp-comms.c
--- LVM2.2.01.09/daemons/clvmd/tcp-comms.c 2005-04-01 17:03:00.000000000 +0100
+++ LVM2.2.01.09.2/daemons/clvmd/tcp-comms.c 2005-04-13 17:11:26.000000000 +0100
@@ -105,6 +105,7 @@
if (client)
{
hash_remove_binary(sock_hash, csid, GULM_MAX_CSID_LEN);
+ client->removeme = 1;
}
/* Look for a mangled one too */
@@ -114,6 +115,7 @@
if (client)
{
hash_remove_binary(sock_hash, csid, GULM_MAX_CSID_LEN);
+ client->removeme = 1;
}
/* Put it back as we found it */
Index: lvm2-cluster.spec
===================================================================
RCS file: /cvs/dist/rpms/lvm2-cluster/devel/lvm2-cluster.spec,v
retrieving revision 1.9
retrieving revision 1.10
diff -u -r1.9 -r1.10
--- lvm2-cluster.spec 4 Apr 2005 15:52:01 -0000 1.9
+++ lvm2-cluster.spec 21 Apr 2005 13:35:25 -0000 1.10
@@ -1,17 +1,18 @@
Summary: Cluster extenstions for userland logical volume management tools
Name: lvm2-cluster
Version: 2.01.09
-Release: 1.0
+Release: 2.0
License: GPL
Group: System Environment/Base
URL: http://sources.redhat.com/lvm2
Source0: LVM2.%{version}.tgz
+Patch0: clvmd-gulm.patch
BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-buildroot
BuildRequires: device-mapper >= 1.01
BuildRequires: libselinux-devel
BuildRequires: dlm
Requires: lvm2 >= 2.01
-ExcludeArch: s390 s390x ppc
+ExclusiveArch: i386 ia64 x86_64
%define _exec_prefix /usr
%description
@@ -20,6 +21,8 @@
%prep
%setup -q -n LVM2.%{version}
+%patch0 -p1 -b .gulm
+
%build
%configure --with-clvmd=all --with-cluster=shared --with-user= --with-group=
make DESTDIR=$RPM_BUILD_ROOT
@@ -50,6 +53,11 @@
%config /etc/rc.d/init.d/clvmd
%changelog
+* Thu Apr 21 2005 Alasdair Kergon <agk at redhat.com> - 2.01.09-2.0
+- Improve co-operation between clvmd and gulm over node state changes. [146056]
+- Switch from 'ExcludeArch: s390 s390x ppc' to
+ 'ExclusiveArch: i386 ia64 x86_64' to match dlm dependency.
+
* Mon Apr 04 2005 Alasdair Kergon <agk at redhat.com> - 2.01.09-1.0
- clvmd: Don't allow user operations to start until lvm thread is fully up.
- clvmd-gulm: Set KEEPALIVE on sockets.
More information about the fedora-cvs-commits
mailing list