From mboxrd@z Thu Jan  1 00:00:00 1970
Received: (qmail 18572 invoked by alias); 4 Oct 2006 08:22:18 -0000
Received: (qmail 18557 invoked by uid 9452); 4 Oct 2006 08:22:17 -0000
Date: Wed, 04 Oct 2006 08:22:00 -0000
Message-ID: <20061004082217.18555.qmail@sourceware.org>
From: pcaulfield@sourceware.org
To: lvm2-cvs@sourceware.org
Subject: LVM2 ./WHATS_NEW daemons/clvmd/Makefile.in dae ...
Mailing-List: contact lvm2-cvs-help@sourceware.org; run by ezmlm
Precedence: bulk
Sender: lvm2-cvs-owner@sourceware.org
X-SW-Source: 2006-10/txt/msg00004.txt.bz2

CVSROOT:        /cvs/lvm2
Module name:    LVM2
Changes by:     pcaulfield@sourceware.org       2006-10-04 08:22:16

Modified files:
        .              : WHATS_NEW
        daemons/clvmd  : Makefile.in clvm.h clvmd-command.c clvmd.c
                         lvm-functions.c lvm-functions.h
Added files:
        daemons/clvmd  : refresh_clvmd.c refresh_clvmd.h

Log message:
        Add -R switch to clvmd. This option will instruct all the clvmd
        daemons in the cluster to reload their device cache

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/WHATS_NEW.diff?cvsroot=lvm2&r1=1.448&r2=1.449
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/daemons/clvmd/refresh_clvmd.c.diff?cvsroot=lvm2&r1=NONE&r2=1.1
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/daemons/clvmd/refresh_clvmd.h.diff?cvsroot=lvm2&r1=NONE&r2=1.1
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/daemons/clvmd/Makefile.in.diff?cvsroot=lvm2&r1=1.15&r2=1.16
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/daemons/clvmd/clvm.h.diff?cvsroot=lvm2&r1=1.2&r2=1.3
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/daemons/clvmd/clvmd-command.c.diff?cvsroot=lvm2&r1=1.8&r2=1.9
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/daemons/clvmd/clvmd.c.diff?cvsroot=lvm2&r1=1.26&r2=1.27
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/daemons/clvmd/lvm-functions.c.diff?cvsroot=lvm2&r1=1.21&r2=1.22
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/daemons/clvmd/lvm-functions.h.diff?cvsroot=lvm2&r1=1.2&r2=1.3

--- LVM2/WHATS_NEW      2006/10/03 17:55:19     1.448
+++ LVM2/WHATS_NEW      2006/10/04 08:22:15     1.449
@@ -1,5 +1,6 @@
 Version 2.02.11 -
 =====================================
+  Add -R to clvmd which tells running clvmds to reload their device cache.
   Add LV column to reports listing kernel modules needed for activation.
   Show available fields if report given invalid field. (e.g. lvs -o list)
   Add timestamp functions with --disable-realtime configure option.

/cvs/lvm2/LVM2/daemons/clvmd/refresh_clvmd.c,v  -->  standard output
revision 1.1
--- LVM2/daemons/clvmd/refresh_clvmd.c
+++ -   2006-10-04 08:22:16.589972000 +0000
@@ -0,0 +1,334 @@
+/*
+ * Copyright (C) 2002-2004 Sistina Software, Inc. All rights reserved.
+ * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
+ *
+ * This file is part of LVM2.
+ *
+ * This copyrighted material is made available to anyone wishing to use,
+ * modify, copy, or redistribute it subject to the terms and conditions
+ * of the GNU General Public License v.2.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/*
+ * Tell all clvmds in a cluster to refresh their toolcontext
+ *
+ */
+
+#include <stddef.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include <limits.h>
+#include <unistd.h>
+#include <sys/socket.h>
+#include <sys/un.h>
+#include <libdevmapper.h>
+
+#include "clvm.h"
+#include "refresh_clvmd.h"
+
+typedef struct lvm_response {
+        char node[255];
+        char *response;
+        int status;
+        int len;
+} lvm_response_t;
+
+/*
+ * This gets stuck at the start of memory we allocate so we
+ * can sanity-check it at deallocation time
+ */
+#define LVM_SIGNATURE 0x434C564D
+
+static int _clvmd_sock = -1;
+
+/* Open connection to the Cluster Manager daemon */
+static int _open_local_sock(void)
+{
+        int local_socket;
+        struct sockaddr_un sockaddr;
+
+        /* Open local socket */
+        if ((local_socket = socket(PF_UNIX, SOCK_STREAM, 0)) < 0) {
+                fprintf(stderr, "Local socket creation failed: %s", strerror(errno));
+                return -1;
+        }
+
+        memset(&sockaddr, 0, sizeof(sockaddr));
+        memcpy(sockaddr.sun_path, CLVMD_SOCKNAME, sizeof(CLVMD_SOCKNAME));
+
+        sockaddr.sun_family = AF_UNIX;
+
+        if (connect(local_socket, (struct sockaddr *) &sockaddr,
+                    sizeof(sockaddr))) {
+                int saved_errno = errno;
+
+                fprintf(stderr, "connect() failed on local socket: %s\n",
+                        strerror(errno));
+                if (close(local_socket))
+                        return -1;
+
+                errno = saved_errno;
+                return -1;
+        }
+
+        return local_socket;
+}
+
+/* Send a request and return the status */
+static int _send_request(char *inbuf, int inlen, char **retbuf)
+{
+        char outbuf[PIPE_BUF];
+        struct clvm_header *outheader = (struct clvm_header *) outbuf;
+        int len;
+        int off;
+        int buflen;
+        int err;
+
+        /* Send it to CLVMD */
+ rewrite:
+        if ((err = write(_clvmd_sock, inbuf, inlen)) != inlen) {
+                if (err == -1 && errno == EINTR)
+                        goto rewrite;
+                fprintf(stderr, "Error writing data to clvmd: %s", strerror(errno));
+                return 0;
+        }
+
+        /* Get the response */
+ reread:
+        if ((len = read(_clvmd_sock, outbuf, sizeof(struct clvm_header))) < 0) {
+                if (errno == EINTR)
+                        goto reread;
+                fprintf(stderr, "Error reading data from clvmd: %s", strerror(errno));
+                return 0;
+        }
+
+        if (len == 0) {
+                fprintf(stderr, "EOF reading CLVMD");
+                errno = ENOTCONN;
+                return 0;
+        }
+
+        /* Allocate buffer */
+        buflen = len + outheader->arglen;
+        *retbuf = dm_malloc(buflen);
+        if (!*retbuf) {
+                errno = ENOMEM;
+                return 0;
+        }
+
+        /* Copy the header */
+        memcpy(*retbuf, outbuf, len);
+        outheader = (struct clvm_header *) *retbuf;
+
+        /* Read the returned values */
+        off = 1;                /* we've already read the first byte */
+        while (off <= outheader->arglen && len > 0) {
+                len = read(_clvmd_sock, outheader->args + off,
+                           buflen - off - offsetof(struct clvm_header, args));
+                if (len > 0)
+                        off += len;
+        }
+
+        /* Was it an error ? */
+        if (outheader->status != 0) {
+                errno = outheader->status;
+
+                /* Only return an error here if there are no node-specific
+                   errors present in the message that might have more detail */
+                if (!(outheader->flags & CLVMD_FLAG_NODEERRS)) {
+                        fprintf(stderr, "cluster request failed: %s\n", strerror(errno));
+                        return 0;
+                }
+
+        }
+
+        return 1;
+}
+
+/* Build the structure header and parse-out wildcard node names */
+static void _build_header(struct clvm_header *head, int cmd, const char *node,
+                          int len)
+{
+        head->cmd = cmd;
+        head->status = 0;
+        head->flags = 0;
+        head->clientid = 0;
+        head->arglen = len;
+
+        if (node) {
+                /*
+                 * Allow a couple of special node names:
+                 * "*" for all nodes,
+                 * "." for the local node only
+                 */
+                if (strcmp(node, "*") == 0) {
+                        head->node[0] = '\0';
+                } else if (strcmp(node, ".") == 0) {
+                        head->node[0] = '\0';
+                        head->flags = CLVMD_FLAG_LOCAL;
+                } else
+                        strcpy(head->node, node);
+        } else
+                head->node[0] = '\0';
+}
+
+/*
+ * Send a message to a (or all) node(s) in the cluster and wait for replies
+ */
+static int _cluster_request(char cmd, const char *node, void *data, int len,
+                            lvm_response_t ** response, int *num)
+{
+        char outbuf[sizeof(struct clvm_header) + len + strlen(node) + 1];
+        int *outptr;
+        char *inptr;
+        char *retbuf = NULL;
+        int status;
+        int i;
+        int num_responses = 0;
+        struct clvm_header *head = (struct clvm_header *) outbuf;
+        lvm_response_t *rarray;
+
+        *num = 0;
+
+        if (_clvmd_sock == -1)
+                _clvmd_sock = _open_local_sock();
+
+        if (_clvmd_sock == -1)
+                return 0;
+
+        _build_header(head, cmd, node, len);
+        memcpy(head->node + strlen(head->node) + 1, data, len);
+
+        status = _send_request(outbuf, sizeof(struct clvm_header) +
+                               strlen(head->node) + len, &retbuf);
+        if (!status)
+                goto out;
+
+        /* Count the number of responses we got */
+        head = (struct clvm_header *) retbuf;
+        inptr = head->args;
+        while (inptr[0]) {
+                num_responses++;
+                inptr += strlen(inptr) + 1;
+                inptr += sizeof(int);
+                inptr += strlen(inptr) + 1;
+        }
+
+        /*
+         * Allocate response array.
+         * With an extra pair of INTs on the front to sanity
+         * check the pointer when we are given it back to free
+         */
+        outptr = dm_malloc(sizeof(lvm_response_t) * num_responses +
+                           sizeof(int) * 2);
+        if (!outptr) {
+                errno = ENOMEM;
+                status = 0;
+                goto out;
+        }
+
+        *response = (lvm_response_t *) (outptr + 2);
+        outptr[0] = LVM_SIGNATURE;
+        outptr[1] = num_responses;
+        rarray = *response;
+
+        /* Unpack the response into an lvm_response_t array */
+        inptr = head->args;
+        i = 0;
+        while (inptr[0]) {
+                strcpy(rarray[i].node, inptr);
+                inptr += strlen(inptr) + 1;
+
+                memcpy(&rarray[i].status, inptr, sizeof(int));
+                inptr += sizeof(int);
+
+                rarray[i].response = dm_malloc(strlen(inptr) + 1);
+                if (rarray[i].response == NULL) {
+                        /* Free up everything else and return error */
+                        int j;
+                        for (j = 0; j < i; j++)
+                                dm_free(rarray[j].response);
+                        free(outptr);
+                        errno = ENOMEM;
+                        status = -1;
+                        goto out;
+                }
+
+                strcpy(rarray[i].response, inptr);
+                rarray[i].len = strlen(inptr);
+                inptr += strlen(inptr) + 1;
+                i++;
+        }
+        *num = num_responses;
+        *response = rarray;
+
+ out:
+        if (retbuf)
+                dm_free(retbuf);
+
+        return status;
+}
+
+/* Free reply array */
+static int _cluster_free_request(lvm_response_t * response)
+{
+        int *ptr = (int *) response - 2;
+        int i;
+        int num;
+
+        /* Check it's ours to free */
+        if (response == NULL || *ptr != LVM_SIGNATURE) {
+                errno = EINVAL;
+                return 0;
+        }
+
+        num = ptr[1];
+
+        for (i = 0; i < num; i++) {
+                dm_free(response[i].response);
+        }
+
+        dm_free(ptr);
+
+        return 1;
+}
+
+int refresh_clvmd()
+{
+        int num_responses;
+        char args[1]; // No args really.
+        lvm_response_t *response;
+        int saved_errno;
+        int status;
+        int i;
+
+        status = _cluster_request(CLVMD_CMD_REFRESH, "*", args, 0, &response, &num_responses);
+
+        /* If any nodes were down then display them and return an error */
+        for (i = 0; i < num_responses; i++) {
+                if (response[i].status == EHOSTDOWN) {
+                        fprintf(stderr, "clvmd not running on node %s",
+                                response[i].node);
+                        status = 0;
+                        errno = response[i].status;
+                } else if (response[i].status) {
+                        fprintf(stderr, "Error resetting node %s: %s",
+                                response[i].node,
+                                response[i].response[0] ?
+                                response[i].response :
+                                strerror(response[i].status));
+                        status = 0;
+                        errno = response[i].status;
+                }
+        }
+
+        saved_errno = errno;
+        _cluster_free_request(response);
+        errno = saved_errno;
+
+        return status;
+}

/cvs/lvm2/LVM2/daemons/clvmd/refresh_clvmd.h,v  -->  standard output
revision 1.1
--- LVM2/daemons/clvmd/refresh_clvmd.h
+++ -   2006-10-04 08:22:16.674075000 +0000
@@ -0,0 +1,2 @@
+int refresh_clvmd(void);
+
--- LVM2/daemons/clvmd/Makefile.in      2006/05/16 16:48:30     1.15
+++ LVM2/daemons/clvmd/Makefile.in      2006/10/04 08:22:16     1.16
@@ -19,6 +19,7 @@
        clvmd-command.c \
        clvmd.c \
        lvm-functions.c \
+       refresh_clvmd.c \
        system-lv.c
 
 ifeq ("@CLVMD@", "gulm")
--- LVM2/daemons/clvmd/clvm.h   2005/01/21 11:35:24     1.2
+++ LVM2/daemons/clvmd/clvm.h   2006/10/04 08:22:16     1.3
@@ -63,4 +63,7 @@
 #define CLVMD_CMD_LOCK_LV 50
 #define CLVMD_CMD_LOCK_VG 51
 
+/* Misc functions */
+#define CLVMD_CMD_REFRESH 40
+
 #endif
--- LVM2/daemons/clvmd/clvmd-command.c  2006/05/12 19:16:48     1.8
+++ LVM2/daemons/clvmd/clvmd-command.c  2006/10/04 08:22:16     1.9
@@ -122,6 +122,10 @@
                }
                break;
 
+       case CLVMD_CMD_REFRESH:
+               do_refresh_cache();
+               break;
+
        default:
                /* Won't get here because command is validated in pre_command */
                break;
@@ -222,6 +226,9 @@
                status = pre_lock_lv(lock_cmd, lock_flags, lockname);
                break;
 
+       case CLVMD_CMD_REFRESH:
+               break;
+
        default:
                log_error("Unknown command %d received\n", header->cmd);
                status = EINVAL;
--- LVM2/daemons/clvmd/clvmd.c  2006/03/14 14:18:34     1.26
+++ LVM2/daemons/clvmd/clvmd.c  2006/10/04 08:22:16     1.27
@@ -42,6 +42,7 @@
 #include "clvm.h"
 #include "version.h"
 #include "clvmd.h"
+#include "refresh_clvmd.h"
 #include "libdlm.h"
 #include "system-lv.h"
 #include "list.h"
@@ -143,6 +144,7 @@
        fprintf(file, " -V Show version of clvmd\n");
        fprintf(file, " -h Show this help information\n");
        fprintf(file, " -d Don't fork, run in the foreground\n");
+       fprintf(file, " -R Tell all running clvmds in the cluster to reload their device cache\n");
        fprintf(file, " -t Command timeout (default 60 seconds)\n");
        fprintf(file, "\n");
 }
@@ -173,7 +175,7 @@
        /* Deal with command-line arguments */
        opterr = 0;
        optind = 0;
-       while ((opt = getopt(argc, argv, "?vVhdt:")) != EOF) {
+       while ((opt = getopt(argc, argv, "?vVhdt:R")) != EOF) {
                switch (opt) {
                case 'h':
                        usage(argv[0], stdout);
@@ -183,6 +185,9 @@
                        usage(argv[0], stderr);
                        exit(0);
 
+               case 'R':
+                       return refresh_clvmd();
+
                case 'd':
                        debug++;
                        break;
--- LVM2/daemons/clvmd/lvm-functions.c  2006/08/22 09:49:20     1.21
+++ LVM2/daemons/clvmd/lvm-functions.c  2006/10/04 08:22:16     1.22
@@ -416,6 +416,13 @@
        return status == 1 ? 0 : EBUSY;
 }
 
+int do_refresh_cache()
+{
+       DEBUGLOG("Refreshing context\n");
+       log_notice("Refreshing context");
+       return refresh_toolcontext(cmd) == 1 ? 0 : -1;
+}
+
 /* Only called at gulm startup. Drop any leftover VG or P_orphan locks
    that might be hanging around if we died for any reason
--- LVM2/daemons/clvmd/lvm-functions.h  2005/03/07 17:03:44     1.2
+++ LVM2/daemons/clvmd/lvm-functions.h  2006/10/04 08:22:16     1.3
@@ -25,6 +25,7 @@
 extern int post_lock_lv(unsigned char lock_cmd, unsigned char lock_flags,
                        char *resource);
 extern int do_check_lvm1(char *vgname);
+extern int do_refresh_cache(void);
 extern int init_lvm(int using_gulm);
 extern void init_lvhash(void);
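
A minimal usage sketch (not part of the commit): the new refresh_clvmd()
entry point above is what "clvmd -R" returns from main(). Assuming a small
test program is compiled and linked against refresh_clvmd.c and the clvmd
headers from this change, it could drive the same cluster-wide device cache
reload directly. The file name refresh_test.c and the standalone build are
illustrative assumptions only.

/* refresh_test.c - hypothetical standalone driver for refresh_clvmd() */
#include <stdio.h>
#include <stdlib.h>

#include "refresh_clvmd.h"

int main(void)
{
        /*
         * refresh_clvmd() connects to the local clvmd socket, sends
         * CLVMD_CMD_REFRESH to all nodes ("*") and prints any per-node
         * failures on stderr.  It returns 1 when every node answered OK,
         * otherwise 0 or -1 with errno set to the failing status.
         */
        if (refresh_clvmd() != 1) {
                perror("clvmd device cache refresh failed");
                return EXIT_FAILURE;
        }

        printf("All running clvmds reloaded their device cache\n");
        return EXIT_SUCCESS;
}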