This is the mail archive of the cluster-cvs@sourceware.org mailing list for the cluster project.


STABLE2 - gfs-kernel: Bug 450209: Create gfs1-specific lock modules + minor fixes to build with 2.6.27


Gitweb:        http://git.fedorahosted.org/git/cluster.git?p=cluster.git;a=commitdiff;h=ff7d89bfe60ed041d9342c8c9d91815c1f3d3bef
Commit:        ff7d89bfe60ed041d9342c8c9d91815c1f3d3bef
Parent:        7d5a4d0247c2d874d04be4ee4e74b8bf48ec09a3
Author:        Abhijith Das <adas@redhat.com>
AuthorDate:    Tue Aug 12 09:59:36 2008 -0500
Committer:     Fabio M. Di Nitto <fdinitto@redhat.com>
CommitterDate: Fri Oct 10 07:42:34 2008 +0200

gfs-kernel: Bug 450209: Create gfs1-specific lock modules + minor fixes to build with 2.6.27

gfs1 now has its own lock modules and no longer depends on gfs2.ko,
lock_nolock.ko, or lock_dlm.ko.
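
For reference, the lock protocols are now built into gfs.ko itself (see the
Makefile hunk below) and hook in through the lm_lockops interface added in
lm_interface.h. A minimal sketch of that registration path, modeled on
init_lock_dlm() in this patch (my_ops, my_mount, and init/exit_my_proto are
illustrative names only, not symbols from the patch):

    #include <linux/module.h>
    #include "lm_interface.h"

    static int my_mount(char *table_name, char *host_data,
                        lm_callback_t cb, void *cb_data,
                        unsigned int min_lvb_size, int flags,
                        struct lm_lockstruct *lockstruct,
                        struct kobject *fskobj)
    {
            /* set up the lockspace, then fill in lockstruct->ls_ops,
               ls_lvb_size, ls_lockspace, ls_jid, ls_first, ls_flags */
            return 0;
    }

    static const struct lm_lockops my_ops = {
            .lm_proto_name = "lock_example",
            .lm_mount      = my_mount,
            .lm_owner      = THIS_MODULE,
    };

    /* called from gfs main.c at init time, like init_lock_dlm()/init_nolock() */
    int init_my_proto(void)
    {
            return gfs_register_lockproto(&my_ops);
    }

    void exit_my_proto(void)
    {
            gfs_unregister_lockproto(&my_ops);
    }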

This commit contains the lock modules patch and the following fixes to
make gfs1 build with Steve's git tree.

- change all instances of <asm/semaphore.h> to <linux/semaphore.h>
- change all calls to permission() to inode_permission()
- change all calls to remote_llseek() to generic_file_llseek_unlocked()

With this patch I was able to compile against Steve's git tree, insmod the
gfs.ko module, and mount a nolock filesystem with it. I don't have a cluster
running upstream bits, so I couldn't test the module in a cluster.
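
The three substitutions above follow a common pattern; a minimal before/after
sketch against the 2.6.27 interfaces (example_may_exec and example_llseek are
illustrative helpers, not functions from this patch):

    #include <linux/semaphore.h>    /* was: #include <asm/semaphore.h> */
    #include <linux/fs.h>

    static int example_may_exec(struct inode *dir)
    {
            /* pre-2.6.27: return permission(dir, MAY_EXEC, NULL); */
            return inode_permission(dir, MAY_EXEC);
    }

    static loff_t example_llseek(struct file *file, loff_t offset, int origin)
    {
            /* pre-2.6.27: return remote_llseek(file, offset, origin); */
            return generic_file_llseek_unlocked(file, offset, origin);
    }
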
---
 gfs-kernel/src/gfs/Makefile           |    7 +
 gfs-kernel/src/gfs/acl.c              |    2 +-
 gfs-kernel/src/gfs/bits.c             |    2 +-
 gfs-kernel/src/gfs/bmap.c             |    2 +-
 gfs-kernel/src/gfs/dio.c              |    2 +-
 gfs-kernel/src/gfs/dir.c              |    2 +-
 gfs-kernel/src/gfs/eaops.c            |    2 +-
 gfs-kernel/src/gfs/eattr.c            |    2 +-
 gfs-kernel/src/gfs/file.c             |    2 +-
 gfs-kernel/src/gfs/gfs.h              |    2 +-
 gfs-kernel/src/gfs/glock.c            |    2 +-
 gfs-kernel/src/gfs/glops.c            |    2 +-
 gfs-kernel/src/gfs/inode.c            |   10 +-
 gfs-kernel/src/gfs/ioctl.c            |    2 +-
 gfs-kernel/src/gfs/lm.c               |    8 +-
 gfs-kernel/src/gfs/lm_interface.h     |  278 +++++++++++++++++
 gfs-kernel/src/gfs/lock_dlm.h         |  182 ++++++++++++
 gfs-kernel/src/gfs/lock_dlm_lock.c    |  527 +++++++++++++++++++++++++++++++++
 gfs-kernel/src/gfs/lock_dlm_main.c    |   40 +++
 gfs-kernel/src/gfs/lock_dlm_mount.c   |  279 +++++++++++++++++
 gfs-kernel/src/gfs/lock_dlm_plock.c   |  439 +++++++++++++++++++++++++++
 gfs-kernel/src/gfs/lock_dlm_sysfs.c   |  225 ++++++++++++++
 gfs-kernel/src/gfs/lock_dlm_thread.c  |  367 +++++++++++++++++++++++
 gfs-kernel/src/gfs/lock_nolock_main.c |  230 ++++++++++++++
 gfs-kernel/src/gfs/locking.c          |  180 +++++++++++
 gfs-kernel/src/gfs/log.c              |    2 +-
 gfs-kernel/src/gfs/lops.c             |    2 +-
 gfs-kernel/src/gfs/lvb.c              |    2 +-
 gfs-kernel/src/gfs/main.c             |   12 +-
 gfs-kernel/src/gfs/mount.c            |    2 +-
 gfs-kernel/src/gfs/ondisk.c           |    2 +-
 gfs-kernel/src/gfs/ops_address.c      |    2 +-
 gfs-kernel/src/gfs/ops_dentry.c       |    2 +-
 gfs-kernel/src/gfs/ops_export.c       |    2 +-
 gfs-kernel/src/gfs/ops_file.c         |    6 +-
 gfs-kernel/src/gfs/ops_inode.c        |   16 +-
 gfs-kernel/src/gfs/ops_super.c        |    2 +-
 gfs-kernel/src/gfs/ops_vm.c           |    2 +-
 gfs-kernel/src/gfs/page.c             |    2 +-
 gfs-kernel/src/gfs/proc.c             |    2 +-
 gfs-kernel/src/gfs/quota.c            |    2 +-
 gfs-kernel/src/gfs/recovery.c         |    2 +-
 gfs-kernel/src/gfs/rgrp.c             |    2 +-
 gfs-kernel/src/gfs/super.c            |    2 +-
 gfs-kernel/src/gfs/sys.c              |    2 +-
 gfs-kernel/src/gfs/trans.c            |    2 +-
 gfs-kernel/src/gfs/unlinked.c         |    2 +-
 gfs-kernel/src/gfs/util.c             |    2 +-
 48 files changed, 2816 insertions(+), 54 deletions(-)

diff --git a/gfs-kernel/src/gfs/Makefile b/gfs-kernel/src/gfs/Makefile
index 19f3b4f..aa12e01 100644
--- a/gfs-kernel/src/gfs/Makefile
+++ b/gfs-kernel/src/gfs/Makefile
@@ -32,6 +32,13 @@ gfs-objs := acl.o \
 	inode.o \
 	ioctl.o \
 	lm.o \
+	locking.o \
+	lock_nolock_main.o \
+	lock_dlm_lock.o \
+	lock_dlm_main.o \
+	lock_dlm_mount.o \
+	lock_dlm_sysfs.o \
+	lock_dlm_thread.o \
 	log.o \
 	lops.o \
 	lvb.o \
diff --git a/gfs-kernel/src/gfs/acl.c b/gfs-kernel/src/gfs/acl.c
index 63cf1cd..a3e30b0 100644
--- a/gfs-kernel/src/gfs/acl.c
+++ b/gfs-kernel/src/gfs/acl.c
@@ -2,7 +2,7 @@
 #include <linux/slab.h>
 #include <linux/smp_lock.h>
 #include <linux/spinlock.h>
-#include <asm/semaphore.h>
+#include <linux/semaphore.h>
 #include <linux/completion.h>
 #include <linux/buffer_head.h>
 #include <linux/posix_acl.h>
diff --git a/gfs-kernel/src/gfs/bits.c b/gfs-kernel/src/gfs/bits.c
index 4265db8..af84bf0 100644
--- a/gfs-kernel/src/gfs/bits.c
+++ b/gfs-kernel/src/gfs/bits.c
@@ -11,7 +11,7 @@
 #include <linux/slab.h>
 #include <linux/smp_lock.h>
 #include <linux/spinlock.h>
-#include <asm/semaphore.h>
+#include <linux/semaphore.h>
 #include <linux/completion.h>
 #include <linux/buffer_head.h>
 
diff --git a/gfs-kernel/src/gfs/bmap.c b/gfs-kernel/src/gfs/bmap.c
index cc8744a..0efd1d1 100644
--- a/gfs-kernel/src/gfs/bmap.c
+++ b/gfs-kernel/src/gfs/bmap.c
@@ -2,7 +2,7 @@
 #include <linux/slab.h>
 #include <linux/smp_lock.h>
 #include <linux/spinlock.h>
-#include <asm/semaphore.h>
+#include <linux/semaphore.h>
 #include <linux/completion.h>
 #include <linux/buffer_head.h>
 
diff --git a/gfs-kernel/src/gfs/dio.c b/gfs-kernel/src/gfs/dio.c
index 7313f7b..0d09023 100644
--- a/gfs-kernel/src/gfs/dio.c
+++ b/gfs-kernel/src/gfs/dio.c
@@ -2,7 +2,7 @@
 #include <linux/slab.h>
 #include <linux/smp_lock.h>
 #include <linux/spinlock.h>
-#include <asm/semaphore.h>
+#include <linux/semaphore.h>
 #include <linux/completion.h>
 #include <linux/buffer_head.h>
 #include <linux/mm.h>
diff --git a/gfs-kernel/src/gfs/dir.c b/gfs-kernel/src/gfs/dir.c
index 14422af..7dadf7f 100644
--- a/gfs-kernel/src/gfs/dir.c
+++ b/gfs-kernel/src/gfs/dir.c
@@ -48,7 +48,7 @@
 #include <linux/slab.h>
 #include <linux/smp_lock.h>
 #include <linux/spinlock.h>
-#include <asm/semaphore.h>
+#include <linux/semaphore.h>
 #include <linux/completion.h>
 #include <linux/buffer_head.h>
 #include <linux/vmalloc.h>
diff --git a/gfs-kernel/src/gfs/eaops.c b/gfs-kernel/src/gfs/eaops.c
index 4a99707..dbd0254 100644
--- a/gfs-kernel/src/gfs/eaops.c
+++ b/gfs-kernel/src/gfs/eaops.c
@@ -2,7 +2,7 @@
 #include <linux/slab.h>
 #include <linux/smp_lock.h>
 #include <linux/spinlock.h>
-#include <asm/semaphore.h>
+#include <linux/semaphore.h>
 #include <linux/completion.h>
 #include <linux/buffer_head.h>
 #include <asm/uaccess.h>
diff --git a/gfs-kernel/src/gfs/eattr.c b/gfs-kernel/src/gfs/eattr.c
index 8f7d88c..062b682 100644
--- a/gfs-kernel/src/gfs/eattr.c
+++ b/gfs-kernel/src/gfs/eattr.c
@@ -2,7 +2,7 @@
 #include <linux/slab.h>
 #include <linux/smp_lock.h>
 #include <linux/spinlock.h>
-#include <asm/semaphore.h>
+#include <linux/semaphore.h>
 #include <linux/completion.h>
 #include <linux/buffer_head.h>
 #include <asm/uaccess.h>
diff --git a/gfs-kernel/src/gfs/file.c b/gfs-kernel/src/gfs/file.c
index a9c6ff2..3f8e5b9 100644
--- a/gfs-kernel/src/gfs/file.c
+++ b/gfs-kernel/src/gfs/file.c
@@ -2,7 +2,7 @@
 #include <linux/slab.h>
 #include <linux/smp_lock.h>
 #include <linux/spinlock.h>
-#include <asm/semaphore.h>
+#include <linux/semaphore.h>
 #include <linux/completion.h>
 #include <linux/buffer_head.h>
 #include <asm/uaccess.h>
diff --git a/gfs-kernel/src/gfs/gfs.h b/gfs-kernel/src/gfs/gfs.h
index 6616662..7cec09c 100644
--- a/gfs-kernel/src/gfs/gfs.h
+++ b/gfs-kernel/src/gfs/gfs.h
@@ -3,7 +3,7 @@
 
 #define RELEASE_VERSION "<CVS>"
 
-#include <linux/lm_interface.h>
+#include "lm_interface.h"
 
 #include "gfs_ondisk.h"
 #include "fixed_div64.h"
diff --git a/gfs-kernel/src/gfs/glock.c b/gfs-kernel/src/gfs/glock.c
index a54ffed..57a829d 100644
--- a/gfs-kernel/src/gfs/glock.c
+++ b/gfs-kernel/src/gfs/glock.c
@@ -2,7 +2,7 @@
 #include <linux/slab.h>
 #include <linux/smp_lock.h>
 #include <linux/spinlock.h>
-#include <asm/semaphore.h>
+#include <linux/semaphore.h>
 #include <linux/completion.h>
 #include <linux/buffer_head.h>
 #include <asm/uaccess.h>
diff --git a/gfs-kernel/src/gfs/glops.c b/gfs-kernel/src/gfs/glops.c
index 37f5c09..708291d 100644
--- a/gfs-kernel/src/gfs/glops.c
+++ b/gfs-kernel/src/gfs/glops.c
@@ -2,7 +2,7 @@
 #include <linux/slab.h>
 #include <linux/smp_lock.h>
 #include <linux/spinlock.h>
-#include <asm/semaphore.h>
+#include <linux/semaphore.h>
 #include <linux/completion.h>
 #include <linux/buffer_head.h>
 
diff --git a/gfs-kernel/src/gfs/inode.c b/gfs-kernel/src/gfs/inode.c
index 533117a..45245bb 100644
--- a/gfs-kernel/src/gfs/inode.c
+++ b/gfs-kernel/src/gfs/inode.c
@@ -2,7 +2,7 @@
 #include <linux/slab.h>
 #include <linux/smp_lock.h>
 #include <linux/spinlock.h>
-#include <asm/semaphore.h>
+#include <linux/semaphore.h>
 #include <linux/completion.h>
 #include <linux/buffer_head.h>
 #include <linux/posix_acl.h>
@@ -910,7 +910,7 @@ gfs_lookupi(struct gfs_holder *d_gh, struct qstr *name,
 		return error;
 
 	if (!is_root) {
-		error = permission(dip->i_vnode, MAY_EXEC, NULL);
+		error = inode_permission(dip->i_vnode, MAY_EXEC);
 		if (error) {
 			gfs_glock_dq(d_gh);
 			return error;
@@ -952,7 +952,7 @@ gfs_lookupi(struct gfs_holder *d_gh, struct qstr *name,
 		}
 
 		if (!is_root) {
-			error = permission(dip->i_vnode, MAY_EXEC, NULL);
+			error = inode_permission(dip->i_vnode, MAY_EXEC);
 			if (error) {
 				gfs_glock_dq(d_gh);
 				gfs_glock_dq_uninit(i_gh);
@@ -1017,7 +1017,7 @@ create_ok(struct gfs_inode *dip, struct qstr *name, unsigned int type)
 {
 	int error;
 
-	error = permission(dip->i_vnode, MAY_WRITE | MAY_EXEC, NULL);
+	error = inode_permission(dip->i_vnode, MAY_WRITE | MAY_EXEC);
 	if (error)
 		return error;
 
@@ -1577,7 +1577,7 @@ gfs_unlink_ok(struct gfs_inode *dip, struct qstr *name, struct gfs_inode *ip)
 	if (IS_APPEND(dip->i_vnode))
 		return -EPERM;
 
-	error = permission(dip->i_vnode, MAY_WRITE | MAY_EXEC, NULL);
+	error = inode_permission(dip->i_vnode, MAY_WRITE | MAY_EXEC);
 	if (error)
 		return error;
 
diff --git a/gfs-kernel/src/gfs/ioctl.c b/gfs-kernel/src/gfs/ioctl.c
index cc174f6..c66197b 100644
--- a/gfs-kernel/src/gfs/ioctl.c
+++ b/gfs-kernel/src/gfs/ioctl.c
@@ -2,7 +2,7 @@
 #include <linux/slab.h>
 #include <linux/smp_lock.h>
 #include <linux/spinlock.h>
-#include <asm/semaphore.h>
+#include <linux/semaphore.h>
 #include <linux/completion.h>
 #include <linux/buffer_head.h>
 #include <asm/uaccess.h>
diff --git a/gfs-kernel/src/gfs/lm.c b/gfs-kernel/src/gfs/lm.c
index d08d7e4..f27d581 100644
--- a/gfs-kernel/src/gfs/lm.c
+++ b/gfs-kernel/src/gfs/lm.c
@@ -35,7 +35,7 @@ int gfs_lm_mount(struct gfs_sbd *sdp, int silent)
 
 	printk("Trying to join cluster \"%s\", \"%s\"\n", proto, table);
 
-	error = gfs2_mount_lockproto(proto, table, sdp->sd_args.ar_hostdata,
+	error = gfs_mount_lockproto(proto, table, sdp->sd_args.ar_hostdata,
 				     gfs_glock_cb, sdp,
 				     GFS_MIN_LVB_SIZE, flags,
 				     &sdp->sd_lockstruct, &sdp->sd_kobj);
@@ -49,7 +49,7 @@ int gfs_lm_mount(struct gfs_sbd *sdp, int silent)
 	    gfs_assert_warn(sdp, sdp->sd_lockstruct.ls_ops) ||
 	    gfs_assert_warn(sdp, sdp->sd_lockstruct.ls_lvb_size >=
 				  GFS_MIN_LVB_SIZE)) {
-		gfs2_unmount_lockproto(&sdp->sd_lockstruct);
+		gfs_unmount_lockproto(&sdp->sd_lockstruct);
 		goto out;
 	}
 
@@ -80,7 +80,7 @@ void gfs_lm_others_may_mount(struct gfs_sbd *sdp)
 void gfs_lm_unmount(struct gfs_sbd *sdp)
 {
 	if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
-		gfs2_unmount_lockproto(&sdp->sd_lockstruct);
+		gfs_unmount_lockproto(&sdp->sd_lockstruct);
 }
 
 int gfs_lm_withdraw(struct gfs_sbd *sdp, char *fmt, ...)
@@ -102,7 +102,7 @@ int gfs_lm_withdraw(struct gfs_sbd *sdp, char *fmt, ...)
 	printk("GFS: fsid=%s: telling LM to withdraw\n",
 	       sdp->sd_fsname);
 
-	gfs2_withdraw_lockproto(&sdp->sd_lockstruct);
+	gfs_withdraw_lockproto(&sdp->sd_lockstruct);
 
 	printk("GFS: fsid=%s: withdrawn\n",
 	       sdp->sd_fsname);
diff --git a/gfs-kernel/src/gfs/lm_interface.h b/gfs-kernel/src/gfs/lm_interface.h
new file mode 100644
index 0000000..f1d17ec
--- /dev/null
+++ b/gfs-kernel/src/gfs/lm_interface.h
@@ -0,0 +1,278 @@
+/*
+ * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
+ * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
+ *
+ * This copyrighted material is made available to anyone wishing to use,
+ * modify, copy, or redistribute it subject to the terms and conditions
+ * of the GNU General Public License version 2.
+ */
+
+#ifndef __LM_INTERFACE_DOT_H__
+#define __LM_INTERFACE_DOT_H__
+
+
+typedef void (*lm_callback_t) (void *ptr, unsigned int type, void *data);
+
+/*
+ * lm_mount() flags
+ *
+ * LM_MFLAG_SPECTATOR
+ * GFS is asking to join the filesystem's lockspace, but it doesn't want to
+ * modify the filesystem.  The lock module shouldn't assign a journal to the FS
+ * mount.  It shouldn't send recovery callbacks to the FS mount.  If the node
+ * dies or withdraws, all locks can be wiped immediately.
+ */
+
+#define LM_MFLAG_SPECTATOR	0x00000001
+
+/*
+ * lm_lockstruct flags
+ *
+ * LM_LSFLAG_LOCAL
+ * The lock_nolock module returns LM_LSFLAG_LOCAL to GFS, indicating that GFS
+ * can make single-node optimizations.
+ */
+
+#define LM_LSFLAG_LOCAL		0x00000001
+
+/*
+ * lm_lockname types
+ */
+
+#define LM_TYPE_RESERVED	0x00
+#define LM_TYPE_NONDISK		0x01
+#define LM_TYPE_INODE		0x02
+#define LM_TYPE_RGRP		0x03
+#define LM_TYPE_META		0x04
+#define LM_TYPE_IOPEN		0x05
+#define LM_TYPE_FLOCK		0x06
+#define LM_TYPE_PLOCK		0x07
+#define LM_TYPE_QUOTA		0x08
+#define LM_TYPE_JOURNAL		0x09
+
+/*
+ * lm_lock() states
+ *
+ * SHARED is compatible with SHARED, not with DEFERRED or EX.
+ * DEFERRED is compatible with DEFERRED, not with SHARED or EX.
+ */
+
+#define LM_ST_UNLOCKED		0
+#define LM_ST_EXCLUSIVE		1
+#define LM_ST_DEFERRED		2
+#define LM_ST_SHARED		3
+
+/*
+ * lm_lock() flags
+ *
+ * LM_FLAG_TRY
+ * Don't wait to acquire the lock if it can't be granted immediately.
+ *
+ * LM_FLAG_TRY_1CB
+ * Send one blocking callback if TRY is set and the lock is not granted.
+ *
+ * LM_FLAG_NOEXP
+ * GFS sets this flag on lock requests it makes while doing journal recovery.
+ * These special requests should not be blocked due to the recovery like
+ * ordinary locks would be.
+ *
+ * LM_FLAG_ANY
+ * A SHARED request may also be granted in DEFERRED, or a DEFERRED request may
+ * also be granted in SHARED.  The preferred state is whichever is compatible
+ * with other granted locks, or the specified state if no other locks exist.
+ *
+ * LM_FLAG_PRIORITY
+ * Override fairness considerations.  Suppose a lock is held in a shared state
+ * and there is a pending request for the deferred state.  A shared lock
+ * request with the priority flag would be allowed to bypass the deferred
+ * request and directly join the other shared lock.  A shared lock request
+ * without the priority flag might be forced to wait until the deferred
+ * requested had acquired and released the lock.
+ */
+
+#define LM_FLAG_TRY		0x00000001
+#define LM_FLAG_TRY_1CB		0x00000002
+#define LM_FLAG_NOEXP		0x00000004
+#define LM_FLAG_ANY		0x00000008
+#define LM_FLAG_PRIORITY	0x00000010
+
+/*
+ * lm_lock() and lm_async_cb return flags
+ *
+ * LM_OUT_ST_MASK
+ * Masks the lower two bits of lock state in the returned value.
+ *
+ * LM_OUT_CACHEABLE
+ * The lock hasn't been released so GFS can continue to cache data for it.
+ *
+ * LM_OUT_CANCELED
+ * The lock request was canceled.
+ *
+ * LM_OUT_ASYNC
+ * The result of the request will be returned in an LM_CB_ASYNC callback.
+ */
+
+#define LM_OUT_ST_MASK		0x00000003
+#define LM_OUT_CACHEABLE	0x00000004
+#define LM_OUT_CANCELED		0x00000008
+#define LM_OUT_ASYNC		0x00000080
+#define LM_OUT_ERROR		0x00000100
+
+/*
+ * lm_callback_t types
+ *
+ * LM_CB_NEED_E LM_CB_NEED_D LM_CB_NEED_S
+ * Blocking callback, a remote node is requesting the given lock in
+ * EXCLUSIVE, DEFERRED, or SHARED.
+ *
+ * LM_CB_NEED_RECOVERY
+ * The given journal needs to be recovered.
+ *
+ * LM_CB_DROPLOCKS
+ * Reduce the number of cached locks.
+ *
+ * LM_CB_ASYNC
+ * The given lock has been granted.
+ */
+
+#define LM_CB_NEED_E		257
+#define LM_CB_NEED_D		258
+#define LM_CB_NEED_S		259
+#define LM_CB_NEED_RECOVERY	260
+#define LM_CB_DROPLOCKS		261
+#define LM_CB_ASYNC		262
+
+/*
+ * lm_recovery_done() messages
+ */
+
+#define LM_RD_GAVEUP		308
+#define LM_RD_SUCCESS		309
+
+
+struct lm_lockname {
+	u64 ln_number;
+	unsigned int ln_type;
+};
+
+#define lm_name_equal(name1, name2) \
+	(((name1)->ln_number == (name2)->ln_number) && \
+	 ((name1)->ln_type == (name2)->ln_type)) \
+
+struct lm_async_cb {
+	struct lm_lockname lc_name;
+	int lc_ret;
+};
+
+struct lm_lockstruct;
+
+struct lm_lockops {
+	const char *lm_proto_name;
+
+	/*
+	 * Mount/Unmount
+	 */
+
+	int (*lm_mount) (char *table_name, char *host_data,
+			 lm_callback_t cb, void *cb_data,
+			 unsigned int min_lvb_size, int flags,
+			 struct lm_lockstruct *lockstruct,
+			 struct kobject *fskobj);
+
+	void (*lm_others_may_mount) (void *lockspace);
+
+	void (*lm_unmount) (void *lockspace);
+
+	void (*lm_withdraw) (void *lockspace);
+
+	/*
+	 * Lock oriented operations
+	 */
+
+	int (*lm_get_lock) (void *lockspace, struct lm_lockname *name, void **lockp);
+
+	void (*lm_put_lock) (void *lock);
+
+	unsigned int (*lm_lock) (void *lock, unsigned int cur_state,
+				 unsigned int req_state, unsigned int flags);
+
+	unsigned int (*lm_unlock) (void *lock, unsigned int cur_state);
+
+	void (*lm_cancel) (void *lock);
+
+	int (*lm_hold_lvb) (void *lock, char **lvbp);
+	void (*lm_unhold_lvb) (void *lock, char *lvb);
+
+	/*
+	 * Posix Lock oriented operations
+	 */
+
+	int (*lm_plock_get) (void *lockspace, struct lm_lockname *name,
+			     struct file *file, struct file_lock *fl);
+
+	int (*lm_plock) (void *lockspace, struct lm_lockname *name,
+			 struct file *file, int cmd, struct file_lock *fl);
+
+	int (*lm_punlock) (void *lockspace, struct lm_lockname *name,
+			   struct file *file, struct file_lock *fl);
+
+	/*
+	 * Client oriented operations
+	 */
+
+	void (*lm_recovery_done) (void *lockspace, unsigned int jid,
+				  unsigned int message);
+
+	struct module *lm_owner;
+};
+
+/*
+ * lm_mount() return values
+ *
+ * ls_jid - the journal ID this node should use
+ * ls_first - this node is the first to mount the file system
+ * ls_lvb_size - size in bytes of lock value blocks
+ * ls_lockspace - lock module's context for this file system
+ * ls_ops - lock module's functions
+ * ls_flags - lock module features
+ */
+
+struct lm_lockstruct {
+	unsigned int ls_jid;
+	unsigned int ls_first;
+	unsigned int ls_lvb_size;
+	void *ls_lockspace;
+	const struct lm_lockops *ls_ops;
+	int ls_flags;
+};
+
+/*
+ * Lock module bottom interface.  A lock module makes itself available to GFS
+ * with these functions.
+ */
+
+int gfs_register_lockproto(const struct lm_lockops *proto);
+void gfs_unregister_lockproto(const struct lm_lockops *proto);
+
+/*
+ * Lock module top interface.  GFS calls these functions when mounting or
+ * unmounting a file system.
+ */
+
+int gfs_mount_lockproto(char *proto_name, char *table_name, char *host_data,
+			 lm_callback_t cb, void *cb_data,
+			 unsigned int min_lvb_size, int flags,
+			 struct lm_lockstruct *lockstruct,
+			 struct kobject *fskobj);
+
+void gfs_unmount_lockproto(struct lm_lockstruct *lockstruct);
+
+void gfs_withdraw_lockproto(struct lm_lockstruct *lockstruct);
+
+int init_lock_dlm(void);
+void exit_lock_dlm(void);
+int init_nolock(void);
+void exit_nolock(void);
+
+#endif /* __LM_INTERFACE_DOT_H__ */
+
diff --git a/gfs-kernel/src/gfs/lock_dlm.h b/gfs-kernel/src/gfs/lock_dlm.h
new file mode 100644
index 0000000..624614e
--- /dev/null
+++ b/gfs-kernel/src/gfs/lock_dlm.h
@@ -0,0 +1,182 @@
+/*
+ * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
+ * Copyright (C) 2004-2005 Red Hat, Inc.  All rights reserved.
+ *
+ * This copyrighted material is made available to anyone wishing to use,
+ * modify, copy, or redistribute it subject to the terms and conditions
+ * of the GNU General Public License version 2.
+ */
+
+#ifndef LOCK_DLM_DOT_H
+#define LOCK_DLM_DOT_H
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/list.h>
+#include <linux/socket.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/kobject.h>
+#include <linux/fcntl.h>
+#include <linux/wait.h>
+#include <net/sock.h>
+
+#include <linux/dlm.h>
+#include <linux/dlm_plock.h>
+#include "lm_interface.h"
+
+/*
+ * Internally, we prefix things with gdlm_ and GDLM_ (for gfs-dlm) since a
+ * prefix of lock_dlm_ gets awkward.  Externally, GFS refers to this module
+ * as "lock_dlm".
+ */
+
+#define GDLM_STRNAME_BYTES	24
+#define GDLM_LVB_SIZE		32
+#define GDLM_DROP_COUNT		0
+#define GDLM_DROP_PERIOD	60
+#define GDLM_NAME_LEN		128
+
+/* GFS uses 12 bytes to identify a resource (32 bit type + 64 bit number).
+   We sprintf these numbers into a 24 byte string of hex values to make them
+   human-readable (to make debugging simpler.) */
+
+struct gdlm_strname {
+	unsigned char		name[GDLM_STRNAME_BYTES];
+	unsigned short		namelen;
+};
+
+enum {
+	DFL_BLOCK_LOCKS		= 0,
+	DFL_SPECTATOR		= 1,
+	DFL_WITHDRAW		= 2,
+};
+
+struct gdlm_ls {
+	u32		id;
+	int			jid;
+	int			first;
+	int			first_done;
+	unsigned long		flags;
+	struct kobject		kobj;
+	char			clustername[GDLM_NAME_LEN];
+	char			fsname[GDLM_NAME_LEN];
+	int			fsflags;
+	dlm_lockspace_t		*dlm_lockspace;
+	lm_callback_t		fscb;
+	struct gfs_sbd		*sdp;
+	int			recover_jid;
+	int			recover_jid_done;
+	int			recover_jid_status;
+	spinlock_t		async_lock;
+	struct list_head	complete;
+	struct list_head	blocking;
+	struct list_head	delayed;
+	struct list_head	submit;
+	struct list_head	all_locks;
+	u32		all_locks_count;
+	wait_queue_head_t	wait_control;
+	struct task_struct	*thread1;
+	struct task_struct	*thread2;
+	wait_queue_head_t	thread_wait;
+	unsigned long		drop_time;
+	int			drop_locks_count;
+	int			drop_locks_period;
+};
+
+enum {
+	LFL_NOBLOCK		= 0,
+	LFL_NOCACHE		= 1,
+	LFL_DLM_UNLOCK		= 2,
+	LFL_DLM_CANCEL		= 3,
+	LFL_SYNC_LVB		= 4,
+	LFL_FORCE_PROMOTE	= 5,
+	LFL_REREQUEST		= 6,
+	LFL_ACTIVE		= 7,
+	LFL_INLOCK		= 8,
+	LFL_CANCEL		= 9,
+	LFL_NOBAST		= 10,
+	LFL_HEADQUE		= 11,
+	LFL_UNLOCK_DELETE	= 12,
+	LFL_AST_WAIT		= 13,
+};
+
+struct gdlm_lock {
+	struct gdlm_ls		*ls;
+	struct lm_lockname	lockname;
+	struct gdlm_strname	strname;
+	char			*lvb;
+	struct dlm_lksb		lksb;
+
+	s16			cur;
+	s16			req;
+	s16			prev_req;
+	u32			lkf;		/* dlm flags DLM_LKF_ */
+	unsigned long		flags;		/* lock_dlm flags LFL_ */
+
+	int			bast_mode;	/* protected by async_lock */
+
+	struct list_head	clist;		/* complete */
+	struct list_head	blist;		/* blocking */
+	struct list_head	delay_list;	/* delayed */
+	struct list_head	all_list;	/* all locks for the fs */
+	struct gdlm_lock	*hold_null;	/* NL lock for hold_lvb */
+};
+
+#define gdlm_assert(assertion, fmt, args...)                                  \
+do {                                                                          \
+	if (unlikely(!(assertion))) {                                         \
+		printk(KERN_EMERG "lock_dlm: fatal assertion failed \"%s\"\n" \
+				  "lock_dlm:  " fmt "\n",                     \
+				  #assertion, ##args);                        \
+		BUG();                                                        \
+	}                                                                     \
+} while (0)
+
+#define log_print(lev, fmt, arg...) printk(lev "lock_dlm: " fmt "\n" , ## arg)
+#define log_info(fmt, arg...)  log_print(KERN_INFO , fmt , ## arg)
+#define log_error(fmt, arg...) log_print(KERN_ERR , fmt , ## arg)
+#ifdef LOCK_DLM_LOG_DEBUG
+#define log_debug(fmt, arg...) log_print(KERN_DEBUG , fmt , ## arg)
+#else
+#define log_debug(fmt, arg...)
+#endif
+
+/* sysfs.c */
+
+int gdlm_sysfs_init(void);
+void gdlm_sysfs_exit(void);
+int gdlm_kobject_setup(struct gdlm_ls *, struct kobject *);
+void gdlm_kobject_release(struct gdlm_ls *);
+
+/* thread.c */
+
+int gdlm_init_threads(struct gdlm_ls *);
+void gdlm_release_threads(struct gdlm_ls *);
+
+/* lock.c */
+
+s16 gdlm_make_lmstate(s16);
+void gdlm_queue_delayed(struct gdlm_lock *);
+void gdlm_submit_delayed(struct gdlm_ls *);
+int gdlm_release_all_locks(struct gdlm_ls *);
+void gdlm_delete_lp(struct gdlm_lock *);
+unsigned int gdlm_do_lock(struct gdlm_lock *);
+
+int gdlm_get_lock(void *, struct lm_lockname *, void **);
+void gdlm_put_lock(void *);
+unsigned int gdlm_lock(void *, unsigned int, unsigned int, unsigned int);
+unsigned int gdlm_unlock(void *, unsigned int);
+void gdlm_cancel(void *);
+int gdlm_hold_lvb(void *, char **);
+void gdlm_unhold_lvb(void *, char *);
+
+/* mount.c */
+
+extern const struct lm_lockops gdlm_ops;
+
+#endif
+
diff --git a/gfs-kernel/src/gfs/lock_dlm_lock.c b/gfs-kernel/src/gfs/lock_dlm_lock.c
new file mode 100644
index 0000000..bbc7308
--- /dev/null
+++ b/gfs-kernel/src/gfs/lock_dlm_lock.c
@@ -0,0 +1,527 @@
+/*
+ * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
+ * Copyright (C) 2004-2005 Red Hat, Inc.  All rights reserved.
+ *
+ * This copyrighted material is made available to anyone wishing to use,
+ * modify, copy, or redistribute it subject to the terms and conditions
+ * of the GNU General Public License version 2.
+ */
+
+#include "lock_dlm.h"
+
+static char junk_lvb[GDLM_LVB_SIZE];
+
+static void queue_complete(struct gdlm_lock *lp)
+{
+	struct gdlm_ls *ls = lp->ls;
+
+	clear_bit(LFL_ACTIVE, &lp->flags);
+
+	spin_lock(&ls->async_lock);
+	list_add_tail(&lp->clist, &ls->complete);
+	spin_unlock(&ls->async_lock);
+	wake_up(&ls->thread_wait);
+}
+
+static inline void gdlm_ast(void *astarg)
+{
+	queue_complete(astarg);
+}
+
+static inline void gdlm_bast(void *astarg, int mode)
+{
+	struct gdlm_lock *lp = astarg;
+	struct gdlm_ls *ls = lp->ls;
+
+	if (!mode) {
+		printk(KERN_INFO "lock_dlm: bast mode zero %x,%llx\n",
+			lp->lockname.ln_type,
+			(unsigned long long)lp->lockname.ln_number);
+		return;
+	}
+
+	spin_lock(&ls->async_lock);
+	if (!lp->bast_mode) {
+		list_add_tail(&lp->blist, &ls->blocking);
+		lp->bast_mode = mode;
+	} else if (lp->bast_mode < mode)
+		lp->bast_mode = mode;
+	spin_unlock(&ls->async_lock);
+	wake_up(&ls->thread_wait);
+}
+
+void gdlm_queue_delayed(struct gdlm_lock *lp)
+{
+	struct gdlm_ls *ls = lp->ls;
+
+	spin_lock(&ls->async_lock);
+	list_add_tail(&lp->delay_list, &ls->delayed);
+	spin_unlock(&ls->async_lock);
+}
+
+/* convert gfs lock-state to dlm lock-mode */
+
+static s16 make_mode(s16 lmstate)
+{
+	switch (lmstate) {
+	case LM_ST_UNLOCKED:
+		return DLM_LOCK_NL;
+	case LM_ST_EXCLUSIVE:
+		return DLM_LOCK_EX;
+	case LM_ST_DEFERRED:
+		return DLM_LOCK_CW;
+	case LM_ST_SHARED:
+		return DLM_LOCK_PR;
+	}
+	gdlm_assert(0, "unknown LM state %d", lmstate);
+	return -1;
+}
+
+/* convert dlm lock-mode to gfs lock-state */
+
+s16 gdlm_make_lmstate(s16 dlmmode)
+{
+	switch (dlmmode) {
+	case DLM_LOCK_IV:
+	case DLM_LOCK_NL:
+		return LM_ST_UNLOCKED;
+	case DLM_LOCK_EX:
+		return LM_ST_EXCLUSIVE;
+	case DLM_LOCK_CW:
+		return LM_ST_DEFERRED;
+	case DLM_LOCK_PR:
+		return LM_ST_SHARED;
+	}
+	gdlm_assert(0, "unknown DLM mode %d", dlmmode);
+	return -1;
+}
+
+/* verify agreement with GFS on the current lock state, NB: DLM_LOCK_NL and
+   DLM_LOCK_IV are both considered LM_ST_UNLOCKED by GFS. */
+
+static void check_cur_state(struct gdlm_lock *lp, unsigned int cur_state)
+{
+	s16 cur = make_mode(cur_state);
+	if (lp->cur != DLM_LOCK_IV)
+		gdlm_assert(lp->cur == cur, "%d, %d", lp->cur, cur);
+}
+
+static inline unsigned int make_flags(struct gdlm_lock *lp,
+				      unsigned int gfs_flags,
+				      s16 cur, s16 req)
+{
+	unsigned int lkf = 0;
+
+	if (gfs_flags & LM_FLAG_TRY)
+		lkf |= DLM_LKF_NOQUEUE;
+
+	if (gfs_flags & LM_FLAG_TRY_1CB) {
+		lkf |= DLM_LKF_NOQUEUE;
+		lkf |= DLM_LKF_NOQUEUEBAST;
+	}
+
+	if (gfs_flags & LM_FLAG_PRIORITY) {
+		lkf |= DLM_LKF_NOORDER;
+		lkf |= DLM_LKF_HEADQUE;
+	}
+
+	if (gfs_flags & LM_FLAG_ANY) {
+		if (req == DLM_LOCK_PR)
+			lkf |= DLM_LKF_ALTCW;
+		else if (req == DLM_LOCK_CW)
+			lkf |= DLM_LKF_ALTPR;
+	}
+
+	if (lp->lksb.sb_lkid != 0) {
+		lkf |= DLM_LKF_CONVERT;
+
+		/* Conversion deadlock avoidance by DLM */
+
+		if (!test_bit(LFL_FORCE_PROMOTE, &lp->flags) &&
+		    !(lkf & DLM_LKF_NOQUEUE) &&
+		    cur > DLM_LOCK_NL && req > DLM_LOCK_NL && cur != req)
+			lkf |= DLM_LKF_CONVDEADLK;
+	}
+
+	if (lp->lvb)
+		lkf |= DLM_LKF_VALBLK;
+
+	return lkf;
+}
+
+/* make_strname - convert GFS lock numbers to a string */
+
+static inline void make_strname(const struct lm_lockname *lockname,
+				struct gdlm_strname *str)
+{
+	sprintf(str->name, "%8x%16llx", lockname->ln_type,
+		(unsigned long long)lockname->ln_number);
+	str->namelen = GDLM_STRNAME_BYTES;
+}
+
+static int gdlm_create_lp(struct gdlm_ls *ls, struct lm_lockname *name,
+			  struct gdlm_lock **lpp)
+{
+	struct gdlm_lock *lp;
+
+	lp = kzalloc(sizeof(struct gdlm_lock), GFP_NOFS);
+	if (!lp)
+		return -ENOMEM;
+
+	lp->lockname = *name;
+	make_strname(name, &lp->strname);
+	lp->ls = ls;
+	lp->cur = DLM_LOCK_IV;
+	lp->lvb = NULL;
+	lp->hold_null = NULL;
+	INIT_LIST_HEAD(&lp->clist);
+	INIT_LIST_HEAD(&lp->blist);
+	INIT_LIST_HEAD(&lp->delay_list);
+
+	spin_lock(&ls->async_lock);
+	list_add(&lp->all_list, &ls->all_locks);
+	ls->all_locks_count++;
+	spin_unlock(&ls->async_lock);
+
+	*lpp = lp;
+	return 0;
+}
+
+void gdlm_delete_lp(struct gdlm_lock *lp)
+{
+	struct gdlm_ls *ls = lp->ls;
+
+	spin_lock(&ls->async_lock);
+	if (!list_empty(&lp->clist))
+		list_del_init(&lp->clist);
+	if (!list_empty(&lp->blist))
+		list_del_init(&lp->blist);
+	if (!list_empty(&lp->delay_list))
+		list_del_init(&lp->delay_list);
+	gdlm_assert(!list_empty(&lp->all_list), "%x,%llx", lp->lockname.ln_type,
+		    (unsigned long long)lp->lockname.ln_number);
+	list_del_init(&lp->all_list);
+	ls->all_locks_count--;
+	spin_unlock(&ls->async_lock);
+
+	kfree(lp);
+}
+
+int gdlm_get_lock(void *lockspace, struct lm_lockname *name,
+		  void **lockp)
+{
+	struct gdlm_lock *lp;
+	int error;
+
+	error = gdlm_create_lp(lockspace, name, &lp);
+
+	*lockp = lp;
+	return error;
+}
+
+void gdlm_put_lock(void *lock)
+{
+	gdlm_delete_lp(lock);
+}
+
+unsigned int gdlm_do_lock(struct gdlm_lock *lp)
+{
+	struct gdlm_ls *ls = lp->ls;
+	int error, bast = 1;
+
+	/*
+	 * When recovery is in progress, delay lock requests for submission
+	 * once recovery is done.  Requests for recovery (NOEXP) and unlocks
+	 * can pass.
+	 */
+
+	if (test_bit(DFL_BLOCK_LOCKS, &ls->flags) &&
+	    !test_bit(LFL_NOBLOCK, &lp->flags) && lp->req != DLM_LOCK_NL) {
+		gdlm_queue_delayed(lp);
+		return LM_OUT_ASYNC;
+	}
+
+	/*
+	 * Submit the actual lock request.
+	 */
+
+	if (test_bit(LFL_NOBAST, &lp->flags))
+		bast = 0;
+
+	set_bit(LFL_ACTIVE, &lp->flags);
+
+	log_debug("lk %x,%llx id %x %d,%d %x", lp->lockname.ln_type,
+		  (unsigned long long)lp->lockname.ln_number, lp->lksb.sb_lkid,
+		  lp->cur, lp->req, lp->lkf);
+
+	error = dlm_lock(ls->dlm_lockspace, lp->req, &lp->lksb, lp->lkf,
+			 lp->strname.name, lp->strname.namelen, 0, gdlm_ast,
+			 lp, bast ? gdlm_bast : NULL);
+
+	if ((error == -EAGAIN) && (lp->lkf & DLM_LKF_NOQUEUE)) {
+		lp->lksb.sb_status = -EAGAIN;
+		queue_complete(lp);
+		error = 0;
+	}
+
+	if (error) {
+		log_error("%s: gdlm_lock %x,%llx err=%d cur=%d req=%d lkf=%x "
+			  "flags=%lx", ls->fsname, lp->lockname.ln_type,
+			  (unsigned long long)lp->lockname.ln_number, error,
+			  lp->cur, lp->req, lp->lkf, lp->flags);
+		return LM_OUT_ERROR;
+	}
+	return LM_OUT_ASYNC;
+}
+
+static unsigned int gdlm_do_unlock(struct gdlm_lock *lp)
+{
+	struct gdlm_ls *ls = lp->ls;
+	unsigned int lkf = 0;
+	int error;
+
+	set_bit(LFL_DLM_UNLOCK, &lp->flags);
+	set_bit(LFL_ACTIVE, &lp->flags);
+
+	if (lp->lvb)
+		lkf = DLM_LKF_VALBLK;
+
+	log_debug("un %x,%llx %x %d %x", lp->lockname.ln_type,
+		  (unsigned long long)lp->lockname.ln_number,
+		  lp->lksb.sb_lkid, lp->cur, lkf);
+
+	error = dlm_unlock(ls->dlm_lockspace, lp->lksb.sb_lkid, lkf, NULL, lp);
+
+	if (error) {
+		log_error("%s: gdlm_unlock %x,%llx err=%d cur=%d req=%d lkf=%x "
+			  "flags=%lx", ls->fsname, lp->lockname.ln_type,
+			  (unsigned long long)lp->lockname.ln_number, error,
+			  lp->cur, lp->req, lp->lkf, lp->flags);
+		return LM_OUT_ERROR;
+	}
+	return LM_OUT_ASYNC;
+}
+
+unsigned int gdlm_lock(void *lock, unsigned int cur_state,
+		       unsigned int req_state, unsigned int flags)
+{
+	struct gdlm_lock *lp = lock;
+
+	clear_bit(LFL_DLM_CANCEL, &lp->flags);
+	if (flags & LM_FLAG_NOEXP)
+		set_bit(LFL_NOBLOCK, &lp->flags);
+
+	check_cur_state(lp, cur_state);
+	lp->req = make_mode(req_state);
+	lp->lkf = make_flags(lp, flags, lp->cur, lp->req);
+
+	return gdlm_do_lock(lp);
+}
+
+unsigned int gdlm_unlock(void *lock, unsigned int cur_state)
+{
+	struct gdlm_lock *lp = lock;
+
+	clear_bit(LFL_DLM_CANCEL, &lp->flags);
+	if (lp->cur == DLM_LOCK_IV)
+		return 0;
+	return gdlm_do_unlock(lp);
+}
+
+void gdlm_cancel(void *lock)
+{
+	struct gdlm_lock *lp = lock;
+	struct gdlm_ls *ls = lp->ls;
+	int error, delay_list = 0;
+
+	if (test_bit(LFL_DLM_CANCEL, &lp->flags))
+		return;
+
+	log_info("gdlm_cancel %x,%llx flags %lx", lp->lockname.ln_type,
+		 (unsigned long long)lp->lockname.ln_number, lp->flags);
+
+	spin_lock(&ls->async_lock);
+	if (!list_empty(&lp->delay_list)) {
+		list_del_init(&lp->delay_list);
+		delay_list = 1;
+	}
+	spin_unlock(&ls->async_lock);
+
+	if (delay_list) {
+		set_bit(LFL_CANCEL, &lp->flags);
+		set_bit(LFL_ACTIVE, &lp->flags);
+		queue_complete(lp);
+		return;
+	}
+
+	if (!test_bit(LFL_ACTIVE, &lp->flags) ||
+	    test_bit(LFL_DLM_UNLOCK, &lp->flags)) {
+		log_info("gdlm_cancel skip %x,%llx flags %lx",
+		 	 lp->lockname.ln_type,
+			 (unsigned long long)lp->lockname.ln_number, lp->flags);
+		return;
+	}
+
+	/* the lock is blocked in the dlm */
+
+	set_bit(LFL_DLM_CANCEL, &lp->flags);
+	set_bit(LFL_ACTIVE, &lp->flags);
+
+	error = dlm_unlock(ls->dlm_lockspace, lp->lksb.sb_lkid, DLM_LKF_CANCEL,
+			   NULL, lp);
+
+	log_info("gdlm_cancel rv %d %x,%llx flags %lx", error,
+		 lp->lockname.ln_type,
+		 (unsigned long long)lp->lockname.ln_number, lp->flags);
+
+	if (error == -EBUSY)
+		clear_bit(LFL_DLM_CANCEL, &lp->flags);
+}
+
+static int gdlm_add_lvb(struct gdlm_lock *lp)
+{
+	char *lvb;
+
+	lvb = kzalloc(GDLM_LVB_SIZE, GFP_NOFS);
+	if (!lvb)
+		return -ENOMEM;
+
+	lp->lksb.sb_lvbptr = lvb;
+	lp->lvb = lvb;
+	return 0;
+}
+
+static void gdlm_del_lvb(struct gdlm_lock *lp)
+{
+	kfree(lp->lvb);
+	lp->lvb = NULL;
+	lp->lksb.sb_lvbptr = NULL;
+}
+
+static int gdlm_ast_wait(void *word)
+{
+	schedule();
+	return 0;
+}
+
+/* This can do a synchronous dlm request (requiring a lock_dlm thread to get
+   the completion) because gfs won't call hold_lvb() during a callback (from
+   the context of a lock_dlm thread). */
+
+static int hold_null_lock(struct gdlm_lock *lp)
+{
+	struct gdlm_lock *lpn = NULL;
+	int error;
+
+	if (lp->hold_null) {
+		printk(KERN_INFO "lock_dlm: lvb already held\n");
+		return 0;
+	}
+
+	error = gdlm_create_lp(lp->ls, &lp->lockname, &lpn);
+	if (error)
+		goto out;
+
+	lpn->lksb.sb_lvbptr = junk_lvb;
+	lpn->lvb = junk_lvb;
+
+	lpn->req = DLM_LOCK_NL;
+	lpn->lkf = DLM_LKF_VALBLK | DLM_LKF_EXPEDITE;
+	set_bit(LFL_NOBAST, &lpn->flags);
+	set_bit(LFL_INLOCK, &lpn->flags);
+	set_bit(LFL_AST_WAIT, &lpn->flags);
+
+	gdlm_do_lock(lpn);
+	wait_on_bit(&lpn->flags, LFL_AST_WAIT, gdlm_ast_wait, TASK_UNINTERRUPTIBLE);
+	error = lpn->lksb.sb_status;
+	if (error) {
+		printk(KERN_INFO "lock_dlm: hold_null_lock dlm error %d\n",
+		       error);
+		gdlm_delete_lp(lpn);
+		lpn = NULL;
+	}
+out:
+	lp->hold_null = lpn;
+	return error;
+}
+
+/* This cannot do a synchronous dlm request (requiring a lock_dlm thread to get
+   the completion) because gfs may call unhold_lvb() during a callback (from
+   the context of a lock_dlm thread) which could cause a deadlock since the
+   other lock_dlm thread could be engaged in recovery. */
+
+static void unhold_null_lock(struct gdlm_lock *lp)
+{
+	struct gdlm_lock *lpn = lp->hold_null;
+
+	gdlm_assert(lpn, "%x,%llx", lp->lockname.ln_type,
+		    (unsigned long long)lp->lockname.ln_number);
+	lpn->lksb.sb_lvbptr = NULL;
+	lpn->lvb = NULL;
+	set_bit(LFL_UNLOCK_DELETE, &lpn->flags);
+	gdlm_do_unlock(lpn);
+	lp->hold_null = NULL;
+}
+
+/* Acquire a NL lock because gfs requires the value block to remain
+   intact on the resource while the lvb is "held" even if it's holding no locks
+   on the resource. */
+
+int gdlm_hold_lvb(void *lock, char **lvbp)
+{
+	struct gdlm_lock *lp = lock;
+	int error;
+
+	error = gdlm_add_lvb(lp);
+	if (error)
+		return error;
+
+	*lvbp = lp->lvb;
+
+	error = hold_null_lock(lp);
+	if (error)
+		gdlm_del_lvb(lp);
+
+	return error;
+}
+
+void gdlm_unhold_lvb(void *lock, char *lvb)
+{
+	struct gdlm_lock *lp = lock;
+
+	unhold_null_lock(lp);
+	gdlm_del_lvb(lp);
+}
+
+void gdlm_submit_delayed(struct gdlm_ls *ls)
+{
+	struct gdlm_lock *lp, *safe;
+
+	spin_lock(&ls->async_lock);
+	list_for_each_entry_safe(lp, safe, &ls->delayed, delay_list) {
+		list_del_init(&lp->delay_list);
+		list_add_tail(&lp->delay_list, &ls->submit);
+	}
+	spin_unlock(&ls->async_lock);
+	wake_up(&ls->thread_wait);
+}
+
+int gdlm_release_all_locks(struct gdlm_ls *ls)
+{
+	struct gdlm_lock *lp, *safe;
+	int count = 0;
+
+	spin_lock(&ls->async_lock);
+	list_for_each_entry_safe(lp, safe, &ls->all_locks, all_list) {
+		list_del_init(&lp->all_list);
+
+		if (lp->lvb && lp->lvb != junk_lvb)
+			kfree(lp->lvb);
+		kfree(lp);
+		count++;
+	}
+	spin_unlock(&ls->async_lock);
+
+	return count;
+}
+
diff --git a/gfs-kernel/src/gfs/lock_dlm_main.c b/gfs-kernel/src/gfs/lock_dlm_main.c
new file mode 100644
index 0000000..e019177
--- /dev/null
+++ b/gfs-kernel/src/gfs/lock_dlm_main.c
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
+ * Copyright (C) 2004-2005 Red Hat, Inc.  All rights reserved.
+ *
+ * This copyrighted material is made available to anyone wishing to use,
+ * modify, copy, or redistribute it subject to the terms and conditions
+ * of the GNU General Public License version 2.
+ */
+
+#include <linux/init.h>
+
+#include "lock_dlm.h"
+
+int init_lock_dlm()
+{
+	int error;
+
+	error = gfs_register_lockproto(&gdlm_ops);
+	if (error) {
+		printk(KERN_WARNING "lock_dlm:  can't register protocol: %d\n",
+		       error);
+		return error;
+	}
+
+	error = gdlm_sysfs_init();
+	if (error) {
+		gfs_unregister_lockproto(&gdlm_ops);
+		return error;
+	}
+
+	printk(KERN_INFO
+	       "Lock_DLM (built %s %s) installed\n", __DATE__, __TIME__);
+	return 0;
+}
+
+void exit_lock_dlm()
+{
+	gdlm_sysfs_exit();
+	gfs_unregister_lockproto(&gdlm_ops);
+}
diff --git a/gfs-kernel/src/gfs/lock_dlm_mount.c b/gfs-kernel/src/gfs/lock_dlm_mount.c
new file mode 100644
index 0000000..ed08bc2
--- /dev/null
+++ b/gfs-kernel/src/gfs/lock_dlm_mount.c
@@ -0,0 +1,279 @@
+/*
+ * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
+ * Copyright (C) 2004-2005 Red Hat, Inc.  All rights reserved.
+ *
+ * This copyrighted material is made available to anyone wishing to use,
+ * modify, copy, or redistribute it subject to the terms and conditions
+ * of the GNU General Public License version 2.
+ */
+
+#include "lock_dlm.h"
+
+const struct lm_lockops gdlm_ops;
+
+
+static struct gdlm_ls *init_gdlm(lm_callback_t cb, struct gfs_sbd *sdp,
+				 int flags, char *table_name)
+{
+	struct gdlm_ls *ls;
+	char buf[256], *p;
+
+	ls = kzalloc(sizeof(struct gdlm_ls), GFP_KERNEL);
+	if (!ls)
+		return NULL;
+
+	ls->drop_locks_count = GDLM_DROP_COUNT;
+	ls->drop_locks_period = GDLM_DROP_PERIOD;
+	ls->fscb = cb;
+	ls->sdp = sdp;
+	ls->fsflags = flags;
+	spin_lock_init(&ls->async_lock);
+	INIT_LIST_HEAD(&ls->complete);
+	INIT_LIST_HEAD(&ls->blocking);
+	INIT_LIST_HEAD(&ls->delayed);
+	INIT_LIST_HEAD(&ls->submit);
+	INIT_LIST_HEAD(&ls->all_locks);
+	init_waitqueue_head(&ls->thread_wait);
+	init_waitqueue_head(&ls->wait_control);
+	ls->thread1 = NULL;
+	ls->thread2 = NULL;
+	ls->drop_time = jiffies;
+	ls->jid = -1;
+
+	strncpy(buf, table_name, 256);
+	buf[255] = '\0';
+
+	p = strchr(buf, ':');
+	if (!p) {
+		log_info("invalid table_name \"%s\"", table_name);
+		kfree(ls);
+		return NULL;
+	}
+	*p = '\0';
+	p++;
+
+	strncpy(ls->clustername, buf, GDLM_NAME_LEN);
+	strncpy(ls->fsname, p, GDLM_NAME_LEN);
+
+	return ls;
+}
+
+static int make_args(struct gdlm_ls *ls, char *data_arg, int *nodir)
+{
+	char data[256];
+	char *options, *x, *y;
+	int error = 0;
+
+	memset(data, 0, 256);
+	strncpy(data, data_arg, 255);
+
+	if (!strlen(data)) {
+		log_error("no mount options, (u)mount helpers not installed");
+		return -EINVAL;
+	}
+
+	for (options = data; (x = strsep(&options, ":")); ) {
+		if (!*x)
+			continue;
+
+		y = strchr(x, '=');
+		if (y)
+			*y++ = 0;
+
+		if (!strcmp(x, "jid")) {
+			if (!y) {
+				log_error("need argument to jid");
+				error = -EINVAL;
+				break;
+			}
+			sscanf(y, "%u", &ls->jid);
+
+		} else if (!strcmp(x, "first")) {
+			if (!y) {
+				log_error("need argument to first");
+				error = -EINVAL;
+				break;
+			}
+			sscanf(y, "%u", &ls->first);
+
+		} else if (!strcmp(x, "id")) {
+			if (!y) {
+				log_error("need argument to id");
+				error = -EINVAL;
+				break;
+			}
+			sscanf(y, "%u", &ls->id);
+
+		} else if (!strcmp(x, "nodir")) {
+			if (!y) {
+				log_error("need argument to nodir");
+				error = -EINVAL;
+				break;
+			}
+			sscanf(y, "%u", nodir);
+
+		} else {
+			log_error("unkonwn option: %s", x);
+			error = -EINVAL;
+			break;
+		}
+	}
+
+	return error;
+}
+
+static int gdlm_mount(char *table_name, char *host_data,
+			lm_callback_t cb, void *cb_data,
+			unsigned int min_lvb_size, int flags,
+			struct lm_lockstruct *lockstruct,
+			struct kobject *fskobj)
+{
+	struct gdlm_ls *ls;
+	int error = -ENOMEM, nodir = 0;
+
+	if (min_lvb_size > GDLM_LVB_SIZE)
+		goto out;
+
+	ls = init_gdlm(cb, cb_data, flags, table_name);
+	if (!ls)
+		goto out;
+
+	error = make_args(ls, host_data, &nodir);
+	if (error)
+		goto out;
+
+	error = gdlm_init_threads(ls);
+	if (error)
+		goto out_free;
+
+	error = gdlm_kobject_setup(ls, fskobj);
+	if (error)
+		goto out_thread;
+
+	error = dlm_new_lockspace(ls->fsname, strlen(ls->fsname),
+				  &ls->dlm_lockspace,
+				  DLM_LSFL_FS | (nodir ? DLM_LSFL_NODIR : 0),
+				  GDLM_LVB_SIZE);
+	if (error) {
+		log_error("dlm_new_lockspace error %d", error);
+		goto out_kobj;
+	}
+
+	lockstruct->ls_jid = ls->jid;
+	lockstruct->ls_first = ls->first;
+	lockstruct->ls_lockspace = ls;
+	lockstruct->ls_ops = &gdlm_ops;
+	lockstruct->ls_flags = 0;
+	lockstruct->ls_lvb_size = GDLM_LVB_SIZE;
+	return 0;
+
+out_kobj:
+	gdlm_kobject_release(ls);
+out_thread:
+	gdlm_release_threads(ls);
+out_free:
+	kfree(ls);
+out:
+	return error;
+}
+
+static void gdlm_unmount(void *lockspace)
+{
+	struct gdlm_ls *ls = lockspace;
+	int rv;
+
+	log_debug("unmount flags %lx", ls->flags);
+
+	/* FIXME: serialize unmount and withdraw in case they
+	   happen at once.  Also, if unmount follows withdraw,
+	   wait for withdraw to finish. */
+
+	if (test_bit(DFL_WITHDRAW, &ls->flags))
+		goto out;
+
+	gdlm_kobject_release(ls);
+	dlm_release_lockspace(ls->dlm_lockspace, 2);
+	gdlm_release_threads(ls);
+	rv = gdlm_release_all_locks(ls);
+	if (rv)
+		log_info("gdlm_unmount: %d stray locks freed", rv);
+out:
+	kfree(ls);
+}
+
+static void gdlm_recovery_done(void *lockspace, unsigned int jid,
+                               unsigned int message)
+{
+	struct gdlm_ls *ls = lockspace;
+	ls->recover_jid_done = jid;
+	ls->recover_jid_status = message;
+	kobject_uevent(&ls->kobj, KOBJ_CHANGE);
+}
+
+static void gdlm_others_may_mount(void *lockspace)
+{
+	struct gdlm_ls *ls = lockspace;
+	ls->first_done = 1;
+	kobject_uevent(&ls->kobj, KOBJ_CHANGE);
+}
+
+/* Userspace gets the offline uevent, blocks new gfs locks on
+   other mounters, and lets us know (sets WITHDRAW flag).  Then,
+   userspace leaves the mount group while we leave the lockspace. */
+
+static void gdlm_withdraw(void *lockspace)
+{
+	struct gdlm_ls *ls = lockspace;
+
+	kobject_uevent(&ls->kobj, KOBJ_OFFLINE);
+
+	wait_event_interruptible(ls->wait_control,
+				 test_bit(DFL_WITHDRAW, &ls->flags));
+
+	dlm_release_lockspace(ls->dlm_lockspace, 2);
+	gdlm_release_threads(ls);
+	gdlm_release_all_locks(ls);
+	gdlm_kobject_release(ls);
+}
+
+static int gdlm_plock(void *lockspace, struct lm_lockname *name,
+	       struct file *file, int cmd, struct file_lock *fl)
+{
+	struct gdlm_ls *ls = lockspace;
+	return dlm_posix_lock(ls->dlm_lockspace, name->ln_number, file, cmd, fl);
+}
+
+static int gdlm_punlock(void *lockspace, struct lm_lockname *name,
+		 struct file *file, struct file_lock *fl)
+{
+	struct gdlm_ls *ls = lockspace;
+	return dlm_posix_unlock(ls->dlm_lockspace, name->ln_number, file, fl);
+}
+
+static int gdlm_plock_get(void *lockspace, struct lm_lockname *name,
+		   struct file *file, struct file_lock *fl)
+{
+	struct gdlm_ls *ls = lockspace;
+	return dlm_posix_get(ls->dlm_lockspace, name->ln_number, file, fl);
+}
+
+const struct lm_lockops gdlm_ops = {
+	.lm_proto_name = "lock_dlm",
+	.lm_mount = gdlm_mount,
+	.lm_others_may_mount = gdlm_others_may_mount,
+	.lm_unmount = gdlm_unmount,
+	.lm_withdraw = gdlm_withdraw,
+	.lm_get_lock = gdlm_get_lock,
+	.lm_put_lock = gdlm_put_lock,
+	.lm_lock = gdlm_lock,
+	.lm_unlock = gdlm_unlock,
+	.lm_plock = gdlm_plock,
+	.lm_punlock = gdlm_punlock,
+	.lm_plock_get = gdlm_plock_get,
+	.lm_cancel = gdlm_cancel,
+	.lm_hold_lvb = gdlm_hold_lvb,
+	.lm_unhold_lvb = gdlm_unhold_lvb,
+	.lm_recovery_done = gdlm_recovery_done,
+	.lm_owner = THIS_MODULE,
+};
+
diff --git a/gfs-kernel/src/gfs/lock_dlm_plock.c b/gfs-kernel/src/gfs/lock_dlm_plock.c
new file mode 100644
index 0000000..d6d6e37
--- /dev/null
+++ b/gfs-kernel/src/gfs/lock_dlm_plock.c
@@ -0,0 +1,439 @@
+/*
+ * Copyright (C) 2005-2008 Red Hat, Inc.  All rights reserved.
+ *
+ * This copyrighted material is made available to anyone wishing to use,
+ * modify, copy, or redistribute it subject to the terms and conditions
+ * of the GNU General Public License version 2.
+ */
+
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/poll.h>
+#include <linux/dlm.h>
+#include <linux/dlm_plock.h>
+
+#include "dlm_internal.h"
+#include "lockspace.h"
+
+static spinlock_t ops_lock;
+static struct list_head send_list;
+static struct list_head recv_list;
+static wait_queue_head_t send_wq;
+static wait_queue_head_t recv_wq;
+
+struct plock_op {
+	struct list_head list;
+	int done;
+	struct dlm_plock_info info;
+};
+
+struct plock_xop {
+	struct plock_op xop;
+	void *callback;
+	void *fl;
+	void *file;
+	struct file_lock flc;
+};
+
+
+static inline void set_version(struct dlm_plock_info *info)
+{
+	info->version[0] = DLM_PLOCK_VERSION_MAJOR;
+	info->version[1] = DLM_PLOCK_VERSION_MINOR;
+	info->version[2] = DLM_PLOCK_VERSION_PATCH;
+}
+
+static int check_version(struct dlm_plock_info *info)
+{
+	if ((DLM_PLOCK_VERSION_MAJOR != info->version[0]) ||
+	    (DLM_PLOCK_VERSION_MINOR < info->version[1])) {
+		log_print("plock device version mismatch: "
+			  "kernel (%u.%u.%u), user (%u.%u.%u)",
+			  DLM_PLOCK_VERSION_MAJOR,
+			  DLM_PLOCK_VERSION_MINOR,
+			  DLM_PLOCK_VERSION_PATCH,
+			  info->version[0],
+			  info->version[1],
+			  info->version[2]);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void send_op(struct plock_op *op)
+{
+	set_version(&op->info);
+	INIT_LIST_HEAD(&op->list);
+	spin_lock(&ops_lock);
+	list_add_tail(&op->list, &send_list);
+	spin_unlock(&ops_lock);
+	wake_up(&send_wq);
+}
+
+int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
+		   int cmd, struct file_lock *fl)
+{
+	struct dlm_ls *ls;
+	struct plock_op *op;
+	struct plock_xop *xop;
+	int rv;
+
+	ls = dlm_find_lockspace_local(lockspace);
+	if (!ls)
+		return -EINVAL;
+
+	xop = kzalloc(sizeof(*xop), GFP_KERNEL);
+	if (!xop) {
+		rv = -ENOMEM;
+		goto out;
+	}
+
+	op = &xop->xop;
+	op->info.optype		= DLM_PLOCK_OP_LOCK;
+	op->info.pid		= fl->fl_pid;
+	op->info.ex		= (fl->fl_type == F_WRLCK);
+	op->info.wait		= IS_SETLKW(cmd);
+	op->info.fsid		= ls->ls_global_id;
+	op->info.number		= number;
+	op->info.start		= fl->fl_start;
+	op->info.end		= fl->fl_end;
+	if (fl->fl_lmops && fl->fl_lmops->fl_grant) {
+		/* fl_owner is lockd which doesn't distinguish
+		   processes on the nfs client */
+		op->info.owner	= (__u64) fl->fl_pid;
+		xop->callback	= fl->fl_lmops->fl_grant;
+		locks_init_lock(&xop->flc);
+		locks_copy_lock(&xop->flc, fl);
+		xop->fl		= fl;
+		xop->file	= file;
+	} else {
+		op->info.owner	= (__u64)(long) fl->fl_owner;
+		xop->callback	= NULL;
+	}
+
+	send_op(op);
+
+	if (xop->callback == NULL)
+		wait_event(recv_wq, (op->done != 0));
+	else {
+		rv = -EINPROGRESS;
+		goto out;
+	}
+
+	spin_lock(&ops_lock);
+	if (!list_empty(&op->list)) {
+		log_error(ls, "dlm_posix_lock: op on list %llx",
+			  (unsigned long long)number);
+		list_del(&op->list);
+	}
+	spin_unlock(&ops_lock);
+
+	rv = op->info.rv;
+
+	if (!rv) {
+		if (posix_lock_file_wait(file, fl) < 0)
+			log_error(ls, "dlm_posix_lock: vfs lock error %llx",
+				  (unsigned long long)number);
+	}
+
+	kfree(xop);
+out:
+	dlm_put_lockspace(ls);
+	return rv;
+}
+EXPORT_SYMBOL_GPL(dlm_posix_lock);
+
+/* Returns failure iff a succesful lock operation should be canceled */
+static int dlm_plock_callback(struct plock_op *op)
+{
+	struct file *file;
+	struct file_lock *fl;
+	struct file_lock *flc;
+	int (*notify)(void *, void *, int) = NULL;
+	struct plock_xop *xop = (struct plock_xop *)op;
+	int rv = 0;
+
+	spin_lock(&ops_lock);
+	if (!list_empty(&op->list)) {
+		log_print("dlm_plock_callback: op on list %llx",
+			  (unsigned long long)op->info.number);
+		list_del(&op->list);
+	}
+	spin_unlock(&ops_lock);
+
+	/* check if the following 2 are still valid or make a copy */
+	file = xop->file;
+	flc = &xop->flc;
+	fl = xop->fl;
+	notify = xop->callback;
+
+	if (op->info.rv) {
+		notify(flc, NULL, op->info.rv);
+		goto out;
+	}
+
+	/* got fs lock; bookkeep locally as well: */
+	flc->fl_flags &= ~FL_SLEEP;
+	if (posix_lock_file(file, flc, NULL)) {
+		/*
+		 * This can only happen in the case of kmalloc() failure.
+		 * The filesystem's own lock is the authoritative lock,
+		 * so a failure to get the lock locally is not a disaster.
+		 * As long as the fs cannot reliably cancel locks (especially
+		 * in a low-memory situation), we're better off ignoring
+		 * this failure than trying to recover.
+		 */
+		log_print("dlm_plock_callback: vfs lock error %llx file %p fl %p",
+			  (unsigned long long)op->info.number, file, fl);
+	}
+
+	rv = notify(flc, NULL, 0);
+	if (rv) {
+		/* XXX: We need to cancel the fs lock here: */
+		log_print("dlm_plock_callback: lock granted after lock request "
+			  "failed; dangling lock!\n");
+		goto out;
+	}
+
+out:
+	kfree(xop);
+	return rv;
+}
+
+int dlm_posix_unlock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
+		     struct file_lock *fl)
+{
+	struct dlm_ls *ls;
+	struct plock_op *op;
+	int rv;
+
+	ls = dlm_find_lockspace_local(lockspace);
+	if (!ls)
+		return -EINVAL;
+
+	op = kzalloc(sizeof(*op), GFP_KERNEL);
+	if (!op) {
+		rv = -ENOMEM;
+		goto out;
+	}
+
+	if (posix_lock_file_wait(file, fl) < 0)
+		log_error(ls, "dlm_posix_unlock: vfs unlock error %llx",
+			  (unsigned long long)number);
+
+	op->info.optype		= DLM_PLOCK_OP_UNLOCK;
+	op->info.pid		= fl->fl_pid;
+	op->info.fsid		= ls->ls_global_id;
+	op->info.number		= number;
+	op->info.start		= fl->fl_start;
+	op->info.end		= fl->fl_end;
+	if (fl->fl_lmops && fl->fl_lmops->fl_grant)
+		op->info.owner	= (__u64) fl->fl_pid;
+	else
+		op->info.owner	= (__u64)(long) fl->fl_owner;
+
+	send_op(op);
+	wait_event(recv_wq, (op->done != 0));
+
+	spin_lock(&ops_lock);
+	if (!list_empty(&op->list)) {
+		log_error(ls, "dlm_posix_unlock: op on list %llx",
+			  (unsigned long long)number);
+		list_del(&op->list);
+	}
+	spin_unlock(&ops_lock);
+
+	rv = op->info.rv;
+
+	if (rv == -ENOENT)
+		rv = 0;
+
+	kfree(op);
+out:
+	dlm_put_lockspace(ls);
+	return rv;
+}
+EXPORT_SYMBOL_GPL(dlm_posix_unlock);
+
+int dlm_posix_get(dlm_lockspace_t *lockspace, u64 number, struct file *file,
+		  struct file_lock *fl)
+{
+	struct dlm_ls *ls;
+	struct plock_op *op;
+	int rv;
+
+	ls = dlm_find_lockspace_local(lockspace);
+	if (!ls)
+		return -EINVAL;
+
+	op = kzalloc(sizeof(*op), GFP_KERNEL);
+	if (!op) {
+		rv = -ENOMEM;
+		goto out;
+	}
+
+	op->info.optype		= DLM_PLOCK_OP_GET;
+	op->info.pid		= fl->fl_pid;
+	op->info.ex		= (fl->fl_type == F_WRLCK);
+	op->info.fsid		= ls->ls_global_id;
+	op->info.number		= number;
+	op->info.start		= fl->fl_start;
+	op->info.end		= fl->fl_end;
+	if (fl->fl_lmops && fl->fl_lmops->fl_grant)
+		op->info.owner	= (__u64) fl->fl_pid;
+	else
+		op->info.owner	= (__u64)(long) fl->fl_owner;
+
+	send_op(op);
+	wait_event(recv_wq, (op->done != 0));
+
+	spin_lock(&ops_lock);
+	if (!list_empty(&op->list)) {
+		log_error(ls, "dlm_posix_get: op on list %llx",
+			  (unsigned long long)number);
+		list_del(&op->list);
+	}
+	spin_unlock(&ops_lock);
+
+	/* info.rv from userspace is 1 for conflict, 0 for no-conflict,
+	   -ENOENT if there are no locks on the file */
+
+	rv = op->info.rv;
+
+	fl->fl_type = F_UNLCK;
+	if (rv == -ENOENT)
+		rv = 0;
+	else if (rv > 0) {
+		fl->fl_type = (op->info.ex) ? F_WRLCK : F_RDLCK;
+		fl->fl_pid = op->info.pid;
+		fl->fl_start = op->info.start;
+		fl->fl_end = op->info.end;
+		rv = 0;
+	}
+
+	kfree(op);
+out:
+	dlm_put_lockspace(ls);
+	return rv;
+}
+EXPORT_SYMBOL_GPL(dlm_posix_get);
+
+/* a read copies out one plock request from the send list */
+static ssize_t dev_read(struct file *file, char __user *u, size_t count,
+			loff_t *ppos)
+{
+	struct dlm_plock_info info;
+	struct plock_op *op = NULL;
+
+	if (count < sizeof(info))
+		return -EINVAL;
+
+	spin_lock(&ops_lock);
+	if (!list_empty(&send_list)) {
+		op = list_entry(send_list.next, struct plock_op, list);
+		list_move(&op->list, &recv_list);
+		memcpy(&info, &op->info, sizeof(info));
+	}
+	spin_unlock(&ops_lock);
+
+	if (!op)
+		return -EAGAIN;
+
+	if (copy_to_user(u, &info, sizeof(info)))
+		return -EFAULT;
+	return sizeof(info);
+}
+
+/* a write copies in one plock result that should match a plock_op
+   on the recv list */
+static ssize_t dev_write(struct file *file, const char __user *u, size_t count,
+			 loff_t *ppos)
+{
+	struct dlm_plock_info info;
+	struct plock_op *op;
+	int found = 0;
+
+	if (count != sizeof(info))
+		return -EINVAL;
+
+	if (copy_from_user(&info, u, sizeof(info)))
+		return -EFAULT;
+
+	if (check_version(&info))
+		return -EINVAL;
+
+	spin_lock(&ops_lock);
+	list_for_each_entry(op, &recv_list, list) {
+		if (op->info.fsid == info.fsid && op->info.number == info.number &&
+		    op->info.owner == info.owner) {
+			list_del_init(&op->list);
+			found = 1;
+			op->done = 1;
+			memcpy(&op->info, &info, sizeof(info));
+			break;
+		}
+	}
+	spin_unlock(&ops_lock);
+
+	if (found) {
+		struct plock_xop *xop;
+		xop = (struct plock_xop *)op;
+		if (xop->callback)
+			count = dlm_plock_callback(op);
+		else
+			wake_up(&recv_wq);
+	} else
+		log_print("dev_write no op %x %llx", info.fsid,
+			  (unsigned long long)info.number);
+	return count;
+}
+
+static unsigned int dev_poll(struct file *file, poll_table *wait)
+{
+	unsigned int mask = 0;
+
+	poll_wait(file, &send_wq, wait);
+
+	spin_lock(&ops_lock);
+	if (!list_empty(&send_list))
+		mask = POLLIN | POLLRDNORM;
+	spin_unlock(&ops_lock);
+
+	return mask;
+}
+
+static const struct file_operations dev_fops = {
+	.read    = dev_read,
+	.write   = dev_write,
+	.poll    = dev_poll,
+	.owner   = THIS_MODULE
+};
+
+static struct miscdevice plock_dev_misc = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = DLM_PLOCK_MISC_NAME,
+	.fops = &dev_fops
+};
+
+int dlm_plock_init(void)
+{
+	int rv;
+
+	spin_lock_init(&ops_lock);
+	INIT_LIST_HEAD(&send_list);
+	INIT_LIST_HEAD(&recv_list);
+	init_waitqueue_head(&send_wq);
+	init_waitqueue_head(&recv_wq);
+
+	rv = misc_register(&plock_dev_misc);
+	if (rv)
+		log_print("dlm_plock_init: misc_register failed %d", rv);
+	return rv;
+}
+
+void dlm_plock_exit(void)
+{
+	if (misc_deregister(&plock_dev_misc) < 0)
+		log_print("dlm_plock_exit: misc_deregister failed");
+}
+
diff --git a/gfs-kernel/src/gfs/lock_dlm_sysfs.c b/gfs-kernel/src/gfs/lock_dlm_sysfs.c
new file mode 100644
index 0000000..31fd6e1
--- /dev/null
+++ b/gfs-kernel/src/gfs/lock_dlm_sysfs.c
@@ -0,0 +1,225 @@
+/*
+ * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
+ * Copyright (C) 2004-2005 Red Hat, Inc.  All rights reserved.
+ *
+ * This copyrighted material is made available to anyone wishing to use,
+ * modify, copy, or redistribute it subject to the terms and conditions
+ * of the GNU General Public License version 2.
+ */
+
+#include <linux/ctype.h>
+#include <linux/stat.h>
+
+#include "lock_dlm.h"
+
+static ssize_t proto_name_show(struct gdlm_ls *ls, char *buf)
+{
+	return sprintf(buf, "%s\n", gdlm_ops.lm_proto_name);
+}
+
+static ssize_t block_show(struct gdlm_ls *ls, char *buf)
+{
+	ssize_t ret;
+	int val = 0;
+
+	if (test_bit(DFL_BLOCK_LOCKS, &ls->flags))
+		val = 1;
+	ret = sprintf(buf, "%d\n", val);
+	return ret;
+}
+
+static ssize_t block_store(struct gdlm_ls *ls, const char *buf, size_t len)
+{
+	ssize_t ret = len;
+	int val;
+
+	val = simple_strtol(buf, NULL, 0);
+
+	if (val == 1)
+		set_bit(DFL_BLOCK_LOCKS, &ls->flags);
+	else if (val == 0) {
+		clear_bit(DFL_BLOCK_LOCKS, &ls->flags);
+		gdlm_submit_delayed(ls);
+	} else {
+		ret = -EINVAL;
+	}
+	return ret;
+}
+
+static ssize_t withdraw_show(struct gdlm_ls *ls, char *buf)
+{
+	ssize_t ret;
+	int val = 0;
+
+	if (test_bit(DFL_WITHDRAW, &ls->flags))
+		val = 1;
+	ret = sprintf(buf, "%d\n", val);
+	return ret;
+}
+
+static ssize_t withdraw_store(struct gdlm_ls *ls, const char *buf, size_t len)
+{
+	ssize_t ret = len;
+	int val;
+
+	val = simple_strtol(buf, NULL, 0);
+
+	if (val == 1)
+		set_bit(DFL_WITHDRAW, &ls->flags);
+	else
+		ret = -EINVAL;
+	wake_up(&ls->wait_control);
+	return ret;
+}
+
+static ssize_t id_show(struct gdlm_ls *ls, char *buf)
+{
+	return sprintf(buf, "%u\n", ls->id);
+}
+
+static ssize_t jid_show(struct gdlm_ls *ls, char *buf)
+{
+	return sprintf(buf, "%d\n", ls->jid);
+}
+
+static ssize_t first_show(struct gdlm_ls *ls, char *buf)
+{
+	return sprintf(buf, "%d\n", ls->first);
+}
+
+static ssize_t first_done_show(struct gdlm_ls *ls, char *buf)
+{
+	return sprintf(buf, "%d\n", ls->first_done);
+}
+
+static ssize_t recover_show(struct gdlm_ls *ls, char *buf)
+{
+	return sprintf(buf, "%d\n", ls->recover_jid);
+}
+
+static ssize_t recover_store(struct gdlm_ls *ls, const char *buf, size_t len)
+{
+	ls->recover_jid = simple_strtol(buf, NULL, 0);
+	ls->fscb(ls->sdp, LM_CB_NEED_RECOVERY, &ls->recover_jid);
+	return len;
+}
+
+static ssize_t recover_done_show(struct gdlm_ls *ls, char *buf)
+{
+	return sprintf(buf, "%d\n", ls->recover_jid_done);
+}
+
+static ssize_t recover_status_show(struct gdlm_ls *ls, char *buf)
+{
+	return sprintf(buf, "%d\n", ls->recover_jid_status);
+}
+
+static ssize_t drop_count_show(struct gdlm_ls *ls, char *buf)
+{
+	return sprintf(buf, "%d\n", ls->drop_locks_count);
+}
+
+static ssize_t drop_count_store(struct gdlm_ls *ls, const char *buf, size_t len)
+{
+	ls->drop_locks_count = simple_strtol(buf, NULL, 0);
+	return len;
+}
+
+struct gdlm_attr {
+	struct attribute attr;
+	ssize_t (*show)(struct gdlm_ls *, char *);
+	ssize_t (*store)(struct gdlm_ls *, const char *, size_t);
+};
+
+#define GDLM_ATTR(_name,_mode,_show,_store) \
+static struct gdlm_attr gdlm_attr_##_name = __ATTR(_name,_mode,_show,_store)
+
+GDLM_ATTR(proto_name,     0444, proto_name_show,     NULL);
+GDLM_ATTR(block,          0644, block_show,          block_store);
+GDLM_ATTR(withdraw,       0644, withdraw_show,       withdraw_store);
+GDLM_ATTR(id,             0444, id_show,             NULL);
+GDLM_ATTR(jid,            0444, jid_show,            NULL);
+GDLM_ATTR(first,          0444, first_show,          NULL);
+GDLM_ATTR(first_done,     0444, first_done_show,     NULL);
+GDLM_ATTR(recover,        0644, recover_show,        recover_store);
+GDLM_ATTR(recover_done,   0444, recover_done_show,   NULL);
+GDLM_ATTR(recover_status, 0444, recover_status_show, NULL);
+GDLM_ATTR(drop_count,     0644, drop_count_show,     drop_count_store);
+
+static struct attribute *gdlm_attrs[] = {
+	&gdlm_attr_proto_name.attr,
+	&gdlm_attr_block.attr,
+	&gdlm_attr_withdraw.attr,
+	&gdlm_attr_id.attr,
+	&gdlm_attr_jid.attr,
+	&gdlm_attr_first.attr,
+	&gdlm_attr_first_done.attr,
+	&gdlm_attr_recover.attr,
+	&gdlm_attr_recover_done.attr,
+	&gdlm_attr_recover_status.attr,
+	&gdlm_attr_drop_count.attr,
+	NULL,
+};
+
+static ssize_t gdlm_attr_show(struct kobject *kobj, struct attribute *attr,
+			      char *buf)
+{
+	struct gdlm_ls *ls = container_of(kobj, struct gdlm_ls, kobj);
+	struct gdlm_attr *a = container_of(attr, struct gdlm_attr, attr);
+	return a->show ? a->show(ls, buf) : 0;
+}
+
+static ssize_t gdlm_attr_store(struct kobject *kobj, struct attribute *attr,
+			       const char *buf, size_t len)
+{
+	struct gdlm_ls *ls = container_of(kobj, struct gdlm_ls, kobj);
+	struct gdlm_attr *a = container_of(attr, struct gdlm_attr, attr);
+	return a->store ? a->store(ls, buf, len) : len;
+}
+
+static struct sysfs_ops gdlm_attr_ops = {
+	.show  = gdlm_attr_show,
+	.store = gdlm_attr_store,
+};
+
+static struct kobj_type gdlm_ktype = {
+	.default_attrs = gdlm_attrs,
+	.sysfs_ops     = &gdlm_attr_ops,
+};
+
+static struct kset *gdlm_kset;
+
+int gdlm_kobject_setup(struct gdlm_ls *ls, struct kobject *fskobj)
+{
+	int error;
+
+	ls->kobj.kset = gdlm_kset;
+	error = kobject_init_and_add(&ls->kobj, &gdlm_ktype, fskobj,
+				     "lock_module");
+	if (error)
+		log_error("can't register kobj %d", error);
+	kobject_uevent(&ls->kobj, KOBJ_ADD);
+
+	return error;
+}
+
+void gdlm_kobject_release(struct gdlm_ls *ls)
+{
+	kobject_put(&ls->kobj);
+}
+
+int gdlm_sysfs_init(void)
+{
+	gdlm_kset = kset_create_and_add("lock_dlm_gfs", NULL, kernel_kobj);
+	if (!gdlm_kset) {
+		printk(KERN_WARNING "%s: cannot create kset\n", __func__);
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+void gdlm_sysfs_exit(void)
+{
+	kset_unregister(gdlm_kset);
+}
+
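For illustration only (not part of this patch): the attributes above are the knobs the user-space mount/recovery helpers poke, e.g. writing 1/0 to "block" around recovery (block_store() sets or clears DFL_BLOCK_LOCKS and resubmits delayed locks) or a journal id to "recover" (recover_store() fires LM_CB_NEED_RECOVERY). A sketch of such a write follows; the sysfs path is an assumption, since the "lock_module" directory is created under whatever fskobj the filesystem passes to gdlm_kobject_setup().

/*
 * Sketch only: write a value into a lock_module sysfs attribute.  The
 * parent directory path is hypothetical; only the trailing
 * "lock_module/<attr>" part is fixed by gdlm_kobject_setup() above.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int set_lock_module_attr(const char *fsdir, const char *attr,
				const char *val)
{
	char path[256];
	int fd, rv;

	snprintf(path, sizeof(path), "%s/lock_module/%s", fsdir, attr);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	rv = write(fd, val, strlen(val)) < 0 ? -1 : 0;
	close(fd);
	return rv;
}

int main(void)
{
	/* hypothetical per-fs directory; block lock activity, then release it */
	const char *fsdir = "/sys/fs/gfs/mycluster:myfs";

	set_lock_module_attr(fsdir, "block", "1");
	set_lock_module_attr(fsdir, "block", "0");
	return 0;
}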
diff --git a/gfs-kernel/src/gfs/lock_dlm_thread.c b/gfs-kernel/src/gfs/lock_dlm_thread.c
new file mode 100644
index 0000000..521694f
--- /dev/null
+++ b/gfs-kernel/src/gfs/lock_dlm_thread.c
@@ -0,0 +1,367 @@
+/*
+ * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
+ * Copyright (C) 2004-2005 Red Hat, Inc.  All rights reserved.
+ *
+ * This copyrighted material is made available to anyone wishing to use,
+ * modify, copy, or redistribute it subject to the terms and conditions
+ * of the GNU General Public License version 2.
+ */
+
+#include "lock_dlm.h"
+
+/* A lock placed on this queue is re-submitted to DLM as soon as the lock_dlm
+   thread gets to it. */
+
+static void queue_submit(struct gdlm_lock *lp)
+{
+	struct gdlm_ls *ls = lp->ls;
+
+	spin_lock(&ls->async_lock);
+	list_add_tail(&lp->delay_list, &ls->submit);
+	spin_unlock(&ls->async_lock);
+	wake_up(&ls->thread_wait);
+}
+
+static void process_blocking(struct gdlm_lock *lp, int bast_mode)
+{
+	struct gdlm_ls *ls = lp->ls;
+	unsigned int cb = 0;
+
+	switch (gdlm_make_lmstate(bast_mode)) {
+	case LM_ST_EXCLUSIVE:
+		cb = LM_CB_NEED_E;
+		break;
+	case LM_ST_DEFERRED:
+		cb = LM_CB_NEED_D;
+		break;
+	case LM_ST_SHARED:
+		cb = LM_CB_NEED_S;
+		break;
+	default:
+		gdlm_assert(0, "unknown bast mode %u", bast_mode);
+	}
+
+	ls->fscb(ls->sdp, cb, &lp->lockname);
+}
+
+static void wake_up_ast(struct gdlm_lock *lp)
+{
+	clear_bit(LFL_AST_WAIT, &lp->flags);
+	smp_mb__after_clear_bit();
+	wake_up_bit(&lp->flags, LFL_AST_WAIT);
+}
+
+static void process_complete(struct gdlm_lock *lp)
+{
+	struct gdlm_ls *ls = lp->ls;
+	struct lm_async_cb acb;
+	s16 prev_mode = lp->cur;
+
+	memset(&acb, 0, sizeof(acb));
+
+	if (lp->lksb.sb_status == -DLM_ECANCEL) {
+		log_info("complete dlm cancel %x,%llx flags %lx",
+		 	 lp->lockname.ln_type,
+			 (unsigned long long)lp->lockname.ln_number,
+			 lp->flags);
+
+		lp->req = lp->cur;
+		acb.lc_ret |= LM_OUT_CANCELED;
+		if (lp->cur == DLM_LOCK_IV)
+			lp->lksb.sb_lkid = 0;
+		goto out;
+	}
+
+	if (test_and_clear_bit(LFL_DLM_UNLOCK, &lp->flags)) {
+		if (lp->lksb.sb_status != -DLM_EUNLOCK) {
+			log_info("unlock sb_status %d %x,%llx flags %lx",
+				 lp->lksb.sb_status, lp->lockname.ln_type,
+				 (unsigned long long)lp->lockname.ln_number,
+				 lp->flags);
+			return;
+		}
+
+		lp->cur = DLM_LOCK_IV;
+		lp->req = DLM_LOCK_IV;
+		lp->lksb.sb_lkid = 0;
+
+		if (test_and_clear_bit(LFL_UNLOCK_DELETE, &lp->flags)) {
+			gdlm_delete_lp(lp);
+			return;
+		}
+		goto out;
+	}
+
+	if (lp->lksb.sb_flags & DLM_SBF_VALNOTVALID)
+		memset(lp->lksb.sb_lvbptr, 0, GDLM_LVB_SIZE);
+
+	if (lp->lksb.sb_flags & DLM_SBF_ALTMODE) {
+		if (lp->req == DLM_LOCK_PR)
+			lp->req = DLM_LOCK_CW;
+		else if (lp->req == DLM_LOCK_CW)
+			lp->req = DLM_LOCK_PR;
+	}
+
+	/*
+	 * A canceled lock request.  The lock was just taken off the delayed
+	 * list and was never even submitted to dlm.
+	 */
+
+	if (test_and_clear_bit(LFL_CANCEL, &lp->flags)) {
+		log_info("complete internal cancel %x,%llx",
+		 	 lp->lockname.ln_type,
+			 (unsigned long long)lp->lockname.ln_number);
+		lp->req = lp->cur;
+		acb.lc_ret |= LM_OUT_CANCELED;
+		goto out;
+	}
+
+	/*
+	 * An error occurred.
+	 */
+
+	if (lp->lksb.sb_status) {
+		/* a "normal" error */
+		if ((lp->lksb.sb_status == -EAGAIN) &&
+		    (lp->lkf & DLM_LKF_NOQUEUE)) {
+			lp->req = lp->cur;
+			if (lp->cur == DLM_LOCK_IV)
+				lp->lksb.sb_lkid = 0;
+			goto out;
+		}
+
+		/* this could only happen with cancels I think */
+		log_info("ast sb_status %d %x,%llx flags %lx",
+			 lp->lksb.sb_status, lp->lockname.ln_type,
+			 (unsigned long long)lp->lockname.ln_number,
+			 lp->flags);
+		return;
+	}
+
+	/*
+	 * This is an AST for an EX->EX conversion for sync_lvb from GFS.
+	 */
+
+	if (test_and_clear_bit(LFL_SYNC_LVB, &lp->flags)) {
+		wake_up_ast(lp);
+		return;
+	}
+
+	/*
+	 * A lock has been demoted to NL because it initially completed during
+	 * BLOCK_LOCKS.  Now it must be requested in the originally requested
+	 * mode.
+	 */
+
+	if (test_and_clear_bit(LFL_REREQUEST, &lp->flags)) {
+		gdlm_assert(lp->req == DLM_LOCK_NL, "%x,%llx",
+			    lp->lockname.ln_type,
+			    (unsigned long long)lp->lockname.ln_number);
+		gdlm_assert(lp->prev_req > DLM_LOCK_NL, "%x,%llx",
+			    lp->lockname.ln_type,
+			    (unsigned long long)lp->lockname.ln_number);
+
+		lp->cur = DLM_LOCK_NL;
+		lp->req = lp->prev_req;
+		lp->prev_req = DLM_LOCK_IV;
+		lp->lkf &= ~DLM_LKF_CONVDEADLK;
+
+		set_bit(LFL_NOCACHE, &lp->flags);
+
+		if (test_bit(DFL_BLOCK_LOCKS, &ls->flags) &&
+		    !test_bit(LFL_NOBLOCK, &lp->flags))
+			gdlm_queue_delayed(lp);
+		else
+			queue_submit(lp);
+		return;
+	}
+
+	/*
+	 * A request is granted during dlm recovery.  It may be granted
+	 * because the locks of a failed node were cleared.  In that case,
+	 * there may be inconsistent data beneath this lock and we must wait
+	 * for recovery to complete to use it.  When gfs recovery is done this
+	 * granted lock will be converted to NL and then reacquired in this
+	 * granted state.
+	 */
+
+	if (test_bit(DFL_BLOCK_LOCKS, &ls->flags) &&
+	    !test_bit(LFL_NOBLOCK, &lp->flags) &&
+	    lp->req != DLM_LOCK_NL) {
+
+		lp->cur = lp->req;
+		lp->prev_req = lp->req;
+		lp->req = DLM_LOCK_NL;
+		lp->lkf |= DLM_LKF_CONVERT;
+		lp->lkf &= ~DLM_LKF_CONVDEADLK;
+
+		log_debug("rereq %x,%llx id %x %d,%d",
+			  lp->lockname.ln_type,
+			  (unsigned long long)lp->lockname.ln_number,
+			  lp->lksb.sb_lkid, lp->cur, lp->req);
+
+		set_bit(LFL_REREQUEST, &lp->flags);
+		queue_submit(lp);
+		return;
+	}
+
+	/*
+	 * DLM demoted the lock to NL before it was granted so GFS must be
+	 * told it cannot cache data for this lock.
+	 */
+
+	if (lp->lksb.sb_flags & DLM_SBF_DEMOTED)
+		set_bit(LFL_NOCACHE, &lp->flags);
+
+out:
+	/*
+	 * This is an internal lock_dlm lock
+	 */
+
+	if (test_bit(LFL_INLOCK, &lp->flags)) {
+		clear_bit(LFL_NOBLOCK, &lp->flags);
+		lp->cur = lp->req;
+		wake_up_ast(lp);
+		return;
+	}
+
+	/*
+	 * Normal completion of a lock request.  Tell GFS it now has the lock.
+	 */
+
+	clear_bit(LFL_NOBLOCK, &lp->flags);
+	lp->cur = lp->req;
+
+	acb.lc_name = lp->lockname;
+	acb.lc_ret |= gdlm_make_lmstate(lp->cur);
+
+	if (!test_and_clear_bit(LFL_NOCACHE, &lp->flags) &&
+	    (lp->cur > DLM_LOCK_NL) && (prev_mode > DLM_LOCK_NL))
+		acb.lc_ret |= LM_OUT_CACHEABLE;
+
+	ls->fscb(ls->sdp, LM_CB_ASYNC, &acb);
+}
+
+static inline int no_work(struct gdlm_ls *ls, int blocking)
+{
+	int ret;
+
+	spin_lock(&ls->async_lock);
+	ret = list_empty(&ls->complete) && list_empty(&ls->submit);
+	if (ret && blocking)
+		ret = list_empty(&ls->blocking);
+	spin_unlock(&ls->async_lock);
+
+	return ret;
+}
+
+static inline int check_drop(struct gdlm_ls *ls)
+{
+	if (!ls->drop_locks_count)
+		return 0;
+
+	if (time_after(jiffies, ls->drop_time + ls->drop_locks_period * HZ)) {
+		ls->drop_time = jiffies;
+		if (ls->all_locks_count >= ls->drop_locks_count)
+			return 1;
+	}
+	return 0;
+}
+
+static int gdlm_thread(void *data, int blist)
+{
+	struct gdlm_ls *ls = (struct gdlm_ls *) data;
+	struct gdlm_lock *lp = NULL;
+	uint8_t complete, blocking, submit, drop;
+
+	/* Only thread1 is allowed to do blocking callbacks since gfs
+	   may wait for a completion callback within a blocking cb. */
+
+	while (!kthread_should_stop()) {
+		wait_event_interruptible(ls->thread_wait,
+				!no_work(ls, blist) || kthread_should_stop());
+
+		complete = blocking = submit = drop = 0;
+
+		spin_lock(&ls->async_lock);
+
+		if (blist && !list_empty(&ls->blocking)) {
+			lp = list_entry(ls->blocking.next, struct gdlm_lock,
+					blist);
+			list_del_init(&lp->blist);
+			blocking = lp->bast_mode;
+			lp->bast_mode = 0;
+		} else if (!list_empty(&ls->complete)) {
+			lp = list_entry(ls->complete.next, struct gdlm_lock,
+					clist);
+			list_del_init(&lp->clist);
+			complete = 1;
+		} else if (!list_empty(&ls->submit)) {
+			lp = list_entry(ls->submit.next, struct gdlm_lock,
+					delay_list);
+			list_del_init(&lp->delay_list);
+			submit = 1;
+		}
+
+		drop = check_drop(ls);
+		spin_unlock(&ls->async_lock);
+
+		if (complete)
+			process_complete(lp);
+
+		else if (blocking)
+			process_blocking(lp, blocking);
+
+		else if (submit)
+			gdlm_do_lock(lp);
+
+		if (drop)
+			ls->fscb(ls->sdp, LM_CB_DROPLOCKS, NULL);
+
+		schedule();
+	}
+
+	return 0;
+}
+
+static int gdlm_thread1(void *data)
+{
+	return gdlm_thread(data, 1);
+}
+
+static int gdlm_thread2(void *data)
+{
+	return gdlm_thread(data, 0);
+}
+
+int gdlm_init_threads(struct gdlm_ls *ls)
+{
+	struct task_struct *p;
+	int error;
+
+	p = kthread_run(gdlm_thread1, ls, "lock_dlm1");
+	if (IS_ERR(p)) {
+		error = PTR_ERR(p);
+		log_error("can't start lock_dlm1 thread %d", error);
+		return error;
+	}
+	ls->thread1 = p;
+
+	p = kthread_run(gdlm_thread2, ls, "lock_dlm2");
+	if (IS_ERR(p)) {
+		error = PTR_ERR(p);
+		log_error("can't start lock_dlm2 thread %d", error);
+		kthread_stop(ls->thread1);
+		return error;
+	}
+	ls->thread2 = p;
+
+	return 0;
+}
+
+void gdlm_release_threads(struct gdlm_ls *ls)
+{
+	kthread_stop(ls->thread1);
+	kthread_stop(ls->thread2);
+}
+
diff --git a/gfs-kernel/src/gfs/lock_nolock_main.c b/gfs-kernel/src/gfs/lock_nolock_main.c
new file mode 100644
index 0000000..c8ce5cc
--- /dev/null
+++ b/gfs-kernel/src/gfs/lock_nolock_main.c
@@ -0,0 +1,230 @@
+/*
+ * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
+ * Copyright (C) 2004-2005 Red Hat, Inc.  All rights reserved.
+ *
+ * This copyrighted material is made available to anyone wishing to use,
+ * modify, copy, or redistribute it subject to the terms and conditions
+ * of the GNU General Public License version 2.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include "lm_interface.h"
+
+struct nolock_lockspace {
+	unsigned int nl_lvb_size;
+};
+
+static const struct lm_lockops nolock_ops;
+
+static int nolock_mount(char *table_name, char *host_data,
+			lm_callback_t cb, void *cb_data,
+			unsigned int min_lvb_size, int flags,
+			struct lm_lockstruct *lockstruct,
+			struct kobject *fskobj)
+{
+	char *c;
+	unsigned int jid;
+	struct nolock_lockspace *nl;
+
+	c = strstr(host_data, "jid=");
+	if (!c)
+		jid = 0;
+	else {
+		c += 4;
+		sscanf(c, "%u", &jid);
+	}
+
+	nl = kzalloc(sizeof(struct nolock_lockspace), GFP_KERNEL);
+	if (!nl)
+		return -ENOMEM;
+
+	nl->nl_lvb_size = min_lvb_size;
+
+	lockstruct->ls_jid = jid;
+	lockstruct->ls_first = 1;
+	lockstruct->ls_lvb_size = min_lvb_size;
+	lockstruct->ls_lockspace = nl;
+	lockstruct->ls_ops = &nolock_ops;
+	lockstruct->ls_flags = LM_LSFLAG_LOCAL;
+
+	return 0;
+}
+
+static void nolock_others_may_mount(void *lockspace)
+{
+}
+
+static void nolock_unmount(void *lockspace)
+{
+	struct nolock_lockspace *nl = lockspace;
+	kfree(nl);
+}
+
+static void nolock_withdraw(void *lockspace)
+{
+}
+
+/**
+ * nolock_get_lock - get an lm_lock_t given a description of the lock
+ * @lockspace: the lockspace the lock lives in
+ * @name: the name of the lock
+ * @lockp: return the lm_lock_t here
+ *
+ * Returns: 0 on success, -EXXX on failure
+ */
+
+static int nolock_get_lock(void *lockspace, struct lm_lockname *name,
+			   void **lockp)
+{
+	*lockp = lockspace;
+	return 0;
+}
+
+/**
+ * nolock_put_lock - get rid of a lock structure
+ * @lock: the lock to throw away
+ *
+ */
+
+static void nolock_put_lock(void *lock)
+{
+}
+
+/**
+ * nolock_lock - acquire a lock
+ * @lock: the lock to manipulate
+ * @cur_state: the current state
+ * @req_state: the requested state
+ * @flags: modifier flags
+ *
+ * Returns: A bitmap of LM_OUT_*
+ */
+
+static unsigned int nolock_lock(void *lock, unsigned int cur_state,
+				unsigned int req_state, unsigned int flags)
+{
+	return req_state | LM_OUT_CACHEABLE;
+}
+
+/**
+ * nolock_unlock - unlock a lock
+ * @lock: the lock to manipulate
+ * @cur_state: the current state
+ *
+ * Returns: 0
+ */
+
+static unsigned int nolock_unlock(void *lock, unsigned int cur_state)
+{
+	return 0;
+}
+
+static void nolock_cancel(void *lock)
+{
+}
+
+/**
+ * nolock_hold_lvb - hold on to a lock value block
+ * @lock: the lock the LVB is associated with
+ * @lvbp: return the lm_lvb_t here
+ *
+ * Returns: 0 on success, -EXXX on failure
+ */
+
+static int nolock_hold_lvb(void *lock, char **lvbp)
+{
+	struct nolock_lockspace *nl = lock;
+	int error = 0;
+
+	*lvbp = kzalloc(nl->nl_lvb_size, GFP_NOFS);
+	if (!*lvbp)
+		error = -ENOMEM;
+
+	return error;
+}
+
+/**
+ * nolock_unhold_lvb - release a LVB
+ * @lock: the lock the LVB is associated with
+ * @lvb: the lock value block
+ *
+ */
+
+static void nolock_unhold_lvb(void *lock, char *lvb)
+{
+	kfree(lvb);
+}
+
+static int nolock_plock_get(void *lockspace, struct lm_lockname *name,
+			    struct file *file, struct file_lock *fl)
+{
+	posix_test_lock(file, fl);
+
+	return 0;
+}
+
+static int nolock_plock(void *lockspace, struct lm_lockname *name,
+			struct file *file, int cmd, struct file_lock *fl)
+{
+	int error;
+	error = posix_lock_file_wait(file, fl);
+	return error;
+}
+
+static int nolock_punlock(void *lockspace, struct lm_lockname *name,
+			  struct file *file, struct file_lock *fl)
+{
+	int error;
+	error = posix_lock_file_wait(file, fl);
+	return error;
+}
+
+static void nolock_recovery_done(void *lockspace, unsigned int jid,
+				 unsigned int message)
+{
+}
+
+static const struct lm_lockops nolock_ops = {
+	.lm_proto_name = "lock_nolock",
+	.lm_mount = nolock_mount,
+	.lm_others_may_mount = nolock_others_may_mount,
+	.lm_unmount = nolock_unmount,
+	.lm_withdraw = nolock_withdraw,
+	.lm_get_lock = nolock_get_lock,
+	.lm_put_lock = nolock_put_lock,
+	.lm_lock = nolock_lock,
+	.lm_unlock = nolock_unlock,
+	.lm_cancel = nolock_cancel,
+	.lm_hold_lvb = nolock_hold_lvb,
+	.lm_unhold_lvb = nolock_unhold_lvb,
+	.lm_plock_get = nolock_plock_get,
+	.lm_plock = nolock_plock,
+	.lm_punlock = nolock_punlock,
+	.lm_recovery_done = nolock_recovery_done,
+	.lm_owner = THIS_MODULE,
+};
+
+int init_nolock(void)
+{
+	int error;
+
+	error = gfs_register_lockproto(&nolock_ops);
+	if (error) {
+		printk(KERN_WARNING
+		       "lock_nolock: can't register protocol: %d\n", error);
+		return error;
+	}
+
+	printk(KERN_INFO
+	       "Lock_Nolock (built %s %s) installed\n", __DATE__, __TIME__);
+	return 0;
+}
+
+void exit_nolock(void)
+{
+	gfs_unregister_lockproto(&nolock_ops);
+}
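For illustration only (not part of this patch): the "jid=" string that nolock_mount() parses out of host_data comes from the GFS hostdata= mount option, with the protocol itself selected by lockproto=. A minimal mount(2) sketch follows; the option names are taken from the gfs mount interface and the device/mount-point paths are placeholders.

/*
 * Sketch only: mount GFS with the nolock protocol and an explicit journal
 * id.  The hostdata string is what reaches nolock_mount()'s host_data
 * argument; device and mount point are placeholders.
 */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	if (mount("/dev/vg0/lv_gfs", "/mnt/gfs", "gfs", 0,
		  "lockproto=lock_nolock,hostdata=jid=0") < 0) {
		perror("mount");
		return 1;
	}
	return 0;
}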
diff --git a/gfs-kernel/src/gfs/locking.c b/gfs-kernel/src/gfs/locking.c
new file mode 100644
index 0000000..96da559
--- /dev/null
+++ b/gfs-kernel/src/gfs/locking.c
@@ -0,0 +1,180 @@
+/*
+ * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
+ * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
+ *
+ * This copyrighted material is made available to anyone wishing to use,
+ * modify, copy, or redistribute it subject to the terms and conditions
+ * of the GNU General Public License version 2.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/kmod.h>
+#include <linux/fs.h>
+#include <linux/delay.h>
+#include "lm_interface.h"
+
+struct lmh_wrapper {
+	struct list_head lw_list;
+	const struct lm_lockops *lw_ops;
+};
+
+/* List of registered low-level locking protocols.  A file system selects one
+   of them by name at mount time, e.g. lock_nolock, lock_dlm. */
+
+static LIST_HEAD(lmh_list);
+static DEFINE_MUTEX(lmh_lock);
+
+/**
+ * gfs_register_lockproto - Register a low-level locking protocol
+ * @proto: the protocol definition
+ *
+ * Returns: 0 on success, -EXXX on failure
+ */
+
+int gfs_register_lockproto(const struct lm_lockops *proto)
+{
+	struct lmh_wrapper *lw;
+
+	mutex_lock(&lmh_lock);
+
+	list_for_each_entry(lw, &lmh_list, lw_list) {
+		if (!strcmp(lw->lw_ops->lm_proto_name, proto->lm_proto_name)) {
+			mutex_unlock(&lmh_lock);
+			printk(KERN_INFO "GFS: protocol %s already exists\n",
+			       proto->lm_proto_name);
+			return -EEXIST;
+		}
+	}
+
+	lw = kzalloc(sizeof(struct lmh_wrapper), GFP_KERNEL);
+	if (!lw) {
+		mutex_unlock(&lmh_lock);
+		return -ENOMEM;
+	}
+
+	lw->lw_ops = proto;
+	list_add(&lw->lw_list, &lmh_list);
+
+	mutex_unlock(&lmh_lock);
+
+	return 0;
+}
+
+/**
+ * gfs_unregister_lockproto - Unregister a low-level locking protocol
+ * @proto: the protocol definition
+ *
+ */
+
+void gfs_unregister_lockproto(const struct lm_lockops *proto)
+{
+	struct lmh_wrapper *lw;
+
+	mutex_lock(&lmh_lock);
+
+	list_for_each_entry(lw, &lmh_list, lw_list) {
+		if (!strcmp(lw->lw_ops->lm_proto_name, proto->lm_proto_name)) {
+			list_del(&lw->lw_list);
+			mutex_unlock(&lmh_lock);
+			kfree(lw);
+			return;
+		}
+	}
+
+	mutex_unlock(&lmh_lock);
+
+	printk(KERN_WARNING "GFS: can't unregister lock protocol %s\n",
+	       proto->lm_proto_name);
+}
+
+/**
+ * gfs_mount_lockproto - Mount a lock protocol
+ * @proto_name: the name of the protocol
+ * @table_name: the name of the lock space
+ * @host_data: data specific to this host
+ * @cb: the callback to the code using the lock module
+ * @cb_data: private data (the file system superblock) passed back through @cb
+ * @min_lvb_size: the minimum LVB size that the caller can deal with
+ * @flags: LM_MFLAG_*
+ * @lockstruct: a structure returned describing the mount
+ *
+ * Returns: 0 on success, -EXXX on failure
+ */
+
+int gfs_mount_lockproto(char *proto_name, char *table_name, char *host_data,
+			 lm_callback_t cb, void *cb_data,
+			 unsigned int min_lvb_size, int flags,
+			 struct lm_lockstruct *lockstruct,
+			 struct kobject *fskobj)
+{
+	struct lmh_wrapper *lw = NULL;
+	int try = 0;
+	int error, found;
+
+retry:
+	mutex_lock(&lmh_lock);
+
+	found = 0;
+	list_for_each_entry(lw, &lmh_list, lw_list) {
+		if (!strcmp(lw->lw_ops->lm_proto_name, proto_name)) {
+			found = 1;
+			break;
+		}
+	}
+
+	if (!found) {
+		if (!try && capable(CAP_SYS_MODULE)) {
+			try = 1;
+			mutex_unlock(&lmh_lock);
+			request_module(proto_name);
+			goto retry;
+		}
+		printk(KERN_INFO "GFS: can't find protocol %s\n", proto_name);
+		error = -ENOENT;
+		goto out;
+	}
+
+	if (!try_module_get(lw->lw_ops->lm_owner)) {
+		try = 0;
+		mutex_unlock(&lmh_lock);
+		msleep(1000);
+		goto retry;
+	}
+
+	error = lw->lw_ops->lm_mount(table_name, host_data, cb, cb_data,
+				     min_lvb_size, flags, lockstruct, fskobj);
+	if (error)
+		module_put(lw->lw_ops->lm_owner);
+out:
+	mutex_unlock(&lmh_lock);
+	return error;
+}
+
+void gfs_unmount_lockproto(struct lm_lockstruct *lockstruct)
+{
+	mutex_lock(&lmh_lock);
+	lockstruct->ls_ops->lm_unmount(lockstruct->ls_lockspace);
+	if (lockstruct->ls_ops->lm_owner)
+		module_put(lockstruct->ls_ops->lm_owner);
+	mutex_unlock(&lmh_lock);
+}
+
+/**
+ * gfs_withdraw_lockproto - abnormally unmount a lock module
+ * @lockstruct: the lockstruct passed into mount
+ *
+ */
+
+void gfs_withdraw_lockproto(struct lm_lockstruct *lockstruct)
+{
+	mutex_lock(&lmh_lock);
+	lockstruct->ls_ops->lm_withdraw(lockstruct->ls_lockspace);
+	if (lockstruct->ls_ops->lm_owner)
+		module_put(lockstruct->ls_ops->lm_owner);
+	mutex_unlock(&lmh_lock);
+}
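For illustration only (not part of this patch): the registry above is driven from the filesystem side, where gfs-kernel/src/gfs/lm.c (modified elsewhere in this patch) mounts a protocol by name. A minimal caller sketch follows, assuming lm_interface.h declares gfs_mount_lockproto()/gfs_unmount_lockproto() and that lm_callback_t matches the ls->fscb(sdp, type, data) calls seen in the lock_dlm code; the strings, LVB size, and private pointer are placeholders.

/*
 * Sketch only: select a lock protocol by name through the registry above.
 * Everything below other than the registry entry points is a placeholder.
 */
#include <linux/kobject.h>
#include "lm_interface.h"

static void example_callback(void *cb_data, unsigned int type, void *data)
{
	/* LM_CB_NEED_E/D/S, LM_CB_ASYNC, LM_CB_NEED_RECOVERY, ... land here */
}

static int example_mount(void *sdp, struct kobject *fskobj)
{
	struct lm_lockstruct lockstruct;
	int error;

	error = gfs_mount_lockproto("lock_dlm", "mycluster:myfs", "jid=0",
				    example_callback, sdp,
				    32 /* placeholder min_lvb_size */, 0,
				    &lockstruct, fskobj);
	if (error)
		return error;

	/* ... drive lockstruct.ls_ops against lockstruct.ls_lockspace ... */

	gfs_unmount_lockproto(&lockstruct);
	return 0;
}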
diff --git a/gfs-kernel/src/gfs/log.c b/gfs-kernel/src/gfs/log.c
index eaa0949..8305ff2 100644
--- a/gfs-kernel/src/gfs/log.c
+++ b/gfs-kernel/src/gfs/log.c
@@ -22,7 +22,7 @@
 #include <linux/slab.h>
 #include <linux/smp_lock.h>
 #include <linux/spinlock.h>
-#include <asm/semaphore.h>
+#include <linux/semaphore.h>
 #include <linux/completion.h>
 #include <linux/buffer_head.h>
 
diff --git a/gfs-kernel/src/gfs/lops.c b/gfs-kernel/src/gfs/lops.c
index ebfbbd7..6010444 100644
--- a/gfs-kernel/src/gfs/lops.c
+++ b/gfs-kernel/src/gfs/lops.c
@@ -2,7 +2,7 @@
 #include <linux/slab.h>
 #include <linux/smp_lock.h>
 #include <linux/spinlock.h>
-#include <asm/semaphore.h>
+#include <linux/semaphore.h>
 #include <linux/completion.h>
 #include <linux/buffer_head.h>
 
diff --git a/gfs-kernel/src/gfs/lvb.c b/gfs-kernel/src/gfs/lvb.c
index eccedc2..41fc26f 100644
--- a/gfs-kernel/src/gfs/lvb.c
+++ b/gfs-kernel/src/gfs/lvb.c
@@ -2,7 +2,7 @@
 #include <linux/slab.h>
 #include <linux/smp_lock.h>
 #include <linux/spinlock.h>
-#include <asm/semaphore.h>
+#include <linux/semaphore.h>
 #include <linux/completion.h>
 #include <linux/buffer_head.h>
 
diff --git a/gfs-kernel/src/gfs/main.c b/gfs-kernel/src/gfs/main.c
index ca5e55d..16b520b 100644
--- a/gfs-kernel/src/gfs/main.c
+++ b/gfs-kernel/src/gfs/main.c
@@ -2,7 +2,7 @@
 #include <linux/slab.h>
 #include <linux/smp_lock.h>
 #include <linux/spinlock.h>
-#include <asm/semaphore.h>
+#include <linux/semaphore.h>
 #include <linux/completion.h>
 #include <linux/buffer_head.h>
 #include <linux/proc_fs.h>
@@ -73,6 +73,14 @@ int __init init_gfs_fs(void)
 	printk("GFS %s (built %s %s) installed\n",
 	       RELEASE_VERSION, __DATE__, __TIME__);
 
+	error = init_lock_dlm();
+	if (error)
+		goto fail1;
+
+	error = init_nolock();
+	if (error)
+		goto fail1;
+
 	return 0;
 
  fail1:
@@ -104,6 +112,8 @@ int __init init_gfs_fs(void)
 void __exit
 exit_gfs_fs(void)
 {
+	exit_nolock();
+	exit_lock_dlm();
 	unregister_filesystem(&gfs_fs_type);
 
 	kmem_cache_destroy(gfs_mhc_cachep);
diff --git a/gfs-kernel/src/gfs/mount.c b/gfs-kernel/src/gfs/mount.c
index d2d341b..2683736 100644
--- a/gfs-kernel/src/gfs/mount.c
+++ b/gfs-kernel/src/gfs/mount.c
@@ -2,7 +2,7 @@
 #include <linux/slab.h>
 #include <linux/smp_lock.h>
 #include <linux/spinlock.h>
-#include <asm/semaphore.h>
+#include <linux/semaphore.h>
 #include <linux/completion.h>
 #include <linux/buffer_head.h>
 
diff --git a/gfs-kernel/src/gfs/ondisk.c b/gfs-kernel/src/gfs/ondisk.c
index 566e9cf..1d169eb 100644
--- a/gfs-kernel/src/gfs/ondisk.c
+++ b/gfs-kernel/src/gfs/ondisk.c
@@ -2,7 +2,7 @@
 #include <linux/slab.h>
 #include <linux/smp_lock.h>
 #include <linux/spinlock.h>
-#include <asm/semaphore.h>
+#include <linux/semaphore.h>
 #include <linux/completion.h>
 #include <linux/buffer_head.h>
 
diff --git a/gfs-kernel/src/gfs/ops_address.c b/gfs-kernel/src/gfs/ops_address.c
index ff08c66..98c3384 100644
--- a/gfs-kernel/src/gfs/ops_address.c
+++ b/gfs-kernel/src/gfs/ops_address.c
@@ -3,7 +3,7 @@
 #include <linux/vmalloc.h>
 #include <linux/smp_lock.h>
 #include <linux/spinlock.h>
-#include <asm/semaphore.h>
+#include <linux/semaphore.h>
 #include <linux/completion.h>
 #include <linux/buffer_head.h>
 #include <linux/pagemap.h>
diff --git a/gfs-kernel/src/gfs/ops_dentry.c b/gfs-kernel/src/gfs/ops_dentry.c
index 6cd7520..48efd34 100644
--- a/gfs-kernel/src/gfs/ops_dentry.c
+++ b/gfs-kernel/src/gfs/ops_dentry.c
@@ -2,7 +2,7 @@
 #include <linux/slab.h>
 #include <linux/smp_lock.h>
 #include <linux/spinlock.h>
-#include <asm/semaphore.h>
+#include <linux/semaphore.h>
 #include <linux/completion.h>
 #include <linux/buffer_head.h>
 
diff --git a/gfs-kernel/src/gfs/ops_export.c b/gfs-kernel/src/gfs/ops_export.c
index cc21c5a..bf3484c 100644
--- a/gfs-kernel/src/gfs/ops_export.c
+++ b/gfs-kernel/src/gfs/ops_export.c
@@ -2,7 +2,7 @@
 #include <linux/slab.h>
 #include <linux/smp_lock.h>
 #include <linux/spinlock.h>
-#include <asm/semaphore.h>
+#include <linux/semaphore.h>
 #include <linux/completion.h>
 #include <linux/buffer_head.h>
 #include <linux/exportfs.h>
diff --git a/gfs-kernel/src/gfs/ops_file.c b/gfs-kernel/src/gfs/ops_file.c
index 93d728a..2ca5242 100644
--- a/gfs-kernel/src/gfs/ops_file.c
+++ b/gfs-kernel/src/gfs/ops_file.c
@@ -2,7 +2,7 @@
 #include <linux/slab.h>
 #include <linux/smp_lock.h>
 #include <linux/spinlock.h>
-#include <asm/semaphore.h>
+#include <linux/semaphore.h>
 #include <linux/completion.h>
 #include <linux/buffer_head.h>
 #include <asm/uaccess.h>
@@ -93,11 +93,11 @@ gfs_llseek(struct file *file, loff_t offset, int origin)
 	if (origin == 2) {
 		error = gfs_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
 		if (!error) {
-			error = remote_llseek(file, offset, origin);
+			error = generic_file_llseek_unlocked(file, offset, origin);
 			gfs_glock_dq_uninit(&i_gh);
 		}
 	} else
-		error = remote_llseek(file, offset, origin);
+		error = generic_file_llseek_unlocked(file, offset, origin);
 
 	return error;
 }
diff --git a/gfs-kernel/src/gfs/ops_inode.c b/gfs-kernel/src/gfs/ops_inode.c
index 20fc4f9..78a245a 100644
--- a/gfs-kernel/src/gfs/ops_inode.c
+++ b/gfs-kernel/src/gfs/ops_inode.c
@@ -2,7 +2,7 @@
 #include <linux/slab.h>
 #include <linux/smp_lock.h>
 #include <linux/spinlock.h>
-#include <asm/semaphore.h>
+#include <linux/semaphore.h>
 #include <linux/completion.h>
 #include <linux/buffer_head.h>
 #include <linux/namei.h>
@@ -375,7 +375,7 @@ gfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
 	if (error)
 		goto fail;
 
-	error = permission(dir, MAY_WRITE | MAY_EXEC, NULL);
+	error = inode_permission(dir, MAY_WRITE | MAY_EXEC);
 	if (error)
 		goto fail_gunlock;
 
@@ -1020,7 +1020,7 @@ gfs_rename(struct inode *odir, struct dentry *odentry,
 			}
 		}
 	} else {
-		error = permission(ndir, MAY_WRITE | MAY_EXEC, NULL);
+		error = inode_permission(ndir, MAY_WRITE | MAY_EXEC);
 		if (error)
 			goto fail_gunlock;
 
@@ -1261,7 +1261,6 @@ gfs_follow_link(struct dentry *dentry, struct nameidata *nd)
  * gfs_permission_i -
  * @inode:
  * @mask:
- * @nd: ignored
  *
  * Shamelessly ripped from ext3
  *
@@ -1269,7 +1268,7 @@ gfs_follow_link(struct dentry *dentry, struct nameidata *nd)
  */
 
 static int
-gfs_permission_i(struct inode *inode, int mask, struct nameidata *nd)
+gfs_permission_i(struct inode *inode, int mask)
 {
 	return generic_permission(inode, mask, gfs_check_acl);
 }
@@ -1278,13 +1277,12 @@ gfs_permission_i(struct inode *inode, int mask, struct nameidata *nd)
  * gfs_permission -
  * @inode:
  * @mask:
- * @nd: passed from Linux VFS, ignored by us
  *
  * Returns: errno
  */
 
 static int
-gfs_permission(struct inode *inode, int mask, struct nameidata *nd)
+gfs_permission(struct inode *inode, int mask)
 {
 	struct gfs_inode *ip = get_v2ip(inode);
 	struct gfs_holder i_gh;
@@ -1298,7 +1296,7 @@ gfs_permission(struct inode *inode, int mask, struct nameidata *nd)
 	if (error)
 		return error;
 
-	error = gfs_permission_i(inode, mask, nd);
+	error = gfs_permission_i(inode, mask);
 
 	gfs_glock_dq_uninit(&i_gh);
 
@@ -1369,7 +1367,7 @@ gfs_setattr(struct dentry *dentry, struct iattr *attr)
 		goto fail;
 
 	if (attr->ia_valid & ATTR_SIZE) {
-		error = permission(inode, MAY_WRITE, NULL);
+		error = inode_permission(inode, MAY_WRITE);
 		if (error)
 			goto fail;
 
diff --git a/gfs-kernel/src/gfs/ops_super.c b/gfs-kernel/src/gfs/ops_super.c
index 3b869cb..3b3eec3 100644
--- a/gfs-kernel/src/gfs/ops_super.c
+++ b/gfs-kernel/src/gfs/ops_super.c
@@ -3,7 +3,7 @@
 #include <linux/slab.h>
 #include <linux/smp_lock.h>
 #include <linux/spinlock.h>
-#include <asm/semaphore.h>
+#include <linux/semaphore.h>
 #include <linux/buffer_head.h>
 #include <linux/vmalloc.h>
 #include <linux/statfs.h>
diff --git a/gfs-kernel/src/gfs/ops_vm.c b/gfs-kernel/src/gfs/ops_vm.c
index cad15cb..423435e 100644
--- a/gfs-kernel/src/gfs/ops_vm.c
+++ b/gfs-kernel/src/gfs/ops_vm.c
@@ -1,7 +1,7 @@
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
-#include <asm/semaphore.h>
+#include <linux/semaphore.h>
 #include <linux/completion.h>
 #include <linux/buffer_head.h>
 #include <linux/mm.h>
diff --git a/gfs-kernel/src/gfs/page.c b/gfs-kernel/src/gfs/page.c
index faac61a..4c5cdb3 100644
--- a/gfs-kernel/src/gfs/page.c
+++ b/gfs-kernel/src/gfs/page.c
@@ -2,7 +2,7 @@
 #include <linux/slab.h>
 #include <linux/smp_lock.h>
 #include <linux/spinlock.h>
-#include <asm/semaphore.h>
+#include <linux/semaphore.h>
 #include <linux/completion.h>
 #include <linux/buffer_head.h>
 #include <linux/pagemap.h>
diff --git a/gfs-kernel/src/gfs/proc.c b/gfs-kernel/src/gfs/proc.c
index 7685344..b25cd1c 100644
--- a/gfs-kernel/src/gfs/proc.c
+++ b/gfs-kernel/src/gfs/proc.c
@@ -2,7 +2,7 @@
 #include <linux/slab.h>
 #include <linux/smp_lock.h>
 #include <linux/spinlock.h>
-#include <asm/semaphore.h>
+#include <linux/semaphore.h>
 #include <linux/completion.h>
 #include <linux/buffer_head.h>
 #include <linux/proc_fs.h>
diff --git a/gfs-kernel/src/gfs/quota.c b/gfs-kernel/src/gfs/quota.c
index 13093a6..4e2bee6 100644
--- a/gfs-kernel/src/gfs/quota.c
+++ b/gfs-kernel/src/gfs/quota.c
@@ -2,7 +2,7 @@
 #include <linux/slab.h>
 #include <linux/smp_lock.h>
 #include <linux/spinlock.h>
-#include <asm/semaphore.h>
+#include <linux/semaphore.h>
 #include <linux/completion.h>
 #include <linux/buffer_head.h>
 #include <linux/tty.h>
diff --git a/gfs-kernel/src/gfs/recovery.c b/gfs-kernel/src/gfs/recovery.c
index ed759f2..ba5f34d 100644
--- a/gfs-kernel/src/gfs/recovery.c
+++ b/gfs-kernel/src/gfs/recovery.c
@@ -2,7 +2,7 @@
 #include <linux/slab.h>
 #include <linux/smp_lock.h>
 #include <linux/spinlock.h>
-#include <asm/semaphore.h>
+#include <linux/semaphore.h>
 #include <linux/completion.h>
 #include <linux/buffer_head.h>
 
diff --git a/gfs-kernel/src/gfs/rgrp.c b/gfs-kernel/src/gfs/rgrp.c
index 947757e..e0c670f 100644
--- a/gfs-kernel/src/gfs/rgrp.c
+++ b/gfs-kernel/src/gfs/rgrp.c
@@ -2,7 +2,7 @@
 #include <linux/slab.h>
 #include <linux/smp_lock.h>
 #include <linux/spinlock.h>
-#include <asm/semaphore.h>
+#include <linux/semaphore.h>
 #include <linux/completion.h>
 #include <linux/buffer_head.h>
 
diff --git a/gfs-kernel/src/gfs/super.c b/gfs-kernel/src/gfs/super.c
index 438aa07..943ee31 100644
--- a/gfs-kernel/src/gfs/super.c
+++ b/gfs-kernel/src/gfs/super.c
@@ -2,7 +2,7 @@
 #include <linux/slab.h>
 #include <linux/smp_lock.h>
 #include <linux/spinlock.h>
-#include <asm/semaphore.h>
+#include <linux/semaphore.h>
 #include <linux/completion.h>
 #include <linux/buffer_head.h>
 #include <linux/vmalloc.h>
diff --git a/gfs-kernel/src/gfs/sys.c b/gfs-kernel/src/gfs/sys.c
index f28b01f..77f542a 100644
--- a/gfs-kernel/src/gfs/sys.c
+++ b/gfs-kernel/src/gfs/sys.c
@@ -2,7 +2,7 @@
 #include <linux/slab.h>
 #include <linux/smp_lock.h>
 #include <linux/spinlock.h>
-#include <asm/semaphore.h>
+#include <linux/semaphore.h>
 #include <linux/completion.h>
 #include <linux/buffer_head.h>
 #include <linux/proc_fs.h>
diff --git a/gfs-kernel/src/gfs/trans.c b/gfs-kernel/src/gfs/trans.c
index cefb4a3..985533e 100644
--- a/gfs-kernel/src/gfs/trans.c
+++ b/gfs-kernel/src/gfs/trans.c
@@ -2,7 +2,7 @@
 #include <linux/slab.h>
 #include <linux/smp_lock.h>
 #include <linux/spinlock.h>
-#include <asm/semaphore.h>
+#include <linux/semaphore.h>
 #include <linux/completion.h>
 #include <linux/buffer_head.h>
 
diff --git a/gfs-kernel/src/gfs/unlinked.c b/gfs-kernel/src/gfs/unlinked.c
index 90d427c..d41c48a 100644
--- a/gfs-kernel/src/gfs/unlinked.c
+++ b/gfs-kernel/src/gfs/unlinked.c
@@ -3,7 +3,7 @@
 #include <linux/slab.h>
 #include <linux/smp_lock.h>
 #include <linux/spinlock.h>
-#include <asm/semaphore.h>
+#include <linux/semaphore.h>
 #include <linux/completion.h>
 #include <linux/buffer_head.h>
 
diff --git a/gfs-kernel/src/gfs/util.c b/gfs-kernel/src/gfs/util.c
index eebfef2..7d1c0e4 100644
--- a/gfs-kernel/src/gfs/util.c
+++ b/gfs-kernel/src/gfs/util.c
@@ -2,7 +2,7 @@
 #include <linux/slab.h>
 #include <linux/smp_lock.h>
 #include <linux/spinlock.h>
-#include <asm/semaphore.h>
+#include <linux/semaphore.h>
 #include <linux/completion.h>
 #include <linux/buffer_head.h>
 #include <asm/uaccess.h>

