Browse Source

update qemu-x86 to 2.6.33

- use e1000 driver by default
- no need for dummy qemu.img, phil is right.
Waldemar Brodkorb 14 years ago
parent
commit
4e6e1d3251

+ 3 - 2
target/Config.in

@@ -543,7 +543,7 @@ config ADK_LINUX_CRIS_QEMU
 config ADK_LINUX_X86_QEMU
 	bool "x86"
 	select ADK_x86_qemu
-	select ADK_KPACKAGE_KMOD_8139CP
+	select ADK_KPACKAGE_KMOD_E1000
 	select ADK_KERNEL_INPUT_KEYBOARD
 	select ADK_KERNEL_SCSI
 	select ADK_KERNEL_ATA
@@ -557,7 +557,7 @@ config ADK_LINUX_X86_QEMU
 config ADK_LINUX_X86_64_QEMU
 	bool "x86_64"
 	select ADK_x86_64_qemu
-	select ADK_KPACKAGE_KMOD_8139CP
+	select ADK_KPACKAGE_KMOD_E1000
 	select ADK_KERNEL_INPUT_KEYBOARD
 	select ADK_KERNEL_SCSI
 	select ADK_KERNEL_ATA
@@ -963,6 +963,7 @@ endchoice
 choice
 prompt "Compression method for Kernel"
 	depends on ADK_LINUX_QEMU || ADK_LINUX_ALIX
+	depends on !(ADK_TARGET_ROOTFS_INITRAMFS || ADK_TARGET_ROOTFS_INITRAMFS_PIGGYBACK)
 	
 config ADK_TARGET_KERNEL_COMPRESSION_GZIP
 	boolean

+ 24455 - 0
target/linux/patches/2.6.33/aufs2.patch

@@ -0,0 +1,24455 @@
+diff -Nur linux-2.6.31.5.orig/Documentation/ABI/testing/debugfs-aufs linux-2.6.31.5/Documentation/ABI/testing/debugfs-aufs
+--- linux-2.6.31.5.orig/Documentation/ABI/testing/debugfs-aufs	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.31.5/Documentation/ABI/testing/debugfs-aufs	2009-11-15 22:02:37.000000000 +0100
+@@ -0,0 +1,40 @@
++What:		/debug/aufs/si_<id>/
++Date:		March 2009
++Contact:	J. R. Okajima <hooanon05@yahoo.co.jp>
++Description:
++		Under /debug/aufs, a directory named si_<id> is created
++		per aufs mount, where <id> is a unique id generated
++		internally.
++
++What:		/debug/aufs/si_<id>/xib
++Date:		March 2009
++Contact:	J. R. Okajima <hooanon05@yahoo.co.jp>
++Description:
++		It shows the consumed blocks by xib (External Inode Number
++		Bitmap), its block size and file size.
++		When the aufs mount option 'noxino' is specified, it
++		will be empty. About XINO files, see
++		Documentation/filesystems/aufs/aufs.5 in detail.
++
++What:		/debug/aufs/si_<id>/xino0, xino1 ... xinoN
++Date:		March 2009
++Contact:	J. R. Okajima <hooanon05@yahoo.co.jp>
++Description:
++		It shows the consumed blocks by xino (External Inode Number
++		Translation Table), its link count, block size and file
++		size.
++		When the aufs mount option 'noxino' is specified, it
++		will be empty. About XINO files, see
++		Documentation/filesystems/aufs/aufs.5 in detail.
++
++What:		/debug/aufs/si_<id>/xigen
++Date:		March 2009
++Contact:	J. R. Okajima <hooanon05@yahoo.co.jp>
++Description:
++		It shows the consumed blocks by xigen (External Inode
++		Generation Table), its block size and file size.
++		If CONFIG_AUFS_EXPORT is disabled, this entry will not
++		be created.
++		When the aufs mount option 'noxino' is specified, it
++		will be empty. About XINO files, see
++		Documentation/filesystems/aufs/aufs.5 in detail.
+diff -Nur linux-2.6.31.5.orig/Documentation/ABI/testing/sysfs-aufs linux-2.6.31.5/Documentation/ABI/testing/sysfs-aufs
+--- linux-2.6.31.5.orig/Documentation/ABI/testing/sysfs-aufs	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.31.5/Documentation/ABI/testing/sysfs-aufs	2009-11-15 22:02:37.000000000 +0100
+@@ -0,0 +1,25 @@
++What:		/sys/fs/aufs/si_<id>/
++Date:		March 2009
++Contact:	J. R. Okajima <hooanon05@yahoo.co.jp>
++Description:
++		Under /sys/fs/aufs, a directory named si_<id> is created
++		per aufs mount, where <id> is a unique id generated
++		internally.
++
++What:		/sys/fs/aufs/si_<id>/br0, br1 ... brN
++Date:		March 2009
++Contact:	J. R. Okajima <hooanon05@yahoo.co.jp>
++Description:
++		It shows the absolute path of a member directory (which
++		is called branch) in aufs, and its permission.
++
++What:		/sys/fs/aufs/si_<id>/xi_path
++Date:		March 2009
++Contact:	J. R. Okajima <hooanon05@yahoo.co.jp>
++Description:
++		It shows the absolute path of XINO (External Inode Number
++		Bitmap, Translation Table and Generation Table) file
++		even if it is the default path.
++		When the aufs mount option 'noxino' is specified, it
++		will be empty. About XINO files, see
++		Documentation/filesystems/aufs/aufs.5 in detail.
+diff -Nur linux-2.6.31.5.orig/fs/aufs/aufs.h linux-2.6.31.5/fs/aufs/aufs.h
+--- linux-2.6.31.5.orig/fs/aufs/aufs.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.31.5/fs/aufs/aufs.h	2009-11-15 22:02:37.000000000 +0100
+@@ -0,0 +1,51 @@
++/*
++ * Copyright (C) 2005-2009 Junjiro R. Okajima
++ *
++ * This program, aufs is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
++ */
++
++/*
++ * all header files
++ */
++
++#ifndef __AUFS_H__
++#define __AUFS_H__
++
++#ifdef __KERNEL__
++
++#include "debug.h"
++
++#include "branch.h"
++#include "cpup.h"
++#include "dcsub.h"
++#include "dbgaufs.h"
++#include "dentry.h"
++#include "dir.h"
++#include "file.h"
++#include "fstype.h"
++#include "inode.h"
++#include "loop.h"
++#include "module.h"
++#include "opts.h"
++#include "rwsem.h"
++#include "spl.h"
++#include "super.h"
++#include "sysaufs.h"
++#include "vfsub.h"
++#include "whout.h"
++#include "wkq.h"
++
++#endif /* __KERNEL__ */
++#endif /* __AUFS_H__ */
+diff -Nur linux-2.6.31.5.orig/fs/aufs/branch.c linux-2.6.31.5/fs/aufs/branch.c
+--- linux-2.6.31.5.orig/fs/aufs/branch.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.31.5/fs/aufs/branch.c	2009-11-15 22:02:37.000000000 +0100
+@@ -0,0 +1,974 @@
++/*
++ * Copyright (C) 2005-2009 Junjiro R. Okajima
++ *
++ * This program, aufs is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
++ */
++
++/*
++ * branch management
++ */
++
++#include <linux/file.h>
++#include "aufs.h"
++
++/*
++ * free a single branch
++ */
++static void au_br_do_free(struct au_branch *br)
++{
++	int i;
++	struct au_wbr *wbr;
++
++	if (br->br_xino.xi_file)
++		fput(br->br_xino.xi_file);
++	mutex_destroy(&br->br_xino.xi_nondir_mtx);
++
++	AuDebugOn(atomic_read(&br->br_count));
++
++	wbr = br->br_wbr;
++	if (wbr) {
++		for (i = 0; i < AuBrWh_Last; i++)
++			dput(wbr->wbr_wh[i]);
++		AuDebugOn(atomic_read(&wbr->wbr_wh_running));
++		AuRwDestroy(&wbr->wbr_wh_rwsem);
++	}
++
++	/* some filesystems acquire extra lock */
++	lockdep_off();
++	mntput(br->br_mnt);
++	lockdep_on();
++
++	kfree(wbr);
++	kfree(br);
++}
++
++/*
++ * frees all branches
++ */
++void au_br_free(struct au_sbinfo *sbinfo)
++{
++	aufs_bindex_t bmax;
++	struct au_branch **br;
++
++	AuRwMustWriteLock(&sbinfo->si_rwsem);
++
++	bmax = sbinfo->si_bend + 1;
++	br = sbinfo->si_branch;
++	while (bmax--)
++		au_br_do_free(*br++);
++}
++
++/*
++ * find the index of a branch which is specified by @br_id.
++ */
++int au_br_index(struct super_block *sb, aufs_bindex_t br_id)
++{
++	aufs_bindex_t bindex, bend;
++
++	bend = au_sbend(sb);
++	for (bindex = 0; bindex <= bend; bindex++)
++		if (au_sbr_id(sb, bindex) == br_id)
++			return bindex;
++	return -1;
++}
++
++/* ---------------------------------------------------------------------- */
++
++/*
++ * add a branch
++ */
++
++static int test_overlap(struct super_block *sb, struct dentry *h_d1,
++			struct dentry *h_d2)
++{
++	if (unlikely(h_d1 == h_d2))
++		return 1;
++	return !!au_test_subdir(h_d1, h_d2)
++		|| !!au_test_subdir(h_d2, h_d1)
++		|| au_test_loopback_overlap(sb, h_d1, h_d2)
++		|| au_test_loopback_overlap(sb, h_d2, h_d1);
++}
++
++/*
++ * returns a newly allocated branch. @new_nbranch is a number of branches
++ * after adding a branch.
++ */
++static struct au_branch *au_br_alloc(struct super_block *sb, int new_nbranch,
++				     int perm)
++{
++	struct au_branch *add_branch;
++	struct dentry *root;
++
++	root = sb->s_root;
++	add_branch = kmalloc(sizeof(*add_branch), GFP_NOFS);
++	if (unlikely(!add_branch))
++		goto out;
++
++	add_branch->br_wbr = NULL;
++	if (au_br_writable(perm)) {
++		/* may be freed separately at changing the branch permission */
++		add_branch->br_wbr = kmalloc(sizeof(*add_branch->br_wbr),
++					     GFP_NOFS);
++		if (unlikely(!add_branch->br_wbr))
++			goto out_br;
++	}
++
++	if (unlikely(au_sbr_realloc(au_sbi(sb), new_nbranch)
++		     || au_di_realloc(au_di(root), new_nbranch)
++		     || au_ii_realloc(au_ii(root->d_inode), new_nbranch)))
++		goto out_wbr;
++	return add_branch; /* success */
++
++ out_wbr:
++	kfree(add_branch->br_wbr);
++ out_br:
++	kfree(add_branch);
++ out:
++	return ERR_PTR(-ENOMEM);
++}
++
++/*
++ * test if the branch permission is legal or not.
++ */
++static int test_br(struct inode *inode, int brperm, char *path)
++{
++	int err;
++
++	err = 0;
++	if (unlikely(au_br_writable(brperm) && IS_RDONLY(inode))) {
++		AuErr("write permission for readonly mount or inode, %s\n",
++		      path);
++		err = -EINVAL;
++	}
++
++	return err;
++}
++
++/*
++ * returns:
++ * 0: success, the caller will add it
++ * plus: success, it is already unified, the caller should ignore it
++ * minus: error
++ */
++static int test_add(struct super_block *sb, struct au_opt_add *add, int remount)
++{
++	int err;
++	aufs_bindex_t bend, bindex;
++	struct dentry *root;
++	struct inode *inode, *h_inode;
++
++	root = sb->s_root;
++	bend = au_sbend(sb);
++	if (unlikely(bend >= 0
++		     && au_find_dbindex(root, add->path.dentry) >= 0)) {
++		err = 1;
++		if (!remount) {
++			err = -EINVAL;
++			AuErr("%s duplicated\n", add->pathname);
++		}
++		goto out;
++	}
++
++	err = -ENOSPC; /* -E2BIG; */
++	if (unlikely(AUFS_BRANCH_MAX <= add->bindex
++		     || AUFS_BRANCH_MAX - 1 <= bend)) {
++		AuErr("number of branches exceeded %s\n", add->pathname);
++		goto out;
++	}
++
++	err = -EDOM;
++	if (unlikely(add->bindex < 0 || bend + 1 < add->bindex)) {
++		AuErr("bad index %d\n", add->bindex);
++		goto out;
++	}
++
++	inode = add->path.dentry->d_inode;
++	err = -ENOENT;
++	if (unlikely(!inode->i_nlink)) {
++		AuErr("no existence %s\n", add->pathname);
++		goto out;
++	}
++
++	err = -EINVAL;
++	if (unlikely(inode->i_sb == sb)) {
++		AuErr("%s must be outside\n", add->pathname);
++		goto out;
++	}
++
++	if (unlikely(au_test_fs_unsuppoted(inode->i_sb))) {
++		AuErr("unsupported filesystem, %s (%s)\n",
++		      add->pathname, au_sbtype(inode->i_sb));
++		goto out;
++	}
++
++	err = test_br(add->path.dentry->d_inode, add->perm, add->pathname);
++	if (unlikely(err))
++		goto out;
++
++	if (bend < 0)
++		return 0; /* success */
++
++	err = -EINVAL;
++	for (bindex = 0; bindex <= bend; bindex++)
++		if (unlikely(test_overlap(sb, add->path.dentry,
++					  au_h_dptr(root, bindex)))) {
++			AuErr("%s is overlapped\n", add->pathname);
++			goto out;
++		}
++
++	err = 0;
++	if (au_opt_test(au_mntflags(sb), WARN_PERM)) {
++		h_inode = au_h_dptr(root, 0)->d_inode;
++		if ((h_inode->i_mode & S_IALLUGO) != (inode->i_mode & S_IALLUGO)
++		    || h_inode->i_uid != inode->i_uid
++		    || h_inode->i_gid != inode->i_gid)
++			AuWarn("uid/gid/perm %s %u/%u/0%o, %u/%u/0%o\n",
++			       add->pathname,
++			       inode->i_uid, inode->i_gid,
++			       (inode->i_mode & S_IALLUGO),
++			       h_inode->i_uid, h_inode->i_gid,
++			       (h_inode->i_mode & S_IALLUGO));
++	}
++
++ out:
++	return err;
++}
++
++/*
++ * initialize or clean the whiteouts for an adding branch
++ */
++static int au_br_init_wh(struct super_block *sb, struct au_branch *br,
++			 int new_perm, struct dentry *h_root)
++{
++	int err, old_perm;
++	aufs_bindex_t bindex;
++	struct mutex *h_mtx;
++	struct au_wbr *wbr;
++	struct au_hinode *hdir;
++
++	wbr = br->br_wbr;
++	old_perm = br->br_perm;
++	br->br_perm = new_perm;
++	hdir = NULL;
++	h_mtx = NULL;
++	bindex = au_br_index(sb, br->br_id);
++	if (0 <= bindex) {
++		hdir = au_hi(sb->s_root->d_inode, bindex);
++		au_hin_imtx_lock_nested(hdir, AuLsc_I_PARENT);
++	} else {
++		h_mtx = &h_root->d_inode->i_mutex;
++		mutex_lock_nested(h_mtx, AuLsc_I_PARENT);
++	}
++	if (!wbr)
++		err = au_wh_init(h_root, br, sb);
++	else {
++		wbr_wh_write_lock(wbr);
++		err = au_wh_init(h_root, br, sb);
++		wbr_wh_write_unlock(wbr);
++	}
++	if (hdir)
++		au_hin_imtx_unlock(hdir);
++	else
++		mutex_unlock(h_mtx);
++	br->br_perm = old_perm;
++
++	if (!err && wbr && !au_br_writable(new_perm)) {
++		kfree(wbr);
++		br->br_wbr = NULL;
++	}
++
++	return err;
++}
++
++static int au_wbr_init(struct au_branch *br, struct super_block *sb,
++		       int perm, struct path *path)
++{
++	int err;
++	struct au_wbr *wbr;
++
++	wbr = br->br_wbr;
++	au_rw_init(&wbr->wbr_wh_rwsem);
++	memset(wbr->wbr_wh, 0, sizeof(wbr->wbr_wh));
++	atomic_set(&wbr->wbr_wh_running, 0);
++	wbr->wbr_bytes = 0;
++
++	err = au_br_init_wh(sb, br, perm, path->dentry);
++
++	return err;
++}
++
++/* initialize a new branch */
++static int au_br_init(struct au_branch *br, struct super_block *sb,
++		      struct au_opt_add *add)
++{
++	int err;
++
++	err = 0;
++	memset(&br->br_xino, 0, sizeof(br->br_xino));
++	mutex_init(&br->br_xino.xi_nondir_mtx);
++	br->br_perm = add->perm;
++	br->br_mnt = add->path.mnt; /* set first, mntget() later */
++	atomic_set(&br->br_count, 0);
++	br->br_xino_upper = AUFS_XINO_TRUNC_INIT;
++	atomic_set(&br->br_xino_running, 0);
++	br->br_id = au_new_br_id(sb);
++
++	if (au_br_writable(add->perm)) {
++		err = au_wbr_init(br, sb, add->perm, &add->path);
++		if (unlikely(err))
++			goto out;
++	}
++
++	if (au_opt_test(au_mntflags(sb), XINO)) {
++		err = au_xino_br(sb, br, add->path.dentry->d_inode->i_ino,
++				 au_sbr(sb, 0)->br_xino.xi_file, /*do_test*/1);
++		if (unlikely(err)) {
++			AuDebugOn(br->br_xino.xi_file);
++			goto out;
++		}
++	}
++
++	sysaufs_br_init(br);
++	mntget(add->path.mnt);
++
++ out:
++	return err;
++}
++
++static void au_br_do_add_brp(struct au_sbinfo *sbinfo, aufs_bindex_t bindex,
++			     struct au_branch *br, aufs_bindex_t bend,
++			     aufs_bindex_t amount)
++{
++	struct au_branch **brp;
++
++	AuRwMustWriteLock(&sbinfo->si_rwsem);
++
++	brp = sbinfo->si_branch + bindex;
++	memmove(brp + 1, brp, sizeof(*brp) * amount);
++	*brp = br;
++	sbinfo->si_bend++;
++	if (unlikely(bend < 0))
++		sbinfo->si_bend = 0;
++}
++
++static void au_br_do_add_hdp(struct au_dinfo *dinfo, aufs_bindex_t bindex,
++			     aufs_bindex_t bend, aufs_bindex_t amount)
++{
++	struct au_hdentry *hdp;
++
++	AuRwMustWriteLock(&dinfo->di_rwsem);
++
++	hdp = dinfo->di_hdentry + bindex;
++	memmove(hdp + 1, hdp, sizeof(*hdp) * amount);
++	au_h_dentry_init(hdp);
++	dinfo->di_bend++;
++	if (unlikely(bend < 0))
++		dinfo->di_bstart = 0;
++}
++
++static void au_br_do_add_hip(struct au_iinfo *iinfo, aufs_bindex_t bindex,
++			     aufs_bindex_t bend, aufs_bindex_t amount)
++{
++	struct au_hinode *hip;
++
++	AuRwMustWriteLock(&iinfo->ii_rwsem);
++
++	hip = iinfo->ii_hinode + bindex;
++	memmove(hip + 1, hip, sizeof(*hip) * amount);
++	hip->hi_inode = NULL;
++	au_hin_init(hip, NULL);
++	iinfo->ii_bend++;
++	if (unlikely(bend < 0))
++		iinfo->ii_bstart = 0;
++}
++
++static void au_br_do_add(struct super_block *sb, struct dentry *h_dentry,
++			 struct au_branch *br, aufs_bindex_t bindex)
++{
++	struct dentry *root;
++	struct inode *root_inode;
++	aufs_bindex_t bend, amount;
++
++	root = sb->s_root;
++	root_inode = root->d_inode;
++	au_plink_block_maintain(sb);
++	bend = au_sbend(sb);
++	amount = bend + 1 - bindex;
++	au_br_do_add_brp(au_sbi(sb), bindex, br, bend, amount);
++	au_br_do_add_hdp(au_di(root), bindex, bend, amount);
++	au_br_do_add_hip(au_ii(root_inode), bindex, bend, amount);
++	au_set_h_dptr(root, bindex, dget(h_dentry));
++	au_set_h_iptr(root_inode, bindex, au_igrab(h_dentry->d_inode),
++		      /*flags*/0);
++}
++
++int au_br_add(struct super_block *sb, struct au_opt_add *add, int remount)
++{
++	int err;
++	unsigned long long maxb;
++	aufs_bindex_t bend, add_bindex;
++	struct dentry *root, *h_dentry;
++	struct inode *root_inode;
++	struct au_branch *add_branch;
++
++	root = sb->s_root;
++	root_inode = root->d_inode;
++	IMustLock(root_inode);
++	err = test_add(sb, add, remount);
++	if (unlikely(err < 0))
++		goto out;
++	if (err) {
++		err = 0;
++		goto out; /* success */
++	}
++
++	bend = au_sbend(sb);
++	add_branch = au_br_alloc(sb, bend + 2, add->perm);
++	err = PTR_ERR(add_branch);
++	if (IS_ERR(add_branch))
++		goto out;
++
++	err = au_br_init(add_branch, sb, add);
++	if (unlikely(err)) {
++		au_br_do_free(add_branch);
++		goto out;
++	}
++
++	add_bindex = add->bindex;
++	h_dentry = add->path.dentry;
++	if (!remount)
++		au_br_do_add(sb, h_dentry, add_branch, add_bindex);
++	else {
++		sysaufs_brs_del(sb, add_bindex);
++		au_br_do_add(sb, h_dentry, add_branch, add_bindex);
++		sysaufs_brs_add(sb, add_bindex);
++	}
++
++	if (!add_bindex)
++		au_cpup_attr_all(root_inode, /*force*/1);
++	else
++		au_add_nlink(root_inode, h_dentry->d_inode);
++	maxb = h_dentry->d_sb->s_maxbytes;
++	if (sb->s_maxbytes < maxb)
++		sb->s_maxbytes = maxb;
++
++	/*
++	 * this test/set prevents aufs from handling unnecessary inotify events
++	 * of xino files, in a case of re-adding a writable branch which was
++	 * once detached from aufs.
++	 */
++	if (au_xino_brid(sb) < 0
++	    && au_br_writable(add_branch->br_perm)
++	    && !au_test_fs_bad_xino(h_dentry->d_sb)
++	    && add_branch->br_xino.xi_file
++	    && add_branch->br_xino.xi_file->f_dentry->d_parent == h_dentry)
++		au_xino_brid_set(sb, add_branch->br_id);
++
++ out:
++	return err;
++}
++
++/* ---------------------------------------------------------------------- */
++
++/*
++ * delete a branch
++ */
++
++/* to show the line number, do not make it inlined function */
++#define AuVerbose(do_info, fmt, args...) do { \
++	if (do_info) \
++		AuInfo(fmt, ##args); \
++} while (0)
++
++/*
++ * test if the branch is deletable or not.
++ */
++static int test_dentry_busy(struct dentry *root, aufs_bindex_t bindex,
++			    unsigned int sigen)
++{
++	int err, i, j, ndentry;
++	aufs_bindex_t bstart, bend;
++	unsigned char verbose;
++	struct au_dcsub_pages dpages;
++	struct au_dpage *dpage;
++	struct dentry *d;
++	struct inode *inode;
++
++	err = au_dpages_init(&dpages, GFP_NOFS);
++	if (unlikely(err))
++		goto out;
++	err = au_dcsub_pages(&dpages, root, NULL, NULL);
++	if (unlikely(err))
++		goto out_dpages;
++
++	verbose = !!au_opt_test(au_mntflags(root->d_sb), VERBOSE);
++	for (i = 0; !err && i < dpages.ndpage; i++) {
++		dpage = dpages.dpages + i;
++		ndentry = dpage->ndentry;
++		for (j = 0; !err && j < ndentry; j++) {
++			d = dpage->dentries[j];
++			AuDebugOn(!atomic_read(&d->d_count));
++			inode = d->d_inode;
++			if (au_digen(d) == sigen && au_iigen(inode) == sigen)
++				di_read_lock_child(d, AuLock_IR);
++			else {
++				di_write_lock_child(d);
++				err = au_reval_dpath(d, sigen);
++				if (!err)
++					di_downgrade_lock(d, AuLock_IR);
++				else {
++					di_write_unlock(d);
++					break;
++				}
++			}
++
++			bstart = au_dbstart(d);
++			bend = au_dbend(d);
++			if (bstart <= bindex
++			    && bindex <= bend
++			    && au_h_dptr(d, bindex)
++			    && (!S_ISDIR(inode->i_mode) || bstart == bend)) {
++				err = -EBUSY;
++				AuVerbose(verbose, "busy %.*s\n", AuDLNPair(d));
++			}
++			di_read_unlock(d, AuLock_IR);
++		}
++	}
++
++ out_dpages:
++	au_dpages_free(&dpages);
++ out:
++	return err;
++}
++
++static int test_inode_busy(struct super_block *sb, aufs_bindex_t bindex,
++			   unsigned int sigen)
++{
++	int err;
++	struct inode *i;
++	aufs_bindex_t bstart, bend;
++	unsigned char verbose;
++
++	err = 0;
++	verbose = !!au_opt_test(au_mntflags(sb), VERBOSE);
++	list_for_each_entry(i, &sb->s_inodes, i_sb_list) {
++		AuDebugOn(!atomic_read(&i->i_count));
++		if (!list_empty(&i->i_dentry))
++			continue;
++
++		if (au_iigen(i) == sigen)
++			ii_read_lock_child(i);
++		else {
++			ii_write_lock_child(i);
++			err = au_refresh_hinode_self(i, /*do_attr*/1);
++			if (!err)
++				ii_downgrade_lock(i);
++			else {
++				ii_write_unlock(i);
++				break;
++			}
++		}
++
++		bstart = au_ibstart(i);
++		bend = au_ibend(i);
++		if (bstart <= bindex
++		    && bindex <= bend
++		    && au_h_iptr(i, bindex)
++		    && (!S_ISDIR(i->i_mode) || bstart == bend)) {
++			err = -EBUSY;
++			AuVerbose(verbose, "busy i%lu\n", i->i_ino);
++			ii_read_unlock(i);
++			break;
++		}
++		ii_read_unlock(i);
++	}
++
++	return err;
++}
++
++static int test_children_busy(struct dentry *root, aufs_bindex_t bindex)
++{
++	int err;
++	unsigned int sigen;
++
++	sigen = au_sigen(root->d_sb);
++	DiMustNoWaiters(root);
++	IiMustNoWaiters(root->d_inode);
++	di_write_unlock(root);
++	err = test_dentry_busy(root, bindex, sigen);
++	if (!err)
++		err = test_inode_busy(root->d_sb, bindex, sigen);
++	di_write_lock_child(root); /* aufs_write_lock() calls ..._child() */
++
++	return err;
++}
++
++static void au_br_do_del_brp(struct au_sbinfo *sbinfo,
++			     const aufs_bindex_t bindex,
++			     const aufs_bindex_t bend)
++{
++	struct au_branch **brp, **p;
++
++	AuRwMustWriteLock(&sbinfo->si_rwsem);
++
++	brp = sbinfo->si_branch + bindex;
++	if (bindex < bend)
++		memmove(brp, brp + 1, sizeof(*brp) * (bend - bindex));
++	sbinfo->si_branch[0 + bend] = NULL;
++	sbinfo->si_bend--;
++
++	p = krealloc(sbinfo->si_branch, sizeof(*p) * bend, GFP_NOFS);
++	if (p)
++		sbinfo->si_branch = p;
++}
++
++static void au_br_do_del_hdp(struct au_dinfo *dinfo, const aufs_bindex_t bindex,
++			     const aufs_bindex_t bend)
++{
++	struct au_hdentry *hdp, *p;
++
++	AuRwMustWriteLock(&dinfo->di_rwsem);
++
++	hdp = dinfo->di_hdentry + bindex;
++	if (bindex < bend)
++		memmove(hdp, hdp + 1, sizeof(*hdp) * (bend - bindex));
++	dinfo->di_hdentry[0 + bend].hd_dentry = NULL;
++	dinfo->di_bend--;
++
++	p = krealloc(dinfo->di_hdentry, sizeof(*p) * bend, GFP_NOFS);
++	if (p)
++		dinfo->di_hdentry = p;
++}
++
++static void au_br_do_del_hip(struct au_iinfo *iinfo, const aufs_bindex_t bindex,
++			     const aufs_bindex_t bend)
++{
++	struct au_hinode *hip, *p;
++
++	AuRwMustWriteLock(&iinfo->ii_rwsem);
++
++	hip = iinfo->ii_hinode + bindex;
++	if (bindex < bend)
++		memmove(hip, hip + 1, sizeof(*hip) * (bend - bindex));
++	iinfo->ii_hinode[0 + bend].hi_inode = NULL;
++	au_hin_init(iinfo->ii_hinode + bend, NULL);
++	iinfo->ii_bend--;
++
++	p = krealloc(iinfo->ii_hinode, sizeof(*p) * bend, GFP_NOFS);
++	if (p)
++		iinfo->ii_hinode = p;
++}
++
++static void au_br_do_del(struct super_block *sb, aufs_bindex_t bindex,
++			 struct au_branch *br)
++{
++	aufs_bindex_t bend;
++	struct au_sbinfo *sbinfo;
++	struct dentry *root;
++	struct inode *inode;
++
++	SiMustWriteLock(sb);
++
++	root = sb->s_root;
++	inode = root->d_inode;
++	au_plink_block_maintain(sb);
++	sbinfo = au_sbi(sb);
++	bend = sbinfo->si_bend;
++
++	dput(au_h_dptr(root, bindex));
++	au_hiput(au_hi(inode, bindex));
++	au_br_do_free(br);
++
++	au_br_do_del_brp(sbinfo, bindex, bend);
++	au_br_do_del_hdp(au_di(root), bindex, bend);
++	au_br_do_del_hip(au_ii(inode), bindex, bend);
++}
++
++int au_br_del(struct super_block *sb, struct au_opt_del *del, int remount)
++{
++	int err, rerr, i;
++	unsigned int mnt_flags;
++	aufs_bindex_t bindex, bend, br_id;
++	unsigned char do_wh, verbose;
++	struct au_branch *br;
++	struct au_wbr *wbr;
++
++	err = 0;
++	bindex = au_find_dbindex(sb->s_root, del->h_path.dentry);
++	if (bindex < 0) {
++		if (remount)
++			goto out; /* success */
++		err = -ENOENT;
++		AuErr("%s no such branch\n", del->pathname);
++		goto out;
++	}
++	AuDbg("bindex b%d\n", bindex);
++
++	err = -EBUSY;
++	mnt_flags = au_mntflags(sb);
++	verbose = !!au_opt_test(mnt_flags, VERBOSE);
++	bend = au_sbend(sb);
++	if (unlikely(!bend)) {
++		AuVerbose(verbose, "no more branches left\n");
++		goto out;
++	}
++	br = au_sbr(sb, bindex);
++	i = atomic_read(&br->br_count);
++	if (unlikely(i)) {
++		AuVerbose(verbose, "%d file(s) opened\n", i);
++		goto out;
++	}
++
++	wbr = br->br_wbr;
++	do_wh = wbr && (wbr->wbr_whbase || wbr->wbr_plink || wbr->wbr_orph);
++	if (do_wh) {
++		/* instead of WbrWhMustWriteLock(wbr) */
++		SiMustWriteLock(sb);
++		for (i = 0; i < AuBrWh_Last; i++) {
++			dput(wbr->wbr_wh[i]);
++			wbr->wbr_wh[i] = NULL;
++		}
++	}
++
++	err = test_children_busy(sb->s_root, bindex);
++	if (unlikely(err)) {
++		if (do_wh)
++			goto out_wh;
++		goto out;
++	}
++
++	err = 0;
++	br_id = br->br_id;
++	if (!remount)
++		au_br_do_del(sb, bindex, br);
++	else {
++		sysaufs_brs_del(sb, bindex);
++		au_br_do_del(sb, bindex, br);
++		sysaufs_brs_add(sb, bindex);
++	}
++
++	if (!bindex)
++		au_cpup_attr_all(sb->s_root->d_inode, /*force*/1);
++	else
++		au_sub_nlink(sb->s_root->d_inode, del->h_path.dentry->d_inode);
++	if (au_opt_test(mnt_flags, PLINK))
++		au_plink_half_refresh(sb, br_id);
++
++	if (sb->s_maxbytes == del->h_path.dentry->d_sb->s_maxbytes) {
++		bend--;
++		sb->s_maxbytes = 0;
++		for (bindex = 0; bindex <= bend; bindex++) {
++			unsigned long long maxb;
++
++			maxb = au_sbr_sb(sb, bindex)->s_maxbytes;
++			if (sb->s_maxbytes < maxb)
++				sb->s_maxbytes = maxb;
++		}
++	}
++
++	if (au_xino_brid(sb) == br->br_id)
++		au_xino_brid_set(sb, -1);
++	goto out; /* success */
++
++ out_wh:
++	/* revert */
++	rerr = au_br_init_wh(sb, br, br->br_perm, del->h_path.dentry);
++	if (rerr)
++		AuWarn("failed re-creating base whiteout, %s. (%d)\n",
++		       del->pathname, rerr);
++ out:
++	return err;
++}
++
++/* ---------------------------------------------------------------------- */
++
++/*
++ * change a branch permission
++ */
++
++static int do_need_sigen_inc(int a, int b)
++{
++	return au_br_whable(a) && !au_br_whable(b);
++}
++
++static int need_sigen_inc(int old, int new)
++{
++	return do_need_sigen_inc(old, new)
++		|| do_need_sigen_inc(new, old);
++}
++
++static int au_br_mod_files_ro(struct super_block *sb, aufs_bindex_t bindex)
++{
++	int err;
++	unsigned long n, ul, bytes, files;
++	aufs_bindex_t bstart;
++	struct file *file, *hf, **a;
++	const int step_bytes = 1024, /* memory allocation unit */
++		step_files = step_bytes / sizeof(*a);
++
++	err = -ENOMEM;
++	n = 0;
++	bytes = step_bytes;
++	files = step_files;
++	a = kmalloc(bytes, GFP_NOFS);
++	if (unlikely(!a))
++		goto out;
++
++	/* no need file_list_lock() since sbinfo is locked? deferred? */
++	list_for_each_entry(file, &sb->s_files, f_u.fu_list) {
++		if (special_file(file->f_dentry->d_inode->i_mode))
++			continue;
++
++		AuDbg("%.*s\n", AuDLNPair(file->f_dentry));
++		fi_read_lock(file);
++		if (unlikely(au_test_mmapped(file))) {
++			err = -EBUSY;
++			FiMustNoWaiters(file);
++			fi_read_unlock(file);
++			goto out_free;
++		}
++
++		bstart = au_fbstart(file);
++		if (!S_ISREG(file->f_dentry->d_inode->i_mode)
++		    || !(file->f_mode & FMODE_WRITE)
++		    || bstart != bindex) {
++			FiMustNoWaiters(file);
++			fi_read_unlock(file);
++			continue;
++		}
++
++		hf = au_h_fptr(file, bstart);
++		FiMustNoWaiters(file);
++		fi_read_unlock(file);
++
++		if (n < files)
++			a[n++] = hf;
++		else {
++			void *p;
++
++			err = -ENOMEM;
++			bytes += step_bytes;
++			files += step_files;
++			p = krealloc(a, bytes, GFP_NOFS);
++			if (p) {
++				a = p;
++				a[n++] = hf;
++			} else
++				goto out_free;
++		}
++	}
++
++	err = 0;
++	for (ul = 0; ul < n; ul++) {
++		/* todo: already flushed? */
++		/* cf. fs/super.c:mark_files_ro() */
++		hf = a[ul];
++		hf->f_mode &= ~FMODE_WRITE;
++		if (!file_check_writeable(hf)) {
++			file_release_write(hf);
++			mnt_drop_write(hf->f_vfsmnt);
++		}
++	}
++
++ out_free:
++	kfree(a);
++ out:
++	return err;
++}
++
++int au_br_mod(struct super_block *sb, struct au_opt_mod *mod, int remount,
++	      int *do_update)
++{
++	int err, rerr;
++	aufs_bindex_t bindex;
++	struct dentry *root;
++	struct au_branch *br;
++
++	root = sb->s_root;
++	au_plink_block_maintain(sb);
++	bindex = au_find_dbindex(root, mod->h_root);
++	if (bindex < 0) {
++		if (remount)
++			return 0; /* success */
++		err = -ENOENT;
++		AuErr("%s no such branch\n", mod->path);
++		goto out;
++	}
++	AuDbg("bindex b%d\n", bindex);
++
++	err = test_br(mod->h_root->d_inode, mod->perm, mod->path);
++	if (unlikely(err))
++		goto out;
++
++	br = au_sbr(sb, bindex);
++	if (br->br_perm == mod->perm)
++		return 0; /* success */
++
++	if (au_br_writable(br->br_perm)) {
++		/* remove whiteout base */
++		err = au_br_init_wh(sb, br, mod->perm, mod->h_root);
++		if (unlikely(err))
++			goto out;
++
++		if (!au_br_writable(mod->perm)) {
++			/* rw --> ro, file might be mmapped */
++			DiMustNoWaiters(root);
++			IiMustNoWaiters(root->d_inode);
++			di_write_unlock(root);
++			err = au_br_mod_files_ro(sb, bindex);
++			/* aufs_write_lock() calls ..._child() */
++			di_write_lock_child(root);
++
++			if (unlikely(err)) {
++				rerr = -ENOMEM;
++				br->br_wbr = kmalloc(sizeof(*br->br_wbr),
++						     GFP_NOFS);
++				if (br->br_wbr)
++					rerr = au_br_init_wh
++						(sb, br, br->br_perm,
++						 mod->h_root);
++				if (unlikely(rerr)) {
++					AuIOErr("nested error %d (%d)\n",
++						rerr, err);
++					br->br_perm = mod->perm;
++				}
++			}
++		}
++	} else if (au_br_writable(mod->perm)) {
++		/* ro --> rw */
++		err = -ENOMEM;
++		br->br_wbr = kmalloc(sizeof(*br->br_wbr), GFP_NOFS);
++		if (br->br_wbr) {
++			struct path path = {
++				.mnt	= br->br_mnt,
++				.dentry	= mod->h_root
++			};
++
++			err = au_wbr_init(br, sb, mod->perm, &path);
++			if (unlikely(err)) {
++				kfree(br->br_wbr);
++				br->br_wbr = NULL;
++			}
++		}
++	}
++
++	if (!err) {
++		*do_update |= need_sigen_inc(br->br_perm, mod->perm);
++		br->br_perm = mod->perm;
++	}
++
++ out:
++	return err;
++}
+diff -Nur linux-2.6.31.5.orig/fs/aufs/branch.h linux-2.6.31.5/fs/aufs/branch.h
+--- linux-2.6.31.5.orig/fs/aufs/branch.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.31.5/fs/aufs/branch.h	2009-11-15 22:02:37.000000000 +0100
+@@ -0,0 +1,219 @@
++/*
++ * Copyright (C) 2005-2009 Junjiro R. Okajima
++ *
++ * This program, aufs is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
++ */
++
++/*
++ * branch filesystems and xino for them
++ */
++
++#ifndef __AUFS_BRANCH_H__
++#define __AUFS_BRANCH_H__
++
++#ifdef __KERNEL__
++
++#include <linux/fs.h>
++#include <linux/mount.h>
++#include <linux/aufs_type.h>
++#include "rwsem.h"
++#include "super.h"
++
++/* ---------------------------------------------------------------------- */
++
++/* a xino file */
++struct au_xino_file {
++	struct file		*xi_file;
++	struct mutex		xi_nondir_mtx;
++
++	/* todo: make xino files an array to support huge inode number */
++
++#ifdef CONFIG_DEBUG_FS
++	struct dentry		 *xi_dbgaufs;
++#endif
++};
++
++/* members for writable branch only */
++enum {AuBrWh_BASE, AuBrWh_PLINK, AuBrWh_ORPH, AuBrWh_Last};
++struct au_wbr {
++	struct au_rwsem		wbr_wh_rwsem;
++	struct dentry		*wbr_wh[AuBrWh_Last];
++	atomic_t 		wbr_wh_running;
++#define wbr_whbase		wbr_wh[AuBrWh_BASE]	/* whiteout base */
++#define wbr_plink		wbr_wh[AuBrWh_PLINK]	/* pseudo-link dir */
++#define wbr_orph		wbr_wh[AuBrWh_ORPH]	/* dir for orphans */
++
++	/* mfs mode */
++	unsigned long long	wbr_bytes;
++};
++
++/* protected by superblock rwsem */
++struct au_branch {
++	struct au_xino_file	br_xino;
++
++	aufs_bindex_t		br_id;
++
++	int			br_perm;
++	struct vfsmount		*br_mnt;
++	atomic_t		br_count;
++
++	struct au_wbr		*br_wbr;
++
++	/* xino truncation */
++	blkcnt_t		br_xino_upper;	/* watermark in blocks */
++	atomic_t		br_xino_running;
++
++#ifdef CONFIG_SYSFS
++	/* an entry under sysfs per mount-point */
++	char			br_name[8];
++	struct attribute	br_attr;
++#endif
++};
++
++/* ---------------------------------------------------------------------- */
++
++/* branch permission and attribute */
++enum {
++	AuBrPerm_RW,		/* writable, linkable wh */
++	AuBrPerm_RO,		/* readonly, no wh */
++	AuBrPerm_RR,		/* natively readonly, no wh */
++
++	AuBrPerm_RWNoLinkWH,	/* un-linkable whiteouts */
++
++	AuBrPerm_ROWH,		/* whiteout-able */
++	AuBrPerm_RRWH,		/* whiteout-able */
++
++	AuBrPerm_Last
++};
++
++static inline int au_br_writable(int brperm)
++{
++	return brperm == AuBrPerm_RW || brperm == AuBrPerm_RWNoLinkWH;
++}
++
++static inline int au_br_whable(int brperm)
++{
++	return brperm == AuBrPerm_RW
++		|| brperm == AuBrPerm_ROWH
++		|| brperm == AuBrPerm_RRWH;
++}
++
++static inline int au_br_rdonly(struct au_branch *br)
++{
++	return ((br->br_mnt->mnt_sb->s_flags & MS_RDONLY)
++		|| !au_br_writable(br->br_perm))
++		? -EROFS : 0;
++}
++
++static inline int au_br_hinotifyable(int brperm __maybe_unused)
++{
++#ifdef CONFIG_AUFS_HINOTIFY
++	return brperm != AuBrPerm_RR && brperm != AuBrPerm_RRWH;
++#else
++	return 0;
++#endif
++}
++
++/* ---------------------------------------------------------------------- */
++
++/* branch.c */
++struct au_sbinfo;
++void au_br_free(struct au_sbinfo *sinfo);
++int au_br_index(struct super_block *sb, aufs_bindex_t br_id);
++struct au_opt_add;
++int au_br_add(struct super_block *sb, struct au_opt_add *add, int remount);
++struct au_opt_del;
++int au_br_del(struct super_block *sb, struct au_opt_del *del, int remount);
++struct au_opt_mod;
++int au_br_mod(struct super_block *sb, struct au_opt_mod *mod, int remount,
++	      int *do_update);
++
++/* xino.c */
++static const loff_t au_loff_max = LLONG_MAX;
++
++int au_xib_trunc(struct super_block *sb);
++ssize_t xino_fread(au_readf_t func, struct file *file, void *buf, size_t size,
++		   loff_t *pos);
++ssize_t xino_fwrite(au_writef_t func, struct file *file, void *buf, size_t size,
++		    loff_t *pos);
++struct file *au_xino_create2(struct file *base_file, struct file *copy_src);
++struct file *au_xino_create(struct super_block *sb, char *fname, int silent);
++ino_t au_xino_new_ino(struct super_block *sb);
++int au_xino_write0(struct super_block *sb, aufs_bindex_t bindex, ino_t h_ino,
++		   ino_t ino);
++int au_xino_write(struct super_block *sb, aufs_bindex_t bindex, ino_t h_ino,
++		  ino_t ino);
++int au_xino_read(struct super_block *sb, aufs_bindex_t bindex, ino_t h_ino,
++		 ino_t *ino);
++int au_xino_br(struct super_block *sb, struct au_branch *br, ino_t hino,
++	       struct file *base_file, int do_test);
++int au_xino_trunc(struct super_block *sb, aufs_bindex_t bindex);
++
++struct au_opt_xino;
++int au_xino_set(struct super_block *sb, struct au_opt_xino *xino, int remount);
++void au_xino_clr(struct super_block *sb);
++struct file *au_xino_def(struct super_block *sb);
++int au_xino_path(struct seq_file *seq, struct file *file);
++
++/* ---------------------------------------------------------------------- */
++
++/* Superblock to branch */
++static inline
++aufs_bindex_t au_sbr_id(struct super_block *sb, aufs_bindex_t bindex)
++{
++	return au_sbr(sb, bindex)->br_id;
++}
++
++static inline
++struct vfsmount *au_sbr_mnt(struct super_block *sb, aufs_bindex_t bindex)
++{
++	return au_sbr(sb, bindex)->br_mnt;
++}
++
++static inline
++struct super_block *au_sbr_sb(struct super_block *sb, aufs_bindex_t bindex)
++{
++	return au_sbr_mnt(sb, bindex)->mnt_sb;
++}
++
++static inline void au_sbr_put(struct super_block *sb, aufs_bindex_t bindex)
++{
++	atomic_dec_return(&au_sbr(sb, bindex)->br_count);
++}
++
++static inline int au_sbr_perm(struct super_block *sb, aufs_bindex_t bindex)
++{
++	return au_sbr(sb, bindex)->br_perm;
++}
++
++static inline int au_sbr_whable(struct super_block *sb, aufs_bindex_t bindex)
++{
++	return au_br_whable(au_sbr_perm(sb, bindex));
++}
++
++/* ---------------------------------------------------------------------- */
++
++/*
++ * wbr_wh_read_lock, wbr_wh_write_lock
++ * wbr_wh_read_unlock, wbr_wh_write_unlock, wbr_wh_downgrade_lock
++ */
++AuSimpleRwsemFuncs(wbr_wh, struct au_wbr *wbr, &wbr->wbr_wh_rwsem);
++
++#define WbrWhMustNoWaiters(wbr)	AuRwMustNoWaiters(&wbr->wbr_wh_rwsem)
++#define WbrWhMustAnyLock(wbr)	AuRwMustAnyLock(&wbr->wbr_wh_rwsem)
++#define WbrWhMustWriteLock(wbr)	AuRwMustWriteLock(&wbr->wbr_wh_rwsem)
++
++#endif /* __KERNEL__ */
++#endif /* __AUFS_BRANCH_H__ */
+diff -Nur linux-2.6.31.5.orig/fs/aufs/cpup.c linux-2.6.31.5/fs/aufs/cpup.c
+--- linux-2.6.31.5.orig/fs/aufs/cpup.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.31.5/fs/aufs/cpup.c	2009-11-15 22:02:37.000000000 +0100
+@@ -0,0 +1,1048 @@
++/*
++ * Copyright (C) 2005-2009 Junjiro R. Okajima
++ *
++ * This program, aufs is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
++ */
++
++/*
++ * copy-up functions, see wbr_policy.c for copy-down
++ */
++
++#include <linux/file.h>
++#include <linux/fs_stack.h>
++#include <linux/mm.h>
++#include <linux/uaccess.h>
++#include "aufs.h"
++
++void au_cpup_attr_flags(struct inode *dst, struct inode *src)
++{
++	const unsigned int mask = S_DEAD | S_SWAPFILE | S_PRIVATE
++		| S_NOATIME | S_NOCMTIME;
++
++	dst->i_flags |= src->i_flags & ~mask;
++	if (au_test_fs_notime(dst->i_sb))
++		dst->i_flags |= S_NOATIME | S_NOCMTIME;
++}
++
++void au_cpup_attr_timesizes(struct inode *inode)
++{
++	struct inode *h_inode;
++
++	h_inode = au_h_iptr(inode, au_ibstart(inode));
++	fsstack_copy_attr_times(inode, h_inode);
++	vfsub_copy_inode_size(inode, h_inode);
++}
++
++void au_cpup_attr_nlink(struct inode *inode, int force)
++{
++	struct inode *h_inode;
++	struct super_block *sb;
++	aufs_bindex_t bindex, bend;
++
++	sb = inode->i_sb;
++	bindex = au_ibstart(inode);
++	h_inode = au_h_iptr(inode, bindex);
++	if (!force
++	    && !S_ISDIR(h_inode->i_mode)
++	    && au_opt_test(au_mntflags(sb), PLINK)
++	    && au_plink_test(inode))
++		return;
++
++	inode->i_nlink = h_inode->i_nlink;
++
++	/*
++	 * fewer nlink makes find(1) noisy, but larger nlink doesn't.
++	 * it may include the whplink directory.
++	 */
++	if (S_ISDIR(h_inode->i_mode)) {
++		bend = au_ibend(inode);
++		for (bindex++; bindex <= bend; bindex++) {
++			h_inode = au_h_iptr(inode, bindex);
++			if (h_inode)
++				au_add_nlink(inode, h_inode);
++		}
++	}
++}
++
++void au_cpup_attr_changeable(struct inode *inode)
++{
++	struct inode *h_inode;
++
++	h_inode = au_h_iptr(inode, au_ibstart(inode));
++	inode->i_mode = h_inode->i_mode;
++	inode->i_uid = h_inode->i_uid;
++	inode->i_gid = h_inode->i_gid;
++	au_cpup_attr_timesizes(inode);
++	au_cpup_attr_flags(inode, h_inode);
++}
++
++void au_cpup_igen(struct inode *inode, struct inode *h_inode)
++{
++	struct au_iinfo *iinfo = au_ii(inode);
++
++	IiMustWriteLock(inode);
++
++	iinfo->ii_higen = h_inode->i_generation;
++	iinfo->ii_hsb1 = h_inode->i_sb;
++}
++
++void au_cpup_attr_all(struct inode *inode, int force)
++{
++	struct inode *h_inode;
++
++	h_inode = au_h_iptr(inode, au_ibstart(inode));
++	au_cpup_attr_changeable(inode);
++	if (inode->i_nlink > 0)
++		au_cpup_attr_nlink(inode, force);
++	inode->i_rdev = h_inode->i_rdev;
++	inode->i_blkbits = h_inode->i_blkbits;
++	au_cpup_igen(inode, h_inode);
++}
++
++/* ---------------------------------------------------------------------- */
++
++/* Note: dt_dentry and dt_h_dentry are not dget/dput-ed */
++
++/* keep the timestamps of the parent dir when cpup */
++void au_dtime_store(struct au_dtime *dt, struct dentry *dentry,
++		    struct path *h_path)
++{
++	struct inode *h_inode;
++
++	dt->dt_dentry = dentry;
++	dt->dt_h_path = *h_path;
++	h_inode = h_path->dentry->d_inode;
++	dt->dt_atime = h_inode->i_atime;
++	dt->dt_mtime = h_inode->i_mtime;
++	/* smp_mb(); */
++}
++
++void au_dtime_revert(struct au_dtime *dt)
++{
++	struct iattr attr;
++	int err;
++
++	attr.ia_atime = dt->dt_atime;
++	attr.ia_mtime = dt->dt_mtime;
++	attr.ia_valid = ATTR_FORCE | ATTR_MTIME | ATTR_MTIME_SET
++		| ATTR_ATIME | ATTR_ATIME_SET;
++
++	err = vfsub_notify_change(&dt->dt_h_path, &attr);
++	if (unlikely(err))
++		AuWarn("restoring timestamps failed(%d). ignored\n", err);
++}
++
++/* ---------------------------------------------------------------------- */
++
++static noinline_for_stack
++int cpup_iattr(struct dentry *dst, aufs_bindex_t bindex, struct dentry *h_src)
++{
++	int err, sbits;
++	struct iattr ia;
++	struct path h_path;
++	struct inode *h_isrc, *h_idst;
++
++	h_path.dentry = au_h_dptr(dst, bindex);
++	h_idst = h_path.dentry->d_inode;
++	h_path.mnt = au_sbr_mnt(dst->d_sb, bindex);
++	h_isrc = h_src->d_inode;
++	ia.ia_valid = ATTR_FORCE | ATTR_UID | ATTR_GID
++		| ATTR_ATIME | ATTR_MTIME
++		| ATTR_ATIME_SET | ATTR_MTIME_SET;
++	ia.ia_uid = h_isrc->i_uid;
++	ia.ia_gid = h_isrc->i_gid;
++	ia.ia_atime = h_isrc->i_atime;
++	ia.ia_mtime = h_isrc->i_mtime;
++	if (h_idst->i_mode != h_isrc->i_mode
++	    && !S_ISLNK(h_idst->i_mode)) {
++		ia.ia_valid |= ATTR_MODE;
++		ia.ia_mode = h_isrc->i_mode;
++	}
++	sbits = !!(h_isrc->i_mode & (S_ISUID | S_ISGID));
++	au_cpup_attr_flags(h_idst, h_isrc);
++	err = vfsub_notify_change(&h_path, &ia);
++
++	/* is this nfs only? */
++	if (!err && sbits && au_test_nfs(h_path.dentry->d_sb)) {
++		ia.ia_valid = ATTR_FORCE | ATTR_MODE;
++		ia.ia_mode = h_isrc->i_mode;
++		err = vfsub_notify_change(&h_path, &ia);
++	}
++
++	return err;
++}
++
++/* ---------------------------------------------------------------------- */
++
++static int au_do_copy_file(struct file *dst, struct file *src, loff_t len,
++			   char *buf, unsigned long blksize)
++{
++	int err;
++	size_t sz, rbytes, wbytes;
++	unsigned char all_zero;
++	char *p, *zp;
++	struct mutex *h_mtx;
++	/* reduce stack usage */
++	struct iattr *ia;
++
++	zp = page_address(ZERO_PAGE(0));
++	if (unlikely(!zp))
++		return -ENOMEM; /* possible? */
++
++	err = 0;
++	all_zero = 0;
++	while (len) {
++		AuDbg("len %lld\n", len);
++		sz = blksize;
++		if (len < blksize)
++			sz = len;
++
++		rbytes = 0;
++		/* todo: signal_pending? */
++		while (!rbytes || err == -EAGAIN || err == -EINTR) {
++			rbytes = vfsub_read_k(src, buf, sz, &src->f_pos);
++			err = rbytes;
++		}
++		if (unlikely(err < 0))
++			break;
++
++		all_zero = 0;
++		if (len >= rbytes && rbytes == blksize)
++			all_zero = !memcmp(buf, zp, rbytes);
++		if (!all_zero) {
++			wbytes = rbytes;
++			p = buf;
++			while (wbytes) {
++				size_t b;
++
++				b = vfsub_write_k(dst, p, wbytes, &dst->f_pos);
++				err = b;
++				/* todo: signal_pending? */
++				if (unlikely(err == -EAGAIN || err == -EINTR))
++					continue;
++				if (unlikely(err < 0))
++					break;
++				wbytes -= b;
++				p += b;
++			}
++		} else {
++			loff_t res;
++
++			AuLabel(hole);
++			res = vfsub_llseek(dst, rbytes, SEEK_CUR);
++			err = res;
++			if (unlikely(res < 0))
++				break;
++		}
++		len -= rbytes;
++		err = 0;
++	}
++
++	/* the last block may be a hole */
++	if (!err && all_zero) {
++		AuLabel(last hole);
++
++		err = 1;
++		if (au_test_nfs(dst->f_dentry->d_sb)) {
++			/* nfs requires this step to make last hole */
++			/* is this only nfs? */
++			do {
++				/* todo: signal_pending? */
++				err = vfsub_write_k(dst, "\0", 1, &dst->f_pos);
++			} while (err == -EAGAIN || err == -EINTR);
++			if (err == 1)
++				dst->f_pos--;
++		}
++
++		if (err == 1) {
++			ia = (void *)buf;
++			ia->ia_size = dst->f_pos;
++			ia->ia_valid = ATTR_SIZE | ATTR_FILE;
++			ia->ia_file = dst;
++			h_mtx = &dst->f_dentry->d_inode->i_mutex;
++			mutex_lock_nested(h_mtx, AuLsc_I_CHILD2);
++			err = vfsub_notify_change(&dst->f_path, ia);
++			mutex_unlock(h_mtx);
++		}
++	}
++
++	return err;
++}
++
++int au_copy_file(struct file *dst, struct file *src, loff_t len)
++{
++	int err;
++	unsigned long blksize;
++	unsigned char do_kfree;
++	char *buf;
++
++	err = -ENOMEM;
++	blksize = dst->f_dentry->d_sb->s_blocksize;
++	if (!blksize || PAGE_SIZE < blksize)
++		blksize = PAGE_SIZE;
++	AuDbg("blksize %lu\n", blksize);
++	do_kfree = (blksize != PAGE_SIZE && blksize >= sizeof(struct iattr *));
++	if (do_kfree)
++		buf = kmalloc(blksize, GFP_NOFS);
++	else
++		buf = (void *)__get_free_page(GFP_NOFS);
++	if (unlikely(!buf))
++		goto out;
++
++	if (len > (1 << 22))
++		AuDbg("copying a large file %lld\n", (long long)len);
++
++	src->f_pos = 0;
++	dst->f_pos = 0;
++	err = au_do_copy_file(dst, src, len, buf, blksize);
++	if (do_kfree)
++		kfree(buf);
++	else
++		free_page((unsigned long)buf);
++
++ out:
++	return err;
++}
++
++/*
++ * to support a sparse file which is opened with O_APPEND,
++ * we need to close the file.
++ */
++static int au_cp_regular(struct dentry *dentry, aufs_bindex_t bdst,
++			aufs_bindex_t bsrc, loff_t len)
++{
++	int err, i;
++	enum { SRC, DST };
++	struct {
++		aufs_bindex_t bindex;
++		unsigned int flags;
++		struct dentry *dentry;
++		struct file *file;
++		void *label, *label_file;
++	} *f, file[] = {
++		{
++			.bindex = bsrc,
++			.flags = O_RDONLY | O_NOATIME | O_LARGEFILE,
++			.file = NULL,
++			.label = &&out,
++			.label_file = &&out_src
++		},
++		{
++			.bindex = bdst,
++			.flags = O_WRONLY | O_NOATIME | O_LARGEFILE,
++			.file = NULL,
++			.label = &&out_src,
++			.label_file = &&out_dst
++		}
++	};
++	struct super_block *sb;
++
++	/* bsrc branch can be ro/rw. */
++	sb = dentry->d_sb;
++	f = file;
++	for (i = 0; i < 2; i++, f++) {
++		f->dentry = au_h_dptr(dentry, f->bindex);
++		f->file = au_h_open(dentry, f->bindex, f->flags, /*file*/NULL);
++		err = PTR_ERR(f->file);
++		if (IS_ERR(f->file))
++			goto *f->label;
++		err = -EINVAL;
++		if (unlikely(!f->file->f_op))
++			goto *f->label_file;
++	}
++
++	/* try to prevent updates while we copy up */
++	IMustLock(file[SRC].dentry->d_inode);
++	err = au_copy_file(file[DST].file, file[SRC].file, len);
++
++ out_dst:
++	fput(file[DST].file);
++	au_sbr_put(sb, file[DST].bindex);
++ out_src:
++	fput(file[SRC].file);
++	au_sbr_put(sb, file[SRC].bindex);
++ out:
++	return err;
++}
++
++static int au_do_cpup_regular(struct dentry *dentry, aufs_bindex_t bdst,
++			      aufs_bindex_t bsrc, loff_t len,
++			      struct inode *h_dir, struct path *h_path)
++{
++	int err, rerr;
++	loff_t l;
++
++	err = 0;
++	l = i_size_read(au_h_iptr(dentry->d_inode, bsrc));
++	if (len == -1 || l < len)
++		len = l;
++	if (len)
++		err = au_cp_regular(dentry, bdst, bsrc, len);
++	if (!err)
++		goto out; /* success */
++
++	rerr = vfsub_unlink(h_dir, h_path, /*force*/0);
++	if (rerr) {
++		AuIOErr("failed unlinking cpup-ed %.*s(%d, %d)\n",
++			AuDLNPair(h_path->dentry), err, rerr);
++		err = -EIO;
++	}
++
++ out:
++	return err;
++}
++
++static int au_do_cpup_symlink(struct path *h_path, struct dentry *h_src,
++			      struct inode *h_dir)
++{
++	int err, symlen;
++	mm_segment_t old_fs;
++	char *sym;
++
++	err = -ENOSYS;
++	if (unlikely(!h_src->d_inode->i_op->readlink))
++		goto out;
++
++	err = -ENOMEM;
++	sym = __getname();
++	if (unlikely(!sym))
++		goto out;
++
++	old_fs = get_fs();
++	set_fs(KERNEL_DS);
++	symlen = h_src->d_inode->i_op->readlink(h_src, (char __user *)sym,
++						PATH_MAX);
++	err = symlen;
++	set_fs(old_fs);
++
++	if (symlen > 0) {
++		sym[symlen] = 0;
++		err = vfsub_symlink(h_dir, h_path, sym);
++	}
++	__putname(sym);
++
++ out:
++	return err;
++}
++
++/* return with the lower dst inode is locked */
++static noinline_for_stack
++int cpup_entry(struct dentry *dentry, aufs_bindex_t bdst,
++	       aufs_bindex_t bsrc, loff_t len, unsigned int flags,
++	       struct dentry *dst_parent)
++{
++	int err;
++	umode_t mode;
++	unsigned int mnt_flags;
++	unsigned char isdir;
++	const unsigned char do_dt = !!au_ftest_cpup(flags, DTIME);
++	struct au_dtime dt;
++	struct path h_path;
++	struct dentry *h_src, *h_dst, *h_parent;
++	struct inode *h_inode, *h_dir;
++	struct super_block *sb;
++
++	/* bsrc branch can be ro/rw. */
++	h_src = au_h_dptr(dentry, bsrc);
++	h_inode = h_src->d_inode;
++	AuDebugOn(h_inode != au_h_iptr(dentry->d_inode, bsrc));
++
++	/* try to prevent this entry from being referenced while we create it */
++	h_dst = au_h_dptr(dentry, bdst);
++	h_parent = h_dst->d_parent; /* dir inode is locked */
++	h_dir = h_parent->d_inode;
++	IMustLock(h_dir);
++	AuDebugOn(h_parent != h_dst->d_parent);
++
++	sb = dentry->d_sb;
++	h_path.mnt = au_sbr_mnt(sb, bdst);
++	if (do_dt) {
++		h_path.dentry = h_parent;
++		au_dtime_store(&dt, dst_parent, &h_path);
++	}
++	h_path.dentry = h_dst;
++
++	isdir = 0;
++	mode = h_inode->i_mode;
++	switch (mode & S_IFMT) {
++	case S_IFREG:
++		/* try to prevent updates while we are referencing it */
++		IMustLock(h_inode);
++		err = vfsub_create(h_dir, &h_path, mode | S_IWUSR);
++		if (!err)
++			err = au_do_cpup_regular
++				(dentry, bdst, bsrc, len,
++				 au_h_iptr(dst_parent->d_inode, bdst), &h_path);
++		break;
++	case S_IFDIR:
++		isdir = 1;
++		err = vfsub_mkdir(h_dir, &h_path, mode);
++		if (!err) {
++			/*
++			 * strange behaviour from the user's view,
++			 * particularly in the setattr case
++			 */
++			if (au_ibstart(dst_parent->d_inode) == bdst)
++				au_cpup_attr_nlink(dst_parent->d_inode,
++						   /*force*/1);
++			au_cpup_attr_nlink(dentry->d_inode, /*force*/1);
++		}
++		break;
++	case S_IFLNK:
++		err = au_do_cpup_symlink(&h_path, h_src, h_dir);
++		break;
++	case S_IFCHR:
++	case S_IFBLK:
++		AuDebugOn(!capable(CAP_MKNOD));
++		/*FALLTHROUGH*/
++	case S_IFIFO:
++	case S_IFSOCK:
++		err = vfsub_mknod(h_dir, &h_path, mode, h_inode->i_rdev);
++		break;
++	default:
++		AuIOErr("Unknown inode type 0%o\n", mode);
++		err = -EIO;
++	}
++
++	mnt_flags = au_mntflags(sb);
++	if (!au_opt_test(mnt_flags, UDBA_NONE)
++	    && !isdir
++	    && au_opt_test(mnt_flags, XINO)
++	    && h_inode->i_nlink == 1
++	    /* todo: unnecessary? */
++	    /* && dentry->d_inode->i_nlink == 1 */
++	    && bdst < bsrc
++	    && !au_ftest_cpup(flags, KEEPLINO))
++		au_xino_write(sb, bsrc, h_inode->i_ino, /*ino*/0);
++		/* ignore this error */
++
++	if (do_dt)
++		au_dtime_revert(&dt);
++	return err;
++}
++
++/*
++ * copyup the @dentry from @bsrc to @bdst.
++ * the caller must set the both of lower dentries.
++ * @len is for truncating when it is -1 copyup the entire file.
++ * in link/rename cases, @dst_parent may be different from the real one.
++ */
++static int au_cpup_single(struct dentry *dentry, aufs_bindex_t bdst,
++			  aufs_bindex_t bsrc, loff_t len, unsigned int flags,
++			  struct dentry *dst_parent)
++{
++	int err, rerr;
++	aufs_bindex_t old_ibstart;
++	unsigned char isdir, plink;
++	struct au_dtime dt;
++	struct path h_path;
++	struct dentry *h_src, *h_dst, *h_parent;
++	struct inode *dst_inode, *h_dir, *inode;
++	struct super_block *sb;
++
++	AuDebugOn(bsrc <= bdst);
++
++	sb = dentry->d_sb;
++	h_path.mnt = au_sbr_mnt(sb, bdst);
++	h_dst = au_h_dptr(dentry, bdst);
++	h_parent = h_dst->d_parent; /* dir inode is locked */
++	h_dir = h_parent->d_inode;
++	IMustLock(h_dir);
++
++	h_src = au_h_dptr(dentry, bsrc);
++	inode = dentry->d_inode;
++
++	if (!dst_parent)
++		dst_parent = dget_parent(dentry);
++	else
++		dget(dst_parent);
++
++	plink = !!au_opt_test(au_mntflags(sb), PLINK);
++	dst_inode = au_h_iptr(inode, bdst);
++	if (dst_inode) {
++		if (unlikely(!plink)) {
++			err = -EIO;
++			AuIOErr("i%lu exists on a upper branch "
++				"but plink is disabled\n", inode->i_ino);
++			goto out;
++		}
++
++		if (dst_inode->i_nlink) {
++			const int do_dt = au_ftest_cpup(flags, DTIME);
++
++			h_src = au_plink_lkup(inode, bdst);
++			err = PTR_ERR(h_src);
++			if (IS_ERR(h_src))
++				goto out;
++			if (unlikely(!h_src->d_inode)) {
++				err = -EIO;
++				AuIOErr("i%lu exists on a upper branch "
++					"but plink is broken\n", inode->i_ino);
++				dput(h_src);
++				goto out;
++			}
++
++			if (do_dt) {
++				h_path.dentry = h_parent;
++				au_dtime_store(&dt, dst_parent, &h_path);
++			}
++			h_path.dentry = h_dst;
++			err = vfsub_link(h_src, h_dir, &h_path);
++			if (do_dt)
++				au_dtime_revert(&dt);
++			dput(h_src);
++			goto out;
++		} else
++			/* todo: cpup_wh_file? */
++			/* udba work */
++			au_update_brange(inode, 1);
++	}
++
++	old_ibstart = au_ibstart(inode);
++	err = cpup_entry(dentry, bdst, bsrc, len, flags, dst_parent);
++	if (unlikely(err))
++		goto out;
++	dst_inode = h_dst->d_inode;
++	mutex_lock_nested(&dst_inode->i_mutex, AuLsc_I_CHILD2);
++
++	err = cpup_iattr(dentry, bdst, h_src);
++	isdir = S_ISDIR(dst_inode->i_mode);
++	if (!err) {
++		if (bdst < old_ibstart)
++			au_set_ibstart(inode, bdst);
++		au_set_h_iptr(inode, bdst, au_igrab(dst_inode),
++			      au_hi_flags(inode, isdir));
++		mutex_unlock(&dst_inode->i_mutex);
++		if (!isdir
++		    && h_src->d_inode->i_nlink > 1
++		    && plink)
++			au_plink_append(inode, bdst, h_dst);
++		goto out; /* success */
++	}
++
++	/* revert */
++	h_path.dentry = h_parent;
++	mutex_unlock(&dst_inode->i_mutex);
++	au_dtime_store(&dt, dst_parent, &h_path);
++	h_path.dentry = h_dst;
++	if (!isdir)
++		rerr = vfsub_unlink(h_dir, &h_path, /*force*/0);
++	else
++		rerr = vfsub_rmdir(h_dir, &h_path);
++	au_dtime_revert(&dt);
++	if (rerr) {
++		AuIOErr("failed removing broken entry(%d, %d)\n", err, rerr);
++		err = -EIO;
++	}
++
++ out:
++	dput(dst_parent);
++	return err;
++}
++
++struct au_cpup_single_args {
++	int *errp;
++	struct dentry *dentry;
++	aufs_bindex_t bdst, bsrc;
++	loff_t len;
++	unsigned int flags;
++	struct dentry *dst_parent;
++};
++
++static void au_call_cpup_single(void *args)
++{
++	struct au_cpup_single_args *a = args;
++	*a->errp = au_cpup_single(a->dentry, a->bdst, a->bsrc, a->len,
++				  a->flags, a->dst_parent);
++}
++
++int au_sio_cpup_single(struct dentry *dentry, aufs_bindex_t bdst,
++		       aufs_bindex_t bsrc, loff_t len, unsigned int flags,
++		       struct dentry *dst_parent)
++{
++	int err, wkq_err;
++	umode_t mode;
++	struct dentry *h_dentry;
++
++	h_dentry = au_h_dptr(dentry, bsrc);
++	mode = h_dentry->d_inode->i_mode & S_IFMT;
++	if ((mode != S_IFCHR && mode != S_IFBLK)
++	    || capable(CAP_MKNOD))
++		err = au_cpup_single(dentry, bdst, bsrc, len, flags,
++				     dst_parent);
++	else {
++		struct au_cpup_single_args args = {
++			.errp		= &err,
++			.dentry		= dentry,
++			.bdst		= bdst,
++			.bsrc		= bsrc,
++			.len		= len,
++			.flags		= flags,
++			.dst_parent	= dst_parent
++		};
++		wkq_err = au_wkq_wait(au_call_cpup_single, &args);
++		if (unlikely(wkq_err))
++			err = wkq_err;
++	}
++
++	return err;
++}
++
++/*
++ * copyup the @dentry from the first active lower branch to @bdst,
++ * using au_cpup_single().
++ */
++static int au_cpup_simple(struct dentry *dentry, aufs_bindex_t bdst, loff_t len,
++			  unsigned int flags)
++{
++	int err;
++	aufs_bindex_t bsrc, bend;
++
++	bend = au_dbend(dentry);
++	for (bsrc = bdst + 1; bsrc <= bend; bsrc++)
++		if (au_h_dptr(dentry, bsrc))
++			break;
++
++	err = au_lkup_neg(dentry, bdst);
++	if (!err) {
++		err = au_cpup_single(dentry, bdst, bsrc, len, flags, NULL);
++		if (!err)
++			return 0; /* success */
++
++		/* revert */
++		au_set_h_dptr(dentry, bdst, NULL);
++		au_set_dbstart(dentry, bsrc);
++	}
++
++	return err;
++}
++
++struct au_cpup_simple_args {
++	int *errp;
++	struct dentry *dentry;
++	aufs_bindex_t bdst;
++	loff_t len;
++	unsigned int flags;
++};
++
++static void au_call_cpup_simple(void *args)
++{
++	struct au_cpup_simple_args *a = args;
++	*a->errp = au_cpup_simple(a->dentry, a->bdst, a->len, a->flags);
++}
++
++int au_sio_cpup_simple(struct dentry *dentry, aufs_bindex_t bdst, loff_t len,
++		       unsigned int flags)
++{
++	int err, wkq_err;
++	unsigned char do_sio;
++	struct dentry *parent;
++	struct inode *h_dir;
++
++	parent = dget_parent(dentry);
++	h_dir = au_h_iptr(parent->d_inode, bdst);
++	do_sio = !!au_test_h_perm_sio(h_dir, MAY_EXEC | MAY_WRITE);
++	if (!do_sio) {
++		/*
++		 * testing CAP_MKNOD is for generic fs,
++		 * but CAP_FSETID is for xfs only, currently.
++		 */
++		umode_t mode = dentry->d_inode->i_mode;
++		do_sio = (((mode & (S_IFCHR | S_IFBLK))
++			   && !capable(CAP_MKNOD))
++			  || ((mode & (S_ISUID | S_ISGID))
++			      && !capable(CAP_FSETID)));
++	}
++	if (!do_sio)
++		err = au_cpup_simple(dentry, bdst, len, flags);
++	else {
++		struct au_cpup_simple_args args = {
++			.errp		= &err,
++			.dentry		= dentry,
++			.bdst		= bdst,
++			.len		= len,
++			.flags		= flags
++		};
++		wkq_err = au_wkq_wait(au_call_cpup_simple, &args);
++		if (unlikely(wkq_err))
++			err = wkq_err;
++	}
++
++	dput(parent);
++	return err;
++}
++
++/* ---------------------------------------------------------------------- */
++
++/*
++ * copyup the deleted file for writing.
++ */
++static int au_do_cpup_wh(struct dentry *dentry, aufs_bindex_t bdst,
++			 struct dentry *wh_dentry, struct file *file,
++			 loff_t len)
++{
++	int err;
++	aufs_bindex_t bstart;
++	struct au_dinfo *dinfo;
++	struct dentry *h_d_dst, *h_d_start;
++
++	dinfo = au_di(dentry);
++	AuRwMustWriteLock(&dinfo->di_rwsem);
++
++	bstart = dinfo->di_bstart;
++	h_d_dst = dinfo->di_hdentry[0 + bdst].hd_dentry;
++	dinfo->di_bstart = bdst;
++	dinfo->di_hdentry[0 + bdst].hd_dentry = wh_dentry;
++	h_d_start = dinfo->di_hdentry[0 + bstart].hd_dentry;
++	if (file)
++		dinfo->di_hdentry[0 + bstart].hd_dentry
++			= au_h_fptr(file, au_fbstart(file))->f_dentry;
++	err = au_cpup_single(dentry, bdst, bstart, len, !AuCpup_DTIME,
++			     /*h_parent*/NULL);
++	if (!err && file) {
++		err = au_reopen_nondir(file);
++		dinfo->di_hdentry[0 + bstart].hd_dentry = h_d_start;
++	}
++	dinfo->di_hdentry[0 + bdst].hd_dentry = h_d_dst;
++	dinfo->di_bstart = bstart;
++
++	return err;
++}
++
++static int au_cpup_wh(struct dentry *dentry, aufs_bindex_t bdst, loff_t len,
++		      struct file *file)
++{
++	int err;
++	struct au_dtime dt;
++	struct dentry *parent, *h_parent, *wh_dentry;
++	struct au_branch *br;
++	struct path h_path;
++
++	br = au_sbr(dentry->d_sb, bdst);
++	parent = dget_parent(dentry);
++	h_parent = au_h_dptr(parent, bdst);
++	wh_dentry = au_whtmp_lkup(h_parent, br, &dentry->d_name);
++	err = PTR_ERR(wh_dentry);
++	if (IS_ERR(wh_dentry))
++		goto out;
++
++	h_path.dentry = h_parent;
++	h_path.mnt = br->br_mnt;
++	au_dtime_store(&dt, parent, &h_path);
++	err = au_do_cpup_wh(dentry, bdst, wh_dentry, file, len);
++	if (unlikely(err))
++		goto out_wh;
++
++	dget(wh_dentry);
++	h_path.dentry = wh_dentry;
++	err = vfsub_unlink(h_parent->d_inode, &h_path, /*force*/0);
++	if (unlikely(err)) {
++		AuIOErr("failed remove copied-up tmp file %.*s(%d)\n",
++			AuDLNPair(wh_dentry), err);
++		err = -EIO;
++	}
++	au_dtime_revert(&dt);
++	au_set_hi_wh(dentry->d_inode, bdst, wh_dentry);
++
++ out_wh:
++	dput(wh_dentry);
++ out:
++	dput(parent);
++	return err;
++}
++
++struct au_cpup_wh_args {
++	int *errp;
++	struct dentry *dentry;
++	aufs_bindex_t bdst;
++	loff_t len;
++	struct file *file;
++};
++
++static void au_call_cpup_wh(void *args)
++{
++	struct au_cpup_wh_args *a = args;
++	*a->errp = au_cpup_wh(a->dentry, a->bdst, a->len, a->file);
++}
++
++int au_sio_cpup_wh(struct dentry *dentry, aufs_bindex_t bdst, loff_t len,
++		   struct file *file)
++{
++	int err, wkq_err;
++	struct dentry *parent, *h_orph, *h_parent, *h_dentry;
++	struct inode *dir, *h_dir, *h_tmpdir, *h_inode;
++	struct au_wbr *wbr;
++
++	parent = dget_parent(dentry);
++	dir = parent->d_inode;
++	h_orph = NULL;
++	h_parent = NULL;
++	h_dir = au_igrab(au_h_iptr(dir, bdst));
++	h_tmpdir = h_dir;
++	if (!h_dir->i_nlink) {
++		wbr = au_sbr(dentry->d_sb, bdst)->br_wbr;
++		h_orph = wbr->wbr_orph;
++
++		h_parent = dget(au_h_dptr(parent, bdst));
++		au_set_h_dptr(parent, bdst, NULL);
++		au_set_h_dptr(parent, bdst, dget(h_orph));
++		h_tmpdir = h_orph->d_inode;
++		au_set_h_iptr(dir, bdst, NULL, 0);
++		au_set_h_iptr(dir, bdst, au_igrab(h_tmpdir), /*flags*/0);
++
++		/* this temporary unlock is safe */
++		if (file)
++			h_dentry = au_h_fptr(file, au_fbstart(file))->f_dentry;
++		else
++			h_dentry = au_h_dptr(dentry, au_dbstart(dentry));
++		h_inode = h_dentry->d_inode;
++		IMustLock(h_inode);
++		mutex_unlock(&h_inode->i_mutex);
++		mutex_lock_nested(&h_tmpdir->i_mutex, AuLsc_I_PARENT3);
++		mutex_lock_nested(&h_inode->i_mutex, AuLsc_I_CHILD);
++	}
++
++	if (!au_test_h_perm_sio(h_tmpdir, MAY_EXEC | MAY_WRITE))
++		err = au_cpup_wh(dentry, bdst, len, file);
++	else {
++		struct au_cpup_wh_args args = {
++			.errp	= &err,
++			.dentry	= dentry,
++			.bdst	= bdst,
++			.len	= len,
++			.file	= file
++		};
++		wkq_err = au_wkq_wait(au_call_cpup_wh, &args);
++		if (unlikely(wkq_err))
++			err = wkq_err;
++	}
++
++	if (h_orph) {
++		mutex_unlock(&h_tmpdir->i_mutex);
++		au_set_h_iptr(dir, bdst, NULL, 0);
++		au_set_h_iptr(dir, bdst, au_igrab(h_dir), /*flags*/0);
++		au_set_h_dptr(parent, bdst, NULL);
++		au_set_h_dptr(parent, bdst, h_parent);
++	}
++	iput(h_dir);
++	dput(parent);
++
++	return err;
++}
++
++/* ---------------------------------------------------------------------- */
++
++/*
++ * generic routine for both of copy-up and copy-down.
++ */
++/* cf. revalidate function in file.c */
++int au_cp_dirs(struct dentry *dentry, aufs_bindex_t bdst,
++	       int (*cp)(struct dentry *dentry, aufs_bindex_t bdst,
++			 struct dentry *h_parent, void *arg),
++	       void *arg)
++{
++	int err;
++	struct au_pin pin;
++	struct dentry *d, *parent, *h_parent, *real_parent;
++
++	err = 0;
++	parent = dget_parent(dentry);
++	if (IS_ROOT(parent))
++		goto out;
++
++	au_pin_init(&pin, dentry, bdst, AuLsc_DI_PARENT2, AuLsc_I_PARENT2,
++		    au_opt_udba(dentry->d_sb), AuPin_MNT_WRITE);
++
++	/* do not use au_dpage */
++	real_parent = parent;
++	while (1) {
++		dput(parent);
++		parent = dget_parent(dentry);
++		h_parent = au_h_dptr(parent, bdst);
++		if (h_parent)
++			goto out; /* success */
++
++		/* find top dir which is necessary to cpup */
++		do {
++			d = parent;
++			dput(parent);
++			parent = dget_parent(d);
++			di_read_lock_parent3(parent, !AuLock_IR);
++			h_parent = au_h_dptr(parent, bdst);
++			di_read_unlock(parent, !AuLock_IR);
++		} while (!h_parent);
++
++		if (d != real_parent)
++			di_write_lock_child3(d);
++
++		/* somebody else might create while we were sleeping */
++		if (!au_h_dptr(d, bdst) || !au_h_dptr(d, bdst)->d_inode) {
++			if (au_h_dptr(d, bdst))
++				au_update_dbstart(d);
++
++			au_pin_set_dentry(&pin, d);
++			err = au_do_pin(&pin);
++			if (!err) {
++				err = cp(d, bdst, h_parent, arg);
++				au_unpin(&pin);
++			}
++		}
++
++		if (d != real_parent)
++			di_write_unlock(d);
++		if (unlikely(err))
++			break;
++	}
++
++ out:
++	dput(parent);
++	return err;
++}
++
++static int au_cpup_dir(struct dentry *dentry, aufs_bindex_t bdst,
++		       struct dentry *h_parent __maybe_unused ,
++		       void *arg __maybe_unused)
++{
++	return au_sio_cpup_simple(dentry, bdst, -1, AuCpup_DTIME);
++}
++
++int au_cpup_dirs(struct dentry *dentry, aufs_bindex_t bdst)
++{
++	return au_cp_dirs(dentry, bdst, au_cpup_dir, NULL);
++}
++
++int au_test_and_cpup_dirs(struct dentry *dentry, aufs_bindex_t bdst)
++{
++	int err;
++	struct dentry *parent;
++	struct inode *dir;
++
++	parent = dget_parent(dentry);
++	dir = parent->d_inode;
++	err = 0;
++	if (au_h_iptr(dir, bdst))
++		goto out;
++
++	di_read_unlock(parent, AuLock_IR);
++	di_write_lock_parent(parent);
++	/* someone else might change our inode while we were sleeping */
++	if (!au_h_iptr(dir, bdst))
++		err = au_cpup_dirs(dentry, bdst);
++	di_downgrade_lock(parent, AuLock_IR);
++
++ out:
++	dput(parent);
++	return err;
++}
+diff -Nur linux-2.6.31.5.orig/fs/aufs/cpup.h linux-2.6.31.5/fs/aufs/cpup.h
+--- linux-2.6.31.5.orig/fs/aufs/cpup.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.31.5/fs/aufs/cpup.h	2009-11-15 22:02:37.000000000 +0100
+@@ -0,0 +1,81 @@
++/*
++ * Copyright (C) 2005-2009 Junjiro R. Okajima
++ *
++ * This program, aufs is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
++ */
++
++/*
++ * copy-up/down functions
++ */
++
++#ifndef __AUFS_CPUP_H__
++#define __AUFS_CPUP_H__
++
++#ifdef __KERNEL__
++
++#include <linux/path.h>
++#include <linux/time.h>
++#include <linux/aufs_type.h>
++
++struct inode;
++struct file;
++
++void au_cpup_attr_flags(struct inode *dst, struct inode *src);
++void au_cpup_attr_timesizes(struct inode *inode);
++void au_cpup_attr_nlink(struct inode *inode, int force);
++void au_cpup_attr_changeable(struct inode *inode);
++void au_cpup_igen(struct inode *inode, struct inode *h_inode);
++void au_cpup_attr_all(struct inode *inode, int force);
++
++/* ---------------------------------------------------------------------- */
++
++/* cpup flags */
++#define AuCpup_DTIME	1		/* do dtime_store/revert */
++#define AuCpup_KEEPLINO	(1 << 1)	/* do not clear the lower xino,
++					   for link(2) */
++#define au_ftest_cpup(flags, name)	((flags) & AuCpup_##name)
++#define au_fset_cpup(flags, name)	{ (flags) |= AuCpup_##name; }
++#define au_fclr_cpup(flags, name)	{ (flags) &= ~AuCpup_##name; }
++
++int au_copy_file(struct file *dst, struct file *src, loff_t len);
++int au_sio_cpup_single(struct dentry *dentry, aufs_bindex_t bdst,
++		       aufs_bindex_t bsrc, loff_t len, unsigned int flags,
++		       struct dentry *dst_parent);
++int au_sio_cpup_simple(struct dentry *dentry, aufs_bindex_t bdst, loff_t len,
++		       unsigned int flags);
++int au_sio_cpup_wh(struct dentry *dentry, aufs_bindex_t bdst, loff_t len,
++		   struct file *file);
++
++int au_cp_dirs(struct dentry *dentry, aufs_bindex_t bdst,
++	       int (*cp)(struct dentry *dentry, aufs_bindex_t bdst,
++			 struct dentry *h_parent, void *arg),
++	       void *arg);
++int au_cpup_dirs(struct dentry *dentry, aufs_bindex_t bdst);
++int au_test_and_cpup_dirs(struct dentry *dentry, aufs_bindex_t bdst);
++
++/* ---------------------------------------------------------------------- */
++
++/* keep timestamps when copyup */
++struct au_dtime {
++	struct dentry *dt_dentry;
++	struct path dt_h_path;
++	struct timespec dt_atime, dt_mtime;
++};
++void au_dtime_store(struct au_dtime *dt, struct dentry *dentry,
++		    struct path *h_path);
++void au_dtime_revert(struct au_dtime *dt);
++
++#endif /* __KERNEL__ */
++#endif /* __AUFS_CPUP_H__ */
+diff -Nur linux-2.6.31.5.orig/fs/aufs/dbgaufs.c linux-2.6.31.5/fs/aufs/dbgaufs.c
+--- linux-2.6.31.5.orig/fs/aufs/dbgaufs.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.31.5/fs/aufs/dbgaufs.c	2009-11-15 22:02:37.000000000 +0100
+@@ -0,0 +1,331 @@
++/*
++ * Copyright (C) 2005-2009 Junjiro R. Okajima
++ *
++ * This program, aufs is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
++ */
++
++/*
++ * debugfs interface
++ */
++
++#include <linux/debugfs.h>
++#include "aufs.h"
++
++#ifndef CONFIG_SYSFS
++#error DEBUG_FS depends upon SYSFS
++#endif
++
++static struct dentry *dbgaufs;
++static const mode_t dbgaufs_mode = S_IRUSR | S_IRGRP | S_IROTH;
++
++/* 20 is max digits length of ulong 64 */
++struct dbgaufs_arg {
++	int n;
++	char a[20 * 4];
++};
++
++/*
++ * common function for all XINO files
++ */
++static int dbgaufs_xi_release(struct inode *inode __maybe_unused,
++			      struct file *file)
++{
++	kfree(file->private_data);
++	return 0;
++}
++
++static int dbgaufs_xi_open(struct file *xf, struct file *file, int do_fcnt)
++{
++	int err;
++	struct kstat st;
++	struct dbgaufs_arg *p;
++
++	err = -ENOMEM;
++	p = kmalloc(sizeof(*p), GFP_NOFS);
++	if (unlikely(!p))
++		goto out;
++
++	err = 0;
++	p->n = 0;
++	file->private_data = p;
++	if (!xf)
++		goto out;
++
++	err = vfs_getattr(xf->f_vfsmnt, xf->f_dentry, &st);
++	if (!err) {
++		if (do_fcnt)
++			p->n = snprintf
++				(p->a, sizeof(p->a), "%ld, %llux%lu %lld\n",
++				 (long)file_count(xf), st.blocks, st.blksize,
++				 (long long)st.size);
++		else
++			p->n = snprintf(p->a, sizeof(p->a), "%llux%lu %lld\n",
++					st.blocks, st.blksize,
++					(long long)st.size);
++		AuDebugOn(p->n >= sizeof(p->a));
++	} else {
++		p->n = snprintf(p->a, sizeof(p->a), "err %d\n", err);
++		err = 0;
++	}
++
++ out:
++	return err;
++
++}
++
++static ssize_t dbgaufs_xi_read(struct file *file, char __user *buf,
++			       size_t count, loff_t *ppos)
++{
++	struct dbgaufs_arg *p;
++
++	p = file->private_data;
++	return simple_read_from_buffer(buf, count, ppos, p->a, p->n);
++}
++
++/* ---------------------------------------------------------------------- */
++
++static int dbgaufs_xib_open(struct inode *inode, struct file *file)
++{
++	int err;
++	struct au_sbinfo *sbinfo;
++	struct super_block *sb;
++
++	sbinfo = inode->i_private;
++	sb = sbinfo->si_sb;
++	si_noflush_read_lock(sb);
++	err = dbgaufs_xi_open(sbinfo->si_xib, file, /*do_fcnt*/0);
++	si_read_unlock(sb);
++	return err;
++}
++
++static const struct file_operations dbgaufs_xib_fop = {
++	.open		= dbgaufs_xib_open,
++	.release	= dbgaufs_xi_release,
++	.read		= dbgaufs_xi_read
++};
++
++/* ---------------------------------------------------------------------- */
++
++#define DbgaufsXi_PREFIX "xi"
++
++static int dbgaufs_xino_open(struct inode *inode, struct file *file)
++{
++	int err;
++	long l;
++	struct au_sbinfo *sbinfo;
++	struct super_block *sb;
++	struct file *xf;
++	struct qstr *name;
++
++	err = -ENOENT;
++	xf = NULL;
++	name = &file->f_dentry->d_name;
++	if (unlikely(name->len < sizeof(DbgaufsXi_PREFIX)
++		     || memcmp(name->name, DbgaufsXi_PREFIX,
++			       sizeof(DbgaufsXi_PREFIX) - 1)))
++		goto out;
++	err = strict_strtol(name->name + sizeof(DbgaufsXi_PREFIX) - 1, 10, &l);
++	if (unlikely(err))
++		goto out;
++
++	sbinfo = inode->i_private;
++	sb = sbinfo->si_sb;
++	si_noflush_read_lock(sb);
++	if (l <= au_sbend(sb)) {
++		xf = au_sbr(sb, (aufs_bindex_t)l)->br_xino.xi_file;
++		err = dbgaufs_xi_open(xf, file, /*do_fcnt*/1);
++	} else
++		err = -ENOENT;
++	si_read_unlock(sb);
++
++ out:
++	return err;
++}
++
++static const struct file_operations dbgaufs_xino_fop = {
++	.open		= dbgaufs_xino_open,
++	.release	= dbgaufs_xi_release,
++	.read		= dbgaufs_xi_read
++};
++
++void dbgaufs_brs_del(struct super_block *sb, aufs_bindex_t bindex)
++{
++	aufs_bindex_t bend;
++	struct au_branch *br;
++	struct au_xino_file *xi;
++
++	if (!au_sbi(sb)->si_dbgaufs)
++		return;
++
++	bend = au_sbend(sb);
++	for (; bindex <= bend; bindex++) {
++		br = au_sbr(sb, bindex);
++		xi = &br->br_xino;
++		if (xi->xi_dbgaufs) {
++			debugfs_remove(xi->xi_dbgaufs);
++			xi->xi_dbgaufs = NULL;
++		}
++	}
++}
++
++void dbgaufs_brs_add(struct super_block *sb, aufs_bindex_t bindex)
++{
++	struct au_sbinfo *sbinfo;
++	struct dentry *parent;
++	struct au_branch *br;
++	struct au_xino_file *xi;
++	aufs_bindex_t bend;
++	char name[sizeof(DbgaufsXi_PREFIX) + 5]; /* "xi" bindex NULL */
++
++	sbinfo = au_sbi(sb);
++	parent = sbinfo->si_dbgaufs;
++	if (!parent)
++		return;
++
++	bend = au_sbend(sb);
++	for (; bindex <= bend; bindex++) {
++		snprintf(name, sizeof(name), DbgaufsXi_PREFIX "%d", bindex);
++		br = au_sbr(sb, bindex);
++		xi = &br->br_xino;
++		AuDebugOn(xi->xi_dbgaufs);
++		xi->xi_dbgaufs = debugfs_create_file(name, dbgaufs_mode, parent,
++						     sbinfo, &dbgaufs_xino_fop);
++		/* ignore an error */
++		if (unlikely(!xi->xi_dbgaufs))
++			AuWarn1("failed %s under debugfs\n", name);
++	}
++}
++
++/* ---------------------------------------------------------------------- */
++
++#ifdef CONFIG_AUFS_EXPORT
++static int dbgaufs_xigen_open(struct inode *inode, struct file *file)
++{
++	int err;
++	struct au_sbinfo *sbinfo;
++	struct super_block *sb;
++
++	sbinfo = inode->i_private;
++	sb = sbinfo->si_sb;
++	si_noflush_read_lock(sb);
++	err = dbgaufs_xi_open(sbinfo->si_xigen, file, /*do_fcnt*/0);
++	si_read_unlock(sb);
++	return err;
++}
++
++static const struct file_operations dbgaufs_xigen_fop = {
++	.open		= dbgaufs_xigen_open,
++	.release	= dbgaufs_xi_release,
++	.read		= dbgaufs_xi_read
++};
++
++static int dbgaufs_xigen_init(struct au_sbinfo *sbinfo)
++{
++	int err;
++
++	/*
++	 * This function is a dynamic '__init' function actually,
++	 * so the tiny check for si_rwsem is unnecessary.
++	 */
++	/* AuRwMustWriteLock(&sbinfo->si_rwsem); */
++
++	err = -EIO;
++	sbinfo->si_dbgaufs_xigen = debugfs_create_file
++		("xigen", dbgaufs_mode, sbinfo->si_dbgaufs, sbinfo,
++		 &dbgaufs_xigen_fop);
++	if (sbinfo->si_dbgaufs_xigen)
++		err = 0;
++
++	return err;
++}
++#else
++static int dbgaufs_xigen_init(struct au_sbinfo *sbinfo)
++{
++	return 0;
++}
++#endif /* CONFIG_AUFS_EXPORT */
++
++/* ---------------------------------------------------------------------- */
++
++void dbgaufs_si_fin(struct au_sbinfo *sbinfo)
++{
++	/*
++	 * This function is a dynamic '__init' function actually,
++	 * so the tiny check for si_rwsem is unnecessary.
++	 */
++	/* AuRwMustWriteLock(&sbinfo->si_rwsem); */
++
++	debugfs_remove_recursive(sbinfo->si_dbgaufs);
++	sbinfo->si_dbgaufs = NULL;
++	kobject_put(&sbinfo->si_kobj);
++}
++
++int dbgaufs_si_init(struct au_sbinfo *sbinfo)
++{
++	int err;
++	char name[SysaufsSiNameLen];
++
++	/*
++	 * This function is a dynamic '__init' function actually,
++	 * so the tiny check for si_rwsem is unnecessary.
++	 */
++	/* AuRwMustWriteLock(&sbinfo->si_rwsem); */
++
++	err = -ENOENT;
++	if (!dbgaufs) {
++		AuErr1("/debug/aufs is uninitialized\n");
++		goto out;
++	}
++
++	err = -EIO;
++	sysaufs_name(sbinfo, name);
++	sbinfo->si_dbgaufs = debugfs_create_dir(name, dbgaufs);
++	if (unlikely(!sbinfo->si_dbgaufs))
++		goto out;
++	kobject_get(&sbinfo->si_kobj);
++
++	sbinfo->si_dbgaufs_xib = debugfs_create_file
++		("xib", dbgaufs_mode, sbinfo->si_dbgaufs, sbinfo,
++		 &dbgaufs_xib_fop);
++	if (unlikely(!sbinfo->si_dbgaufs_xib))
++		goto out_dir;
++
++	err = dbgaufs_xigen_init(sbinfo);
++	if (!err)
++		goto out; /* success */
++
++ out_dir:
++	dbgaufs_si_fin(sbinfo);
++ out:
++	return err;
++}
++
++/* ---------------------------------------------------------------------- */
++
++void dbgaufs_fin(void)
++{
++	debugfs_remove(dbgaufs);
++}
++
++int __init dbgaufs_init(void)
++{
++	int err;
++
++	err = -EIO;
++	dbgaufs = debugfs_create_dir(AUFS_NAME, NULL);
++	if (dbgaufs)
++		err = 0;
++	return err;
++}
+diff -Nur linux-2.6.31.5.orig/fs/aufs/dbgaufs.h linux-2.6.31.5/fs/aufs/dbgaufs.h
+--- linux-2.6.31.5.orig/fs/aufs/dbgaufs.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.31.5/fs/aufs/dbgaufs.h	2009-11-15 22:02:37.000000000 +0100
+@@ -0,0 +1,79 @@
++/*
++ * Copyright (C) 2005-2009 Junjiro R. Okajima
++ *
++ * This program, aufs is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
++ */
++
++/*
++ * debugfs interface
++ */
++
++#ifndef __DBGAUFS_H__
++#define __DBGAUFS_H__
++
++#ifdef __KERNEL__
++
++#include <linux/init.h>
++#include <linux/aufs_type.h>
++
++struct super_block;
++struct au_sbinfo;
++
++#ifdef CONFIG_DEBUG_FS
++/* dbgaufs.c */
++void dbgaufs_brs_del(struct super_block *sb, aufs_bindex_t bindex);
++void dbgaufs_brs_add(struct super_block *sb, aufs_bindex_t bindex);
++void dbgaufs_si_fin(struct au_sbinfo *sbinfo);
++int dbgaufs_si_init(struct au_sbinfo *sbinfo);
++void dbgaufs_fin(void);
++int __init dbgaufs_init(void);
++
++#else
++
++static inline
++void dbgaufs_brs_del(struct super_block *sb, aufs_bindex_t bindex)
++{
++	/* empty */
++}
++
++static inline
++void dbgaufs_brs_add(struct super_block *sb, aufs_bindex_t bindex)
++{
++	/* empty */
++}
++
++static inline
++void dbgaufs_si_fin(struct au_sbinfo *sbinfo)
++{
++	/* empty */
++}
++
++static inline
++int dbgaufs_si_init(struct au_sbinfo *sbinfo)
++{
++	return 0;
++}
++
++#define dbgaufs_fin()	do {} while (0)
++
++static inline
++int __init dbgaufs_init(void)
++{
++	return 0;
++}
++#endif /* CONFIG_DEBUG_FS */
++
++#endif /* __KERNEL__ */
++#endif /* __DBGAUFS_H__ */
+diff -Nur linux-2.6.31.5.orig/fs/aufs/dcsub.c linux-2.6.31.5/fs/aufs/dcsub.c
+--- linux-2.6.31.5.orig/fs/aufs/dcsub.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.31.5/fs/aufs/dcsub.c	2009-11-15 22:02:37.000000000 +0100
+@@ -0,0 +1,223 @@
++/*
++ * Copyright (C) 2005-2009 Junjiro R. Okajima
++ *
++ * This program, aufs is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
++ */
++
++/*
++ * sub-routines for dentry cache
++ */
++
++#include "aufs.h"
++
++static void au_dpage_free(struct au_dpage *dpage)
++{
++	int i;
++	struct dentry **p;
++
++	p = dpage->dentries;
++	for (i = 0; i < dpage->ndentry; i++)
++		dput(*p++);
++	free_page((unsigned long)dpage->dentries);
++}
++
++int au_dpages_init(struct au_dcsub_pages *dpages, gfp_t gfp)
++{
++	int err;
++	void *p;
++
++	err = -ENOMEM;
++	dpages->dpages = kmalloc(sizeof(*dpages->dpages), gfp);
++	if (unlikely(!dpages->dpages))
++		goto out;
++
++	p = (void *)__get_free_page(gfp);
++	if (unlikely(!p))
++		goto out_dpages;
++
++	dpages->dpages[0].ndentry = 0;
++	dpages->dpages[0].dentries = p;
++	dpages->ndpage = 1;
++	return 0; /* success */
++
++ out_dpages:
++	kfree(dpages->dpages);
++ out:
++	return err;
++}
++
++void au_dpages_free(struct au_dcsub_pages *dpages)
++{
++	int i;
++	struct au_dpage *p;
++
++	p = dpages->dpages;
++	for (i = 0; i < dpages->ndpage; i++)
++		au_dpage_free(p++);
++	kfree(dpages->dpages);
++}
++
++static int au_dpages_append(struct au_dcsub_pages *dpages,
++			    struct dentry *dentry, gfp_t gfp)
++{
++	int err, sz;
++	struct au_dpage *dpage;
++	void *p;
++
++	dpage = dpages->dpages + dpages->ndpage - 1;
++	sz = PAGE_SIZE / sizeof(dentry);
++	if (unlikely(dpage->ndentry >= sz)) {
++		AuLabel(new dpage);
++		err = -ENOMEM;
++		sz = dpages->ndpage * sizeof(*dpages->dpages);
++		p = au_kzrealloc(dpages->dpages, sz,
++				 sz + sizeof(*dpages->dpages), gfp);
++		if (unlikely(!p))
++			goto out;
++
++		dpages->dpages = p;
++		dpage = dpages->dpages + dpages->ndpage;
++		p = (void *)__get_free_page(gfp);
++		if (unlikely(!p))
++			goto out;
++
++		dpage->ndentry = 0;
++		dpage->dentries = p;
++		dpages->ndpage++;
++	}
++
++	dpage->dentries[dpage->ndentry++] = dget(dentry);
++	return 0; /* success */
++
++ out:
++	return err;
++}
++
++int au_dcsub_pages(struct au_dcsub_pages *dpages, struct dentry *root,
++		   au_dpages_test test, void *arg)
++{
++	int err;
++	struct dentry *this_parent = root;
++	struct list_head *next;
++	struct super_block *sb = root->d_sb;
++
++	err = 0;
++	spin_lock(&dcache_lock);
++ repeat:
++	next = this_parent->d_subdirs.next;
++ resume:
++	if (this_parent->d_sb == sb
++	    && !IS_ROOT(this_parent)
++	    && atomic_read(&this_parent->d_count)
++	    && this_parent->d_inode
++	    && (!test || test(this_parent, arg))) {
++		err = au_dpages_append(dpages, this_parent, GFP_ATOMIC);
++		if (unlikely(err))
++			goto out;
++	}
++
++	while (next != &this_parent->d_subdirs) {
++		struct list_head *tmp = next;
++		struct dentry *dentry = list_entry(tmp, struct dentry,
++						   d_u.d_child);
++		next = tmp->next;
++		if (/*d_unhashed(dentry) || */!dentry->d_inode)
++			continue;
++		if (!list_empty(&dentry->d_subdirs)) {
++			this_parent = dentry;
++			goto repeat;
++		}
++		if (dentry->d_sb == sb
++		    && atomic_read(&dentry->d_count)
++		    && (!test || test(dentry, arg))) {
++			err = au_dpages_append(dpages, dentry, GFP_ATOMIC);
++			if (unlikely(err))
++				goto out;
++		}
++	}
++
++	if (this_parent != root) {
++		next = this_parent->d_u.d_child.next;
++		this_parent = this_parent->d_parent; /* dcache_lock is locked */
++		goto resume;
++	}
++ out:
++	spin_unlock(&dcache_lock);
++	return err;
++}
++
++int au_dcsub_pages_rev(struct au_dcsub_pages *dpages, struct dentry *dentry,
++		       int do_include, au_dpages_test test, void *arg)
++{
++	int err;
++
++	err = 0;
++	spin_lock(&dcache_lock);
++	if (do_include && (!test || test(dentry, arg))) {
++		err = au_dpages_append(dpages, dentry, GFP_ATOMIC);
++		if (unlikely(err))
++			goto out;
++	}
++	while (!IS_ROOT(dentry)) {
++		dentry = dentry->d_parent; /* dcache_lock is locked */
++		if (!test || test(dentry, arg)) {
++			err = au_dpages_append(dpages, dentry, GFP_ATOMIC);
++			if (unlikely(err))
++				break;
++		}
++	}
++
++ out:
++	spin_unlock(&dcache_lock);
++
++	return err;
++}
++
++struct dentry *au_test_subdir(struct dentry *d1, struct dentry *d2)
++{
++	struct dentry *trap, **dentries;
++	int err, i, j;
++	struct au_dcsub_pages dpages;
++	struct au_dpage *dpage;
++
++	trap = ERR_PTR(-ENOMEM);
++	err = au_dpages_init(&dpages, GFP_NOFS);
++	if (unlikely(err))
++		goto out;
++	err = au_dcsub_pages_rev(&dpages, d1, /*do_include*/1, NULL, NULL);
++	if (unlikely(err))
++		goto out_dpages;
++
++	trap = d1;
++	for (i = 0; !err && i < dpages.ndpage; i++) {
++		dpage = dpages.dpages + i;
++		dentries = dpage->dentries;
++		for (j = 0; !err && j < dpage->ndentry; j++) {
++			struct dentry *d;
++
++			d = dentries[j];
++			err = (d == d2);
++			if (!err)
++				trap = d;
++		}
++	}
++	if (!err)
++		trap = NULL;
++
++ out_dpages:
++	au_dpages_free(&dpages);
++ out:
++	return trap;
++}
+diff -Nur linux-2.6.31.5.orig/fs/aufs/dcsub.h linux-2.6.31.5/fs/aufs/dcsub.h
+--- linux-2.6.31.5.orig/fs/aufs/dcsub.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.31.5/fs/aufs/dcsub.h	2009-11-15 22:02:37.000000000 +0100
+@@ -0,0 +1,54 @@
++/*
++ * Copyright (C) 2005-2009 Junjiro R. Okajima
++ *
++ * This program, aufs is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
++ */
++
++/*
++ * sub-routines for dentry cache
++ */
++
++#ifndef __AUFS_DCSUB_H__
++#define __AUFS_DCSUB_H__
++
++#ifdef __KERNEL__
++
++#include <linux/types.h>
++
++struct dentry;
++
++struct au_dpage {
++	int ndentry;
++	struct dentry **dentries;
++};
++
++struct au_dcsub_pages {
++	int ndpage;
++	struct au_dpage *dpages;
++};
++
++/* ---------------------------------------------------------------------- */
++
++int au_dpages_init(struct au_dcsub_pages *dpages, gfp_t gfp);
++void au_dpages_free(struct au_dcsub_pages *dpages);
++typedef int (*au_dpages_test)(struct dentry *dentry, void *arg);
++int au_dcsub_pages(struct au_dcsub_pages *dpages, struct dentry *root,
++		   au_dpages_test test, void *arg);
++int au_dcsub_pages_rev(struct au_dcsub_pages *dpages, struct dentry *dentry,
++		       int do_include, au_dpages_test test, void *arg);
++struct dentry *au_test_subdir(struct dentry *d1, struct dentry *d2);
++
++#endif /* __KERNEL__ */
++#endif /* __AUFS_DCSUB_H__ */
+diff -Nur linux-2.6.31.5.orig/fs/aufs/debug.c linux-2.6.31.5/fs/aufs/debug.c
+--- linux-2.6.31.5.orig/fs/aufs/debug.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.31.5/fs/aufs/debug.c	2009-11-15 22:02:37.000000000 +0100
+@@ -0,0 +1,427 @@
++/*
++ * Copyright (C) 2005-2009 Junjiro R. Okajima
++ *
++ * This program, aufs is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
++ */
++
++/*
++ * debug print functions
++ */
++
++#include <linux/module.h>
++#include <linux/vt_kern.h>
++#include "aufs.h"
++
++int aufs_debug;
++MODULE_PARM_DESC(debug, "debug print");
++module_param_named(debug, aufs_debug, int, S_IRUGO | S_IWUSR | S_IWGRP);
++
++char *au_plevel = KERN_DEBUG;
++#define dpri(fmt, arg...) do { \
++	if (au_debug_test()) \
++		printk("%s" fmt, au_plevel, ##arg); \
++} while (0)
++
++/* ---------------------------------------------------------------------- */
++
++void au_dpri_whlist(struct au_nhash *whlist)
++{
++	unsigned long ul, n;
++	struct hlist_head *head;
++	struct au_vdir_wh *tpos;
++	struct hlist_node *pos;
++
++	n = whlist->nh_num;
++	head = whlist->nh_head;
++	for (ul = 0; ul < n; ul++) {
++		hlist_for_each_entry(tpos, pos, head, wh_hash)
++			dpri("b%d, %.*s, %d\n",
++			     tpos->wh_bindex,
++			     tpos->wh_str.len, tpos->wh_str.name,
++			     tpos->wh_str.len);
++		head++;
++	}
++}
++
++void au_dpri_vdir(struct au_vdir *vdir)
++{
++	unsigned long ul;
++	union au_vdir_deblk_p p;
++	unsigned char *o;
++
++	if (!vdir || IS_ERR(vdir)) {
++		dpri("err %ld\n", PTR_ERR(vdir));
++		return;
++	}
++
++	dpri("deblk %u, nblk %lu, deblk %p, last{%lu, %p}, ver %lu\n",
++	     vdir->vd_deblk_sz, vdir->vd_nblk, vdir->vd_deblk,
++	     vdir->vd_last.ul, vdir->vd_last.p.deblk, vdir->vd_version);
++	for (ul = 0; ul < vdir->vd_nblk; ul++) {
++		p.deblk = vdir->vd_deblk[ul];
++		o = p.deblk;
++		dpri("[%lu]: %p\n", ul, o);
++	}
++}
++
++static int do_pri_inode(aufs_bindex_t bindex, struct inode *inode,
++			struct dentry *wh)
++{
++	char *n = NULL;
++	int l = 0;
++
++	if (!inode || IS_ERR(inode)) {
++		dpri("i%d: err %ld\n", bindex, PTR_ERR(inode));
++		return -1;
++	}
++
++	/* the type of i_blocks depends upon CONFIG_LSF */
++	BUILD_BUG_ON(sizeof(inode->i_blocks) != sizeof(unsigned long)
++		     && sizeof(inode->i_blocks) != sizeof(u64));
++	if (wh) {
++		n = (void *)wh->d_name.name;
++		l = wh->d_name.len;
++	}
++
++	dpri("i%d: i%lu, %s, cnt %d, nl %u, 0%o, sz %llu, blk %llu,"
++	     " ct %lld, np %lu, st 0x%lx, f 0x%x, g %x%s%.*s\n",
++	     bindex,
++	     inode->i_ino, inode->i_sb ? au_sbtype(inode->i_sb) : "??",
++	     atomic_read(&inode->i_count), inode->i_nlink, inode->i_mode,
++	     i_size_read(inode), (unsigned long long)inode->i_blocks,
++	     (long long)timespec_to_ns(&inode->i_ctime) & 0x0ffff,
++	     inode->i_mapping ? inode->i_mapping->nrpages : 0,
++	     inode->i_state, inode->i_flags, inode->i_generation,
++	     l ? ", wh " : "", l, n);
++	return 0;
++}
++
++void au_dpri_inode(struct inode *inode)
++{
++	struct au_iinfo *iinfo;
++	aufs_bindex_t bindex;
++	int err;
++
++	err = do_pri_inode(-1, inode, NULL);
++	if (err || !au_test_aufs(inode->i_sb))
++		return;
++
++	iinfo = au_ii(inode);
++	if (!iinfo)
++		return;
++	dpri("i-1: bstart %d, bend %d, gen %d\n",
++	     iinfo->ii_bstart, iinfo->ii_bend, au_iigen(inode));
++	if (iinfo->ii_bstart < 0)
++		return;
++	for (bindex = iinfo->ii_bstart; bindex <= iinfo->ii_bend; bindex++)
++		do_pri_inode(bindex, iinfo->ii_hinode[0 + bindex].hi_inode,
++			     iinfo->ii_hinode[0 + bindex].hi_whdentry);
++}
++
++static int do_pri_dentry(aufs_bindex_t bindex, struct dentry *dentry)
++{
++	struct dentry *wh = NULL;
++
++	if (!dentry || IS_ERR(dentry)) {
++		dpri("d%d: err %ld\n", bindex, PTR_ERR(dentry));
++		return -1;
++	}
++	/* do not call dget_parent() here */
++	dpri("d%d: %.*s?/%.*s, %s, cnt %d, flags 0x%x\n",
++	     bindex,
++	     AuDLNPair(dentry->d_parent), AuDLNPair(dentry),
++	     dentry->d_sb ? au_sbtype(dentry->d_sb) : "??",
++	     atomic_read(&dentry->d_count), dentry->d_flags);
++	if (bindex >= 0 && dentry->d_inode && au_test_aufs(dentry->d_sb)) {
++		struct au_iinfo *iinfo = au_ii(dentry->d_inode);
++		if (iinfo)
++			wh = iinfo->ii_hinode[0 + bindex].hi_whdentry;
++	}
++	do_pri_inode(bindex, dentry->d_inode, wh);
++	return 0;
++}
++
++void au_dpri_dentry(struct dentry *dentry)
++{
++	struct au_dinfo *dinfo;
++	aufs_bindex_t bindex;
++	int err;
++
++	err = do_pri_dentry(-1, dentry);
++	if (err || !au_test_aufs(dentry->d_sb))
++		return;
++
++	dinfo = au_di(dentry);
++	if (!dinfo)
++		return;
++	dpri("d-1: bstart %d, bend %d, bwh %d, bdiropq %d, gen %d\n",
++	     dinfo->di_bstart, dinfo->di_bend,
++	     dinfo->di_bwh, dinfo->di_bdiropq, au_digen(dentry));
++	if (dinfo->di_bstart < 0)
++		return;
++	for (bindex = dinfo->di_bstart; bindex <= dinfo->di_bend; bindex++)
++		do_pri_dentry(bindex, dinfo->di_hdentry[0 + bindex].hd_dentry);
++}
++
++static int do_pri_file(aufs_bindex_t bindex, struct file *file)
++{
++	char a[32];
++
++	if (!file || IS_ERR(file)) {
++		dpri("f%d: err %ld\n", bindex, PTR_ERR(file));
++		return -1;
++	}
++	a[0] = 0;
++	if (bindex < 0
++	    && file->f_dentry
++	    && au_test_aufs(file->f_dentry->d_sb)
++	    && au_fi(file))
++		snprintf(a, sizeof(a), ", mmapped %d", au_test_mmapped(file));
++	dpri("f%d: mode 0x%x, flags 0%o, cnt %ld, pos %llu%s\n",
++	     bindex, file->f_mode, file->f_flags, (long)file_count(file),
++	     file->f_pos, a);
++	if (file->f_dentry)
++		do_pri_dentry(bindex, file->f_dentry);
++	return 0;
++}
++
++void au_dpri_file(struct file *file)
++{
++	struct au_finfo *finfo;
++	aufs_bindex_t bindex;
++	int err;
++
++	err = do_pri_file(-1, file);
++	if (err || !file->f_dentry || !au_test_aufs(file->f_dentry->d_sb))
++		return;
++
++	finfo = au_fi(file);
++	if (!finfo)
++		return;
++	if (finfo->fi_bstart < 0)
++		return;
++	for (bindex = finfo->fi_bstart; bindex <= finfo->fi_bend; bindex++) {
++		struct au_hfile *hf;
++
++		hf = finfo->fi_hfile + bindex;
++		do_pri_file(bindex, hf ? hf->hf_file : NULL);
++	}
++}
++
++static int do_pri_br(aufs_bindex_t bindex, struct au_branch *br)
++{
++	struct vfsmount *mnt;
++	struct super_block *sb;
++
++	if (!br || IS_ERR(br))
++		goto out;
++	mnt = br->br_mnt;
++	if (!mnt || IS_ERR(mnt))
++		goto out;
++	sb = mnt->mnt_sb;
++	if (!sb || IS_ERR(sb))
++		goto out;
++
++	dpri("s%d: {perm 0x%x, cnt %d, wbr %p}, "
++	     "%s, dev 0x%02x%02x, flags 0x%lx, cnt(BIAS) %d, active %d, "
++	     "xino %d\n",
++	     bindex, br->br_perm, atomic_read(&br->br_count), br->br_wbr,
++	     au_sbtype(sb), MAJOR(sb->s_dev), MINOR(sb->s_dev),
++	     sb->s_flags, sb->s_count - S_BIAS,
++	     atomic_read(&sb->s_active), !!br->br_xino.xi_file);
++	return 0;
++
++ out:
++	dpri("s%d: err %ld\n", bindex, PTR_ERR(br));
++	return -1;
++}
++
++void au_dpri_sb(struct super_block *sb)
++{
++	struct au_sbinfo *sbinfo;
++	aufs_bindex_t bindex;
++	int err;
++	/* to reduce stack size */
++	struct {
++		struct vfsmount mnt;
++		struct au_branch fake;
++	} *a;
++
++	/* this function can be called from magic sysrq */
++	a = kzalloc(sizeof(*a), GFP_ATOMIC);
++	if (unlikely(!a)) {
++		dpri("no memory\n");
++		return;
++	}
++
++	a->mnt.mnt_sb = sb;
++	a->fake.br_perm = 0;
++	a->fake.br_mnt = &a->mnt;
++	a->fake.br_xino.xi_file = NULL;
++	atomic_set(&a->fake.br_count, 0);
++	smp_mb(); /* atomic_set */
++	err = do_pri_br(-1, &a->fake);
++	kfree(a);
++	dpri("dev 0x%x\n", sb->s_dev);
++	if (err || !au_test_aufs(sb))
++		return;
++
++	sbinfo = au_sbi(sb);
++	if (!sbinfo)
++		return;
++	dpri("nw %d, gen %u, kobj %d\n",
++	     atomic_read(&sbinfo->si_nowait.nw_len), sbinfo->si_generation,
++	     atomic_read(&sbinfo->si_kobj.kref.refcount));
++	for (bindex = 0; bindex <= sbinfo->si_bend; bindex++)
++		do_pri_br(bindex, sbinfo->si_branch[0 + bindex]);
++}
++
++/* ---------------------------------------------------------------------- */
++
++void au_dbg_sleep_jiffy(int jiffy)
++{
++	while (jiffy)
++		jiffy = schedule_timeout_uninterruptible(jiffy);
++}
++
++void au_dbg_iattr(struct iattr *ia)
++{
++#define AuBit(name)	if (ia->ia_valid & ATTR_ ## name) \
++				dpri(#name "\n")
++	AuBit(MODE);
++	AuBit(UID);
++	AuBit(GID);
++	AuBit(SIZE);
++	AuBit(ATIME);
++	AuBit(MTIME);
++	AuBit(CTIME);
++	AuBit(ATIME_SET);
++	AuBit(MTIME_SET);
++	AuBit(FORCE);
++	AuBit(ATTR_FLAG);
++	AuBit(KILL_SUID);
++	AuBit(KILL_SGID);
++	AuBit(FILE);
++	AuBit(KILL_PRIV);
++	AuBit(OPEN);
++	AuBit(TIMES_SET);
++#undef	AuBit
++	dpri("ia_file %p\n", ia->ia_file);
++}
++
++/* ---------------------------------------------------------------------- */
++
++void au_dbg_verify_dir_parent(struct dentry *dentry, unsigned int sigen)
++{
++	struct dentry *parent;
++
++	parent = dget_parent(dentry);
++	AuDebugOn(!S_ISDIR(dentry->d_inode->i_mode)
++		  || IS_ROOT(dentry)
++		  || au_digen(parent) != sigen);
++	dput(parent);
++}
++
++void au_dbg_verify_nondir_parent(struct dentry *dentry, unsigned int sigen)
++{
++	struct dentry *parent;
++
++	parent = dget_parent(dentry);
++	AuDebugOn(S_ISDIR(dentry->d_inode->i_mode)
++		  || au_digen(parent) != sigen);
++	dput(parent);
++}
++
++void au_dbg_verify_gen(struct dentry *parent, unsigned int sigen)
++{
++	int err, i, j;
++	struct au_dcsub_pages dpages;
++	struct au_dpage *dpage;
++	struct dentry **dentries;
++
++	err = au_dpages_init(&dpages, GFP_NOFS);
++	AuDebugOn(err);
++	err = au_dcsub_pages_rev(&dpages, parent, /*do_include*/1, NULL, NULL);
++	AuDebugOn(err);
++	for (i = dpages.ndpage - 1; !err && i >= 0; i--) {
++		dpage = dpages.dpages + i;
++		dentries = dpage->dentries;
++		for (j = dpage->ndentry - 1; !err && j >= 0; j--)
++			AuDebugOn(au_digen(dentries[j]) != sigen);
++	}
++	au_dpages_free(&dpages);
++}
++
++void au_dbg_verify_hf(struct au_finfo *finfo)
++{
++	struct au_hfile *hf;
++	aufs_bindex_t bend, bindex;
++
++	if (finfo->fi_bstart >= 0) {
++		bend = finfo->fi_bend;
++		for (bindex = finfo->fi_bstart; bindex <= bend; bindex++) {
++			hf = finfo->fi_hfile + bindex;
++			AuDebugOn(hf->hf_file || hf->hf_br);
++		}
++	}
++}
++
++void au_dbg_verify_kthread(void)
++{
++	if (au_test_wkq(current)) {
++		au_dbg_blocked();
++		BUG();
++	}
++}
++
++/* ---------------------------------------------------------------------- */
++
++void au_debug_sbinfo_init(struct au_sbinfo *sbinfo __maybe_unused)
++{
++#ifdef AuForceNoPlink
++	au_opt_clr(sbinfo->si_mntflags, PLINK);
++#endif
++#ifdef AuForceNoXino
++	au_opt_clr(sbinfo->si_mntflags, XINO);
++#endif
++#ifdef AuForceNoRefrof
++	au_opt_clr(sbinfo->si_mntflags, REFROF);
++#endif
++#ifdef AuForceHinotify
++	au_opt_set_udba(sbinfo->si_mntflags, UDBA_HINOTIFY);
++#endif
++}
++
++int __init au_debug_init(void)
++{
++	aufs_bindex_t bindex;
++	struct au_vdir_destr destr;
++
++	bindex = -1;
++	AuDebugOn(bindex >= 0);
++
++	destr.len = -1;
++	AuDebugOn(destr.len < NAME_MAX);
++
++#ifdef CONFIG_4KSTACKS
++	AuWarn("CONFIG_4KSTACKS is defined.\n");
++#endif
++
++#ifdef AuForceNoBrs
++	sysaufs_brs = 0;
++#endif
++
++	return 0;
++}
+diff -Nur linux-2.6.31.5.orig/fs/aufs/debug.h linux-2.6.31.5/fs/aufs/debug.h
+--- linux-2.6.31.5.orig/fs/aufs/debug.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.31.5/fs/aufs/debug.h	2009-11-15 22:16:14.000000000 +0100
+@@ -0,0 +1,261 @@
++/*
++ * Copyright (C) 2005-2009 Junjiro R. Okajima
++ *
++ * This program, aufs is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
++ */
++
++/*
++ * debug print functions
++ */
++
++#ifndef __AUFS_DEBUG_H__
++#define __AUFS_DEBUG_H__
++
++#ifdef __KERNEL__
++
++#include <asm/system.h>
++#include <linux/bug.h>
++/* #include <linux/err.h> */
++#include <linux/init.h>
++/* #include <linux/kernel.h> */
++#include <linux/delay.h>
++/* #include <linux/kd.h> */
++/* #include <linux/vt_kern.h> */
++#include <linux/sysrq.h>
++#include <linux/aufs_type.h>
++
++#ifdef CONFIG_AUFS_DEBUG
++#define AuDebugOn(a)		BUG_ON(a)
++
++/* module parameter */
++extern int aufs_debug;
++static inline void au_debug(int n)
++{
++	aufs_debug = n;
++	smp_mb();
++}
++
++static inline int au_debug_test(void)
++{
++	return aufs_debug;
++}
++#else
++#define AuDebugOn(a)		do {} while (0)
++#define au_debug()		do {} while (0)
++static inline int au_debug_test(void)
++{
++	return 0;
++}
++#endif /* CONFIG_AUFS_DEBUG */
++
++/* ---------------------------------------------------------------------- */
++
++/* debug print */
++
++#define AuDpri(lvl, fmt, arg...) \
++	printk(lvl AUFS_NAME " %s:%d:%s[%d]: " fmt, \
++	       __func__, __LINE__, current->comm, current->pid, ##arg)
++#define AuDbg(fmt, arg...) do { \
++	if (au_debug_test()) \
++		AuDpri(KERN_DEBUG, "DEBUG: " fmt, ##arg); \
++} while (0)
++#define AuLabel(l) 		AuDbg(#l "\n")
++#define AuInfo(fmt, arg...)	AuDpri(KERN_INFO, fmt, ##arg)
++#define AuWarn(fmt, arg...)	AuDpri(KERN_WARNING, fmt, ##arg)
++#define AuErr(fmt, arg...)	AuDpri(KERN_ERR, fmt, ##arg)
++#define AuIOErr(fmt, arg...)	AuErr("I/O Error, " fmt, ##arg)
++#define AuWarn1(fmt, arg...) do { \
++	static unsigned char _c; \
++	if (!_c++) \
++		AuWarn(fmt, ##arg); \
++} while (0)
++
++#define AuErr1(fmt, arg...) do { \
++	static unsigned char _c; \
++	if (!_c++) \
++		AuErr(fmt, ##arg); \
++} while (0)
++
++#define AuIOErr1(fmt, arg...) do { \
++	static unsigned char _c; \
++	if (!_c++) \
++		AuIOErr(fmt, ##arg); \
++} while (0)
++
++#define AuUnsupportMsg	"This operation is not supported." \
++			" Please report this application to aufs-users ML."
++#define AuUnsupport(fmt, args...) do { \
++	AuErr(AuUnsupportMsg "\n" fmt, ##args); \
++	dump_stack(); \
++} while (0)
++
++#define AuTraceErr(e) do { \
++	if (unlikely((e) < 0)) \
++		AuDbg("err %d\n", (int)(e)); \
++} while (0)
++
++#define AuTraceErrPtr(p) do { \
++	if (IS_ERR(p)) \
++		AuDbg("err %ld\n", PTR_ERR(p)); \
++} while (0)
++
++/* dirty macros for debug print, use with "%.*s" and caution */
++#define AuLNPair(qstr)		(qstr)->len, (qstr)->name
++#define AuDLNPair(d)		AuLNPair(&(d)->d_name)
++
++/* ---------------------------------------------------------------------- */
++
++struct au_sbinfo;
++struct au_finfo;
++struct dentry;
++#ifdef CONFIG_AUFS_DEBUG
++extern char *au_plevel;
++struct au_nhash;
++void au_dpri_whlist(struct au_nhash *whlist);
++struct au_vdir;
++void au_dpri_vdir(struct au_vdir *vdir);
++struct inode;
++void au_dpri_inode(struct inode *inode);
++void au_dpri_dentry(struct dentry *dentry);
++struct file;
++void au_dpri_file(struct file *filp);
++struct super_block;
++void au_dpri_sb(struct super_block *sb);
++
++void au_dbg_sleep_jiffy(int jiffy);
++struct iattr;
++void au_dbg_iattr(struct iattr *ia);
++
++void au_dbg_verify_dir_parent(struct dentry *dentry, unsigned int sigen);
++void au_dbg_verify_nondir_parent(struct dentry *dentry, unsigned int sigen);
++void au_dbg_verify_gen(struct dentry *parent, unsigned int sigen);
++void au_dbg_verify_hf(struct au_finfo *finfo);
++void au_dbg_verify_kthread(void);
++
++int __init au_debug_init(void);
++void au_debug_sbinfo_init(struct au_sbinfo *sbinfo);
++#define AuDbgWhlist(w) do { \
++	AuDbg(#w "\n"); \
++	au_dpri_whlist(w); \
++} while (0)
++
++#define AuDbgVdir(v) do { \
++	AuDbg(#v "\n"); \
++	au_dpri_vdir(v); \
++} while (0)
++
++#define AuDbgInode(i) do { \
++	AuDbg(#i "\n"); \
++	au_dpri_inode(i); \
++} while (0)
++
++#define AuDbgDentry(d) do { \
++	AuDbg(#d "\n"); \
++	au_dpri_dentry(d); \
++} while (0)
++
++#define AuDbgFile(f) do { \
++	AuDbg(#f "\n"); \
++	au_dpri_file(f); \
++} while (0)
++
++#define AuDbgSb(sb) do { \
++	AuDbg(#sb "\n"); \
++	au_dpri_sb(sb); \
++} while (0)
++
++#define AuDbgSleep(sec) do { \
++	AuDbg("sleep %d sec\n", sec); \
++	ssleep(sec); \
++} while (0)
++
++#define AuDbgSleepJiffy(jiffy) do { \
++	AuDbg("sleep %d jiffies\n", jiffy); \
++	au_dbg_sleep_jiffy(jiffy); \
++} while (0)
++
++#define AuDbgIAttr(ia) do { \
++	AuDbg("ia_valid 0x%x\n", (ia)->ia_valid); \
++	au_dbg_iattr(ia); \
++} while (0)
++#else
++static inline void au_dbg_verify_dir_parent(struct dentry *dentry,
++					    unsigned int sigen)
++{
++	/* empty */
++}
++static inline void au_dbg_verify_nondir_parent(struct dentry *dentry,
++					       unsigned int sigen)
++{
++	/* empty */
++}
++static inline void au_dbg_verify_gen(struct dentry *parent, unsigned int sigen)
++{
++	/* empty */
++}
++static inline void au_dbg_verify_hf(struct au_finfo *finfo)
++{
++	/* empty */
++}
++static inline void au_dbg_verify_kthread(void)
++{
++	/* empty */
++}
++
++static inline int au_debug_init(void)
++{
++	return 0;
++}
++static inline void au_debug_sbinfo_init(struct au_sbinfo *sbinfo)
++{
++	/* empty */
++}
++#define AuDbgWhlist(w)		do {} while (0)
++#define AuDbgVdir(v)		do {} while (0)
++#define AuDbgInode(i)		do {} while (0)
++#define AuDbgDentry(d)		do {} while (0)
++#define AuDbgFile(f)		do {} while (0)
++#define AuDbgSb(sb)		do {} while (0)
++#define AuDbgSleep(sec)		do {} while (0)
++#define AuDbgSleepJiffy(jiffy)	do {} while (0)
++#define AuDbgIAttr(ia)		do {} while (0)
++#endif /* CONFIG_AUFS_DEBUG */
++
++/* ---------------------------------------------------------------------- */
++
++#ifdef CONFIG_AUFS_MAGIC_SYSRQ
++int __init au_sysrq_init(void);
++void au_sysrq_fin(void);
++
++#ifdef CONFIG_HW_CONSOLE
++#define au_dbg_blocked() do { \
++	WARN_ON(1); \
++	handle_sysrq('w', vc_cons[fg_console].d->vc_tty); \
++} while (0)
++#else
++#define au_dbg_blocked()	do {} while (0)
++#endif
++
++#else
++static inline int au_sysrq_init(void)
++{
++	return 0;
++}
++#define au_sysrq_fin()		do {} while (0)
++#define au_dbg_blocked()	do {} while (0)
++#endif /* CONFIG_AUFS_MAGIC_SYSRQ */
++
++#endif /* __KERNEL__ */
++#endif /* __AUFS_DEBUG_H__ */
+diff -Nur linux-2.6.31.5.orig/fs/aufs/dentry.c linux-2.6.31.5/fs/aufs/dentry.c
+--- linux-2.6.31.5.orig/fs/aufs/dentry.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.31.5/fs/aufs/dentry.c	2009-11-15 22:02:37.000000000 +0100
+@@ -0,0 +1,880 @@
++/*
++ * Copyright (C) 2005-2009 Junjiro R. Okajima
++ *
++ * This program, aufs is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
++ */
++
++/*
++ * lookup and dentry operations
++ */
++
++#include <linux/namei.h>
++#include "aufs.h"
++
++static void au_h_nd(struct nameidata *h_nd, struct nameidata *nd)
++{
++	if (nd) {
++		*h_nd = *nd;
++
++		/*
++		 * gave up supporting LOOKUP_CREATE/OPEN for lower fs,
++		 * due to whiteout and branch permission.
++		 */
++		h_nd->flags &= ~(/*LOOKUP_PARENT |*/ LOOKUP_OPEN | LOOKUP_CREATE
++				 | LOOKUP_FOLLOW);
++		/* unnecessary? */
++		h_nd->intent.open.file = NULL;
++	} else
++		memset(h_nd, 0, sizeof(*h_nd));
++}
++
++struct au_lkup_one_args {
++	struct dentry **errp;
++	struct qstr *name;
++	struct dentry *h_parent;
++	struct au_branch *br;
++	struct nameidata *nd;
++};
++
++struct dentry *au_lkup_one(struct qstr *name, struct dentry *h_parent,
++			   struct au_branch *br, struct nameidata *nd)
++{
++	struct dentry *h_dentry;
++	int err;
++	struct nameidata h_nd;
++
++	if (au_test_fs_null_nd(h_parent->d_sb))
++		return vfsub_lookup_one_len(name->name, h_parent, name->len);
++
++	au_h_nd(&h_nd, nd);
++	h_nd.path.dentry = h_parent;
++	h_nd.path.mnt = br->br_mnt;
++
++	err = __lookup_one_len(name->name, &h_nd.last, NULL, name->len);
++	h_dentry = ERR_PTR(err);
++	if (!err) {
++		path_get(&h_nd.path);
++		h_dentry = vfsub_lookup_hash(&h_nd);
++		path_put(&h_nd.path);
++	}
++
++	return h_dentry;
++}
++
++static void au_call_lkup_one(void *args)
++{
++	struct au_lkup_one_args *a = args;
++	*a->errp = au_lkup_one(a->name, a->h_parent, a->br, a->nd);
++}
++
++#define AuLkup_ALLOW_NEG	1
++#define au_ftest_lkup(flags, name)	((flags) & AuLkup_##name)
++#define au_fset_lkup(flags, name)	{ (flags) |= AuLkup_##name; }
++#define au_fclr_lkup(flags, name)	{ (flags) &= ~AuLkup_##name; }
++
++struct au_do_lookup_args {
++	unsigned int		flags;
++	mode_t			type;
++	struct nameidata	*nd;
++};
++
++/*
++ * returns positive/negative dentry, NULL or an error.
++ * NULL means whiteout-ed or not-found.
++ */
++static struct dentry*
++au_do_lookup(struct dentry *h_parent, struct dentry *dentry,
++	     aufs_bindex_t bindex, struct qstr *wh_name,
++	     struct au_do_lookup_args *args)
++{
++	struct dentry *h_dentry;
++	struct inode *h_inode, *inode;
++	struct qstr *name;
++	struct au_branch *br;
++	int wh_found, opq;
++	unsigned char wh_able;
++	const unsigned char allow_neg = !!au_ftest_lkup(args->flags, ALLOW_NEG);
++
++	name = &dentry->d_name;
++	wh_found = 0;
++	br = au_sbr(dentry->d_sb, bindex);
++	wh_able = !!au_br_whable(br->br_perm);
++	if (wh_able)
++		wh_found = au_wh_test(h_parent, wh_name, br, /*try_sio*/0);
++	h_dentry = ERR_PTR(wh_found);
++	if (!wh_found)
++		goto real_lookup;
++	if (unlikely(wh_found < 0))
++		goto out;
++
++	/* We found a whiteout */
++	/* au_set_dbend(dentry, bindex); */
++	au_set_dbwh(dentry, bindex);
++	if (!allow_neg)
++		return NULL; /* success */
++
++ real_lookup:
++	h_dentry = au_lkup_one(name, h_parent, br, args->nd);
++	if (IS_ERR(h_dentry))
++		goto out;
++
++	h_inode = h_dentry->d_inode;
++	if (!h_inode) {
++		if (!allow_neg)
++			goto out_neg;
++	} else if (wh_found
++		   || (args->type && args->type != (h_inode->i_mode & S_IFMT)))
++		goto out_neg;
++
++	if (au_dbend(dentry) <= bindex)
++		au_set_dbend(dentry, bindex);
++	if (au_dbstart(dentry) < 0 || bindex < au_dbstart(dentry))
++		au_set_dbstart(dentry, bindex);
++	au_set_h_dptr(dentry, bindex, h_dentry);
++
++	inode = dentry->d_inode;
++	if (!h_inode || !S_ISDIR(h_inode->i_mode) || !wh_able
++	    || (inode && !S_ISDIR(inode->i_mode)))
++		goto out; /* success */
++
++	mutex_lock_nested(&h_inode->i_mutex, AuLsc_I_CHILD);
++	opq = au_diropq_test(h_dentry, br);
++	mutex_unlock(&h_inode->i_mutex);
++	if (opq > 0)
++		au_set_dbdiropq(dentry, bindex);
++	else if (unlikely(opq < 0)) {
++		au_set_h_dptr(dentry, bindex, NULL);
++		h_dentry = ERR_PTR(opq);
++	}
++	goto out;
++
++ out_neg:
++	dput(h_dentry);
++	h_dentry = NULL;
++ out:
++	return h_dentry;
++}
++
++static int au_test_shwh(struct super_block *sb, const struct qstr *name)
++{
++	if (unlikely(!au_opt_test(au_mntflags(sb), SHWH)
++		     && !strncmp(name->name, AUFS_WH_PFX, AUFS_WH_PFX_LEN)))
++		return -EPERM;
++	return 0;
++}
++
++/*
++ * returns the number of lower positive dentries,
++ * otherwise an error.
++ * can be called at unlinking with @type is zero.
++ */
++int au_lkup_dentry(struct dentry *dentry, aufs_bindex_t bstart, mode_t type,
++		   struct nameidata *nd)
++{
++	int npositive, err;
++	aufs_bindex_t bindex, btail, bdiropq;
++	unsigned char isdir;
++	struct qstr whname;
++	struct au_do_lookup_args args = {
++		.flags	= 0,
++		.type	= type,
++		.nd	= nd
++	};
++	const struct qstr *name = &dentry->d_name;
++	struct dentry *parent;
++	struct inode *inode;
++
++	parent = dget_parent(dentry);
++	err = au_test_shwh(dentry->d_sb, name);
++	if (unlikely(err))
++		goto out;
++
++	err = au_wh_name_alloc(&whname, name);
++	if (unlikely(err))
++		goto out;
++
++	inode = dentry->d_inode;
++	isdir = !!(inode && S_ISDIR(inode->i_mode));
++	if (!type)
++		au_fset_lkup(args.flags, ALLOW_NEG);
++
++	npositive = 0;
++	btail = au_dbtaildir(parent);
++	for (bindex = bstart; bindex <= btail; bindex++) {
++		struct dentry *h_parent, *h_dentry;
++		struct inode *h_inode, *h_dir;
++
++		h_dentry = au_h_dptr(dentry, bindex);
++		if (h_dentry) {
++			if (h_dentry->d_inode)
++				npositive++;
++			if (type != S_IFDIR)
++				break;
++			continue;
++		}
++		h_parent = au_h_dptr(parent, bindex);
++		if (!h_parent)
++			continue;
++		h_dir = h_parent->d_inode;
++		if (!h_dir || !S_ISDIR(h_dir->i_mode))
++			continue;
++
++		mutex_lock_nested(&h_dir->i_mutex, AuLsc_I_PARENT);
++		h_dentry = au_do_lookup(h_parent, dentry, bindex, &whname,
++					&args);
++		mutex_unlock(&h_dir->i_mutex);
++		err = PTR_ERR(h_dentry);
++		if (IS_ERR(h_dentry))
++			goto out_wh;
++		au_fclr_lkup(args.flags, ALLOW_NEG);
++
++		if (au_dbwh(dentry) >= 0)
++			break;
++		if (!h_dentry)
++			continue;
++		h_inode = h_dentry->d_inode;
++		if (!h_inode)
++			continue;
++		npositive++;
++		if (!args.type)
++			args.type = h_inode->i_mode & S_IFMT;
++		if (args.type != S_IFDIR)
++			break;
++		else if (isdir) {
++			/* the type of lower may be different */
++			bdiropq = au_dbdiropq(dentry);
++			if (bdiropq >= 0 && bdiropq <= bindex)
++				break;
++		}
++	}
++
++	if (npositive) {
++		AuLabel(positive);
++		au_update_dbstart(dentry);
++	}
++	err = npositive;
++	if (unlikely(!au_opt_test(au_mntflags(dentry->d_sb), UDBA_NONE)
++		     && au_dbstart(dentry) < 0))
++		/* both of real entry and whiteout found */
++		err = -EIO;
++
++ out_wh:
++	kfree(whname.name);
++ out:
++	dput(parent);
++	return err;
++}
++
++struct dentry *au_sio_lkup_one(struct qstr *name, struct dentry *parent,
++			       struct au_branch *br)
++{
++	struct dentry *dentry;
++	int wkq_err;
++
++	if (!au_test_h_perm_sio(parent->d_inode, MAY_EXEC))
++		dentry = au_lkup_one(name, parent, br, /*nd*/NULL);
++	else {
++		struct au_lkup_one_args args = {
++			.errp		= &dentry,
++			.name		= name,
++			.h_parent	= parent,
++			.br		= br,
++			.nd		= NULL
++		};
++
++		wkq_err = au_wkq_wait(au_call_lkup_one, &args);
++		if (unlikely(wkq_err))
++			dentry = ERR_PTR(wkq_err);
++	}
++
++	return dentry;
++}
++
++/*
++ * lookup @dentry on @bindex which should be negative.
++ */
++int au_lkup_neg(struct dentry *dentry, aufs_bindex_t bindex)
++{
++	int err;
++	struct dentry *parent, *h_parent, *h_dentry;
++	struct qstr *name;
++
++	name = &dentry->d_name;
++	parent = dget_parent(dentry);
++	h_parent = au_h_dptr(parent, bindex);
++	h_dentry = au_sio_lkup_one(name, h_parent,
++				   au_sbr(dentry->d_sb, bindex));
++	err = PTR_ERR(h_dentry);
++	if (IS_ERR(h_dentry))
++		goto out;
++	if (unlikely(h_dentry->d_inode)) {
++		err = -EIO;
++		AuIOErr("b%d %.*s should be negative.\n",
++			bindex, AuDLNPair(h_dentry));
++		dput(h_dentry);
++		goto out;
++	}
++
++	if (bindex < au_dbstart(dentry))
++		au_set_dbstart(dentry, bindex);
++	if (au_dbend(dentry) < bindex)
++		au_set_dbend(dentry, bindex);
++	au_set_h_dptr(dentry, bindex, h_dentry);
++	err = 0;
++
++ out:
++	dput(parent);
++	return err;
++}
++
++/* ---------------------------------------------------------------------- */
++
++/* subset of struct inode */
++struct au_iattr {
++	unsigned long		i_ino;
++	/* unsigned int		i_nlink; */
++	uid_t			i_uid;
++	gid_t			i_gid;
++	u64			i_version;
++/*
++	loff_t			i_size;
++	blkcnt_t		i_blocks;
++*/
++	umode_t			i_mode;
++};
++
++static void au_iattr_save(struct au_iattr *ia, struct inode *h_inode)
++{
++	ia->i_ino = h_inode->i_ino;
++	/* ia->i_nlink = h_inode->i_nlink; */
++	ia->i_uid = h_inode->i_uid;
++	ia->i_gid = h_inode->i_gid;
++	ia->i_version = h_inode->i_version;
++/*
++	ia->i_size = h_inode->i_size;
++	ia->i_blocks = h_inode->i_blocks;
++*/
++	ia->i_mode = (h_inode->i_mode & S_IFMT);
++}
++
++static int au_iattr_test(struct au_iattr *ia, struct inode *h_inode)
++{
++	return ia->i_ino != h_inode->i_ino
++		/* || ia->i_nlink != h_inode->i_nlink */
++		|| ia->i_uid != h_inode->i_uid
++		|| ia->i_gid != h_inode->i_gid
++		|| ia->i_version != h_inode->i_version
++/*
++		|| ia->i_size != h_inode->i_size
++		|| ia->i_blocks != h_inode->i_blocks
++*/
++		|| ia->i_mode != (h_inode->i_mode & S_IFMT);
++}
++
++static int au_h_verify_dentry(struct dentry *h_dentry, struct dentry *h_parent,
++			      struct au_branch *br)
++{
++	int err;
++	struct au_iattr ia;
++	struct inode *h_inode;
++	struct dentry *h_d;
++	struct super_block *h_sb;
++
++	err = 0;
++	memset(&ia, -1, sizeof(ia));
++	h_sb = h_dentry->d_sb;
++	h_inode = h_dentry->d_inode;
++	if (h_inode)
++		au_iattr_save(&ia, h_inode);
++	else if (au_test_nfs(h_sb) || au_test_fuse(h_sb))
++		/* nfs d_revalidate may return 0 for negative dentry */
++		/* fuse d_revalidate always return 0 for negative dentry */
++		goto out;
++
++	/* main purpose is namei.c:cached_lookup() and d_revalidate */
++	h_d = au_lkup_one(&h_dentry->d_name, h_parent, br, /*nd*/NULL);
++	err = PTR_ERR(h_d);
++	if (IS_ERR(h_d))
++		goto out;
++
++	err = 0;
++	if (unlikely(h_d != h_dentry
++		     || h_d->d_inode != h_inode
++		     || (h_inode && au_iattr_test(&ia, h_inode))))
++		err = au_busy_or_stale();
++	dput(h_d);
++
++ out:
++	AuTraceErr(err);
++	return err;
++}
++
++int au_h_verify(struct dentry *h_dentry, unsigned int udba, struct inode *h_dir,
++		struct dentry *h_parent, struct au_branch *br)
++{
++	int err;
++
++	err = 0;
++	if (udba == AuOpt_UDBA_REVAL) {
++		IMustLock(h_dir);
++		err = (h_dentry->d_parent->d_inode != h_dir);
++	} else if (udba == AuOpt_UDBA_HINOTIFY)
++		err = au_h_verify_dentry(h_dentry, h_parent, br);
++
++	return err;
++}
++
++/* ---------------------------------------------------------------------- */
++
++static void au_do_refresh_hdentry(struct au_hdentry *p, struct au_dinfo *dinfo,
++				  struct dentry *parent)
++{
++	struct dentry *h_d, *h_dp;
++	struct au_hdentry tmp, *q;
++	struct super_block *sb;
++	aufs_bindex_t new_bindex, bindex, bend, bwh, bdiropq;
++
++	AuRwMustWriteLock(&dinfo->di_rwsem);
++
++	bend = dinfo->di_bend;
++	bwh = dinfo->di_bwh;
++	bdiropq = dinfo->di_bdiropq;
++	for (bindex = dinfo->di_bstart; bindex <= bend; bindex++, p++) {
++		h_d = p->hd_dentry;
++		if (!h_d)
++			continue;
++
++		h_dp = dget_parent(h_d);
++		if (h_dp == au_h_dptr(parent, bindex)) {
++			dput(h_dp);
++			continue;
++		}
++
++		new_bindex = au_find_dbindex(parent, h_dp);
++		dput(h_dp);
++		if (dinfo->di_bwh == bindex)
++			bwh = new_bindex;
++		if (dinfo->di_bdiropq == bindex)
++			bdiropq = new_bindex;
++		if (new_bindex < 0) {
++			au_hdput(p);
++			p->hd_dentry = NULL;
++			continue;
++		}
++
++		/* swap two lower dentries, and loop again */
++		q = dinfo->di_hdentry + new_bindex;
++		tmp = *q;
++		*q = *p;
++		*p = tmp;
++		if (tmp.hd_dentry) {
++			bindex--;
++			p--;
++		}
++	}
++
++	sb = parent->d_sb;
++	dinfo->di_bwh = -1;
++	if (bwh >= 0 && bwh <= au_sbend(sb) && au_sbr_whable(sb, bwh))
++		dinfo->di_bwh = bwh;
++
++	dinfo->di_bdiropq = -1;
++	if (bdiropq >= 0
++	    && bdiropq <= au_sbend(sb)
++	    && au_sbr_whable(sb, bdiropq))
++		dinfo->di_bdiropq = bdiropq;
++
++	bend = au_dbend(parent);
++	p = dinfo->di_hdentry;
++	for (bindex = 0; bindex <= bend; bindex++, p++)
++		if (p->hd_dentry) {
++			dinfo->di_bstart = bindex;
++			break;
++		}
++
++	p = dinfo->di_hdentry + bend;
++	for (bindex = bend; bindex >= 0; bindex--, p--)
++		if (p->hd_dentry) {
++			dinfo->di_bend = bindex;
++			break;
++		}
++}
++
++/*
++ * returns the number of found lower positive dentries,
++ * otherwise an error.
++ */
++int au_refresh_hdentry(struct dentry *dentry, mode_t type)
++{
++	int npositive, err;
++	unsigned int sigen;
++	aufs_bindex_t bstart;
++	struct au_dinfo *dinfo;
++	struct super_block *sb;
++	struct dentry *parent;
++
++	DiMustWriteLock(dentry);
++
++	sb = dentry->d_sb;
++	AuDebugOn(IS_ROOT(dentry));
++	sigen = au_sigen(sb);
++	parent = dget_parent(dentry);
++	AuDebugOn(au_digen(parent) != sigen
++		  || au_iigen(parent->d_inode) != sigen);
++
++	dinfo = au_di(dentry);
++	err = au_di_realloc(dinfo, au_sbend(sb) + 1);
++	npositive = err;
++	if (unlikely(err))
++		goto out;
++	au_do_refresh_hdentry(dinfo->di_hdentry + dinfo->di_bstart, dinfo,
++			      parent);
++
++	npositive = 0;
++	bstart = au_dbstart(parent);
++	if (type != S_IFDIR && dinfo->di_bstart == bstart)
++		goto out_dgen; /* success */
++
++	npositive = au_lkup_dentry(dentry, bstart, type, /*nd*/NULL);
++	if (npositive < 0)
++		goto out;
++	if (dinfo->di_bwh >= 0 && dinfo->di_bwh <= dinfo->di_bstart)
++		d_drop(dentry);
++
++ out_dgen:
++	au_update_digen(dentry);
++ out:
++	dput(parent);
++	AuTraceErr(npositive);
++	return npositive;
++}
++
++static noinline_for_stack
++int au_do_h_d_reval(struct dentry *h_dentry, struct nameidata *nd,
++		    struct dentry *dentry, aufs_bindex_t bindex)
++{
++	int err, valid;
++	int (*reval)(struct dentry *, struct nameidata *);
++
++	err = 0;
++	reval = NULL;
++	if (h_dentry->d_op)
++		reval = h_dentry->d_op->d_revalidate;
++	if (!reval)
++		goto out;
++
++	AuDbg("b%d\n", bindex);
++	if (au_test_fs_null_nd(h_dentry->d_sb))
++		/* it may return tri-state */
++		valid = reval(h_dentry, NULL);
++	else {
++		struct nameidata h_nd;
++		int locked;
++		struct dentry *parent;
++
++		au_h_nd(&h_nd, nd);
++		parent = nd->path.dentry;
++		locked = (nd && nd->path.dentry != dentry);
++		if (locked)
++			di_read_lock_parent(parent, AuLock_IR);
++		BUG_ON(bindex > au_dbend(parent));
++		h_nd.path.dentry = au_h_dptr(parent, bindex);
++		BUG_ON(!h_nd.path.dentry);
++		h_nd.path.mnt = au_sbr(parent->d_sb, bindex)->br_mnt;
++		path_get(&h_nd.path);
++		valid = reval(h_dentry, &h_nd);
++		path_put(&h_nd.path);
++		if (locked)
++			di_read_unlock(parent, AuLock_IR);
++	}
++
++	if (unlikely(valid < 0))
++		err = valid;
++	else if (!valid)
++		err = -EINVAL;
++
++ out:
++	AuTraceErr(err);
++	return err;
++}
++
++/* todo: remove this */
++static int h_d_revalidate(struct dentry *dentry, struct inode *inode,
++			  struct nameidata *nd, int do_udba)
++{
++	int err;
++	umode_t mode, h_mode;
++	aufs_bindex_t bindex, btail, bstart, ibs, ibe;
++	unsigned char plus, unhashed, is_root, h_plus;
++	struct inode *first, *h_inode, *h_cached_inode;
++	struct dentry *h_dentry;
++	struct qstr *name, *h_name;
++
++	err = 0;
++	plus = 0;
++	mode = 0;
++	first = NULL;
++	ibs = -1;
++	ibe = -1;
++	unhashed = !!d_unhashed(dentry);
++	is_root = !!IS_ROOT(dentry);
++	name = &dentry->d_name;
++
++	/*
++	 * Theoretically, REVAL test should be unnecessary in case of INOTIFY.
++	 * But inotify doesn't fire some necessary events,
++	 *	IN_ATTRIB for atime/nlink/pageio
++	 *	IN_DELETE for NFS dentry
++	 * Let's do REVAL test too.
++	 */
++	if (do_udba && inode) {
++		mode = (inode->i_mode & S_IFMT);
++		plus = (inode->i_nlink > 0);
++		first = au_h_iptr(inode, au_ibstart(inode));
++		ibs = au_ibstart(inode);
++		ibe = au_ibend(inode);
++	}
++
++	bstart = au_dbstart(dentry);
++	btail = bstart;
++	if (inode && S_ISDIR(inode->i_mode))
++		btail = au_dbtaildir(dentry);
++	for (bindex = bstart; bindex <= btail; bindex++) {
++		h_dentry = au_h_dptr(dentry, bindex);
++		if (!h_dentry)
++			continue;
++
++		AuDbg("b%d, %.*s\n", bindex, AuDLNPair(h_dentry));
++		h_name = &h_dentry->d_name;
++		if (unlikely(do_udba
++			     && !is_root
++			     && (unhashed != !!d_unhashed(h_dentry)
++				 || name->len != h_name->len
++				 || memcmp(name->name, h_name->name, name->len))
++			    )) {
++			AuDbg("unhash 0x%x 0x%x, %.*s %.*s\n",
++				  unhashed, d_unhashed(h_dentry),
++				  AuDLNPair(dentry), AuDLNPair(h_dentry));
++			goto err;
++		}
++
++		err = au_do_h_d_reval(h_dentry, nd, dentry, bindex);
++		if (unlikely(err))
++			/* do not goto err, to keep the errno */
++			break;
++
++		/* todo: plink too? */
++		if (!do_udba)
++			continue;
++
++		/* UDBA tests */
++		h_inode = h_dentry->d_inode;
++		if (unlikely(!!inode != !!h_inode))
++			goto err;
++
++		h_plus = plus;
++		h_mode = mode;
++		h_cached_inode = h_inode;
++		if (h_inode) {
++			h_mode = (h_inode->i_mode & S_IFMT);
++			h_plus = (h_inode->i_nlink > 0);
++		}
++		if (inode && ibs <= bindex && bindex <= ibe)
++			h_cached_inode = au_h_iptr(inode, bindex);
++
++		if (unlikely(plus != h_plus
++			     || mode != h_mode
++			     || h_cached_inode != h_inode))
++			goto err;
++		continue;
++
++	err:
++		err = -EINVAL;
++		break;
++	}
++
++	return err;
++}
++
++static int simple_reval_dpath(struct dentry *dentry, unsigned int sigen)
++{
++	int err;
++	struct dentry *parent;
++	struct inode *inode;
++
++	inode = dentry->d_inode;
++	if (au_digen(dentry) == sigen && au_iigen(inode) == sigen)
++		return 0;
++
++	parent = dget_parent(dentry);
++	di_read_lock_parent(parent, AuLock_IR);
++	AuDebugOn(au_digen(parent) != sigen
++		  || au_iigen(parent->d_inode) != sigen);
++	au_dbg_verify_gen(parent, sigen);
++
++	/* returns a number of positive dentries */
++	err = au_refresh_hdentry(dentry, inode->i_mode & S_IFMT);
++	if (err >= 0)
++		err = au_refresh_hinode(inode, dentry);
++
++	di_read_unlock(parent, AuLock_IR);
++	dput(parent);
++	return err;
++}
++
++int au_reval_dpath(struct dentry *dentry, unsigned int sigen)
++{
++	int err;
++	struct dentry *d, *parent;
++	struct inode *inode;
++
++	if (!au_ftest_si(au_sbi(dentry->d_sb), FAILED_REFRESH_DIRS))
++		return simple_reval_dpath(dentry, sigen);
++
++	/* slow loop, keep it simple and stupid */
++	/* cf: au_cpup_dirs() */
++	err = 0;
++	parent = NULL;
++	while (au_digen(dentry) != sigen
++	       || au_iigen(dentry->d_inode) != sigen) {
++		d = dentry;
++		while (1) {
++			dput(parent);
++			parent = dget_parent(d);
++			if (au_digen(parent) == sigen
++			    && au_iigen(parent->d_inode) == sigen)
++				break;
++			d = parent;
++		}
++
++		inode = d->d_inode;
++		if (d != dentry)
++			di_write_lock_child(d);
++
++		/* someone might update our dentry while we were sleeping */
++		if (au_digen(d) != sigen || au_iigen(d->d_inode) != sigen) {
++			di_read_lock_parent(parent, AuLock_IR);
++			/* returns a number of positive dentries */
++			err = au_refresh_hdentry(d, inode->i_mode & S_IFMT);
++			if (err >= 0)
++				err = au_refresh_hinode(inode, d);
++			di_read_unlock(parent, AuLock_IR);
++		}
++
++		if (d != dentry)
++			di_write_unlock(d);
++		dput(parent);
++		if (unlikely(err))
++			break;
++	}
++
++	return err;
++}
++
++/*
++ * if valid returns 1, otherwise 0.
++ */
++static int aufs_d_revalidate(struct dentry *dentry, struct nameidata *nd)
++{
++	int valid, err;
++	unsigned int sigen;
++	unsigned char do_udba;
++	struct super_block *sb;
++	struct inode *inode;
++
++	err = -EINVAL;
++	sb = dentry->d_sb;
++	inode = dentry->d_inode;
++	aufs_read_lock(dentry, AuLock_FLUSH | AuLock_DW);
++	sigen = au_sigen(sb);
++	if (au_digen(dentry) != sigen) {
++		AuDebugOn(IS_ROOT(dentry));
++		if (inode)
++			err = au_reval_dpath(dentry, sigen);
++		if (unlikely(err))
++			goto out_dgrade;
++		AuDebugOn(au_digen(dentry) != sigen);
++	}
++	if (inode && au_iigen(inode) != sigen) {
++		AuDebugOn(IS_ROOT(dentry));
++		err = au_refresh_hinode(inode, dentry);
++		if (unlikely(err))
++			goto out_dgrade;
++		AuDebugOn(au_iigen(inode) != sigen);
++	}
++	di_downgrade_lock(dentry, AuLock_IR);
++
++	AuDebugOn(au_digen(dentry) != sigen);
++	AuDebugOn(inode && au_iigen(inode) != sigen);
++	err = -EINVAL;
++	do_udba = !au_opt_test(au_mntflags(sb), UDBA_NONE);
++	if (do_udba && inode) {
++		aufs_bindex_t bstart = au_ibstart(inode);
++
++		if (bstart >= 0
++		    && au_test_higen(inode, au_h_iptr(inode, bstart)))
++			goto out;
++	}
++
++	err = h_d_revalidate(dentry, inode, nd, do_udba);
++	if (unlikely(!err && do_udba && au_dbstart(dentry) < 0))
++		/* both of real entry and whiteout found */
++		err = -EIO;
++	goto out;
++
++ out_dgrade:
++	di_downgrade_lock(dentry, AuLock_IR);
++ out:
++	au_store_oflag(nd, inode);
++	aufs_read_unlock(dentry, AuLock_IR);
++	AuTraceErr(err);
++	valid = !err;
++	if (!valid)
++		AuDbg("%.*s invalid\n", AuDLNPair(dentry));
++	return valid;
++}
++
++static void aufs_d_release(struct dentry *dentry)
++{
++	struct au_dinfo *dinfo;
++	aufs_bindex_t bend, bindex;
++
++	dinfo = dentry->d_fsdata;
++	if (!dinfo)
++		return;
++
++	/* dentry may not be revalidated */
++	bindex = dinfo->di_bstart;
++	if (bindex >= 0) {
++		struct au_hdentry *p;
++
++		bend = dinfo->di_bend;
++		p = dinfo->di_hdentry + bindex;
++		while (bindex++ <= bend) {
++			if (p->hd_dentry)
++				au_hdput(p);
++			p++;
++		}
++	}
++	kfree(dinfo->di_hdentry);
++	AuRwDestroy(&dinfo->di_rwsem);
++	au_cache_free_dinfo(dinfo);
++	au_hin_di_reinit(dentry);
++}
++
++struct dentry_operations aufs_dop = {
++	.d_revalidate	= aufs_d_revalidate,
++	.d_release	= aufs_d_release
++};
+diff -Nur linux-2.6.31.5.orig/fs/aufs/dentry.h linux-2.6.31.5/fs/aufs/dentry.h
+--- linux-2.6.31.5.orig/fs/aufs/dentry.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.31.5/fs/aufs/dentry.h	2009-11-15 22:02:37.000000000 +0100
+@@ -0,0 +1,231 @@
++/*
++ * Copyright (C) 2005-2009 Junjiro R. Okajima
++ *
++ * This program, aufs is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
++ */
++
++/*
++ * lookup and dentry operations
++ */
++
++#ifndef __AUFS_DENTRY_H__
++#define __AUFS_DENTRY_H__
++
++#ifdef __KERNEL__
++
++#include <linux/dcache.h>
++#include <linux/aufs_type.h>
++#include "rwsem.h"
++
++/* make a single member structure for future use */
++/* todo: remove this structure */
++struct au_hdentry {
++	struct dentry		*hd_dentry;
++};
++
++struct au_dinfo {
++	atomic_t		di_generation;
++
++	struct au_rwsem		di_rwsem;
++	aufs_bindex_t		di_bstart, di_bend, di_bwh, di_bdiropq;
++	struct au_hdentry	*di_hdentry;
++};
++
++/* ---------------------------------------------------------------------- */
++
++/* dentry.c */
++extern struct dentry_operations aufs_dop;
++struct au_branch;
++struct dentry *au_lkup_one(struct qstr *name, struct dentry *h_parent,
++			   struct au_branch *br, struct nameidata *nd);
++struct dentry *au_sio_lkup_one(struct qstr *name, struct dentry *parent,
++			       struct au_branch *br);
++int au_h_verify(struct dentry *h_dentry, unsigned int udba, struct inode *h_dir,
++		struct dentry *h_parent, struct au_branch *br);
++
++int au_lkup_dentry(struct dentry *dentry, aufs_bindex_t bstart, mode_t type,
++		   struct nameidata *nd);
++int au_lkup_neg(struct dentry *dentry, aufs_bindex_t bindex);
++int au_refresh_hdentry(struct dentry *dentry, mode_t type);
++int au_reval_dpath(struct dentry *dentry, unsigned int sigen);
++
++/* dinfo.c */
++int au_alloc_dinfo(struct dentry *dentry);
++int au_di_realloc(struct au_dinfo *dinfo, int nbr);
++
++void di_read_lock(struct dentry *d, int flags, unsigned int lsc);
++void di_read_unlock(struct dentry *d, int flags);
++void di_downgrade_lock(struct dentry *d, int flags);
++void di_write_lock(struct dentry *d, unsigned int lsc);
++void di_write_unlock(struct dentry *d);
++void di_write_lock2_child(struct dentry *d1, struct dentry *d2, int isdir);
++void di_write_lock2_parent(struct dentry *d1, struct dentry *d2, int isdir);
++void di_write_unlock2(struct dentry *d1, struct dentry *d2);
++
++struct dentry *au_h_dptr(struct dentry *dentry, aufs_bindex_t bindex);
++aufs_bindex_t au_dbtail(struct dentry *dentry);
++aufs_bindex_t au_dbtaildir(struct dentry *dentry);
++
++void au_set_h_dptr(struct dentry *dentry, aufs_bindex_t bindex,
++		   struct dentry *h_dentry);
++void au_update_digen(struct dentry *dentry);
++void au_update_dbrange(struct dentry *dentry, int do_put_zero);
++void au_update_dbstart(struct dentry *dentry);
++void au_update_dbend(struct dentry *dentry);
++int au_find_dbindex(struct dentry *dentry, struct dentry *h_dentry);
++
++/* ---------------------------------------------------------------------- */
++
++static inline struct au_dinfo *au_di(struct dentry *dentry)
++{
++	return dentry->d_fsdata;
++}
++
++/* ---------------------------------------------------------------------- */
++
++/* lock subclass for dinfo */
++enum {
++	AuLsc_DI_CHILD,		/* child first */
++	AuLsc_DI_CHILD2,	/* rename(2), link(2), and cpup at hinotify */
++	AuLsc_DI_CHILD3,	/* copyup dirs */
++	AuLsc_DI_PARENT,
++	AuLsc_DI_PARENT2,
++	AuLsc_DI_PARENT3
++};
++
++/*
++ * di_read_lock_child, di_write_lock_child,
++ * di_read_lock_child2, di_write_lock_child2,
++ * di_read_lock_child3, di_write_lock_child3,
++ * di_read_lock_parent, di_write_lock_parent,
++ * di_read_lock_parent2, di_write_lock_parent2,
++ * di_read_lock_parent3, di_write_lock_parent3,
++ */
++#define AuReadLockFunc(name, lsc) \
++static inline void di_read_lock_##name(struct dentry *d, int flags) \
++{ di_read_lock(d, flags, AuLsc_DI_##lsc); }
++
++#define AuWriteLockFunc(name, lsc) \
++static inline void di_write_lock_##name(struct dentry *d) \
++{ di_write_lock(d, AuLsc_DI_##lsc); }
++
++#define AuRWLockFuncs(name, lsc) \
++	AuReadLockFunc(name, lsc) \
++	AuWriteLockFunc(name, lsc)
++
++AuRWLockFuncs(child, CHILD);
++AuRWLockFuncs(child2, CHILD2);
++AuRWLockFuncs(child3, CHILD3);
++AuRWLockFuncs(parent, PARENT);
++AuRWLockFuncs(parent2, PARENT2);
++AuRWLockFuncs(parent3, PARENT3);
++
++#undef AuReadLockFunc
++#undef AuWriteLockFunc
++#undef AuRWLockFuncs
++
++#define DiMustNoWaiters(d)	AuRwMustNoWaiters(&au_di(d)->di_rwsem)
++#define DiMustAnyLock(d)	AuRwMustAnyLock(&au_di(d)->di_rwsem)
++#define DiMustWriteLock(d)	AuRwMustWriteLock(&au_di(d)->di_rwsem)
++
++/* ---------------------------------------------------------------------- */
++
++/* todo: memory barrier? */
++static inline unsigned int au_digen(struct dentry *d)
++{
++	return atomic_read(&au_di(d)->di_generation);
++}
++
++static inline void au_h_dentry_init(struct au_hdentry *hdentry)
++{
++	hdentry->hd_dentry = NULL;
++}
++
++static inline void au_hdput(struct au_hdentry *hd)
++{
++	dput(hd->hd_dentry);
++}
++
++static inline aufs_bindex_t au_dbstart(struct dentry *dentry)
++{
++	DiMustAnyLock(dentry);
++	return au_di(dentry)->di_bstart;
++}
++
++static inline aufs_bindex_t au_dbend(struct dentry *dentry)
++{
++	DiMustAnyLock(dentry);
++	return au_di(dentry)->di_bend;
++}
++
++static inline aufs_bindex_t au_dbwh(struct dentry *dentry)
++{
++	DiMustAnyLock(dentry);
++	return au_di(dentry)->di_bwh;
++}
++
++static inline aufs_bindex_t au_dbdiropq(struct dentry *dentry)
++{
++	DiMustAnyLock(dentry);
++	return au_di(dentry)->di_bdiropq;
++}
++
++/* todo: hard/soft set? */
++static inline void au_set_dbstart(struct dentry *dentry, aufs_bindex_t bindex)
++{
++	DiMustWriteLock(dentry);
++	au_di(dentry)->di_bstart = bindex;
++}
++
++static inline void au_set_dbend(struct dentry *dentry, aufs_bindex_t bindex)
++{
++	DiMustWriteLock(dentry);
++	au_di(dentry)->di_bend = bindex;
++}
++
++static inline void au_set_dbwh(struct dentry *dentry, aufs_bindex_t bindex)
++{
++	DiMustWriteLock(dentry);
++	/* dbwh can be outside of bstart - bend range */
++	au_di(dentry)->di_bwh = bindex;
++}
++
++static inline void au_set_dbdiropq(struct dentry *dentry, aufs_bindex_t bindex)
++{
++	DiMustWriteLock(dentry);
++	au_di(dentry)->di_bdiropq = bindex;
++}
++
++/* ---------------------------------------------------------------------- */
++
++#ifdef CONFIG_AUFS_HINOTIFY
++static inline void au_digen_dec(struct dentry *d)
++{
++	atomic_dec_return(&au_di(d)->di_generation);
++}
++
++static inline void au_hin_di_reinit(struct dentry *dentry)
++{
++	dentry->d_fsdata = NULL;
++}
++#else
++static inline void au_hin_di_reinit(struct dentry *dentry __maybe_unused)
++{
++	/* empty */
++}
++#endif /* CONFIG_AUFS_HINOTIFY */
++
++#endif /* __KERNEL__ */
++#endif /* __AUFS_DENTRY_H__ */
+diff -Nur linux-2.6.31.5.orig/fs/aufs/dinfo.c linux-2.6.31.5/fs/aufs/dinfo.c
+--- linux-2.6.31.5.orig/fs/aufs/dinfo.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.31.5/fs/aufs/dinfo.c	2009-11-15 22:02:37.000000000 +0100
+@@ -0,0 +1,367 @@
++/*
++ * Copyright (C) 2005-2009 Junjiro R. Okajima
++ *
++ * This program, aufs is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
++ */
++
++/*
++ * dentry private data
++ */
++
++#include "aufs.h"
++
++int au_alloc_dinfo(struct dentry *dentry)
++{
++	struct au_dinfo *dinfo;
++	struct super_block *sb;
++	int nbr;
++
++	dinfo = au_cache_alloc_dinfo();
++	if (unlikely(!dinfo))
++		goto out;
++
++	sb = dentry->d_sb;
++	nbr = au_sbend(sb) + 1;
++	if (nbr <= 0)
++		nbr = 1;
++	dinfo->di_hdentry = kcalloc(nbr, sizeof(*dinfo->di_hdentry), GFP_NOFS);
++	if (unlikely(!dinfo->di_hdentry))
++		goto out_dinfo;
++
++	atomic_set(&dinfo->di_generation, au_sigen(sb));
++	/* smp_mb(); */ /* atomic_set */
++	au_rw_init_wlock_nested(&dinfo->di_rwsem, AuLsc_DI_CHILD);
++	dinfo->di_bstart = -1;
++	dinfo->di_bend = -1;
++	dinfo->di_bwh = -1;
++	dinfo->di_bdiropq = -1;
++
++	dentry->d_fsdata = dinfo;
++	dentry->d_op = &aufs_dop;
++	return 0; /* success */
++
++ out_dinfo:
++	au_cache_free_dinfo(dinfo);
++ out:
++	return -ENOMEM;
++}
++
++int au_di_realloc(struct au_dinfo *dinfo, int nbr)
++{
++	int err, sz;
++	struct au_hdentry *hdp;
++
++	AuRwMustWriteLock(&dinfo->di_rwsem);
++
++	err = -ENOMEM;
++	sz = sizeof(*hdp) * (dinfo->di_bend + 1);
++	if (!sz)
++		sz = sizeof(*hdp);
++	hdp = au_kzrealloc(dinfo->di_hdentry, sz, sizeof(*hdp) * nbr, GFP_NOFS);
++	if (hdp) {
++		dinfo->di_hdentry = hdp;
++		err = 0;
++	}
++
++	return err;
++}
++
++/* ---------------------------------------------------------------------- */
++
++static void do_ii_write_lock(struct inode *inode, unsigned int lsc)
++{
++	switch (lsc) {
++	case AuLsc_DI_CHILD:
++		ii_write_lock_child(inode);
++		break;
++	case AuLsc_DI_CHILD2:
++		ii_write_lock_child2(inode);
++		break;
++	case AuLsc_DI_CHILD3:
++		ii_write_lock_child3(inode);
++		break;
++	case AuLsc_DI_PARENT:
++		ii_write_lock_parent(inode);
++		break;
++	case AuLsc_DI_PARENT2:
++		ii_write_lock_parent2(inode);
++		break;
++	case AuLsc_DI_PARENT3:
++		ii_write_lock_parent3(inode);
++		break;
++	default:
++		BUG();
++	}
++}
++
++static void do_ii_read_lock(struct inode *inode, unsigned int lsc)
++{
++	switch (lsc) {
++	case AuLsc_DI_CHILD:
++		ii_read_lock_child(inode);
++		break;
++	case AuLsc_DI_CHILD2:
++		ii_read_lock_child2(inode);
++		break;
++	case AuLsc_DI_CHILD3:
++		ii_read_lock_child3(inode);
++		break;
++	case AuLsc_DI_PARENT:
++		ii_read_lock_parent(inode);
++		break;
++	case AuLsc_DI_PARENT2:
++		ii_read_lock_parent2(inode);
++		break;
++	case AuLsc_DI_PARENT3:
++		ii_read_lock_parent3(inode);
++		break;
++	default:
++		BUG();
++	}
++}
++
++void di_read_lock(struct dentry *d, int flags, unsigned int lsc)
++{
++	au_rw_read_lock_nested(&au_di(d)->di_rwsem, lsc);
++	if (d->d_inode) {
++		if (au_ftest_lock(flags, IW))
++			do_ii_write_lock(d->d_inode, lsc);
++		else if (au_ftest_lock(flags, IR))
++			do_ii_read_lock(d->d_inode, lsc);
++	}
++}
++
++void di_read_unlock(struct dentry *d, int flags)
++{
++	if (d->d_inode) {
++		if (au_ftest_lock(flags, IW))
++			ii_write_unlock(d->d_inode);
++		else if (au_ftest_lock(flags, IR))
++			ii_read_unlock(d->d_inode);
++	}
++	au_rw_read_unlock(&au_di(d)->di_rwsem);
++}
++
++void di_downgrade_lock(struct dentry *d, int flags)
++{
++	if (d->d_inode && au_ftest_lock(flags, IR))
++		ii_downgrade_lock(d->d_inode);
++	au_rw_dgrade_lock(&au_di(d)->di_rwsem);
++}
++
++void di_write_lock(struct dentry *d, unsigned int lsc)
++{
++	au_rw_write_lock_nested(&au_di(d)->di_rwsem, lsc);
++	if (d->d_inode)
++		do_ii_write_lock(d->d_inode, lsc);
++}
++
++void di_write_unlock(struct dentry *d)
++{
++	if (d->d_inode)
++		ii_write_unlock(d->d_inode);
++	au_rw_write_unlock(&au_di(d)->di_rwsem);
++}
++
++void di_write_lock2_child(struct dentry *d1, struct dentry *d2, int isdir)
++{
++	AuDebugOn(d1 == d2
++		  || d1->d_inode == d2->d_inode
++		  || d1->d_sb != d2->d_sb);
++
++	if (isdir && au_test_subdir(d1, d2)) {
++		di_write_lock_child(d1);
++		di_write_lock_child2(d2);
++	} else {
++		/* there should be no races */
++		di_write_lock_child(d2);
++		di_write_lock_child2(d1);
++	}
++}
++
++void di_write_lock2_parent(struct dentry *d1, struct dentry *d2, int isdir)
++{
++	AuDebugOn(d1 == d2
++		  || d1->d_inode == d2->d_inode
++		  || d1->d_sb != d2->d_sb);
++
++	if (isdir && au_test_subdir(d1, d2)) {
++		di_write_lock_parent(d1);
++		di_write_lock_parent2(d2);
++	} else {
++		/* there should be no races */
++		di_write_lock_parent(d2);
++		di_write_lock_parent2(d1);
++	}
++}
++
++void di_write_unlock2(struct dentry *d1, struct dentry *d2)
++{
++	di_write_unlock(d1);
++	if (d1->d_inode == d2->d_inode)
++		au_rw_write_unlock(&au_di(d2)->di_rwsem);
++	else
++		di_write_unlock(d2);
++}
++
++/* ---------------------------------------------------------------------- */
++
++struct dentry *au_h_dptr(struct dentry *dentry, aufs_bindex_t bindex)
++{
++	struct dentry *d;
++
++	DiMustAnyLock(dentry);
++
++	if (au_dbstart(dentry) < 0 || bindex < au_dbstart(dentry))
++		return NULL;
++	AuDebugOn(bindex < 0);
++	d = au_di(dentry)->di_hdentry[0 + bindex].hd_dentry;
++	AuDebugOn(d && (atomic_read(&d->d_count) <= 0));
++	return d;
++}
++
++aufs_bindex_t au_dbtail(struct dentry *dentry)
++{
++	aufs_bindex_t bend, bwh;
++
++	bend = au_dbend(dentry);
++	if (0 <= bend) {
++		bwh = au_dbwh(dentry);
++		if (!bwh)
++			return bwh;
++		if (0 < bwh && bwh < bend)
++			return bwh - 1;
++	}
++	return bend;
++}
++
++aufs_bindex_t au_dbtaildir(struct dentry *dentry)
++{
++	aufs_bindex_t bend, bopq;
++
++	bend = au_dbtail(dentry);
++	if (0 <= bend) {
++		bopq = au_dbdiropq(dentry);
++		if (0 <= bopq && bopq < bend)
++			bend = bopq;
++	}
++	return bend;
++}
++
++/* ---------------------------------------------------------------------- */
++
++void au_set_h_dptr(struct dentry *dentry, aufs_bindex_t bindex,
++		   struct dentry *h_dentry)
++{
++	struct au_hdentry *hd = au_di(dentry)->di_hdentry + bindex;
++
++	DiMustWriteLock(dentry);
++
++	if (hd->hd_dentry)
++		au_hdput(hd);
++	hd->hd_dentry = h_dentry;
++}
++
++void au_update_digen(struct dentry *dentry)
++{
++	atomic_set(&au_di(dentry)->di_generation, au_sigen(dentry->d_sb));
++	/* smp_mb(); */ /* atomic_set */
++}
++
++void au_update_dbrange(struct dentry *dentry, int do_put_zero)
++{
++	struct au_dinfo *dinfo;
++	struct dentry *h_d;
++
++	DiMustWriteLock(dentry);
++
++	dinfo = au_di(dentry);
++	if (!dinfo || dinfo->di_bstart < 0)
++		return;
++
++	if (do_put_zero) {
++		aufs_bindex_t bindex, bend;
++
++		bend = dinfo->di_bend;
++		for (bindex = dinfo->di_bstart; bindex <= bend; bindex++) {
++			h_d = dinfo->di_hdentry[0 + bindex].hd_dentry;
++			if (h_d && !h_d->d_inode)
++				au_set_h_dptr(dentry, bindex, NULL);
++		}
++	}
++
++	dinfo->di_bstart = -1;
++	while (++dinfo->di_bstart <= dinfo->di_bend)
++		if (dinfo->di_hdentry[0 + dinfo->di_bstart].hd_dentry)
++			break;
++	if (dinfo->di_bstart > dinfo->di_bend) {
++		dinfo->di_bstart = -1;
++		dinfo->di_bend = -1;
++		return;
++	}
++
++	dinfo->di_bend++;
++	while (0 <= --dinfo->di_bend)
++		if (dinfo->di_hdentry[0 + dinfo->di_bend].hd_dentry)
++			break;
++	AuDebugOn(dinfo->di_bstart > dinfo->di_bend || dinfo->di_bend < 0);
++}
++
++void au_update_dbstart(struct dentry *dentry)
++{
++	aufs_bindex_t bindex, bend;
++	struct dentry *h_dentry;
++
++	bend = au_dbend(dentry);
++	for (bindex = au_dbstart(dentry); bindex <= bend; bindex++) {
++		h_dentry = au_h_dptr(dentry, bindex);
++		if (!h_dentry)
++			continue;
++		if (h_dentry->d_inode) {
++			au_set_dbstart(dentry, bindex);
++			return;
++		}
++		au_set_h_dptr(dentry, bindex, NULL);
++	}
++}
++
++void au_update_dbend(struct dentry *dentry)
++{
++	aufs_bindex_t bindex, bstart;
++	struct dentry *h_dentry;
++
++	bstart = au_dbstart(dentry);
++	for (bindex = au_dbend(dentry); bindex <= bstart; bindex--) {
++		h_dentry = au_h_dptr(dentry, bindex);
++		if (!h_dentry)
++			continue;
++		if (h_dentry->d_inode) {
++			au_set_dbend(dentry, bindex);
++			return;
++		}
++		au_set_h_dptr(dentry, bindex, NULL);
++	}
++}
++
++int au_find_dbindex(struct dentry *dentry, struct dentry *h_dentry)
++{
++	aufs_bindex_t bindex, bend;
++
++	bend = au_dbend(dentry);
++	for (bindex = au_dbstart(dentry); bindex <= bend; bindex++)
++		if (au_h_dptr(dentry, bindex) == h_dentry)
++			return bindex;
++	return -1;
++}
+diff -Nur linux-2.6.31.5.orig/fs/aufs/dir.c linux-2.6.31.5/fs/aufs/dir.c
+--- linux-2.6.31.5.orig/fs/aufs/dir.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.31.5/fs/aufs/dir.c	2009-11-15 22:02:37.000000000 +0100
+@@ -0,0 +1,538 @@
++/*
++ * Copyright (C) 2005-2009 Junjiro R. Okajima
++ *
++ * This program, aufs is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
++ */
++
++/*
++ * directory operations
++ */
++
++#include <linux/file.h>
++#include <linux/fs_stack.h>
++#include "aufs.h"
++
++void au_add_nlink(struct inode *dir, struct inode *h_dir)
++{
++	AuDebugOn(!S_ISDIR(dir->i_mode) || !S_ISDIR(h_dir->i_mode));
++
++	dir->i_nlink += h_dir->i_nlink - 2;
++	if (h_dir->i_nlink < 2)
++		dir->i_nlink += 2;
++}
++
++void au_sub_nlink(struct inode *dir, struct inode *h_dir)
++{
++	AuDebugOn(!S_ISDIR(dir->i_mode) || !S_ISDIR(h_dir->i_mode));
++
++	dir->i_nlink -= h_dir->i_nlink - 2;
++	if (h_dir->i_nlink < 2)
++		dir->i_nlink -= 2;
++}
++
++/* ---------------------------------------------------------------------- */
++
++static int reopen_dir(struct file *file)
++{
++	int err;
++	unsigned int flags;
++	aufs_bindex_t bindex, btail, bstart;
++	struct dentry *dentry, *h_dentry;
++	struct file *h_file;
++
++	/* open all lower dirs */
++	dentry = file->f_dentry;
++	bstart = au_dbstart(dentry);
++	for (bindex = au_fbstart(file); bindex < bstart; bindex++)
++		au_set_h_fptr(file, bindex, NULL);
++	au_set_fbstart(file, bstart);
++
++	btail = au_dbtaildir(dentry);
++	for (bindex = au_fbend(file); btail < bindex; bindex--)
++		au_set_h_fptr(file, bindex, NULL);
++	au_set_fbend(file, btail);
++
++	flags = file->f_flags;
++	for (bindex = bstart; bindex <= btail; bindex++) {
++		h_dentry = au_h_dptr(dentry, bindex);
++		if (!h_dentry)
++			continue;
++		h_file = au_h_fptr(file, bindex);
++		if (h_file)
++			continue;
++
++		h_file = au_h_open(dentry, bindex, flags, file);
++		err = PTR_ERR(h_file);
++		if (IS_ERR(h_file))
++			goto out; /* close all? */
++		au_set_h_fptr(file, bindex, h_file);
++	}
++	au_update_figen(file);
++	/* todo: necessary? */
++	/* file->f_ra = h_file->f_ra; */
++	err = 0;
++
++ out:
++	return err;
++}
++
++static int do_open_dir(struct file *file, int flags)
++{
++	int err;
++	aufs_bindex_t bindex, btail;
++	struct dentry *dentry, *h_dentry;
++	struct file *h_file;
++
++	FiMustWriteLock(file);
++
++	err = 0;
++	dentry = file->f_dentry;
++	au_set_fvdir_cache(file, NULL);
++	au_fi(file)->fi_maintain_plink = 0;
++	file->f_version = dentry->d_inode->i_version;
++	bindex = au_dbstart(dentry);
++	au_set_fbstart(file, bindex);
++	btail = au_dbtaildir(dentry);
++	au_set_fbend(file, btail);
++	for (; !err && bindex <= btail; bindex++) {
++		h_dentry = au_h_dptr(dentry, bindex);
++		if (!h_dentry)
++			continue;
++
++		h_file = au_h_open(dentry, bindex, flags, file);
++		if (IS_ERR(h_file)) {
++			err = PTR_ERR(h_file);
++			break;
++		}
++		au_set_h_fptr(file, bindex, h_file);
++	}
++	au_update_figen(file);
++	/* todo: necessary? */
++	/* file->f_ra = h_file->f_ra; */
++	if (!err)
++		return 0; /* success */
++
++	/* close all */
++	for (bindex = au_fbstart(file); bindex <= btail; bindex++)
++		au_set_h_fptr(file, bindex, NULL);
++	au_set_fbstart(file, -1);
++	au_set_fbend(file, -1);
++	return err;
++}
++
++static int aufs_open_dir(struct inode *inode __maybe_unused,
++			 struct file *file)
++{
++	return au_do_open(file, do_open_dir);
++}
++
++static int aufs_release_dir(struct inode *inode __maybe_unused,
++			    struct file *file)
++{
++	struct au_vdir *vdir_cache;
++	struct super_block *sb;
++	struct au_sbinfo *sbinfo;
++
++	sb = file->f_dentry->d_sb;
++	si_noflush_read_lock(sb);
++	fi_write_lock(file);
++	vdir_cache = au_fvdir_cache(file);
++	if (vdir_cache)
++		au_vdir_free(vdir_cache);
++	if (au_fi(file)->fi_maintain_plink) {
++		sbinfo = au_sbi(sb);
++		/* clear the flag without write-lock */
++		sbinfo->au_si_status &= ~AuSi_MAINTAIN_PLINK;
++		smp_mb();
++		wake_up_all(&sbinfo->si_plink_wq);
++	}
++	fi_write_unlock(file);
++	au_finfo_fin(file);
++	si_read_unlock(sb);
++	return 0;
++}
++
++/* ---------------------------------------------------------------------- */
++
++static int au_do_fsync_dir_no_file(struct dentry *dentry, int datasync)
++{
++	int err;
++	aufs_bindex_t bend, bindex;
++	struct inode *inode;
++	struct super_block *sb;
++
++	err = 0;
++	sb = dentry->d_sb;
++	inode = dentry->d_inode;
++	IMustLock(inode);
++	bend = au_dbend(dentry);
++	for (bindex = au_dbstart(dentry); !err && bindex <= bend; bindex++) {
++		struct path h_path;
++		struct inode *h_inode;
++
++		if (au_test_ro(sb, bindex, inode))
++			continue;
++		h_path.dentry = au_h_dptr(dentry, bindex);
++		if (!h_path.dentry)
++			continue;
++		h_inode = h_path.dentry->d_inode;
++		if (!h_inode)
++			continue;
++
++		/* no mnt_want_write() */
++		/* cf. fs/nsfd/vfs.c and fs/nfsd/nfs4recover.c */
++		/* todo: inotiry fired? */
++		h_path.mnt = au_sbr_mnt(sb, bindex);
++		mutex_lock(&h_inode->i_mutex);
++		err = filemap_fdatawrite(h_inode->i_mapping);
++		AuDebugOn(!h_inode->i_fop);
++		if (!err && h_inode->i_fop->fsync)
++			err = h_inode->i_fop->fsync(NULL, h_path.dentry,
++						    datasync);
++		if (!err)
++			err = filemap_fdatawrite(h_inode->i_mapping);
++		if (!err)
++			vfsub_update_h_iattr(&h_path, /*did*/NULL); /*ignore*/
++		mutex_unlock(&h_inode->i_mutex);
++	}
++
++	return err;
++}
++
++static int au_do_fsync_dir(struct file *file, int datasync)
++{
++	int err;
++	aufs_bindex_t bend, bindex;
++	struct file *h_file;
++	struct super_block *sb;
++	struct inode *inode;
++	struct mutex *h_mtx;
++
++	err = au_reval_and_lock_fdi(file, reopen_dir, /*wlock*/1);
++	if (unlikely(err))
++		goto out;
++
++	sb = file->f_dentry->d_sb;
++	inode = file->f_dentry->d_inode;
++	bend = au_fbend(file);
++	for (bindex = au_fbstart(file); !err && bindex <= bend; bindex++) {
++		h_file = au_h_fptr(file, bindex);
++		if (!h_file || au_test_ro(sb, bindex, inode))
++			continue;
++
++		err = vfs_fsync(h_file, h_file->f_dentry, datasync);
++		if (!err) {
++			h_mtx = &h_file->f_dentry->d_inode->i_mutex;
++			mutex_lock(h_mtx);
++			vfsub_update_h_iattr(&h_file->f_path, /*did*/NULL);
++			/*ignore*/
++			mutex_unlock(h_mtx);
++		}
++	}
++
++ out:
++	return err;
++}
++
++/*
++ * @file may be NULL
++ */
++static int aufs_fsync_dir(struct file *file, struct dentry *dentry,
++			  int datasync)
++{
++	int err;
++	struct super_block *sb;
++
++	IMustLock(dentry->d_inode);
++
++	err = 0;
++	sb = dentry->d_sb;
++	si_noflush_read_lock(sb);
++	if (file)
++		err = au_do_fsync_dir(file, datasync);
++	else {
++		di_write_lock_child(dentry);
++		err = au_do_fsync_dir_no_file(dentry, datasync);
++	}
++	au_cpup_attr_timesizes(dentry->d_inode);
++	di_write_unlock(dentry);
++	if (file)
++		fi_write_unlock(file);
++
++	si_read_unlock(sb);
++	return err;
++}
++
++/* ---------------------------------------------------------------------- */
++
++static int aufs_readdir(struct file *file, void *dirent, filldir_t filldir)
++{
++	int err;
++	struct dentry *dentry;
++	struct inode *inode;
++	struct super_block *sb;
++
++	dentry = file->f_dentry;
++	inode = dentry->d_inode;
++	IMustLock(inode);
++
++	sb = dentry->d_sb;
++	si_read_lock(sb, AuLock_FLUSH);
++	err = au_reval_and_lock_fdi(file, reopen_dir, /*wlock*/1);
++	if (unlikely(err))
++		goto out;
++	err = au_vdir_init(file);
++	di_downgrade_lock(dentry, AuLock_IR);
++	if (unlikely(err))
++		goto out_unlock;
++
++	if (!au_test_nfsd(current)) {
++		err = au_vdir_fill_de(file, dirent, filldir);
++		fsstack_copy_attr_atime(inode,
++					au_h_iptr(inode, au_ibstart(inode)));
++	} else {
++		/*
++		 * nfsd filldir may call lookup_one_len(), vfs_getattr(),
++		 * encode_fh() and others.
++		 */
++		struct inode *h_inode = au_h_iptr(inode, au_ibstart(inode));
++
++		di_read_unlock(dentry, AuLock_IR);
++		si_read_unlock(sb);
++		lockdep_off();
++		err = au_vdir_fill_de(file, dirent, filldir);
++		lockdep_on();
++		fsstack_copy_attr_atime(inode, h_inode);
++		fi_write_unlock(file);
++
++		AuTraceErr(err);
++		return err;
++	}
++
++ out_unlock:
++	di_read_unlock(dentry, AuLock_IR);
++	fi_write_unlock(file);
++ out:
++	si_read_unlock(sb);
++	return err;
++}
++
++/* ---------------------------------------------------------------------- */
++
++#define AuTestEmpty_WHONLY	1
++#define AuTestEmpty_CALLED	(1 << 1)
++#define AuTestEmpty_SHWH	(1 << 2)
++#define au_ftest_testempty(flags, name)	((flags) & AuTestEmpty_##name)
++#define au_fset_testempty(flags, name)	{ (flags) |= AuTestEmpty_##name; }
++#define au_fclr_testempty(flags, name)	{ (flags) &= ~AuTestEmpty_##name; }
++
++#ifndef CONFIG_AUFS_SHWH
++#undef AuTestEmpty_SHWH
++#define AuTestEmpty_SHWH	0
++#endif
++
++struct test_empty_arg {
++	struct au_nhash whlist;
++	unsigned int flags;
++	int err;
++	aufs_bindex_t bindex;
++};
++
++static int test_empty_cb(void *__arg, const char *__name, int namelen,
++			 loff_t offset __maybe_unused, u64 ino,
++			 unsigned int d_type)
++{
++	struct test_empty_arg *arg = __arg;
++	char *name = (void *)__name;
++
++	arg->err = 0;
++	au_fset_testempty(arg->flags, CALLED);
++	/* smp_mb(); */
++	if (name[0] == '.'
++	    && (namelen == 1 || (name[1] == '.' && namelen == 2)))
++		goto out; /* success */
++
++	if (namelen <= AUFS_WH_PFX_LEN
++	    || memcmp(name, AUFS_WH_PFX, AUFS_WH_PFX_LEN)) {
++		if (au_ftest_testempty(arg->flags, WHONLY)
++		    && !au_nhash_test_known_wh(&arg->whlist, name, namelen))
++			arg->err = -ENOTEMPTY;
++		goto out;
++	}
++
++	name += AUFS_WH_PFX_LEN;
++	namelen -= AUFS_WH_PFX_LEN;
++	if (!au_nhash_test_known_wh(&arg->whlist, name, namelen))
++		arg->err = au_nhash_append_wh
++			(&arg->whlist, name, namelen, ino, d_type, arg->bindex,
++			 au_ftest_testempty(arg->flags, SHWH));
++
++ out:
++	/* smp_mb(); */
++	AuTraceErr(arg->err);
++	return arg->err;
++}
++
++static int do_test_empty(struct dentry *dentry, struct test_empty_arg *arg)
++{
++	int err;
++	struct file *h_file;
++
++	h_file = au_h_open(dentry, arg->bindex,
++			   O_RDONLY | O_NONBLOCK | O_DIRECTORY | O_LARGEFILE,
++			   /*file*/NULL);
++	err = PTR_ERR(h_file);
++	if (IS_ERR(h_file))
++		goto out;
++
++	err = 0;
++	if (!au_opt_test(au_mntflags(dentry->d_sb), UDBA_NONE)
++	    && !h_file->f_dentry->d_inode->i_nlink)
++		goto out_put;
++
++	do {
++		arg->err = 0;
++		au_fclr_testempty(arg->flags, CALLED);
++		/* smp_mb(); */
++		err = vfsub_readdir(h_file, test_empty_cb, arg);
++		if (err >= 0)
++			err = arg->err;
++	} while (!err && au_ftest_testempty(arg->flags, CALLED));
++
++ out_put:
++	fput(h_file);
++	au_sbr_put(dentry->d_sb, arg->bindex);
++ out:
++	return err;
++}
++
++struct do_test_empty_args {
++	int *errp;
++	struct dentry *dentry;
++	struct test_empty_arg *arg;
++};
++
++static void call_do_test_empty(void *args)
++{
++	struct do_test_empty_args *a = args;
++	*a->errp = do_test_empty(a->dentry, a->arg);
++}
++
++static int sio_test_empty(struct dentry *dentry, struct test_empty_arg *arg)
++{
++	int err, wkq_err;
++	struct dentry *h_dentry;
++	struct inode *h_inode;
++
++	h_dentry = au_h_dptr(dentry, arg->bindex);
++	h_inode = h_dentry->d_inode;
++	mutex_lock_nested(&h_inode->i_mutex, AuLsc_I_CHILD);
++	err = au_test_h_perm_sio(h_inode, MAY_EXEC | MAY_READ);
++	mutex_unlock(&h_inode->i_mutex);
++	if (!err)
++		err = do_test_empty(dentry, arg);
++	else {
++		struct do_test_empty_args args = {
++			.errp	= &err,
++			.dentry	= dentry,
++			.arg	= arg
++		};
++		unsigned int flags = arg->flags;
++
++		wkq_err = au_wkq_wait(call_do_test_empty, &args);
++		if (unlikely(wkq_err))
++			err = wkq_err;
++		arg->flags = flags;
++	}
++
++	return err;
++}
++
++int au_test_empty_lower(struct dentry *dentry)
++{
++	int err;
++	aufs_bindex_t bindex, bstart, btail;
++	struct test_empty_arg arg;
++
++	SiMustAnyLock(dentry->d_sb);
++
++	err = au_nhash_alloc(&arg.whlist, au_sbi(dentry->d_sb)->si_rdhash,
++			     GFP_NOFS);
++	if (unlikely(err))
++		goto out;
++
++	bstart = au_dbstart(dentry);
++	arg.flags = 0;
++	if (au_opt_test(au_mntflags(dentry->d_sb), SHWH))
++		au_fset_testempty(arg.flags, SHWH);
++	arg.bindex = bstart;
++	err = do_test_empty(dentry, &arg);
++	if (unlikely(err))
++		goto out_whlist;
++
++	au_fset_testempty(arg.flags, WHONLY);
++	btail = au_dbtaildir(dentry);
++	for (bindex = bstart + 1; !err && bindex <= btail; bindex++) {
++		struct dentry *h_dentry;
++
++		h_dentry = au_h_dptr(dentry, bindex);
++		if (h_dentry && h_dentry->d_inode) {
++			arg.bindex = bindex;
++			err = do_test_empty(dentry, &arg);
++		}
++	}
++
++ out_whlist:
++	au_nhash_wh_free(&arg.whlist);
++ out:
++	return err;
++}
++
++int au_test_empty(struct dentry *dentry, struct au_nhash *whlist)
++{
++	int err;
++	struct test_empty_arg arg;
++	aufs_bindex_t bindex, btail;
++
++	err = 0;
++	arg.whlist = *whlist;
++	arg.flags = AuTestEmpty_WHONLY;
++	if (au_opt_test(au_mntflags(dentry->d_sb), SHWH))
++		au_fset_testempty(arg.flags, SHWH);
++	btail = au_dbtaildir(dentry);
++	for (bindex = au_dbstart(dentry); !err && bindex <= btail; bindex++) {
++		struct dentry *h_dentry;
++
++		h_dentry = au_h_dptr(dentry, bindex);
++		if (h_dentry && h_dentry->d_inode) {
++			arg.bindex = bindex;
++			err = sio_test_empty(dentry, &arg);
++		}
++	}
++
++	return err;
++}
++
++/* ---------------------------------------------------------------------- */
++
++const struct file_operations aufs_dir_fop = {
++	.read		= generic_read_dir,
++	.readdir	= aufs_readdir,
++	.unlocked_ioctl	= aufs_ioctl_dir,
++	.open		= aufs_open_dir,
++	.release	= aufs_release_dir,
++	.flush		= aufs_flush,
++	.fsync		= aufs_fsync_dir
++};
+diff -Nur linux-2.6.31.5.orig/fs/aufs/dir.h linux-2.6.31.5/fs/aufs/dir.h
+--- linux-2.6.31.5.orig/fs/aufs/dir.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.31.5/fs/aufs/dir.h	2009-11-15 22:02:37.000000000 +0100
+@@ -0,0 +1,114 @@
++/*
++ * Copyright (C) 2005-2009 Junjiro R. Okajima
++ *
++ * This program, aufs is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
++ */
++
++/*
++ * directory operations
++ */
++
++#ifndef __AUFS_DIR_H__
++#define __AUFS_DIR_H__
++
++#ifdef __KERNEL__
++
++#include <linux/fs.h>
++#include <linux/aufs_type.h>
++
++/* ---------------------------------------------------------------------- */
++
++/* need to be faster and smaller */
++
++struct au_nhash {
++	unsigned int		nh_num;
++	struct hlist_head	*nh_head;
++};
++
++struct au_vdir_destr {
++	unsigned char	len;
++	unsigned char	name[0];
++} __packed;
++
++struct au_vdir_dehstr {
++	struct hlist_node	hash;
++	struct au_vdir_destr	*str;
++};
++
++struct au_vdir_de {
++	ino_t			de_ino;
++	unsigned char		de_type;
++	/* caution: packed */
++	struct au_vdir_destr	de_str;
++} __packed;
++
++struct au_vdir_wh {
++	struct hlist_node	wh_hash;
++#ifdef CONFIG_AUFS_SHWH
++	ino_t			wh_ino;
++	aufs_bindex_t		wh_bindex;
++	unsigned char		wh_type;
++#else
++	aufs_bindex_t		wh_bindex;
++#endif
++	/* caution: packed */
++	struct au_vdir_destr	wh_str;
++} __packed;
++
++union au_vdir_deblk_p {
++	unsigned char		*deblk;
++	struct au_vdir_de	*de;
++};
++
++struct au_vdir {
++	unsigned char	**vd_deblk;
++	unsigned long	vd_nblk;
++	struct {
++		unsigned long		ul;
++		union au_vdir_deblk_p	p;
++	} vd_last;
++
++	unsigned long	vd_version;
++	unsigned int	vd_deblk_sz;
++	unsigned long	vd_jiffy;
++};
++
++/* ---------------------------------------------------------------------- */
++
++/* dir.c */
++extern const struct file_operations aufs_dir_fop;
++void au_add_nlink(struct inode *dir, struct inode *h_dir);
++void au_sub_nlink(struct inode *dir, struct inode *h_dir);
++int au_test_empty_lower(struct dentry *dentry);
++int au_test_empty(struct dentry *dentry, struct au_nhash *whlist);
++
++/* vdir.c */
++int au_nhash_alloc(struct au_nhash *nhash, unsigned int num_hash, gfp_t gfp);
++void au_nhash_wh_free(struct au_nhash *whlist);
++int au_nhash_test_longer_wh(struct au_nhash *whlist, aufs_bindex_t btgt,
++			    int limit);
++int au_nhash_test_known_wh(struct au_nhash *whlist, char *name, int nlen);
++int au_nhash_append_wh(struct au_nhash *whlist, char *name, int nlen, ino_t ino,
++		       unsigned int d_type, aufs_bindex_t bindex,
++		       unsigned char shwh);
++void au_vdir_free(struct au_vdir *vdir);
++int au_vdir_init(struct file *file);
++int au_vdir_fill_de(struct file *file, void *dirent, filldir_t filldir);
++
++/* ioctl.c */
++long aufs_ioctl_dir(struct file *file, unsigned int cmd, unsigned long arg);
++
++#endif /* __KERNEL__ */
++#endif /* __AUFS_DIR_H__ */
+diff -Nur linux-2.6.31.5.orig/fs/aufs/export.c linux-2.6.31.5/fs/aufs/export.c
+--- linux-2.6.31.5.orig/fs/aufs/export.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.31.5/fs/aufs/export.c	2009-11-15 22:27:33.000000000 +0100
+@@ -0,0 +1,746 @@
++/*
++ * Copyright (C) 2005-2009 Junjiro R. Okajima
++ *
++ * This program, aufs is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
++ */
++
++/*
++ * export via nfs
++ */
++
++#include <linux/exportfs.h>
++#include <linux/file.h>
++#include <linux/mnt_namespace.h>
++#include <linux/nsproxy.h>
++#include <linux/namei.h>
++#include <linux/random.h>
++#include "aufs.h"
++
++union conv {
++#ifdef CONFIG_AUFS_INO_T_64
++	__u32 a[2];
++#else
++	__u32 a[1];
++#endif
++	ino_t ino;
++};
++
++static ino_t decode_ino(__u32 *a)
++{
++	union conv u;
++
++	BUILD_BUG_ON(sizeof(u.ino) != sizeof(u.a));
++	u.a[0] = a[0];
++#ifdef CONFIG_AUFS_INO_T_64
++	u.a[1] = a[1];
++#endif
++	return u.ino;
++}
++
++static void encode_ino(__u32 *a, ino_t ino)
++{
++	union conv u;
++
++	u.ino = ino;
++	a[0] = u.a[0];
++#ifdef CONFIG_AUFS_INO_T_64
++	a[1] = u.a[1];
++#endif
++}
++
++/* NFS file handle */
++enum {
++	Fh_br_id,
++	Fh_sigen,
++#ifdef CONFIG_AUFS_INO_T_64
++	/* support 64bit inode number */
++	Fh_ino1,
++	Fh_ino2,
++	Fh_dir_ino1,
++	Fh_dir_ino2,
++#else
++	Fh_ino1,
++	Fh_dir_ino1,
++#endif
++	Fh_igen,
++	Fh_h_type,
++	Fh_tail,
++
++	Fh_ino = Fh_ino1,
++	Fh_dir_ino = Fh_dir_ino1
++};
++
++static int au_test_anon(struct dentry *dentry)
++{
++	return !!(dentry->d_flags & DCACHE_DISCONNECTED);
++}
++
++/* ---------------------------------------------------------------------- */
++/* inode generation external table */
++
++int au_xigen_inc(struct inode *inode)
++{
++	int err;
++	loff_t pos;
++	ssize_t sz;
++	__u32 igen;
++	struct super_block *sb;
++	struct au_sbinfo *sbinfo;
++
++	err = 0;
++	sb = inode->i_sb;
++	sbinfo = au_sbi(sb);
++	/*
++	 * temporary workaround for escaping from SiMustAnyLock() in
++	 * au_mntflags(), since this function is called from au_iinfo_fin().
++	 */
++	if (unlikely(!au_opt_test(sbinfo->si_mntflags, XINO)))
++		goto out;
++
++	pos = inode->i_ino;
++	pos *= sizeof(igen);
++	igen = inode->i_generation + 1;
++	sz = xino_fwrite(sbinfo->si_xwrite, sbinfo->si_xigen, &igen,
++			 sizeof(igen), &pos);
++	if (sz == sizeof(igen))
++		goto out; /* success */
++
++	err = sz;
++	if (unlikely(sz >= 0)) {
++		err = -EIO;
++		AuIOErr("xigen error (%zd)\n", sz);
++	}
++
++ out:
++	return err;
++}
++
++int au_xigen_new(struct inode *inode)
++{
++	int err;
++	loff_t pos;
++	ssize_t sz;
++	struct super_block *sb;
++	struct au_sbinfo *sbinfo;
++	struct file *file;
++
++	err = 0;
++	/* todo: dirty, at mount time */
++	if (inode->i_ino == AUFS_ROOT_INO)
++		goto out;
++	sb = inode->i_sb;
++	SiMustAnyLock(sb);
++	if (unlikely(!au_opt_test(au_mntflags(sb), XINO)))
++		goto out;
++
++	err = -EFBIG;
++	pos = inode->i_ino;
++	if (unlikely(au_loff_max / sizeof(inode->i_generation) - 1 < pos)) {
++		AuIOErr1("too large i%lld\n", pos);
++		goto out;
++	}
++	pos *= sizeof(inode->i_generation);
++
++	err = 0;
++	sbinfo = au_sbi(sb);
++	file = sbinfo->si_xigen;
++	BUG_ON(!file);
++
++	if (i_size_read(file->f_dentry->d_inode)
++	    < pos + sizeof(inode->i_generation)) {
++		inode->i_generation = atomic_inc_return(&sbinfo->si_xigen_next);
++		sz = xino_fwrite(sbinfo->si_xwrite, file, &inode->i_generation,
++				 sizeof(inode->i_generation), &pos);
++	} else
++		sz = xino_fread(sbinfo->si_xread, file, &inode->i_generation,
++				sizeof(inode->i_generation), &pos);
++	if (sz == sizeof(inode->i_generation))
++		goto out; /* success */
++
++	err = sz;
++	if (unlikely(sz >= 0)) {
++		err = -EIO;
++		AuIOErr("xigen error (%zd)\n", sz);
++	}
++
++ out:
++	return err;
++}
++
++int au_xigen_set(struct super_block *sb, struct file *base)
++{
++	int err;
++	struct au_sbinfo *sbinfo;
++	struct file *file;
++
++	SiMustWriteLock(sb);
++
++	sbinfo = au_sbi(sb);
++	file = au_xino_create2(base, sbinfo->si_xigen);
++	err = PTR_ERR(file);
++	if (IS_ERR(file))
++		goto out;
++	err = 0;
++	if (sbinfo->si_xigen)
++		fput(sbinfo->si_xigen);
++	sbinfo->si_xigen = file;
++
++ out:
++	return err;
++}
++
++void au_xigen_clr(struct super_block *sb)
++{
++	struct au_sbinfo *sbinfo;
++
++	SiMustWriteLock(sb);
++
++	sbinfo = au_sbi(sb);
++	if (sbinfo->si_xigen) {
++		fput(sbinfo->si_xigen);
++		sbinfo->si_xigen = NULL;
++	}
++}
++
++/* ---------------------------------------------------------------------- */
++
++static struct dentry *decode_by_ino(struct super_block *sb, ino_t ino,
++				    ino_t dir_ino)
++{
++	struct dentry *dentry, *d;
++	struct inode *inode;
++	unsigned int sigen;
++
++	dentry = NULL;
++	inode = ilookup(sb, ino);
++	if (!inode)
++		goto out;
++
++	dentry = ERR_PTR(-ESTALE);
++	sigen = au_sigen(sb);
++	if (unlikely(is_bad_inode(inode)
++		     || IS_DEADDIR(inode)
++		     || sigen != au_iigen(inode)))
++		goto out_iput;
++
++	dentry = NULL;
++	if (!dir_ino || S_ISDIR(inode->i_mode))
++		dentry = d_find_alias(inode);
++	else {
++		spin_lock(&dcache_lock);
++		list_for_each_entry(d, &inode->i_dentry, d_alias)
++			if (!au_test_anon(d)
++			    && d->d_parent->d_inode->i_ino == dir_ino) {
++				dentry = dget_locked(d);
++				break;
++			}
++		spin_unlock(&dcache_lock);
++	}
++	if (unlikely(dentry && sigen != au_digen(dentry))) {
++		dput(dentry);
++		dentry = ERR_PTR(-ESTALE);
++	}
++
++ out_iput:
++	iput(inode);
++ out:
++	return dentry;
++}
++
++/* ---------------------------------------------------------------------- */
++
++/* todo: dirty? */
++/* if exportfs_decode_fh() passed vfsmount*, we could be happy */
++static struct vfsmount *au_mnt_get(struct super_block *sb)
++{
++	struct mnt_namespace *ns;
++	struct vfsmount *pos, *mnt;
++
++	spin_lock(&vfsmount_lock);
++	/* no get/put ?? */
++	AuDebugOn(!current->nsproxy);
++	ns = current->nsproxy->mnt_ns;
++	AuDebugOn(!ns);
++	mnt = NULL;
++	/* the order (reverse) will not be a problem */
++	list_for_each_entry(pos, &ns->list, mnt_list)
++		if (pos->mnt_sb == sb) {
++			mnt = mntget(pos);
++			break;
++		}
++	spin_unlock(&vfsmount_lock);
++	AuDebugOn(!mnt);
++
++	return mnt;
++}
++
++struct au_nfsd_si_lock {
++	const unsigned int sigen;
++	const aufs_bindex_t br_id;
++	unsigned char force_lock;
++};
++
++static aufs_bindex_t si_nfsd_read_lock(struct super_block *sb,
++				       struct au_nfsd_si_lock *nsi_lock)
++{
++	aufs_bindex_t bindex;
++
++	si_read_lock(sb, AuLock_FLUSH);
++
++	/* branch id may be wrapped around */
++	bindex = au_br_index(sb, nsi_lock->br_id);
++	if (bindex >= 0 && nsi_lock->sigen + AUFS_BRANCH_MAX > au_sigen(sb))
++		goto out; /* success */
++
++	if (!nsi_lock->force_lock)
++		si_read_unlock(sb);
++	bindex = -1;
++
++ out:
++	return bindex;
++}
++
++struct find_name_by_ino {
++	int called, found;
++	ino_t ino;
++	char *name;
++	int namelen;
++};
++
++static int
++find_name_by_ino(void *arg, const char *name, int namelen, loff_t offset,
++		 u64 ino, unsigned int d_type)
++{
++	struct find_name_by_ino *a = arg;
++
++	a->called++;
++	if (a->ino != ino)
++		return 0;
++
++	memcpy(a->name, name, namelen);
++	a->namelen = namelen;
++	a->found = 1;
++	return 1;
++}
++
++static struct dentry *au_lkup_by_ino(struct path *path, ino_t ino,
++				     struct au_nfsd_si_lock *nsi_lock)
++{
++	struct dentry *dentry, *parent;
++	struct file *file;
++	struct inode *dir;
++	struct find_name_by_ino arg;
++	int err;
++
++	parent = path->dentry;
++	if (nsi_lock)
++		si_read_unlock(parent->d_sb);
++	path_get(path);
++	file = dentry_open(parent, path->mnt, au_dir_roflags, current_cred());
++	dentry = (void *)file;
++	if (IS_ERR(file))
++		goto out;
++
++	dentry = ERR_PTR(-ENOMEM);
++	arg.name = __getname();
++	if (unlikely(!arg.name))
++		goto out_file;
++	arg.ino = ino;
++	arg.found = 0;
++	do {
++		arg.called = 0;
++		/* smp_mb(); */
++		err = vfsub_readdir(file, find_name_by_ino, &arg);
++	} while (!err && !arg.found && arg.called);
++	dentry = ERR_PTR(err);
++	if (unlikely(err))
++		goto out_name;
++	dentry = ERR_PTR(-ENOENT);
++	if (!arg.found)
++		goto out_name;
++
++	/* do not call au_lkup_one() */
++	dir = parent->d_inode;
++	mutex_lock(&dir->i_mutex);
++	dentry = vfsub_lookup_one_len(arg.name, parent, arg.namelen);
++	mutex_unlock(&dir->i_mutex);
++	AuTraceErrPtr(dentry);
++	if (IS_ERR(dentry))
++		goto out_name;
++	AuDebugOn(au_test_anon(dentry));
++	if (unlikely(!dentry->d_inode)) {
++		dput(dentry);
++		dentry = ERR_PTR(-ENOENT);
++	}
++
++ out_name:
++	__putname(arg.name);
++ out_file:
++	fput(file);
++ out:
++	if (unlikely(nsi_lock
++		     && si_nfsd_read_lock(parent->d_sb, nsi_lock) < 0))
++		if (!IS_ERR(dentry)) {
++			dput(dentry);
++			dentry = ERR_PTR(-ESTALE);
++		}
++	AuTraceErrPtr(dentry);
++	return dentry;
++}
++
++static struct dentry *decode_by_dir_ino(struct super_block *sb, ino_t ino,
++					ino_t dir_ino,
++					struct au_nfsd_si_lock *nsi_lock)
++{
++	struct dentry *dentry;
++	struct path path;
++
++	if (dir_ino != AUFS_ROOT_INO) {
++		path.dentry = decode_by_ino(sb, dir_ino, 0);
++		dentry = path.dentry;
++		if (!path.dentry || IS_ERR(path.dentry))
++			goto out;
++		AuDebugOn(au_test_anon(path.dentry));
++	} else
++		path.dentry = dget(sb->s_root);
++
++	path.mnt = au_mnt_get(sb);
++	dentry = au_lkup_by_ino(&path, ino, nsi_lock);
++	path_put(&path);
++
++ out:
++	AuTraceErrPtr(dentry);
++	return dentry;
++}
++
++/* ---------------------------------------------------------------------- */
++
++static int h_acceptable(void *expv, struct dentry *dentry)
++{
++	return 1;
++}
++
++static char *au_build_path(struct dentry *h_parent, struct path *h_rootpath,
++			   char *buf, int len, struct super_block *sb)
++{
++	char *p;
++	int n;
++	struct path path;
++
++	p = d_path(h_rootpath, buf, len);
++	if (IS_ERR(p))
++		goto out;
++	n = strlen(p);
++
++	path.mnt = h_rootpath->mnt;
++	path.dentry = h_parent;
++	p = d_path(&path, buf, len);
++	if (IS_ERR(p))
++		goto out;
++	if (n != 1)
++		p += n;
++
++	path.mnt = au_mnt_get(sb);
++	path.dentry = sb->s_root;
++	p = d_path(&path, buf, len - strlen(p));
++	mntput(path.mnt);
++	if (IS_ERR(p))
++		goto out;
++	if (n != 1)
++		p[strlen(p)] = '/';
++
++ out:
++	AuTraceErrPtr(p);
++	return p;
++}
++
++static
++struct dentry *decode_by_path(struct super_block *sb, aufs_bindex_t bindex,
++			      ino_t ino, __u32 *fh, int fh_len,
++			      struct au_nfsd_si_lock *nsi_lock)
++{
++	struct dentry *dentry, *h_parent, *root;
++	struct super_block *h_sb;
++	char *pathname, *p;
++	struct vfsmount *h_mnt;
++	struct au_branch *br;
++	int err;
++	struct path path;
++
++	br = au_sbr(sb, bindex);
++	/* au_br_get(br); */
++	h_mnt = br->br_mnt;
++	h_sb = h_mnt->mnt_sb;
++	/* todo: call lower fh_to_dentry()? fh_to_parent()? */
++	h_parent = exportfs_decode_fh(h_mnt, (void *)(fh + Fh_tail),
++				      fh_len - Fh_tail, fh[Fh_h_type],
++				      h_acceptable, /*context*/NULL);
++	dentry = h_parent;
++	if (unlikely(!h_parent || IS_ERR(h_parent))) {
++		AuWarn1("%s decode_fh failed, %ld\n",
++			au_sbtype(h_sb), PTR_ERR(h_parent));
++		goto out;
++	}
++	dentry = NULL;
++	if (unlikely(au_test_anon(h_parent))) {
++		AuWarn1("%s decode_fh returned a disconnected dentry\n",
++			au_sbtype(h_sb));
++		goto out_h_parent;
++	}
++
++	dentry = ERR_PTR(-ENOMEM);
++	pathname = (void *)__get_free_page(GFP_NOFS);
++	if (unlikely(!pathname))
++		goto out_h_parent;
++
++	root = sb->s_root;
++	path.mnt = h_mnt;
++	di_read_lock_parent(root, !AuLock_IR);
++	path.dentry = au_h_dptr(root, bindex);
++	di_read_unlock(root, !AuLock_IR);
++	p = au_build_path(h_parent, &path, pathname, PAGE_SIZE, sb);
++	dentry = (void *)p;
++	if (IS_ERR(p))
++		goto out_pathname;
++
++	si_read_unlock(sb);
++	err = vfsub_kern_path(p, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &path);
++	dentry = ERR_PTR(err);
++	if (unlikely(err))
++		goto out_relock;
++
++	dentry = ERR_PTR(-ENOENT);
++	AuDebugOn(au_test_anon(path.dentry));
++	if (unlikely(!path.dentry->d_inode))
++		goto out_path;
++
++	if (ino != path.dentry->d_inode->i_ino)
++		dentry = au_lkup_by_ino(&path, ino, /*nsi_lock*/NULL);
++	else
++		dentry = dget(path.dentry);
++
++ out_path:
++	path_put(&path);
++ out_relock:
++	if (unlikely(si_nfsd_read_lock(sb, nsi_lock) < 0))
++		if (!IS_ERR(dentry)) {
++			dput(dentry);
++			dentry = ERR_PTR(-ESTALE);
++		}
++ out_pathname:
++	free_page((unsigned long)pathname);
++ out_h_parent:
++	dput(h_parent);
++ out:
++	/* au_br_put(br); */
++	AuTraceErrPtr(dentry);
++	return dentry;
++}
++
++/* ---------------------------------------------------------------------- */
++
++static struct dentry *
++aufs_fh_to_dentry(struct super_block *sb, struct fid *fid, int fh_len,
++		  int fh_type)
++{
++	struct dentry *dentry;
++	__u32 *fh = fid->raw;
++	ino_t ino, dir_ino;
++	aufs_bindex_t bindex;
++	struct au_nfsd_si_lock nsi_lock = {
++		.sigen		= fh[Fh_sigen],
++		.br_id		= fh[Fh_br_id],
++		.force_lock	= 0
++	};
++
++	AuDebugOn(fh_len < Fh_tail);
++
++	dentry = ERR_PTR(-ESTALE);
++	/* branch id may be wrapped around */
++	bindex = si_nfsd_read_lock(sb, &nsi_lock);
++	if (unlikely(bindex < 0))
++		goto out;
++	nsi_lock.force_lock = 1;
++
++	/* is this inode still cached? */
++	ino = decode_ino(fh + Fh_ino);
++	AuDebugOn(ino == AUFS_ROOT_INO);
++	dir_ino = decode_ino(fh + Fh_dir_ino);
++	dentry = decode_by_ino(sb, ino, dir_ino);
++	if (IS_ERR(dentry))
++		goto out_unlock;
++	if (dentry)
++		goto accept;
++
++	/* is the parent dir cached? */
++	dentry = decode_by_dir_ino(sb, ino, dir_ino, &nsi_lock);
++	if (IS_ERR(dentry))
++		goto out_unlock;
++	if (dentry)
++		goto accept;
++
++	/* lookup path */
++	dentry = decode_by_path(sb, bindex, ino, fh, fh_len, &nsi_lock);
++	if (IS_ERR(dentry))
++		goto out_unlock;
++	if (unlikely(!dentry))
++		/* todo?: make it ESTALE */
++		goto out_unlock;
++
++ accept:
++	if (dentry->d_inode->i_generation == fh[Fh_igen])
++		goto out_unlock; /* success */
++
++	dput(dentry);
++	dentry = ERR_PTR(-ESTALE);
++ out_unlock:
++	si_read_unlock(sb);
++ out:
++	AuTraceErrPtr(dentry);
++	return dentry;
++}
++
++#if 0 /* reserved for future use */
++/* support subtreecheck option */
++static struct dentry *aufs_fh_to_parent(struct super_block *sb, struct fid *fid,
++					int fh_len, int fh_type)
++{
++	struct dentry *parent;
++	__u32 *fh = fid->raw;
++	ino_t dir_ino;
++
++	dir_ino = decode_ino(fh + Fh_dir_ino);
++	parent = decode_by_ino(sb, dir_ino, 0);
++	if (IS_ERR(parent))
++		goto out;
++	if (!parent)
++		parent = decode_by_path(sb, au_br_index(sb, fh[Fh_br_id]),
++					dir_ino, fh, fh_len);
++
++ out:
++	AuTraceErrPtr(parent);
++	return parent;
++}
++#endif
++
++/* ---------------------------------------------------------------------- */
++
++static int aufs_encode_fh(struct dentry *dentry, __u32 *fh, int *max_len,
++			  int connectable)
++{
++	int err;
++	aufs_bindex_t bindex, bend;
++	struct super_block *sb, *h_sb;
++	struct inode *inode;
++	struct dentry *parent, *h_parent;
++	struct au_branch *br;
++
++	AuDebugOn(au_test_anon(dentry));
++
++	parent = NULL;
++	err = -ENOSPC;
++	if (unlikely(*max_len <= Fh_tail)) {
++		AuWarn1("NFSv2 client (max_len %d)?\n", *max_len);
++		goto out;
++	}
++
++	err = FILEID_ROOT;
++	if (IS_ROOT(dentry)) {
++		AuDebugOn(dentry->d_inode->i_ino != AUFS_ROOT_INO);
++		goto out;
++	}
++
++	err = -EIO;
++	h_parent = NULL;
++	sb = dentry->d_sb;
++	aufs_read_lock(dentry, AuLock_FLUSH | AuLock_IR);
++	parent = dget_parent(dentry);
++	di_read_lock_parent(parent, !AuLock_IR);
++	inode = dentry->d_inode;
++	AuDebugOn(!inode);
++#ifdef CONFIG_AUFS_DEBUG
++	if (unlikely(!au_opt_test(au_mntflags(sb), XINO)))
++		AuWarn1("NFS-exporting requires xino\n");
++#endif
++
++	bend = au_dbtaildir(parent);
++	for (bindex = au_dbstart(parent); bindex <= bend; bindex++) {
++		h_parent = au_h_dptr(parent, bindex);
++		if (h_parent) {
++			dget(h_parent);
++			break;
++		}
++	}
++	if (unlikely(!h_parent))
++		goto out_unlock;
++
++	err = -EPERM;
++	br = au_sbr(sb, bindex);
++	h_sb = br->br_mnt->mnt_sb;
++	if (unlikely(!h_sb->s_export_op)) {
++		AuErr1("%s branch is not exportable\n", au_sbtype(h_sb));
++		goto out_dput;
++	}
++
++	fh[Fh_br_id] = br->br_id;
++	fh[Fh_sigen] = au_sigen(sb);
++	encode_ino(fh + Fh_ino, inode->i_ino);
++	encode_ino(fh + Fh_dir_ino, parent->d_inode->i_ino);
++	fh[Fh_igen] = inode->i_generation;
++
++	*max_len -= Fh_tail;
++	fh[Fh_h_type] = exportfs_encode_fh(h_parent, (void *)(fh + Fh_tail),
++					   max_len,
++					   /*connectable or subtreecheck*/0);
++	err = fh[Fh_h_type];
++	*max_len += Fh_tail;
++	/* todo: macros? */
++	if (err != 255)
++		err = 99;
++	else
++		AuWarn1("%s encode_fh failed\n", au_sbtype(h_sb));
++
++ out_dput:
++	dput(h_parent);
++ out_unlock:
++	di_read_unlock(parent, !AuLock_IR);
++	dput(parent);
++	aufs_read_unlock(dentry, AuLock_IR);
++ out:
++	if (unlikely(err < 0))
++		err = 255;
++	return err;
++}
++
++/* ---------------------------------------------------------------------- */
++
++static struct export_operations aufs_export_op = {
++	.fh_to_dentry	= aufs_fh_to_dentry,
++	/* .fh_to_parent	= aufs_fh_to_parent, */
++	.encode_fh	= aufs_encode_fh
++};
++
++void au_export_init(struct super_block *sb)
++{
++	struct au_sbinfo *sbinfo;
++	__u32 u;
++
++	sb->s_export_op = &aufs_export_op;
++	sbinfo = au_sbi(sb);
++	sbinfo->si_xigen = NULL;
++	get_random_bytes(&u, sizeof(u));
++	BUILD_BUG_ON(sizeof(u) != sizeof(int));
++	atomic_set(&sbinfo->si_xigen_next, u);
++}
+diff -Nur linux-2.6.31.5.orig/fs/aufs/file.c linux-2.6.31.5/fs/aufs/file.c
+--- linux-2.6.31.5.orig/fs/aufs/file.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.31.5/fs/aufs/file.c	2009-11-15 22:02:37.000000000 +0100
+@@ -0,0 +1,578 @@
++/*
++ * Copyright (C) 2005-2009 Junjiro R. Okajima
++ *
++ * This program, aufs is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
++ */
++
++/*
++ * handling file/dir, and address_space operation
++ */
++
++#include <linux/file.h>
++#include <linux/fsnotify.h>
++#include <linux/namei.h>
++#include <linux/pagemap.h>
++#include "aufs.h"
++
++/*
++ * a dirty trick for handling deny_write_access().
++ * because FMODE_EXEC flag is not passed to f_op->open(),
++ * set it to file->private_data temporary.
++ */
++void au_store_oflag(struct nameidata *nd, struct inode *inode)
++{
++	if (nd
++	    /* && !(nd->flags & LOOKUP_CONTINUE) */
++	    && (nd->flags & LOOKUP_OPEN)
++	    && (nd->intent.open.flags & vfsub_fmode_to_uint(FMODE_EXEC))
++	    && inode
++	    && S_ISREG(inode->i_mode)) {
++		/* suppress a warning in lp64 */
++		unsigned long flags = nd->intent.open.flags;
++		nd->intent.open.file->private_data = (void *)flags;
++		/* smp_mb(); */
++	}
++}
++
++/* drop flags for writing */
++unsigned int au_file_roflags(unsigned int flags)
++{
++	flags &= ~(O_WRONLY | O_RDWR | O_APPEND | O_CREAT | O_TRUNC);
++	flags |= O_RDONLY | O_NOATIME;
++	return flags;
++}
++
++/* common functions to regular file and dir */
++struct file *au_h_open(struct dentry *dentry, aufs_bindex_t bindex, int flags,
++		       struct file *file)
++{
++	struct file *h_file;
++	struct dentry *h_dentry;
++	struct inode *h_inode;
++	struct super_block *sb;
++	struct au_branch *br;
++	int err;
++
++	/* a race condition can happen between open and unlink/rmdir */
++	h_file = ERR_PTR(-ENOENT);
++	h_dentry = au_h_dptr(dentry, bindex);
++	if (au_test_nfsd(current) && !h_dentry)
++		goto out;
++	h_inode = h_dentry->d_inode;
++	if (au_test_nfsd(current) && !h_inode)
++		goto out;
++	if (unlikely((!d_unhashed(dentry) && d_unhashed(h_dentry))
++		     || !h_inode))
++		goto out;
++
++	sb = dentry->d_sb;
++	br = au_sbr(sb, bindex);
++	h_file = ERR_PTR(-EACCES);
++	if (file && (file->f_mode & FMODE_EXEC)
++	    && (br->br_mnt->mnt_flags & MNT_NOEXEC))
++		goto out;
++
++	/* drop flags for writing */
++	if (au_test_ro(sb, bindex, dentry->d_inode))
++		flags = au_file_roflags(flags);
++	flags &= ~O_CREAT;
++	atomic_inc(&br->br_count);
++	h_file = dentry_open(dget(h_dentry), mntget(br->br_mnt), flags,
++			     current_cred());
++	if (IS_ERR(h_file))
++		goto out_br;
++
++	if (file && (file->f_mode & FMODE_EXEC)) {
++		h_file->f_mode |= FMODE_EXEC;
++		err = deny_write_access(h_file);
++		if (unlikely(err)) {
++			fput(h_file);
++			h_file = ERR_PTR(err);
++			goto out_br;
++		}
++	}
++	fsnotify_open(h_dentry);
++	goto out; /* success */
++
++ out_br:
++	atomic_dec(&br->br_count);
++ out:
++	return h_file;
++}
++
++int au_do_open(struct file *file, int (*open)(struct file *file, int flags))
++{
++	int err;
++	struct dentry *dentry;
++	struct super_block *sb;
++
++	dentry = file->f_dentry;
++	sb = dentry->d_sb;
++	si_read_lock(sb, AuLock_FLUSH);
++	err = au_finfo_init(file);
++	if (unlikely(err))
++		goto out;
++
++	di_read_lock_child(dentry, AuLock_IR);
++	err = open(file, file->f_flags);
++	di_read_unlock(dentry, AuLock_IR);
++
++	fi_write_unlock(file);
++	if (unlikely(err))
++		au_finfo_fin(file);
++ out:
++	si_read_unlock(sb);
++	return err;
++}
++
++int au_reopen_nondir(struct file *file)
++{
++	int err;
++	aufs_bindex_t bstart, bindex, bend;
++	struct dentry *dentry;
++	struct file *h_file, *h_file_tmp;
++
++	dentry = file->f_dentry;
++	bstart = au_dbstart(dentry);
++	h_file_tmp = NULL;
++	if (au_fbstart(file) == bstart) {
++		h_file = au_h_fptr(file, bstart);
++		if (file->f_mode == h_file->f_mode)
++			return 0; /* success */
++		h_file_tmp = h_file;
++		get_file(h_file_tmp);
++		au_set_h_fptr(file, bstart, NULL);
++	}
++	AuDebugOn(au_fbstart(file) < bstart
++		  || au_fi(file)->fi_hfile[0 + bstart].hf_file);
++
++	h_file = au_h_open(dentry, bstart, file->f_flags & ~O_TRUNC, file);
++	err = PTR_ERR(h_file);
++	if (IS_ERR(h_file))
++		goto out; /* todo: close all? */
++
++	err = 0;
++	au_set_fbstart(file, bstart);
++	au_set_h_fptr(file, bstart, h_file);
++	au_update_figen(file);
++	/* todo: necessary? */
++	/* file->f_ra = h_file->f_ra; */
++
++	/* close lower files */
++	bend = au_fbend(file);
++	for (bindex = bstart + 1; bindex <= bend; bindex++)
++		au_set_h_fptr(file, bindex, NULL);
++	au_set_fbend(file, bstart);
++
++ out:
++	if (h_file_tmp)
++		fput(h_file_tmp);
++	return err;
++}
++
++/* ---------------------------------------------------------------------- */
++
++static int au_reopen_wh(struct file *file, aufs_bindex_t btgt,
++			struct dentry *hi_wh)
++{
++	int err;
++	aufs_bindex_t bstart;
++	struct au_dinfo *dinfo;
++	struct dentry *h_dentry;
++
++	dinfo = au_di(file->f_dentry);
++	AuRwMustWriteLock(&dinfo->di_rwsem);
++
++	bstart = dinfo->di_bstart;
++	dinfo->di_bstart = btgt;
++	h_dentry = dinfo->di_hdentry[0 + btgt].hd_dentry;
++	dinfo->di_hdentry[0 + btgt].hd_dentry = hi_wh;
++	err = au_reopen_nondir(file);
++	dinfo->di_hdentry[0 + btgt].hd_dentry = h_dentry;
++	dinfo->di_bstart = bstart;
++
++	return err;
++}
++
++static int au_ready_to_write_wh(struct file *file, loff_t len,
++				aufs_bindex_t bcpup)
++{
++	int err;
++	struct inode *inode;
++	struct dentry *dentry, *hi_wh;
++	struct super_block *sb;
++
++	dentry = file->f_dentry;
++	inode = dentry->d_inode;
++	hi_wh = au_hi_wh(inode, bcpup);
++	if (!hi_wh)
++		err = au_sio_cpup_wh(dentry, bcpup, len, file);
++	else
++		/* already copied-up after unlink */
++		err = au_reopen_wh(file, bcpup, hi_wh);
++
++	sb = dentry->d_sb;
++	if (!err && inode->i_nlink > 1 && au_opt_test(au_mntflags(sb), PLINK))
++		au_plink_append(inode, bcpup, au_h_dptr(dentry, bcpup));
++
++	return err;
++}
++
++/*
++ * prepare the @file for writing.
++ */
++int au_ready_to_write(struct file *file, loff_t len, struct au_pin *pin)
++{
++	int err;
++	aufs_bindex_t bstart, bcpup;
++	struct dentry *dentry, *parent, *h_dentry;
++	struct inode *h_inode, *inode;
++	struct super_block *sb;
++
++	dentry = file->f_dentry;
++	sb = dentry->d_sb;
++	bstart = au_fbstart(file);
++	inode = dentry->d_inode;
++	err = au_test_ro(sb, bstart, inode);
++	if (!err && (au_h_fptr(file, bstart)->f_mode & FMODE_WRITE)) {
++		err = au_pin(pin, dentry, bstart, AuOpt_UDBA_NONE, /*flags*/0);
++		goto out;
++	}
++
++	/* need to cpup */
++	parent = dget_parent(dentry);
++	di_write_lock_parent(parent);
++	err = AuWbrCopyup(au_sbi(sb), dentry);
++	bcpup = err;
++	if (unlikely(err < 0))
++		goto out_dgrade;
++	err = 0;
++
++	if (!au_h_dptr(parent, bcpup)) {
++		err = au_cpup_dirs(dentry, bcpup);
++		if (unlikely(err))
++			goto out_dgrade;
++	}
++
++	err = au_pin(pin, dentry, bcpup, AuOpt_UDBA_NONE,
++		     AuPin_DI_LOCKED | AuPin_MNT_WRITE);
++	if (unlikely(err))
++		goto out_dgrade;
++
++	h_dentry = au_h_fptr(file, bstart)->f_dentry;
++	h_inode = h_dentry->d_inode;
++	mutex_lock_nested(&h_inode->i_mutex, AuLsc_I_CHILD);
++	if (d_unhashed(dentry) /* || d_unhashed(h_dentry) */
++	    /* || !h_inode->i_nlink */) {
++		err = au_ready_to_write_wh(file, len, bcpup);
++		di_downgrade_lock(parent, AuLock_IR);
++	} else {
++		di_downgrade_lock(parent, AuLock_IR);
++		if (!au_h_dptr(dentry, bcpup))
++			err = au_sio_cpup_simple(dentry, bcpup, len,
++						 AuCpup_DTIME);
++		if (!err)
++			err = au_reopen_nondir(file);
++	}
++	mutex_unlock(&h_inode->i_mutex);
++
++	if (!err) {
++		au_pin_set_parent_lflag(pin, /*lflag*/0);
++		goto out_dput; /* success */
++	}
++	au_unpin(pin);
++	goto out_unlock;
++
++ out_dgrade:
++	di_downgrade_lock(parent, AuLock_IR);
++ out_unlock:
++	di_read_unlock(parent, AuLock_IR);
++ out_dput:
++	dput(parent);
++ out:
++	return err;
++}
++
++/* ---------------------------------------------------------------------- */
++
++static int au_file_refresh_by_inode(struct file *file, int *need_reopen)
++{
++	int err;
++	aufs_bindex_t bstart;
++	struct au_pin pin;
++	struct au_finfo *finfo;
++	struct dentry *dentry, *parent, *hi_wh;
++	struct inode *inode;
++	struct super_block *sb;
++
++	FiMustWriteLock(file);
++
++	err = 0;
++	finfo = au_fi(file);
++	dentry = file->f_dentry;
++	sb = dentry->d_sb;
++	inode = dentry->d_inode;
++	bstart = au_ibstart(inode);
++	if (bstart == finfo->fi_bstart)
++		goto out;
++
++	parent = dget_parent(dentry);
++	if (au_test_ro(sb, bstart, inode)) {
++		di_read_lock_parent(parent, !AuLock_IR);
++		err = AuWbrCopyup(au_sbi(sb), dentry);
++		bstart = err;
++		di_read_unlock(parent, !AuLock_IR);
++		if (unlikely(err < 0))
++			goto out_parent;
++		err = 0;
++	}
++
++	di_read_lock_parent(parent, AuLock_IR);
++	hi_wh = au_hi_wh(inode, bstart);
++	if (au_opt_test(au_mntflags(sb), PLINK)
++	    && au_plink_test(inode)
++	    && !d_unhashed(dentry)) {
++		err = au_test_and_cpup_dirs(dentry, bstart);
++		if (unlikely(err))
++			goto out_unlock;
++
++		/* always superio. */
++		err = au_pin(&pin, dentry, bstart, AuOpt_UDBA_NONE,
++			     AuPin_DI_LOCKED | AuPin_MNT_WRITE);
++		if (!err)
++			err = au_sio_cpup_simple(dentry, bstart, -1,
++						 AuCpup_DTIME);
++		au_unpin(&pin);
++	} else if (hi_wh) {
++		/* already copied-up after unlink */
++		err = au_reopen_wh(file, bstart, hi_wh);
++		*need_reopen = 0;
++	}
++
++ out_unlock:
++	di_read_unlock(parent, AuLock_IR);
++ out_parent:
++	dput(parent);
++ out:
++	return err;
++}
++
++static void au_do_refresh_file(struct file *file)
++{
++	aufs_bindex_t bindex, bend, new_bindex, brid;
++	struct au_hfile *p, tmp, *q;
++	struct au_finfo *finfo;
++	struct super_block *sb;
++
++	FiMustWriteLock(file);
++
++	sb = file->f_dentry->d_sb;
++	finfo = au_fi(file);
++	p = finfo->fi_hfile + finfo->fi_bstart;
++	brid = p->hf_br->br_id;
++	bend = finfo->fi_bend;
++	for (bindex = finfo->fi_bstart; bindex <= bend; bindex++, p++) {
++		if (!p->hf_file)
++			continue;
++
++		new_bindex = au_br_index(sb, p->hf_br->br_id);
++		if (new_bindex == bindex)
++			continue;
++		if (new_bindex < 0) {
++			au_set_h_fptr(file, bindex, NULL);
++			continue;
++		}
++
++		/* swap two lower inode, and loop again */
++		q = finfo->fi_hfile + new_bindex;
++		tmp = *q;
++		*q = *p;
++		*p = tmp;
++		if (tmp.hf_file) {
++			bindex--;
++			p--;
++		}
++	}
++
++	p = finfo->fi_hfile;
++	if (!au_test_mmapped(file) && !d_unhashed(file->f_dentry)) {
++		bend = au_sbend(sb);
++		for (finfo->fi_bstart = 0; finfo->fi_bstart <= bend;
++		     finfo->fi_bstart++, p++)
++			if (p->hf_file) {
++				if (p->hf_file->f_dentry
++				    && p->hf_file->f_dentry->d_inode)
++					break;
++				else
++					au_hfput(p, file);
++			}
++	} else {
++		bend = au_br_index(sb, brid);
++		for (finfo->fi_bstart = 0; finfo->fi_bstart < bend;
++		     finfo->fi_bstart++, p++)
++			if (p->hf_file)
++				au_hfput(p, file);
++		bend = au_sbend(sb);
++	}
++
++	p = finfo->fi_hfile + bend;
++	for (finfo->fi_bend = bend; finfo->fi_bend >= finfo->fi_bstart;
++	     finfo->fi_bend--, p--)
++		if (p->hf_file) {
++			if (p->hf_file->f_dentry
++			    && p->hf_file->f_dentry->d_inode)
++				break;
++			else
++				au_hfput(p, file);
++		}
++	AuDebugOn(finfo->fi_bend < finfo->fi_bstart);
++}
++
++/*
++ * after branch manipulating, refresh the file.
++ */
++static int refresh_file(struct file *file, int (*reopen)(struct file *file))
++{
++	int err, need_reopen;
++	struct dentry *dentry;
++	aufs_bindex_t bend, bindex;
++
++	dentry = file->f_dentry;
++	err = au_fi_realloc(au_fi(file), au_sbend(dentry->d_sb) + 1);
++	if (unlikely(err))
++		goto out;
++	au_do_refresh_file(file);
++
++	err = 0;
++	need_reopen = 1;
++	if (!au_test_mmapped(file))
++		err = au_file_refresh_by_inode(file, &need_reopen);
++	if (!err && need_reopen && !d_unhashed(dentry))
++		err = reopen(file);
++	if (!err) {
++		au_update_figen(file);
++		return 0; /* success */
++	}
++
++	/* error, close all lower files */
++	bend = au_fbend(file);
++	for (bindex = au_fbstart(file); bindex <= bend; bindex++)
++		au_set_h_fptr(file, bindex, NULL);
++
++ out:
++	return err;
++}
++
++/* common function to regular file and dir */
++int au_reval_and_lock_fdi(struct file *file, int (*reopen)(struct file *file),
++			  int wlock)
++{
++	int err;
++	unsigned int sigen, figen;
++	aufs_bindex_t bstart;
++	unsigned char pseudo_link;
++	struct dentry *dentry;
++
++	err = 0;
++	dentry = file->f_dentry;
++	sigen = au_sigen(dentry->d_sb);
++	fi_write_lock(file);
++	figen = au_figen(file);
++	di_write_lock_child(dentry);
++	bstart = au_dbstart(dentry);
++	pseudo_link = (bstart != au_ibstart(dentry->d_inode));
++	if (sigen == figen && !pseudo_link && au_fbstart(file) == bstart) {
++		if (!wlock) {
++			di_downgrade_lock(dentry, AuLock_IR);
++			fi_downgrade_lock(file);
++		}
++		goto out; /* success */
++	}
++
++	AuDbg("sigen %d, figen %d\n", sigen, figen);
++	if (sigen != au_digen(dentry)
++	    || sigen != au_iigen(dentry->d_inode)) {
++		err = au_reval_dpath(dentry, sigen);
++		if (unlikely(err < 0))
++			goto out;
++		AuDebugOn(au_digen(dentry) != sigen
++			  || au_iigen(dentry->d_inode) != sigen);
++	}
++
++	err = refresh_file(file, reopen);
++	if (!err) {
++		if (!wlock) {
++			di_downgrade_lock(dentry, AuLock_IR);
++			fi_downgrade_lock(file);
++		}
++	} else {
++		di_write_unlock(dentry);
++		fi_write_unlock(file);
++	}
++
++ out:
++	return err;
++}
++
++/* ---------------------------------------------------------------------- */
++
++/* cf. aufs_nopage() */
++/* for madvise(2) */
++static int aufs_readpage(struct file *file __maybe_unused, struct page *page)
++{
++	unlock_page(page);
++	return 0;
++}
++
++/* they will never be called. */
++#ifdef CONFIG_AUFS_DEBUG
++static int aufs_write_begin(struct file *file, struct address_space *mapping,
++			    loff_t pos, unsigned len, unsigned flags,
++			    struct page **pagep, void **fsdata)
++{ AuUnsupport(); return 0; }
++static int aufs_write_end(struct file *file, struct address_space *mapping,
++			  loff_t pos, unsigned len, unsigned copied,
++			  struct page *page, void *fsdata)
++{ AuUnsupport(); return 0; }
++static int aufs_writepage(struct page *page, struct writeback_control *wbc)
++{ AuUnsupport(); return 0; }
++static void aufs_sync_page(struct page *page)
++{ AuUnsupport(); }
++
++static int aufs_set_page_dirty(struct page *page)
++{ AuUnsupport(); return 0; }
++static void aufs_invalidatepage(struct page *page, unsigned long offset)
++{ AuUnsupport(); }
++static int aufs_releasepage(struct page *page, gfp_t gfp)
++{ AuUnsupport(); return 0; }
++static ssize_t aufs_direct_IO(int rw, struct kiocb *iocb,
++			      const struct iovec *iov, loff_t offset,
++			      unsigned long nr_segs)
++{ AuUnsupport(); return 0; }
++#endif /* CONFIG_AUFS_DEBUG */
++
++struct address_space_operations aufs_aop = {
++	.readpage	= aufs_readpage,
++#ifdef CONFIG_AUFS_DEBUG
++	.writepage	= aufs_writepage,
++	.sync_page	= aufs_sync_page,
++	.set_page_dirty	= aufs_set_page_dirty,
++	.write_begin	= aufs_write_begin,
++	.write_end	= aufs_write_end,
++	.invalidatepage	= aufs_invalidatepage,
++	.releasepage	= aufs_releasepage,
++	.direct_IO	= aufs_direct_IO,
++#endif /* CONFIG_AUFS_DEBUG */
++};
+diff -Nur linux-2.6.31.5.orig/fs/aufs/file.h linux-2.6.31.5/fs/aufs/file.h
+--- linux-2.6.31.5.orig/fs/aufs/file.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.31.5/fs/aufs/file.h	2009-11-15 22:02:37.000000000 +0100
+@@ -0,0 +1,175 @@
++/*
++ * Copyright (C) 2005-2009 Junjiro R. Okajima
++ *
++ * This program, aufs is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
++ */
++
++/*
++ * file operations
++ */
++
++#ifndef __AUFS_FILE_H__
++#define __AUFS_FILE_H__
++
++#ifdef __KERNEL__
++
++#include <linux/fs.h>
++#include <linux/poll.h>
++#include <linux/aufs_type.h>
++#include "rwsem.h"
++
++struct au_branch;
++struct au_hfile {
++	struct file		*hf_file;
++	struct au_branch	*hf_br;
++};
++
++struct au_vdir;
++struct au_finfo {
++	atomic_t		fi_generation;
++
++	struct au_rwsem		fi_rwsem;
++	struct au_hfile		*fi_hfile;
++	aufs_bindex_t		fi_bstart, fi_bend;
++
++	union {
++		/* non-dir only */
++		struct {
++			struct vm_operations_struct	*fi_h_vm_ops;
++			struct vm_operations_struct	*fi_vm_ops;
++		};
++
++		/* dir only */
++		struct {
++			struct au_vdir		*fi_vdir_cache;
++			int			fi_maintain_plink;
++		};
++	};
++};
++
++/* ---------------------------------------------------------------------- */
++
++/* file.c */
++extern struct address_space_operations aufs_aop;
++void au_store_oflag(struct nameidata *nd, struct inode *inode);
++unsigned int au_file_roflags(unsigned int flags);
++struct file *au_h_open(struct dentry *dentry, aufs_bindex_t bindex, int flags,
++		       struct file *file);
++int au_do_open(struct file *file, int (*open)(struct file *file, int flags));
++int au_reopen_nondir(struct file *file);
++struct au_pin;
++int au_ready_to_write(struct file *file, loff_t len, struct au_pin *pin);
++int au_reval_and_lock_fdi(struct file *file, int (*reopen)(struct file *file),
++			  int wlock);
++
++/* poll.c */
++#ifdef CONFIG_AUFS_POLL
++unsigned int aufs_poll(struct file *file, poll_table *wait);
++#endif
++
++/* f_op.c */
++extern const struct file_operations aufs_file_fop;
++int aufs_flush(struct file *file, fl_owner_t id);
++
++/* finfo.c */
++void au_hfput(struct au_hfile *hf, struct file *file);
++void au_set_h_fptr(struct file *file, aufs_bindex_t bindex,
++		   struct file *h_file);
++
++void au_update_figen(struct file *file);
++
++void au_finfo_fin(struct file *file);
++int au_finfo_init(struct file *file);
++int au_fi_realloc(struct au_finfo *finfo, int nbr);
++
++/* ---------------------------------------------------------------------- */
++
++static inline struct au_finfo *au_fi(struct file *file)
++{
++	return file->private_data;
++}
++
++/* ---------------------------------------------------------------------- */
++
++/*
++ * fi_read_lock, fi_write_lock,
++ * fi_read_unlock, fi_write_unlock, fi_downgrade_lock
++ */
++AuSimpleRwsemFuncs(fi, struct file *f, &au_fi(f)->fi_rwsem);
++
++#define FiMustNoWaiters(f)	AuRwMustNoWaiters(&au_fi(f)->fi_rwsem)
++#define FiMustAnyLock(f)	AuRwMustAnyLock(&au_fi(f)->fi_rwsem)
++#define FiMustWriteLock(f)	AuRwMustWriteLock(&au_fi(f)->fi_rwsem)
++
++/* ---------------------------------------------------------------------- */
++
++/* todo: hard/soft set? */
++static inline aufs_bindex_t au_fbstart(struct file *file)
++{
++	FiMustAnyLock(file);
++	return au_fi(file)->fi_bstart;
++}
++
++static inline aufs_bindex_t au_fbend(struct file *file)
++{
++	FiMustAnyLock(file);
++	return au_fi(file)->fi_bend;
++}
++
++static inline struct au_vdir *au_fvdir_cache(struct file *file)
++{
++	FiMustAnyLock(file);
++	return au_fi(file)->fi_vdir_cache;
++}
++
++static inline void au_set_fbstart(struct file *file, aufs_bindex_t bindex)
++{
++	FiMustWriteLock(file);
++	au_fi(file)->fi_bstart = bindex;
++}
++
++static inline void au_set_fbend(struct file *file, aufs_bindex_t bindex)
++{
++	FiMustWriteLock(file);
++	au_fi(file)->fi_bend = bindex;
++}
++
++static inline void au_set_fvdir_cache(struct file *file,
++				      struct au_vdir *vdir_cache)
++{
++	FiMustWriteLock(file);
++	au_fi(file)->fi_vdir_cache = vdir_cache;
++}
++
++static inline struct file *au_h_fptr(struct file *file, aufs_bindex_t bindex)
++{
++	FiMustAnyLock(file);
++	return au_fi(file)->fi_hfile[0 + bindex].hf_file;
++}
++
++/* todo: memory barrier? */
++static inline unsigned int au_figen(struct file *f)
++{
++	return atomic_read(&au_fi(f)->fi_generation);
++}
++
++static inline int au_test_mmapped(struct file *f)
++{
++	FiMustAnyLock(f);
++	return !!(au_fi(f)->fi_h_vm_ops);
++}
++
++#endif /* __KERNEL__ */
++#endif /* __AUFS_FILE_H__ */
+diff -Nur linux-2.6.31.5.orig/fs/aufs/finfo.c linux-2.6.31.5/fs/aufs/finfo.c
+--- linux-2.6.31.5.orig/fs/aufs/finfo.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.31.5/fs/aufs/finfo.c	2009-11-15 22:02:37.000000000 +0100
+@@ -0,0 +1,133 @@
++/*
++ * Copyright (C) 2005-2009 Junjiro R. Okajima
++ *
++ * This program, aufs is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
++ */
++
++/*
++ * file private data
++ */
++
++#include <linux/file.h>
++#include "aufs.h"
++
++void au_hfput(struct au_hfile *hf, struct file *file)
++{
++	if (file->f_mode & FMODE_EXEC)
++		allow_write_access(hf->hf_file);
++	fput(hf->hf_file);
++	hf->hf_file = NULL;
++	atomic_dec_return(&hf->hf_br->br_count);
++	hf->hf_br = NULL;
++}
++
++void au_set_h_fptr(struct file *file, aufs_bindex_t bindex, struct file *val)
++{
++	struct au_finfo *finfo = au_fi(file);
++	struct au_hfile *hf;
++
++	hf = finfo->fi_hfile + bindex;
++	if (hf->hf_file)
++		au_hfput(hf, file);
++	if (val) {
++		hf->hf_file = val;
++		hf->hf_br = au_sbr(file->f_dentry->d_sb, bindex);
++	}
++}
++
++void au_update_figen(struct file *file)
++{
++	atomic_set(&au_fi(file)->fi_generation, au_digen(file->f_dentry));
++	/* smp_mb(); */ /* atomic_set */
++}
++
++/* ---------------------------------------------------------------------- */
++
++void au_finfo_fin(struct file *file)
++{
++	struct au_finfo *finfo;
++	aufs_bindex_t bindex, bend;
++
++	fi_write_lock(file);
++	bend = au_fbend(file);
++	bindex = au_fbstart(file);
++	if (bindex >= 0)
++		/*
++		 * calls fput() instead of filp_close(),
++		 * since no dnotify or lock for the lower file.
++		 */
++		for (; bindex <= bend; bindex++)
++			au_set_h_fptr(file, bindex, NULL);
++
++	finfo = au_fi(file);
++	au_dbg_verify_hf(finfo);
++	kfree(finfo->fi_hfile);
++	fi_write_unlock(file);
++	AuRwDestroy(&finfo->fi_rwsem);
++	au_cache_free_finfo(finfo);
++}
++
++int au_finfo_init(struct file *file)
++{
++	struct au_finfo *finfo;
++	struct dentry *dentry;
++	unsigned long ul;
++
++	dentry = file->f_dentry;
++	finfo = au_cache_alloc_finfo();
++	if (unlikely(!finfo))
++		goto out;
++
++	finfo->fi_hfile = kcalloc(au_sbend(dentry->d_sb) + 1,
++				  sizeof(*finfo->fi_hfile), GFP_NOFS);
++	if (unlikely(!finfo->fi_hfile))
++		goto out_finfo;
++
++	au_rw_init_wlock(&finfo->fi_rwsem);
++	finfo->fi_bstart = -1;
++	finfo->fi_bend = -1;
++	atomic_set(&finfo->fi_generation, au_digen(dentry));
++	/* smp_mb(); */ /* atomic_set */
++
++	/* cf. au_store_oflag() */
++	/* suppress a warning in lp64 */
++	ul = (unsigned long)file->private_data;
++	file->f_mode |= (vfsub_uint_to_fmode(ul) & FMODE_EXEC);
++	file->private_data = finfo;
++	return 0; /* success */
++
++ out_finfo:
++	au_cache_free_finfo(finfo);
++ out:
++	return -ENOMEM;
++}
++
++int au_fi_realloc(struct au_finfo *finfo, int nbr)
++{
++	int err, sz;
++	struct au_hfile *hfp;
++
++	err = -ENOMEM;
++	sz = sizeof(*hfp) * (finfo->fi_bend + 1);
++	if (!sz)
++		sz = sizeof(*hfp);
++	hfp = au_kzrealloc(finfo->fi_hfile, sz, sizeof(*hfp) * nbr, GFP_NOFS);
++	if (hfp) {
++		finfo->fi_hfile = hfp;
++		err = 0;
++	}
++
++	return err;
++}
+diff -Nur linux-2.6.31.5.orig/fs/aufs/f_op.c linux-2.6.31.5/fs/aufs/f_op.c
+--- linux-2.6.31.5.orig/fs/aufs/f_op.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.31.5/fs/aufs/f_op.c	2009-11-15 22:02:37.000000000 +0100
+@@ -0,0 +1,802 @@
++/*
++ * Copyright (C) 2005-2009 Junjiro R. Okajima
++ *
++ * This program, aufs is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
++ */
++
++/*
++ * file and vm operations
++ */
++
++#include <linux/file.h>
++#include <linux/fs_stack.h>
++#include <linux/mm.h>
++#include <linux/security.h>
++#include "aufs.h"
++
++/* common function to regular file and dir */
++int aufs_flush(struct file *file, fl_owner_t id)
++{
++	int err;
++	aufs_bindex_t bindex, bend;
++	struct dentry *dentry;
++	struct file *h_file;
++
++	dentry = file->f_dentry;
++	si_noflush_read_lock(dentry->d_sb);
++	fi_read_lock(file);
++	di_read_lock_child(dentry, AuLock_IW);
++
++	err = 0;
++	bend = au_fbend(file);
++	for (bindex = au_fbstart(file); !err && bindex <= bend; bindex++) {
++		h_file = au_h_fptr(file, bindex);
++		if (!h_file || !h_file->f_op || !h_file->f_op->flush)
++			continue;
++
++		err = h_file->f_op->flush(h_file, id);
++		if (!err)
++			vfsub_update_h_iattr(&h_file->f_path, /*did*/NULL);
++		/*ignore*/
++	}
++	au_cpup_attr_timesizes(dentry->d_inode);
++
++	di_read_unlock(dentry, AuLock_IW);
++	fi_read_unlock(file);
++	si_read_unlock(dentry->d_sb);
++	return err;
++}
++
++/* ---------------------------------------------------------------------- */
++
++static int do_open_nondir(struct file *file, int flags)
++{
++	int err;
++	aufs_bindex_t bindex;
++	struct file *h_file;
++	struct dentry *dentry;
++	struct au_finfo *finfo;
++
++	FiMustWriteLock(file);
++
++	err = 0;
++	dentry = file->f_dentry;
++	finfo = au_fi(file);
++	finfo->fi_h_vm_ops = NULL;
++	finfo->fi_vm_ops = NULL;
++	bindex = au_dbstart(dentry);
++	/* O_TRUNC is processed already */
++	BUG_ON(au_test_ro(dentry->d_sb, bindex, dentry->d_inode)
++	       && (flags & O_TRUNC));
++
++	h_file = au_h_open(dentry, bindex, flags, file);
++	if (IS_ERR(h_file))
++		err = PTR_ERR(h_file);
++	else {
++		au_set_fbstart(file, bindex);
++		au_set_fbend(file, bindex);
++		au_set_h_fptr(file, bindex, h_file);
++		au_update_figen(file);
++		/* todo: necessary? */
++		/* file->f_ra = h_file->f_ra; */
++	}
++	return err;
++}
++
++static int aufs_open_nondir(struct inode *inode __maybe_unused,
++			    struct file *file)
++{
++	return au_do_open(file, do_open_nondir);
++}
++
++static int aufs_release_nondir(struct inode *inode __maybe_unused,
++			       struct file *file)
++{
++	struct super_block *sb = file->f_dentry->d_sb;
++
++	si_noflush_read_lock(sb);
++	kfree(au_fi(file)->fi_vm_ops);
++	au_finfo_fin(file);
++	si_read_unlock(sb);
++	return 0;
++}
++
++/* ---------------------------------------------------------------------- */
++
++static ssize_t aufs_read(struct file *file, char __user *buf, size_t count,
++			 loff_t *ppos)
++{
++	ssize_t err;
++	struct dentry *dentry;
++	struct file *h_file;
++	struct super_block *sb;
++
++	dentry = file->f_dentry;
++	sb = dentry->d_sb;
++	si_read_lock(sb, AuLock_FLUSH);
++	err = au_reval_and_lock_fdi(file, au_reopen_nondir, /*wlock*/0);
++	if (unlikely(err))
++		goto out;
++
++	h_file = au_h_fptr(file, au_fbstart(file));
++	err = vfsub_read_u(h_file, buf, count, ppos);
++	/* todo: necessary? */
++	/* file->f_ra = h_file->f_ra; */
++	fsstack_copy_attr_atime(dentry->d_inode, h_file->f_dentry->d_inode);
++
++	di_read_unlock(dentry, AuLock_IR);
++	fi_read_unlock(file);
++ out:
++	si_read_unlock(sb);
++	return err;
++}
++
++static ssize_t aufs_write(struct file *file, const char __user *ubuf,
++			  size_t count, loff_t *ppos)
++{
++	ssize_t err;
++	aufs_bindex_t bstart;
++	struct au_pin pin;
++	struct dentry *dentry;
++	struct inode *inode;
++	struct super_block *sb;
++	struct file *h_file;
++	char __user *buf = (char __user *)ubuf;
++
++	dentry = file->f_dentry;
++	sb = dentry->d_sb;
++	inode = dentry->d_inode;
++	mutex_lock(&inode->i_mutex);
++	si_read_lock(sb, AuLock_FLUSH);
++
++	err = au_reval_and_lock_fdi(file, au_reopen_nondir, /*wlock*/1);
++	if (unlikely(err))
++		goto out;
++
++	err = au_ready_to_write(file, -1, &pin);
++	di_downgrade_lock(dentry, AuLock_IR);
++	if (unlikely(err))
++		goto out_unlock;
++
++	bstart = au_fbstart(file);
++	h_file = au_h_fptr(file, bstart);
++	au_unpin(&pin);
++	err = vfsub_write_u(h_file, buf, count, ppos);
++	au_cpup_attr_timesizes(inode);
++	inode->i_mode = h_file->f_dentry->d_inode->i_mode;
++
++ out_unlock:
++	di_read_unlock(dentry, AuLock_IR);
++	fi_write_unlock(file);
++ out:
++	si_read_unlock(sb);
++	mutex_unlock(&inode->i_mutex);
++	return err;
++}
++
++static ssize_t aufs_aio_read(struct kiocb *kio, const struct iovec *iov,
++			     unsigned long nv, loff_t pos)
++{
++	ssize_t err;
++	struct file *file, *h_file;
++	struct dentry *dentry;
++	struct super_block *sb;
++
++	file = kio->ki_filp;
++	dentry = file->f_dentry;
++	sb = dentry->d_sb;
++	si_read_lock(sb, AuLock_FLUSH);
++	err = au_reval_and_lock_fdi(file, au_reopen_nondir, /*wlock*/0);
++	if (unlikely(err))
++		goto out;
++
++	err = -ENOSYS;
++	h_file = au_h_fptr(file, au_fbstart(file));
++	if (h_file->f_op && h_file->f_op->aio_read) {
++		err = security_file_permission(h_file, MAY_READ);
++		if (unlikely(err))
++			goto out_unlock;
++		if (!is_sync_kiocb(kio)) {
++			get_file(h_file);
++			fput(file);
++		}
++		kio->ki_filp = h_file;
++		err = h_file->f_op->aio_read(kio, iov, nv, pos);
++		/* todo: necessary? */
++		/* file->f_ra = h_file->f_ra; */
++		fsstack_copy_attr_atime(dentry->d_inode,
++					h_file->f_dentry->d_inode);
++	} else
++		/* currently there is no such fs */
++		WARN_ON_ONCE(h_file->f_op && h_file->f_op->read);
++
++ out_unlock:
++	di_read_unlock(dentry, AuLock_IR);
++	fi_read_unlock(file);
++ out:
++	si_read_unlock(sb);
++	return err;
++}
++
++static ssize_t aufs_aio_write(struct kiocb *kio, const struct iovec *iov,
++			      unsigned long nv, loff_t pos)
++{
++	ssize_t err;
++	aufs_bindex_t bstart;
++	struct au_pin pin;
++	struct dentry *dentry;
++	struct inode *inode;
++	struct super_block *sb;
++	struct file *file, *h_file;
++
++	file = kio->ki_filp;
++	dentry = file->f_dentry;
++	sb = dentry->d_sb;
++	inode = dentry->d_inode;
++	mutex_lock(&inode->i_mutex);
++	si_read_lock(sb, AuLock_FLUSH);
++
++	err = au_reval_and_lock_fdi(file, au_reopen_nondir, /*wlock*/1);
++	if (unlikely(err))
++		goto out;
++
++	err = au_ready_to_write(file, -1, &pin);
++	di_downgrade_lock(dentry, AuLock_IR);
++	if (unlikely(err))
++		goto out_unlock;
++
++	err = -ENOSYS;
++	bstart = au_fbstart(file);
++	h_file = au_h_fptr(file, bstart);
++	au_unpin(&pin);
++	if (h_file->f_op && h_file->f_op->aio_write) {
++		err = security_file_permission(h_file, MAY_WRITE);
++		if (unlikely(err))
++			goto out_unlock;
++		if (!is_sync_kiocb(kio)) {
++			get_file(h_file);
++			fput(file);
++		}
++		kio->ki_filp = h_file;
++		err = h_file->f_op->aio_write(kio, iov, nv, pos);
++		au_cpup_attr_timesizes(inode);
++		inode->i_mode = h_file->f_dentry->d_inode->i_mode;
++	} else
++		/* currently there is no such fs */
++		WARN_ON_ONCE(h_file->f_op && h_file->f_op->write);
++
++ out_unlock:
++	di_read_unlock(dentry, AuLock_IR);
++	fi_write_unlock(file);
++ out:
++	si_read_unlock(sb);
++	mutex_unlock(&inode->i_mutex);
++	return err;
++}
++
++static ssize_t aufs_splice_read(struct file *file, loff_t *ppos,
++				struct pipe_inode_info *pipe, size_t len,
++				unsigned int flags)
++{
++	ssize_t err;
++	struct file *h_file;
++	struct dentry *dentry;
++	struct super_block *sb;
++
++	dentry = file->f_dentry;
++	sb = dentry->d_sb;
++	si_read_lock(sb, AuLock_FLUSH);
++	err = au_reval_and_lock_fdi(file, au_reopen_nondir, /*wlock*/0);
++	if (unlikely(err))
++		goto out;
++
++	err = -EINVAL;
++	h_file = au_h_fptr(file, au_fbstart(file));
++	if (au_test_loopback_kthread()) {
++		file->f_mapping = h_file->f_mapping;
++		smp_mb(); /* unnecessary? */
++	}
++	err = vfsub_splice_to(h_file, ppos, pipe, len, flags);
++	/* todo: necessasry? */
++	/* file->f_ra = h_file->f_ra; */
++	fsstack_copy_attr_atime(dentry->d_inode, h_file->f_dentry->d_inode);
++
++	di_read_unlock(dentry, AuLock_IR);
++	fi_read_unlock(file);
++
++ out:
++	si_read_unlock(sb);
++	return err;
++}
++
++static ssize_t
++aufs_splice_write(struct pipe_inode_info *pipe, struct file *file, loff_t *ppos,
++		  size_t len, unsigned int flags)
++{
++	ssize_t err;
++	struct au_pin pin;
++	struct dentry *dentry;
++	struct inode *inode;
++	struct super_block *sb;
++	struct file *h_file;
++
++	dentry = file->f_dentry;
++	inode = dentry->d_inode;
++	mutex_lock(&inode->i_mutex);
++	sb = dentry->d_sb;
++	si_read_lock(sb, AuLock_FLUSH);
++
++	err = au_reval_and_lock_fdi(file, au_reopen_nondir, /*wlock*/1);
++	if (unlikely(err))
++		goto out;
++
++	err = au_ready_to_write(file, -1, &pin);
++	di_downgrade_lock(dentry, AuLock_IR);
++	if (unlikely(err))
++		goto out_unlock;
++
++	h_file = au_h_fptr(file, au_fbstart(file));
++	au_unpin(&pin);
++	err = vfsub_splice_from(pipe, h_file, ppos, len, flags);
++	au_cpup_attr_timesizes(inode);
++	inode->i_mode = h_file->f_dentry->d_inode->i_mode;
++
++ out_unlock:
++	di_read_unlock(dentry, AuLock_IR);
++	fi_write_unlock(file);
++ out:
++	si_read_unlock(sb);
++	mutex_unlock(&inode->i_mutex);
++	return err;
++}
++
++/* ---------------------------------------------------------------------- */
++
++static struct file *au_safe_file(struct vm_area_struct *vma)
++{
++	struct file *file;
++
++	file = vma->vm_file;
++	if (file->private_data && au_test_aufs(file->f_dentry->d_sb))
++		return file;
++	return NULL;
++}
++
++static void au_reset_file(struct vm_area_struct *vma, struct file *file)
++{
++	vma->vm_file = file;
++	/* smp_mb(); */ /* flush vm_file */
++}
++
++static int aufs_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
++{
++	int err;
++	static DECLARE_WAIT_QUEUE_HEAD(wq);
++	struct file *file, *h_file;
++	struct au_finfo *finfo;
++
++	/* todo: non-robr mode, user vm_file as it is? */
++	wait_event(wq, (file = au_safe_file(vma)));
++
++	/* do not revalidate, no si lock */
++	finfo = au_fi(file);
++	h_file = finfo->fi_hfile[0 + finfo->fi_bstart].hf_file;
++	AuDebugOn(!h_file || !finfo->fi_h_vm_ops);
++
++	fi_write_lock(file);
++	vma->vm_file = h_file;
++	err = finfo->fi_h_vm_ops->fault(vma, vmf);
++	/* todo: necessary? */
++	/* file->f_ra = h_file->f_ra; */
++	au_reset_file(vma, file);
++	fi_write_unlock(file);
++#if 0 /* def CONFIG_SMP */
++	/* wake_up_nr(&wq, online_cpu - 1); */
++	wake_up_all(&wq);
++#else
++	wake_up(&wq);
++#endif
++
++	return err;
++}
++
++static int aufs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
++{
++	int err;
++	static DECLARE_WAIT_QUEUE_HEAD(wq);
++	struct file *file, *h_file;
++	struct au_finfo *finfo;
++
++	wait_event(wq, (file = au_safe_file(vma)));
++
++	finfo = au_fi(file);
++	h_file = finfo->fi_hfile[0 + finfo->fi_bstart].hf_file;
++	AuDebugOn(!h_file || !finfo->fi_h_vm_ops);
++
++	fi_write_lock(file);
++	vma->vm_file = h_file;
++	err = finfo->fi_h_vm_ops->page_mkwrite(vma, vmf);
++	au_reset_file(vma, file);
++	fi_write_unlock(file);
++	wake_up(&wq);
++
++	return err;
++}
++
++static void aufs_vm_close(struct vm_area_struct *vma)
++{
++	static DECLARE_WAIT_QUEUE_HEAD(wq);
++	struct file *file, *h_file;
++	struct au_finfo *finfo;
++
++	wait_event(wq, (file = au_safe_file(vma)));
++
++	finfo = au_fi(file);
++	h_file = finfo->fi_hfile[0 + finfo->fi_bstart].hf_file;
++	AuDebugOn(!h_file || !finfo->fi_h_vm_ops);
++
++	fi_write_lock(file);
++	vma->vm_file = h_file;
++	finfo->fi_h_vm_ops->close(vma);
++	au_reset_file(vma, file);
++	fi_write_unlock(file);
++	wake_up(&wq);
++}
++
++static struct vm_operations_struct aufs_vm_ops = {
++	/* .close and .page_mkwrite are not set by default */
++	.fault		= aufs_fault,
++};
++
++/* ---------------------------------------------------------------------- */
++
++static struct vm_operations_struct *au_vm_ops(struct file *h_file,
++					      struct vm_area_struct *vma)
++{
++	struct vm_operations_struct *vm_ops;
++	int err;
++
++	vm_ops = ERR_PTR(-ENODEV);
++	if (!h_file->f_op || !h_file->f_op->mmap)
++		goto out;
++
++	err = h_file->f_op->mmap(h_file, vma);
++	vm_ops = ERR_PTR(err);
++	if (unlikely(err))
++		goto out;
++
++	vm_ops = vma->vm_ops;
++	err = do_munmap(current->mm, vma->vm_start,
++			vma->vm_end - vma->vm_start);
++	if (unlikely(err)) {
++		AuIOErr("failed internal unmapping %.*s, %d\n",
++			AuDLNPair(h_file->f_dentry), err);
++		vm_ops = ERR_PTR(-EIO);
++	}
++
++ out:
++	return vm_ops;
++}
++
++static int au_custom_vm_ops(struct au_finfo *finfo, struct vm_area_struct *vma)
++{
++	int err;
++	struct vm_operations_struct *h_ops;
++
++	AuRwMustAnyLock(&finfo->fi_rwsem);
++
++	err = 0;
++	h_ops = finfo->fi_h_vm_ops;
++	AuDebugOn(!h_ops);
++	if ((!h_ops->page_mkwrite && !h_ops->close)
++	    || finfo->fi_vm_ops)
++		goto out;
++
++	err = -ENOMEM;
++	finfo->fi_vm_ops = kmemdup(&aufs_vm_ops, sizeof(aufs_vm_ops), GFP_NOFS);
++	if (unlikely(!finfo->fi_vm_ops))
++		goto out;
++
++	err = 0;
++	if (h_ops->page_mkwrite)
++		finfo->fi_vm_ops->page_mkwrite = aufs_page_mkwrite;
++	if (h_ops->close)
++		finfo->fi_vm_ops->close = aufs_vm_close;
++
++	vma->vm_ops = finfo->fi_vm_ops;
++
++ out:
++	return err;
++}
++
++static int aufs_mmap(struct file *file, struct vm_area_struct *vma)
++{
++	int err;
++	unsigned char wlock, mmapped;
++	struct dentry *dentry;
++	struct super_block *sb;
++	struct file *h_file;
++	struct vm_operations_struct *vm_ops;
++
++	dentry = file->f_dentry;
++	wlock = !!(file->f_mode & FMODE_WRITE) && (vma->vm_flags & VM_SHARED);
++	sb = dentry->d_sb;
++	si_read_lock(sb, AuLock_FLUSH);
++	err = au_reval_and_lock_fdi(file, au_reopen_nondir, /*wlock*/1);
++	if (unlikely(err))
++		goto out;
++
++	mmapped = !!au_test_mmapped(file);
++	if (wlock) {
++		struct au_pin pin;
++
++		err = au_ready_to_write(file, -1, &pin);
++		di_downgrade_lock(dentry, AuLock_IR);
++		if (unlikely(err))
++			goto out_unlock;
++		au_unpin(&pin);
++	} else
++		di_downgrade_lock(dentry, AuLock_IR);
++
++	h_file = au_h_fptr(file, au_fbstart(file));
++	if (!mmapped && au_test_fs_bad_mapping(h_file->f_dentry->d_sb)) {
++		/*
++		 * by this assignment, f_mapping will differs from aufs inode
++		 * i_mapping.
++		 * if someone else mixes the use of f_dentry->d_inode and
++		 * f_mapping->host, then a problem may arise.
++		 */
++		file->f_mapping = h_file->f_mapping;
++	}
++
++	vm_ops = NULL;
++	if (!mmapped) {
++		vm_ops = au_vm_ops(h_file, vma);
++		err = PTR_ERR(vm_ops);
++		if (IS_ERR(vm_ops))
++			goto out_unlock;
++	}
++
++	/*
++	 * unnecessary to handle MAP_DENYWRITE and deny_write_access()?
++	 * currently MAP_DENYWRITE from userspace is ignored, but elf loader
++	 * sets it. when FMODE_EXEC is set (by open_exec() or sys_uselib()),
++	 * both of the aufs file and the lower file is deny_write_access()-ed.
++	 * finally I hope we can skip handlling MAP_DENYWRITE here.
++	 */
++	err = generic_file_mmap(file, vma);
++	if (unlikely(err))
++		goto out_unlock;
++
++	vma->vm_ops = &aufs_vm_ops;
++	/* test again */
++	if (!au_test_mmapped(file))
++		au_fi(file)->fi_h_vm_ops = vm_ops;
++
++	err = au_custom_vm_ops(au_fi(file), vma);
++	if (unlikely(err))
++		goto out_unlock;
++
++	vfsub_file_accessed(h_file);
++	fsstack_copy_attr_atime(dentry->d_inode, h_file->f_dentry->d_inode);
++
++ out_unlock:
++	di_read_unlock(dentry, AuLock_IR);
++	fi_write_unlock(file);
++ out:
++	si_read_unlock(sb);
++	return err;
++}
++
++/* ---------------------------------------------------------------------- */
++
++static int aufs_fsync_nondir(struct file *file, struct dentry *dentry,
++			     int datasync)
++{
++	int err;
++	struct au_pin pin;
++	struct inode *inode;
++	struct file *h_file;
++	struct super_block *sb;
++
++	inode = dentry->d_inode;
++	IMustLock(file->f_mapping->host);
++	if (inode != file->f_mapping->host) {
++		mutex_unlock(&file->f_mapping->host->i_mutex);
++		mutex_lock(&inode->i_mutex);
++	}
++	IMustLock(inode);
++
++	sb = dentry->d_sb;
++	si_read_lock(sb, AuLock_FLUSH);
++
++	err = 0; /* -EBADF; */ /* posix? */
++	if (unlikely(!(file->f_mode & FMODE_WRITE)))
++		goto out;
++	err = au_reval_and_lock_fdi(file, au_reopen_nondir, /*wlock*/1);
++	if (unlikely(err))
++		goto out;
++
++	err = au_ready_to_write(file, -1, &pin);
++	di_downgrade_lock(dentry, AuLock_IR);
++	if (unlikely(err))
++		goto out_unlock;
++	au_unpin(&pin);
++
++	err = -EINVAL;
++	h_file = au_h_fptr(file, au_fbstart(file));
++	if (h_file->f_op && h_file->f_op->fsync) {
++		struct dentry *h_d;
++		struct mutex *h_mtx;
++
++		/*
++		 * no filemap_fdatawrite() since aufs file has no its own
++		 * mapping, but dir.
++		 */
++		h_d = h_file->f_dentry;
++		h_mtx = &h_d->d_inode->i_mutex;
++		mutex_lock_nested(h_mtx, AuLsc_I_CHILD);
++		err = h_file->f_op->fsync(h_file, h_d, datasync);
++		if (!err)
++			vfsub_update_h_iattr(&h_file->f_path, /*did*/NULL);
++		/*ignore*/
++		au_cpup_attr_timesizes(inode);
++		mutex_unlock(h_mtx);
++	}
++
++ out_unlock:
++	di_read_unlock(dentry, AuLock_IR);
++	fi_write_unlock(file);
++ out:
++	si_read_unlock(sb);
++	if (inode != file->f_mapping->host) {
++		mutex_unlock(&inode->i_mutex);
++		mutex_lock(&file->f_mapping->host->i_mutex);
++	}
++	return err;
++}
++
++/* no one supports this operation, currently */
++#if 0
++static int aufs_aio_fsync_nondir(struct kiocb *kio, int datasync)
++{
++	int err;
++	struct au_pin pin;
++	struct dentry *dentry;
++	struct inode *inode;
++	struct file *file, *h_file;
++	struct super_block *sb;
++
++	file = kio->ki_filp;
++	dentry = file->f_dentry;
++	inode = dentry->d_inode;
++	mutex_lock(&inode->i_mutex);
++
++	sb = dentry->d_sb;
++	si_read_lock(sb, AuLock_FLUSH);
++
++	err = 0; /* -EBADF; */ /* posix? */
++	if (unlikely(!(file->f_mode & FMODE_WRITE)))
++		goto out;
++	err = au_reval_and_lock_fdi(file, au_reopen_nondir, /*wlock*/1);
++	if (unlikely(err))
++		goto out;
++
++	err = au_ready_to_write(file, -1, &pin);
++	di_downgrade_lock(dentry, AuLock_IR);
++	if (unlikely(err))
++		goto out_unlock;
++	au_unpin(&pin);
++
++	err = -ENOSYS;
++	h_file = au_h_fptr(file, au_fbstart(file));
++	if (h_file->f_op && h_file->f_op->aio_fsync) {
++		struct dentry *h_d;
++		struct mutex *h_mtx;
++
++		h_d = h_file->f_dentry;
++		h_mtx = &h_d->d_inode->i_mutex;
++		if (!is_sync_kiocb(kio)) {
++			get_file(h_file);
++			fput(file);
++		}
++		kio->ki_filp = h_file;
++		err = h_file->f_op->aio_fsync(kio, datasync);
++		mutex_lock_nested(h_mtx, AuLsc_I_CHILD);
++		if (!err)
++			vfsub_update_h_iattr(&h_file->f_path, /*did*/NULL);
++		/*ignore*/
++		au_cpup_attr_timesizes(inode);
++		mutex_unlock(h_mtx);
++	}
++
++ out_unlock:
++	di_read_unlock(dentry, AuLock_IR);
++	fi_write_unlock(file);
++ out:
++	si_read_unlock(sb);
++	mutex_unlock(&inode->i_mutex);
++	return err;
++}
++#endif
++
++static int aufs_fasync(int fd, struct file *file, int flag)
++{
++	int err;
++	struct file *h_file;
++	struct dentry *dentry;
++	struct super_block *sb;
++
++	dentry = file->f_dentry;
++	sb = dentry->d_sb;
++	si_read_lock(sb, AuLock_FLUSH);
++	err = au_reval_and_lock_fdi(file, au_reopen_nondir, /*wlock*/0);
++	if (unlikely(err))
++		goto out;
++
++	h_file = au_h_fptr(file, au_fbstart(file));
++	if (h_file->f_op && h_file->f_op->fasync)
++		err = h_file->f_op->fasync(fd, h_file, flag);
++
++	di_read_unlock(dentry, AuLock_IR);
++	fi_read_unlock(file);
++
++ out:
++	si_read_unlock(sb);
++	return err;
++}
++
++/* ---------------------------------------------------------------------- */
++
++/* no one supports this operation, currently */
++#if 0
++static ssize_t aufs_sendpage(struct file *file, struct page *page, int offset,
++			     size_t len, loff_t *pos , int more)
++{
++}
++#endif
++
++/* ---------------------------------------------------------------------- */
++
++const struct file_operations aufs_file_fop = {
++	/*
++	 * while generic_file_llseek/_unlocked() don't use BKL,
++	 * don't use it since it operates file->f_mapping->host.
++	 * in aufs, it may be a real file and may confuse users by UDBA.
++	 */
++	/* .llseek		= generic_file_llseek, */
++
++	.read		= aufs_read,
++	.write		= aufs_write,
++	.aio_read	= aufs_aio_read,
++	.aio_write	= aufs_aio_write,
++#ifdef CONFIG_AUFS_POLL
++	.poll		= aufs_poll,
++#endif
++	.mmap		= aufs_mmap,
++	.open		= aufs_open_nondir,
++	.flush		= aufs_flush,
++	.release	= aufs_release_nondir,
++	.fsync		= aufs_fsync_nondir,
++	/* .aio_fsync	= aufs_aio_fsync_nondir, */
++	.fasync		= aufs_fasync,
++	/* .sendpage	= aufs_sendpage, */
++	.splice_write	= aufs_splice_write,
++	.splice_read	= aufs_splice_read,
++#if 0
++	.aio_splice_write = aufs_aio_splice_write,
++	.aio_splice_read  = aufs_aio_splice_read
++#endif
++};
+diff -Nur linux-2.6.31.5.orig/fs/aufs/fstype.h linux-2.6.31.5/fs/aufs/fstype.h
+--- linux-2.6.31.5.orig/fs/aufs/fstype.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.31.5/fs/aufs/fstype.h	2009-11-15 22:02:37.000000000 +0100
+@@ -0,0 +1,474 @@
++/*
++ * Copyright (C) 2005-2009 Junjiro R. Okajima
++ *
++ * This program, aufs is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
++ */
++
++/*
++ * judging filesystem type
++ */
++
++#ifndef __AUFS_FSTYPE_H__
++#define __AUFS_FSTYPE_H__
++
++#ifdef __KERNEL__
++
++#include <linux/cramfs_fs.h>
++#include <linux/fs.h>
++#include <linux/magic.h>
++#include <linux/romfs_fs.h>
++#include <linux/aufs_type.h>
++
++static inline int au_test_aufs(struct super_block *sb)
++{
++	return sb->s_magic == AUFS_SUPER_MAGIC;
++}
++
++static inline const char *au_sbtype(struct super_block *sb)
++{
++	return sb->s_type->name;
++}
++
++static inline int au_test_iso9660(struct super_block *sb __maybe_unused)
++{
++#if defined(CONFIG_ISO9660_FS) || defined(CONFIG_ISO9660_FS_MODULE)
++	return sb->s_magic == ISOFS_SUPER_MAGIC;
++#else
++	return 0;
++#endif
++}
++
++static inline int au_test_romfs(struct super_block *sb __maybe_unused)
++{
++#if defined(CONFIG_ROMFS_FS) || defined(CONFIG_ROMFS_FS_MODULE)
++	return sb->s_magic == ROMFS_MAGIC;
++#else
++	return 0;
++#endif
++}
++
++static inline int au_test_cramfs(struct super_block *sb __maybe_unused)
++{
++#if defined(CONFIG_CRAMFS) || defined(CONFIG_CRAMFS_MODULE)
++	return sb->s_magic == CRAMFS_MAGIC;
++#endif
++	return 0;
++}
++
++static inline int au_test_nfs(struct super_block *sb __maybe_unused)
++{
++#if defined(CONFIG_NFS_FS) || defined(CONFIG_NFS_FS_MODULE)
++	return sb->s_magic == NFS_SUPER_MAGIC;
++#else
++	return 0;
++#endif
++}
++
++static inline int au_test_fuse(struct super_block *sb __maybe_unused)
++{
++#if defined(CONFIG_FUSE_FS) || defined(CONFIG_FUSE_FS_MODULE)
++	return sb->s_magic == FUSE_SUPER_MAGIC;
++#else
++	return 0;
++#endif
++}
++
++static inline int au_test_xfs(struct super_block *sb __maybe_unused)
++{
++#if defined(CONFIG_XFS_FS) || defined(CONFIG_XFS_FS_MODULE)
++	return sb->s_magic == XFS_SB_MAGIC;
++#else
++	return 0;
++#endif
++}
++
++static inline int au_test_tmpfs(struct super_block *sb __maybe_unused)
++{
++#ifdef CONFIG_TMPFS
++	return sb->s_magic == TMPFS_MAGIC;
++#else
++	return 0;
++#endif
++}
++
++static inline int au_test_ecryptfs(struct super_block *sb __maybe_unused)
++{
++#if defined(CONFIG_ECRYPT_FS) || defined(CONFIG_ECRYPT_FS_MODULE)
++	return !strcmp(au_sbtype(sb), "ecryptfs");
++#else
++	return 0;
++#endif
++}
++
++static inline int au_test_smbfs(struct super_block *sb __maybe_unused)
++{
++#if defined(CONFIG_SMB_FS) || defined(CONFIG_SMB_FS_MODULE)
++	return sb->s_magic == SMB_SUPER_MAGIC;
++#else
++	return 0;
++#endif
++}
++
++static inline int au_test_ocfs2(struct super_block *sb __maybe_unused)
++{
++#if defined(CONFIG_OCFS2_FS) || defined(CONFIG_OCFS2_FS_MODULE)
++	return sb->s_magic == OCFS2_SUPER_MAGIC;
++#else
++	return 0;
++#endif
++}
++
++static inline int au_test_ocfs2_dlmfs(struct super_block *sb __maybe_unused)
++{
++#if defined(CONFIG_OCFS2_FS_O2CB) || defined(CONFIG_OCFS2_FS_O2CB_MODULE)
++	return sb->s_magic == DLMFS_MAGIC;
++#else
++	return 0;
++#endif
++}
++
++static inline int au_test_coda(struct super_block *sb __maybe_unused)
++{
++#if defined(CONFIG_CODA_FS) || defined(CONFIG_CODA_FS_MODULE)
++	return sb->s_magic == CODA_SUPER_MAGIC;
++#else
++	return 0;
++#endif
++}
++
++static inline int au_test_v9fs(struct super_block *sb __maybe_unused)
++{
++#if defined(CONFIG_9P_FS) || defined(CONFIG_9P_FS_MODULE)
++	return sb->s_magic == V9FS_MAGIC;
++#else
++	return 0;
++#endif
++}
++
++static inline int au_test_ext4(struct super_block *sb __maybe_unused)
++{
++#if defined(CONFIG_EXT4DEV_FS) || defined(CONFIG_EXT4DEV_FS_MODULE)
++	return sb->s_magic == EXT4_SUPER_MAGIC;
++#else
++	return 0;
++#endif
++}
++
++static inline int au_test_sysv(struct super_block *sb __maybe_unused)
++{
++#if defined(CONFIG_SYSV_FS) || defined(CONFIG_SYSV_FS_MODULE)
++	return !strcmp(au_sbtype(sb), "sysv");
++#else
++	return 0;
++#endif
++}
++
++static inline int au_test_ramfs(struct super_block *sb)
++{
++	return sb->s_magic == RAMFS_MAGIC;
++}
++
++static inline int au_test_ubifs(struct super_block *sb __maybe_unused)
++{
++#if defined(CONFIG_UBIFS_FS) || defined(CONFIG_UBIFS_FS_MODULE)
++	return sb->s_magic == UBIFS_SUPER_MAGIC;
++#else
++	return 0;
++#endif
++}
++
++static inline int au_test_procfs(struct super_block *sb __maybe_unused)
++{
++#ifdef CONFIG_PROC_FS
++	return sb->s_magic == PROC_SUPER_MAGIC;
++#else
++	return 0;
++#endif
++}
++
++static inline int au_test_sysfs(struct super_block *sb __maybe_unused)
++{
++#ifdef CONFIG_SYSFS
++	return sb->s_magic == SYSFS_MAGIC;
++#else
++	return 0;
++#endif
++}
++
++static inline int au_test_configfs(struct super_block *sb __maybe_unused)
++{
++#if defined(CONFIG_CONFIGFS_FS) || defined(CONFIG_CONFIGFS_FS_MODULE)
++	return sb->s_magic == CONFIGFS_MAGIC;
++#else
++	return 0;
++#endif
++}
++
++static inline int au_test_minix(struct super_block *sb __maybe_unused)
++{
++#if defined(CONFIG_MINIX_FS) || defined(CONFIG_MINIX_FS_MODULE)
++	return sb->s_magic == MINIX3_SUPER_MAGIC
++		|| sb->s_magic == MINIX2_SUPER_MAGIC
++		|| sb->s_magic == MINIX2_SUPER_MAGIC2
++		|| sb->s_magic == MINIX_SUPER_MAGIC
++		|| sb->s_magic == MINIX_SUPER_MAGIC2;
++#else
++	return 0;
++#endif
++}
++
++static inline int au_test_cifs(struct super_block *sb __maybe_unused)
++{
++#if defined(CONFIG_CIFS_FS) || defined(CONFIG_CIFS_FS_MODULE)
++	return sb->s_magic == CIFS_MAGIC_NUMBER;
++#else
++	return 0;
++#endif
++}
++
++static inline int au_test_fat(struct super_block *sb __maybe_unused)
++{
++#if defined(CONFIG_FAT_FS) || defined(CONFIG_FAT_FS_MODULE)
++	return sb->s_magic == MSDOS_SUPER_MAGIC;
++#else
++	return 0;
++#endif
++}
++
++static inline int au_test_msdos(struct super_block *sb)
++{
++	return au_test_fat(sb);
++}
++
++static inline int au_test_vfat(struct super_block *sb)
++{
++	return au_test_fat(sb);
++}
++
++static inline int au_test_securityfs(struct super_block *sb __maybe_unused)
++{
++#ifdef CONFIG_SECURITYFS
++	return sb->s_magic == SECURITYFS_MAGIC;
++#else
++	return 0;
++#endif
++}
++
++static inline int au_test_squashfs(struct super_block *sb __maybe_unused)
++{
++#if defined(CONFIG_SQUASHFS) || defined(CONFIG_SQUASHFS_MODULE)
++	return sb->s_magic == SQUASHFS_MAGIC;
++#else
++	return 0;
++#endif
++}
++
++static inline int au_test_btrfs(struct super_block *sb __maybe_unused)
++{
++#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
++	return sb->s_magic == BTRFS_SUPER_MAGIC;
++#else
++	return 0;
++#endif
++}
++
++static inline int au_test_xenfs(struct super_block *sb __maybe_unused)
++{
++#if defined(CONFIG_XENFS) || defined(CONFIG_XENFS_MODULE)
++	return sb->s_magic == XENFS_SUPER_MAGIC;
++#else
++	return 0;
++#endif
++}
++
++static inline int au_test_debugfs(struct super_block *sb __maybe_unused)
++{
++#ifdef CONFIG_DEBUG_FS
++	return sb->s_magic == DEBUGFS_MAGIC;
++#else
++	return 0;
++#endif
++}
++
++/* ---------------------------------------------------------------------- */
++/*
++ * they can't be an aufs branch.
++ */
++static inline int au_test_fs_unsuppoted(struct super_block *sb)
++{
++	return
++#ifndef CONFIG_AUFS_BR_RAMFS
++		au_test_ramfs(sb) ||
++#endif
++		au_test_procfs(sb)
++		|| au_test_sysfs(sb)
++		|| au_test_configfs(sb)
++		|| au_test_debugfs(sb)
++		|| au_test_securityfs(sb)
++		|| au_test_xenfs(sb)
++		/* || !strcmp(au_sbtype(sb), "unionfs") */
++		|| au_test_aufs(sb); /* will be supported in next version */
++}
++
++/*
++ * If the filesystem supports NFS-export, then it has to support NULL as
++ * a nameidata parameter for ->create(), ->lookup() and ->d_revalidate().
++ * We can apply this principle when we handle a lower filesystem.
++ */
++static inline int au_test_fs_null_nd(struct super_block *sb)
++{
++	return !!sb->s_export_op;
++}
++
++static inline int au_test_fs_remote(struct super_block *sb)
++{
++	return !au_test_tmpfs(sb)
++#ifdef CONFIG_AUFS_BR_RAMFS
++		&& !au_test_ramfs(sb)
++#endif
++		&& !(sb->s_type->fs_flags & FS_REQUIRES_DEV);
++}
++
++/* ---------------------------------------------------------------------- */
++
++/*
++ * Note: these functions (below) are created after reading ->getattr() in all
++ * filesystems under linux/fs. it means we have to do so in every update...
++ */
++
++/*
++ * some filesystems require getattr to refresh the inode attributes before
++ * referencing.
++ * in most cases, we can rely on the inode attribute in NFS (or every remote fs)
++ * and leave the work for d_revalidate()
++ */
++static inline int au_test_fs_refresh_iattr(struct super_block *sb)
++{
++	return au_test_nfs(sb)
++		|| au_test_fuse(sb)
++		/* || au_test_smbfs(sb) */	/* untested */
++		/* || au_test_ocfs2(sb) */	/* untested */
++		/* || au_test_btrfs(sb) */	/* untested */
++		/* || au_test_coda(sb) */	/* untested */
++		/* || au_test_v9fs(sb) */	/* untested */
++		;
++}
++
++/*
++ * filesystems which don't maintain i_size or i_blocks.
++ */
++static inline int au_test_fs_bad_iattr_size(struct super_block *sb)
++{
++	return au_test_xfs(sb)
++		/* || au_test_ext4(sb) */	/* untested */
++		/* || au_test_ocfs2(sb) */	/* untested */
++		/* || au_test_ocfs2_dlmfs(sb) */ /* untested */
++		/* || au_test_sysv(sb) */	/* untested */
++		/* || au_test_ubifs(sb) */	/* untested */
++		/* || au_test_minix(sb) */	/* untested */
++		;
++}
++
++/*
++ * filesystems which don't store the correct value in some of their inode
++ * attributes.
++ */
++static inline int au_test_fs_bad_iattr(struct super_block *sb)
++{
++	return au_test_fs_bad_iattr_size(sb)
++		/* || au_test_cifs(sb) */	/* untested */
++		|| au_test_fat(sb)
++		|| au_test_msdos(sb)
++		|| au_test_vfat(sb);
++}
++
++/* they don't check i_nlink in link(2) */
++static inline int au_test_fs_no_limit_nlink(struct super_block *sb)
++{
++	return au_test_tmpfs(sb)
++#ifdef CONFIG_AUFS_BR_RAMFS
++		|| au_test_ramfs(sb)
++#endif
++		|| au_test_ubifs(sb);
++}
++
++/*
++ * filesystems which sets S_NOATIME and S_NOCMTIME.
++ */
++static inline int au_test_fs_notime(struct super_block *sb)
++{
++	return au_test_nfs(sb)
++		|| au_test_fuse(sb)
++		|| au_test_ubifs(sb)
++		/* || au_test_cifs(sb) */	/* untested */
++		;
++}
++
++/*
++ * filesystems which requires replacing i_mapping.
++ */
++static inline int au_test_fs_bad_mapping(struct super_block *sb)
++{
++	return au_test_fuse(sb)
++		|| au_test_ubifs(sb);
++}
++
++/* temporary support for i#1 in cramfs */
++static inline int au_test_fs_unique_ino(struct inode *inode)
++{
++	if (au_test_cramfs(inode->i_sb))
++		return inode->i_ino != 1;
++	return 1;
++}
++
++/* ---------------------------------------------------------------------- */
++
++/*
++ * the filesystem where the xino files placed must support i/o after unlink and
++ * maintain i_size and i_blocks.
++ */
++static inline int au_test_fs_bad_xino(struct super_block *sb)
++{
++	return au_test_fs_remote(sb)
++		|| au_test_fs_bad_iattr_size(sb)
++#ifdef CONFIG_AUFS_BR_RAMFS
++		|| !(au_test_ramfs(sb) || au_test_fs_null_nd(sb))
++#else
++		|| !au_test_fs_null_nd(sb) /* to keep xino code simple */
++#endif
++		/* don't want unnecessary work for xino */
++		|| au_test_aufs(sb)
++		|| au_test_ecryptfs(sb);
++}
++
++static inline int au_test_fs_trunc_xino(struct super_block *sb)
++{
++	return au_test_tmpfs(sb)
++		|| au_test_ramfs(sb);
++}
++
++/*
++ * test if the @sb is real-readonly.
++ */
++static inline int au_test_fs_rr(struct super_block *sb)
++{
++	return au_test_squashfs(sb)
++		|| au_test_iso9660(sb)
++		|| au_test_cramfs(sb)
++		|| au_test_romfs(sb);
++}
++
++#endif /* __KERNEL__ */
++#endif /* __AUFS_FSTYPE_H__ */
+diff -Nur linux-2.6.31.5.orig/fs/aufs/hinotify.c linux-2.6.31.5/fs/aufs/hinotify.c
+--- linux-2.6.31.5.orig/fs/aufs/hinotify.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.31.5/fs/aufs/hinotify.c	2009-11-15 22:02:37.000000000 +0100
+@@ -0,0 +1,755 @@
++/*
++ * Copyright (C) 2005-2009 Junjiro R. Okajima
++ *
++ * This program, aufs is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
++ */
++
++/*
++ * inotify for the lower directories
++ */
++
++#include "aufs.h"
++
++static const __u32 AuHinMask = (IN_MOVE | IN_DELETE | IN_CREATE);
++static struct inotify_handle *au_hin_handle;
++
++AuCacheFuncs(hinotify, HINOTIFY);
++
++int au_hin_alloc(struct au_hinode *hinode, struct inode *inode,
++		 struct inode *h_inode)
++{
++	int err;
++	struct au_hinotify *hin;
++	s32 wd;
++
++	err = -ENOMEM;
++	hin = au_cache_alloc_hinotify();
++	if (hin) {
++		AuDebugOn(hinode->hi_notify);
++		hinode->hi_notify = hin;
++		hin->hin_aufs_inode = inode;
++
++		inotify_init_watch(&hin->hin_watch);
++		wd = inotify_add_watch(au_hin_handle, &hin->hin_watch, h_inode,
++				       AuHinMask);
++		if (wd >= 0)
++			return 0; /* success */
++
++		err = wd;
++		put_inotify_watch(&hin->hin_watch);
++		au_cache_free_hinotify(hin);
++		hinode->hi_notify = NULL;
++	}
++
++	return err;
++}
++
++void au_hin_free(struct au_hinode *hinode)
++{
++	int err;
++	struct au_hinotify *hin;
++
++	hin = hinode->hi_notify;
++	if (hin) {
++		err = 0;
++		if (atomic_read(&hin->hin_watch.count))
++			err = inotify_rm_watch(au_hin_handle, &hin->hin_watch);
++		if (unlikely(err))
++			/* it means the watch is already removed */
++			AuWarn("failed inotify_rm_watch() %d\n", err);
++		au_cache_free_hinotify(hin);
++		hinode->hi_notify = NULL;
++	}
++}
++
++/* ---------------------------------------------------------------------- */
++
++void au_hin_ctl(struct au_hinode *hinode, int do_set)
++{
++	struct inode *h_inode;
++	struct inotify_watch *watch;
++
++	if (!hinode->hi_notify)
++		return;
++
++	h_inode = hinode->hi_inode;
++	IMustLock(h_inode);
++
++	/* todo: try inotify_find_update_watch()? */
++	watch = &hinode->hi_notify->hin_watch;
++	mutex_lock(&h_inode->inotify_mutex);
++	/* mutex_lock(&watch->ih->mutex); */
++	if (do_set) {
++		AuDebugOn(watch->mask & AuHinMask);
++		watch->mask |= AuHinMask;
++	} else {
++		AuDebugOn(!(watch->mask & AuHinMask));
++		watch->mask &= ~AuHinMask;
++	}
++	/* mutex_unlock(&watch->ih->mutex); */
++	mutex_unlock(&h_inode->inotify_mutex);
++}
++
++void au_reset_hinotify(struct inode *inode, unsigned int flags)
++{
++	aufs_bindex_t bindex, bend;
++	struct inode *hi;
++	struct dentry *iwhdentry;
++
++	bend = au_ibend(inode);
++	for (bindex = au_ibstart(inode); bindex <= bend; bindex++) {
++		hi = au_h_iptr(inode, bindex);
++		if (!hi)
++			continue;
++
++		/* mutex_lock_nested(&hi->i_mutex, AuLsc_I_CHILD); */
++		iwhdentry = au_hi_wh(inode, bindex);
++		if (iwhdentry)
++			dget(iwhdentry);
++		au_igrab(hi);
++		au_set_h_iptr(inode, bindex, NULL, 0);
++		au_set_h_iptr(inode, bindex, au_igrab(hi),
++			      flags & ~AuHi_XINO);
++		iput(hi);
++		dput(iwhdentry);
++		/* mutex_unlock(&hi->i_mutex); */
++	}
++}
++
++/* ---------------------------------------------------------------------- */
++
++static int hin_xino(struct inode *inode, struct inode *h_inode)
++{
++	int err;
++	aufs_bindex_t bindex, bend, bfound, bstart;
++	struct inode *h_i;
++
++	err = 0;
++	if (unlikely(inode->i_ino == AUFS_ROOT_INO)) {
++		AuWarn("branch root dir was changed\n");
++		goto out;
++	}
++
++	bfound = -1;
++	bend = au_ibend(inode);
++	bstart = au_ibstart(inode);
++#if 0 /* reserved for future use */
++	if (bindex == bend) {
++		/* keep this ino in rename case */
++		goto out;
++	}
++#endif
++	for (bindex = bstart; bindex <= bend; bindex++) {
++		if (au_h_iptr(inode, bindex) == h_inode) {
++			bfound = bindex;
++			break;
++		}
++	}
++	if (bfound < 0)
++		goto out;
++
++	for (bindex = bstart; bindex <= bend; bindex++) {
++		h_i = au_h_iptr(inode, bindex);
++		if (!h_i)
++			continue;
++
++		err = au_xino_write(inode->i_sb, bindex, h_i->i_ino, /*ino*/0);
++		/* ignore this error */
++		/* bad action? */
++	}
++
++	/* children inode number will be broken */
++
++ out:
++	AuTraceErr(err);
++	return err;
++}
++
++static int hin_gen_tree(struct dentry *dentry)
++{
++	int err, i, j, ndentry;
++	struct au_dcsub_pages dpages;
++	struct au_dpage *dpage;
++	struct dentry **dentries;
++
++	err = au_dpages_init(&dpages, GFP_NOFS);
++	if (unlikely(err))
++		goto out;
++	err = au_dcsub_pages(&dpages, dentry, NULL, NULL);
++	if (unlikely(err))
++		goto out_dpages;
++
++	for (i = 0; i < dpages.ndpage; i++) {
++		dpage = dpages.dpages + i;
++		dentries = dpage->dentries;
++		ndentry = dpage->ndentry;
++		for (j = 0; j < ndentry; j++) {
++			struct dentry *d;
++
++			d = dentries[j];
++			if (IS_ROOT(d))
++				continue;
++
++			d_drop(d);
++			au_digen_dec(d);
++			if (d->d_inode)
++				/* todo: reset children xino?
++				   cached children only? */
++				au_iigen_dec(d->d_inode);
++		}
++	}
++
++ out_dpages:
++	au_dpages_free(&dpages);
++
++	/* discard children */
++	dentry_unhash(dentry);
++	dput(dentry);
++ out:
++	return err;
++}
++
++/*
++ * return 0 if processed.
++ */
++static int hin_gen_by_inode(char *name, unsigned int nlen, struct inode *inode,
++			    const unsigned int isdir)
++{
++	int err;
++	struct dentry *d;
++	struct qstr *dname;
++
++	err = 1;
++	if (unlikely(inode->i_ino == AUFS_ROOT_INO)) {
++		AuWarn("branch root dir was changed\n");
++		err = 0;
++		goto out;
++	}
++
++	if (!isdir) {
++		AuDebugOn(!name);
++		au_iigen_dec(inode);
++		spin_lock(&dcache_lock);
++		list_for_each_entry(d, &inode->i_dentry, d_alias) {
++			dname = &d->d_name;
++			if (dname->len != nlen
++			    || memcmp(dname->name, name, nlen))
++				continue;
++			err = 0;
++			spin_lock(&d->d_lock);
++			__d_drop(d);
++			au_digen_dec(d);
++			spin_unlock(&d->d_lock);
++			break;
++		}
++		spin_unlock(&dcache_lock);
++	} else {
++		au_fset_si(au_sbi(inode->i_sb), FAILED_REFRESH_DIRS);
++		d = d_find_alias(inode);
++		if (!d) {
++			au_iigen_dec(inode);
++			goto out;
++		}
++
++		dname = &d->d_name;
++		if (dname->len == nlen && !memcmp(dname->name, name, nlen))
++			err = hin_gen_tree(d);
++		dput(d);
++	}
++
++ out:
++	AuTraceErr(err);
++	return err;
++}
++
++static int hin_gen_by_name(struct dentry *dentry, const unsigned int isdir)
++{
++	int err;
++	struct inode *inode;
++
++	inode = dentry->d_inode;
++	if (IS_ROOT(dentry)
++	    /* || (inode && inode->i_ino == AUFS_ROOT_INO) */
++		) {
++		AuWarn("branch root dir was changed\n");
++		return 0;
++	}
++
++	err = 0;
++	if (!isdir) {
++		d_drop(dentry);
++		au_digen_dec(dentry);
++		if (inode)
++			au_iigen_dec(inode);
++	} else {
++		au_fset_si(au_sbi(dentry->d_sb), FAILED_REFRESH_DIRS);
++		if (inode)
++			err = hin_gen_tree(dentry);
++	}
++
++	AuTraceErr(err);
++	return err;
++}
++
++/* ---------------------------------------------------------------------- */
++
++/* hinotify job flags */
++#define AuHinJob_XINO0		1
++#define AuHinJob_GEN		(1 << 1)
++#define AuHinJob_DIRENT		(1 << 2)
++#define AuHinJob_ISDIR		(1 << 3)
++#define AuHinJob_TRYXINO0	(1 << 4)
++#define AuHinJob_MNTPNT		(1 << 5)
++#define au_ftest_hinjob(flags, name)	((flags) & AuHinJob_##name)
++#define au_fset_hinjob(flags, name)	{ (flags) |= AuHinJob_##name; }
++#define au_fclr_hinjob(flags, name)	{ (flags) &= ~AuHinJob_##name; }
++
++struct hin_job_args {
++	unsigned int flags;
++	struct inode *inode, *h_inode, *dir, *h_dir;
++	struct dentry *dentry;
++	char *h_name;
++	int h_nlen;
++};
++
++static int hin_job(struct hin_job_args *a)
++{
++	const unsigned int isdir = au_ftest_hinjob(a->flags, ISDIR);
++
++	/* reset xino */
++	if (au_ftest_hinjob(a->flags, XINO0) && a->inode)
++		hin_xino(a->inode, a->h_inode); /* ignore this error */
++
++	if (au_ftest_hinjob(a->flags, TRYXINO0)
++	    && a->inode
++	    && a->h_inode) {
++		mutex_lock_nested(&a->h_inode->i_mutex, AuLsc_I_CHILD);
++		if (!a->h_inode->i_nlink)
++			hin_xino(a->inode, a->h_inode); /* ignore this error */
++		mutex_unlock(&a->h_inode->i_mutex);
++	}
++
++	/* make the generation obsolete */
++	if (au_ftest_hinjob(a->flags, GEN)) {
++		int err = -1;
++		if (a->inode)
++			err = hin_gen_by_inode(a->h_name, a->h_nlen, a->inode,
++					       isdir);
++		if (err && a->dentry)
++			hin_gen_by_name(a->dentry, isdir);
++		/* ignore this error */
++	}
++
++	/* make dir entries obsolete */
++	if (au_ftest_hinjob(a->flags, DIRENT) && a->inode) {
++		struct au_vdir *vdir;
++
++		vdir = au_ivdir(a->inode);
++		if (vdir)
++			vdir->vd_jiffy = 0;
++		/* IMustLock(a->inode); */
++		/* a->inode->i_version++; */
++	}
++
++	/* can do nothing but warn */
++	if (au_ftest_hinjob(a->flags, MNTPNT)
++	    && a->dentry
++	    && d_mountpoint(a->dentry))
++		AuWarn("mount-point %.*s is removed or renamed\n",
++		       AuDLNPair(a->dentry));
++
++	return 0;
++}
++
++/* ---------------------------------------------------------------------- */
++
++static char *in_name(u32 mask)
++{
++#ifdef CONFIG_AUFS_DEBUG
++#define test_ret(flag)	if (mask & flag) \
++				return #flag;
++	test_ret(IN_ACCESS);
++	test_ret(IN_MODIFY);
++	test_ret(IN_ATTRIB);
++	test_ret(IN_CLOSE_WRITE);
++	test_ret(IN_CLOSE_NOWRITE);
++	test_ret(IN_OPEN);
++	test_ret(IN_MOVED_FROM);
++	test_ret(IN_MOVED_TO);
++	test_ret(IN_CREATE);
++	test_ret(IN_DELETE);
++	test_ret(IN_DELETE_SELF);
++	test_ret(IN_MOVE_SELF);
++	test_ret(IN_UNMOUNT);
++	test_ret(IN_Q_OVERFLOW);
++	test_ret(IN_IGNORED);
++	return "";
++#undef test_ret
++#else
++	return "??";
++#endif
++}
++
++static struct dentry *lookup_wlock_by_name(char *name, unsigned int nlen,
++					   struct inode *dir)
++{
++	struct dentry *dentry, *d, *parent;
++	struct qstr *dname;
++
++	parent = d_find_alias(dir);
++	if (!parent)
++		return NULL;
++
++	dentry = NULL;
++	spin_lock(&dcache_lock);
++	list_for_each_entry(d, &parent->d_subdirs, d_u.d_child) {
++		/* AuDbg("%.*s\n", AuDLNPair(d)); */
++		dname = &d->d_name;
++		if (dname->len != nlen || memcmp(dname->name, name, nlen))
++			continue;
++		if (!atomic_read(&d->d_count) || !d->d_fsdata) {
++			spin_lock(&d->d_lock);
++			__d_drop(d);
++			spin_unlock(&d->d_lock);
++			continue;
++		}
++
++		dentry = dget(d);
++		break;
++	}
++	spin_unlock(&dcache_lock);
++	dput(parent);
++
++	if (dentry)
++		di_write_lock_child(dentry);
++
++	return dentry;
++}
++
++static struct inode *lookup_wlock_by_ino(struct super_block *sb,
++					 aufs_bindex_t bindex, ino_t h_ino)
++{
++	struct inode *inode;
++	ino_t ino;
++	int err;
++
++	inode = NULL;
++	err = au_xino_read(sb, bindex, h_ino, &ino);
++	if (!err && ino)
++		inode = ilookup(sb, ino);
++	if (!inode)
++		goto out;
++
++	if (unlikely(inode->i_ino == AUFS_ROOT_INO)) {
++		AuWarn("wrong root branch\n");
++		iput(inode);
++		inode = NULL;
++		goto out;
++	}
++
++	ii_write_lock_child(inode);
++
++ out:
++	return inode;
++}
++
++enum { CHILD, PARENT };
++struct postproc_args {
++	struct inode *h_dir, *dir, *h_child_inode;
++	u32 mask;
++	unsigned int flags[2];
++	unsigned int h_child_nlen;
++	char h_child_name[];
++};
++
++static void postproc(void *_args)
++{
++	struct postproc_args *a = _args;
++	struct super_block *sb;
++	aufs_bindex_t bindex, bend, bfound;
++	unsigned char xino, try_iput;
++	int err;
++	struct inode *inode;
++	ino_t h_ino;
++	struct hin_job_args args;
++	struct dentry *dentry;
++	struct au_sbinfo *sbinfo;
++
++	AuDebugOn(!_args);
++	AuDebugOn(!a->h_dir);
++	AuDebugOn(!a->dir);
++	AuDebugOn(!a->mask);
++	AuDbg("mask 0x%x %s, i%lu, hi%lu, hci%lu\n",
++	      a->mask, in_name(a->mask), a->dir->i_ino, a->h_dir->i_ino,
++	      a->h_child_inode ? a->h_child_inode->i_ino : 0);
++
++	inode = NULL;
++	dentry = NULL;
++	/*
++	 * do not lock a->dir->i_mutex here
++	 * because of d_revalidate() may cause a deadlock.
++	 */
++	sb = a->dir->i_sb;
++	AuDebugOn(!sb);
++	sbinfo = au_sbi(sb);
++	AuDebugOn(!sbinfo);
++	/* big aufs lock */
++	si_noflush_write_lock(sb);
++
++	ii_read_lock_parent(a->dir);
++	bfound = -1;
++	bend = au_ibend(a->dir);
++	for (bindex = au_ibstart(a->dir); bindex <= bend; bindex++)
++		if (au_h_iptr(a->dir, bindex) == a->h_dir) {
++			bfound = bindex;
++			break;
++		}
++	ii_read_unlock(a->dir);
++	if (unlikely(bfound < 0))
++		goto out;
++
++	xino = !!au_opt_test(au_mntflags(sb), XINO);
++	h_ino = 0;
++	if (a->h_child_inode)
++		h_ino = a->h_child_inode->i_ino;
++
++	if (a->h_child_nlen
++	    && (au_ftest_hinjob(a->flags[CHILD], GEN)
++		|| au_ftest_hinjob(a->flags[CHILD], MNTPNT)))
++		dentry = lookup_wlock_by_name(a->h_child_name, a->h_child_nlen,
++					      a->dir);
++	try_iput = 0;
++	if (dentry)
++		inode = dentry->d_inode;
++	if (xino && !inode && h_ino
++	    && (au_ftest_hinjob(a->flags[CHILD], XINO0)
++		|| au_ftest_hinjob(a->flags[CHILD], TRYXINO0)
++		|| au_ftest_hinjob(a->flags[CHILD], GEN))) {
++		inode = lookup_wlock_by_ino(sb, bfound, h_ino);
++		try_iput = 1;
++	    }
++
++	args.flags = a->flags[CHILD];
++	args.dentry = dentry;
++	args.inode = inode;
++	args.h_inode = a->h_child_inode;
++	args.dir = a->dir;
++	args.h_dir = a->h_dir;
++	args.h_name = a->h_child_name;
++	args.h_nlen = a->h_child_nlen;
++	err = hin_job(&args);
++	if (dentry) {
++		if (dentry->d_fsdata)
++			di_write_unlock(dentry);
++		dput(dentry);
++	}
++	if (inode && try_iput) {
++		ii_write_unlock(inode);
++		iput(inode);
++	}
++
++	ii_write_lock_parent(a->dir);
++	args.flags = a->flags[PARENT];
++	args.dentry = NULL;
++	args.inode = a->dir;
++	args.h_inode = a->h_dir;
++	args.dir = NULL;
++	args.h_dir = NULL;
++	args.h_name = NULL;
++	args.h_nlen = 0;
++	err = hin_job(&args);
++	ii_write_unlock(a->dir);
++
++ out:
++	au_nwt_done(&sbinfo->si_nowait);
++	si_write_unlock(sb);
++
++	iput(a->h_child_inode);
++	iput(a->h_dir);
++	iput(a->dir);
++	kfree(a);
++}
++
++/* ---------------------------------------------------------------------- */
++
++static void aufs_inotify(struct inotify_watch *watch, u32 wd __maybe_unused,
++			 u32 mask, u32 cookie __maybe_unused,
++			 const char *h_child_name, struct inode *h_child_inode)
++{
++	struct au_hinotify *hinotify;
++	struct postproc_args *args;
++	int len, wkq_err;
++	unsigned char isdir, isroot, wh;
++	char *p;
++	struct inode *dir;
++	unsigned int flags[2];
++
++	/* if IN_UNMOUNT happens, there must be another bug */
++	AuDebugOn(mask & IN_UNMOUNT);
++	if (mask & (IN_IGNORED | IN_UNMOUNT)) {
++		put_inotify_watch(watch);
++		return;
++	}
++#ifdef AuDbgHinotify
++	au_debug(1);
++	if (1 || !h_child_name || strcmp(h_child_name, AUFS_XINO_FNAME)) {
++		AuDbg("i%lu, wd %d, mask 0x%x %s, cookie 0x%x, hcname %s,"
++		      " hi%lu\n",
++		      watch->inode->i_ino, wd, mask, in_name(mask), cookie,
++		      h_child_name ? h_child_name : "",
++		      h_child_inode ? h_child_inode->i_ino : 0);
++		WARN_ON(1);
++	}
++	au_debug(0);
++#endif
++
++	hinotify = container_of(watch, struct au_hinotify, hin_watch);
++	AuDebugOn(!hinotify || !hinotify->hin_aufs_inode);
++	dir = igrab(hinotify->hin_aufs_inode);
++	if (!dir)
++		return;
++
++	isroot = (dir->i_ino == AUFS_ROOT_INO);
++	len = 0;
++	wh = 0;
++	if (h_child_name) {
++		len = strlen(h_child_name);
++		if (!memcmp(h_child_name, AUFS_WH_PFX, AUFS_WH_PFX_LEN)) {
++			h_child_name += AUFS_WH_PFX_LEN;
++			len -= AUFS_WH_PFX_LEN;
++			wh = 1;
++		}
++	}
++
++	isdir = 0;
++	if (h_child_inode)
++		isdir = !!S_ISDIR(h_child_inode->i_mode);
++	flags[PARENT] = AuHinJob_ISDIR;
++	flags[CHILD] = 0;
++	if (isdir)
++		flags[CHILD] = AuHinJob_ISDIR;
++	switch (mask & IN_ALL_EVENTS) {
++	case IN_MOVED_FROM:
++	case IN_MOVED_TO:
++		AuDebugOn(!h_child_name || !h_child_inode);
++		au_fset_hinjob(flags[CHILD], GEN);
++		au_fset_hinjob(flags[CHILD], XINO0);
++		au_fset_hinjob(flags[CHILD], MNTPNT);
++		au_fset_hinjob(flags[PARENT], DIRENT);
++		break;
++
++	case IN_CREATE:
++		AuDebugOn(!h_child_name || !h_child_inode);
++		au_fset_hinjob(flags[PARENT], DIRENT);
++		au_fset_hinjob(flags[CHILD], GEN);
++		break;
++
++	case IN_DELETE:
++		/*
++		 * aufs never be able to get this child inode.
++		 * revalidation should be in d_revalidate()
++		 * by checking i_nlink, i_generation or d_unhashed().
++		 */
++		AuDebugOn(!h_child_name);
++		au_fset_hinjob(flags[PARENT], DIRENT);
++		au_fset_hinjob(flags[CHILD], GEN);
++		au_fset_hinjob(flags[CHILD], TRYXINO0);
++		au_fset_hinjob(flags[CHILD], MNTPNT);
++		break;
++
++	default:
++		AuDebugOn(1);
++	}
++
++	if (wh)
++		h_child_inode = NULL;
++
++	/* iput() and kfree() will be called in postproc() */
++	/*
++	 * inotify_mutex is already acquired and kmalloc/prune_icache may lock
++	 * iprune_mutex. strange.
++	 */
++	lockdep_off();
++	args = kmalloc(sizeof(*args) + len + 1, GFP_NOFS);
++	lockdep_on();
++	if (unlikely(!args)) {
++		AuErr1("no memory\n");
++		iput(dir);
++		return;
++	}
++	args->flags[PARENT] = flags[PARENT];
++	args->flags[CHILD] = flags[CHILD];
++	args->mask = mask;
++	args->dir = dir;
++	args->h_dir = igrab(watch->inode);
++	if (h_child_inode)
++		h_child_inode = igrab(h_child_inode); /* can be NULL */
++	args->h_child_inode = h_child_inode;
++	args->h_child_nlen = len;
++	if (len) {
++		p = (void *)args;
++		p += sizeof(*args);
++		memcpy(p, h_child_name, len + 1);
++	}
++
++	lockdep_off();
++	wkq_err = au_wkq_nowait(postproc, args, dir->i_sb);
++	lockdep_on();
++	if (unlikely(wkq_err))
++		AuErr("wkq %d\n", wkq_err);
++}
++
++static void aufs_inotify_destroy(struct inotify_watch *watch __maybe_unused)
++{
++	return;
++}
++
++static struct inotify_operations aufs_inotify_ops = {
++	.handle_event	= aufs_inotify,
++	.destroy_watch	= aufs_inotify_destroy
++};
++
++/* ---------------------------------------------------------------------- */
++
++static void au_hin_destroy_cache(void)
++{
++	kmem_cache_destroy(au_cachep[AuCache_HINOTIFY]);
++	au_cachep[AuCache_HINOTIFY] = NULL;
++}
++
++int __init au_hinotify_init(void)
++{
++	int err;
++
++	err = -ENOMEM;
++	au_cachep[AuCache_HINOTIFY] = AuCache(au_hinotify);
++	if (au_cachep[AuCache_HINOTIFY]) {
++		err = 0;
++		au_hin_handle = inotify_init(&aufs_inotify_ops);
++		if (IS_ERR(au_hin_handle)) {
++			err = PTR_ERR(au_hin_handle);
++			au_hin_destroy_cache();
++		}
++	}
++	AuTraceErr(err);
++	return err;
++}
++
++void au_hinotify_fin(void)
++{
++	inotify_destroy(au_hin_handle);
++	if (au_cachep[AuCache_HINOTIFY])
++		au_hin_destroy_cache();
++}
+diff -Nur linux-2.6.31.5.orig/fs/aufs/iinfo.c linux-2.6.31.5/fs/aufs/iinfo.c
+--- linux-2.6.31.5.orig/fs/aufs/iinfo.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.31.5/fs/aufs/iinfo.c	2009-11-15 22:02:37.000000000 +0100
+@@ -0,0 +1,283 @@
++/*
++ * Copyright (C) 2005-2009 Junjiro R. Okajima
++ *
++ * This program, aufs is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
++ */
++
++/*
++ * inode private data
++ */
++
++#include "aufs.h"
++
++struct inode *au_h_iptr(struct inode *inode, aufs_bindex_t bindex)
++{
++	struct inode *h_inode;
++
++	IiMustAnyLock(inode);
++
++	h_inode = au_ii(inode)->ii_hinode[0 + bindex].hi_inode;
++	AuDebugOn(h_inode && atomic_read(&h_inode->i_count) <= 0);
++	return h_inode;
++}
++
++/* todo: hard/soft set? */
++void au_set_ibstart(struct inode *inode, aufs_bindex_t bindex)
++{
++	struct au_iinfo *iinfo = au_ii(inode);
++	struct inode *h_inode;
++
++	IiMustWriteLock(inode);
++
++	iinfo->ii_bstart = bindex;
++	h_inode = iinfo->ii_hinode[bindex + 0].hi_inode;
++	if (h_inode)
++		au_cpup_igen(inode, h_inode);
++}
++
++void au_hiput(struct au_hinode *hinode)
++{
++	au_hin_free(hinode);
++	dput(hinode->hi_whdentry);
++	iput(hinode->hi_inode);
++}
++
++unsigned int au_hi_flags(struct inode *inode, int isdir)
++{
++	unsigned int flags;
++	const unsigned int mnt_flags = au_mntflags(inode->i_sb);
++
++	flags = 0;
++	if (au_opt_test(mnt_flags, XINO))
++		au_fset_hi(flags, XINO);
++	if (isdir && au_opt_test(mnt_flags, UDBA_HINOTIFY))
++		au_fset_hi(flags, HINOTIFY);
++	return flags;
++}
++
++void au_set_h_iptr(struct inode *inode, aufs_bindex_t bindex,
++		   struct inode *h_inode, unsigned int flags)
++{
++	struct au_hinode *hinode;
++	struct inode *hi;
++	struct au_iinfo *iinfo = au_ii(inode);
++
++	IiMustWriteLock(inode);
++
++	hinode = iinfo->ii_hinode + bindex;
++	hi = hinode->hi_inode;
++	AuDebugOn(h_inode && atomic_read(&h_inode->i_count) <= 0);
++	AuDebugOn(h_inode && hi);
++
++	if (hi)
++		au_hiput(hinode);
++	hinode->hi_inode = h_inode;
++	if (h_inode) {
++		int err;
++		struct super_block *sb = inode->i_sb;
++		struct au_branch *br;
++
++		if (bindex == iinfo->ii_bstart)
++			au_cpup_igen(inode, h_inode);
++		br = au_sbr(sb, bindex);
++		hinode->hi_id = br->br_id;
++		if (au_ftest_hi(flags, XINO)) {
++			err = au_xino_write(sb, bindex, h_inode->i_ino,
++					    inode->i_ino);
++			if (unlikely(err))
++				AuIOErr1("failed au_xino_write() %d\n", err);
++		}
++
++		if (au_ftest_hi(flags, HINOTIFY)
++		    && au_br_hinotifyable(br->br_perm)) {
++			err = au_hin_alloc(hinode, inode, h_inode);
++			if (unlikely(err))
++				AuIOErr1("au_hin_alloc() %d\n", err);
++		}
++	}
++}
++
++void au_set_hi_wh(struct inode *inode, aufs_bindex_t bindex,
++		  struct dentry *h_wh)
++{
++	struct au_hinode *hinode;
++
++	IiMustWriteLock(inode);
++
++	hinode = au_ii(inode)->ii_hinode + bindex;
++	AuDebugOn(hinode->hi_whdentry);
++	hinode->hi_whdentry = h_wh;
++}
++
++void au_update_iigen(struct inode *inode)
++{
++	atomic_set(&au_ii(inode)->ii_generation, au_sigen(inode->i_sb));
++	/* smp_mb(); */ /* atomic_set */
++}
++
++/* it may be called at remount time, too */
++void au_update_brange(struct inode *inode, int do_put_zero)
++{
++	struct au_iinfo *iinfo;
++
++	iinfo = au_ii(inode);
++	if (!iinfo || iinfo->ii_bstart < 0)
++		return;
++
++	IiMustWriteLock(inode);
++
++	if (do_put_zero) {
++		aufs_bindex_t bindex;
++
++		for (bindex = iinfo->ii_bstart; bindex <= iinfo->ii_bend;
++		     bindex++) {
++			struct inode *h_i;
++
++			h_i = iinfo->ii_hinode[0 + bindex].hi_inode;
++			if (h_i && !h_i->i_nlink)
++				au_set_h_iptr(inode, bindex, NULL, 0);
++		}
++	}
++
++	iinfo->ii_bstart = -1;
++	while (++iinfo->ii_bstart <= iinfo->ii_bend)
++		if (iinfo->ii_hinode[0 + iinfo->ii_bstart].hi_inode)
++			break;
++	if (iinfo->ii_bstart > iinfo->ii_bend) {
++		iinfo->ii_bstart = -1;
++		iinfo->ii_bend = -1;
++		return;
++	}
++
++	iinfo->ii_bend++;
++	while (0 <= --iinfo->ii_bend)
++		if (iinfo->ii_hinode[0 + iinfo->ii_bend].hi_inode)
++			break;
++	AuDebugOn(iinfo->ii_bstart > iinfo->ii_bend || iinfo->ii_bend < 0);
++}
++
++/* ---------------------------------------------------------------------- */
++
++int au_iinfo_init(struct inode *inode)
++{
++	struct au_iinfo *iinfo;
++	struct super_block *sb;
++	int nbr, i;
++
++	sb = inode->i_sb;
++	iinfo = &(container_of(inode, struct au_icntnr, vfs_inode)->iinfo);
++	nbr = au_sbend(sb) + 1;
++	if (unlikely(nbr <= 0))
++		nbr = 1;
++	iinfo->ii_hinode = kcalloc(nbr, sizeof(*iinfo->ii_hinode), GFP_NOFS);
++	if (iinfo->ii_hinode) {
++		for (i = 0; i < nbr; i++)
++			iinfo->ii_hinode[i].hi_id = -1;
++
++		atomic_set(&iinfo->ii_generation, au_sigen(sb));
++		/* smp_mb(); */ /* atomic_set */
++		au_rw_init(&iinfo->ii_rwsem);
++		iinfo->ii_bstart = -1;
++		iinfo->ii_bend = -1;
++		iinfo->ii_vdir = NULL;
++		return 0;
++	}
++	return -ENOMEM;
++}
++
++int au_ii_realloc(struct au_iinfo *iinfo, int nbr)
++{
++	int err, sz;
++	struct au_hinode *hip;
++
++	AuRwMustWriteLock(&iinfo->ii_rwsem);
++
++	err = -ENOMEM;
++	sz = sizeof(*hip) * (iinfo->ii_bend + 1);
++	if (!sz)
++		sz = sizeof(*hip);
++	hip = au_kzrealloc(iinfo->ii_hinode, sz, sizeof(*hip) * nbr, GFP_NOFS);
++	if (hip) {
++		iinfo->ii_hinode = hip;
++		err = 0;
++	}
++
++	return err;
++}
++
++static int au_iinfo_write0(struct super_block *sb, struct au_hinode *hinode,
++			   ino_t ino)
++{
++	int err;
++	aufs_bindex_t bindex;
++	unsigned char locked;
++
++	err = 0;
++	locked = !!si_noflush_read_trylock(sb);
++	bindex = au_br_index(sb, hinode->hi_id);
++	if (bindex >= 0)
++		err = au_xino_write0(sb, bindex, hinode->hi_inode->i_ino, ino);
++	/* error action? */
++	if (locked)
++		si_read_unlock(sb);
++	return err;
++}
++
++void au_iinfo_fin(struct inode *inode)
++{
++	ino_t ino;
++	aufs_bindex_t bend;
++	unsigned char unlinked = !inode->i_nlink;
++	struct au_iinfo *iinfo;
++	struct au_hinode *hi;
++	struct super_block *sb;
++
++	if (unlinked) {
++		int err = au_xigen_inc(inode);
++		if (unlikely(err))
++			AuWarn1("failed resetting i_generation, %d\n", err);
++	}
++
++	iinfo = au_ii(inode);
++	/* bad_inode case */
++	if (!iinfo)
++		return;
++
++	if (iinfo->ii_vdir)
++		au_vdir_free(iinfo->ii_vdir);
++
++	if (iinfo->ii_bstart >= 0) {
++		sb = inode->i_sb;
++		ino = 0;
++		if (unlinked)
++			ino = inode->i_ino;
++		hi = iinfo->ii_hinode + iinfo->ii_bstart;
++		bend = iinfo->ii_bend;
++		while (iinfo->ii_bstart++ <= bend) {
++			if (hi->hi_inode) {
++				if (unlinked || !hi->hi_inode->i_nlink) {
++					au_iinfo_write0(sb, hi, ino);
++					/* ignore this error */
++					ino = 0;
++				}
++				au_hiput(hi);
++			}
++			hi++;
++		}
++	}
++
++	kfree(iinfo->ii_hinode);
++	AuRwDestroy(&iinfo->ii_rwsem);
++}
+diff -Nur linux-2.6.31.5.orig/fs/aufs/inode.c linux-2.6.31.5/fs/aufs/inode.c
+--- linux-2.6.31.5.orig/fs/aufs/inode.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.31.5/fs/aufs/inode.c	2009-11-15 22:02:37.000000000 +0100
+@@ -0,0 +1,380 @@
++/*
++ * Copyright (C) 2005-2009 Junjiro R. Okajima
++ *
++ * This program, aufs is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
++ */
++
++/*
++ * inode functions
++ */
++
++#include "aufs.h"
++
++struct inode *au_igrab(struct inode *inode)
++{
++	if (inode) {
++		AuDebugOn(!atomic_read(&inode->i_count));
++		atomic_inc_return(&inode->i_count);
++	}
++	return inode;
++}
++
++static void au_refresh_hinode_attr(struct inode *inode, int do_version)
++{
++	au_cpup_attr_all(inode, /*force*/0);
++	au_update_iigen(inode);
++	if (do_version)
++		inode->i_version++;
++}
++
++int au_refresh_hinode_self(struct inode *inode, int do_attr)
++{
++	int err;
++	aufs_bindex_t bindex, new_bindex;
++	unsigned char update;
++	struct inode *first;
++	struct au_hinode *p, *q, tmp;
++	struct super_block *sb;
++	struct au_iinfo *iinfo;
++
++	IiMustWriteLock(inode);
++
++	update = 0;
++	sb = inode->i_sb;
++	iinfo = au_ii(inode);
++	err = au_ii_realloc(iinfo, au_sbend(sb) + 1);
++	if (unlikely(err))
++		goto out;
++
++	p = iinfo->ii_hinode + iinfo->ii_bstart;
++	first = p->hi_inode;
++	err = 0;
++	for (bindex = iinfo->ii_bstart; bindex <= iinfo->ii_bend;
++	     bindex++, p++) {
++		if (!p->hi_inode)
++			continue;
++
++		new_bindex = au_br_index(sb, p->hi_id);
++		if (new_bindex == bindex)
++			continue;
++
++		if (new_bindex < 0) {
++			update++;
++			au_hiput(p);
++			p->hi_inode = NULL;
++			continue;
++		}
++
++		if (new_bindex < iinfo->ii_bstart)
++			iinfo->ii_bstart = new_bindex;
++		if (iinfo->ii_bend < new_bindex)
++			iinfo->ii_bend = new_bindex;
++		/* swap two lower inode, and loop again */
++		q = iinfo->ii_hinode + new_bindex;
++		tmp = *q;
++		*q = *p;
++		*p = tmp;
++		if (tmp.hi_inode) {
++			bindex--;
++			p--;
++		}
++	}
++	au_update_brange(inode, /*do_put_zero*/0);
++	if (do_attr)
++		au_refresh_hinode_attr(inode, update && S_ISDIR(inode->i_mode));
++
++ out:
++	return err;
++}
++
++int au_refresh_hinode(struct inode *inode, struct dentry *dentry)
++{
++	int err, update;
++	unsigned int flags;
++	aufs_bindex_t bindex, bend;
++	unsigned char isdir;
++	struct inode *first;
++	struct au_hinode *p;
++	struct au_iinfo *iinfo;
++
++	err = au_refresh_hinode_self(inode, /*do_attr*/0);
++	if (unlikely(err))
++		goto out;
++
++	update = 0;
++	iinfo = au_ii(inode);
++	p = iinfo->ii_hinode + iinfo->ii_bstart;
++	first = p->hi_inode;
++	isdir = S_ISDIR(inode->i_mode);
++	flags = au_hi_flags(inode, isdir);
++	bend = au_dbend(dentry);
++	for (bindex = au_dbstart(dentry); bindex <= bend; bindex++) {
++		struct inode *h_i;
++		struct dentry *h_d;
++
++		h_d = au_h_dptr(dentry, bindex);
++		if (!h_d || !h_d->d_inode)
++			continue;
++
++		if (iinfo->ii_bstart <= bindex && bindex <= iinfo->ii_bend) {
++			h_i = au_h_iptr(inode, bindex);
++			if (h_i) {
++				if (h_i == h_d->d_inode)
++					continue;
++				err = -EIO;
++				break;
++			}
++		}
++		if (bindex < iinfo->ii_bstart)
++			iinfo->ii_bstart = bindex;
++		if (iinfo->ii_bend < bindex)
++			iinfo->ii_bend = bindex;
++		au_set_h_iptr(inode, bindex, au_igrab(h_d->d_inode), flags);
++		update = 1;
++	}
++	au_update_brange(inode, /*do_put_zero*/0);
++
++	if (unlikely(err))
++		goto out;
++
++	au_refresh_hinode_attr(inode, update && isdir);
++
++ out:
++	return err;
++}
++
++static int set_inode(struct inode *inode, struct dentry *dentry)
++{
++	int err;
++	unsigned int flags;
++	umode_t mode;
++	aufs_bindex_t bindex, bstart, btail;
++	unsigned char isdir;
++	struct dentry *h_dentry;
++	struct inode *h_inode;
++	struct au_iinfo *iinfo;
++
++	IiMustWriteLock(inode);
++
++	err = 0;
++	isdir = 0;
++	bstart = au_dbstart(dentry);
++	h_inode = au_h_dptr(dentry, bstart)->d_inode;
++	mode = h_inode->i_mode;
++	switch (mode & S_IFMT) {
++	case S_IFREG:
++		btail = au_dbtail(dentry);
++		inode->i_op = &aufs_iop;
++		inode->i_fop = &aufs_file_fop;
++		inode->i_mapping->a_ops = &aufs_aop;
++		break;
++	case S_IFDIR:
++		isdir = 1;
++		btail = au_dbtaildir(dentry);
++		inode->i_op = &aufs_dir_iop;
++		inode->i_fop = &aufs_dir_fop;
++		break;
++	case S_IFLNK:
++		btail = au_dbtail(dentry);
++		inode->i_op = &aufs_symlink_iop;
++		break;
++	case S_IFBLK:
++	case S_IFCHR:
++	case S_IFIFO:
++	case S_IFSOCK:
++		btail = au_dbtail(dentry);
++		inode->i_op = &aufs_iop;
++		init_special_inode(inode, mode, h_inode->i_rdev);
++		break;
++	default:
++		AuIOErr("Unknown file type 0%o\n", mode);
++		err = -EIO;
++		goto out;
++	}
++
++	/* do not set inotify for whiteouted dirs (SHWH mode) */
++	flags = au_hi_flags(inode, isdir);
++	if (au_opt_test(au_mntflags(dentry->d_sb), SHWH)
++	    && au_ftest_hi(flags, HINOTIFY)
++	    && dentry->d_name.len > AUFS_WH_PFX_LEN
++	    && !memcmp(dentry->d_name.name, AUFS_WH_PFX, AUFS_WH_PFX_LEN))
++		au_fclr_hi(flags, HINOTIFY);
++	iinfo = au_ii(inode);
++	iinfo->ii_bstart = bstart;
++	iinfo->ii_bend = btail;
++	for (bindex = bstart; bindex <= btail; bindex++) {
++		h_dentry = au_h_dptr(dentry, bindex);
++		if (h_dentry)
++			au_set_h_iptr(inode, bindex,
++				      au_igrab(h_dentry->d_inode), flags);
++	}
++	au_cpup_attr_all(inode, /*force*/1);
++
++ out:
++	return err;
++}
++
++/* successful returns with iinfo write_locked */
++static int reval_inode(struct inode *inode, struct dentry *dentry, int *matched)
++{
++	int err;
++	aufs_bindex_t bindex, bend;
++	struct inode *h_inode, *h_dinode;
++
++	*matched = 0;
++
++	/*
++	 * before this function, if aufs got any iinfo lock, it must be only
++	 * one, the parent dir.
++	 * it can happen by UDBA and the obsoleted inode number.
++	 */
++	err = -EIO;
++	if (unlikely(inode->i_ino == parent_ino(dentry)))
++		goto out;
++
++	err = 0;
++	ii_write_lock_new_child(inode);
++	h_dinode = au_h_dptr(dentry, au_dbstart(dentry))->d_inode;
++	bend = au_ibend(inode);
++	for (bindex = au_ibstart(inode); bindex <= bend; bindex++) {
++		h_inode = au_h_iptr(inode, bindex);
++		if (h_inode && h_inode == h_dinode) {
++			*matched = 1;
++			err = 0;
++			if (au_iigen(inode) != au_digen(dentry))
++				err = au_refresh_hinode(inode, dentry);
++			break;
++		}
++	}
++
++	if (unlikely(err))
++		ii_write_unlock(inode);
++ out:
++	return err;
++}
++
++/* successful returns with iinfo write_locked */
++/* todo: return with unlocked? */
++struct inode *au_new_inode(struct dentry *dentry, int must_new)
++{
++	struct inode *inode;
++	struct dentry *h_dentry;
++	struct super_block *sb;
++	ino_t h_ino, ino;
++	int err, match;
++	aufs_bindex_t bstart;
++
++	sb = dentry->d_sb;
++	bstart = au_dbstart(dentry);
++	h_dentry = au_h_dptr(dentry, bstart);
++	h_ino = h_dentry->d_inode->i_ino;
++	err = au_xino_read(sb, bstart, h_ino, &ino);
++	inode = ERR_PTR(err);
++	if (unlikely(err))
++		goto out;
++ new_ino:
++	if (!ino) {
++		ino = au_xino_new_ino(sb);
++		if (unlikely(!ino)) {
++			inode = ERR_PTR(-EIO);
++			goto out;
++		}
++	}
++
++	AuDbg("i%lu\n", (unsigned long)ino);
++	inode = au_iget_locked(sb, ino);
++	err = PTR_ERR(inode);
++	if (IS_ERR(inode))
++		goto out;
++
++	AuDbg("%lx, new %d\n", inode->i_state, !!(inode->i_state & I_NEW));
++	if (inode->i_state & I_NEW) {
++		ii_write_lock_new_child(inode);
++		err = set_inode(inode, dentry);
++		unlock_new_inode(inode);
++		if (!err)
++			goto out; /* success */
++
++		iget_failed(inode);
++		ii_write_unlock(inode);
++		goto out_iput;
++	} else if (!must_new) {
++		err = reval_inode(inode, dentry, &match);
++		if (!err)
++			goto out; /* success */
++		else if (match)
++			goto out_iput;
++	}
++
++	if (unlikely(au_test_fs_unique_ino(h_dentry->d_inode)))
++		AuWarn1("Warning: Un-notified UDBA or repeatedly renamed dir,"
++			" b%d, %s, %.*s, hi%lu, i%lu.\n",
++			bstart, au_sbtype(h_dentry->d_sb), AuDLNPair(dentry),
++			(unsigned long)h_ino, (unsigned long)ino);
++	ino = 0;
++	err = au_xino_write(sb, bstart, h_ino, /*ino*/0);
++	if (!err) {
++		iput(inode);
++		goto new_ino;
++	}
++
++ out_iput:
++	iput(inode);
++	inode = ERR_PTR(err);
++ out:
++	return inode;
++}
++
++/* ---------------------------------------------------------------------- */
++
++int au_test_ro(struct super_block *sb, aufs_bindex_t bindex,
++	       struct inode *inode)
++{
++	int err;
++
++	err = au_br_rdonly(au_sbr(sb, bindex));
++
++	/* pseudo-link after flushed may happen out of bounds */
++	if (!err
++	    && inode
++	    && au_ibstart(inode) <= bindex
++	    && bindex <= au_ibend(inode)) {
++		/*
++		 * permission check is unnecessary since vfsub routine
++		 * will be called later
++		 */
++		struct inode *hi = au_h_iptr(inode, bindex);
++		if (hi)
++			err = IS_IMMUTABLE(hi) ? -EROFS : 0;
++	}
++
++	return err;
++}
++
++int au_test_h_perm(struct inode *h_inode, int mask)
++{
++	if (!current_fsuid())
++		return 0;
++	return inode_permission(h_inode, mask);
++}
++
++int au_test_h_perm_sio(struct inode *h_inode, int mask)
++{
++	if (au_test_nfs(h_inode->i_sb)
++	    && (mask & MAY_WRITE)
++	    && S_ISDIR(h_inode->i_mode))
++		mask |= MAY_READ; /* force permission check */
++	return au_test_h_perm(h_inode, mask);
++}
+diff -Nur linux-2.6.31.5.orig/fs/aufs/inode.h linux-2.6.31.5/fs/aufs/inode.h
+--- linux-2.6.31.5.orig/fs/aufs/inode.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.31.5/fs/aufs/inode.h	2009-11-15 22:02:37.000000000 +0100
+@@ -0,0 +1,484 @@
++/*
++ * Copyright (C) 2005-2009 Junjiro R. Okajima
++ *
++ * This program, aufs is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
++ */
++
++/*
++ * inode operations
++ */
++
++#ifndef __AUFS_INODE_H__
++#define __AUFS_INODE_H__
++
++#ifdef __KERNEL__
++
++#include <linux/fs.h>
++#include <linux/inotify.h>
++#include <linux/aufs_type.h>
++#include "rwsem.h"
++
++struct vfsmount;
++
++struct au_hinotify {
++#ifdef CONFIG_AUFS_HINOTIFY
++	struct inotify_watch	hin_watch;
++	struct inode		*hin_aufs_inode;	/* no get/put */
++#endif
++};
++
++struct au_hinode {
++	struct inode		*hi_inode;
++	aufs_bindex_t		hi_id;
++#ifdef CONFIG_AUFS_HINOTIFY
++	struct au_hinotify	*hi_notify;
++#endif
++
++	/* reference to the copied-up whiteout with get/put */
++	struct dentry		*hi_whdentry;
++};
++
++struct au_vdir;
++struct au_iinfo {
++	atomic_t		ii_generation;
++	struct super_block	*ii_hsb1;	/* no get/put */
++
++	struct au_rwsem		ii_rwsem;
++	aufs_bindex_t		ii_bstart, ii_bend;
++	__u32			ii_higen;
++	struct au_hinode	*ii_hinode;
++	struct au_vdir		*ii_vdir;
++};
++
++struct au_icntnr {
++	struct au_iinfo iinfo;
++	struct inode vfs_inode;
++};
++
++/* au_pin flags */
++#define AuPin_DI_LOCKED		1
++#define AuPin_MNT_WRITE		(1 << 1)
++#define au_ftest_pin(flags, name)	((flags) & AuPin_##name)
++#define au_fset_pin(flags, name)	{ (flags) |= AuPin_##name; }
++#define au_fclr_pin(flags, name)	{ (flags) &= ~AuPin_##name; }
++
++struct au_pin {
++	/* input */
++	struct dentry *dentry;
++	unsigned int udba;
++	unsigned char lsc_di, lsc_hi, flags;
++	aufs_bindex_t bindex;
++
++	/* output */
++	struct dentry *parent;
++	struct au_hinode *hdir;
++	struct vfsmount *h_mnt;
++};
++
++/* ---------------------------------------------------------------------- */
++
++static inline struct au_iinfo *au_ii(struct inode *inode)
++{
++	struct au_iinfo *iinfo;
++
++	iinfo = &(container_of(inode, struct au_icntnr, vfs_inode)->iinfo);
++	if (iinfo->ii_hinode)
++		return iinfo;
++	return NULL; /* debugging bad_inode case */
++}
++
++/* ---------------------------------------------------------------------- */
++
++/* inode.c */
++struct inode *au_igrab(struct inode *inode);
++int au_refresh_hinode_self(struct inode *inode, int do_attr);
++int au_refresh_hinode(struct inode *inode, struct dentry *dentry);
++struct inode *au_new_inode(struct dentry *dentry, int must_new);
++int au_test_ro(struct super_block *sb, aufs_bindex_t bindex,
++	       struct inode *inode);
++int au_test_h_perm(struct inode *h_inode, int mask);
++int au_test_h_perm_sio(struct inode *h_inode, int mask);
++
++/* i_op.c */
++extern struct inode_operations aufs_iop, aufs_symlink_iop, aufs_dir_iop;
++
++/* au_wr_dir flags */
++#define AuWrDir_ADD_ENTRY	1
++#define AuWrDir_ISDIR		(1 << 1)
++#define au_ftest_wrdir(flags, name)	((flags) & AuWrDir_##name)
++#define au_fset_wrdir(flags, name)	{ (flags) |= AuWrDir_##name; }
++#define au_fclr_wrdir(flags, name)	{ (flags) &= ~AuWrDir_##name; }
++
++struct au_wr_dir_args {
++	aufs_bindex_t force_btgt;
++	unsigned char flags;
++};
++int au_wr_dir(struct dentry *dentry, struct dentry *src_dentry,
++	      struct au_wr_dir_args *args);
++
++struct dentry *au_pinned_h_parent(struct au_pin *pin);
++void au_pin_init(struct au_pin *pin, struct dentry *dentry,
++		 aufs_bindex_t bindex, int lsc_di, int lsc_hi,
++		 unsigned int udba, unsigned char flags);
++int au_pin(struct au_pin *pin, struct dentry *dentry, aufs_bindex_t bindex,
++	   unsigned int udba, unsigned char flags) __must_check;
++int au_do_pin(struct au_pin *pin) __must_check;
++void au_unpin(struct au_pin *pin);
++
++/* i_op_add.c */
++int au_may_add(struct dentry *dentry, aufs_bindex_t bindex,
++	       struct dentry *h_parent, int isdir);
++int aufs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev);
++int aufs_symlink(struct inode *dir, struct dentry *dentry, const char *symname);
++int aufs_create(struct inode *dir, struct dentry *dentry, int mode,
++		struct nameidata *nd);
++int aufs_link(struct dentry *src_dentry, struct inode *dir,
++	      struct dentry *dentry);
++int aufs_mkdir(struct inode *dir, struct dentry *dentry, int mode);
++
++/* i_op_del.c */
++int au_wr_dir_need_wh(struct dentry *dentry, int isdir, aufs_bindex_t *bcpup);
++int au_may_del(struct dentry *dentry, aufs_bindex_t bindex,
++	       struct dentry *h_parent, int isdir);
++int aufs_unlink(struct inode *dir, struct dentry *dentry);
++int aufs_rmdir(struct inode *dir, struct dentry *dentry);
++
++/* i_op_ren.c */
++int au_wbr(struct dentry *dentry, aufs_bindex_t btgt);
++int aufs_rename(struct inode *src_dir, struct dentry *src_dentry,
++		struct inode *dir, struct dentry *dentry);
++
++/* iinfo.c */
++struct inode *au_h_iptr(struct inode *inode, aufs_bindex_t bindex);
++void au_hiput(struct au_hinode *hinode);
++void au_set_ibstart(struct inode *inode, aufs_bindex_t bindex);
++void au_set_hi_wh(struct inode *inode, aufs_bindex_t bindex,
++		  struct dentry *h_wh);
++unsigned int au_hi_flags(struct inode *inode, int isdir);
++
++/* hinode flags */
++#define AuHi_XINO	1
++#define AuHi_HINOTIFY	(1 << 1)
++#define au_ftest_hi(flags, name)	((flags) & AuHi_##name)
++#define au_fset_hi(flags, name)		{ (flags) |= AuHi_##name; }
++#define au_fclr_hi(flags, name)		{ (flags) &= ~AuHi_##name; }
++
++#ifndef CONFIG_AUFS_HINOTIFY
++#undef AuHi_HINOTIFY
++#define AuHi_HINOTIFY	0
++#endif
++
++void au_set_h_iptr(struct inode *inode, aufs_bindex_t bindex,
++		   struct inode *h_inode, unsigned int flags);
++
++void au_update_iigen(struct inode *inode);
++void au_update_brange(struct inode *inode, int do_put_zero);
++
++int au_iinfo_init(struct inode *inode);
++void au_iinfo_fin(struct inode *inode);
++int au_ii_realloc(struct au_iinfo *iinfo, int nbr);
++
++/* plink.c */
++void au_plink_block_maintain(struct super_block *sb);
++#ifdef CONFIG_AUFS_DEBUG
++void au_plink_list(struct super_block *sb);
++#else
++static inline void au_plink_list(struct super_block *sb)
++{
++	/* nothing */
++}
++#endif
++int au_plink_test(struct inode *inode);
++struct dentry *au_plink_lkup(struct inode *inode, aufs_bindex_t bindex);
++void au_plink_append(struct inode *inode, aufs_bindex_t bindex,
++		     struct dentry *h_dentry);
++void au_plink_put(struct super_block *sb);
++void au_plink_half_refresh(struct super_block *sb, aufs_bindex_t br_id);
++
++/* ---------------------------------------------------------------------- */
++
++/* lock subclass for iinfo */
++enum {
++	AuLsc_II_CHILD,		/* child first */
++	AuLsc_II_CHILD2,	/* rename(2), link(2), and cpup at hinotify */
++	AuLsc_II_CHILD3,	/* copyup dirs */
++	AuLsc_II_PARENT,	/* see AuLsc_I_PARENT in vfsub.h */
++	AuLsc_II_PARENT2,
++	AuLsc_II_PARENT3,	/* copyup dirs */
++	AuLsc_II_NEW_CHILD
++};
++
++/*
++ * ii_read_lock_child, ii_write_lock_child,
++ * ii_read_lock_child2, ii_write_lock_child2,
++ * ii_read_lock_child3, ii_write_lock_child3,
++ * ii_read_lock_parent, ii_write_lock_parent,
++ * ii_read_lock_parent2, ii_write_lock_parent2,
++ * ii_read_lock_parent3, ii_write_lock_parent3,
++ * ii_read_lock_new_child, ii_write_lock_new_child,
++ */
++#define AuReadLockFunc(name, lsc) \
++static inline void ii_read_lock_##name(struct inode *i) \
++{ \
++	au_rw_read_lock_nested(&au_ii(i)->ii_rwsem, AuLsc_II_##lsc); \
++}
++
++#define AuWriteLockFunc(name, lsc) \
++static inline void ii_write_lock_##name(struct inode *i) \
++{ \
++	au_rw_write_lock_nested(&au_ii(i)->ii_rwsem, AuLsc_II_##lsc); \
++}
++
++#define AuRWLockFuncs(name, lsc) \
++	AuReadLockFunc(name, lsc) \
++	AuWriteLockFunc(name, lsc)
++
++AuRWLockFuncs(child, CHILD);
++AuRWLockFuncs(child2, CHILD2);
++AuRWLockFuncs(child3, CHILD3);
++AuRWLockFuncs(parent, PARENT);
++AuRWLockFuncs(parent2, PARENT2);
++AuRWLockFuncs(parent3, PARENT3);
++AuRWLockFuncs(new_child, NEW_CHILD);
++
++#undef AuReadLockFunc
++#undef AuWriteLockFunc
++#undef AuRWLockFuncs
++
++/*
++ * ii_read_unlock, ii_write_unlock, ii_downgrade_lock
++ */
++AuSimpleUnlockRwsemFuncs(ii, struct inode *i, &au_ii(i)->ii_rwsem);
++
++#define IiMustNoWaiters(i)	AuRwMustNoWaiters(&au_ii(i)->ii_rwsem)
++#define IiMustAnyLock(i)	AuRwMustAnyLock(&au_ii(i)->ii_rwsem)
++#define IiMustWriteLock(i)	AuRwMustWriteLock(&au_ii(i)->ii_rwsem)
++
++/* ---------------------------------------------------------------------- */
++
++static inline unsigned int au_iigen(struct inode *inode)
++{
++	return atomic_read(&au_ii(inode)->ii_generation);
++}
++
++/* tiny test for inode number */
++/* tmpfs generation is too rough */
++static inline int au_test_higen(struct inode *inode, struct inode *h_inode)
++{
++	struct au_iinfo *iinfo;
++
++	iinfo = au_ii(inode);
++	AuRwMustAnyLock(&iinfo->ii_rwsem);
++	return !(iinfo->ii_hsb1 == h_inode->i_sb
++		 && iinfo->ii_higen == h_inode->i_generation);
++}
++
++/* ---------------------------------------------------------------------- */
++
++static inline aufs_bindex_t au_ii_br_id(struct inode *inode,
++					aufs_bindex_t bindex)
++{
++	IiMustAnyLock(inode);
++	return au_ii(inode)->ii_hinode[0 + bindex].hi_id;
++}
++
++static inline aufs_bindex_t au_ibstart(struct inode *inode)
++{
++	IiMustAnyLock(inode);
++	return au_ii(inode)->ii_bstart;
++}
++
++static inline aufs_bindex_t au_ibend(struct inode *inode)
++{
++	IiMustAnyLock(inode);
++	return au_ii(inode)->ii_bend;
++}
++
++static inline struct au_vdir *au_ivdir(struct inode *inode)
++{
++	IiMustAnyLock(inode);
++	return au_ii(inode)->ii_vdir;
++}
++
++static inline struct dentry *au_hi_wh(struct inode *inode, aufs_bindex_t bindex)
++{
++	IiMustAnyLock(inode);
++	return au_ii(inode)->ii_hinode[0 + bindex].hi_whdentry;
++}
++
++static inline void au_set_ibend(struct inode *inode, aufs_bindex_t bindex)
++{
++	IiMustWriteLock(inode);
++	au_ii(inode)->ii_bend = bindex;
++}
++
++static inline void au_set_ivdir(struct inode *inode, struct au_vdir *vdir)
++{
++	IiMustWriteLock(inode);
++	au_ii(inode)->ii_vdir = vdir;
++}
++
++static inline struct au_hinode *au_hi(struct inode *inode, aufs_bindex_t bindex)
++{
++	IiMustAnyLock(inode);
++	return au_ii(inode)->ii_hinode + bindex;
++}
++
++/* ---------------------------------------------------------------------- */
++
++static inline struct dentry *au_pinned_parent(struct au_pin *pin)
++{
++	if (pin)
++		return pin->parent;
++	return NULL;
++}
++
++static inline struct inode *au_pinned_h_dir(struct au_pin *pin)
++{
++	if (pin && pin->hdir)
++		return pin->hdir->hi_inode;
++	return NULL;
++}
++
++static inline struct au_hinode *au_pinned_hdir(struct au_pin *pin)
++{
++	if (pin)
++		return pin->hdir;
++	return NULL;
++}
++
++static inline void au_pin_set_dentry(struct au_pin *pin, struct dentry *dentry)
++{
++	if (pin)
++		pin->dentry = dentry;
++}
++
++static inline void au_pin_set_parent_lflag(struct au_pin *pin,
++					   unsigned char lflag)
++{
++	if (pin) {
++		/* dirty macros require brackets */
++		if (lflag) {
++			au_fset_pin(pin->flags, DI_LOCKED);
++		} else {
++			au_fclr_pin(pin->flags, DI_LOCKED);
++		}
++	}
++}
++
++static inline void au_pin_set_parent(struct au_pin *pin, struct dentry *parent)
++{
++	if (pin) {
++		dput(pin->parent);
++		pin->parent = dget(parent);
++	}
++}
++
++/* ---------------------------------------------------------------------- */
++
++#ifdef CONFIG_AUFS_HINOTIFY
++/* hinotify.c */
++int au_hin_alloc(struct au_hinode *hinode, struct inode *inode,
++		 struct inode *h_inode);
++void au_hin_free(struct au_hinode *hinode);
++void au_hin_ctl(struct au_hinode *hinode, int do_set);
++void au_reset_hinotify(struct inode *inode, unsigned int flags);
++
++int __init au_hinotify_init(void);
++void au_hinotify_fin(void);
++
++static inline
++void au_hin_init(struct au_hinode *hinode, struct au_hinotify *val)
++{
++	hinode->hi_notify = val;
++}
++
++static inline void au_iigen_dec(struct inode *inode)
++{
++	atomic_dec_return(&au_ii(inode)->ii_generation);
++}
++
++#else
++static inline
++int au_hin_alloc(struct au_hinode *hinode __maybe_unused,
++		 struct inode *inode __maybe_unused,
++		 struct inode *h_inode __maybe_unused)
++{
++	return -EOPNOTSUPP;
++}
++
++static inline void au_hin_free(struct au_hinode *hinode __maybe_unused)
++{
++	/* nothing */
++}
++
++static inline void au_hin_ctl(struct au_hinode *hinode __maybe_unused,
++			      int do_set __maybe_unused)
++{
++	/* nothing */
++}
++
++static inline void au_reset_hinotify(struct inode *inode __maybe_unused,
++				     unsigned int flags __maybe_unused)
++{
++	/* nothing */
++}
++
++static inline int au_hinotify_init(void)
++{
++	return 0;
++}
++
++#define au_hinotify_fin()	do {} while (0)
++
++static inline
++void au_hin_init(struct au_hinode *hinode __maybe_unused,
++		 struct au_hinotify *val __maybe_unused)
++{
++	/* empty */
++}
++#endif /* CONFIG_AUFS_HINOTIFY */
++
++static inline void au_hin_suspend(struct au_hinode *hdir)
++{
++	au_hin_ctl(hdir, /*do_set*/0);
++}
++
++static inline void au_hin_resume(struct au_hinode *hdir)
++{
++	au_hin_ctl(hdir, /*do_set*/1);
++}
++
++static inline void au_hin_imtx_lock(struct au_hinode *hdir)
++{
++	mutex_lock(&hdir->hi_inode->i_mutex);
++	au_hin_suspend(hdir);
++}
++
++static inline void au_hin_imtx_lock_nested(struct au_hinode *hdir,
++					   unsigned int sc __maybe_unused)
++{
++	mutex_lock_nested(&hdir->hi_inode->i_mutex, sc);
++	au_hin_suspend(hdir);
++}
++
++static inline void au_hin_imtx_unlock(struct au_hinode *hdir)
++{
++	au_hin_resume(hdir);
++	mutex_unlock(&hdir->hi_inode->i_mutex);
++}
++
++#endif /* __KERNEL__ */
++#endif /* __AUFS_INODE_H__ */
+diff -Nur linux-2.6.31.5.orig/fs/aufs/ioctl.c linux-2.6.31.5/fs/aufs/ioctl.c
+--- linux-2.6.31.5.orig/fs/aufs/ioctl.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.31.5/fs/aufs/ioctl.c	2009-11-15 22:02:37.000000000 +0100
+@@ -0,0 +1,67 @@
++/*
++ * Copyright (C) 2005-2009 Junjiro R. Okajima
++ *
++ * This program, aufs is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
++ */
++
++/*
++ * ioctl
++ * currently plink-management only.
++ */
++
++#include <linux/uaccess.h>
++#include "aufs.h"
++
++long aufs_ioctl_dir(struct file *file, unsigned int cmd,
++		    unsigned long arg __maybe_unused)
++{
++	long err;
++	struct super_block *sb;
++	struct au_sbinfo *sbinfo;
++
++	err = -EACCES;
++	if (!capable(CAP_SYS_ADMIN))
++		goto out;
++
++	err = 0;
++	sb = file->f_dentry->d_sb;
++	sbinfo = au_sbi(sb);
++	switch (cmd) {
++	case AUFS_CTL_PLINK_MAINT:
++		/*
++		 * pseudo-link maintenance mode,
++		 * cleared by aufs_release_dir()
++		 */
++		si_write_lock(sb);
++		if (!au_ftest_si(sbinfo, MAINTAIN_PLINK)) {
++			au_fset_si(sbinfo, MAINTAIN_PLINK);
++			au_fi(file)->fi_maintain_plink = 1;
++		} else
++			err = -EBUSY;
++		si_write_unlock(sb);
++		break;
++	case AUFS_CTL_PLINK_CLEAN:
++		aufs_write_lock(sb->s_root);
++		if (au_opt_test(sbinfo->si_mntflags, PLINK))
++			au_plink_put(sb);
++		aufs_write_unlock(sb->s_root);
++		break;
++	default:
++		err = -EINVAL;
++	}
++
++ out:
++	return err;
++}
+diff -Nur linux-2.6.31.5.orig/fs/aufs/i_op_add.c linux-2.6.31.5/fs/aufs/i_op_add.c
+--- linux-2.6.31.5.orig/fs/aufs/i_op_add.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.31.5/fs/aufs/i_op_add.c	2009-11-15 22:02:37.000000000 +0100
+@@ -0,0 +1,649 @@
++/*
++ * Copyright (C) 2005-2009 Junjiro R. Okajima
++ *
++ * This program, aufs is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
++ */
++
++/*
++ * inode operations (add entry)
++ */
++
++#include "aufs.h"
++
++/*
++ * final procedure of adding a new entry, except link(2).
++ * remove whiteout, instantiate, copyup the parent dir's times and size
++ * and update version.
++ * if it failed, re-create the removed whiteout.
++ */
++static int epilog(struct inode *dir, aufs_bindex_t bindex,
++		  struct dentry *wh_dentry, struct dentry *dentry)
++{
++	int err, rerr;
++	aufs_bindex_t bwh;
++	struct path h_path;
++	struct inode *inode, *h_dir;
++	struct dentry *wh;
++
++	bwh = -1;
++	if (wh_dentry) {
++		h_dir = wh_dentry->d_parent->d_inode; /* dir inode is locked */
++		IMustLock(h_dir);
++		AuDebugOn(au_h_iptr(dir, bindex) != h_dir);
++		bwh = au_dbwh(dentry);
++		h_path.dentry = wh_dentry;
++		h_path.mnt = au_sbr_mnt(dir->i_sb, bindex);
++		err = au_wh_unlink_dentry(au_h_iptr(dir, bindex), &h_path,
++					  dentry);
++		if (unlikely(err))
++			goto out;
++	}
++
++	inode = au_new_inode(dentry, /*must_new*/1);
++	if (!IS_ERR(inode)) {
++		d_instantiate(dentry, inode);
++		dir = dentry->d_parent->d_inode; /* dir inode is locked */
++		IMustLock(dir);
++		if (au_ibstart(dir) == au_dbstart(dentry))
++			au_cpup_attr_timesizes(dir);
++		dir->i_version++;
++		return 0; /* success */
++	}
++
++	err = PTR_ERR(inode);
++	if (!wh_dentry)
++		goto out;
++
++	/* revert */
++	/* dir inode is locked */
++	wh = au_wh_create(dentry, bwh, wh_dentry->d_parent);
++	rerr = PTR_ERR(wh);
++	if (IS_ERR(wh)) {
++		AuIOErr("%.*s reverting whiteout failed(%d, %d)\n",
++			AuDLNPair(dentry), err, rerr);
++		err = -EIO;
++	} else
++		dput(wh);
++
++ out:
++	return err;
++}
++
++/*
++ * simple tests for the adding inode operations.
++ * following the checks in vfs, plus the parent-child relationship.
++ */
++int au_may_add(struct dentry *dentry, aufs_bindex_t bindex,
++	       struct dentry *h_parent, int isdir)
++{
++	int err;
++	umode_t h_mode;
++	struct dentry *h_dentry;
++	struct inode *h_inode;
++
++	h_dentry = au_h_dptr(dentry, bindex);
++	h_inode = h_dentry->d_inode;
++	if (!dentry->d_inode) {
++		err = -EEXIST;
++		if (unlikely(h_inode))
++			goto out;
++	} else {
++		/* rename(2) case */
++		err = -EIO;
++		if (unlikely(!h_inode || !h_inode->i_nlink))
++			goto out;
++
++		h_mode = h_inode->i_mode;
++		if (!isdir) {
++			err = -EISDIR;
++			if (unlikely(S_ISDIR(h_mode)))
++				goto out;
++		} else if (unlikely(!S_ISDIR(h_mode))) {
++			err = -ENOTDIR;
++			goto out;
++		}
++	}
++
++	err = -EIO;
++	/* expected parent dir is locked */
++	if (unlikely(h_parent != h_dentry->d_parent))
++		goto out;
++	err = 0;
++
++ out:
++	return err;
++}
++
++/*
++ * initial procedure of adding a new entry.
++ * prepare writable branch and the parent dir, lock it,
++ * and lookup whiteout for the new entry.
++ */
++static struct dentry*
++lock_hdir_lkup_wh(struct dentry *dentry, struct au_dtime *dt,
++		  struct dentry *src_dentry, struct au_pin *pin,
++		  struct au_wr_dir_args *wr_dir_args)
++{
++	struct dentry *wh_dentry, *h_parent;
++	struct super_block *sb;
++	struct au_branch *br;
++	int err;
++	unsigned int udba;
++	aufs_bindex_t bcpup;
++
++	err = au_wr_dir(dentry, src_dentry, wr_dir_args);
++	bcpup = err;
++	wh_dentry = ERR_PTR(err);
++	if (unlikely(err < 0))
++		goto out;
++
++	sb = dentry->d_sb;
++	udba = au_opt_udba(sb);
++	err = au_pin(pin, dentry, bcpup, udba,
++		     AuPin_DI_LOCKED | AuPin_MNT_WRITE);
++	wh_dentry = ERR_PTR(err);
++	if (unlikely(err))
++		goto out;
++
++	h_parent = au_pinned_h_parent(pin);
++	if (udba != AuOpt_UDBA_NONE
++	    && au_dbstart(dentry) == bcpup) {
++		err = au_may_add(dentry, bcpup, h_parent,
++				 au_ftest_wrdir(wr_dir_args->flags, ISDIR));
++		wh_dentry = ERR_PTR(err);
++		if (unlikely(err))
++			goto out_unpin;
++	}
++
++	br = au_sbr(sb, bcpup);
++	if (dt) {
++		struct path tmp = {
++			.dentry	= h_parent,
++			.mnt	= br->br_mnt
++		};
++		au_dtime_store(dt, au_pinned_parent(pin), &tmp);
++	}
++
++	wh_dentry = NULL;
++	if (bcpup != au_dbwh(dentry))
++		goto out; /* success */
++
++	wh_dentry = au_wh_lkup(h_parent, &dentry->d_name, br);
++
++ out_unpin:
++	if (IS_ERR(wh_dentry))
++		au_unpin(pin);
++ out:
++	return wh_dentry;
++}
++
++/* ---------------------------------------------------------------------- */
++
++enum { Mknod, Symlink, Creat };
++struct simple_arg {
++	int type;
++	union {
++		struct {
++			int mode;
++			struct nameidata *nd;
++		} c;
++		struct {
++			const char *symname;
++		} s;
++		struct {
++			int mode;
++			dev_t dev;
++		} m;
++	} u;
++};
++
++static int add_simple(struct inode *dir, struct dentry *dentry,
++		      struct simple_arg *arg)
++{
++	int err;
++	aufs_bindex_t bstart;
++	unsigned char created;
++	struct au_dtime dt;
++	struct au_pin pin;
++	struct path h_path;
++	struct dentry *wh_dentry, *parent;
++	struct inode *h_dir;
++	struct au_wr_dir_args wr_dir_args = {
++		.force_btgt	= -1,
++		.flags		= AuWrDir_ADD_ENTRY
++	};
++
++	IMustLock(dir);
++
++	parent = dentry->d_parent; /* dir inode is locked */
++	aufs_read_lock(dentry, AuLock_DW);
++	di_write_lock_parent(parent);
++	wh_dentry = lock_hdir_lkup_wh(dentry, &dt, /*src_dentry*/NULL, &pin,
++				      &wr_dir_args);
++	err = PTR_ERR(wh_dentry);
++	if (IS_ERR(wh_dentry))
++		goto out;
++
++	bstart = au_dbstart(dentry);
++	h_path.dentry = au_h_dptr(dentry, bstart);
++	h_path.mnt = au_sbr_mnt(dentry->d_sb, bstart);
++	h_dir = au_pinned_h_dir(&pin);
++	switch (arg->type) {
++	case Creat:
++		err = vfsub_create(h_dir, &h_path, arg->u.c.mode);
++		break;
++	case Symlink:
++		err = vfsub_symlink(h_dir, &h_path, arg->u.s.symname);
++		break;
++	case Mknod:
++		err = vfsub_mknod(h_dir, &h_path, arg->u.m.mode, arg->u.m.dev);
++		break;
++	default:
++		BUG();
++	}
++	created = !err;
++	if (!err)
++		err = epilog(dir, bstart, wh_dentry, dentry);
++
++	/* revert */
++	if (unlikely(created && err && h_path.dentry->d_inode)) {
++		int rerr;
++		rerr = vfsub_unlink(h_dir, &h_path, /*force*/0);
++		if (rerr) {
++			AuIOErr("%.*s revert failure(%d, %d)\n",
++				AuDLNPair(dentry), err, rerr);
++			err = -EIO;
++		}
++		au_dtime_revert(&dt);
++		d_drop(dentry);
++	}
++
++	au_unpin(&pin);
++	dput(wh_dentry);
++
++ out:
++	if (unlikely(err)) {
++		au_update_dbstart(dentry);
++		d_drop(dentry);
++	}
++	di_write_unlock(parent);
++	aufs_read_unlock(dentry, AuLock_DW);
++	return err;
++}
++
++int aufs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
++{
++	struct simple_arg arg = {
++		.type = Mknod,
++		.u.m = {
++			.mode	= mode,
++			.dev	= dev
++		}
++	};
++	return add_simple(dir, dentry, &arg);
++}
++
++int aufs_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
++{
++	struct simple_arg arg = {
++		.type = Symlink,
++		.u.s.symname = symname
++	};
++	return add_simple(dir, dentry, &arg);
++}
++
++int aufs_create(struct inode *dir, struct dentry *dentry, int mode,
++		struct nameidata *nd)
++{
++	struct simple_arg arg = {
++		.type = Creat,
++		.u.c = {
++			.mode	= mode,
++			.nd	= nd
++		}
++	};
++	return add_simple(dir, dentry, &arg);
++}
++
++/* ---------------------------------------------------------------------- */
++
++struct au_link_args {
++	aufs_bindex_t bdst, bsrc;
++	struct au_pin pin;
++	struct path h_path;
++	struct dentry *src_parent, *parent;
++};
++
++static int au_cpup_before_link(struct dentry *src_dentry,
++			       struct au_link_args *a)
++{
++	int err;
++	struct dentry *h_src_dentry;
++	struct mutex *h_mtx;
++
++	di_read_lock_parent(a->src_parent, AuLock_IR);
++	err = au_test_and_cpup_dirs(src_dentry, a->bdst);
++	if (unlikely(err))
++		goto out;
++
++	h_src_dentry = au_h_dptr(src_dentry, a->bsrc);
++	h_mtx = &h_src_dentry->d_inode->i_mutex;
++	err = au_pin(&a->pin, src_dentry, a->bdst,
++		     au_opt_udba(src_dentry->d_sb),
++		     AuPin_DI_LOCKED | AuPin_MNT_WRITE);
++	if (unlikely(err))
++		goto out;
++	mutex_lock_nested(h_mtx, AuLsc_I_CHILD);
++	err = au_sio_cpup_simple(src_dentry, a->bdst, -1,
++				 AuCpup_DTIME /* | AuCpup_KEEPLINO */);
++	mutex_unlock(h_mtx);
++	au_unpin(&a->pin);
++
++ out:
++	di_read_unlock(a->src_parent, AuLock_IR);
++	return err;
++}
++
++static int au_cpup_or_link(struct dentry *src_dentry, struct au_link_args *a)
++{
++	int err;
++	unsigned char plink;
++	struct inode *h_inode, *inode;
++	struct dentry *h_src_dentry;
++	struct super_block *sb;
++
++	plink = 0;
++	h_inode = NULL;
++	sb = src_dentry->d_sb;
++	inode = src_dentry->d_inode;
++	if (au_ibstart(inode) <= a->bdst)
++		h_inode = au_h_iptr(inode, a->bdst);
++	if (!h_inode || !h_inode->i_nlink) {
++		/* copyup src_dentry as the name of dentry. */
++		au_set_dbstart(src_dentry, a->bdst);
++		au_set_h_dptr(src_dentry, a->bdst, dget(a->h_path.dentry));
++		h_inode = au_h_dptr(src_dentry, a->bsrc)->d_inode;
++		mutex_lock_nested(&h_inode->i_mutex, AuLsc_I_CHILD);
++		err = au_sio_cpup_single(src_dentry, a->bdst, a->bsrc, -1,
++					 AuCpup_KEEPLINO, a->parent);
++		mutex_unlock(&h_inode->i_mutex);
++		au_set_h_dptr(src_dentry, a->bdst, NULL);
++		au_set_dbstart(src_dentry, a->bsrc);
++	} else {
++		/* the inode of src_dentry already exists on a.bdst branch */
++		h_src_dentry = d_find_alias(h_inode);
++		if (!h_src_dentry && au_plink_test(inode)) {
++			plink = 1;
++			h_src_dentry = au_plink_lkup(inode, a->bdst);
++			err = PTR_ERR(h_src_dentry);
++			if (IS_ERR(h_src_dentry))
++				goto out;
++
++			if (unlikely(!h_src_dentry->d_inode)) {
++				dput(h_src_dentry);
++				h_src_dentry = NULL;
++			}
++
++		}
++		if (h_src_dentry) {
++			err = vfsub_link(h_src_dentry, au_pinned_h_dir(&a->pin),
++					 &a->h_path);
++			dput(h_src_dentry);
++		} else {
++			AuIOErr("no dentry found for hi%lu on b%d\n",
++				h_inode->i_ino, a->bdst);
++			err = -EIO;
++		}
++	}
++
++	if (!err && !plink)
++		au_plink_append(inode, a->bdst, a->h_path.dentry);
++
++out:
++	return err;
++}
++
++int aufs_link(struct dentry *src_dentry, struct inode *dir,
++	      struct dentry *dentry)
++{
++	int err, rerr;
++	struct au_dtime dt;
++	struct au_link_args *a;
++	struct dentry *wh_dentry, *h_src_dentry;
++	struct inode *inode;
++	struct super_block *sb;
++	struct au_wr_dir_args wr_dir_args = {
++		/* .force_btgt	= -1, */
++		.flags		= AuWrDir_ADD_ENTRY
++	};
++
++	IMustLock(dir);
++	inode = src_dentry->d_inode;
++	IMustLock(inode);
++
++	err = -ENOENT;
++	if (unlikely(!inode->i_nlink))
++		goto out;
++
++	err = -ENOMEM;
++	a = kzalloc(sizeof(*a), GFP_NOFS);
++	if (unlikely(!a))
++		goto out;
++
++	a->parent = dentry->d_parent; /* dir inode is locked */
++	aufs_read_and_write_lock2(dentry, src_dentry, /*AuLock_FLUSH*/0);
++	a->src_parent = dget_parent(src_dentry);
++	wr_dir_args.force_btgt = au_dbstart(src_dentry);
++
++	di_write_lock_parent(a->parent);
++	wr_dir_args.force_btgt = au_wbr(dentry, wr_dir_args.force_btgt);
++	wh_dentry = lock_hdir_lkup_wh(dentry, &dt, src_dentry, &a->pin,
++				      &wr_dir_args);
++	err = PTR_ERR(wh_dentry);
++	if (IS_ERR(wh_dentry))
++		goto out_unlock;
++
++	err = 0;
++	sb = dentry->d_sb;
++	a->bdst = au_dbstart(dentry);
++	a->h_path.dentry = au_h_dptr(dentry, a->bdst);
++	a->h_path.mnt = au_sbr_mnt(sb, a->bdst);
++	a->bsrc = au_dbstart(src_dentry);
++	if (au_opt_test(au_mntflags(sb), PLINK)) {
++		if (a->bdst < a->bsrc
++		    /* && h_src_dentry->d_sb != a->h_path.dentry->d_sb */)
++			err = au_cpup_or_link(src_dentry, a);
++		else {
++			h_src_dentry = au_h_dptr(src_dentry, a->bdst);
++			err = vfsub_link(h_src_dentry, au_pinned_h_dir(&a->pin),
++					 &a->h_path);
++		}
++	} else {
++		/*
++		 * copyup src_dentry to the branch we process,
++		 * and then link(2) to it.
++		 */
++		if (a->bdst < a->bsrc
++		    /* && h_src_dentry->d_sb != a->h_path.dentry->d_sb */) {
++			au_unpin(&a->pin);
++			di_write_unlock(a->parent);
++			err = au_cpup_before_link(src_dentry, a);
++			di_write_lock_parent(a->parent);
++			if (!err)
++				err = au_pin(&a->pin, dentry, a->bdst,
++					     au_opt_udba(sb),
++					     AuPin_DI_LOCKED | AuPin_MNT_WRITE);
++			if (unlikely(err))
++				goto out_wh;
++		}
++		if (!err) {
++			h_src_dentry = au_h_dptr(src_dentry, a->bdst);
++			err = -ENOENT;
++			if (h_src_dentry && h_src_dentry->d_inode)
++				err = vfsub_link(h_src_dentry,
++						 au_pinned_h_dir(&a->pin),
++						 &a->h_path);
++		}
++	}
++	if (unlikely(err))
++		goto out_unpin;
++
++	if (wh_dentry) {
++		a->h_path.dentry = wh_dentry;
++		err = au_wh_unlink_dentry(au_pinned_h_dir(&a->pin), &a->h_path,
++					  dentry);
++		if (unlikely(err))
++			goto out_revert;
++	}
++
++	dir->i_version++;
++	if (au_ibstart(dir) == au_dbstart(dentry))
++		au_cpup_attr_timesizes(dir);
++	inc_nlink(inode);
++	inode->i_ctime = dir->i_ctime;
++	if (!d_unhashed(a->h_path.dentry))
++		d_instantiate(dentry, au_igrab(inode));
++	else
++		/* some filesystem calls d_drop() */
++		d_drop(dentry);
++	goto out_unpin; /* success */
++
++ out_revert:
++	rerr = vfsub_unlink(au_pinned_h_dir(&a->pin), &a->h_path, /*force*/0);
++	if (!rerr)
++		goto out_dt;
++	AuIOErr("%.*s reverting failed(%d, %d)\n",
++		AuDLNPair(dentry), err, rerr);
++	err = -EIO;
++ out_dt:
++	d_drop(dentry);
++	au_dtime_revert(&dt);
++ out_unpin:
++	au_unpin(&a->pin);
++ out_wh:
++	dput(wh_dentry);
++ out_unlock:
++	if (unlikely(err)) {
++		au_update_dbstart(dentry);
++		d_drop(dentry);
++	}
++	di_write_unlock(a->parent);
++	dput(a->src_parent);
++	aufs_read_and_write_unlock2(dentry, src_dentry);
++	kfree(a);
++ out:
++	return err;
++}
++
++int aufs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
++{
++	int err, rerr;
++	aufs_bindex_t bindex;
++	unsigned char diropq;
++	struct path h_path;
++	struct dentry *wh_dentry, *parent, *opq_dentry;
++	struct mutex *h_mtx;
++	struct super_block *sb;
++	struct {
++		struct au_pin pin;
++		struct au_dtime dt;
++	} *a; /* reduce the stack usage */
++	struct au_wr_dir_args wr_dir_args = {
++		.force_btgt	= -1,
++		.flags		= AuWrDir_ADD_ENTRY | AuWrDir_ISDIR
++	};
++
++	IMustLock(dir);
++
++	err = -ENOMEM;
++	a = kmalloc(sizeof(*a), GFP_NOFS);
++	if (unlikely(!a))
++		goto out;
++
++	aufs_read_lock(dentry, AuLock_DW);
++	parent = dentry->d_parent; /* dir inode is locked */
++	di_write_lock_parent(parent);
++	wh_dentry = lock_hdir_lkup_wh(dentry, &a->dt, /*src_dentry*/NULL,
++				      &a->pin, &wr_dir_args);
++	err = PTR_ERR(wh_dentry);
++	if (IS_ERR(wh_dentry))
++		goto out_free;
++
++	sb = dentry->d_sb;
++	bindex = au_dbstart(dentry);
++	h_path.dentry = au_h_dptr(dentry, bindex);
++	h_path.mnt = au_sbr_mnt(sb, bindex);
++	err = vfsub_mkdir(au_pinned_h_dir(&a->pin), &h_path, mode);
++	if (unlikely(err))
++		goto out_unlock;
++
++	/* make the dir opaque */
++	diropq = 0;
++	h_mtx = &h_path.dentry->d_inode->i_mutex;
++	if (wh_dentry
++	    || au_opt_test(au_mntflags(sb), ALWAYS_DIROPQ)) {
++		mutex_lock_nested(h_mtx, AuLsc_I_CHILD);
++		opq_dentry = au_diropq_create(dentry, bindex);
++		mutex_unlock(h_mtx);
++		err = PTR_ERR(opq_dentry);
++		if (IS_ERR(opq_dentry))
++			goto out_dir;
++		dput(opq_dentry);
++		diropq = 1;
++	}
++
++	err = epilog(dir, bindex, wh_dentry, dentry);
++	if (!err) {
++		inc_nlink(dir);
++		goto out_unlock; /* success */
++	}
++
++	/* revert */
++	if (diropq) {
++		AuLabel(revert opq);
++		mutex_lock_nested(h_mtx, AuLsc_I_CHILD);
++		rerr = au_diropq_remove(dentry, bindex);
++		mutex_unlock(h_mtx);
++		if (rerr) {
++			AuIOErr("%.*s reverting diropq failed(%d, %d)\n",
++				AuDLNPair(dentry), err, rerr);
++			err = -EIO;
++		}
++	}
++
++ out_dir:
++	AuLabel(revert dir);
++	rerr = vfsub_rmdir(au_pinned_h_dir(&a->pin), &h_path);
++	if (rerr) {
++		AuIOErr("%.*s reverting dir failed(%d, %d)\n",
++			AuDLNPair(dentry), err, rerr);
++		err = -EIO;
++	}
++	d_drop(dentry);
++	au_dtime_revert(&a->dt);
++ out_unlock:
++	au_unpin(&a->pin);
++	dput(wh_dentry);
++ out_free:
++	if (unlikely(err)) {
++		au_update_dbstart(dentry);
++		d_drop(dentry);
++	}
++	di_write_unlock(parent);
++	aufs_read_unlock(dentry, AuLock_DW);
++	kfree(a);
++ out:
++	return err;
++}
+diff -Nur linux-2.6.31.5.orig/fs/aufs/i_op.c linux-2.6.31.5/fs/aufs/i_op.c
+--- linux-2.6.31.5.orig/fs/aufs/i_op.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.31.5/fs/aufs/i_op.c	2009-11-15 22:02:37.000000000 +0100
+@@ -0,0 +1,872 @@
++/*
++ * Copyright (C) 2005-2009 Junjiro R. Okajima
++ *
++ * This program, aufs is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
++ */
++
++/*
++ * inode operations (except add/del/rename)
++ */
++
++#include <linux/device_cgroup.h>
++#include <linux/fs_stack.h>
++#include <linux/mm.h>
++#include <linux/namei.h>
++#include <linux/security.h>
++#include <linux/uaccess.h>
++#include "aufs.h"
++
++static int h_permission(struct inode *h_inode, int mask,
++			struct vfsmount *h_mnt, int brperm)
++{
++	int err;
++	const unsigned char write_mask = !!(mask & (MAY_WRITE | MAY_APPEND));
++
++	err = -EACCES;
++	if ((write_mask && IS_IMMUTABLE(h_inode))
++	    || ((mask & MAY_EXEC)
++		&& S_ISREG(h_inode->i_mode)
++		&& ((h_mnt->mnt_flags & MNT_NOEXEC)
++		    || !(h_inode->i_mode & S_IXUGO))))
++		goto out;
++
++	/*
++	 * - skip the lower fs test in the case of write to ro branch.
++	 * - nfs dir permission write check is optimized, but a policy for
++	 *   link/rename requires a real check.
++	 */
++	if ((write_mask && !au_br_writable(brperm))
++	    || (au_test_nfs(h_inode->i_sb) && S_ISDIR(h_inode->i_mode)
++		&& write_mask && !(mask & MAY_READ))
++	    || !h_inode->i_op->permission) {
++		/* AuLabel(generic_permission); */
++		err = generic_permission(h_inode, mask, NULL);
++	} else {
++		/* AuLabel(h_inode->permission); */
++		err = h_inode->i_op->permission(h_inode, mask);
++		AuTraceErr(err);
++	}
++
++	if (!err)
++		err = devcgroup_inode_permission(h_inode, mask);
++	if (!err)
++		err = security_inode_permission
++			(h_inode, mask & (MAY_READ | MAY_WRITE | MAY_EXEC
++					  | MAY_APPEND));
++
++ out:
++	return err;
++}
++
++static int aufs_permission(struct inode *inode, int mask)
++{
++	int err;
++	aufs_bindex_t bindex, bend;
++	const unsigned char isdir = !!S_ISDIR(inode->i_mode);
++	const unsigned char write_mask = !!(mask & (MAY_WRITE | MAY_APPEND));
++	struct inode *h_inode;
++	struct super_block *sb;
++	struct au_branch *br;
++
++	sb = inode->i_sb;
++	si_read_lock(sb, AuLock_FLUSH);
++	ii_read_lock_child(inode);
++
++	if (!isdir || write_mask) {
++		h_inode = au_h_iptr(inode, au_ibstart(inode));
++		AuDebugOn(!h_inode
++			  || ((h_inode->i_mode & S_IFMT)
++			      != (inode->i_mode & S_IFMT)));
++		err = 0;
++		bindex = au_ibstart(inode);
++		br = au_sbr(sb, bindex);
++		err = h_permission(h_inode, mask, br->br_mnt, br->br_perm);
++
++		if (write_mask && !err) {
++			/* test whether the upper writable branch exists */
++			err = -EROFS;
++			for (; bindex >= 0; bindex--)
++				if (!au_br_rdonly(au_sbr(sb, bindex))) {
++					err = 0;
++					break;
++				}
++		}
++		goto out;
++	}
++
++	/* non-write to dir */
++	err = 0;
++	bend = au_ibend(inode);
++	for (bindex = au_ibstart(inode); !err && bindex <= bend; bindex++) {
++		h_inode = au_h_iptr(inode, bindex);
++		if (h_inode) {
++			AuDebugOn(!S_ISDIR(h_inode->i_mode));
++			br = au_sbr(sb, bindex);
++			err = h_permission(h_inode, mask, br->br_mnt,
++					   br->br_perm);
++		}
++	}
++
++ out:
++	ii_read_unlock(inode);
++	si_read_unlock(sb);
++	return err;
++}
++
++/* ---------------------------------------------------------------------- */
++
++static struct dentry *aufs_lookup(struct inode *dir, struct dentry *dentry,
++				  struct nameidata *nd)
++{
++	struct dentry *ret, *parent;
++	struct inode *inode, *h_inode;
++	struct mutex *mtx;
++	struct super_block *sb;
++	int err, npositive;
++	aufs_bindex_t bstart;
++
++	/* temporary workaround for a bug in NFSD readdir */
++	if (!au_test_nfsd(current))
++		IMustLock(dir);
++	else
++		WARN_ONCE(!mutex_is_locked(&dir->i_mutex),
++			  "a known problem of NFSD readdir since 2.6.28\n");
++
++	sb = dir->i_sb;
++	si_read_lock(sb, AuLock_FLUSH);
++	err = au_alloc_dinfo(dentry);
++	ret = ERR_PTR(err);
++	if (unlikely(err))
++		goto out;
++
++	parent = dentry->d_parent; /* dir inode is locked */
++	di_read_lock_parent(parent, AuLock_IR);
++	npositive = au_lkup_dentry(dentry, au_dbstart(parent), /*type*/0, nd);
++	di_read_unlock(parent, AuLock_IR);
++	err = npositive;
++	ret = ERR_PTR(err);
++	if (unlikely(err < 0))
++		goto out_unlock;
++
++	inode = NULL;
++	if (npositive) {
++		bstart = au_dbstart(dentry);
++		h_inode = au_h_dptr(dentry, bstart)->d_inode;
++		if (!S_ISDIR(h_inode->i_mode)) {
++			/*
++			 * stop 'race'-ing between hardlinks under different
++			 * parents.
++			 */
++			mtx = &au_sbr(sb, bstart)->br_xino.xi_nondir_mtx;
++			mutex_lock(mtx);
++			inode = au_new_inode(dentry, /*must_new*/0);
++			mutex_unlock(mtx);
++		} else
++			inode = au_new_inode(dentry, /*must_new*/0);
++		ret = (void *)inode;
++	}
++	if (IS_ERR(inode))
++		goto out_unlock;
++
++	ret = d_splice_alias(inode, dentry);
++	if (unlikely(IS_ERR(ret) && inode))
++		ii_write_unlock(inode);
++	au_store_oflag(nd, inode);
++
++ out_unlock:
++	di_write_unlock(dentry);
++ out:
++	si_read_unlock(sb);
++	return ret;
++}
++
++/* ---------------------------------------------------------------------- */
++
++static int au_wr_dir_cpup(struct dentry *dentry, struct dentry *parent,
++			  const unsigned char add_entry, aufs_bindex_t bcpup,
++			  aufs_bindex_t bstart)
++{
++	int err;
++	struct dentry *h_parent;
++	struct inode *h_dir;
++
++	if (add_entry) {
++		au_update_dbstart(dentry);
++		IMustLock(parent->d_inode);
++	} else
++		di_write_lock_parent(parent);
++
++	err = 0;
++	if (!au_h_dptr(parent, bcpup)) {
++		if (bstart < bcpup)
++			err = au_cpdown_dirs(dentry, bcpup);
++		else
++			err = au_cpup_dirs(dentry, bcpup);
++	}
++	if (!err && add_entry) {
++		h_parent = au_h_dptr(parent, bcpup);
++		h_dir = h_parent->d_inode;
++		mutex_lock_nested(&h_dir->i_mutex, AuLsc_I_PARENT);
++		err = au_lkup_neg(dentry, bcpup);
++		/* todo: no unlock here */
++		mutex_unlock(&h_dir->i_mutex);
++		if (bstart < bcpup && au_dbstart(dentry) < 0) {
++			au_set_dbstart(dentry, 0);
++			au_update_dbrange(dentry, /*do_put_zero*/0);
++		}
++	}
++
++	if (!add_entry)
++		di_write_unlock(parent);
++	if (!err)
++		err = bcpup; /* success */
++
++	return err;
++}
++
++/*
++ * decide the branch and the parent dir where we will create a new entry.
++ * returns new bindex or an error.
++ * copyup the parent dir if needed.
++ */
++int au_wr_dir(struct dentry *dentry, struct dentry *src_dentry,
++	      struct au_wr_dir_args *args)
++{
++	int err;
++	aufs_bindex_t bcpup, bstart, src_bstart;
++	const unsigned char add_entry = !!au_ftest_wrdir(args->flags,
++							 ADD_ENTRY);
++	struct super_block *sb;
++	struct dentry *parent;
++	struct au_sbinfo *sbinfo;
++
++	sb = dentry->d_sb;
++	sbinfo = au_sbi(sb);
++	parent = dget_parent(dentry);
++	bstart = au_dbstart(dentry);
++	bcpup = bstart;
++	if (args->force_btgt < 0) {
++		if (src_dentry) {
++			src_bstart = au_dbstart(src_dentry);
++			if (src_bstart < bstart)
++				bcpup = src_bstart;
++		} else if (add_entry) {
++			err = AuWbrCreate(sbinfo, dentry,
++					  au_ftest_wrdir(args->flags, ISDIR));
++			bcpup = err;
++		}
++
++		if (bcpup < 0 || au_test_ro(sb, bcpup, dentry->d_inode)) {
++			if (add_entry)
++				err = AuWbrCopyup(sbinfo, dentry);
++			else {
++				if (!IS_ROOT(dentry)) {
++					di_read_lock_parent(parent, !AuLock_IR);
++					err = AuWbrCopyup(sbinfo, dentry);
++					di_read_unlock(parent, !AuLock_IR);
++				} else
++					err = AuWbrCopyup(sbinfo, dentry);
++			}
++			bcpup = err;
++			if (unlikely(err < 0))
++				goto out;
++		}
++	} else {
++		bcpup = args->force_btgt;
++		AuDebugOn(au_test_ro(sb, bcpup, dentry->d_inode));
++	}
++	AuDbg("bstart %d, bcpup %d\n", bstart, bcpup);
++	if (bstart < bcpup)
++		au_update_dbrange(dentry, /*do_put_zero*/1);
++
++	err = bcpup;
++	if (bcpup == bstart)
++		goto out; /* success */
++
++	/* copyup the new parent into the branch we process */
++	err = au_wr_dir_cpup(dentry, parent, add_entry, bcpup, bstart);
++
++ out:
++	dput(parent);
++	return err;
++}
++
++/* ---------------------------------------------------------------------- */
++
++struct dentry *au_pinned_h_parent(struct au_pin *pin)
++{
++	if (pin && pin->parent)
++		return au_h_dptr(pin->parent, pin->bindex);
++	return NULL;
++}
++
++void au_unpin(struct au_pin *p)
++{
++	if (au_ftest_pin(p->flags, MNT_WRITE))
++		mnt_drop_write(p->h_mnt);
++	if (!p->hdir)
++		return;
++
++	au_hin_imtx_unlock(p->hdir);
++	if (!au_ftest_pin(p->flags, DI_LOCKED))
++		di_read_unlock(p->parent, AuLock_IR);
++	iput(p->hdir->hi_inode);
++	dput(p->parent);
++	p->parent = NULL;
++	p->hdir = NULL;
++	p->h_mnt = NULL;
++}
++
++int au_do_pin(struct au_pin *p)
++{
++	int err;
++	struct super_block *sb;
++	struct dentry *h_dentry, *h_parent;
++	struct au_branch *br;
++	struct inode *h_dir;
++
++	err = 0;
++	sb = p->dentry->d_sb;
++	br = au_sbr(sb, p->bindex);
++	if (IS_ROOT(p->dentry)) {
++		if (au_ftest_pin(p->flags, MNT_WRITE)) {
++			p->h_mnt = br->br_mnt;
++			err = mnt_want_write(p->h_mnt);
++			if (unlikely(err)) {
++				au_fclr_pin(p->flags, MNT_WRITE);
++				goto out_err;
++			}
++		}
++		goto out;
++	}
++
++	h_dentry = NULL;
++	if (p->bindex <= au_dbend(p->dentry))
++		h_dentry = au_h_dptr(p->dentry, p->bindex);
++
++	p->parent = dget_parent(p->dentry);
++	if (!au_ftest_pin(p->flags, DI_LOCKED))
++		di_read_lock(p->parent, AuLock_IR, p->lsc_di);
++
++	h_dir = NULL;
++	h_parent = au_h_dptr(p->parent, p->bindex);
++	p->hdir = au_hi(p->parent->d_inode, p->bindex);
++	if (p->hdir)
++		h_dir = p->hdir->hi_inode;
++
++	/* udba case */
++	if (unlikely(!p->hdir || !h_dir)) {
++		if (!au_ftest_pin(p->flags, DI_LOCKED))
++			di_read_unlock(p->parent, AuLock_IR);
++		dput(p->parent);
++		p->parent = NULL;
++		goto out_err;
++	}
++
++	au_igrab(h_dir);
++	au_hin_imtx_lock_nested(p->hdir, p->lsc_hi);
++
++	if (unlikely(p->hdir->hi_inode != h_parent->d_inode)) {
++		err = -EBUSY;
++		goto out_unpin;
++	}
++	if (h_dentry) {
++		err = au_h_verify(h_dentry, p->udba, h_dir, h_parent, br);
++		if (unlikely(err)) {
++			au_fclr_pin(p->flags, MNT_WRITE);
++			goto out_unpin;
++		}
++	}
++
++	if (au_ftest_pin(p->flags, MNT_WRITE)) {
++		p->h_mnt = br->br_mnt;
++		err = mnt_want_write(p->h_mnt);
++		if (unlikely(err)) {
++			au_fclr_pin(p->flags, MNT_WRITE);
++			goto out_unpin;
++		}
++	}
++	goto out; /* success */
++
++ out_unpin:
++	au_unpin(p);
++ out_err:
++	AuErr("err %d\n", err);
++	err = au_busy_or_stale();
++ out:
++	return err;
++}
++
++void au_pin_init(struct au_pin *p, struct dentry *dentry,
++		 aufs_bindex_t bindex, int lsc_di, int lsc_hi,
++		 unsigned int udba, unsigned char flags)
++{
++	p->dentry = dentry;
++	p->udba = udba;
++	p->lsc_di = lsc_di;
++	p->lsc_hi = lsc_hi;
++	p->flags = flags;
++	p->bindex = bindex;
++
++	p->parent = NULL;
++	p->hdir = NULL;
++	p->h_mnt = NULL;
++}
++
++int au_pin(struct au_pin *pin, struct dentry *dentry, aufs_bindex_t bindex,
++	   unsigned int udba, unsigned char flags)
++{
++	au_pin_init(pin, dentry, bindex, AuLsc_DI_PARENT, AuLsc_I_PARENT2,
++		    udba, flags);
++	return au_do_pin(pin);
++}
++
++/* ---------------------------------------------------------------------- */
++
++#define AuIcpup_DID_CPUP	1
++#define au_ftest_icpup(flags, name)	((flags) & AuIcpup_##name)
++#define au_fset_icpup(flags, name)	{ (flags) |= AuIcpup_##name; }
++#define au_fclr_icpup(flags, name)	{ (flags) &= ~AuIcpup_##name; }
++
++struct au_icpup_args {
++	unsigned char flags;
++	unsigned char pin_flags;
++	aufs_bindex_t btgt;
++	struct au_pin pin;
++	struct path h_path;
++	struct inode *h_inode;
++};
++
++static int au_lock_and_icpup(struct dentry *dentry, struct iattr *ia,
++			     struct au_icpup_args *a)
++{
++	int err;
++	unsigned int udba;
++	loff_t sz;
++	aufs_bindex_t bstart;
++	struct dentry *hi_wh, *parent;
++	struct inode *inode;
++	struct au_wr_dir_args wr_dir_args = {
++		.force_btgt	= -1,
++		.flags		= 0
++	};
++
++	di_write_lock_child(dentry);
++	bstart = au_dbstart(dentry);
++	inode = dentry->d_inode;
++	if (S_ISDIR(inode->i_mode))
++		au_fset_wrdir(wr_dir_args.flags, ISDIR);
++	/* plink or hi_wh() case */
++	if (bstart != au_ibstart(inode))
++		wr_dir_args.force_btgt = au_ibstart(inode);
++	err = au_wr_dir(dentry, /*src_dentry*/NULL, &wr_dir_args);
++	if (unlikely(err < 0))
++		goto out_dentry;
++	a->btgt = err;
++	if (err != bstart)
++		au_fset_icpup(a->flags, DID_CPUP);
++
++	err = 0;
++	a->pin_flags = AuPin_MNT_WRITE;
++	parent = NULL;
++	if (!IS_ROOT(dentry)) {
++		au_fset_pin(a->pin_flags, DI_LOCKED);
++		parent = dget_parent(dentry);
++		di_write_lock_parent(parent);
++	}
++
++	udba = au_opt_udba(dentry->d_sb);
++	if (d_unhashed(dentry) || (ia->ia_valid & ATTR_FILE))
++		udba = AuOpt_UDBA_NONE;
++	err = au_pin(&a->pin, dentry, a->btgt, udba, a->pin_flags);
++	if (unlikely(err)) {
++		if (parent) {
++			di_write_unlock(parent);
++			dput(parent);
++		}
++		goto out_dentry;
++	}
++	a->h_path.dentry = au_h_dptr(dentry, bstart);
++	a->h_inode = a->h_path.dentry->d_inode;
++	mutex_lock_nested(&a->h_inode->i_mutex, AuLsc_I_CHILD);
++	sz = -1;
++	if ((ia->ia_valid & ATTR_SIZE) && ia->ia_size < i_size_read(a->h_inode))
++		sz = ia->ia_size;
++
++	hi_wh = NULL;
++	if (au_ftest_icpup(a->flags, DID_CPUP) && d_unhashed(dentry)) {
++		hi_wh = au_hi_wh(inode, a->btgt);
++		if (!hi_wh) {
++			err = au_sio_cpup_wh(dentry, a->btgt, sz, /*file*/NULL);
++			if (unlikely(err))
++				goto out_unlock;
++			hi_wh = au_hi_wh(inode, a->btgt);
++			/* todo: revalidate hi_wh? */
++		}
++	}
++
++	if (parent) {
++		au_pin_set_parent_lflag(&a->pin, /*lflag*/0);
++		di_downgrade_lock(parent, AuLock_IR);
++		dput(parent);
++	}
++	if (!au_ftest_icpup(a->flags, DID_CPUP))
++		goto out; /* success */
++
++	if (!d_unhashed(dentry)) {
++		err = au_sio_cpup_simple(dentry, a->btgt, sz, AuCpup_DTIME);
++		if (!err)
++			a->h_path.dentry = au_h_dptr(dentry, a->btgt);
++	} else if (!hi_wh)
++		a->h_path.dentry = au_h_dptr(dentry, a->btgt);
++	else
++		a->h_path.dentry = hi_wh; /* do not dget here */
++
++ out_unlock:
++	mutex_unlock(&a->h_inode->i_mutex);
++	a->h_inode = a->h_path.dentry->d_inode;
++	if (!err) {
++		mutex_lock_nested(&a->h_inode->i_mutex, AuLsc_I_CHILD);
++		goto out; /* success */
++	}
++
++	au_unpin(&a->pin);
++
++ out_dentry:
++	di_write_unlock(dentry);
++ out:
++	return err;
++}
++
++static int aufs_setattr(struct dentry *dentry, struct iattr *ia)
++{
++	int err;
++	struct inode *inode;
++	struct super_block *sb;
++	struct file *file;
++	struct au_icpup_args *a;
++
++	err = -ENOMEM;
++	a = kzalloc(sizeof(*a), GFP_NOFS);
++	if (unlikely(!a))
++		goto out;
++
++	inode = dentry->d_inode;
++	IMustLock(inode);
++	sb = dentry->d_sb;
++	si_read_lock(sb, AuLock_FLUSH);
++
++	file = NULL;
++	if (ia->ia_valid & ATTR_FILE) {
++		/* currently ftruncate(2) only */
++		file = ia->ia_file;
++		fi_write_lock(file);
++		ia->ia_file = au_h_fptr(file, au_fbstart(file));
++	}
++
++	if (ia->ia_valid & (ATTR_KILL_SUID | ATTR_KILL_SGID))
++		ia->ia_valid &= ~ATTR_MODE;
++
++	err = au_lock_and_icpup(dentry, ia, a);
++	if (unlikely(err < 0))
++		goto out_si;
++	if (au_ftest_icpup(a->flags, DID_CPUP)) {
++		ia->ia_file = NULL;
++		ia->ia_valid &= ~ATTR_FILE;
++	}
++
++	a->h_path.mnt = au_sbr_mnt(sb, a->btgt);
++	if (ia->ia_valid & ATTR_SIZE) {
++		struct file *f;
++
++		if (ia->ia_size < i_size_read(inode)) {
++			/* unmap only */
++			err = vmtruncate(inode, ia->ia_size);
++			if (unlikely(err))
++				goto out_unlock;
++		}
++
++		f = NULL;
++		if (ia->ia_valid & ATTR_FILE)
++			f = ia->ia_file;
++		mutex_unlock(&a->h_inode->i_mutex);
++		err = vfsub_trunc(&a->h_path, ia->ia_size, ia->ia_valid, f);
++		mutex_lock_nested(&a->h_inode->i_mutex, AuLsc_I_CHILD);
++	} else
++		err = vfsub_notify_change(&a->h_path, ia);
++	if (!err)
++		au_cpup_attr_changeable(inode);
++
++ out_unlock:
++	mutex_unlock(&a->h_inode->i_mutex);
++	au_unpin(&a->pin);
++	di_write_unlock(dentry);
++ out_si:
++	if (file) {
++		fi_write_unlock(file);
++		ia->ia_file = file;
++		ia->ia_valid |= ATTR_FILE;
++	}
++	si_read_unlock(sb);
++	kfree(a);
++ out:
++	return err;
++}
++
++static int au_getattr_lock_reval(struct dentry *dentry, unsigned int sigen)
++{
++	int err;
++	struct inode *inode;
++	struct dentry *parent;
++
++	err = 0;
++	inode = dentry->d_inode;
++	di_write_lock_child(dentry);
++	if (au_digen(dentry) != sigen || au_iigen(inode) != sigen) {
++		parent = dget_parent(dentry);
++		di_read_lock_parent(parent, AuLock_IR);
++		/* returns a number of positive dentries */
++		err = au_refresh_hdentry(dentry, inode->i_mode & S_IFMT);
++		if (err > 0)
++			err = au_refresh_hinode(inode, dentry);
++		di_read_unlock(parent, AuLock_IR);
++		dput(parent);
++		if (unlikely(!err))
++			err = -EIO;
++	}
++	di_downgrade_lock(dentry, AuLock_IR);
++	if (unlikely(err))
++		di_read_unlock(dentry, AuLock_IR);
++
++	return err;
++}
++
++static void au_refresh_iattr(struct inode *inode, struct kstat *st,
++			     unsigned int nlink)
++{
++	inode->i_mode = st->mode;
++	inode->i_uid = st->uid;
++	inode->i_gid = st->gid;
++	inode->i_atime = st->atime;
++	inode->i_mtime = st->mtime;
++	inode->i_ctime = st->ctime;
++
++	au_cpup_attr_nlink(inode, /*force*/0);
++	if (S_ISDIR(inode->i_mode)) {
++		inode->i_nlink -= nlink;
++		inode->i_nlink += st->nlink;
++	}
++
++	spin_lock(&inode->i_lock);
++	inode->i_blocks = st->blocks;
++	i_size_write(inode, st->size);
++	spin_unlock(&inode->i_lock);
++}
++
++static int aufs_getattr(struct vfsmount *mnt __maybe_unused,
++			struct dentry *dentry, struct kstat *st)
++{
++	int err;
++	unsigned int mnt_flags;
++	aufs_bindex_t bindex;
++	unsigned char udba_none, positive;
++	struct super_block *sb, *h_sb;
++	struct inode *inode;
++	struct vfsmount *h_mnt;
++	struct dentry *h_dentry;
++
++	err = 0;
++	sb = dentry->d_sb;
++	inode = dentry->d_inode;
++	si_read_lock(sb, AuLock_FLUSH);
++	mnt_flags = au_mntflags(sb);
++	udba_none = !!au_opt_test(mnt_flags, UDBA_NONE);
++
++	/* support fstat(2) */
++	if (!d_unhashed(dentry) && !udba_none) {
++		unsigned int sigen = au_sigen(sb);
++		if (au_digen(dentry) == sigen && au_iigen(inode) == sigen)
++			di_read_lock_child(dentry, AuLock_IR);
++		else {
++			AuDebugOn(!IS_ROOT(dentry));
++			err = au_getattr_lock_reval(dentry, sigen);
++			if (unlikely(err))
++				goto out;
++		}
++	} else
++		di_read_lock_child(dentry, AuLock_IR);
++
++	bindex = au_ibstart(inode);
++	h_mnt = au_sbr_mnt(sb, bindex);
++	h_sb = h_mnt->mnt_sb;
++	if (!au_test_fs_bad_iattr(h_sb) && udba_none)
++		goto out_fill; /* success */
++
++	h_dentry = NULL;
++	if (au_dbstart(dentry) == bindex)
++		h_dentry = dget(au_h_dptr(dentry, bindex));
++	else if (au_opt_test(mnt_flags, PLINK) && au_plink_test(inode)) {
++		h_dentry = au_plink_lkup(inode, bindex);
++		if (IS_ERR(h_dentry))
++			goto out_fill; /* pretending success */
++	}
++	/* illegally overlapped or something */
++	if (unlikely(!h_dentry))
++		goto out_fill; /* pretending success */
++
++	positive = !!h_dentry->d_inode;
++	if (positive)
++		err = vfs_getattr(h_mnt, h_dentry, st);
++	dput(h_dentry);
++	if (!err) {
++		if (positive)
++			au_refresh_iattr(inode, st, h_dentry->d_inode->i_nlink);
++		goto out_fill; /* success */
++	}
++	goto out_unlock;
++
++ out_fill:
++	generic_fillattr(inode, st);
++ out_unlock:
++	di_read_unlock(dentry, AuLock_IR);
++ out:
++	si_read_unlock(sb);
++	return err;
++}
++
++/* ---------------------------------------------------------------------- */
++
++static int h_readlink(struct dentry *dentry, int bindex, char __user *buf,
++		      int bufsiz)
++{
++	int err;
++	struct super_block *sb;
++	struct dentry *h_dentry;
++
++	err = -EINVAL;
++	h_dentry = au_h_dptr(dentry, bindex);
++	if (unlikely(/* !h_dentry
++		     || !h_dentry->d_inode
++		     || !h_dentry->d_inode->i_op
++		     || */ !h_dentry->d_inode->i_op->readlink))
++		goto out;
++
++	err = security_inode_readlink(h_dentry);
++	if (unlikely(err))
++		goto out;
++
++	sb = dentry->d_sb;
++	if (!au_test_ro(sb, bindex, dentry->d_inode)) {
++		vfsub_touch_atime(au_sbr_mnt(sb, bindex), h_dentry);
++		fsstack_copy_attr_atime(dentry->d_inode, h_dentry->d_inode);
++	}
++	err = h_dentry->d_inode->i_op->readlink(h_dentry, buf, bufsiz);
++
++ out:
++	return err;
++}
++
++static int aufs_readlink(struct dentry *dentry, char __user *buf, int bufsiz)
++{
++	int err;
++
++	aufs_read_lock(dentry, AuLock_IR);
++	err = h_readlink(dentry, au_dbstart(dentry), buf, bufsiz);
++	aufs_read_unlock(dentry, AuLock_IR);
++
++	return err;
++}
++
++static void *aufs_follow_link(struct dentry *dentry, struct nameidata *nd)
++{
++	int err;
++	char *buf;
++	mm_segment_t old_fs;
++
++	err = -ENOMEM;
++	buf = __getname();
++	if (unlikely(!buf))
++		goto out;
++
++	aufs_read_lock(dentry, AuLock_IR);
++	old_fs = get_fs();
++	set_fs(KERNEL_DS);
++	err = h_readlink(dentry, au_dbstart(dentry), (char __user *)buf,
++			 PATH_MAX);
++	set_fs(old_fs);
++	aufs_read_unlock(dentry, AuLock_IR);
++
++	if (err >= 0) {
++		buf[err] = 0;
++		/* will be freed by put_link */
++		nd_set_link(nd, buf);
++		return NULL; /* success */
++	}
++	__putname(buf);
++
++ out:
++	path_put(&nd->path);
++	AuTraceErr(err);
++	return ERR_PTR(err);
++}
++
++static void aufs_put_link(struct dentry *dentry __maybe_unused,
++			  struct nameidata *nd, void *cookie __maybe_unused)
++{
++	__putname(nd_get_link(nd));
++}
++
++/* ---------------------------------------------------------------------- */
++
++static void aufs_truncate_range(struct inode *inode __maybe_unused,
++				loff_t start __maybe_unused,
++				loff_t end __maybe_unused)
++{
++	AuUnsupport();
++}
++
++/* ---------------------------------------------------------------------- */
++
++struct inode_operations aufs_symlink_iop = {
++	.permission	= aufs_permission,
++	.setattr	= aufs_setattr,
++	.getattr	= aufs_getattr,
++	.readlink	= aufs_readlink,
++	.follow_link	= aufs_follow_link,
++	.put_link	= aufs_put_link
++};
++
++struct inode_operations aufs_dir_iop = {
++	.create		= aufs_create,
++	.lookup		= aufs_lookup,
++	.link		= aufs_link,
++	.unlink		= aufs_unlink,
++	.symlink	= aufs_symlink,
++	.mkdir		= aufs_mkdir,
++	.rmdir		= aufs_rmdir,
++	.mknod		= aufs_mknod,
++	.rename		= aufs_rename,
++
++	.permission	= aufs_permission,
++	.setattr	= aufs_setattr,
++	.getattr	= aufs_getattr
++};
++
++struct inode_operations aufs_iop = {
++	.permission	= aufs_permission,
++	.setattr	= aufs_setattr,
++	.getattr	= aufs_getattr,
++	.truncate_range	= aufs_truncate_range
++};
+diff -Nur linux-2.6.31.5.orig/fs/aufs/i_op_del.c linux-2.6.31.5/fs/aufs/i_op_del.c
+--- linux-2.6.31.5.orig/fs/aufs/i_op_del.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.31.5/fs/aufs/i_op_del.c	2009-11-15 22:02:37.000000000 +0100
+@@ -0,0 +1,468 @@
++/*
++ * Copyright (C) 2005-2009 Junjiro R. Okajima
++ *
++ * This program, aufs is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
++ */
++
++/*
++ * inode operations (del entry)
++ */
++
++#include "aufs.h"
++
++/*
++ * decide if a new whiteout for @dentry is necessary or not.
++ * when it is necessary, prepare the parent dir for the upper branch whose
++ * branch index is @bcpup for creation. the actual creation of the whiteout will
++ * be done by caller.
++ * return value:
++ * 0: wh is unnecessary
++ * plus: wh is necessary
++ * minus: error
++ */
++int au_wr_dir_need_wh(struct dentry *dentry, int isdir, aufs_bindex_t *bcpup)
++{
++	int need_wh, err;
++	aufs_bindex_t bstart;
++	struct super_block *sb;
++
++	sb = dentry->d_sb;
++	bstart = au_dbstart(dentry);
++	if (*bcpup < 0) {
++		*bcpup = bstart;
++		if (au_test_ro(sb, bstart, dentry->d_inode)) {
++			err = AuWbrCopyup(au_sbi(sb), dentry);
++			*bcpup = err;
++			if (unlikely(err < 0))
++				goto out;
++		}
++	} else
++		AuDebugOn(bstart < *bcpup
++			  || au_test_ro(sb, *bcpup, dentry->d_inode));
++	AuDbg("bcpup %d, bstart %d\n", *bcpup, bstart);
++
++	if (*bcpup != bstart) {
++		err = au_cpup_dirs(dentry, *bcpup);
++		if (unlikely(err))
++			goto out;
++		need_wh = 1;
++	} else {
++		aufs_bindex_t old_bend, new_bend, bdiropq = -1;
++
++		old_bend = au_dbend(dentry);
++		if (isdir) {
++			bdiropq = au_dbdiropq(dentry);
++			au_set_dbdiropq(dentry, -1);
++		}
++		need_wh = au_lkup_dentry(dentry, bstart + 1, /*type*/0,
++					 /*nd*/NULL);
++		err = need_wh;
++		if (isdir)
++			au_set_dbdiropq(dentry, bdiropq);
++		if (unlikely(err < 0))
++			goto out;
++		new_bend = au_dbend(dentry);
++		if (!need_wh && old_bend != new_bend) {
++			au_set_h_dptr(dentry, new_bend, NULL);
++			au_set_dbend(dentry, old_bend);
++		}
++	}
++	AuDbg("need_wh %d\n", need_wh);
++	err = need_wh;
++
++ out:
++	return err;
++}
++
++/*
++ * simple tests for the del-entry operations.
++ * following the checks in vfs, plus the parent-child relationship.
++ */
++int au_may_del(struct dentry *dentry, aufs_bindex_t bindex,
++	       struct dentry *h_parent, int isdir)
++{
++	int err;
++	umode_t h_mode;
++	struct dentry *h_dentry, *h_latest;
++	struct inode *h_inode;
++
++	h_dentry = au_h_dptr(dentry, bindex);
++	h_inode = h_dentry->d_inode;
++	if (dentry->d_inode) {
++		err = -ENOENT;
++		if (unlikely(!h_inode || !h_inode->i_nlink))
++			goto out;
++
++		h_mode = h_inode->i_mode;
++		if (!isdir) {
++			err = -EISDIR;
++			if (unlikely(S_ISDIR(h_mode)))
++				goto out;
++		} else if (unlikely(!S_ISDIR(h_mode))) {
++			err = -ENOTDIR;
++			goto out;
++		}
++	} else {
++		/* rename(2) case */
++		err = -EIO;
++		if (unlikely(h_inode))
++			goto out;
++	}
++
++	err = -ENOENT;
++	/* expected parent dir is locked */
++	if (unlikely(h_parent != h_dentry->d_parent))
++		goto out;
++	err = 0;
++
++	/*
++	 * rmdir a dir may break the consistency on some filesystem.
++	 * let's try heavy test.
++	 */
++	err = -EACCES;
++	if (unlikely(au_test_h_perm(h_parent->d_inode, MAY_EXEC | MAY_WRITE)))
++		goto out;
++
++	h_latest = au_sio_lkup_one(&dentry->d_name, h_parent,
++				   au_sbr(dentry->d_sb, bindex));
++	err = -EIO;
++	if (IS_ERR(h_latest))
++		goto out;
++	if (h_latest == h_dentry)
++		err = 0;
++	dput(h_latest);
++
++ out:
++	return err;
++}
++
++/*
++ * decide the branch where we operate for @dentry. the branch index will be set
++ * @rbcpup. after diciding it, 'pin' it and store the timestamps of the parent
++ * dir for reverting.
++ * when a new whiteout is necessary, create it.
++ */
++static struct dentry*
++lock_hdir_create_wh(struct dentry *dentry, int isdir, aufs_bindex_t *rbcpup,
++		    struct au_dtime *dt, struct au_pin *pin)
++{
++	struct dentry *wh_dentry;
++	struct super_block *sb;
++	struct path h_path;
++	int err, need_wh;
++	unsigned int udba;
++	aufs_bindex_t bcpup;
++
++	need_wh = au_wr_dir_need_wh(dentry, isdir, rbcpup);
++	wh_dentry = ERR_PTR(need_wh);
++	if (unlikely(need_wh < 0))
++		goto out;
++
++	sb = dentry->d_sb;
++	udba = au_opt_udba(sb);
++	bcpup = *rbcpup;
++	err = au_pin(pin, dentry, bcpup, udba,
++		     AuPin_DI_LOCKED | AuPin_MNT_WRITE);
++	wh_dentry = ERR_PTR(err);
++	if (unlikely(err))
++		goto out;
++
++	h_path.dentry = au_pinned_h_parent(pin);
++	if (udba != AuOpt_UDBA_NONE
++	    && au_dbstart(dentry) == bcpup) {
++		err = au_may_del(dentry, bcpup, h_path.dentry, isdir);
++		wh_dentry = ERR_PTR(err);
++		if (unlikely(err))
++			goto out_unpin;
++	}
++
++	h_path.mnt = au_sbr_mnt(sb, bcpup);
++	au_dtime_store(dt, au_pinned_parent(pin), &h_path);
++	wh_dentry = NULL;
++	if (!need_wh)
++		goto out; /* success, no need to create whiteout */
++
++	wh_dentry = au_wh_create(dentry, bcpup, h_path.dentry);
++	if (!IS_ERR(wh_dentry))
++		goto out; /* success */
++	/* returns with the parent is locked and wh_dentry is dget-ed */
++
++ out_unpin:
++	au_unpin(pin);
++ out:
++	return wh_dentry;
++}
++
++/*
++ * when removing a dir, rename it to a unique temporary whiteout-ed name first
++ * in order to be revertible and save time for removing many child whiteouts
++ * under the dir.
++ * returns 1 when there are too many child whiteout and caller should remove
++ * them asynchronously. returns 0 when the number of children is enough small to
++ * remove now or the branch fs is a remote fs.
++ * otherwise return an error.
++ */
++static int renwh_and_rmdir(struct dentry *dentry, aufs_bindex_t bindex,
++			   struct au_nhash *whlist, struct inode *dir)
++{
++	int rmdir_later, err, dirwh;
++	struct dentry *h_dentry;
++	struct super_block *sb;
++
++	sb = dentry->d_sb;
++	SiMustAnyLock(sb);
++	h_dentry = au_h_dptr(dentry, bindex);
++	err = au_whtmp_ren(h_dentry, au_sbr(sb, bindex));
++	if (unlikely(err))
++		goto out;
++
++	/* stop monitoring */
++	au_hin_free(au_hi(dentry->d_inode, bindex));
++
++	if (!au_test_fs_remote(h_dentry->d_sb)) {
++		dirwh = au_sbi(sb)->si_dirwh;
++		rmdir_later = (dirwh <= 1);
++		if (!rmdir_later)
++			rmdir_later = au_nhash_test_longer_wh(whlist, bindex,
++							      dirwh);
++		if (rmdir_later)
++			return rmdir_later;
++	}
++
++	err = au_whtmp_rmdir(dir, bindex, h_dentry, whlist);
++	if (unlikely(err)) {
++		AuIOErr("rmdir %.*s, b%d failed, %d. ignored\n",
++			AuDLNPair(h_dentry), bindex, err);
++		err = 0;
++	}
++
++ out:
++	return err;
++}
++
++/*
++ * final procedure for deleting a entry.
++ * maintain dentry and iattr.
++ */
++static void epilog(struct inode *dir, struct dentry *dentry,
++		   aufs_bindex_t bindex)
++{
++	struct inode *inode;
++
++	inode = dentry->d_inode;
++	d_drop(dentry);
++	inode->i_ctime = dir->i_ctime;
++
++	if (atomic_read(&dentry->d_count) == 1) {
++		au_set_h_dptr(dentry, au_dbstart(dentry), NULL);
++		au_update_dbstart(dentry);
++	}
++	if (au_ibstart(dir) == bindex)
++		au_cpup_attr_timesizes(dir);
++	dir->i_version++;
++}
++
++/*
++ * when an error happened, remove the created whiteout and revert everything.
++ */
++static int do_revert(int err, struct inode *dir, aufs_bindex_t bwh,
++		     struct dentry *wh_dentry, struct dentry *dentry,
++		     struct au_dtime *dt)
++{
++	int rerr;
++	struct path h_path = {
++		.dentry	= wh_dentry,
++		.mnt	= au_sbr_mnt(dir->i_sb, bwh)
++	};
++
++	rerr = au_wh_unlink_dentry(au_h_iptr(dir, bwh), &h_path, dentry);
++	if (!rerr) {
++		au_set_dbwh(dentry, bwh);
++		au_dtime_revert(dt);
++		return 0;
++	}
++
++	AuIOErr("%.*s reverting whiteout failed(%d, %d)\n",
++		AuDLNPair(dentry), err, rerr);
++	return -EIO;
++}
++
++/* ---------------------------------------------------------------------- */
++
++int aufs_unlink(struct inode *dir, struct dentry *dentry)
++{
++	int err;
++	aufs_bindex_t bwh, bindex, bstart;
++	struct au_dtime dt;
++	struct au_pin pin;
++	struct path h_path;
++	struct inode *inode, *h_dir;
++	struct dentry *parent, *wh_dentry;
++
++	IMustLock(dir);
++	inode = dentry->d_inode;
++	if (unlikely(!inode))
++		return -ENOENT; /* possible? */
++	IMustLock(inode);
++
++	aufs_read_lock(dentry, AuLock_DW);
++	parent = dentry->d_parent; /* dir inode is locked */
++	di_write_lock_parent(parent);
++
++	bstart = au_dbstart(dentry);
++	bwh = au_dbwh(dentry);
++	bindex = -1;
++	wh_dentry = lock_hdir_create_wh(dentry, /*isdir*/0, &bindex, &dt, &pin);
++	err = PTR_ERR(wh_dentry);
++	if (IS_ERR(wh_dentry))
++		goto out;
++
++	h_path.mnt = au_sbr_mnt(dentry->d_sb, bstart);
++	h_path.dentry = au_h_dptr(dentry, bstart);
++	dget(h_path.dentry);
++	if (bindex == bstart) {
++		h_dir = au_pinned_h_dir(&pin);
++		err = vfsub_unlink(h_dir, &h_path, /*force*/0);
++	} else {
++		/* dir inode is locked */
++		h_dir = wh_dentry->d_parent->d_inode;
++		IMustLock(h_dir);
++		err = 0;
++	}
++
++	if (!err) {
++		drop_nlink(inode);
++		epilog(dir, dentry, bindex);
++
++		/* update target timestamps */
++		if (bindex == bstart) {
++			vfsub_update_h_iattr(&h_path, /*did*/NULL); /*ignore*/
++			inode->i_ctime = h_path.dentry->d_inode->i_ctime;
++		} else
++			/* todo: this timestamp may be reverted later */
++			inode->i_ctime = h_dir->i_ctime;
++		goto out_unlock; /* success */
++	}
++
++	/* revert */
++	if (wh_dentry) {
++		int rerr;
++
++		rerr = do_revert(err, dir, bwh, wh_dentry, dentry, &dt);
++		if (rerr)
++			err = rerr;
++	}
++
++ out_unlock:
++	au_unpin(&pin);
++	dput(wh_dentry);
++	dput(h_path.dentry);
++ out:
++	di_write_unlock(parent);
++	aufs_read_unlock(dentry, AuLock_DW);
++	return err;
++}
++
++int aufs_rmdir(struct inode *dir, struct dentry *dentry)
++{
++	int err, rmdir_later;
++	aufs_bindex_t bwh, bindex, bstart;
++	struct au_dtime dt;
++	struct au_pin pin;
++	struct inode *inode;
++	struct dentry *parent, *wh_dentry, *h_dentry;
++	struct au_whtmp_rmdir *args;
++
++	IMustLock(dir);
++	inode = dentry->d_inode;
++	err = -ENOENT; /* possible? */
++	if (unlikely(!inode))
++		goto out;
++	IMustLock(inode);
++
++	aufs_read_lock(dentry, AuLock_DW | AuLock_FLUSH);
++	err = -ENOMEM;
++	args = au_whtmp_rmdir_alloc(dir->i_sb, GFP_NOFS);
++	if (unlikely(!args))
++		goto out_unlock;
++
++	parent = dentry->d_parent; /* dir inode is locked */
++	di_write_lock_parent(parent);
++	err = au_test_empty(dentry, &args->whlist);
++	if (unlikely(err))
++		goto out_args;
++
++	bstart = au_dbstart(dentry);
++	bwh = au_dbwh(dentry);
++	bindex = -1;
++	wh_dentry = lock_hdir_create_wh(dentry, /*isdir*/1, &bindex, &dt, &pin);
++	err = PTR_ERR(wh_dentry);
++	if (IS_ERR(wh_dentry))
++		goto out_args;
++
++	h_dentry = au_h_dptr(dentry, bstart);
++	dget(h_dentry);
++	rmdir_later = 0;
++	if (bindex == bstart) {
++		err = renwh_and_rmdir(dentry, bstart, &args->whlist, dir);
++		if (err > 0) {
++			rmdir_later = err;
++			err = 0;
++		}
++	} else {
++		/* stop monitoring */
++		au_hin_free(au_hi(inode, bstart));
++
++		/* dir inode is locked */
++		IMustLock(wh_dentry->d_parent->d_inode);
++		err = 0;
++	}
++
++	if (!err) {
++		clear_nlink(inode);
++		au_set_dbdiropq(dentry, -1);
++		epilog(dir, dentry, bindex);
++
++		if (rmdir_later) {
++			au_whtmp_kick_rmdir(dir, bstart, h_dentry, args);
++			args = NULL;
++		}
++
++		goto out_unpin; /* success */
++	}
++
++	/* revert */
++	AuLabel(revert);
++	if (wh_dentry) {
++		int rerr;
++
++		rerr = do_revert(err, dir, bwh, wh_dentry, dentry, &dt);
++		if (rerr)
++			err = rerr;
++	}
++
++ out_unpin:
++	au_unpin(&pin);
++	dput(wh_dentry);
++	dput(h_dentry);
++ out_args:
++	di_write_unlock(parent);
++	if (args)
++		au_whtmp_rmdir_free(args);
++ out_unlock:
++	aufs_read_unlock(dentry, AuLock_DW);
++ out:
++	return err;
++}
+diff -Nur linux-2.6.31.5.orig/fs/aufs/i_op_ren.c linux-2.6.31.5/fs/aufs/i_op_ren.c
+--- linux-2.6.31.5.orig/fs/aufs/i_op_ren.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.31.5/fs/aufs/i_op_ren.c	2009-11-15 22:02:37.000000000 +0100
+@@ -0,0 +1,948 @@
++/*
++ * Copyright (C) 2005-2009 Junjiro R. Okajima
++ *
++ * This program, aufs is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
++ */
++
++/*
++ * inode operation (rename entry)
++ * todo: this is crazy monster
++ */
++
++#include "aufs.h"
++
++enum { AuSRC, AuDST, AuSrcDst };
++enum { AuPARENT, AuCHILD, AuParentChild };
++
++#define AuRen_ISDIR	1
++#define AuRen_ISSAMEDIR	(1 << 1)
++#define AuRen_WHSRC	(1 << 2)
++#define AuRen_WHDST	(1 << 3)
++#define AuRen_MNT_WRITE	(1 << 4)
++#define AuRen_DT_DSTDIR	(1 << 5)
++#define AuRen_DIROPQ	(1 << 6)
++#define AuRen_CPUP	(1 << 7)
++#define au_ftest_ren(flags, name)	((flags) & AuRen_##name)
++#define au_fset_ren(flags, name)	{ (flags) |= AuRen_##name; }
++#define au_fclr_ren(flags, name)	{ (flags) &= ~AuRen_##name; }
++
++struct au_ren_args {
++	struct {
++		struct dentry *dentry, *h_dentry, *parent, *h_parent,
++			*wh_dentry;
++		struct inode *dir, *inode;
++		struct au_hinode *hdir;
++		struct au_dtime dt[AuParentChild];
++		aufs_bindex_t bstart;
++	} sd[AuSrcDst];
++
++#define src_dentry	sd[AuSRC].dentry
++#define src_dir		sd[AuSRC].dir
++#define src_inode	sd[AuSRC].inode
++#define src_h_dentry	sd[AuSRC].h_dentry
++#define src_parent	sd[AuSRC].parent
++#define src_h_parent	sd[AuSRC].h_parent
++#define src_wh_dentry	sd[AuSRC].wh_dentry
++#define src_hdir	sd[AuSRC].hdir
++#define src_h_dir	sd[AuSRC].hdir->hi_inode
++#define src_dt		sd[AuSRC].dt
++#define src_bstart	sd[AuSRC].bstart
++
++#define dst_dentry	sd[AuDST].dentry
++#define dst_dir		sd[AuDST].dir
++#define dst_inode	sd[AuDST].inode
++#define dst_h_dentry	sd[AuDST].h_dentry
++#define dst_parent	sd[AuDST].parent
++#define dst_h_parent	sd[AuDST].h_parent
++#define dst_wh_dentry	sd[AuDST].wh_dentry
++#define dst_hdir	sd[AuDST].hdir
++#define dst_h_dir	sd[AuDST].hdir->hi_inode
++#define dst_dt		sd[AuDST].dt
++#define dst_bstart	sd[AuDST].bstart
++
++	struct dentry *h_trap;
++	struct au_branch *br;
++	struct au_hinode *src_hinode;
++	struct path h_path;
++	struct au_nhash whlist;
++	aufs_bindex_t btgt;
++
++	unsigned int flags;
++
++	struct au_whtmp_rmdir *thargs;
++	struct dentry *h_dst;
++};
++
++/* ---------------------------------------------------------------------- */
++
++/*
++ * functions for reverting.
++ * when an error happened in a single rename systemcall, we should revert
++ * everything as if nothing happend.
++ * we don't need to revert the copied-up/down the parent dir since they are
++ * harmless.
++ */
++
++#define RevertFailure(fmt, args...) do { \
++	AuIOErr("revert failure: " fmt " (%d, %d)\n", \
++		##args, err, rerr); \
++	err = -EIO; \
++} while (0)
++
++static void au_ren_rev_diropq(int err, struct au_ren_args *a)
++{
++	int rerr;
++
++	au_hin_imtx_lock_nested(a->src_hinode, AuLsc_I_CHILD);
++	rerr = au_diropq_remove(a->src_dentry, a->btgt);
++	au_hin_imtx_unlock(a->src_hinode);
++	if (rerr)
++		RevertFailure("remove diropq %.*s", AuDLNPair(a->src_dentry));
++}
++
++
++static void au_ren_rev_rename(int err, struct au_ren_args *a)
++{
++	int rerr;
++
++	a->h_path.dentry = au_lkup_one(&a->src_dentry->d_name, a->src_h_parent,
++				       a->br, /*nd*/NULL);
++	rerr = PTR_ERR(a->h_path.dentry);
++	if (IS_ERR(a->h_path.dentry)) {
++		RevertFailure("au_lkup_one %.*s", AuDLNPair(a->src_dentry));
++		return;
++	}
++
++	rerr = vfsub_rename(a->dst_h_dir,
++			    au_h_dptr(a->src_dentry, a->btgt),
++			    a->src_h_dir, &a->h_path);
++	d_drop(a->h_path.dentry);
++	dput(a->h_path.dentry);
++	/* au_set_h_dptr(a->src_dentry, a->btgt, NULL); */
++	if (rerr)
++		RevertFailure("rename %.*s", AuDLNPair(a->src_dentry));
++}
++
++static void au_ren_rev_cpup(int err, struct au_ren_args *a)
++{
++	int rerr;
++
++	a->h_path.dentry = a->dst_h_dentry;
++	rerr = vfsub_unlink(a->dst_h_dir, &a->h_path, /*force*/0);
++	au_set_h_dptr(a->src_dentry, a->btgt, NULL);
++	au_set_dbstart(a->src_dentry, a->src_bstart);
++	if (rerr)
++		RevertFailure("unlink %.*s", AuDLNPair(a->dst_h_dentry));
++}
++
++
++static void au_ren_rev_whtmp(int err, struct au_ren_args *a)
++{
++	int rerr;
++
++	a->h_path.dentry = au_lkup_one(&a->dst_dentry->d_name, a->dst_h_parent,
++				       a->br, /*nd*/NULL);
++	rerr = PTR_ERR(a->h_path.dentry);
++	if (IS_ERR(a->h_path.dentry)) {
++		RevertFailure("lookup %.*s", AuDLNPair(a->dst_dentry));
++		return;
++	}
++	if (a->h_path.dentry->d_inode) {
++		d_drop(a->h_path.dentry);
++		dput(a->h_path.dentry);
++		return;
++	}
++
++	rerr = vfsub_rename(a->dst_h_dir, a->h_dst, a->dst_h_dir, &a->h_path);
++	d_drop(a->h_path.dentry);
++	dput(a->h_path.dentry);
++	if (!rerr) {
++		au_set_h_dptr(a->dst_dentry, a->btgt, NULL);
++		au_set_h_dptr(a->dst_dentry, a->btgt, dget(a->h_dst));
++	} else
++		RevertFailure("rename %.*s", AuDLNPair(a->h_dst));
++}
++
++static void au_ren_rev_whsrc(int err, struct au_ren_args *a)
++{
++	int rerr;
++
++	a->h_path.dentry = a->src_wh_dentry;
++	rerr = au_wh_unlink_dentry(a->src_h_dir, &a->h_path, a->src_dentry);
++	if (rerr)
++		RevertFailure("unlink %.*s", AuDLNPair(a->src_wh_dentry));
++}
++
++static void au_ren_rev_drop(struct au_ren_args *a)
++{
++	struct dentry *d, *h_d;
++	int i;
++	aufs_bindex_t bend, bindex;
++
++	for (i = 0; i < AuSrcDst; i++) {
++		d = a->sd[i].dentry;
++		d_drop(d);
++		bend = au_dbend(d);
++		for (bindex = au_dbstart(d); bindex <= bend; bindex++) {
++			h_d = au_h_dptr(d, bindex);
++			if (h_d)
++				d_drop(h_d);
++		}
++	}
++
++	au_update_dbstart(a->dst_dentry);
++	if (a->thargs)
++		d_drop(a->h_dst);
++}
++#undef RevertFailure
++
++/* ---------------------------------------------------------------------- */
++
++/*
++ * when we have to copyup the renaming entry, do it with the rename-target name
++ * in order to minimize the cost (the later actual rename is unnecessary).
++ * otherwise rename it on the target branch.
++ */
++static int au_ren_or_cpup(struct au_ren_args *a)
++{
++	int err;
++	struct dentry *d;
++
++	d = a->src_dentry;
++	if (au_dbstart(d) == a->btgt) {
++		a->h_path.dentry = a->dst_h_dentry;
++		if (au_ftest_ren(a->flags, DIROPQ)
++		    && au_dbdiropq(d) == a->btgt)
++			au_fclr_ren(a->flags, DIROPQ);
++		AuDebugOn(au_dbstart(d) != a->btgt);
++		err = vfsub_rename(a->src_h_dir, au_h_dptr(d, a->btgt),
++				   a->dst_h_dir, &a->h_path);
++	} else {
++		struct mutex *h_mtx = &a->src_h_dentry->d_inode->i_mutex;
++
++		au_fset_ren(a->flags, CPUP);
++		mutex_lock_nested(h_mtx, AuLsc_I_CHILD);
++		au_set_dbstart(d, a->btgt);
++		au_set_h_dptr(d, a->btgt, dget(a->dst_h_dentry));
++		err = au_sio_cpup_single(d, a->btgt, a->src_bstart, -1,
++					 !AuCpup_DTIME, a->dst_parent);
++		if (unlikely(err)) {
++			au_set_h_dptr(d, a->btgt, NULL);
++			au_set_dbstart(d, a->src_bstart);
++		}
++		mutex_unlock(h_mtx);
++	}
++
++	return err;
++}
++
++/* cf. aufs_rmdir() */
++static int au_ren_del_whtmp(struct au_ren_args *a)
++{
++	int err;
++	struct inode *dir;
++
++	dir = a->dst_dir;
++	SiMustAnyLock(dir->i_sb);
++	if (!au_nhash_test_longer_wh(&a->whlist, a->btgt,
++				     au_sbi(dir->i_sb)->si_dirwh)
++	    || au_test_fs_remote(a->h_dst->d_sb)) {
++		err = au_whtmp_rmdir(dir, a->btgt, a->h_dst, &a->whlist);
++		if (unlikely(err))
++			AuWarn("failed removing whtmp dir %.*s (%d), "
++			       "ignored.\n", AuDLNPair(a->h_dst), err);
++	} else {
++		au_nhash_wh_free(&a->thargs->whlist);
++		a->thargs->whlist = a->whlist;
++		a->whlist.nh_num = 0;
++		au_whtmp_kick_rmdir(dir, a->btgt, a->h_dst, a->thargs);
++		dput(a->h_dst);
++		a->thargs = NULL;
++	}
++
++	return 0;
++}
++
++/* make it 'opaque' dir. */
++static int au_ren_diropq(struct au_ren_args *a)
++{
++	int err;
++	struct dentry *diropq;
++
++	err = 0;
++	a->src_hinode = au_hi(a->src_inode, a->btgt);
++	au_hin_imtx_lock_nested(a->src_hinode, AuLsc_I_CHILD);
++	diropq = au_diropq_create(a->src_dentry, a->btgt);
++	au_hin_imtx_unlock(a->src_hinode);
++	if (IS_ERR(diropq))
++		err = PTR_ERR(diropq);
++	dput(diropq);
++
++	return err;
++}
++
++static int do_rename(struct au_ren_args *a)
++{
++	int err;
++	struct dentry *d, *h_d;
++
++	/* prepare workqueue args for asynchronous rmdir */
++	h_d = a->dst_h_dentry;
++	if (au_ftest_ren(a->flags, ISDIR) && h_d->d_inode) {
++		err = -ENOMEM;
++		a->thargs = au_whtmp_rmdir_alloc(a->src_dentry->d_sb, GFP_NOFS);
++		if (unlikely(!a->thargs))
++			goto out;
++		a->h_dst = dget(h_d);
++	}
++
++	/* create whiteout for src_dentry */
++	if (au_ftest_ren(a->flags, WHSRC)) {
++		a->src_wh_dentry
++			= au_wh_create(a->src_dentry, a->btgt, a->src_h_parent);
++		err = PTR_ERR(a->src_wh_dentry);
++		if (IS_ERR(a->src_wh_dentry))
++			goto out_thargs;
++	}
++
++	/* lookup whiteout for dentry */
++	if (au_ftest_ren(a->flags, WHDST)) {
++		h_d = au_wh_lkup(a->dst_h_parent, &a->dst_dentry->d_name,
++				 a->br);
++		err = PTR_ERR(h_d);
++		if (IS_ERR(h_d))
++			goto out_whsrc;
++		if (!h_d->d_inode)
++			dput(h_d);
++		else
++			a->dst_wh_dentry = h_d;
++	}
++
++	/* rename dentry to tmpwh */
++	if (a->thargs) {
++		err = au_whtmp_ren(a->dst_h_dentry, a->br);
++		if (unlikely(err))
++			goto out_whdst;
++
++		d = a->dst_dentry;
++		au_set_h_dptr(d, a->btgt, NULL);
++		err = au_lkup_neg(d, a->btgt);
++		if (unlikely(err))
++			goto out_whtmp;
++		a->dst_h_dentry = au_h_dptr(d, a->btgt);
++	}
++
++	/* cpup src */
++	if (a->dst_h_dentry->d_inode && a->src_bstart != a->btgt) {
++		struct mutex *h_mtx = &a->src_h_dentry->d_inode->i_mutex;
++
++		mutex_lock_nested(h_mtx, AuLsc_I_CHILD);
++		err = au_sio_cpup_simple(a->src_dentry, a->btgt, -1,
++					 !AuCpup_DTIME);
++		mutex_unlock(h_mtx);
++		if (unlikely(err))
++			goto out_whtmp;
++	}
++
++	/* rename by vfs_rename or cpup */
++	d = a->dst_dentry;
++	if (au_ftest_ren(a->flags, ISDIR)
++	    && (a->dst_wh_dentry
++		|| au_dbdiropq(d) == a->btgt
++		/* hide the lower to keep xino */
++		|| a->btgt < au_dbend(d)
++		|| au_opt_test(au_mntflags(d->d_sb), ALWAYS_DIROPQ)))
++		au_fset_ren(a->flags, DIROPQ);
++	err = au_ren_or_cpup(a);
++	if (unlikely(err))
++		/* leave the copied-up one */
++		goto out_whtmp;
++
++	/* make dir opaque */
++	if (au_ftest_ren(a->flags, DIROPQ)) {
++		err = au_ren_diropq(a);
++		if (unlikely(err))
++			goto out_rename;
++	}
++
++	/* update target timestamps */
++	AuDebugOn(au_dbstart(a->src_dentry) != a->btgt);
++	a->h_path.dentry = au_h_dptr(a->src_dentry, a->btgt);
++	vfsub_update_h_iattr(&a->h_path, /*did*/NULL); /*ignore*/
++	a->src_inode->i_ctime = a->h_path.dentry->d_inode->i_ctime;
++
++	/* remove whiteout for dentry */
++	if (a->dst_wh_dentry) {
++		a->h_path.dentry = a->dst_wh_dentry;
++		err = au_wh_unlink_dentry(a->dst_h_dir, &a->h_path,
++					  a->dst_dentry);
++		if (unlikely(err))
++			goto out_diropq;
++	}
++
++	/* remove whtmp */
++	if (a->thargs)
++		au_ren_del_whtmp(a); /* ignore this error */
++
++	err = 0;
++	goto out_success;
++
++ out_diropq:
++	if (au_ftest_ren(a->flags, DIROPQ))
++		au_ren_rev_diropq(err, a);
++ out_rename:
++	if (!au_ftest_ren(a->flags, CPUP))
++		au_ren_rev_rename(err, a);
++	else
++		au_ren_rev_cpup(err, a);
++ out_whtmp:
++	if (a->thargs)
++		au_ren_rev_whtmp(err, a);
++ out_whdst:
++	dput(a->dst_wh_dentry);
++	a->dst_wh_dentry = NULL;
++ out_whsrc:
++	if (a->src_wh_dentry)
++		au_ren_rev_whsrc(err, a);
++	au_ren_rev_drop(a);
++ out_success:
++	dput(a->src_wh_dentry);
++	dput(a->dst_wh_dentry);
++ out_thargs:
++	if (a->thargs) {
++		dput(a->h_dst);
++		au_whtmp_rmdir_free(a->thargs);
++		a->thargs = NULL;
++	}
++ out:
++	return err;
++}
++
++/* ---------------------------------------------------------------------- */
++
++/*
++ * test if @dentry dir can be rename destination or not.
++ * success means, it is a logically empty dir.
++ */
++static int may_rename_dstdir(struct dentry *dentry, struct au_nhash *whlist)
++{
++	return au_test_empty(dentry, whlist);
++}
++
++/*
++ * test if @dentry dir can be rename source or not.
++ * if it can, return 0 and @children is filled.
++ * success means,
++ * - it is a logically empty dir.
++ * - or, it exists on writable branch and has no children including whiteouts
++ *       on the lower branch.
++ */
++static int may_rename_srcdir(struct dentry *dentry, aufs_bindex_t btgt)
++{
++	int err;
++	aufs_bindex_t bstart;
++
++	bstart = au_dbstart(dentry);
++	if (bstart != btgt) {
++		struct au_nhash whlist;
++
++		SiMustAnyLock(dentry->d_sb);
++		err = au_nhash_alloc(&whlist, au_sbi(dentry->d_sb)->si_rdhash,
++				     GFP_NOFS);
++		if (unlikely(err))
++			goto out;
++		err = au_test_empty(dentry, &whlist);
++		au_nhash_wh_free(&whlist);
++		goto out;
++	}
++
++	if (bstart == au_dbtaildir(dentry))
++		return 0; /* success */
++
++	err = au_test_empty_lower(dentry);
++
++ out:
++	if (err == -ENOTEMPTY) {
++		AuWarn1("renaming dir who has child(ren) on multiple branches,"
++			" is not supported\n");
++		err = -EXDEV;
++	}
++	return err;
++}
++
++/* side effect: sets whlist and h_dentry */
++static int au_ren_may_dir(struct au_ren_args *a)
++{
++	int err;
++	struct dentry *d;
++
++	d = a->dst_dentry;
++	SiMustAnyLock(d->d_sb);
++	err = au_nhash_alloc(&a->whlist, au_sbi(d->d_sb)->si_rdhash, GFP_NOFS);
++	if (unlikely(err))
++		goto out;
++
++	err = 0;
++	if (au_ftest_ren(a->flags, ISDIR) && a->dst_inode) {
++		au_set_dbstart(d, a->dst_bstart);
++		err = may_rename_dstdir(d, &a->whlist);
++		au_set_dbstart(d, a->btgt);
++	}
++	a->dst_h_dentry = au_h_dptr(d, au_dbstart(d));
++	if (unlikely(err))
++		goto out;
++
++	d = a->src_dentry;
++	a->src_h_dentry = au_h_dptr(d, au_dbstart(d));
++	if (au_ftest_ren(a->flags, ISDIR)) {
++		err = may_rename_srcdir(d, a->btgt);
++		if (unlikely(err)) {
++			au_nhash_wh_free(&a->whlist);
++			a->whlist.nh_num = 0;
++		}
++	}
++ out:
++	return err;
++}
++
++/* ---------------------------------------------------------------------- */
++
++/*
++ * simple tests for rename.
++ * following the checks in vfs, plus the parent-child relationship.
++ */
++static int au_may_ren(struct au_ren_args *a)
++{
++	int err, isdir;
++	struct inode *h_inode;
++
++	if (a->src_bstart == a->btgt) {
++		err = au_may_del(a->src_dentry, a->btgt, a->src_h_parent,
++				 au_ftest_ren(a->flags, ISDIR));
++		if (unlikely(err))
++			goto out;
++		err = -EINVAL;
++		if (unlikely(a->src_h_dentry == a->h_trap))
++			goto out;
++	}
++
++	err = 0;
++	if (a->dst_bstart != a->btgt)
++		goto out;
++
++	err = -EIO;
++	h_inode = a->dst_h_dentry->d_inode;
++	isdir = !!au_ftest_ren(a->flags, ISDIR);
++	if (!a->dst_dentry->d_inode) {
++		if (unlikely(h_inode))
++			goto out;
++		err = au_may_add(a->dst_dentry, a->btgt, a->dst_h_parent,
++				 isdir);
++	} else {
++		if (unlikely(!h_inode || !h_inode->i_nlink))
++			goto out;
++		err = au_may_del(a->dst_dentry, a->btgt, a->dst_h_parent,
++				 isdir);
++		if (unlikely(err))
++			goto out;
++		err = -ENOTEMPTY;
++		if (unlikely(a->dst_h_dentry == a->h_trap))
++			goto out;
++		err = 0;
++	}
++
++ out:
++	if (unlikely(err == -ENOENT || err == -EEXIST))
++		err = -EIO;
++	return err;
++}
++
++/* ---------------------------------------------------------------------- */
++
++/*
++ * locking order
++ * (VFS)
++ * - src_dir and dir by lock_rename()
++ * - inode if exitsts
++ * (aufs)
++ * - lock all
++ *   + src_dentry and dentry by aufs_read_and_write_lock2() which calls,
++ *     + si_read_lock
++ *     + di_write_lock2_child()
++ *       + di_write_lock_child()
++ *	   + ii_write_lock_child()
++ *       + di_write_lock_child2()
++ *	   + ii_write_lock_child2()
++ *     + src_parent and parent
++ *       + di_write_lock_parent()
++ *	   + ii_write_lock_parent()
++ *       + di_write_lock_parent2()
++ *	   + ii_write_lock_parent2()
++ *   + lower src_dir and dir by vfsub_lock_rename()
++ *   + verify the every relationships between child and parent. if any
++ *     of them failed, unlock all and return -EBUSY.
++ */
++static void au_ren_unlock(struct au_ren_args *a)
++{
++	struct super_block *sb;
++
++	sb = a->dst_dentry->d_sb;
++	if (au_ftest_ren(a->flags, MNT_WRITE))
++		mnt_drop_write(a->br->br_mnt);
++	vfsub_unlock_rename(a->src_h_parent, a->src_hdir,
++			    a->dst_h_parent, a->dst_hdir);
++}
++
++static int au_ren_lock(struct au_ren_args *a)
++{
++	int err;
++	unsigned int udba;
++
++	err = 0;
++	a->src_h_parent = au_h_dptr(a->src_parent, a->btgt);
++	a->src_hdir = au_hi(a->src_dir, a->btgt);
++	a->dst_h_parent = au_h_dptr(a->dst_parent, a->btgt);
++	a->dst_hdir = au_hi(a->dst_dir, a->btgt);
++	a->h_trap = vfsub_lock_rename(a->src_h_parent, a->src_hdir,
++				      a->dst_h_parent, a->dst_hdir);
++	udba = au_opt_udba(a->src_dentry->d_sb);
++	if (unlikely(a->src_hdir->hi_inode != a->src_h_parent->d_inode
++		     || a->dst_hdir->hi_inode != a->dst_h_parent->d_inode))
++		err = au_busy_or_stale();
++	if (!err && au_dbstart(a->src_dentry) == a->btgt)
++		err = au_h_verify(a->src_h_dentry, udba,
++				  a->src_h_parent->d_inode, a->src_h_parent,
++				  a->br);
++	if (!err && au_dbstart(a->dst_dentry) == a->btgt)
++		err = au_h_verify(a->dst_h_dentry, udba,
++				  a->dst_h_parent->d_inode, a->dst_h_parent,
++				  a->br);
++	if (!err) {
++		err = mnt_want_write(a->br->br_mnt);
++		if (unlikely(err))
++			goto out_unlock;
++		au_fset_ren(a->flags, MNT_WRITE);
++		goto out; /* success */
++	}
++
++	err = au_busy_or_stale();
++
++ out_unlock:
++	au_ren_unlock(a);
++ out:
++	return err;
++}
++
++/* ---------------------------------------------------------------------- */
++
++static void au_ren_refresh_dir(struct au_ren_args *a)
++{
++	struct inode *dir;
++
++	dir = a->dst_dir;
++	dir->i_version++;
++	if (au_ftest_ren(a->flags, ISDIR)) {
++		/* is this updating defined in POSIX? */
++		au_cpup_attr_timesizes(a->src_inode);
++		au_cpup_attr_nlink(dir, /*force*/1);
++		if (a->dst_inode) {
++			clear_nlink(a->dst_inode);
++			au_cpup_attr_timesizes(a->dst_inode);
++		}
++	}
++	if (au_ibstart(dir) == a->btgt)
++		au_cpup_attr_timesizes(dir);
++
++	if (au_ftest_ren(a->flags, ISSAMEDIR))
++		return;
++
++	dir = a->src_dir;
++	dir->i_version++;
++	if (au_ftest_ren(a->flags, ISDIR))
++		au_cpup_attr_nlink(dir, /*force*/1);
++	if (au_ibstart(dir) == a->btgt)
++		au_cpup_attr_timesizes(dir);
++}
++
++static void au_ren_refresh(struct au_ren_args *a)
++{
++	aufs_bindex_t bend, bindex;
++	struct dentry *d, *h_d;
++	struct inode *i, *h_i;
++	struct super_block *sb;
++
++	d = a->src_dentry;
++	au_set_dbwh(d, -1);
++	bend = au_dbend(d);
++	for (bindex = a->btgt + 1; bindex <= bend; bindex++) {
++		h_d = au_h_dptr(d, bindex);
++		if (h_d)
++			au_set_h_dptr(d, bindex, NULL);
++	}
++	au_set_dbend(d, a->btgt);
++
++	sb = d->d_sb;
++	i = a->src_inode;
++	if (au_opt_test(au_mntflags(sb), PLINK) && au_plink_test(i))
++		return; /* success */
++
++	bend = au_ibend(i);
++	for (bindex = a->btgt + 1; bindex <= bend; bindex++) {
++		h_i = au_h_iptr(i, bindex);
++		if (h_i) {
++			au_xino_write(sb, bindex, h_i->i_ino, /*ino*/0);
++			/* ignore this error */
++			au_set_h_iptr(i, bindex, NULL, 0);
++		}
++	}
++	au_set_ibend(i, a->btgt);
++}
++
++/* ---------------------------------------------------------------------- */
++
++/* mainly for link(2) and rename(2) */
++int au_wbr(struct dentry *dentry, aufs_bindex_t btgt)
++{
++	aufs_bindex_t bdiropq, bwh;
++	struct dentry *parent;
++	struct au_branch *br;
++
++	parent = dentry->d_parent;
++	IMustLock(parent->d_inode); /* dir is locked */
++
++	bdiropq = au_dbdiropq(parent);
++	bwh = au_dbwh(dentry);
++	br = au_sbr(dentry->d_sb, btgt);
++	if (au_br_rdonly(br)
++	    || (0 <= bdiropq && bdiropq < btgt)
++	    || (0 <= bwh && bwh < btgt))
++		btgt = -1;
++
++	AuDbg("btgt %d\n", btgt);
++	return btgt;
++}
++
++/* sets src_bstart, dst_bstart and btgt */
++static int au_ren_wbr(struct au_ren_args *a)
++{
++	int err;
++	struct au_wr_dir_args wr_dir_args = {
++		/* .force_btgt	= -1, */
++		.flags		= AuWrDir_ADD_ENTRY
++	};
++
++	a->src_bstart = au_dbstart(a->src_dentry);
++	a->dst_bstart = au_dbstart(a->dst_dentry);
++	if (au_ftest_ren(a->flags, ISDIR))
++		au_fset_wrdir(wr_dir_args.flags, ISDIR);
++	wr_dir_args.force_btgt = a->src_bstart;
++	if (a->dst_inode && a->dst_bstart < a->src_bstart)
++		wr_dir_args.force_btgt = a->dst_bstart;
++	wr_dir_args.force_btgt = au_wbr(a->dst_dentry, wr_dir_args.force_btgt);
++	err = au_wr_dir(a->dst_dentry, a->src_dentry, &wr_dir_args);
++	a->btgt = err;
++
++	return err;
++}
++
++static void au_ren_dt(struct au_ren_args *a)
++{
++	a->h_path.dentry = a->src_h_parent;
++	au_dtime_store(a->src_dt + AuPARENT, a->src_parent, &a->h_path);
++	if (!au_ftest_ren(a->flags, ISSAMEDIR)) {
++		a->h_path.dentry = a->dst_h_parent;
++		au_dtime_store(a->dst_dt + AuPARENT, a->dst_parent, &a->h_path);
++	}
++
++	au_fclr_ren(a->flags, DT_DSTDIR);
++	if (!au_ftest_ren(a->flags, ISDIR))
++		return;
++
++	a->h_path.dentry = a->src_h_dentry;
++	au_dtime_store(a->src_dt + AuCHILD, a->src_dentry, &a->h_path);
++	if (a->dst_h_dentry->d_inode) {
++		au_fset_ren(a->flags, DT_DSTDIR);
++		a->h_path.dentry = a->dst_h_dentry;
++		au_dtime_store(a->dst_dt + AuCHILD, a->dst_dentry, &a->h_path);
++	}
++}
++
++static void au_ren_rev_dt(int err, struct au_ren_args *a)
++{
++	struct dentry *h_d;
++	struct mutex *h_mtx;
++
++	au_dtime_revert(a->src_dt + AuPARENT);
++	if (!au_ftest_ren(a->flags, ISSAMEDIR))
++		au_dtime_revert(a->dst_dt + AuPARENT);
++
++	if (au_ftest_ren(a->flags, ISDIR) && err != -EIO) {
++		h_d = a->src_dt[AuCHILD].dt_h_path.dentry;
++		h_mtx = &h_d->d_inode->i_mutex;
++		mutex_lock_nested(h_mtx, AuLsc_I_CHILD);
++		au_dtime_revert(a->src_dt + AuCHILD);
++		mutex_unlock(h_mtx);
++
++		if (au_ftest_ren(a->flags, DT_DSTDIR)) {
++			h_d = a->dst_dt[AuCHILD].dt_h_path.dentry;
++			h_mtx = &h_d->d_inode->i_mutex;
++			mutex_lock_nested(h_mtx, AuLsc_I_CHILD);
++			au_dtime_revert(a->dst_dt + AuCHILD);
++			mutex_unlock(h_mtx);
++		}
++	}
++}
++
++/* ---------------------------------------------------------------------- */
++
++int aufs_rename(struct inode *_src_dir, struct dentry *_src_dentry,
++		struct inode *_dst_dir, struct dentry *_dst_dentry)
++{
++	int err;
++	/* reduce stack space */
++	struct au_ren_args *a;
++
++	IMustLock(_src_dir);
++	IMustLock(_dst_dir);
++
++	err = -ENOMEM;
++	BUILD_BUG_ON(sizeof(*a) > PAGE_SIZE);
++	a = kzalloc(sizeof(*a), GFP_NOFS);
++	if (unlikely(!a))
++		goto out;
++
++	a->src_dir = _src_dir;
++	a->src_dentry = _src_dentry;
++	a->src_inode = a->src_dentry->d_inode;
++	a->src_parent = a->src_dentry->d_parent; /* dir inode is locked */
++	a->dst_dir = _dst_dir;
++	a->dst_dentry = _dst_dentry;
++	a->dst_inode = a->dst_dentry->d_inode;
++	a->dst_parent = a->dst_dentry->d_parent; /* dir inode is locked */
++	if (a->dst_inode) {
++		IMustLock(a->dst_inode);
++		au_igrab(a->dst_inode);
++	}
++
++	err = -ENOTDIR;
++	if (S_ISDIR(a->src_inode->i_mode)) {
++		au_fset_ren(a->flags, ISDIR);
++		if (unlikely(a->dst_inode && !S_ISDIR(a->dst_inode->i_mode)))
++			goto out_free;
++		aufs_read_and_write_lock2(a->dst_dentry, a->src_dentry,
++					  AuLock_DIR | AuLock_FLUSH);
++	} else
++		aufs_read_and_write_lock2(a->dst_dentry, a->src_dentry,
++					  AuLock_FLUSH);
++
++	au_fset_ren(a->flags, ISSAMEDIR); /* temporary */
++	di_write_lock_parent(a->dst_parent);
++
++	/* which branch we process */
++	err = au_ren_wbr(a);
++	if (unlikely(err < 0))
++		goto out_unlock;
++	a->br = au_sbr(a->dst_dentry->d_sb, a->btgt);
++	a->h_path.mnt = a->br->br_mnt;
++
++	/* are they available to be renamed */
++	err = au_ren_may_dir(a);
++	if (unlikely(err))
++		goto out_unlock;
++
++	/* prepare the writable parent dir on the same branch */
++	if (a->dst_bstart == a->btgt) {
++		au_fset_ren(a->flags, WHDST);
++	} else {
++		err = au_cpup_dirs(a->dst_dentry, a->btgt);
++		if (unlikely(err))
++			goto out_children;
++	}
++
++	if (a->src_dir != a->dst_dir) {
++		/*
++		 * this temporary unlock is safe,
++		 * because both dir->i_mutex are locked.
++		 */
++		di_write_unlock(a->dst_parent);
++		di_write_lock_parent(a->src_parent);
++		err = au_wr_dir_need_wh(a->src_dentry,
++					au_ftest_ren(a->flags, ISDIR),
++					&a->btgt);
++		di_write_unlock(a->src_parent);
++		di_write_lock2_parent(a->src_parent, a->dst_parent, /*isdir*/1);
++		au_fclr_ren(a->flags, ISSAMEDIR);
++	} else
++		err = au_wr_dir_need_wh(a->src_dentry,
++					au_ftest_ren(a->flags, ISDIR),
++					&a->btgt);
++	if (unlikely(err < 0))
++		goto out_children;
++	if (err)
++		au_fset_ren(a->flags, WHSRC);
++
++	/* lock them all */
++	err = au_ren_lock(a);
++	if (unlikely(err))
++		goto out_children;
++
++	if (!au_opt_test(au_mntflags(a->dst_dir->i_sb), UDBA_NONE)) {
++		err = au_may_ren(a);
++		if (unlikely(err))
++			goto out_hdir;
++	}
++
++	/* store timestamps to be revertible */
++	au_ren_dt(a);
++
++	/* here we go */
++	err = do_rename(a);
++	if (unlikely(err))
++		goto out_dt;
++
++	/* update dir attributes */
++	au_ren_refresh_dir(a);
++
++	/* dput/iput all lower dentries */
++	au_ren_refresh(a);
++
++	goto out_hdir; /* success */
++
++ out_dt:
++	au_ren_rev_dt(err, a);
++ out_hdir:
++	au_ren_unlock(a);
++ out_children:
++	au_nhash_wh_free(&a->whlist);
++ out_unlock:
++	if (unlikely(err && au_ftest_ren(a->flags, ISDIR))) {
++		au_update_dbstart(a->dst_dentry);
++		d_drop(a->dst_dentry);
++	}
++	if (!err)
++		d_move(a->src_dentry, a->dst_dentry);
++	if (au_ftest_ren(a->flags, ISSAMEDIR))
++		di_write_unlock(a->dst_parent);
++	else
++		di_write_unlock2(a->src_parent, a->dst_parent);
++	aufs_read_and_write_unlock2(a->dst_dentry, a->src_dentry);
++ out_free:
++	iput(a->dst_inode);
++	if (a->thargs)
++		au_whtmp_rmdir_free(a->thargs);
++	kfree(a);
++ out:
++	return err;
++}
+diff -Nur linux-2.6.31.5.orig/fs/aufs/Kconfig linux-2.6.31.5/fs/aufs/Kconfig
+--- linux-2.6.31.5.orig/fs/aufs/Kconfig	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.31.5/fs/aufs/Kconfig	2009-11-15 22:02:37.000000000 +0100
+@@ -0,0 +1,132 @@
++config AUFS_FS
++	tristate "Aufs (Advanced multi layered unification filesystem) support"
++	depends on EXPERIMENTAL
++	help
++	Aufs is a stackable unification filesystem such as Unionfs,
++	which unifies several directories and provides a merged single
++	directory.
++	In the early days, aufs was entirely re-designed and
++	re-implemented Unionfs Version 1.x series. Introducing many
++	original ideas, approaches and improvements, it becomes totally
++	different from Unionfs while keeping the basic features.
++
++if AUFS_FS
++choice
++	prompt "Maximum number of branches"
++	default AUFS_BRANCH_MAX_127
++	help
++	Specifies the maximum number of branches (or member directories)
++	in a single aufs. The larger value consumes more system
++	resources and has a minor impact to performance.
++config AUFS_BRANCH_MAX_127
++	bool "127"
++	help
++	Specifies the maximum number of branches (or member directories)
++	in a single aufs. The larger value consumes more system
++	resources and has a minor impact to performance.
++config AUFS_BRANCH_MAX_511
++	bool "511"
++	help
++	Specifies the maximum number of branches (or member directories)
++	in a single aufs. The larger value consumes more system
++	resources and has a minor impact to performance.
++config AUFS_BRANCH_MAX_1023
++	bool "1023"
++	help
++	Specifies the maximum number of branches (or member directories)
++	in a single aufs. The larger value consumes more system
++	resources and has a minor impact to performance.
++config AUFS_BRANCH_MAX_32767
++	bool "32767"
++	help
++	Specifies the maximum number of branches (or member directories)
++	in a single aufs. The larger value consumes more system
++	resources and has a minor impact to performance.
++endchoice
++
++config AUFS_HINOTIFY
++	bool "Use inotify to detect actions on a branch"
++	depends on INOTIFY
++	help
++	If you want to modify files on branches directly, eg. bypassing aufs,
++	and want aufs to detect the changes of them fully, then enable this
++	option and use 'udba=inotify' mount option.
++	It will have a negative impact to the performance.
++	See detail in aufs.5.
++
++config AUFS_EXPORT
++	bool "NFS-exportable aufs"
++	depends on (AUFS_FS = y && EXPORTFS = y) || (AUFS_FS = m && EXPORTFS)
++	help
++	If you want to export your mounted aufs via NFS, then enable this
++	option. There are several requirements for this configuration.
++	See detail in aufs.5.
++
++config AUFS_SHWH
++	bool "Show whiteouts"
++	help
++	If you want to make the whiteouts in aufs visible, then enable
++	this option and specify 'shwh' mount option. Although it may
++	sounds like philosophy or something, but in technically it
++	simply shows the name of whiteout with keeping its behaviour.
++
++config AUFS_BR_RAMFS
++	bool "Ramfs (initramfs/rootfs) as an aufs branch"
++	help
++	If you want to use ramfs as an aufs branch fs, then enable this
++	option. Generally tmpfs is recommended.
++	Aufs prohibited them to be a branch fs by default, because
++	initramfs becomes unusable after switch_root or something
++	generally. If you sets initramfs as an aufs branch and boot your
++	system by switch_root, you will meet a problem easily since the
++	files in initramfs may be inaccessible.
++	Unless you are going to use ramfs as an aufs branch fs without
++	switch_root or something, leave it N.
++
++config AUFS_BR_FUSE
++	bool "Fuse fs as an aufs branch"
++	depends on FUSE_FS
++	select AUFS_POLL
++	help
++	If you want to use fuse-based userspace filesystem as an aufs
++	branch fs, then enable this option.
++	It implements the internal poll(2) operation which is
++	implemented by fuse only (curretnly).
++
++config AUFS_DEBUG
++	bool "Debug aufs"
++	help
++	Enable this to compile aufs internal debug code.
++	It will have a negative impact to the performance.
++
++config AUFS_MAGIC_SYSRQ
++	bool
++	depends on AUFS_DEBUG && MAGIC_SYSRQ
++	default y
++	help
++	Automatic configuration for internal use.
++	When aufs supports Magic SysRq, enabled automatically.
++
++config AUFS_BDEV_LOOP
++	bool
++	depends on BLK_DEV_LOOP
++	default y
++	help
++	Automatic configuration for internal use.
++	Convert =[ym] into =y.
++
++config AUFS_INO_T_64
++	bool
++	depends on AUFS_EXPORT
++	depends on 64BIT && !(ALPHA || S390)
++	default y
++	help
++	Automatic configuration for internal use.
++	/* typedef unsigned long/int __kernel_ino_t */
++	/* alpha and s390x are int */
++
++config AUFS_POLL
++	bool
++	help
++	Automatic configuration for internal use.
++endif
+diff -Nur linux-2.6.31.5.orig/fs/aufs/loop.c linux-2.6.31.5/fs/aufs/loop.c
+--- linux-2.6.31.5.orig/fs/aufs/loop.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.31.5/fs/aufs/loop.c	2009-11-15 22:02:37.000000000 +0100
+@@ -0,0 +1,55 @@
++/*
++ * Copyright (C) 2005-2009 Junjiro R. Okajima
++ *
++ * This program, aufs is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
++ */
++
++/*
++ * support for loopback block device as a branch
++ */
++
++#include <linux/loop.h>
++#include "aufs.h"
++
++/*
++ * test if two lower dentries have overlapping branches.
++ */
++int au_test_loopback_overlap(struct super_block *sb, struct dentry *h_d1,
++			     struct dentry *h_d2)
++{
++	struct inode *h_inode;
++	struct loop_device *l;
++
++	h_inode = h_d1->d_inode;
++	if (MAJOR(h_inode->i_sb->s_dev) != LOOP_MAJOR)
++		return 0;
++
++	l = h_inode->i_sb->s_bdev->bd_disk->private_data;
++	h_d1 = l->lo_backing_file->f_dentry;
++	/* h_d1 can be local NFS. in this case aufs cannot detect the loop */
++	if (unlikely(h_d1->d_sb == sb))
++		return 1;
++	return !!au_test_subdir(h_d1, h_d2);
++}
++
++/* true if a kernel thread named 'loop[0-9].*' accesses a file */
++int au_test_loopback_kthread(void)
++{
++	const char c = current->comm[4];
++
++	return current->mm == NULL
++	       && '0' <= c && c <= '9'
++	       && strncmp(current->comm, "loop", 4) == 0;
++}
+diff -Nur linux-2.6.31.5.orig/fs/aufs/loop.h linux-2.6.31.5/fs/aufs/loop.h
+--- linux-2.6.31.5.orig/fs/aufs/loop.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.31.5/fs/aufs/loop.h	2009-11-15 22:02:37.000000000 +0100
+@@ -0,0 +1,51 @@
++/*
++ * Copyright (C) 2005-2009 Junjiro R. Okajima
++ *
++ * This program, aufs is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
++ */
++
++/*
++ * support for loopback mount as a branch
++ */
++
++#ifndef __AUFS_LOOP_H__
++#define __AUFS_LOOP_H__
++
++#ifdef __KERNEL__
++
++struct dentry;
++struct super_block;
++
++#ifdef CONFIG_AUFS_BDEV_LOOP
++/* loop.c */
++int au_test_loopback_overlap(struct super_block *sb, struct dentry *h_d1,
++			     struct dentry *h_d2);
++int au_test_loopback_kthread(void);
++#else
++static inline
++int au_test_loopback_overlap(struct super_block *sb, struct dentry *h_d1,
++			     struct dentry *h_d2)
++{
++	return 0;
++}
++
++static inline int au_test_loopback_kthread(void)
++{
++	return 0;
++}
++#endif /* BLK_DEV_LOOP */
++
++#endif /* __KERNEL__ */
++#endif /* __AUFS_LOOP_H__ */
+diff -Nur linux-2.6.31.5.orig/fs/aufs/magic.mk linux-2.6.31.5/fs/aufs/magic.mk
+--- linux-2.6.31.5.orig/fs/aufs/magic.mk	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.31.5/fs/aufs/magic.mk	2009-11-15 22:02:37.000000000 +0100
+@@ -0,0 +1,52 @@
++
++# defined in ${srctree}/fs/fuse/inode.c
++# tristate
++ifdef CONFIG_FUSE_FS
++ccflags-y += -DFUSE_SUPER_MAGIC=0x65735546
++endif
++
++# defined in ${srctree}/fs/ocfs2/ocfs2_fs.h
++# tristate
++ifdef CONFIG_OCFS2_FS
++ccflags-y += -DOCFS2_SUPER_MAGIC=0x7461636f
++endif
++
++# defined in ${srctree}/fs/ocfs2/dlm/userdlm.h
++# tristate
++ifdef CONFIG_OCFS2_FS_O2CB
++ccflags-y += -DDLMFS_MAGIC=0x76a9f425
++endif
++
++# defined in ${srctree}/fs/ramfs/inode.c
++# always true
++ccflags-y += -DRAMFS_MAGIC=0x858458f6
++
++# defined in ${srctree}/fs/cifs/cifsfs.c
++# tristate
++ifdef CONFIG_CIFS_FS
++ccflags-y += -DCIFS_MAGIC_NUMBER=0xFF534D42
++endif
++
++# defined in ${srctree}/fs/xfs/xfs_sb.h
++# tristate
++ifdef CONFIG_XFS_FS
++ccflags-y += -DXFS_SB_MAGIC=0x58465342
++endif
++
++# defined in ${srctree}/fs/configfs/mount.c
++# tristate
++ifdef CONFIG_CONFIGFS_FS
++ccflags-y += -DCONFIGFS_MAGIC=0x62656570
++endif
++
++# defined in ${srctree}/fs/9p/v9fs.h
++# tristate
++ifdef CONFIG_9P_FS
++ccflags-y += -DV9FS_MAGIC=0x01021997
++endif
++
++# defined in ${srctree}/fs/ubifs/ubifs.h
++# tristate
++ifdef CONFIG_UBIFS_FS
++ccflags-y += -DUBIFS_SUPER_MAGIC=0x24051905
++endif
+diff -Nur linux-2.6.31.5.orig/fs/aufs/Makefile linux-2.6.31.5/fs/aufs/Makefile
+--- linux-2.6.31.5.orig/fs/aufs/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.31.5/fs/aufs/Makefile	2009-11-15 22:02:37.000000000 +0100
+@@ -0,0 +1,23 @@
++
++include ${src}/magic.mk
++-include ${src}/priv_def.mk
++
++obj-$(CONFIG_AUFS_FS) += aufs.o
++aufs-y := module.o sbinfo.o super.o branch.o xino.o sysaufs.o opts.o \
++	wkq.o vfsub.o dcsub.o \
++	cpup.o whout.o plink.o wbr_policy.o \
++	dinfo.o dentry.o \
++	finfo.o file.o f_op.o \
++	dir.o vdir.o \
++	iinfo.o inode.o i_op.o i_op_add.o i_op_del.o i_op_ren.o \
++	ioctl.o
++
++# all are boolean
++aufs-$(CONFIG_SYSFS) += sysfs.o
++aufs-$(CONFIG_DEBUG_FS) += dbgaufs.o
++aufs-$(CONFIG_AUFS_BDEV_LOOP) += loop.o
++aufs-$(CONFIG_AUFS_HINOTIFY) += hinotify.o
++aufs-$(CONFIG_AUFS_EXPORT) += export.o
++aufs-$(CONFIG_AUFS_POLL) += poll.o
++aufs-$(CONFIG_AUFS_DEBUG) += debug.o
++aufs-$(CONFIG_AUFS_MAGIC_SYSRQ) += sysrq.o
+diff -Nur linux-2.6.31.5.orig/fs/aufs/module.c linux-2.6.31.5/fs/aufs/module.c
+--- linux-2.6.31.5.orig/fs/aufs/module.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.31.5/fs/aufs/module.c	2009-11-15 22:02:37.000000000 +0100
+@@ -0,0 +1,173 @@
++/*
++ * Copyright (C) 2005-2009 Junjiro R. Okajima
++ *
++ * This program, aufs is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
++ */
++
++/*
++ * module global variables and operations
++ */
++
++#include <linux/module.h>
++#include <linux/seq_file.h>
++#include "aufs.h"
++
++void *au_kzrealloc(void *p, unsigned int nused, unsigned int new_sz, gfp_t gfp)
++{
++	if (new_sz <= nused)
++		return p;
++
++	p = krealloc(p, new_sz, gfp);
++	if (p)
++		memset(p + nused, 0, new_sz - nused);
++	return p;
++}
++
++/* ---------------------------------------------------------------------- */
++
++/*
++ * aufs caches
++ */
++struct kmem_cache *au_cachep[AuCache_Last];
++static int __init au_cache_init(void)
++{
++	au_cachep[AuCache_DINFO] = AuCache(au_dinfo);
++	if (au_cachep[AuCache_DINFO])
++		au_cachep[AuCache_ICNTNR] = AuCache(au_icntnr);
++	if (au_cachep[AuCache_ICNTNR])
++		au_cachep[AuCache_FINFO] = AuCache(au_finfo);
++	if (au_cachep[AuCache_FINFO])
++		au_cachep[AuCache_VDIR] = AuCache(au_vdir);
++	if (au_cachep[AuCache_VDIR])
++		au_cachep[AuCache_DEHSTR] = AuCache(au_vdir_dehstr);
++	if (au_cachep[AuCache_DEHSTR])
++		return 0;
++
++	return -ENOMEM;
++}
++
++static void au_cache_fin(void)
++{
++	int i;
++	for (i = 0; i < AuCache_Last; i++)
++		if (au_cachep[i]) {
++			kmem_cache_destroy(au_cachep[i]);
++			au_cachep[i] = NULL;
++		}
++}
++
++/* ---------------------------------------------------------------------- */
++
++int au_dir_roflags;
++
++/*
++ * functions for module interface.
++ */
++MODULE_LICENSE("GPL");
++/* MODULE_LICENSE("GPL v2"); */
++MODULE_AUTHOR("Junjiro R. Okajima <aufs-users@lists.sourceforge.net>");
++MODULE_DESCRIPTION(AUFS_NAME
++	" -- Advanced multi layered unification filesystem");
++MODULE_VERSION(AUFS_VERSION);
++
++/* it should be 'byte', but param_set_byte() prints it by "%c" */
++short aufs_nwkq = AUFS_NWKQ_DEF;
++MODULE_PARM_DESC(nwkq, "the number of workqueue thread, " AUFS_WKQ_NAME);
++module_param_named(nwkq, aufs_nwkq, short, S_IRUGO);
++
++/* this module parameter has no meaning when SYSFS is disabled */
++int sysaufs_brs = 1;
++MODULE_PARM_DESC(brs, "use <sysfs>/fs/aufs/si_*/brN");
++module_param_named(brs, sysaufs_brs, int, S_IRUGO);
++
++/* ---------------------------------------------------------------------- */
++
++static char au_esc_chars[0x20 + 3]; /* 0x01-0x20, backslash, del, and NULL */
++
++int au_seq_path(struct seq_file *seq, struct path *path)
++{
++	return seq_path(seq, path, au_esc_chars);
++}
++
++/* ---------------------------------------------------------------------- */
++
++static int __init aufs_init(void)
++{
++	int err, i;
++	char *p;
++
++	p = au_esc_chars;
++	for (i = 1; i <= ' '; i++)
++		*p++ = i;
++	*p++ = '\\';
++	*p++ = '\x7f';
++	*p = 0;
++
++	au_dir_roflags = au_file_roflags(O_DIRECTORY | O_LARGEFILE);
++
++	sysaufs_brs_init();
++	au_debug_init();
++
++	err = -EINVAL;
++	if (unlikely(aufs_nwkq <= 0))
++		goto out;
++
++	err = sysaufs_init();
++	if (unlikely(err))
++		goto out;
++	err = au_wkq_init();
++	if (unlikely(err))
++		goto out_sysaufs;
++	err = au_hinotify_init();
++	if (unlikely(err))
++		goto out_wkq;
++	err = au_sysrq_init();
++	if (unlikely(err))
++		goto out_hin;
++	err = au_cache_init();
++	if (unlikely(err))
++		goto out_sysrq;
++	err = register_filesystem(&aufs_fs_type);
++	if (unlikely(err))
++		goto out_cache;
++	pr_info(AUFS_NAME " " AUFS_VERSION "\n");
++	goto out; /* success */
++
++ out_cache:
++	au_cache_fin();
++ out_sysrq:
++	au_sysrq_fin();
++ out_hin:
++	au_hinotify_fin();
++ out_wkq:
++	au_wkq_fin();
++ out_sysaufs:
++	sysaufs_fin();
++ out:
++	return err;
++}
++
++static void __exit aufs_exit(void)
++{
++	unregister_filesystem(&aufs_fs_type);
++	au_cache_fin();
++	au_sysrq_fin();
++	au_hinotify_fin();
++	au_wkq_fin();
++	sysaufs_fin();
++}
++
++module_init(aufs_init);
++module_exit(aufs_exit);
+diff -Nur linux-2.6.31.5.orig/fs/aufs/module.h linux-2.6.31.5/fs/aufs/module.h
+--- linux-2.6.31.5.orig/fs/aufs/module.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.31.5/fs/aufs/module.h	2009-11-15 22:02:37.000000000 +0100
+@@ -0,0 +1,78 @@
++/*
++ * Copyright (C) 2005-2009 Junjiro R. Okajima
++ *
++ * This program, aufs is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
++ */
++
++/*
++ * module initialization and module-global
++ */
++
++#ifndef __AUFS_MODULE_H__
++#define __AUFS_MODULE_H__
++
++#ifdef __KERNEL__
++
++#include <linux/slab.h>
++
++struct path;
++struct seq_file;
++
++/* module parameters */
++extern short aufs_nwkq;
++extern int sysaufs_brs;
++
++/* ---------------------------------------------------------------------- */
++
++extern int au_dir_roflags;
++
++void *au_kzrealloc(void *p, unsigned int nused, unsigned int new_sz, gfp_t gfp);
++int au_seq_path(struct seq_file *seq, struct path *path);
++
++/* ---------------------------------------------------------------------- */
++
++/* kmem cache */
++enum {
++	AuCache_DINFO,
++	AuCache_ICNTNR,
++	AuCache_FINFO,
++	AuCache_VDIR,
++	AuCache_DEHSTR,
++#ifdef CONFIG_AUFS_HINOTIFY
++	AuCache_HINOTIFY,
++#endif
++	AuCache_Last
++};
++
++#define AuCache(type)	KMEM_CACHE(type, SLAB_RECLAIM_ACCOUNT)
++
++extern struct kmem_cache *au_cachep[];
++
++#define AuCacheFuncs(name, index) \
++static inline void *au_cache_alloc_##name(void) \
++{ return kmem_cache_alloc(au_cachep[AuCache_##index], GFP_NOFS); } \
++static inline void au_cache_free_##name(void *p) \
++{ kmem_cache_free(au_cachep[AuCache_##index], p); }
++
++AuCacheFuncs(dinfo, DINFO);
++AuCacheFuncs(icntnr, ICNTNR);
++AuCacheFuncs(finfo, FINFO);
++AuCacheFuncs(vdir, VDIR);
++AuCacheFuncs(dehstr, DEHSTR);
++
++/*  ---------------------------------------------------------------------- */
++
++#endif /* __KERNEL__ */
++#endif /* __AUFS_MODULE_H__ */
+diff -Nur linux-2.6.31.5.orig/fs/aufs/opts.c linux-2.6.31.5/fs/aufs/opts.c
+--- linux-2.6.31.5.orig/fs/aufs/opts.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.31.5/fs/aufs/opts.c	2009-11-15 22:02:37.000000000 +0100
+@@ -0,0 +1,1543 @@
++/*
++ * Copyright (C) 2005-2009 Junjiro R. Okajima
++ *
++ * This program, aufs is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
++ */
++
++/*
++ * mount options/flags
++ */
++
++#include <linux/file.h>
++#include <linux/namei.h>
++#include <linux/types.h> /* a distribution requires */
++#include <linux/parser.h>
++#include "aufs.h"
++
++/* ---------------------------------------------------------------------- */
++
++enum {
++	Opt_br,
++	Opt_add, Opt_del, Opt_mod, Opt_reorder, Opt_append, Opt_prepend,
++	Opt_idel, Opt_imod, Opt_ireorder,
++	Opt_dirwh, Opt_rdcache, Opt_rdblk, Opt_rdhash, Opt_rendir,
++	Opt_rdblk_def, Opt_rdhash_def,
++	Opt_xino, Opt_zxino, Opt_noxino,
++	Opt_trunc_xino, Opt_trunc_xino_v, Opt_notrunc_xino,
++	Opt_trunc_xino_path, Opt_itrunc_xino,
++	Opt_trunc_xib, Opt_notrunc_xib,
++	Opt_shwh, Opt_noshwh,
++	Opt_plink, Opt_noplink, Opt_list_plink,
++	Opt_udba,
++	/* Opt_lock, Opt_unlock, */
++	Opt_cmd, Opt_cmd_args,
++	Opt_diropq_a, Opt_diropq_w,
++	Opt_warn_perm, Opt_nowarn_perm,
++	Opt_wbr_copyup, Opt_wbr_create,
++	Opt_refrof, Opt_norefrof,
++	Opt_verbose, Opt_noverbose,
++	Opt_sum, Opt_nosum, Opt_wsum,
++	Opt_tail, Opt_ignore, Opt_ignore_silent, Opt_err
++};
++
++static match_table_t options = {
++	{Opt_br, "br=%s"},
++	{Opt_br, "br:%s"},
++
++	{Opt_add, "add=%d:%s"},
++	{Opt_add, "add:%d:%s"},
++	{Opt_add, "ins=%d:%s"},
++	{Opt_add, "ins:%d:%s"},
++	{Opt_append, "append=%s"},
++	{Opt_append, "append:%s"},
++	{Opt_prepend, "prepend=%s"},
++	{Opt_prepend, "prepend:%s"},
++
++	{Opt_del, "del=%s"},
++	{Opt_del, "del:%s"},
++	/* {Opt_idel, "idel:%d"}, */
++	{Opt_mod, "mod=%s"},
++	{Opt_mod, "mod:%s"},
++	/* {Opt_imod, "imod:%d:%s"}, */
++
++	{Opt_dirwh, "dirwh=%d"},
++
++	{Opt_xino, "xino=%s"},
++	{Opt_noxino, "noxino"},
++	{Opt_trunc_xino, "trunc_xino"},
++	{Opt_trunc_xino_v, "trunc_xino_v=%d:%d"},
++	{Opt_notrunc_xino, "notrunc_xino"},
++	{Opt_trunc_xino_path, "trunc_xino=%s"},
++	{Opt_itrunc_xino, "itrunc_xino=%d"},
++	/* {Opt_zxino, "zxino=%s"}, */
++	{Opt_trunc_xib, "trunc_xib"},
++	{Opt_notrunc_xib, "notrunc_xib"},
++
++	{Opt_plink, "plink"},
++	{Opt_noplink, "noplink"},
++#ifdef CONFIG_AUFS_DEBUG
++	{Opt_list_plink, "list_plink"},
++#endif
++
++	{Opt_udba, "udba=%s"},
++
++	{Opt_diropq_a, "diropq=always"},
++	{Opt_diropq_a, "diropq=a"},
++	{Opt_diropq_w, "diropq=whiteouted"},
++	{Opt_diropq_w, "diropq=w"},
++
++	{Opt_warn_perm, "warn_perm"},
++	{Opt_nowarn_perm, "nowarn_perm"},
++
++	/* keep them temporary */
++	{Opt_ignore_silent, "coo=%s"},
++	{Opt_ignore_silent, "nodlgt"},
++	{Opt_ignore_silent, "nodirperm1"},
++	{Opt_ignore_silent, "clean_plink"},
++
++#ifdef CONFIG_AUFS_SHWH
++	{Opt_shwh, "shwh"},
++#endif
++	{Opt_noshwh, "noshwh"},
++
++	{Opt_rendir, "rendir=%d"},
++
++	{Opt_refrof, "refrof"},
++	{Opt_norefrof, "norefrof"},
++
++	{Opt_verbose, "verbose"},
++	{Opt_verbose, "v"},
++	{Opt_noverbose, "noverbose"},
++	{Opt_noverbose, "quiet"},
++	{Opt_noverbose, "q"},
++	{Opt_noverbose, "silent"},
++
++	{Opt_sum, "sum"},
++	{Opt_nosum, "nosum"},
++	{Opt_wsum, "wsum"},
++
++	{Opt_rdcache, "rdcache=%d"},
++	{Opt_rdblk, "rdblk=%d"},
++	{Opt_rdblk_def, "rdblk=def"},
++	{Opt_rdhash, "rdhash=%d"},
++	{Opt_rdhash_def, "rdhash=def"},
++
++	{Opt_wbr_create, "create=%s"},
++	{Opt_wbr_create, "create_policy=%s"},
++	{Opt_wbr_copyup, "cpup=%s"},
++	{Opt_wbr_copyup, "copyup=%s"},
++	{Opt_wbr_copyup, "copyup_policy=%s"},
++
++	/* internal use for the scripts */
++	{Opt_ignore_silent, "si=%s"},
++
++	{Opt_br, "dirs=%s"},
++	{Opt_ignore, "debug=%d"},
++	{Opt_ignore, "delete=whiteout"},
++	{Opt_ignore, "delete=all"},
++	{Opt_ignore, "imap=%s"},
++
++	{Opt_err, NULL}
++};
++
++/* ---------------------------------------------------------------------- */
++
++static const char *au_parser_pattern(int val, struct match_token *token)
++{
++	while (token->pattern) {
++		if (token->token == val)
++			return token->pattern;
++		token++;
++	}
++	BUG();
++	return "??";
++}
++
++/* ---------------------------------------------------------------------- */
++
++static match_table_t brperms = {
++	{AuBrPerm_RO, AUFS_BRPERM_RO},
++	{AuBrPerm_RR, AUFS_BRPERM_RR},
++	{AuBrPerm_RW, AUFS_BRPERM_RW},
++
++	{AuBrPerm_ROWH, AUFS_BRPERM_ROWH},
++	{AuBrPerm_RRWH, AUFS_BRPERM_RRWH},
++	{AuBrPerm_RWNoLinkWH, AUFS_BRPERM_RWNLWH},
++
++	{AuBrPerm_ROWH, "nfsro"},
++	{AuBrPerm_RO, NULL}
++};
++
++static int br_perm_val(char *perm)
++{
++	int val;
++	substring_t args[MAX_OPT_ARGS];
++
++	val = match_token(perm, brperms, args);
++	return val;
++}
++
++const char *au_optstr_br_perm(int brperm)
++{
++	return au_parser_pattern(brperm, (void *)brperms);
++}
++
++/* ---------------------------------------------------------------------- */
++
++static match_table_t udbalevel = {
++	{AuOpt_UDBA_REVAL, "reval"},
++	{AuOpt_UDBA_NONE, "none"},
++#ifdef CONFIG_AUFS_HINOTIFY
++	{AuOpt_UDBA_HINOTIFY, "inotify"},
++#endif
++	{-1, NULL}
++};
++
++static int udba_val(char *str)
++{
++	substring_t args[MAX_OPT_ARGS];
++
++	return match_token(str, udbalevel, args);
++}
++
++const char *au_optstr_udba(int udba)
++{
++	return au_parser_pattern(udba, (void *)udbalevel);
++}
++
++/* ---------------------------------------------------------------------- */
++
++static match_table_t au_wbr_create_policy = {
++	{AuWbrCreate_TDP, "tdp"},
++	{AuWbrCreate_TDP, "top-down-parent"},
++	{AuWbrCreate_RR, "rr"},
++	{AuWbrCreate_RR, "round-robin"},
++	{AuWbrCreate_MFS, "mfs"},
++	{AuWbrCreate_MFS, "most-free-space"},
++	{AuWbrCreate_MFSV, "mfs:%d"},
++	{AuWbrCreate_MFSV, "most-free-space:%d"},
++
++	{AuWbrCreate_MFSRR, "mfsrr:%d"},
++	{AuWbrCreate_MFSRRV, "mfsrr:%d:%d"},
++	{AuWbrCreate_PMFS, "pmfs"},
++	{AuWbrCreate_PMFSV, "pmfs:%d"},
++
++	{-1, NULL}
++};
++
++/*
++ * cf. linux/lib/parser.c and cmdline.c
++ * gave up calling memparse() since it uses simple_strtoull() instead of
++ * strict_...().
++ */
++static int au_match_ull(substring_t *s, unsigned long long *result)
++{
++	int err;
++	unsigned int len;
++	char a[32];
++
++	err = -ERANGE;
++	len = s->to - s->from;
++	if (len + 1 <= sizeof(a)) {
++		memcpy(a, s->from, len);
++		a[len] = '\0';
++		err = strict_strtoull(a, 0, result);
++	}
++	return err;
++}
++
++static int au_wbr_mfs_wmark(substring_t *arg, char *str,
++			    struct au_opt_wbr_create *create)
++{
++	int err;
++	unsigned long long ull;
++
++	err = 0;
++	if (!au_match_ull(arg, &ull))
++		create->mfsrr_watermark = ull;
++	else {
++		AuErr("bad integer in %s\n", str);
++		err = -EINVAL;
++	}
++
++	return err;
++}
++
++static int au_wbr_mfs_sec(substring_t *arg, char *str,
++			  struct au_opt_wbr_create *create)
++{
++	int n, err;
++
++	err = 0;
++	if (!match_int(arg, &n) && 0 <= n)
++		create->mfs_second = n;
++	else {
++		AuErr("bad integer in %s\n", str);
++		err = -EINVAL;
++	}
++
++	return err;
++}
++
++static int au_wbr_create_val(char *str, struct au_opt_wbr_create *create)
++{
++	int err, e;
++	substring_t args[MAX_OPT_ARGS];
++
++	err = match_token(str, au_wbr_create_policy, args);
++	create->wbr_create = err;
++	switch (err) {
++	case AuWbrCreate_MFSRRV:
++		e = au_wbr_mfs_wmark(&args[0], str, create);
++		if (!e)
++			e = au_wbr_mfs_sec(&args[1], str, create);
++		if (unlikely(e))
++			err = e;
++		break;
++	case AuWbrCreate_MFSRR:
++		e = au_wbr_mfs_wmark(&args[0], str, create);
++		if (unlikely(e)) {
++			err = e;
++			break;
++		}
++		/*FALLTHROUGH*/
++	case AuWbrCreate_MFS:
++	case AuWbrCreate_PMFS:
++		create->mfs_second = AUFS_MFS_SECOND_DEF;
++		break;
++	case AuWbrCreate_MFSV:
++	case AuWbrCreate_PMFSV:
++		e = au_wbr_mfs_sec(&args[0], str, create);
++		if (unlikely(e))
++			err = e;
++		break;
++	}
++
++	return err;
++}
++
++const char *au_optstr_wbr_create(int wbr_create)
++{
++	return au_parser_pattern(wbr_create, (void *)au_wbr_create_policy);
++}
++
++static match_table_t au_wbr_copyup_policy = {
++	{AuWbrCopyup_TDP, "tdp"},
++	{AuWbrCopyup_TDP, "top-down-parent"},
++	{AuWbrCopyup_BUP, "bup"},
++	{AuWbrCopyup_BUP, "bottom-up-parent"},
++	{AuWbrCopyup_BU, "bu"},
++	{AuWbrCopyup_BU, "bottom-up"},
++	{-1, NULL}
++};
++
++static int au_wbr_copyup_val(char *str)
++{
++	substring_t args[MAX_OPT_ARGS];
++
++	return match_token(str, au_wbr_copyup_policy, args);
++}
++
++const char *au_optstr_wbr_copyup(int wbr_copyup)
++{
++	return au_parser_pattern(wbr_copyup, (void *)au_wbr_copyup_policy);
++}
++
++/* ---------------------------------------------------------------------- */
++
++static const int lkup_dirflags = LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
++
++static void dump_opts(struct au_opts *opts)
++{
++#ifdef CONFIG_AUFS_DEBUG
++	/* reduce stack space */
++	union {
++		struct au_opt_add *add;
++		struct au_opt_del *del;
++		struct au_opt_mod *mod;
++		struct au_opt_xino *xino;
++		struct au_opt_xino_itrunc *xino_itrunc;
++		struct au_opt_wbr_create *create;
++	} u;
++	struct au_opt *opt;
++
++	opt = opts->opt;
++	while (opt->type != Opt_tail) {
++		switch (opt->type) {
++		case Opt_add:
++			u.add = &opt->add;
++			AuDbg("add {b%d, %s, 0x%x, %p}\n",
++				  u.add->bindex, u.add->pathname, u.add->perm,
++				  u.add->path.dentry);
++			break;
++		case Opt_del:
++		case Opt_idel:
++			u.del = &opt->del;
++			AuDbg("del {%s, %p}\n",
++			      u.del->pathname, u.del->h_path.dentry);
++			break;
++		case Opt_mod:
++		case Opt_imod:
++			u.mod = &opt->mod;
++			AuDbg("mod {%s, 0x%x, %p}\n",
++				  u.mod->path, u.mod->perm, u.mod->h_root);
++			break;
++		case Opt_append:
++			u.add = &opt->add;
++			AuDbg("append {b%d, %s, 0x%x, %p}\n",
++				  u.add->bindex, u.add->pathname, u.add->perm,
++				  u.add->path.dentry);
++			break;
++		case Opt_prepend:
++			u.add = &opt->add;
++			AuDbg("prepend {b%d, %s, 0x%x, %p}\n",
++				  u.add->bindex, u.add->pathname, u.add->perm,
++				  u.add->path.dentry);
++			break;
++		case Opt_dirwh:
++			AuDbg("dirwh %d\n", opt->dirwh);
++			break;
++		case Opt_rdcache:
++			AuDbg("rdcache %d\n", opt->rdcache);
++			break;
++		case Opt_rdblk:
++			AuDbg("rdblk %u\n", opt->rdblk);
++			break;
++		case Opt_rdblk_def:
++			AuDbg("rdblk_def\n");
++			break;
++		case Opt_rdhash:
++			AuDbg("rdhash %u\n", opt->rdhash);
++			break;
++		case Opt_rdhash_def:
++			AuDbg("rdhash_def\n");
++			break;
++		case Opt_xino:
++			u.xino = &opt->xino;
++			AuDbg("xino {%s %.*s}\n",
++				  u.xino->path,
++				  AuDLNPair(u.xino->file->f_dentry));
++			break;
++		case Opt_trunc_xino:
++			AuLabel(trunc_xino);
++			break;
++		case Opt_notrunc_xino:
++			AuLabel(notrunc_xino);
++			break;
++		case Opt_trunc_xino_path:
++		case Opt_itrunc_xino:
++			u.xino_itrunc = &opt->xino_itrunc;
++			AuDbg("trunc_xino %d\n", u.xino_itrunc->bindex);
++			break;
++
++		case Opt_noxino:
++			AuLabel(noxino);
++			break;
++		case Opt_trunc_xib:
++			AuLabel(trunc_xib);
++			break;
++		case Opt_notrunc_xib:
++			AuLabel(notrunc_xib);
++			break;
++		case Opt_shwh:
++			AuLabel(shwh);
++			break;
++		case Opt_noshwh:
++			AuLabel(noshwh);
++			break;
++		case Opt_plink:
++			AuLabel(plink);
++			break;
++		case Opt_noplink:
++			AuLabel(noplink);
++			break;
++		case Opt_list_plink:
++			AuLabel(list_plink);
++			break;
++		case Opt_udba:
++			AuDbg("udba %d, %s\n",
++				  opt->udba, au_optstr_udba(opt->udba));
++			break;
++		case Opt_diropq_a:
++			AuLabel(diropq_a);
++			break;
++		case Opt_diropq_w:
++			AuLabel(diropq_w);
++			break;
++		case Opt_warn_perm:
++			AuLabel(warn_perm);
++			break;
++		case Opt_nowarn_perm:
++			AuLabel(nowarn_perm);
++			break;
++		case Opt_refrof:
++			AuLabel(refrof);
++			break;
++		case Opt_norefrof:
++			AuLabel(norefrof);
++			break;
++		case Opt_verbose:
++			AuLabel(verbose);
++			break;
++		case Opt_noverbose:
++			AuLabel(noverbose);
++			break;
++		case Opt_sum:
++			AuLabel(sum);
++			break;
++		case Opt_nosum:
++			AuLabel(nosum);
++			break;
++		case Opt_wsum:
++			AuLabel(wsum);
++			break;
++		case Opt_wbr_create:
++			u.create = &opt->wbr_create;
++			AuDbg("create %d, %s\n", u.create->wbr_create,
++				  au_optstr_wbr_create(u.create->wbr_create));
++			switch (u.create->wbr_create) {
++			case AuWbrCreate_MFSV:
++			case AuWbrCreate_PMFSV:
++				AuDbg("%d sec\n", u.create->mfs_second);
++				break;
++			case AuWbrCreate_MFSRR:
++				AuDbg("%llu watermark\n",
++					  u.create->mfsrr_watermark);
++				break;
++			case AuWbrCreate_MFSRRV:
++				AuDbg("%llu watermark, %d sec\n",
++					  u.create->mfsrr_watermark,
++					  u.create->mfs_second);
++				break;
++			}
++			break;
++		case Opt_wbr_copyup:
++			AuDbg("copyup %d, %s\n", opt->wbr_copyup,
++				  au_optstr_wbr_copyup(opt->wbr_copyup));
++			break;
++		default:
++			BUG();
++		}
++		opt++;
++	}
++#endif
++}
++
++void au_opts_free(struct au_opts *opts)
++{
++	struct au_opt *opt;
++
++	opt = opts->opt;
++	while (opt->type != Opt_tail) {
++		switch (opt->type) {
++		case Opt_add:
++		case Opt_append:
++		case Opt_prepend:
++			path_put(&opt->add.path);
++			break;
++		case Opt_del:
++		case Opt_idel:
++			path_put(&opt->del.h_path);
++			break;
++		case Opt_mod:
++		case Opt_imod:
++			dput(opt->mod.h_root);
++			break;
++		case Opt_xino:
++			fput(opt->xino.file);
++			break;
++		}
++		opt++;
++	}
++}
++
++static int opt_add(struct au_opt *opt, char *opt_str, unsigned long sb_flags,
++		   aufs_bindex_t bindex)
++{
++	int err;
++	struct au_opt_add *add = &opt->add;
++	char *p;
++
++	add->bindex = bindex;
++	add->perm = AuBrPerm_Last;
++	add->pathname = opt_str;
++	p = strchr(opt_str, '=');
++	if (p) {
++		*p++ = 0;
++		if (*p)
++			add->perm = br_perm_val(p);
++	}
++
++	err = vfsub_kern_path(add->pathname, lkup_dirflags, &add->path);
++	if (!err) {
++		if (!p) {
++			add->perm = AuBrPerm_RO;
++			if (au_test_fs_rr(add->path.dentry->d_sb))
++				add->perm = AuBrPerm_RR;
++			else if (!bindex && !(sb_flags & MS_RDONLY))
++				add->perm = AuBrPerm_RW;
++		}
++		opt->type = Opt_add;
++		goto out;
++	}
++	AuErr("lookup failed %s (%d)\n", add->pathname, err);
++	err = -EINVAL;
++
++ out:
++	return err;
++}
++
++static int au_opts_parse_del(struct au_opt_del *del, substring_t args[])
++{
++	int err;
++
++	del->pathname = args[0].from;
++	AuDbg("del path %s\n", del->pathname);
++
++	err = vfsub_kern_path(del->pathname, lkup_dirflags, &del->h_path);
++	if (unlikely(err))
++		AuErr("lookup failed %s (%d)\n", del->pathname, err);
++
++	return err;
++}
++
++#if 0 /* reserved for future use */
++static int au_opts_parse_idel(struct super_block *sb, aufs_bindex_t bindex,
++			      struct au_opt_del *del, substring_t args[])
++{
++	int err;
++	struct dentry *root;
++
++	err = -EINVAL;
++	root = sb->s_root;
++	aufs_read_lock(root, AuLock_FLUSH);
++	if (bindex < 0 || au_sbend(sb) < bindex) {
++		AuErr("out of bounds, %d\n", bindex);
++		goto out;
++	}
++
++	err = 0;
++	del->h_path.dentry = dget(au_h_dptr(root, bindex));
++	del->h_path.mnt = mntget(au_sbr_mnt(sb, bindex));
++
++ out:
++	aufs_read_unlock(root, !AuLock_IR);
++	return err;
++}
++#endif
++
++static int au_opts_parse_mod(struct au_opt_mod *mod, substring_t args[])
++{
++	int err;
++	struct path path;
++	char *p;
++
++	err = -EINVAL;
++	mod->path = args[0].from;
++	p = strchr(mod->path, '=');
++	if (unlikely(!p)) {
++		AuErr("no permssion %s\n", args[0].from);
++		goto out;
++	}
++
++	*p++ = 0;
++	err = vfsub_kern_path(mod->path, lkup_dirflags, &path);
++	if (unlikely(err)) {
++		AuErr("lookup failed %s (%d)\n", mod->path, err);
++		goto out;
++	}
++
++	mod->perm = br_perm_val(p);
++	AuDbg("mod path %s, perm 0x%x, %s\n", mod->path, mod->perm, p);
++	mod->h_root = dget(path.dentry);
++	path_put(&path);
++
++ out:
++	return err;
++}
++
++#if 0 /* reserved for future use */
++static int au_opts_parse_imod(struct super_block *sb, aufs_bindex_t bindex,
++			      struct au_opt_mod *mod, substring_t args[])
++{
++	int err;
++	struct dentry *root;
++
++	err = -EINVAL;
++	root = sb->s_root;
++	aufs_read_lock(root, AuLock_FLUSH);
++	if (bindex < 0 || au_sbend(sb) < bindex) {
++		AuErr("out of bounds, %d\n", bindex);
++		goto out;
++	}
++
++	err = 0;
++	mod->perm = br_perm_val(args[1].from);
++	AuDbg("mod path %s, perm 0x%x, %s\n",
++	      mod->path, mod->perm, args[1].from);
++	mod->h_root = dget(au_h_dptr(root, bindex));
++
++ out:
++	aufs_read_unlock(root, !AuLock_IR);
++	return err;
++}
++#endif
++
++static int au_opts_parse_xino(struct super_block *sb, struct au_opt_xino *xino,
++			      substring_t args[])
++{
++	int err;
++	struct file *file;
++
++	file = au_xino_create(sb, args[0].from, /*silent*/0);
++	err = PTR_ERR(file);
++	if (IS_ERR(file))
++		goto out;
++
++	err = -EINVAL;
++	if (unlikely(file->f_dentry->d_sb == sb)) {
++		fput(file);
++		AuErr("%s must be outside\n", args[0].from);
++		goto out;
++	}
++
++	err = 0;
++	xino->file = file;
++	xino->path = args[0].from;
++
++ out:
++	return err;
++}
++
++static
++int au_opts_parse_xino_itrunc_path(struct super_block *sb,
++				   struct au_opt_xino_itrunc *xino_itrunc,
++				   substring_t args[])
++{
++	int err;
++	aufs_bindex_t bend, bindex;
++	struct path path;
++	struct dentry *root;
++
++	err = vfsub_kern_path(args[0].from, lkup_dirflags, &path);
++	if (unlikely(err)) {
++		AuErr("lookup failed %s (%d)\n", args[0].from, err);
++		goto out;
++	}
++
++	xino_itrunc->bindex = -1;
++	root = sb->s_root;
++	aufs_read_lock(root, AuLock_FLUSH);
++	bend = au_sbend(sb);
++	for (bindex = 0; bindex <= bend; bindex++) {
++		if (au_h_dptr(root, bindex) == path.dentry) {
++			xino_itrunc->bindex = bindex;
++			break;
++		}
++	}
++	aufs_read_unlock(root, !AuLock_IR);
++	path_put(&path);
++
++	if (unlikely(xino_itrunc->bindex < 0)) {
++		AuErr("no such branch %s\n", args[0].from);
++		err = -EINVAL;
++	}
++
++ out:
++	return err;
++}
++
++/* called without aufs lock */
++int au_opts_parse(struct super_block *sb, char *str, struct au_opts *opts)
++{
++	int err, n, token;
++	aufs_bindex_t bindex;
++	unsigned char skipped;
++	struct dentry *root;
++	struct au_opt *opt, *opt_tail;
++	char *opt_str;
++	/* reduce the stack space */
++	union {
++		struct au_opt_xino_itrunc *xino_itrunc;
++		struct au_opt_wbr_create *create;
++	} u;
++	struct {
++		substring_t args[MAX_OPT_ARGS];
++	} *a;
++
++	err = -ENOMEM;
++	a = kmalloc(sizeof(*a), GFP_NOFS);
++	if (unlikely(!a))
++		goto out;
++
++	root = sb->s_root;
++	err = 0;
++	bindex = 0;
++	opt = opts->opt;
++	opt_tail = opt + opts->max_opt - 1;
++	opt->type = Opt_tail;
++	while (!err && (opt_str = strsep(&str, ",")) && *opt_str) {
++		err = -EINVAL;
++		skipped = 0;
++		token = match_token(opt_str, options, a->args);
++		switch (token) {
++		case Opt_br:
++			err = 0;
++			while (!err && (opt_str = strsep(&a->args[0].from, ":"))
++			       && *opt_str) {
++				err = opt_add(opt, opt_str, opts->sb_flags,
++					      bindex++);
++				if (unlikely(!err && ++opt > opt_tail)) {
++					err = -E2BIG;
++					break;
++				}
++				opt->type = Opt_tail;
++				skipped = 1;
++			}
++			break;
++		case Opt_add:
++			if (unlikely(match_int(&a->args[0], &n))) {
++				AuErr("bad integer in %s\n", opt_str);
++				break;
++			}
++			bindex = n;
++			err = opt_add(opt, a->args[1].from, opts->sb_flags,
++				      bindex);
++			if (!err)
++				opt->type = token;
++			break;
++		case Opt_append:
++			err = opt_add(opt, a->args[0].from, opts->sb_flags,
++				      /*dummy bindex*/1);
++			if (!err)
++				opt->type = token;
++			break;
++		case Opt_prepend:
++			err = opt_add(opt, a->args[0].from, opts->sb_flags,
++				      /*bindex*/0);
++			if (!err)
++				opt->type = token;
++			break;
++		case Opt_del:
++			err = au_opts_parse_del(&opt->del, a->args);
++			if (!err)
++				opt->type = token;
++			break;
++#if 0 /* reserved for future use */
++		case Opt_idel:
++			del->pathname = "(indexed)";
++			if (unlikely(match_int(&args[0], &n))) {
++				AuErr("bad integer in %s\n", opt_str);
++				break;
++			}
++			err = au_opts_parse_idel(sb, n, &opt->del, a->args);
++			if (!err)
++				opt->type = token;
++			break;
++#endif
++		case Opt_mod:
++			err = au_opts_parse_mod(&opt->mod, a->args);
++			if (!err)
++				opt->type = token;
++			break;
++#ifdef IMOD /* reserved for future use */
++		case Opt_imod:
++			u.mod->path = "(indexed)";
++			if (unlikely(match_int(&a->args[0], &n))) {
++				AuErr("bad integer in %s\n", opt_str);
++				break;
++			}
++			err = au_opts_parse_imod(sb, n, &opt->mod, a->args);
++			if (!err)
++				opt->type = token;
++			break;
++#endif
++		case Opt_xino:
++			err = au_opts_parse_xino(sb, &opt->xino, a->args);
++			if (!err)
++				opt->type = token;
++			break;
++
++		case Opt_trunc_xino_path:
++			err = au_opts_parse_xino_itrunc_path
++				(sb, &opt->xino_itrunc, a->args);
++			if (!err)
++				opt->type = token;
++			break;
++
++		case Opt_itrunc_xino:
++			u.xino_itrunc = &opt->xino_itrunc;
++			if (unlikely(match_int(&a->args[0], &n))) {
++				AuErr("bad integer in %s\n", opt_str);
++				break;
++			}
++			u.xino_itrunc->bindex = n;
++			aufs_read_lock(root, AuLock_FLUSH);
++			if (n < 0 || au_sbend(sb) < n) {
++				AuErr("out of bounds, %d\n", n);
++				aufs_read_unlock(root, !AuLock_IR);
++				break;
++			}
++			aufs_read_unlock(root, !AuLock_IR);
++			err = 0;
++			opt->type = token;
++			break;
++
++		case Opt_dirwh:
++			if (unlikely(match_int(&a->args[0], &opt->dirwh)))
++				break;
++			err = 0;
++			opt->type = token;
++			break;
++
++		case Opt_rdcache:
++			if (unlikely(match_int(&a->args[0], &opt->rdcache)))
++				break;
++			err = 0;
++			opt->type = token;
++			break;
++		case Opt_rdblk:
++			if (unlikely(match_int(&a->args[0], &n)
++				     || n <= 0
++				     || n > KMALLOC_MAX_SIZE)) {
++				AuErr("bad integer in %s\n", opt_str);
++				break;
++			}
++			if (unlikely(n < NAME_MAX)) {
++				AuErr("rdblk must be larger than %d\n",
++				      NAME_MAX);
++				break;
++			}
++			opt->rdblk = n;
++			err = 0;
++			opt->type = token;
++			break;
++		case Opt_rdhash:
++			if (unlikely(match_int(&a->args[0], &n)
++				     || n <= 0
++				     || n * sizeof(struct hlist_head)
++				     > KMALLOC_MAX_SIZE)) {
++				AuErr("bad integer in %s\n", opt_str);
++				break;
++			}
++			opt->rdhash = n;
++			err = 0;
++			opt->type = token;
++			break;
++
++		case Opt_trunc_xino:
++		case Opt_notrunc_xino:
++		case Opt_noxino:
++		case Opt_trunc_xib:
++		case Opt_notrunc_xib:
++		case Opt_shwh:
++		case Opt_noshwh:
++		case Opt_plink:
++		case Opt_noplink:
++		case Opt_list_plink:
++		case Opt_diropq_a:
++		case Opt_diropq_w:
++		case Opt_warn_perm:
++		case Opt_nowarn_perm:
++		case Opt_refrof:
++		case Opt_norefrof:
++		case Opt_verbose:
++		case Opt_noverbose:
++		case Opt_sum:
++		case Opt_nosum:
++		case Opt_wsum:
++		case Opt_rdblk_def:
++		case Opt_rdhash_def:
++			err = 0;
++			opt->type = token;
++			break;
++
++		case Opt_udba:
++			opt->udba = udba_val(a->args[0].from);
++			if (opt->udba >= 0) {
++				err = 0;
++				opt->type = token;
++			} else
++				AuErr("wrong value, %s\n", opt_str);
++			break;
++
++		case Opt_wbr_create:
++			u.create = &opt->wbr_create;
++			u.create->wbr_create
++				= au_wbr_create_val(a->args[0].from, u.create);
++			if (u.create->wbr_create >= 0) {
++				err = 0;
++				opt->type = token;
++			} else
++				AuErr("wrong value, %s\n", opt_str);
++			break;
++		case Opt_wbr_copyup:
++			opt->wbr_copyup = au_wbr_copyup_val(a->args[0].from);
++			if (opt->wbr_copyup >= 0) {
++				err = 0;
++				opt->type = token;
++			} else
++				AuErr("wrong value, %s\n", opt_str);
++			break;
++
++		case Opt_ignore:
++			AuWarn("ignored %s\n", opt_str);
++			/*FALLTHROUGH*/
++		case Opt_ignore_silent:
++			skipped = 1;
++			err = 0;
++			break;
++		case Opt_err:
++			AuErr("unknown option %s\n", opt_str);
++			break;
++		}
++
++		if (!err && !skipped) {
++			if (unlikely(++opt > opt_tail)) {
++				err = -E2BIG;
++				opt--;
++				opt->type = Opt_tail;
++				break;
++			}
++			opt->type = Opt_tail;
++		}
++	}
++
++	kfree(a);
++	dump_opts(opts);
++	if (unlikely(err))
++		au_opts_free(opts);
++
++ out:
++	return err;
++}
++
++static int au_opt_wbr_create(struct super_block *sb,
++			     struct au_opt_wbr_create *create)
++{
++	int err;
++	struct au_sbinfo *sbinfo;
++
++	SiMustWriteLock(sb);
++
++	err = 1; /* handled */
++	sbinfo = au_sbi(sb);
++	if (sbinfo->si_wbr_create_ops->fin) {
++		err = sbinfo->si_wbr_create_ops->fin(sb);
++		if (!err)
++			err = 1;
++	}
++
++	sbinfo->si_wbr_create = create->wbr_create;
++	sbinfo->si_wbr_create_ops = au_wbr_create_ops + create->wbr_create;
++	switch (create->wbr_create) {
++	case AuWbrCreate_MFSRRV:
++	case AuWbrCreate_MFSRR:
++		sbinfo->si_wbr_mfs.mfsrr_watermark = create->mfsrr_watermark;
++		/*FALLTHROUGH*/
++	case AuWbrCreate_MFS:
++	case AuWbrCreate_MFSV:
++	case AuWbrCreate_PMFS:
++	case AuWbrCreate_PMFSV:
++		sbinfo->si_wbr_mfs.mfs_expire = create->mfs_second * HZ;
++		break;
++	}
++
++	if (sbinfo->si_wbr_create_ops->init)
++		sbinfo->si_wbr_create_ops->init(sb); /* ignore */
++
++	return err;
++}
++
++/*
++ * returns,
++ * plus: processed without an error
++ * zero: unprocessed
++ */
++static int au_opt_simple(struct super_block *sb, struct au_opt *opt,
++			 struct au_opts *opts)
++{
++	int err;
++	struct au_sbinfo *sbinfo;
++
++	SiMustWriteLock(sb);
++
++	err = 1; /* handled */
++	sbinfo = au_sbi(sb);
++	switch (opt->type) {
++	case Opt_udba:
++		sbinfo->si_mntflags &= ~AuOptMask_UDBA;
++		sbinfo->si_mntflags |= opt->udba;
++		opts->given_udba |= opt->udba;
++		break;
++
++	case Opt_plink:
++		au_opt_set(sbinfo->si_mntflags, PLINK);
++		break;
++	case Opt_noplink:
++		if (au_opt_test(sbinfo->si_mntflags, PLINK))
++			au_plink_put(sb);
++		au_opt_clr(sbinfo->si_mntflags, PLINK);
++		break;
++	case Opt_list_plink:
++		if (au_opt_test(sbinfo->si_mntflags, PLINK))
++			au_plink_list(sb);
++		break;
++
++	case Opt_diropq_a:
++		au_opt_set(sbinfo->si_mntflags, ALWAYS_DIROPQ);
++		break;
++	case Opt_diropq_w:
++		au_opt_clr(sbinfo->si_mntflags, ALWAYS_DIROPQ);
++		break;
++
++	case Opt_warn_perm:
++		au_opt_set(sbinfo->si_mntflags, WARN_PERM);
++		break;
++	case Opt_nowarn_perm:
++		au_opt_clr(sbinfo->si_mntflags, WARN_PERM);
++		break;
++
++	case Opt_refrof:
++		au_opt_set(sbinfo->si_mntflags, REFROF);
++		break;
++	case Opt_norefrof:
++		au_opt_clr(sbinfo->si_mntflags, REFROF);
++		break;
++
++	case Opt_verbose:
++		au_opt_set(sbinfo->si_mntflags, VERBOSE);
++		break;
++	case Opt_noverbose:
++		au_opt_clr(sbinfo->si_mntflags, VERBOSE);
++		break;
++
++	case Opt_sum:
++		au_opt_set(sbinfo->si_mntflags, SUM);
++		break;
++	case Opt_wsum:
++		au_opt_clr(sbinfo->si_mntflags, SUM);
++		au_opt_set(sbinfo->si_mntflags, SUM_W);
++	case Opt_nosum:
++		au_opt_clr(sbinfo->si_mntflags, SUM);
++		au_opt_clr(sbinfo->si_mntflags, SUM_W);
++		break;
++
++	case Opt_wbr_create:
++		err = au_opt_wbr_create(sb, &opt->wbr_create);
++		break;
++	case Opt_wbr_copyup:
++		sbinfo->si_wbr_copyup = opt->wbr_copyup;
++		sbinfo->si_wbr_copyup_ops = au_wbr_copyup_ops + opt->wbr_copyup;
++		break;
++
++	case Opt_dirwh:
++		sbinfo->si_dirwh = opt->dirwh;
++		break;
++
++	case Opt_rdcache:
++		sbinfo->si_rdcache = opt->rdcache * HZ;
++		break;
++	case Opt_rdblk:
++		sbinfo->si_rdblk = opt->rdblk;
++		break;
++	case Opt_rdblk_def:
++		sbinfo->si_rdblk = AUFS_RDBLK_DEF;
++		break;
++	case Opt_rdhash:
++		sbinfo->si_rdhash = opt->rdhash;
++		break;
++	case Opt_rdhash_def:
++		sbinfo->si_rdhash = AUFS_RDHASH_DEF;
++		break;
++
++	case Opt_shwh:
++		au_opt_set(sbinfo->si_mntflags, SHWH);
++		break;
++	case Opt_noshwh:
++		au_opt_clr(sbinfo->si_mntflags, SHWH);
++		break;
++
++	case Opt_trunc_xino:
++		au_opt_set(sbinfo->si_mntflags, TRUNC_XINO);
++		break;
++	case Opt_notrunc_xino:
++		au_opt_clr(sbinfo->si_mntflags, TRUNC_XINO);
++		break;
++
++	case Opt_trunc_xino_path:
++	case Opt_itrunc_xino:
++		err = au_xino_trunc(sb, opt->xino_itrunc.bindex);
++		if (!err)
++			err = 1;
++		break;
++
++	case Opt_trunc_xib:
++		au_fset_opts(opts->flags, TRUNC_XIB);
++		break;
++	case Opt_notrunc_xib:
++		au_fclr_opts(opts->flags, TRUNC_XIB);
++		break;
++
++	default:
++		err = 0;
++		break;
++	}
++
++	return err;
++}
++
++/*
++ * returns tri-state.
++ * plus: processed without an error
++ * zero: unprocessed
++ * minus: error
++ */
++static int au_opt_br(struct super_block *sb, struct au_opt *opt,
++		     struct au_opts *opts)
++{
++	int err, do_refresh;
++
++	err = 0;
++	switch (opt->type) {
++	case Opt_append:
++		opt->add.bindex = au_sbend(sb) + 1;
++		if (opt->add.bindex < 0)
++			opt->add.bindex = 0;
++		goto add;
++	case Opt_prepend:
++		opt->add.bindex = 0;
++	add:
++	case Opt_add:
++		err = au_br_add(sb, &opt->add,
++				au_ftest_opts(opts->flags, REMOUNT));
++		if (!err) {
++			err = 1;
++			au_fset_opts(opts->flags, REFRESH_DIR);
++			if (au_br_whable(opt->add.perm))
++				au_fset_opts(opts->flags, REFRESH_NONDIR);
++		}
++		break;
++
++	case Opt_del:
++	case Opt_idel:
++		err = au_br_del(sb, &opt->del,
++				au_ftest_opts(opts->flags, REMOUNT));
++		if (!err) {
++			err = 1;
++			au_fset_opts(opts->flags, TRUNC_XIB);
++			au_fset_opts(opts->flags, REFRESH_DIR);
++			au_fset_opts(opts->flags, REFRESH_NONDIR);
++		}
++		break;
++
++	case Opt_mod:
++	case Opt_imod:
++		err = au_br_mod(sb, &opt->mod,
++				au_ftest_opts(opts->flags, REMOUNT),
++				&do_refresh);
++		if (!err) {
++			err = 1;
++			if (do_refresh) {
++				au_fset_opts(opts->flags, REFRESH_DIR);
++				au_fset_opts(opts->flags, REFRESH_NONDIR);
++			}
++		}
++		break;
++	}
++
++	return err;
++}
++
++static int au_opt_xino(struct super_block *sb, struct au_opt *opt,
++		       struct au_opt_xino **opt_xino,
++		       struct au_opts *opts)
++{
++	int err;
++	aufs_bindex_t bend, bindex;
++	struct dentry *root, *parent, *h_root;
++
++	err = 0;
++	switch (opt->type) {
++	case Opt_xino:
++		err = au_xino_set(sb, &opt->xino,
++				  !!au_ftest_opts(opts->flags, REMOUNT));
++		if (unlikely(err))
++			break;
++
++		*opt_xino = &opt->xino;
++		au_xino_brid_set(sb, -1);
++
++		/* safe d_parent access */
++		parent = opt->xino.file->f_dentry->d_parent;
++		root = sb->s_root;
++		bend = au_sbend(sb);
++		for (bindex = 0; bindex <= bend; bindex++) {
++			h_root = au_h_dptr(root, bindex);
++			if (h_root == parent) {
++				au_xino_brid_set(sb, au_sbr_id(sb, bindex));
++				break;
++			}
++		}
++		break;
++
++	case Opt_noxino:
++		au_xino_clr(sb);
++		au_xino_brid_set(sb, -1);
++		*opt_xino = (void *)-1;
++		break;
++	}
++
++	return err;
++}
++
++int au_opts_verify(struct super_block *sb, unsigned long sb_flags,
++		   unsigned int pending)
++{
++	int err;
++	aufs_bindex_t bindex, bend;
++	unsigned char do_plink, skip, do_free;
++	struct au_branch *br;
++	struct au_wbr *wbr;
++	struct dentry *root;
++	struct inode *dir, *h_dir;
++	struct au_sbinfo *sbinfo;
++	struct au_hinode *hdir;
++
++	SiMustAnyLock(sb);
++
++	sbinfo = au_sbi(sb);
++	AuDebugOn(!(sbinfo->si_mntflags & AuOptMask_UDBA));
++
++	if (!(sb_flags & MS_RDONLY)) {
++		if (unlikely(!au_br_writable(au_sbr_perm(sb, 0))))
++			AuWarn("first branch should be rw\n");
++		if (unlikely(au_opt_test(sbinfo->si_mntflags, SHWH)))
++			AuWarn("shwh should be used with ro\n");
++	}
++
++	if (au_opt_test((sbinfo->si_mntflags | pending), UDBA_HINOTIFY)
++	    && !au_opt_test(sbinfo->si_mntflags, XINO))
++		AuWarn("udba=inotify requires xino\n");
++
++	err = 0;
++	root = sb->s_root;
++	dir = sb->s_root->d_inode;
++	do_plink = !!au_opt_test(sbinfo->si_mntflags, PLINK);
++	bend = au_sbend(sb);
++	for (bindex = 0; !err && bindex <= bend; bindex++) {
++		skip = 0;
++		h_dir = au_h_iptr(dir, bindex);
++		br = au_sbr(sb, bindex);
++		do_free = 0;
++
++		wbr = br->br_wbr;
++		if (wbr)
++			wbr_wh_read_lock(wbr);
++
++		switch (br->br_perm) {
++		case AuBrPerm_RO:
++		case AuBrPerm_ROWH:
++		case AuBrPerm_RR:
++		case AuBrPerm_RRWH:
++			do_free = !!wbr;
++			skip = (!wbr
++				|| (!wbr->wbr_whbase
++				    && !wbr->wbr_plink
++				    && !wbr->wbr_orph));
++			break;
++
++		case AuBrPerm_RWNoLinkWH:
++			/* skip = (!br->br_whbase && !br->br_orph); */
++			skip = (!wbr || !wbr->wbr_whbase);
++			if (skip && wbr) {
++				if (do_plink)
++					skip = !!wbr->wbr_plink;
++				else
++					skip = !wbr->wbr_plink;
++			}
++			break;
++
++		case AuBrPerm_RW:
++			/* skip = (br->br_whbase && br->br_ohph); */
++			skip = (wbr && wbr->wbr_whbase);
++			if (skip) {
++				if (do_plink)
++					skip = !!wbr->wbr_plink;
++				else
++					skip = !wbr->wbr_plink;
++			}
++			break;
++
++		default:
++			BUG();
++		}
++		if (wbr)
++			wbr_wh_read_unlock(wbr);
++
++		if (skip)
++			continue;
++
++		hdir = au_hi(dir, bindex);
++		au_hin_imtx_lock_nested(hdir, AuLsc_I_PARENT);
++		if (wbr)
++			wbr_wh_write_lock(wbr);
++		err = au_wh_init(au_h_dptr(root, bindex), br, sb);
++		if (wbr)
++			wbr_wh_write_unlock(wbr);
++		au_hin_imtx_unlock(hdir);
++
++		if (!err && do_free) {
++			kfree(wbr);
++			br->br_wbr = NULL;
++		}
++	}
++
++	return err;
++}
++
++int au_opts_mount(struct super_block *sb, struct au_opts *opts)
++{
++	int err;
++	unsigned int tmp;
++	aufs_bindex_t bend;
++	struct au_opt *opt;
++	struct au_opt_xino *opt_xino, xino;
++	struct au_sbinfo *sbinfo;
++
++	SiMustWriteLock(sb);
++
++	err = 0;
++	opt_xino = NULL;
++	opt = opts->opt;
++	while (err >= 0 && opt->type != Opt_tail)
++		err = au_opt_simple(sb, opt++, opts);
++	if (err > 0)
++		err = 0;
++	else if (unlikely(err < 0))
++		goto out;
++
++	/* disable xino and udba temporary */
++	sbinfo = au_sbi(sb);
++	tmp = sbinfo->si_mntflags;
++	au_opt_clr(sbinfo->si_mntflags, XINO);
++	au_opt_set_udba(sbinfo->si_mntflags, UDBA_REVAL);
++
++	opt = opts->opt;
++	while (err >= 0 && opt->type != Opt_tail)
++		err = au_opt_br(sb, opt++, opts);
++	if (err > 0)
++		err = 0;
++	else if (unlikely(err < 0))
++		goto out;
++
++	bend = au_sbend(sb);
++	if (unlikely(bend < 0)) {
++		err = -EINVAL;
++		AuErr("no branches\n");
++		goto out;
++	}
++
++	if (au_opt_test(tmp, XINO))
++		au_opt_set(sbinfo->si_mntflags, XINO);
++	opt = opts->opt;
++	while (!err && opt->type != Opt_tail)
++		err = au_opt_xino(sb, opt++, &opt_xino, opts);
++	if (unlikely(err))
++		goto out;
++
++	err = au_opts_verify(sb, sb->s_flags, tmp);
++	if (unlikely(err))
++		goto out;
++
++	/* restore xino */
++	if (au_opt_test(tmp, XINO) && !opt_xino) {
++		xino.file = au_xino_def(sb);
++		err = PTR_ERR(xino.file);
++		if (IS_ERR(xino.file))
++			goto out;
++
++		err = au_xino_set(sb, &xino, /*remount*/0);
++		fput(xino.file);
++		if (unlikely(err))
++			goto out;
++	}
++
++	/* restore udba */
++	sbinfo->si_mntflags &= ~AuOptMask_UDBA;
++	sbinfo->si_mntflags |= (tmp & AuOptMask_UDBA);
++	if (au_opt_test(tmp, UDBA_HINOTIFY)) {
++		struct inode *dir = sb->s_root->d_inode;
++		au_reset_hinotify(dir,
++				  au_hi_flags(dir, /*isdir*/1) & ~AuHi_XINO);
++	}
++
++ out:
++	return err;
++}
++
++int au_opts_remount(struct super_block *sb, struct au_opts *opts)
++{
++	int err, rerr;
++	struct inode *dir;
++	struct au_opt_xino *opt_xino;
++	struct au_opt *opt;
++	struct au_sbinfo *sbinfo;
++
++	SiMustWriteLock(sb);
++
++	dir = sb->s_root->d_inode;
++	sbinfo = au_sbi(sb);
++	err = 0;
++	opt_xino = NULL;
++	opt = opts->opt;
++	while (err >= 0 && opt->type != Opt_tail) {
++		err = au_opt_simple(sb, opt, opts);
++		if (!err)
++			err = au_opt_br(sb, opt, opts);
++		if (!err)
++			err = au_opt_xino(sb, opt, &opt_xino, opts);
++		opt++;
++	}
++	if (err > 0)
++		err = 0;
++	AuTraceErr(err);
++	/* go on even err */
++
++	rerr = au_opts_verify(sb, opts->sb_flags, /*pending*/0);
++	if (unlikely(rerr && !err))
++		err = rerr;
++
++	if (au_ftest_opts(opts->flags, TRUNC_XIB)) {
++		rerr = au_xib_trunc(sb);
++		if (unlikely(rerr && !err))
++			err = rerr;
++	}
++
++	/* will be handled by the caller */
++	if (!au_ftest_opts(opts->flags, REFRESH_DIR)
++	    && (opts->given_udba || au_opt_test(sbinfo->si_mntflags, XINO)))
++		au_fset_opts(opts->flags, REFRESH_DIR);
++
++	AuDbg("status 0x%x\n", opts->flags);
++	return err;
++}
++
++/* ---------------------------------------------------------------------- */
++
++unsigned int au_opt_udba(struct super_block *sb)
++{
++	return au_mntflags(sb) & AuOptMask_UDBA;
++}
+diff -Nur linux-2.6.31.5.orig/fs/aufs/opts.h linux-2.6.31.5/fs/aufs/opts.h
+--- linux-2.6.31.5.orig/fs/aufs/opts.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.31.5/fs/aufs/opts.h	2009-11-15 22:02:37.000000000 +0100
+@@ -0,0 +1,196 @@
++/*
++ * Copyright (C) 2005-2009 Junjiro R. Okajima
++ *
++ * This program, aufs is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
++ */
++
++/*
++ * mount options/flags
++ */
++
++#ifndef __AUFS_OPTS_H__
++#define __AUFS_OPTS_H__
++
++#ifdef __KERNEL__
++
++#include <linux/path.h>
++#include <linux/aufs_type.h>
++
++struct file;
++struct super_block;
++
++/* ---------------------------------------------------------------------- */
++
++/* mount flags */
++#define AuOpt_XINO		1		/* external inode number bitmap
++						   and translation table */
++#define AuOpt_TRUNC_XINO	(1 << 1)	/* truncate xino files */
++#define AuOpt_UDBA_NONE		(1 << 2)	/* users direct branch access */
++#define AuOpt_UDBA_REVAL	(1 << 3)
++#define AuOpt_UDBA_HINOTIFY	(1 << 4)
++#define AuOpt_SHWH		(1 << 5)	/* show whiteout */
++#define AuOpt_PLINK		(1 << 6)	/* pseudo-link */
++#define AuOpt_DIRPERM1		(1 << 7)	/* unimplemented */
++#define AuOpt_REFROF		(1 << 8)	/* unimplemented */
++#define AuOpt_ALWAYS_DIROPQ	(1 << 9)	/* policy to creating diropq */
++#define AuOpt_SUM		(1 << 10)	/* summation for statfs(2) */
++#define AuOpt_SUM_W		(1 << 11)	/* unimplemented */
++#define AuOpt_WARN_PERM		(1 << 12)	/* warn when add-branch */
++#define AuOpt_VERBOSE		(1 << 13)	/* busy inode when del-branch */
++
++#ifndef CONFIG_AUFS_HINOTIFY
++#undef AuOpt_UDBA_HINOTIFY
++#define AuOpt_UDBA_HINOTIFY	0
++#endif
++#ifndef CONFIG_AUFS_SHWH
++#undef AuOpt_SHWH
++#define AuOpt_SHWH		0
++#endif
++
++#define AuOpt_Def	(AuOpt_XINO \
++			 | AuOpt_UDBA_REVAL \
++			 | AuOpt_PLINK \
++			 /* | AuOpt_DIRPERM1 */ \
++			 | AuOpt_WARN_PERM)
++#define AuOptMask_UDBA	(AuOpt_UDBA_NONE \
++			 | AuOpt_UDBA_REVAL \
++			 | AuOpt_UDBA_HINOTIFY)
++
++#define au_opt_test(flags, name)	(flags & AuOpt_##name)
++#define au_opt_set(flags, name) do { \
++	BUILD_BUG_ON(AuOpt_##name & AuOptMask_UDBA); \
++	((flags) |= AuOpt_##name); \
++} while (0)
++#define au_opt_set_udba(flags, name) do { \
++	(flags) &= ~AuOptMask_UDBA; \
++	((flags) |= AuOpt_##name); \
++} while (0)
++#define au_opt_clr(flags, name)		{ ((flags) &= ~AuOpt_##name); }
++
++/* ---------------------------------------------------------------------- */
++
++/* policies to select one among multiple writable branches */
++enum {
++	AuWbrCreate_TDP,	/* top down parent */
++	AuWbrCreate_RR,		/* round robin */
++	AuWbrCreate_MFS,	/* most free space */
++	AuWbrCreate_MFSV,	/* mfs with seconds */
++	AuWbrCreate_MFSRR,	/* mfs then rr */
++	AuWbrCreate_MFSRRV,	/* mfs then rr with seconds */
++	AuWbrCreate_PMFS,	/* parent and mfs */
++	AuWbrCreate_PMFSV,	/* parent and mfs with seconds */
++
++	AuWbrCreate_Def = AuWbrCreate_TDP
++};
++
++enum {
++	AuWbrCopyup_TDP,	/* top down parent */
++	AuWbrCopyup_BUP,	/* bottom up parent */
++	AuWbrCopyup_BU,		/* bottom up */
++
++	AuWbrCopyup_Def = AuWbrCopyup_TDP
++};
++
++/* ---------------------------------------------------------------------- */
++
++struct au_opt_add {
++	aufs_bindex_t	bindex;
++	char		*pathname;
++	int		perm;
++	struct path	path;
++};
++
++struct au_opt_del {
++	char		*pathname;
++	struct path	h_path;
++};
++
++struct au_opt_mod {
++	char		*path;
++	int		perm;
++	struct dentry	*h_root;
++};
++
++struct au_opt_xino {
++	char		*path;
++	struct file	*file;
++};
++
++struct au_opt_xino_itrunc {
++	aufs_bindex_t	bindex;
++};
++
++struct au_opt_wbr_create {
++	int			wbr_create;
++	int			mfs_second;
++	unsigned long long	mfsrr_watermark;
++};
++
++struct au_opt {
++	int type;
++	union {
++		struct au_opt_xino	xino;
++		struct au_opt_xino_itrunc xino_itrunc;
++		struct au_opt_add	add;
++		struct au_opt_del	del;
++		struct au_opt_mod	mod;
++		int			dirwh;
++		int			rdcache;
++		unsigned int		rdblk;
++		unsigned int		rdhash;
++		int			udba;
++		struct au_opt_wbr_create wbr_create;
++		int			wbr_copyup;
++	};
++};
++
++/* opts flags */
++#define AuOpts_REMOUNT		1
++#define AuOpts_REFRESH_DIR	(1 << 1)
++#define AuOpts_REFRESH_NONDIR	(1 << 2)
++#define AuOpts_TRUNC_XIB	(1 << 3)
++#define au_ftest_opts(flags, name)	((flags) & AuOpts_##name)
++#define au_fset_opts(flags, name)	{ (flags) |= AuOpts_##name; }
++#define au_fclr_opts(flags, name)	{ (flags) &= ~AuOpts_##name; }
++
++struct au_opts {
++	struct au_opt	*opt;
++	int		max_opt;
++
++	unsigned int	given_udba;
++	unsigned int	flags;
++	unsigned long	sb_flags;
++};
++
++/* ---------------------------------------------------------------------- */
++
++const char *au_optstr_br_perm(int brperm);
++const char *au_optstr_udba(int udba);
++const char *au_optstr_wbr_copyup(int wbr_copyup);
++const char *au_optstr_wbr_create(int wbr_create);
++
++void au_opts_free(struct au_opts *opts);
++int au_opts_parse(struct super_block *sb, char *str, struct au_opts *opts);
++int au_opts_verify(struct super_block *sb, unsigned long sb_flags,
++		   unsigned int pending);
++int au_opts_mount(struct super_block *sb, struct au_opts *opts);
++int au_opts_remount(struct super_block *sb, struct au_opts *opts);
++
++unsigned int au_opt_udba(struct super_block *sb);
++
++/* ---------------------------------------------------------------------- */
++
++#endif /* __KERNEL__ */
++#endif /* __AUFS_OPTS_H__ */
+diff -Nur linux-2.6.31.5.orig/fs/aufs/plink.c linux-2.6.31.5/fs/aufs/plink.c
+--- linux-2.6.31.5.orig/fs/aufs/plink.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.31.5/fs/aufs/plink.c	2009-11-15 22:02:37.000000000 +0100
+@@ -0,0 +1,354 @@
++/*
++ * Copyright (C) 2005-2009 Junjiro R. Okajima
++ *
++ * This program, aufs is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
++ */
++
++/*
++ * pseudo-link
++ */
++
++#include "aufs.h"
++
++/*
++ * while a user process is maintaining the pseudo-links,
++ * prohibit adding a new plink and any branch manipulation.
++ */
++void au_plink_block_maintain(struct super_block *sb)
++{
++	struct au_sbinfo *sbi = au_sbi(sb);
++
++	SiMustAnyLock(sb);
++
++	/* gave up wake_up_bit() */
++	wait_event(sbi->si_plink_wq, !au_ftest_si(sbi, MAINTAIN_PLINK));
++}
++
++/* ---------------------------------------------------------------------- */
++
++struct pseudo_link {
++	struct list_head list;
++	struct inode *inode;
++};
++
++#ifdef CONFIG_AUFS_DEBUG
++void au_plink_list(struct super_block *sb)
++{
++	struct au_sbinfo *sbinfo;
++	struct list_head *plink_list;
++	struct pseudo_link *plink;
++
++	SiMustAnyLock(sb);
++
++	sbinfo = au_sbi(sb);
++	AuDebugOn(!au_opt_test(au_mntflags(sb), PLINK));
++
++	plink_list = &sbinfo->si_plink.head;
++	spin_lock(&sbinfo->si_plink.spin);
++	list_for_each_entry(plink, plink_list, list)
++		AuDbg("%lu\n", plink->inode->i_ino);
++	spin_unlock(&sbinfo->si_plink.spin);
++}
++#endif
++
++/* is the inode pseudo-linked? */
++int au_plink_test(struct inode *inode)
++{
++	int found;
++	struct au_sbinfo *sbinfo;
++	struct list_head *plink_list;
++	struct pseudo_link *plink;
++
++	sbinfo = au_sbi(inode->i_sb);
++	AuRwMustAnyLock(&sbinfo->si_rwsem);
++	AuDebugOn(!au_opt_test(au_mntflags(inode->i_sb), PLINK));
++
++	found = 0;
++	plink_list = &sbinfo->si_plink.head;
++	spin_lock(&sbinfo->si_plink.spin);
++	list_for_each_entry(plink, plink_list, list)
++		if (plink->inode == inode) {
++			found = 1;
++			break;
++		}
++	spin_unlock(&sbinfo->si_plink.spin);
++	return found;
++}
++
++/* ---------------------------------------------------------------------- */
++
++/*
++ * generate a name for plink.
++ * the file will be stored under AUFS_WH_PLINKDIR.
++ */
++/* 20 is the maximum number of decimal digits in an unsigned 64-bit integer */
++#define PLINK_NAME_LEN	((20 + 1) * 2)
++
++static int plink_name(char *name, int len, struct inode *inode,
++		      aufs_bindex_t bindex)
++{
++	int rlen;
++	struct inode *h_inode;
++
++	h_inode = au_h_iptr(inode, bindex);
++	rlen = snprintf(name, len, "%lu.%lu", inode->i_ino, h_inode->i_ino);
++	return rlen;
++}
++
++/* lookup the plink-ed @inode under the branch at @bindex */
++struct dentry *au_plink_lkup(struct inode *inode, aufs_bindex_t bindex)
++{
++	struct dentry *h_dentry, *h_parent;
++	struct au_branch *br;
++	struct inode *h_dir;
++	char a[PLINK_NAME_LEN];
++	struct qstr tgtname = {
++		.name	= a
++	};
++
++	br = au_sbr(inode->i_sb, bindex);
++	h_parent = br->br_wbr->wbr_plink;
++	h_dir = h_parent->d_inode;
++	tgtname.len = plink_name(a, sizeof(a), inode, bindex);
++
++	/* always superio. */
++	mutex_lock_nested(&h_dir->i_mutex, AuLsc_I_CHILD2);
++	h_dentry = au_sio_lkup_one(&tgtname, h_parent, br);
++	mutex_unlock(&h_dir->i_mutex);
++	return h_dentry;
++}
++
++/* create a pseudo-link */
++static int do_whplink(struct qstr *tgt, struct dentry *h_parent,
++		      struct dentry *h_dentry, struct au_branch *br)
++{
++	int err;
++	struct path h_path = {
++		.mnt = br->br_mnt
++	};
++	struct inode *h_dir;
++
++	h_dir = h_parent->d_inode;
++ again:
++	h_path.dentry = au_lkup_one(tgt, h_parent, br, /*nd*/NULL);
++	err = PTR_ERR(h_path.dentry);
++	if (IS_ERR(h_path.dentry))
++		goto out;
++
++	err = 0;
++	/* wh.plink dir is not monitored */
++	if (h_path.dentry->d_inode
++	    && h_path.dentry->d_inode != h_dentry->d_inode) {
++		err = vfsub_unlink(h_dir, &h_path, /*force*/0);
++		dput(h_path.dentry);
++		h_path.dentry = NULL;
++		if (!err)
++			goto again;
++	}
++	if (!err && !h_path.dentry->d_inode)
++		err = vfsub_link(h_dentry, h_dir, &h_path);
++	dput(h_path.dentry);
++
++ out:
++	return err;
++}
++
++struct do_whplink_args {
++	int *errp;
++	struct qstr *tgt;
++	struct dentry *h_parent;
++	struct dentry *h_dentry;
++	struct au_branch *br;
++};
++
++static void call_do_whplink(void *args)
++{
++	struct do_whplink_args *a = args;
++	*a->errp = do_whplink(a->tgt, a->h_parent, a->h_dentry, a->br);
++}
++
++static int whplink(struct dentry *h_dentry, struct inode *inode,
++		   aufs_bindex_t bindex, struct au_branch *br)
++{
++	int err, wkq_err;
++	struct au_wbr *wbr;
++	struct dentry *h_parent;
++	struct inode *h_dir;
++	char a[PLINK_NAME_LEN];
++	struct qstr tgtname = {
++		.name = a
++	};
++
++	wbr = au_sbr(inode->i_sb, bindex)->br_wbr;
++	h_parent = wbr->wbr_plink;
++	h_dir = h_parent->d_inode;
++	tgtname.len = plink_name(a, sizeof(a), inode, bindex);
++
++	/* always superio. */
++	mutex_lock_nested(&h_dir->i_mutex, AuLsc_I_CHILD2);
++	if (!au_test_wkq(current)) {
++		struct do_whplink_args args = {
++			.errp		= &err,
++			.tgt		= &tgtname,
++			.h_parent	= h_parent,
++			.h_dentry	= h_dentry,
++			.br		= br
++		};
++		wkq_err = au_wkq_wait(call_do_whplink, &args);
++		if (unlikely(wkq_err))
++			err = wkq_err;
++	} else
++		err = do_whplink(&tgtname, h_parent, h_dentry, br);
++	mutex_unlock(&h_dir->i_mutex);
++
++	return err;
++}
++
++/* free a single plink */
++static void do_put_plink(struct pseudo_link *plink, int do_del)
++{
++	iput(plink->inode);
++	if (do_del)
++		list_del(&plink->list);
++	kfree(plink);
++}
++
++/*
++ * create a new pseudo-link for @h_dentry on @bindex.
++ * the linked inode is held in aufs @inode.
++ */
++void au_plink_append(struct inode *inode, aufs_bindex_t bindex,
++		     struct dentry *h_dentry)
++{
++	struct super_block *sb;
++	struct au_sbinfo *sbinfo;
++	struct list_head *plink_list;
++	struct pseudo_link *plink;
++	int found, err, cnt;
++
++	sb = inode->i_sb;
++	sbinfo = au_sbi(sb);
++	AuDebugOn(!au_opt_test(au_mntflags(sb), PLINK));
++
++	err = 0;
++	cnt = 0;
++	found = 0;
++	plink_list = &sbinfo->si_plink.head;
++	spin_lock(&sbinfo->si_plink.spin);
++	list_for_each_entry(plink, plink_list, list) {
++		cnt++;
++		if (plink->inode == inode) {
++			found = 1;
++			break;
++		}
++	}
++	if (found) {
++		spin_unlock(&sbinfo->si_plink.spin);
++		return;
++	}
++
++	plink = NULL;
++	if (!found) {
++		plink = kmalloc(sizeof(*plink), GFP_ATOMIC);
++		if (plink) {
++			plink->inode = au_igrab(inode);
++			list_add(&plink->list, plink_list);
++			cnt++;
++		} else
++			err = -ENOMEM;
++	}
++	spin_unlock(&sbinfo->si_plink.spin);
++
++	if (!err) {
++		au_plink_block_maintain(sb);
++		err = whplink(h_dentry, inode, bindex, au_sbr(sb, bindex));
++	}
++
++	if (unlikely(cnt > AUFS_PLINK_WARN))
++		AuWarn1("unexpectedly many pseudo links, %d\n", cnt);
++	if (unlikely(err)) {
++		AuWarn("err %d, damaged pseudo link.\n", err);
++		if (!found && plink)
++			do_put_plink(plink, /*do_del*/1);
++	}
++}
++
++/* free all plinks */
++void au_plink_put(struct super_block *sb)
++{
++	struct au_sbinfo *sbinfo;
++	struct list_head *plink_list;
++	struct pseudo_link *plink, *tmp;
++
++	SiMustWriteLock(sb);
++
++	sbinfo = au_sbi(sb);
++	AuDebugOn(!au_opt_test(au_mntflags(sb), PLINK));
++
++	plink_list = &sbinfo->si_plink.head;
++	/* no spin_lock since sbinfo is write-locked */
++	list_for_each_entry_safe(plink, tmp, plink_list, list)
++		do_put_plink(plink, 0);
++	INIT_LIST_HEAD(plink_list);
++}
++
++/* free the plinks on a branch specified by @br_id */
++void au_plink_half_refresh(struct super_block *sb, aufs_bindex_t br_id)
++{
++	struct au_sbinfo *sbinfo;
++	struct list_head *plink_list;
++	struct pseudo_link *plink, *tmp;
++	struct inode *inode;
++	aufs_bindex_t bstart, bend, bindex;
++	unsigned char do_put;
++
++	SiMustWriteLock(sb);
++
++	sbinfo = au_sbi(sb);
++	AuDebugOn(!au_opt_test(au_mntflags(sb), PLINK));
++
++	plink_list = &sbinfo->si_plink.head;
++	/* no spin_lock since sbinfo is write-locked */
++	list_for_each_entry_safe(plink, tmp, plink_list, list) {
++		do_put = 0;
++		inode = au_igrab(plink->inode);
++		ii_write_lock_child(inode);
++		bstart = au_ibstart(inode);
++		bend = au_ibend(inode);
++		if (bstart >= 0) {
++			for (bindex = bstart; bindex <= bend; bindex++) {
++				if (!au_h_iptr(inode, bindex)
++				    || au_ii_br_id(inode, bindex) != br_id)
++					continue;
++				au_set_h_iptr(inode, bindex, NULL, 0);
++				do_put = 1;
++				break;
++			}
++		} else
++			do_put_plink(plink, 1);
++
++		if (do_put) {
++			for (bindex = bstart; bindex <= bend; bindex++)
++				if (au_h_iptr(inode, bindex)) {
++					do_put = 0;
++					break;
++				}
++			if (do_put)
++				do_put_plink(plink, 1);
++		}
++		ii_write_unlock(inode);
++		iput(inode);
++	}
++}
+diff -Nur linux-2.6.31.5.orig/fs/aufs/poll.c linux-2.6.31.5/fs/aufs/poll.c
+--- linux-2.6.31.5.orig/fs/aufs/poll.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.31.5/fs/aufs/poll.c	2009-11-15 22:02:37.000000000 +0100
+@@ -0,0 +1,56 @@
++/*
++ * Copyright (C) 2005-2009 Junjiro R. Okajima
++ *
++ * This program, aufs is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
++ */
++
++/*
++ * poll operation
++ * There is only one filesystem which implements ->poll operation, currently.
++ */
++
++#include "aufs.h"
++
++unsigned int aufs_poll(struct file *file, poll_table *wait)
++{
++	unsigned int mask;
++	int err;
++	struct file *h_file;
++	struct dentry *dentry;
++	struct super_block *sb;
++
++	/* We should pretend an error happened. */
++	mask = POLLERR /* | POLLIN | POLLOUT */;
++	dentry = file->f_dentry;
++	sb = dentry->d_sb;
++	si_read_lock(sb, AuLock_FLUSH);
++	err = au_reval_and_lock_fdi(file, au_reopen_nondir, /*wlock*/0);
++	if (unlikely(err))
++		goto out;
++
++	/* it is not an error if h_file has no operation */
++	mask = DEFAULT_POLLMASK;
++	h_file = au_h_fptr(file, au_fbstart(file));
++	if (h_file->f_op && h_file->f_op->poll)
++		mask = h_file->f_op->poll(h_file, wait);
++
++	di_read_unlock(dentry, AuLock_IR);
++	fi_read_unlock(file);
++
++ out:
++	si_read_unlock(sb);
++	AuTraceErr((int)mask);
++	return mask;
++}
+diff -Nur linux-2.6.31.5.orig/fs/aufs/rwsem.h linux-2.6.31.5/fs/aufs/rwsem.h
+--- linux-2.6.31.5.orig/fs/aufs/rwsem.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.31.5/fs/aufs/rwsem.h	2009-11-15 22:02:37.000000000 +0100
+@@ -0,0 +1,186 @@
++/*
++ * Copyright (C) 2005-2009 Junjiro R. Okajima
++ *
++ * This program, aufs is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
++ */
++
++/*
++ * simple read-write semaphore wrappers
++ */
++
++#ifndef __AUFS_RWSEM_H__
++#define __AUFS_RWSEM_H__
++
++#ifdef __KERNEL__
++
++#include <linux/rwsem.h>
++
++struct au_rwsem {
++	struct rw_semaphore	rwsem;
++#ifdef CONFIG_AUFS_DEBUG
++	/* just for debugging, not almighty counter */
++	atomic_t		rcnt, wcnt;
++#endif
++};
++
++#ifdef CONFIG_AUFS_DEBUG
++#define AuDbgCntInit(rw) do { \
++	atomic_set(&(rw)->rcnt, 0); \
++	atomic_set(&(rw)->wcnt, 0); \
++	smp_mb(); /* atomic set */ \
++} while (0)
++
++#define AuDbgRcntInc(rw)	atomic_inc_return(&(rw)->rcnt)
++#define AuDbgRcntDec(rw)	WARN_ON(atomic_dec_return(&(rw)->rcnt) < 0)
++#define AuDbgWcntInc(rw)	WARN_ON(atomic_inc_return(&(rw)->wcnt) > 1)
++#define AuDbgWcntDec(rw)	WARN_ON(atomic_dec_return(&(rw)->wcnt) < 0)
++#else
++#define AuDbgCntInit(rw)	do {} while (0)
++#define AuDbgRcntInc(rw)	do {} while (0)
++#define AuDbgRcntDec(rw)	do {} while (0)
++#define AuDbgWcntInc(rw)	do {} while (0)
++#define AuDbgWcntDec(rw)	do {} while (0)
++#endif /* CONFIG_AUFS_DEBUG */
++
++/* to debug easier, do not make them inlined functions */
++#define AuRwMustNoWaiters(rw)	AuDebugOn(!list_empty(&(rw)->rwsem.wait_list))
++/* rwsem_is_locked() is unusable */
++#define AuRwMustReadLock(rw)	AuDebugOn(atomic_read(&(rw)->rcnt) <= 0)
++#define AuRwMustWriteLock(rw)	AuDebugOn(atomic_read(&(rw)->wcnt) <= 0)
++#define AuRwMustAnyLock(rw)	AuDebugOn(atomic_read(&(rw)->rcnt) <= 0 \
++					&& atomic_read(&(rw)->wcnt) <= 0)
++#define AuRwDestroy(rw)		AuDebugOn(atomic_read(&(rw)->rcnt) \
++					|| atomic_read(&(rw)->wcnt))
++
++static inline void au_rw_init(struct au_rwsem *rw)
++{
++	AuDbgCntInit(rw);
++	init_rwsem(&rw->rwsem);
++}
++
++static inline void au_rw_init_wlock(struct au_rwsem *rw)
++{
++	au_rw_init(rw);
++	down_write(&rw->rwsem);
++	AuDbgWcntInc(rw);
++}
++
++static inline void au_rw_init_wlock_nested(struct au_rwsem *rw,
++					   unsigned int lsc)
++{
++	au_rw_init(rw);
++	down_write_nested(&rw->rwsem, lsc);
++	AuDbgWcntInc(rw);
++}
++
++static inline void au_rw_read_lock(struct au_rwsem *rw)
++{
++	down_read(&rw->rwsem);
++	AuDbgRcntInc(rw);
++}
++
++static inline void au_rw_read_lock_nested(struct au_rwsem *rw, unsigned int lsc)
++{
++	down_read_nested(&rw->rwsem, lsc);
++	AuDbgRcntInc(rw);
++}
++
++static inline void au_rw_read_unlock(struct au_rwsem *rw)
++{
++	AuRwMustReadLock(rw);
++	AuDbgRcntDec(rw);
++	up_read(&rw->rwsem);
++}
++
++static inline void au_rw_dgrade_lock(struct au_rwsem *rw)
++{
++	AuRwMustWriteLock(rw);
++	AuDbgRcntInc(rw);
++	AuDbgWcntDec(rw);
++	downgrade_write(&rw->rwsem);
++}
++
++static inline void au_rw_write_lock(struct au_rwsem *rw)
++{
++	down_write(&rw->rwsem);
++	AuDbgWcntInc(rw);
++}
++
++static inline void au_rw_write_lock_nested(struct au_rwsem *rw,
++					   unsigned int lsc)
++{
++	down_write_nested(&rw->rwsem, lsc);
++	AuDbgWcntInc(rw);
++}
++
++static inline void au_rw_write_unlock(struct au_rwsem *rw)
++{
++	AuRwMustWriteLock(rw);
++	AuDbgWcntDec(rw);
++	up_write(&rw->rwsem);
++}
++
++/* why is the _nested version not defined? */
++static inline int au_rw_read_trylock(struct au_rwsem *rw)
++{
++	int ret = down_read_trylock(&rw->rwsem);
++	if (ret)
++		AuDbgRcntInc(rw);
++	return ret;
++}
++
++static inline int au_rw_write_trylock(struct au_rwsem *rw)
++{
++	int ret = down_write_trylock(&rw->rwsem);
++	if (ret)
++		AuDbgWcntInc(rw);
++	return ret;
++}
++
++#undef AuDbgCntInit
++#undef AuDbgRcntInc
++#undef AuDbgRcntDec
++#undef AuDbgWcntInc
++#undef AuDbgWcntDec
++
++#define AuSimpleLockRwsemFuncs(prefix, param, rwsem) \
++static inline void prefix##_read_lock(param) \
++{ au_rw_read_lock(rwsem); } \
++static inline void prefix##_write_lock(param) \
++{ au_rw_write_lock(rwsem); } \
++static inline int prefix##_read_trylock(param) \
++{ return au_rw_read_trylock(rwsem); } \
++static inline int prefix##_write_trylock(param) \
++{ return au_rw_write_trylock(rwsem); }
++/* why is the _nested version not defined? */
++/* static inline void prefix##_read_trylock_nested(param, lsc)
++{ au_rw_read_trylock_nested(rwsem, lsc); }
++static inline void prefix##_write_trylock_nested(param, lsc)
++{ au_rw_write_trylock_nested(rwsem, lsc); } */
++
++#define AuSimpleUnlockRwsemFuncs(prefix, param, rwsem) \
++static inline void prefix##_read_unlock(param) \
++{ au_rw_read_unlock(rwsem); } \
++static inline void prefix##_write_unlock(param) \
++{ au_rw_write_unlock(rwsem); } \
++static inline void prefix##_downgrade_lock(param) \
++{ au_rw_dgrade_lock(rwsem); }
++
++#define AuSimpleRwsemFuncs(prefix, param, rwsem) \
++	AuSimpleLockRwsemFuncs(prefix, param, rwsem) \
++	AuSimpleUnlockRwsemFuncs(prefix, param, rwsem)
++
++#endif /* __KERNEL__ */
++#endif /* __AUFS_RWSEM_H__ */
+diff -Nur linux-2.6.31.5.orig/fs/aufs/sbinfo.c linux-2.6.31.5/fs/aufs/sbinfo.c
+--- linux-2.6.31.5.orig/fs/aufs/sbinfo.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.31.5/fs/aufs/sbinfo.c	2009-11-15 22:02:37.000000000 +0100
+@@ -0,0 +1,208 @@
++/*
++ * Copyright (C) 2005-2009 Junjiro R. Okajima
++ *
++ * This program, aufs is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
++ */
++
++/*
++ * superblock private data
++ */
++
++#include "aufs.h"
++
++/*
++ * these are necessary regardless of whether sysfs is disabled.
++ */
++void au_si_free(struct kobject *kobj)
++{
++	struct au_sbinfo *sbinfo;
++	struct super_block *sb;
++
++	sbinfo = container_of(kobj, struct au_sbinfo, si_kobj);
++	AuDebugOn(!list_empty(&sbinfo->si_plink.head));
++
++	sb = sbinfo->si_sb;
++	si_write_lock(sb);
++	au_xino_clr(sb);
++	au_br_free(sbinfo);
++	kfree(sbinfo->si_branch);
++	mutex_destroy(&sbinfo->si_xib_mtx);
++	si_write_unlock(sb);
++	AuRwDestroy(&sbinfo->si_rwsem);
++
++	kfree(sbinfo);
++}
++
++int au_si_alloc(struct super_block *sb)
++{
++	int err;
++	struct au_sbinfo *sbinfo;
++
++	err = -ENOMEM;
++	sbinfo = kmalloc(sizeof(*sbinfo), GFP_NOFS);
++	if (unlikely(!sbinfo))
++		goto out;
++
++	/* will be reallocated separately */
++	sbinfo->si_branch = kzalloc(sizeof(*sbinfo->si_branch), GFP_NOFS);
++	if (unlikely(!sbinfo->si_branch))
++		goto out_sbinfo;
++
++	memset(&sbinfo->si_kobj, 0, sizeof(sbinfo->si_kobj));
++	err = sysaufs_si_init(sbinfo);
++	if (unlikely(err))
++		goto out_br;
++
++	au_nwt_init(&sbinfo->si_nowait);
++	au_rw_init_wlock(&sbinfo->si_rwsem);
++	sbinfo->si_generation = 0;
++	sbinfo->au_si_status = 0;
++	sbinfo->si_bend = -1;
++	sbinfo->si_last_br_id = 0;
++
++	sbinfo->si_wbr_copyup = AuWbrCopyup_Def;
++	sbinfo->si_wbr_create = AuWbrCreate_Def;
++	sbinfo->si_wbr_copyup_ops = au_wbr_copyup_ops + AuWbrCopyup_Def;
++	sbinfo->si_wbr_create_ops = au_wbr_create_ops + AuWbrCreate_Def;
++
++	sbinfo->si_mntflags = AuOpt_Def;
++
++	sbinfo->si_xread = NULL;
++	sbinfo->si_xwrite = NULL;
++	sbinfo->si_xib = NULL;
++	mutex_init(&sbinfo->si_xib_mtx);
++	sbinfo->si_xib_buf = NULL;
++	sbinfo->si_xino_brid = -1;
++	/* leave si_xib_last_pindex and si_xib_next_bit */
++
++	sbinfo->si_rdcache = AUFS_RDCACHE_DEF * HZ;
++	sbinfo->si_rdblk = AUFS_RDBLK_DEF;
++	sbinfo->si_rdhash = AUFS_RDHASH_DEF;
++	sbinfo->si_dirwh = AUFS_DIRWH_DEF;
++
++	au_spl_init(&sbinfo->si_plink);
++	init_waitqueue_head(&sbinfo->si_plink_wq);
++
++	/* leave other members for sysaufs and si_mnt. */
++	sbinfo->si_sb = sb;
++	sb->s_fs_info = sbinfo;
++	au_debug_sbinfo_init(sbinfo);
++	return 0; /* success */
++
++ out_br:
++	kfree(sbinfo->si_branch);
++ out_sbinfo:
++	kfree(sbinfo);
++ out:
++	return err;
++}
++
++int au_sbr_realloc(struct au_sbinfo *sbinfo, int nbr)
++{
++	int err, sz;
++	struct au_branch **brp;
++
++	AuRwMustWriteLock(&sbinfo->si_rwsem);
++
++	err = -ENOMEM;
++	sz = sizeof(*brp) * (sbinfo->si_bend + 1);
++	if (unlikely(!sz))
++		sz = sizeof(*brp);
++	brp = au_kzrealloc(sbinfo->si_branch, sz, sizeof(*brp) * nbr, GFP_NOFS);
++	if (brp) {
++		sbinfo->si_branch = brp;
++		err = 0;
++	}
++
++	return err;
++}
++
++/* ---------------------------------------------------------------------- */
++
++unsigned int au_sigen_inc(struct super_block *sb)
++{
++	unsigned int gen;
++
++	SiMustWriteLock(sb);
++
++	gen = ++au_sbi(sb)->si_generation;
++	au_update_digen(sb->s_root);
++	au_update_iigen(sb->s_root->d_inode);
++	sb->s_root->d_inode->i_version++;
++	return gen;
++}
++
++aufs_bindex_t au_new_br_id(struct super_block *sb)
++{
++	aufs_bindex_t br_id;
++	int i;
++	struct au_sbinfo *sbinfo;
++
++	SiMustWriteLock(sb);
++
++	sbinfo = au_sbi(sb);
++	for (i = 0; i <= AUFS_BRANCH_MAX; i++) {
++		br_id = ++sbinfo->si_last_br_id;
++		if (br_id && au_br_index(sb, br_id) < 0)
++			return br_id;
++	}
++
++	return -1;
++}
++
++/* ---------------------------------------------------------------------- */
++
++/* dentry and super_block lock. call at entry point */
++void aufs_read_lock(struct dentry *dentry, int flags)
++{
++	si_read_lock(dentry->d_sb, flags);
++	if (au_ftest_lock(flags, DW))
++		di_write_lock_child(dentry);
++	else
++		di_read_lock_child(dentry, flags);
++}
++
++void aufs_read_unlock(struct dentry *dentry, int flags)
++{
++	if (au_ftest_lock(flags, DW))
++		di_write_unlock(dentry);
++	else
++		di_read_unlock(dentry, flags);
++	si_read_unlock(dentry->d_sb);
++}
++
++void aufs_write_lock(struct dentry *dentry)
++{
++	si_write_lock(dentry->d_sb);
++	di_write_lock_child(dentry);
++}
++
++void aufs_write_unlock(struct dentry *dentry)
++{
++	di_write_unlock(dentry);
++	si_write_unlock(dentry->d_sb);
++}
++
++void aufs_read_and_write_lock2(struct dentry *d1, struct dentry *d2, int flags)
++{
++	si_read_lock(d1->d_sb, flags);
++	di_write_lock2_child(d1, d2, au_ftest_lock(flags, DIR));
++}
++
++void aufs_read_and_write_unlock2(struct dentry *d1, struct dentry *d2)
++{
++	di_write_unlock2(d1, d2);
++	si_read_unlock(d1->d_sb);
++}
+diff -Nur linux-2.6.31.5.orig/fs/aufs/spl.h linux-2.6.31.5/fs/aufs/spl.h
+--- linux-2.6.31.5.orig/fs/aufs/spl.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.31.5/fs/aufs/spl.h	2009-11-15 22:02:37.000000000 +0100
+@@ -0,0 +1,57 @@
++/*
++ * Copyright (C) 2005-2009 Junjiro R. Okajima
++ *
++ * This program, aufs is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
++ */
++
++/*
++ * simple list protected by a spinlock
++ */
++
++#ifndef __AUFS_SPL_H__
++#define __AUFS_SPL_H__
++
++#ifdef __KERNEL__
++
++#include <linux/spinlock.h>
++#include <linux/list.h>
++
++struct au_splhead {
++	spinlock_t		spin;
++	struct list_head	head;
++};
++
++static inline void au_spl_init(struct au_splhead *spl)
++{
++	spin_lock_init(&spl->spin);
++	INIT_LIST_HEAD(&spl->head);
++}
++
++static inline void au_spl_add(struct list_head *list, struct au_splhead *spl)
++{
++	spin_lock(&spl->spin);
++	list_add(list, &spl->head);
++	spin_unlock(&spl->spin);
++}
++
++static inline void au_spl_del(struct list_head *list, struct au_splhead *spl)
++{
++	spin_lock(&spl->spin);
++	list_del(list);
++	spin_unlock(&spl->spin);
++}
++
++#endif /* __KERNEL__ */
++#endif /* __AUFS_SPL_H__ */
+diff -Nur linux-2.6.31.5.orig/fs/aufs/super.c linux-2.6.31.5/fs/aufs/super.c
+--- linux-2.6.31.5.orig/fs/aufs/super.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.31.5/fs/aufs/super.c	2009-11-15 22:20:26.000000000 +0100
+@@ -0,0 +1,874 @@
++/*
++ * Copyright (C) 2005-2009 Junjiro R. Okajima
++ *
++ * This program, aufs is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
++ */
++
++/*
++ * mount and super_block operations
++ */
++
++#include <linux/buffer_head.h>
++#include <linux/module.h>
++#include <linux/seq_file.h>
++#include <linux/statfs.h>
++#include "aufs.h"
++
++/*
++ * super_operations
++ */
++static struct inode *aufs_alloc_inode(struct super_block *sb __maybe_unused)
++{
++	struct au_icntnr *c;
++
++	c = au_cache_alloc_icntnr();
++	if (c) {
++		inode_init_once(&c->vfs_inode);
++		c->vfs_inode.i_version = 1; /* sigen(sb); */
++		c->iinfo.ii_hinode = NULL;
++		return &c->vfs_inode;
++	}
++	return NULL;
++}
++
++static void aufs_destroy_inode(struct inode *inode)
++{
++	au_iinfo_fin(inode);
++	au_cache_free_icntnr(container_of(inode, struct au_icntnr, vfs_inode));
++}
++
++struct inode *au_iget_locked(struct super_block *sb, ino_t ino)
++{
++	struct inode *inode;
++	int err;
++
++	inode = iget_locked(sb, ino);
++	if (unlikely(!inode)) {
++		inode = ERR_PTR(-ENOMEM);
++		goto out;
++	}
++	if (!(inode->i_state & I_NEW))
++		goto out;
++
++	err = au_xigen_new(inode);
++	if (!err)
++		err = au_iinfo_init(inode);
++	if (!err)
++		inode->i_version++;
++	else {
++		iget_failed(inode);
++		inode = ERR_PTR(err);
++	}
++
++ out:
++	/* never return NULL */
++	AuDebugOn(!inode);
++	AuTraceErrPtr(inode);
++	return inode;
++}
++
++/* lock free root dinfo */
++static int au_show_brs(struct seq_file *seq, struct super_block *sb)
++{
++	int err;
++	aufs_bindex_t bindex, bend;
++	struct path path;
++	struct au_hdentry *hd;
++	struct au_branch *br;
++
++	err = 0;
++	bend = au_sbend(sb);
++	hd = au_di(sb->s_root)->di_hdentry;
++	for (bindex = 0; !err && bindex <= bend; bindex++) {
++		br = au_sbr(sb, bindex);
++		path.mnt = br->br_mnt;
++		path.dentry = hd[bindex].hd_dentry;
++		err = au_seq_path(seq, &path);
++		if (err > 0)
++			err = seq_printf(seq, "=%s",
++					 au_optstr_br_perm(br->br_perm));
++		if (!err && bindex != bend)
++			err = seq_putc(seq, ':');
++	}
++
++	return err;
++}
++
++static void au_show_wbr_create(struct seq_file *m, int v,
++			       struct au_sbinfo *sbinfo)
++{
++	const char *pat;
++
++	AuRwMustAnyLock(&sbinfo->si_rwsem);
++
++	seq_printf(m, ",create=");
++	pat = au_optstr_wbr_create(v);
++	switch (v) {
++	case AuWbrCreate_TDP:
++	case AuWbrCreate_RR:
++	case AuWbrCreate_MFS:
++	case AuWbrCreate_PMFS:
++		seq_printf(m, pat);
++		break;
++	case AuWbrCreate_MFSV:
++		seq_printf(m, /*pat*/"mfs:%lu",
++			   sbinfo->si_wbr_mfs.mfs_expire / HZ);
++		break;
++	case AuWbrCreate_PMFSV:
++		seq_printf(m, /*pat*/"pmfs:%lu",
++			   sbinfo->si_wbr_mfs.mfs_expire / HZ);
++		break;
++	case AuWbrCreate_MFSRR:
++		seq_printf(m, /*pat*/"mfsrr:%llu",
++			   sbinfo->si_wbr_mfs.mfsrr_watermark);
++		break;
++	case AuWbrCreate_MFSRRV:
++		seq_printf(m, /*pat*/"mfsrr:%llu:%lu",
++			   sbinfo->si_wbr_mfs.mfsrr_watermark,
++			   sbinfo->si_wbr_mfs.mfs_expire / HZ);
++		break;
++	}
++}
++
++static int au_show_xino(struct seq_file *seq, struct vfsmount *mnt)
++{
++#ifdef CONFIG_SYSFS
++	return 0;
++#else
++	int err;
++	const int len = sizeof(AUFS_XINO_FNAME) - 1;
++	aufs_bindex_t bindex, brid;
++	struct super_block *sb;
++	struct qstr *name;
++	struct file *f;
++	struct dentry *d, *h_root;
++
++	AuRwMustAnyLock(&sbinfo->si_rwsem);
++
++	err = 0;
++	sb = mnt->mnt_sb;
++	f = au_sbi(sb)->si_xib;
++	if (!f)
++		goto out;
++
++	/* stop printing the default xino path on the first writable branch */
++	h_root = NULL;
++	brid = au_xino_brid(sb);
++	if (brid >= 0) {
++		bindex = au_br_index(sb, brid);
++		h_root = au_di(sb->s_root)->di_hdentry[0 + bindex].hd_dentry;
++	}
++	d = f->f_dentry;
++	name = &d->d_name;
++	/* safe ->d_parent because the file is unlinked */
++	if (d->d_parent == h_root
++	    && name->len == len
++	    && !memcmp(name->name, AUFS_XINO_FNAME, len))
++		goto out;
++
++	seq_puts(seq, ",xino=");
++	err = au_xino_path(seq, f);
++
++ out:
++	return err;
++#endif
++}
++
++/* seq_file will re-call me in case of too long string */
++static int aufs_show_options(struct seq_file *m, struct vfsmount *mnt)
++{
++	int err, n;
++	unsigned int mnt_flags, v;
++	struct super_block *sb;
++	struct au_sbinfo *sbinfo;
++
++#define AuBool(name, str) do { \
++	v = au_opt_test(mnt_flags, name); \
++	if (v != au_opt_test(AuOpt_Def, name)) \
++		seq_printf(m, ",%s" #str, v ? "" : "no"); \
++} while (0)
++
++#define AuStr(name, str) do { \
++	v = mnt_flags & AuOptMask_##name; \
++	if (v != (AuOpt_Def & AuOptMask_##name)) \
++		seq_printf(m, "," #str "=%s", au_optstr_##str(v)); \
++} while (0)
++
++#define AuUInt(name, str, val) do { \
++	if (val != AUFS_##name##_DEF) \
++		seq_printf(m, "," #str "=%u", val); \
++} while (0)
++
++	/* lock free root dinfo */
++	sb = mnt->mnt_sb;
++	si_noflush_read_lock(sb);
++	sbinfo = au_sbi(sb);
++	seq_printf(m, ",si=%lx", sysaufs_si_id(sbinfo));
++
++	mnt_flags = au_mntflags(sb);
++	if (au_opt_test(mnt_flags, XINO)) {
++		err = au_show_xino(m, mnt);
++		if (unlikely(err))
++			goto out;
++	} else
++		seq_puts(m, ",noxino");
++
++	AuBool(TRUNC_XINO, trunc_xino);
++	AuStr(UDBA, udba);
++	AuBool(SHWH, shwh);
++	AuBool(PLINK, plink);
++	/* AuBool(DIRPERM1, dirperm1); */
++	/* AuBool(REFROF, refrof); */
++
++	v = sbinfo->si_wbr_create;
++	if (v != AuWbrCreate_Def)
++		au_show_wbr_create(m, v, sbinfo);
++
++	v = sbinfo->si_wbr_copyup;
++	if (v != AuWbrCopyup_Def)
++		seq_printf(m, ",cpup=%s", au_optstr_wbr_copyup(v));
++
++	v = au_opt_test(mnt_flags, ALWAYS_DIROPQ);
++	if (v != au_opt_test(AuOpt_Def, ALWAYS_DIROPQ))
++		seq_printf(m, ",diropq=%c", v ? 'a' : 'w');
++
++	AuUInt(DIRWH, dirwh, sbinfo->si_dirwh);
++
++	n = sbinfo->si_rdcache / HZ;
++	AuUInt(RDCACHE, rdcache, n);
++
++	AuUInt(RDBLK, rdblk, sbinfo->si_rdblk);
++	AuUInt(RDHASH, rdhash, sbinfo->si_rdhash);
++
++	AuBool(SUM, sum);
++	/* AuBool(SUM_W, wsum); */
++	AuBool(WARN_PERM, warn_perm);
++	AuBool(VERBOSE, verbose);
++
++ out:
++	/* be sure to print "br:" last */
++	if (!sysaufs_brs) {
++		seq_puts(m, ",br:");
++		au_show_brs(m, sb);
++	}
++	si_read_unlock(sb);
++	return 0;
++
++#undef Deleted
++#undef AuBool
++#undef AuStr
++}
++
++/* ---------------------------------------------------------------------- */
++
++/* sum mode which returns the summation for statfs(2) */
++
++static u64 au_add_till_max(u64 a, u64 b)
++{
++	u64 old;
++
++	old = a;
++	a += b;
++	if (old < a)
++		return a;
++	return ULLONG_MAX;
++}
++
++static int au_statfs_sum(struct super_block *sb, struct kstatfs *buf)
++{
++	int err;
++	u64 blocks, bfree, bavail, files, ffree;
++	aufs_bindex_t bend, bindex, i;
++	unsigned char shared;
++	struct vfsmount *h_mnt;
++	struct super_block *h_sb;
++
++	blocks = 0;
++	bfree = 0;
++	bavail = 0;
++	files = 0;
++	ffree = 0;
++
++	err = 0;
++	bend = au_sbend(sb);
++	for (bindex = bend; bindex >= 0; bindex--) {
++		h_mnt = au_sbr_mnt(sb, bindex);
++		h_sb = h_mnt->mnt_sb;
++		shared = 0;
++		for (i = bindex + 1; !shared && i <= bend; i++)
++			shared = (au_sbr_sb(sb, i) == h_sb);
++		if (shared)
++			continue;
++
++		/* sb->s_root for NFS is unreliable */
++		err = vfs_statfs(h_mnt->mnt_root, buf);
++		if (unlikely(err))
++			goto out;
++
++		blocks = au_add_till_max(blocks, buf->f_blocks);
++		bfree = au_add_till_max(bfree, buf->f_bfree);
++		bavail = au_add_till_max(bavail, buf->f_bavail);
++		files = au_add_till_max(files, buf->f_files);
++		ffree = au_add_till_max(ffree, buf->f_ffree);
++	}
++
++	buf->f_blocks = blocks;
++	buf->f_bfree = bfree;
++	buf->f_bavail = bavail;
++	buf->f_files = files;
++	buf->f_ffree = ffree;
++
++ out:
++	return err;
++}
++
++static int aufs_statfs(struct dentry *dentry, struct kstatfs *buf)
++{
++	int err;
++	struct super_block *sb;
++
++	/* lock free root dinfo */
++	sb = dentry->d_sb;
++	si_noflush_read_lock(sb);
++	if (!au_opt_test(au_mntflags(sb), SUM))
++		/* sb->s_root for NFS is unreliable */
++		err = vfs_statfs(au_sbr_mnt(sb, 0)->mnt_root, buf);
++	else
++		err = au_statfs_sum(sb, buf);
++	si_read_unlock(sb);
++
++	if (!err) {
++		buf->f_type = AUFS_SUPER_MAGIC;
++		buf->f_namelen -= AUFS_WH_PFX_LEN;
++		memset(&buf->f_fsid, 0, sizeof(buf->f_fsid));
++	}
++	/* buf->f_bsize = buf->f_blocks = buf->f_bfree = buf->f_bavail = -1; */
++
++	return err;
++}
++
++/* ---------------------------------------------------------------------- */
++
++/* try flushing the lower fs at aufs remount/unmount time */
++
++static void au_fsync_br(struct super_block *sb)
++{
++	aufs_bindex_t bend, bindex;
++	int brperm;
++	struct au_branch *br;
++	struct super_block *h_sb;
++
++	bend = au_sbend(sb);
++	for (bindex = 0; bindex < bend; bindex++) {
++		br = au_sbr(sb, bindex);
++		brperm = br->br_perm;
++		if (brperm == AuBrPerm_RR || brperm == AuBrPerm_RRWH)
++			continue;
++		h_sb = br->br_mnt->mnt_sb;
++		if (bdev_read_only(h_sb->s_bdev))
++			continue;
++
++		lockdep_off();
++		down_write(&h_sb->s_umount);
++		shrink_dcache_sb(h_sb);
++		sync_filesystem(h_sb);
++		up_write(&h_sb->s_umount);
++		lockdep_on();
++	}
++}
++
++/*
++ * this IS NOT for super_operations.
++ * I guess it will be reverted someday.
++ */
++static void aufs_umount_begin(struct super_block *sb)
++{
++	struct au_sbinfo *sbinfo;
++
++	sbinfo = au_sbi(sb);
++	if (!sbinfo)
++		return;
++
++	si_write_lock(sb);
++	au_fsync_br(sb);
++	if (au_opt_test(au_mntflags(sb), PLINK))
++		au_plink_put(sb);
++	if (sbinfo->si_wbr_create_ops->fin)
++		sbinfo->si_wbr_create_ops->fin(sb);
++	si_write_unlock(sb);
++}
++
++/* final actions when unmounting a file system */
++static void aufs_put_super(struct super_block *sb)
++{
++	struct au_sbinfo *sbinfo;
++
++	sbinfo = au_sbi(sb);
++	if (!sbinfo)
++		return;
++
++	aufs_umount_begin(sb);
++	dbgaufs_si_fin(sbinfo);
++	kobject_put(&sbinfo->si_kobj);
++}
++
++/* ---------------------------------------------------------------------- */
++
++/*
++ * refresh dentry and inode at remount time.
++ */
++static int do_refresh(struct dentry *dentry, mode_t type,
++		      unsigned int dir_flags)
++{
++	int err;
++	struct dentry *parent;
++
++	di_write_lock_child(dentry);
++	parent = dget_parent(dentry);
++	di_read_lock_parent(parent, AuLock_IR);
++
++	/* returns the number of positive dentries */
++	err = au_refresh_hdentry(dentry, type);
++	if (err >= 0) {
++		struct inode *inode = dentry->d_inode;
++		err = au_refresh_hinode(inode, dentry);
++		if (!err && type == S_IFDIR)
++			au_reset_hinotify(inode, dir_flags);
++	}
++	if (unlikely(err))
++		AuErr("unrecoverable error %d, %.*s\n", err, AuDLNPair(dentry));
++
++	di_read_unlock(parent, AuLock_IR);
++	dput(parent);
++	di_write_unlock(dentry);
++
++	return err;
++}
++
++static int test_dir(struct dentry *dentry, void *arg __maybe_unused)
++{
++	return S_ISDIR(dentry->d_inode->i_mode);
++}
++
++/* gave up consolidating with refresh_nondir() */
++static int refresh_dir(struct dentry *root, unsigned int sigen)
++{
++	int err, i, j, ndentry, e;
++	struct au_dcsub_pages dpages;
++	struct au_dpage *dpage;
++	struct dentry **dentries;
++	struct inode *inode;
++	const unsigned int flags = au_hi_flags(root->d_inode, /*isdir*/1);
++
++	err = 0;
++	list_for_each_entry(inode, &root->d_sb->s_inodes, i_sb_list)
++		if (S_ISDIR(inode->i_mode) && au_iigen(inode) != sigen) {
++			ii_write_lock_child(inode);
++			e = au_refresh_hinode_self(inode, /*do_attr*/1);
++			ii_write_unlock(inode);
++			if (unlikely(e)) {
++				AuDbg("e %d, i%lu\n", e, inode->i_ino);
++				if (!err)
++					err = e;
++				/* go on even if err */
++			}
++		}
++
++	e = au_dpages_init(&dpages, GFP_NOFS);
++	if (unlikely(e)) {
++		if (!err)
++			err = e;
++		goto out;
++	}
++	e = au_dcsub_pages(&dpages, root, test_dir, NULL);
++	if (unlikely(e)) {
++		if (!err)
++			err = e;
++		goto out_dpages;
++	}
++
++	for (i = 0; !e && i < dpages.ndpage; i++) {
++		dpage = dpages.dpages + i;
++		dentries = dpage->dentries;
++		ndentry = dpage->ndentry;
++		for (j = 0; !e && j < ndentry; j++) {
++			struct dentry *d;
++
++			d = dentries[j];
++			au_dbg_verify_dir_parent(d, sigen);
++			if (au_digen(d) != sigen) {
++				e = do_refresh(d, S_IFDIR, flags);
++				if (unlikely(e && !err))
++					err = e;
++				/* break on err */
++			}
++		}
++	}
++
++ out_dpages:
++	au_dpages_free(&dpages);
++ out:
++	return err;
++}
++
++static int test_nondir(struct dentry *dentry, void *arg __maybe_unused)
++{
++	return !S_ISDIR(dentry->d_inode->i_mode);
++}
++
++static int refresh_nondir(struct dentry *root, unsigned int sigen,
++			  int do_dentry)
++{
++	int err, i, j, ndentry, e;
++	struct au_dcsub_pages dpages;
++	struct au_dpage *dpage;
++	struct dentry **dentries;
++	struct inode *inode;
++
++	err = 0;
++	list_for_each_entry(inode, &root->d_sb->s_inodes, i_sb_list)
++		if (!S_ISDIR(inode->i_mode) && au_iigen(inode) != sigen) {
++			ii_write_lock_child(inode);
++			e = au_refresh_hinode_self(inode, /*do_attr*/1);
++			ii_write_unlock(inode);
++			if (unlikely(e)) {
++				AuDbg("e %d, i%lu\n", e, inode->i_ino);
++				if (!err)
++					err = e;
++				/* go on even if err */
++			}
++		}
++
++	if (!do_dentry)
++		goto out;
++
++	e = au_dpages_init(&dpages, GFP_NOFS);
++	if (unlikely(e)) {
++		if (!err)
++			err = e;
++		goto out;
++	}
++	e = au_dcsub_pages(&dpages, root, test_nondir, NULL);
++	if (unlikely(e)) {
++		if (!err)
++			err = e;
++		goto out_dpages;
++	}
++
++	for (i = 0; i < dpages.ndpage; i++) {
++		dpage = dpages.dpages + i;
++		dentries = dpage->dentries;
++		ndentry = dpage->ndentry;
++		for (j = 0; j < ndentry; j++) {
++			struct dentry *d;
++
++			d = dentries[j];
++			au_dbg_verify_nondir_parent(d, sigen);
++			inode = d->d_inode;
++			if (inode && au_digen(d) != sigen) {
++				e = do_refresh(d, inode->i_mode & S_IFMT,
++					       /*dir_flags*/0);
++				if (unlikely(e && !err))
++					err = e;
++				/* go on even err */
++			}
++		}
++	}
++
++ out_dpages:
++	au_dpages_free(&dpages);
++ out:
++	return err;
++}
++
++static void au_remount_refresh(struct super_block *sb, unsigned int flags)
++{
++	int err;
++	unsigned int sigen;
++	struct au_sbinfo *sbinfo;
++	struct dentry *root;
++	struct inode *inode;
++
++	au_sigen_inc(sb);
++	sigen = au_sigen(sb);
++	sbinfo = au_sbi(sb);
++	au_fclr_si(sbinfo, FAILED_REFRESH_DIRS);
++
++	root = sb->s_root;
++	DiMustNoWaiters(root);
++	inode = root->d_inode;
++	IiMustNoWaiters(inode);
++	au_reset_hinotify(inode, au_hi_flags(inode, /*isdir*/1));
++	di_write_unlock(root);
++
++	err = refresh_dir(root, sigen);
++	if (unlikely(err)) {
++		au_fset_si(sbinfo, FAILED_REFRESH_DIRS);
++		AuWarn("Refreshing directories failed, ignored (%d)\n", err);
++	}
++
++	if (au_ftest_opts(flags, REFRESH_NONDIR)) {
++		err = refresh_nondir(root, sigen, !err);
++		if (unlikely(err))
++			AuWarn("Refreshing non-directories failed, ignored"
++			       "(%d)\n", err);
++	}
++
++	/* aufs_write_lock() calls ..._child() */
++	di_write_lock_child(root);
++	au_cpup_attr_all(root->d_inode, /*force*/1);
++}
++
++/* stop extra interpretation of errno in mount(8), and strange error messages */
++static int cvt_err(int err)
++{
++	AuTraceErr(err);
++
++	switch (err) {
++	case -ENOENT:
++	case -ENOTDIR:
++	case -EEXIST:
++	case -EIO:
++		err = -EINVAL;
++	}
++	return err;
++}
++
++static int aufs_remount_fs(struct super_block *sb, int *flags, char *data)
++{
++	int err;
++	struct au_opts opts;
++	struct dentry *root;
++	struct inode *inode;
++	struct au_sbinfo *sbinfo;
++
++	err = 0;
++	root = sb->s_root;
++	if (!data || !*data) {
++		aufs_write_lock(root);
++		err = au_opts_verify(sb, *flags, /*pending*/0);
++		if (!err)
++			au_fsync_br(sb);
++		aufs_write_unlock(root);
++		goto out;
++	}
++
++	err = -ENOMEM;
++	memset(&opts, 0, sizeof(opts));
++	opts.opt = (void *)__get_free_page(GFP_NOFS);
++	if (unlikely(!opts.opt))
++		goto out;
++	opts.max_opt = PAGE_SIZE / sizeof(*opts.opt);
++	opts.flags = AuOpts_REMOUNT;
++	opts.sb_flags = *flags;
++
++	/* parse it before aufs lock */
++	err = au_opts_parse(sb, data, &opts);
++	if (unlikely(err))
++		goto out_opts;
++
++	sbinfo = au_sbi(sb);
++	inode = root->d_inode;
++	mutex_lock(&inode->i_mutex);
++	aufs_write_lock(root);
++	au_fsync_br(sb);
++
++	/* au_opts_remount() may return an error */
++	err = au_opts_remount(sb, &opts);
++	au_opts_free(&opts);
++
++	if (au_ftest_opts(opts.flags, REFRESH_DIR)
++	    || au_ftest_opts(opts.flags, REFRESH_NONDIR))
++		au_remount_refresh(sb, opts.flags);
++
++	aufs_write_unlock(root);
++	mutex_unlock(&inode->i_mutex);
++
++ out_opts:
++	free_page((unsigned long)opts.opt);
++ out:
++	err = cvt_err(err);
++	AuTraceErr(err);
++	return err;
++}
++
++static struct super_operations aufs_sop = {
++	.alloc_inode	= aufs_alloc_inode,
++	.destroy_inode	= aufs_destroy_inode,
++	.drop_inode	= generic_delete_inode,
++	.show_options	= aufs_show_options,
++	.statfs		= aufs_statfs,
++	.put_super	= aufs_put_super,
++	.remount_fs	= aufs_remount_fs
++};
++
++/* ---------------------------------------------------------------------- */
++
++static int alloc_root(struct super_block *sb)
++{
++	int err;
++	struct inode *inode;
++	struct dentry *root;
++
++	err = -ENOMEM;
++	inode = au_iget_locked(sb, AUFS_ROOT_INO);
++	err = PTR_ERR(inode);
++	if (IS_ERR(inode))
++		goto out;
++
++	inode->i_op = &aufs_dir_iop;
++	inode->i_fop = &aufs_dir_fop;
++	inode->i_mode = S_IFDIR;
++	inode->i_nlink = 2;
++	unlock_new_inode(inode);
++
++	root = d_alloc_root(inode);
++	if (unlikely(!root))
++		goto out_iput;
++	err = PTR_ERR(root);
++	if (IS_ERR(root))
++		goto out_iput;
++
++	err = au_alloc_dinfo(root);
++	if (!err) {
++		sb->s_root = root;
++		return 0; /* success */
++	}
++	dput(root);
++	goto out; /* do not iput */
++
++ out_iput:
++	iget_failed(inode);
++	iput(inode);
++ out:
++	return err;
++
++}
++
++static int aufs_fill_super(struct super_block *sb, void *raw_data,
++			   int silent __maybe_unused)
++{
++	int err;
++	struct au_opts opts;
++	struct dentry *root;
++	struct inode *inode;
++	char *arg = raw_data;
++
++	if (unlikely(!arg || !*arg)) {
++		err = -EINVAL;
++		AuErr("no arg\n");
++		goto out;
++	}
++
++	err = -ENOMEM;
++	memset(&opts, 0, sizeof(opts));
++	opts.opt = (void *)__get_free_page(GFP_NOFS);
++	if (unlikely(!opts.opt))
++		goto out;
++	opts.max_opt = PAGE_SIZE / sizeof(*opts.opt);
++	opts.sb_flags = sb->s_flags;
++
++	err = au_si_alloc(sb);
++	if (unlikely(err))
++		goto out_opts;
++
++	/* all timestamps always follow the ones on the branch */
++	sb->s_flags |= MS_NOATIME | MS_NODIRATIME;
++	sb->s_op = &aufs_sop;
++	sb->s_magic = AUFS_SUPER_MAGIC;
++	sb->s_maxbytes = 0;
++	au_export_init(sb);
++
++	err = alloc_root(sb);
++	if (unlikely(err)) {
++		si_write_unlock(sb);
++		goto out_info;
++	}
++	root = sb->s_root;
++	inode = root->d_inode;
++
++	/*
++	 * actually we could parse options here regardless of the aufs lock,
++	 * but at remount time, parsing must be done before aufs lock.
++	 * so we follow the same rule.
++	 */
++	ii_write_lock_parent(inode);
++	aufs_write_unlock(root);
++	err = au_opts_parse(sb, arg, &opts);
++	if (unlikely(err))
++		goto out_root;
++
++	/* lock vfs_inode first, then aufs. */
++	mutex_lock(&inode->i_mutex);
++	inode->i_op = &aufs_dir_iop;
++	inode->i_fop = &aufs_dir_fop;
++	aufs_write_lock(root);
++	err = au_opts_mount(sb, &opts);
++	au_opts_free(&opts);
++	if (unlikely(err))
++		goto out_unlock;
++	aufs_write_unlock(root);
++	mutex_unlock(&inode->i_mutex);
++	goto out_opts; /* success */
++
++ out_unlock:
++	aufs_write_unlock(root);
++	mutex_unlock(&inode->i_mutex);
++ out_root:
++	dput(root);
++	sb->s_root = NULL;
++ out_info:
++	kobject_put(&au_sbi(sb)->si_kobj);
++	sb->s_fs_info = NULL;
++ out_opts:
++	free_page((unsigned long)opts.opt);
++ out:
++	AuTraceErr(err);
++	err = cvt_err(err);
++	AuTraceErr(err);
++	return err;
++}
++
++/* ---------------------------------------------------------------------- */
++
++static int aufs_get_sb(struct file_system_type *fs_type, int flags,
++		       const char *dev_name __maybe_unused, void *raw_data,
++		       struct vfsmount *mnt)
++{
++	int err;
++	struct super_block *sb;
++
++	/* all timestamps always follow the ones on the branch */
++	/* mnt->mnt_flags |= MNT_NOATIME | MNT_NODIRATIME; */
++	err = get_sb_nodev(fs_type, flags, raw_data, aufs_fill_super, mnt);
++	if (!err) {
++		sb = mnt->mnt_sb;
++		si_write_lock(sb);
++		sysaufs_brs_add(sb, 0);
++		si_write_unlock(sb);
++	}
++	return err;
++}
++
++struct file_system_type aufs_fs_type = {
++	.name		= AUFS_FSTYPE,
++	.fs_flags	=
++		FS_RENAME_DOES_D_MOVE	/* a race between rename and others */
++		| FS_REVAL_DOT,		/* for NFS branch and udba */
++	.get_sb		= aufs_get_sb,
++	.kill_sb	= generic_shutdown_super,
++	/* no need to __module_get() and module_put(). */
++	.owner		= THIS_MODULE,
++};
+diff -Nur linux-2.6.31.5.orig/fs/aufs/super.h linux-2.6.31.5/fs/aufs/super.h
+--- linux-2.6.31.5.orig/fs/aufs/super.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.31.5/fs/aufs/super.h	2009-11-15 22:02:37.000000000 +0100
+@@ -0,0 +1,384 @@
++/*
++ * Copyright (C) 2005-2009 Junjiro R. Okajima
++ *
++ * This program, aufs is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
++ */
++
++/*
++ * super_block operations
++ */
++
++#ifndef __AUFS_SUPER_H__
++#define __AUFS_SUPER_H__
++
++#ifdef __KERNEL__
++
++#include <linux/fs.h>
++#include <linux/aufs_type.h>
++#include "rwsem.h"
++#include "spl.h"
++#include "wkq.h"
++
++typedef ssize_t (*au_readf_t)(struct file *, char __user *, size_t, loff_t *);
++typedef ssize_t (*au_writef_t)(struct file *, const char __user *, size_t,
++			       loff_t *);
++
++/* policies to select one among multiple writable branches */
++struct au_wbr_copyup_operations {
++	int (*copyup)(struct dentry *dentry);
++};
++
++struct au_wbr_create_operations {
++	int (*create)(struct dentry *dentry, int isdir);
++	int (*init)(struct super_block *sb);
++	int (*fin)(struct super_block *sb);
++};
++
++struct au_wbr_mfs {
++	struct mutex	mfs_lock; /* protect this structure */
++	unsigned long	mfs_jiffy;
++	unsigned long	mfs_expire;
++	aufs_bindex_t	mfs_bindex;
++
++	unsigned long long	mfsrr_bytes;
++	unsigned long long	mfsrr_watermark;
++};
++
++struct au_branch;
++struct au_sbinfo {
++	/* nowait tasks in the system-wide workqueue */
++	struct au_nowait_tasks	si_nowait;
++
++	struct au_rwsem		si_rwsem;
++
++	/* branch management */
++	unsigned int		si_generation;
++
++	/* see above flags */
++	unsigned char		au_si_status;
++
++	aufs_bindex_t		si_bend;
++	aufs_bindex_t		si_last_br_id;
++	struct au_branch	**si_branch;
++
++	/* policy to select a writable branch */
++	unsigned char		si_wbr_copyup;
++	unsigned char		si_wbr_create;
++	struct au_wbr_copyup_operations *si_wbr_copyup_ops;
++	struct au_wbr_create_operations *si_wbr_create_ops;
++
++	/* round robin */
++	atomic_t		si_wbr_rr_next;
++
++	/* most free space */
++	struct au_wbr_mfs	si_wbr_mfs;
++
++	/* mount flags */
++	/* include/asm-ia64/siginfo.h defines a macro named si_flags */
++	unsigned int		si_mntflags;
++
++	/* external inode number (bitmap and translation table) */
++	au_readf_t		si_xread;
++	au_writef_t		si_xwrite;
++	struct file		*si_xib;
++	struct mutex		si_xib_mtx; /* protect xib members */
++	unsigned long		*si_xib_buf;
++	unsigned long		si_xib_last_pindex;
++	int			si_xib_next_bit;
++	aufs_bindex_t		si_xino_brid;
++	/* reserved for future use */
++	/* unsigned long long	si_xib_limit; */	/* Max xib file size */
++
++#ifdef CONFIG_AUFS_EXPORT
++	/* i_generation */
++	struct file		*si_xigen;
++	atomic_t		si_xigen_next;
++#endif
++
++	/* vdir parameters */
++	unsigned long		si_rdcache;	/* max cache time in HZ */
++	unsigned int		si_rdblk;	/* deblk size */
++	unsigned int		si_rdhash;	/* hash size */
++
++	/*
++	 * If the number of whiteouts are larger than si_dirwh, leave all of
++	 * them after au_whtmp_ren to reduce the cost of rmdir(2).
++	 * future fsck.aufs or kernel thread will remove them later.
++	 * Otherwise, remove all whiteouts and the dir in rmdir(2).
++	 */
++	unsigned int		si_dirwh;
++
++	/*
++	 * rename(2) a directory with all children.
++	 */
++	/* reserved for future use */
++	/* int			si_rendir; */
++
++	/* pseudo_link list */
++	struct au_splhead	si_plink;
++	wait_queue_head_t	si_plink_wq;
++
++	/*
++	 * sysfs and lifetime management.
++	 * this is not a small structure and it may be a waste of memory in case
++	 * of sysfs is disabled, particularly when many aufs-es are mounted.
++	 * but using sysfs is majority.
++	 */
++	struct kobject		si_kobj;
++#ifdef CONFIG_DEBUG_FS
++	struct dentry		 *si_dbgaufs, *si_dbgaufs_xib;
++#ifdef CONFIG_AUFS_EXPORT
++	struct dentry		 *si_dbgaufs_xigen;
++#endif
++#endif
++
++	/* dirty, necessary for unmounting, sysfs and sysrq */
++	struct super_block	*si_sb;
++};
++
++/* sbinfo status flags */
++/*
++ * set true when refresh_dirs() failed at remount time.
++ * then try refreshing dirs at access time again.
++ * if it is false, refreshing dirs at access time is unnecessary
++ */
++#define AuSi_FAILED_REFRESH_DIRS	1
++#define AuSi_MAINTAIN_PLINK		(1 << 1)	/* ioctl */
++static inline unsigned char au_do_ftest_si(struct au_sbinfo *sbi,
++					   unsigned int flag)
++{
++	AuRwMustAnyLock(&sbi->si_rwsem);
++	return sbi->au_si_status & flag;
++}
++#define au_ftest_si(sbinfo, name)	au_do_ftest_si(sbinfo, AuSi_##name)
++#define au_fset_si(sbinfo, name) do { \
++	AuRwMustWriteLock(&(sbinfo)->si_rwsem); \
++	(sbinfo)->au_si_status |= AuSi_##name; \
++} while (0)
++#define au_fclr_si(sbinfo, name) do { \
++	AuRwMustWriteLock(&(sbinfo)->si_rwsem); \
++	(sbinfo)->au_si_status &= ~AuSi_##name; \
++} while (0)
++
++/* ---------------------------------------------------------------------- */
++
++/* policy to select one among writable branches */
++#define AuWbrCopyup(sbinfo, args...) \
++	((sbinfo)->si_wbr_copyup_ops->copyup(args))
++#define AuWbrCreate(sbinfo, args...) \
++	((sbinfo)->si_wbr_create_ops->create(args))
++
++/* flags for si_read_lock()/aufs_read_lock()/di_read_lock() */
++#define AuLock_DW		1		/* write-lock dentry */
++#define AuLock_IR		(1 << 1)	/* read-lock inode */
++#define AuLock_IW		(1 << 2)	/* write-lock inode */
++#define AuLock_FLUSH		(1 << 3)	/* wait for 'nowait' tasks */
++#define AuLock_DIR		(1 << 4)	/* target is a dir */
++#define au_ftest_lock(flags, name)	((flags) & AuLock_##name)
++#define au_fset_lock(flags, name)	{ (flags) |= AuLock_##name; }
++#define au_fclr_lock(flags, name)	{ (flags) &= ~AuLock_##name; }
++
++/* ---------------------------------------------------------------------- */
++
++/* super.c */
++extern struct file_system_type aufs_fs_type;
++struct inode *au_iget_locked(struct super_block *sb, ino_t ino);
++
++/* sbinfo.c */
++void au_si_free(struct kobject *kobj);
++int au_si_alloc(struct super_block *sb);
++int au_sbr_realloc(struct au_sbinfo *sbinfo, int nbr);
++
++unsigned int au_sigen_inc(struct super_block *sb);
++aufs_bindex_t au_new_br_id(struct super_block *sb);
++
++void aufs_read_lock(struct dentry *dentry, int flags);
++void aufs_read_unlock(struct dentry *dentry, int flags);
++void aufs_write_lock(struct dentry *dentry);
++void aufs_write_unlock(struct dentry *dentry);
++void aufs_read_and_write_lock2(struct dentry *d1, struct dentry *d2, int isdir);
++void aufs_read_and_write_unlock2(struct dentry *d1, struct dentry *d2);
++
++/* wbr_policy.c */
++extern struct au_wbr_copyup_operations au_wbr_copyup_ops[];
++extern struct au_wbr_create_operations au_wbr_create_ops[];
++int au_cpdown_dirs(struct dentry *dentry, aufs_bindex_t bdst);
++
++/* ---------------------------------------------------------------------- */
++
++static inline struct au_sbinfo *au_sbi(struct super_block *sb)
++{
++	return sb->s_fs_info;
++}
++
++/* ---------------------------------------------------------------------- */
++
++#ifdef CONFIG_AUFS_EXPORT
++void au_export_init(struct super_block *sb);
++
++static inline int au_test_nfsd(struct task_struct *tsk)
++{
++	return !tsk->mm && !strcmp(tsk->comm, "nfsd");
++}
++
++int au_xigen_inc(struct inode *inode);
++int au_xigen_new(struct inode *inode);
++int au_xigen_set(struct super_block *sb, struct file *base);
++void au_xigen_clr(struct super_block *sb);
++
++static inline int au_busy_or_stale(void)
++{
++	if (!au_test_nfsd(current))
++		return -EBUSY;
++	return -ESTALE;
++}
++#else
++static inline void au_export_init(struct super_block *sb)
++{
++	/* nothing */
++}
++
++static inline int au_test_nfsd(struct task_struct *tsk)
++{
++	return 0;
++}
++
++static inline int au_xigen_inc(struct inode *inode)
++{
++	return 0;
++}
++
++static inline int au_xigen_new(struct inode *inode)
++{
++	return 0;
++}
++
++static inline int au_xigen_set(struct super_block *sb, struct file *base)
++{
++	return 0;
++}
++
++static inline void au_xigen_clr(struct super_block *sb)
++{
++	/* empty */
++}
++
++static inline int au_busy_or_stale(void)
++{
++	return -EBUSY;
++}
++#endif /* CONFIG_AUFS_EXPORT */
++
++/* ---------------------------------------------------------------------- */
++
++static inline void dbgaufs_si_null(struct au_sbinfo *sbinfo)
++{
++	/*
++	 * This function is a dynamic '__init' function actually,
++	 * so the tiny check for si_rwsem is unnecessary.
++	 */
++	/* AuRwMustWriteLock(&sbinfo->si_rwsem); */
++#ifdef CONFIG_DEBUG_FS
++	sbinfo->si_dbgaufs = NULL;
++	sbinfo->si_dbgaufs_xib = NULL;
++#ifdef CONFIG_AUFS_EXPORT
++	sbinfo->si_dbgaufs_xigen = NULL;
++#endif
++#endif
++}
++
++/* ---------------------------------------------------------------------- */
++
++/* lock superblock. mainly for entry point functions */
++/*
++ * si_noflush_read_lock, si_noflush_write_lock,
++ * si_read_unlock, si_write_unlock, si_downgrade_lock
++ */
++AuSimpleLockRwsemFuncs(si_noflush, struct super_block *sb,
++		       &au_sbi(sb)->si_rwsem);
++AuSimpleUnlockRwsemFuncs(si, struct super_block *sb, &au_sbi(sb)->si_rwsem);
++
++#define SiMustNoWaiters(sb)	AuRwMustNoWaiters(&au_sbi(sb)->si_rwsem)
++#define SiMustAnyLock(sb)	AuRwMustAnyLock(&au_sbi(sb)->si_rwsem)
++#define SiMustWriteLock(sb)	AuRwMustWriteLock(&au_sbi(sb)->si_rwsem)
++
++static inline void si_read_lock(struct super_block *sb, int flags)
++{
++	if (au_ftest_lock(flags, FLUSH))
++		au_nwt_flush(&au_sbi(sb)->si_nowait);
++	si_noflush_read_lock(sb);
++}
++
++static inline void si_write_lock(struct super_block *sb)
++{
++	au_nwt_flush(&au_sbi(sb)->si_nowait);
++	si_noflush_write_lock(sb);
++}
++
++static inline int si_read_trylock(struct super_block *sb, int flags)
++{
++	if (au_ftest_lock(flags, FLUSH))
++		au_nwt_flush(&au_sbi(sb)->si_nowait);
++	return si_noflush_read_trylock(sb);
++}
++
++static inline int si_write_trylock(struct super_block *sb, int flags)
++{
++	if (au_ftest_lock(flags, FLUSH))
++		au_nwt_flush(&au_sbi(sb)->si_nowait);
++	return si_noflush_write_trylock(sb);
++}
++
++/* ---------------------------------------------------------------------- */
++
++static inline aufs_bindex_t au_sbend(struct super_block *sb)
++{
++	SiMustAnyLock(sb);
++	return au_sbi(sb)->si_bend;
++}
++
++static inline unsigned int au_mntflags(struct super_block *sb)
++{
++	SiMustAnyLock(sb);
++	return au_sbi(sb)->si_mntflags;
++}
++
++static inline unsigned int au_sigen(struct super_block *sb)
++{
++	SiMustAnyLock(sb);
++	return au_sbi(sb)->si_generation;
++}
++
++static inline struct au_branch *au_sbr(struct super_block *sb,
++				       aufs_bindex_t bindex)
++{
++	SiMustAnyLock(sb);
++	return au_sbi(sb)->si_branch[0 + bindex];
++}
++
++static inline void au_xino_brid_set(struct super_block *sb, aufs_bindex_t brid)
++{
++	SiMustWriteLock(sb);
++	au_sbi(sb)->si_xino_brid = brid;
++}
++
++static inline aufs_bindex_t au_xino_brid(struct super_block *sb)
++{
++	SiMustAnyLock(sb);
++	return au_sbi(sb)->si_xino_brid;
++}
++
++#endif /* __KERNEL__ */
++#endif /* __AUFS_SUPER_H__ */
+diff -Nur linux-2.6.31.5.orig/fs/aufs/sysaufs.c linux-2.6.31.5/fs/aufs/sysaufs.c
+--- linux-2.6.31.5.orig/fs/aufs/sysaufs.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.31.5/fs/aufs/sysaufs.c	2009-11-15 22:02:37.000000000 +0100
+@@ -0,0 +1,104 @@
++/*
++ * Copyright (C) 2005-2009 Junjiro R. Okajima
++ *
++ * This program, aufs is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
++ */
++
++/*
++ * sysfs interface and lifetime management
++ * they are necessary regardless of whether sysfs is disabled.
++ */
++
++#include <linux/fs.h>
++#include <linux/random.h>
++#include <linux/sysfs.h>
++#include "aufs.h"
++
++unsigned long sysaufs_si_mask;
++struct kset *sysaufs_ket;
++
++#define AuSiAttr(_name) { \
++	.attr   = { .name = __stringify(_name), .mode = 0444 },	\
++	.show   = sysaufs_si_##_name,				\
++}
++
++static struct sysaufs_si_attr sysaufs_si_attr_xi_path = AuSiAttr(xi_path);
++struct attribute *sysaufs_si_attrs[] = {
++	&sysaufs_si_attr_xi_path.attr,
++	NULL,
++};
++
++static struct sysfs_ops au_sbi_ops = {
++	.show   = sysaufs_si_show
++};
++
++static struct kobj_type au_sbi_ktype = {
++	.release	= au_si_free,
++	.sysfs_ops	= &au_sbi_ops,
++	.default_attrs	= sysaufs_si_attrs
++};
++
++/* ---------------------------------------------------------------------- */
++
++int sysaufs_si_init(struct au_sbinfo *sbinfo)
++{
++	int err;
++
++	sbinfo->si_kobj.kset = sysaufs_ket;
++	/* cf. sysaufs_name() */
++	err = kobject_init_and_add
++		(&sbinfo->si_kobj, &au_sbi_ktype, /*&sysaufs_ket->kobj*/NULL,
++		 SysaufsSiNamePrefix "%lx", sysaufs_si_id(sbinfo));
++
++	dbgaufs_si_null(sbinfo);
++	if (!err) {
++		err = dbgaufs_si_init(sbinfo);
++		if (unlikely(err))
++			kobject_put(&sbinfo->si_kobj);
++	}
++	return err;
++}
++
++void sysaufs_fin(void)
++{
++	dbgaufs_fin();
++	sysfs_remove_group(&sysaufs_ket->kobj, sysaufs_attr_group);
++	kset_unregister(sysaufs_ket);
++}
++
++int __init sysaufs_init(void)
++{
++	int err;
++
++	do {
++		get_random_bytes(&sysaufs_si_mask, sizeof(sysaufs_si_mask));
++	} while (!sysaufs_si_mask);
++
++	sysaufs_ket = kset_create_and_add(AUFS_NAME, NULL, fs_kobj);
++	err = PTR_ERR(sysaufs_ket);
++	if (IS_ERR(sysaufs_ket))
++		goto out;
++	err = sysfs_create_group(&sysaufs_ket->kobj, sysaufs_attr_group);
++	if (unlikely(err)) {
++		kset_unregister(sysaufs_ket);
++		goto out;
++	}
++
++	err = dbgaufs_init();
++	if (unlikely(err))
++		sysaufs_fin();
++ out:
++	return err;
++}
+diff -Nur linux-2.6.31.5.orig/fs/aufs/sysaufs.h linux-2.6.31.5/fs/aufs/sysaufs.h
+--- linux-2.6.31.5.orig/fs/aufs/sysaufs.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.31.5/fs/aufs/sysaufs.h	2009-11-15 22:02:37.000000000 +0100
+@@ -0,0 +1,120 @@
++/*
++ * Copyright (C) 2005-2009 Junjiro R. Okajima
++ *
++ * This program, aufs is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
++ */
++
++/*
++ * sysfs interface and mount lifetime management
++ */
++
++#ifndef __SYSAUFS_H__
++#define __SYSAUFS_H__
++
++#ifdef __KERNEL__
++
++#include <linux/sysfs.h>
++#include <linux/aufs_type.h>
++#include "module.h"
++
++struct super_block;
++struct au_sbinfo;
++
++struct sysaufs_si_attr {
++	struct attribute attr;
++	int (*show)(struct seq_file *seq, struct super_block *sb);
++};
++
++/* ---------------------------------------------------------------------- */
++
++/* sysaufs.c */
++extern unsigned long sysaufs_si_mask;
++extern struct kset *sysaufs_ket;
++extern struct attribute *sysaufs_si_attrs[];
++int sysaufs_si_init(struct au_sbinfo *sbinfo);
++int __init sysaufs_init(void);
++void sysaufs_fin(void);
++
++/* ---------------------------------------------------------------------- */
++
++/* some people don't like to show a pointer in the kernel */
++static inline unsigned long sysaufs_si_id(struct au_sbinfo *sbinfo)
++{
++	return sysaufs_si_mask ^ (unsigned long)sbinfo;
++}
++
++#define SysaufsSiNamePrefix	"si_"
++#define SysaufsSiNameLen	(sizeof(SysaufsSiNamePrefix) + 16)
++static inline void sysaufs_name(struct au_sbinfo *sbinfo, char *name)
++{
++	snprintf(name, SysaufsSiNameLen, SysaufsSiNamePrefix "%lx",
++		 sysaufs_si_id(sbinfo));
++}
++
++struct au_branch;
++#ifdef CONFIG_SYSFS
++/* sysfs.c */
++extern struct attribute_group *sysaufs_attr_group;
++
++int sysaufs_si_xi_path(struct seq_file *seq, struct super_block *sb);
++ssize_t sysaufs_si_show(struct kobject *kobj, struct attribute *attr,
++			 char *buf);
++
++void sysaufs_br_init(struct au_branch *br);
++void sysaufs_brs_add(struct super_block *sb, aufs_bindex_t bindex);
++void sysaufs_brs_del(struct super_block *sb, aufs_bindex_t bindex);
++
++#define sysaufs_brs_init()	do {} while (0)
++
++#else
++#define sysaufs_attr_group	NULL
++
++static inline
++int sysaufs_si_xi_path(struct seq_file *seq, struct super_block *sb)
++{
++	return 0;
++}
++
++static inline
++ssize_t sysaufs_si_show(struct kobject *kobj, struct attribute *attr,
++			 char *buf)
++{
++	return 0;
++}
++
++static inline void sysaufs_br_init(struct au_branch *br)
++{
++	/* empty */
++}
++
++static inline void sysaufs_brs_add(struct super_block *sb, aufs_bindex_t bindex)
++{
++	/* nothing */
++}
++
++static inline void sysaufs_brs_del(struct super_block *sb, aufs_bindex_t bindex)
++{
++	/* nothing */
++}
++
++static inline void sysaufs_brs_init(void)
++{
++	sysaufs_brs = 0;
++}
++
++#endif /* CONFIG_SYSFS */
++
++#endif /* __KERNEL__ */
++#endif /* __SYSAUFS_H__ */
+diff -Nur linux-2.6.31.5.orig/fs/aufs/sysfs.c linux-2.6.31.5/fs/aufs/sysfs.c
+--- linux-2.6.31.5.orig/fs/aufs/sysfs.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.31.5/fs/aufs/sysfs.c	2009-11-15 22:02:37.000000000 +0100
+@@ -0,0 +1,210 @@
++/*
++ * Copyright (C) 2005-2009 Junjiro R. Okajima
++ *
++ * This program, aufs is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
++ */
++
++/*
++ * sysfs interface
++ */
++
++#include <linux/fs.h>
++#include <linux/module.h>
++#include <linux/seq_file.h>
++#include <linux/sysfs.h>
++#include "aufs.h"
++
++static struct attribute *au_attr[] = {
++	NULL,	/* need to NULL terminate the list of attributes */
++};
++
++static struct attribute_group sysaufs_attr_group_body = {
++	.attrs = au_attr
++};
++
++struct attribute_group *sysaufs_attr_group = &sysaufs_attr_group_body;
++
++/* ---------------------------------------------------------------------- */
++
++int sysaufs_si_xi_path(struct seq_file *seq, struct super_block *sb)
++{
++	int err;
++
++	SiMustAnyLock(sb);
++
++	err = 0;
++	if (au_opt_test(au_mntflags(sb), XINO)) {
++		err = au_xino_path(seq, au_sbi(sb)->si_xib);
++		seq_putc(seq, '\n');
++	}
++	return err;
++}
++
++/*
++ * the lifetime of branch is independent from the entry under sysfs.
++ * sysfs handles the lifetime of the entry, and never call ->show() after it is
++ * unlinked.
++ */
++static int sysaufs_si_br(struct seq_file *seq, struct super_block *sb,
++			 aufs_bindex_t bindex)
++{
++	struct path path;
++	struct dentry *root;
++	struct au_branch *br;
++
++	AuDbg("b%d\n", bindex);
++
++	root = sb->s_root;
++	di_read_lock_parent(root, !AuLock_IR);
++	br = au_sbr(sb, bindex);
++	path.mnt = br->br_mnt;
++	path.dentry = au_h_dptr(root, bindex);
++	au_seq_path(seq, &path);
++	di_read_unlock(root, !AuLock_IR);
++	seq_printf(seq, "=%s\n", au_optstr_br_perm(br->br_perm));
++	return 0;
++}
++
++/* ---------------------------------------------------------------------- */
++
++static struct seq_file *au_seq(char *p, ssize_t len)
++{
++	struct seq_file *seq;
++
++	seq = kzalloc(sizeof(*seq), GFP_NOFS);
++	if (seq) {
++		/* mutex_init(&seq.lock); */
++		seq->buf = p;
++		seq->size = len;
++		return seq; /* success */
++	}
++
++	seq = ERR_PTR(-ENOMEM);
++	return seq;
++}
++
++#define SysaufsBr_PREFIX "br"
++
++/* todo: file size may exceed PAGE_SIZE */
++ssize_t sysaufs_si_show(struct kobject *kobj, struct attribute *attr,
++			 char *buf)
++{
++	ssize_t err;
++	long l;
++	aufs_bindex_t bend;
++	struct au_sbinfo *sbinfo;
++	struct super_block *sb;
++	struct seq_file *seq;
++	char *name;
++	struct attribute **cattr;
++
++	sbinfo = container_of(kobj, struct au_sbinfo, si_kobj);
++	sb = sbinfo->si_sb;
++	si_noflush_read_lock(sb);
++
++	seq = au_seq(buf, PAGE_SIZE);
++	err = PTR_ERR(seq);
++	if (IS_ERR(seq))
++		goto out;
++
++	name = (void *)attr->name;
++	cattr = sysaufs_si_attrs;
++	while (*cattr) {
++		if (!strcmp(name, (*cattr)->name)) {
++			err = container_of(*cattr, struct sysaufs_si_attr, attr)
++				->show(seq, sb);
++			goto out_seq;
++		}
++		cattr++;
++	}
++
++	bend = au_sbend(sb);
++	if (!strncmp(name, SysaufsBr_PREFIX, sizeof(SysaufsBr_PREFIX) - 1)) {
++		name += sizeof(SysaufsBr_PREFIX) - 1;
++		err = strict_strtol(name, 10, &l);
++		if (!err) {
++			if (l <= bend)
++				err = sysaufs_si_br(seq, sb, (aufs_bindex_t)l);
++			else
++				err = -ENOENT;
++		}
++		goto out_seq;
++	}
++	BUG();
++
++ out_seq:
++	if (!err) {
++		err = seq->count;
++		/* sysfs limit */
++		if (unlikely(err == PAGE_SIZE))
++			err = -EFBIG;
++	}
++	kfree(seq);
++ out:
++	si_read_unlock(sb);
++	return err;
++}
++
++/* ---------------------------------------------------------------------- */
++
++void sysaufs_br_init(struct au_branch *br)
++{
++	br->br_attr.name = br->br_name;
++	br->br_attr.mode = S_IRUGO;
++	br->br_attr.owner = THIS_MODULE;
++}
++
++void sysaufs_brs_del(struct super_block *sb, aufs_bindex_t bindex)
++{
++	struct au_branch *br;
++	struct kobject *kobj;
++	aufs_bindex_t bend;
++
++	dbgaufs_brs_del(sb, bindex);
++
++	if (!sysaufs_brs)
++		return;
++
++	kobj = &au_sbi(sb)->si_kobj;
++	bend = au_sbend(sb);
++	for (; bindex <= bend; bindex++) {
++		br = au_sbr(sb, bindex);
++		sysfs_remove_file(kobj, &br->br_attr);
++	}
++}
++
++void sysaufs_brs_add(struct super_block *sb, aufs_bindex_t bindex)
++{
++	int err;
++	aufs_bindex_t bend;
++	struct kobject *kobj;
++	struct au_branch *br;
++
++	dbgaufs_brs_add(sb, bindex);
++
++	if (!sysaufs_brs)
++		return;
++
++	kobj = &au_sbi(sb)->si_kobj;
++	bend = au_sbend(sb);
++	for (; bindex <= bend; bindex++) {
++		br = au_sbr(sb, bindex);
++		snprintf(br->br_name, sizeof(br->br_name), SysaufsBr_PREFIX
++			 "%d", bindex);
++		err = sysfs_create_file(kobj, &br->br_attr);
++		if (unlikely(err))
++			AuWarn("failed %s under sysfs(%d)\n", br->br_name, err);
++	}
++}
+diff -Nur linux-2.6.31.5.orig/fs/aufs/sysrq.c linux-2.6.31.5/fs/aufs/sysrq.c
+--- linux-2.6.31.5.orig/fs/aufs/sysrq.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.31.5/fs/aufs/sysrq.c	2009-11-15 22:02:37.000000000 +0100
+@@ -0,0 +1,115 @@
++/*
++ * Copyright (C) 2005-2009 Junjiro R. Okajima
++ *
++ * This program, aufs is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
++ */
++
++/*
++ * magic sysrq handler
++ */
++
++#include <linux/fs.h>
++#include <linux/module.h>
++#include <linux/moduleparam.h>
++/* #include <linux/sysrq.h> */
++#include "aufs.h"
++
++/* ---------------------------------------------------------------------- */
++
++static void sysrq_sb(struct super_block *sb)
++{
++	char *plevel;
++	struct au_sbinfo *sbinfo;
++	struct file *file;
++
++	plevel = au_plevel;
++	au_plevel = KERN_WARNING;
++	au_debug(1);
++
++	sbinfo = au_sbi(sb);
++	pr_warning("si=%lx\n", sysaufs_si_id(sbinfo));
++	pr_warning(AUFS_NAME ": superblock\n");
++	au_dpri_sb(sb);
++	pr_warning(AUFS_NAME ": root dentry\n");
++	au_dpri_dentry(sb->s_root);
++	pr_warning(AUFS_NAME ": root inode\n");
++	au_dpri_inode(sb->s_root->d_inode);
++#if 0
++	struct inode *i;
++	pr_warning(AUFS_NAME ": isolated inode\n");
++	list_for_each_entry(i, &sb->s_inodes, i_sb_list)
++		if (list_empty(&i->i_dentry))
++			au_dpri_inode(i);
++#endif
++	pr_warning(AUFS_NAME ": files\n");
++	list_for_each_entry(file, &sb->s_files, f_u.fu_list)
++		if (!special_file(file->f_dentry->d_inode->i_mode))
++			au_dpri_file(file);
++
++	au_plevel = plevel;
++	au_debug(0);
++}
++
++/* ---------------------------------------------------------------------- */
++
++/* module parameter */
++static char *aufs_sysrq_key = "a";
++module_param_named(sysrq, aufs_sysrq_key, charp, S_IRUGO);
++MODULE_PARM_DESC(sysrq, "MagicSysRq key for " AUFS_NAME);
++
++static void au_sysrq(int key __maybe_unused,
++		     struct tty_struct *tty __maybe_unused)
++{
++	struct kobject *kobj;
++	struct au_sbinfo *sbinfo;
++
++	/* spin_lock(&sysaufs_ket->list_lock); */
++	list_for_each_entry(kobj, &sysaufs_ket->list, entry) {
++		sbinfo = container_of(kobj, struct au_sbinfo, si_kobj);
++		sysrq_sb(sbinfo->si_sb);
++	}
++	/* spin_unlock(&sysaufs_ket->list_lock); */
++}
++
++static struct sysrq_key_op au_sysrq_op = {
++	.handler	= au_sysrq,
++	.help_msg	= "Aufs",
++	.action_msg	= "Aufs",
++	.enable_mask	= SYSRQ_ENABLE_DUMP
++};
++
++/* ---------------------------------------------------------------------- */
++
++int __init au_sysrq_init(void)
++{
++	int err;
++	char key;
++
++	err = -1;
++	key = *aufs_sysrq_key;
++	if ('a' <= key && key <= 'z')
++		err = register_sysrq_key(key, &au_sysrq_op);
++	if (unlikely(err))
++		AuErr("err %d, sysrq=%c\n", err, key);
++	return err;
++}
++
++void au_sysrq_fin(void)
++{
++	int err;
++	err = unregister_sysrq_key(*aufs_sysrq_key, &au_sysrq_op);
++	if (unlikely(err))
++		AuErr("err %d (ignored)\n", err);
++}
+diff -Nur linux-2.6.31.5.orig/fs/aufs/vdir.c linux-2.6.31.5/fs/aufs/vdir.c
+--- linux-2.6.31.5.orig/fs/aufs/vdir.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.31.5/fs/aufs/vdir.c	2009-11-15 22:02:37.000000000 +0100
+@@ -0,0 +1,882 @@
++/*
++ * Copyright (C) 2005-2009 Junjiro R. Okajima
++ *
++ * This program, aufs is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
++ */
++
++/*
++ * virtual or vertical directory
++ */
++
++#include <linux/hash.h>
++#include "aufs.h"
++
++static unsigned int calc_size(int nlen)
++{
++	BUILD_BUG_ON(sizeof(ino_t) != sizeof(long));
++	return ALIGN(sizeof(struct au_vdir_de) + nlen, sizeof(ino_t));
++}
++
++static int set_deblk_end(union au_vdir_deblk_p *p,
++			 union au_vdir_deblk_p *deblk_end)
++{
++	if (calc_size(0) <= deblk_end->deblk - p->deblk) {
++		p->de->de_str.len = 0;
++		/* smp_mb(); */
++		return 0;
++	}
++	return -1; /* error */
++}
++
++/* returns true or false */
++static int is_deblk_end(union au_vdir_deblk_p *p,
++			union au_vdir_deblk_p *deblk_end)
++{
++	if (calc_size(0) <= deblk_end->deblk - p->deblk)
++		return !p->de->de_str.len;
++	return 1;
++}
++
++static unsigned char *last_deblk(struct au_vdir *vdir)
++{
++	return vdir->vd_deblk[vdir->vd_nblk - 1];
++}
++
++/* ---------------------------------------------------------------------- */
++
++/*
++ * the allocated memory has to be freed by
++ * au_nhash_wh_free() or au_nhash_de_free().
++ */
++int au_nhash_alloc(struct au_nhash *nhash, unsigned int num_hash, gfp_t gfp)
++{
++	struct hlist_head *head;
++	unsigned int u;
++
++	head = kmalloc(sizeof(*nhash->nh_head) * num_hash, gfp);
++	if (head) {
++		nhash->nh_num = num_hash;
++		nhash->nh_head = head;
++		for (u = 0; u < num_hash; u++)
++			INIT_HLIST_HEAD(head++);
++		return 0; /* success */
++	}
++
++	return -ENOMEM;
++}
++
++static void nhash_count(struct hlist_head *head)
++{
++#if 0
++	unsigned long n;
++	struct hlist_node *pos;
++
++	n = 0;
++	hlist_for_each(pos, head)
++		n++;
++	AuInfo("%lu\n", n);
++#endif
++}
++
++static void au_nhash_wh_do_free(struct hlist_head *head)
++{
++	struct au_vdir_wh *tpos;
++	struct hlist_node *pos, *node;
++
++	hlist_for_each_entry_safe(tpos, pos, node, head, wh_hash) {
++		/* hlist_del(pos); */
++		kfree(tpos);
++	}
++}
++
++static void au_nhash_de_do_free(struct hlist_head *head)
++{
++	struct au_vdir_dehstr *tpos;
++	struct hlist_node *pos, *node;
++
++	hlist_for_each_entry_safe(tpos, pos, node, head, hash) {
++		/* hlist_del(pos); */
++		au_cache_free_dehstr(tpos);
++	}
++}
++
++static void au_nhash_do_free(struct au_nhash *nhash,
++			     void (*free)(struct hlist_head *head))
++{
++	unsigned int u, n;
++	struct hlist_head *head;
++
++	n = nhash->nh_num;
++	head = nhash->nh_head;
++	for (u = 0; u < n; u++) {
++		nhash_count(head);
++		free(head++);
++	}
++	kfree(nhash->nh_head);
++}
++
++void au_nhash_wh_free(struct au_nhash *whlist)
++{
++	au_nhash_do_free(whlist, au_nhash_wh_do_free);
++}
++
++static void au_nhash_de_free(struct au_nhash *delist)
++{
++	au_nhash_do_free(delist, au_nhash_de_do_free);
++}
++
++/* ---------------------------------------------------------------------- */
++
++int au_nhash_test_longer_wh(struct au_nhash *whlist, aufs_bindex_t btgt,
++			    int limit)
++{
++	int num;
++	unsigned int u, n;
++	struct hlist_head *head;
++	struct au_vdir_wh *tpos;
++	struct hlist_node *pos;
++
++	num = 0;
++	n = whlist->nh_num;
++	head = whlist->nh_head;
++	for (u = 0; u < n; u++) {
++		hlist_for_each_entry(tpos, pos, head, wh_hash)
++			if (tpos->wh_bindex == btgt && ++num > limit)
++				return 1;
++		head++;
++	}
++	return 0;
++}
++
++static struct hlist_head *au_name_hash(struct au_nhash *nhash,
++				       unsigned char *name,
++				       unsigned int len)
++{
++	unsigned int v;
++	/* const unsigned int magic_bit = 12; */
++
++	v = 0;
++	while (len--)
++		v += *name++;
++	/* v = hash_long(v, magic_bit); */
++	v %= nhash->nh_num;
++	return nhash->nh_head + v;
++}
++
++static int au_nhash_test_name(struct au_vdir_destr *str, const char *name,
++			      int nlen)
++{
++	return str->len == nlen && !memcmp(str->name, name, nlen);
++}
++
++/* returns found or not */
++int au_nhash_test_known_wh(struct au_nhash *whlist, char *name, int nlen)
++{
++	struct hlist_head *head;
++	struct au_vdir_wh *tpos;
++	struct hlist_node *pos;
++	struct au_vdir_destr *str;
++
++	head = au_name_hash(whlist, name, nlen);
++	hlist_for_each_entry(tpos, pos, head, wh_hash) {
++		str = &tpos->wh_str;
++		AuDbg("%.*s\n", str->len, str->name);
++		if (au_nhash_test_name(str, name, nlen))
++			return 1;
++	}
++	return 0;
++}
++
++/* returns found(true) or not */
++static int test_known(struct au_nhash *delist, char *name, int nlen)
++{
++	struct hlist_head *head;
++	struct au_vdir_dehstr *tpos;
++	struct hlist_node *pos;
++	struct au_vdir_destr *str;
++
++	head = au_name_hash(delist, name, nlen);
++	hlist_for_each_entry(tpos, pos, head, hash) {
++		str = tpos->str;
++		AuDbg("%.*s\n", str->len, str->name);
++		if (au_nhash_test_name(str, name, nlen))
++			return 1;
++	}
++	return 0;
++}
++
++static void au_shwh_init_wh(struct au_vdir_wh *wh, ino_t ino,
++			    unsigned char d_type)
++{
++#ifdef CONFIG_AUFS_SHWH
++	wh->wh_ino = ino;
++	wh->wh_type = d_type;
++#endif
++}
++
++/* ---------------------------------------------------------------------- */
++
++int au_nhash_append_wh(struct au_nhash *whlist, char *name, int nlen, ino_t ino,
++		       unsigned int d_type, aufs_bindex_t bindex,
++		       unsigned char shwh)
++{
++	int err;
++	struct au_vdir_destr *str;
++	struct au_vdir_wh *wh;
++
++	AuDbg("%.*s\n", nlen, name);
++	err = -ENOMEM;
++	wh = kmalloc(sizeof(*wh) + nlen, GFP_NOFS);
++	if (unlikely(!wh))
++		goto out;
++
++	err = 0;
++	wh->wh_bindex = bindex;
++	if (shwh)
++		au_shwh_init_wh(wh, ino, d_type);
++	str = &wh->wh_str;
++	str->len = nlen;
++	memcpy(str->name, name, nlen);
++	hlist_add_head(&wh->wh_hash, au_name_hash(whlist, name, nlen));
++	/* smp_mb(); */
++
++ out:
++	return err;
++}
++
++static int append_deblk(struct au_vdir *vdir)
++{
++	int err;
++	unsigned long ul;
++	const unsigned int deblk_sz = vdir->vd_deblk_sz;
++	union au_vdir_deblk_p p, deblk_end;
++	unsigned char **o;
++
++	err = -ENOMEM;
++	o = krealloc(vdir->vd_deblk, sizeof(*o) * (vdir->vd_nblk + 1),
++		     GFP_NOFS);
++	if (unlikely(!o))
++		goto out;
++
++	vdir->vd_deblk = o;
++	p.deblk = kmalloc(deblk_sz, GFP_NOFS);
++	if (p.deblk) {
++		ul = vdir->vd_nblk++;
++		vdir->vd_deblk[ul] = p.deblk;
++		vdir->vd_last.ul = ul;
++		vdir->vd_last.p.deblk = p.deblk;
++		deblk_end.deblk = p.deblk + deblk_sz;
++		err = set_deblk_end(&p, &deblk_end);
++	}
++
++ out:
++	return err;
++}
++
++static int append_de(struct au_vdir *vdir, char *name, int nlen, ino_t ino,
++		     unsigned int d_type, struct au_nhash *delist)
++{
++	int err;
++	unsigned int sz;
++	const unsigned int deblk_sz = vdir->vd_deblk_sz;
++	union au_vdir_deblk_p p, *room, deblk_end;
++	struct au_vdir_dehstr *dehstr;
++
++	p.deblk = last_deblk(vdir);
++	deblk_end.deblk = p.deblk + deblk_sz;
++	room = &vdir->vd_last.p;
++	AuDebugOn(room->deblk < p.deblk || deblk_end.deblk <= room->deblk
++		  || !is_deblk_end(room, &deblk_end));
++
++	sz = calc_size(nlen);
++	if (unlikely(sz > deblk_end.deblk - room->deblk)) {
++		err = append_deblk(vdir);
++		if (unlikely(err))
++			goto out;
++
++		p.deblk = last_deblk(vdir);
++		deblk_end.deblk = p.deblk + deblk_sz;
++		/* smp_mb(); */
++		AuDebugOn(room->deblk != p.deblk);
++	}
++
++	err = -ENOMEM;
++	dehstr = au_cache_alloc_dehstr();
++	if (unlikely(!dehstr))
++		goto out;
++
++	dehstr->str = &room->de->de_str;
++	hlist_add_head(&dehstr->hash, au_name_hash(delist, name, nlen));
++	room->de->de_ino = ino;
++	room->de->de_type = d_type;
++	room->de->de_str.len = nlen;
++	memcpy(room->de->de_str.name, name, nlen);
++
++	err = 0;
++	room->deblk += sz;
++	if (unlikely(set_deblk_end(room, &deblk_end)))
++		err = append_deblk(vdir);
++	/* smp_mb(); */
++
++ out:
++	return err;
++}
++
++/* ---------------------------------------------------------------------- */
++
++void au_vdir_free(struct au_vdir *vdir)
++{
++	unsigned char **deblk;
++
++	deblk = vdir->vd_deblk;
++	while (vdir->vd_nblk--)
++		kfree(*deblk++);
++	kfree(vdir->vd_deblk);
++	au_cache_free_vdir(vdir);
++}
++
++static struct au_vdir *alloc_vdir(struct super_block *sb)
++{
++	struct au_vdir *vdir;
++	int err;
++
++	SiMustAnyLock(sb);
++
++	err = -ENOMEM;
++	vdir = au_cache_alloc_vdir();
++	if (unlikely(!vdir))
++		goto out;
++
++	vdir->vd_deblk = kzalloc(sizeof(*vdir->vd_deblk), GFP_NOFS);
++	if (unlikely(!vdir->vd_deblk))
++		goto out_free;
++
++	vdir->vd_deblk_sz = au_sbi(sb)->si_rdblk;
++	vdir->vd_nblk = 0;
++	vdir->vd_version = 0;
++	vdir->vd_jiffy = 0;
++	err = append_deblk(vdir);
++	if (!err)
++		return vdir; /* success */
++
++	kfree(vdir->vd_deblk);
++
++ out_free:
++	au_cache_free_vdir(vdir);
++ out:
++	vdir = ERR_PTR(err);
++	return vdir;
++}
++
++static int reinit_vdir(struct au_vdir *vdir)
++{
++	int err;
++	union au_vdir_deblk_p p, deblk_end;
++
++	while (vdir->vd_nblk > 1) {
++		kfree(vdir->vd_deblk[vdir->vd_nblk - 1]);
++		/* vdir->vd_deblk[vdir->vd_nblk - 1] = NULL; */
++		vdir->vd_nblk--;
++	}
++	p.deblk = vdir->vd_deblk[0];
++	deblk_end.deblk = p.deblk + vdir->vd_deblk_sz;
++	err = set_deblk_end(&p, &deblk_end);
++	/* keep vd_dblk_sz */
++	vdir->vd_last.ul = 0;
++	vdir->vd_last.p.deblk = vdir->vd_deblk[0];
++	vdir->vd_version = 0;
++	vdir->vd_jiffy = 0;
++	/* smp_mb(); */
++	return err;
++}
++
++/* ---------------------------------------------------------------------- */
++
++static int au_ino(struct super_block *sb, aufs_bindex_t bindex, ino_t h_ino,
++		  unsigned int d_type, ino_t *ino)
++{
++	int err;
++	struct mutex *mtx;
++	const int isdir = (d_type == DT_DIR);
++
++	/* prevent hardlinks from race condition */
++	mtx = NULL;
++	if (!isdir) {
++		mtx = &au_sbr(sb, bindex)->br_xino.xi_nondir_mtx;
++		mutex_lock(mtx);
++	}
++	err = au_xino_read(sb, bindex, h_ino, ino);
++	if (unlikely(err))
++		goto out;
++
++	if (!*ino) {
++		err = -EIO;
++		*ino = au_xino_new_ino(sb);
++		if (unlikely(!*ino))
++			goto out;
++		err = au_xino_write(sb, bindex, h_ino, *ino);
++		if (unlikely(err))
++			goto out;
++	}
++
++ out:
++	if (!isdir)
++		mutex_unlock(mtx);
++	return err;
++}
++
++static int au_wh_ino(struct super_block *sb, aufs_bindex_t bindex, ino_t h_ino,
++		     unsigned int d_type, ino_t *ino)
++{
++#ifdef CONFIG_AUFS_SHWH
++	return au_ino(sb, bindex, h_ino, d_type, ino);
++#else
++	return 0;
++#endif
++}
++
++#define AuFillVdir_CALLED	1
++#define AuFillVdir_WHABLE	(1 << 1)
++#define AuFillVdir_SHWH		(1 << 2)
++#define au_ftest_fillvdir(flags, name)	((flags) & AuFillVdir_##name)
++#define au_fset_fillvdir(flags, name)	{ (flags) |= AuFillVdir_##name; }
++#define au_fclr_fillvdir(flags, name)	{ (flags) &= ~AuFillVdir_##name; }
++
++#ifndef CONFIG_AUFS_SHWH
++#undef AuFillVdir_SHWH
++#define AuFillVdir_SHWH		0
++#endif
++
++struct fillvdir_arg {
++	struct file		*file;
++	struct au_vdir		*vdir;
++	struct au_nhash		delist;
++	struct au_nhash		whlist;
++	aufs_bindex_t		bindex;
++	unsigned int		flags;
++	int			err;
++};
++
++static int fillvdir(void *__arg, const char *__name, int nlen,
++		    loff_t offset __maybe_unused, u64 h_ino,
++		    unsigned int d_type)
++{
++	struct fillvdir_arg *arg = __arg;
++	char *name = (void *)__name;
++	struct super_block *sb;
++	ino_t ino;
++	const unsigned char shwh = !!au_ftest_fillvdir(arg->flags, SHWH);
++
++	arg->err = 0;
++	sb = arg->file->f_dentry->d_sb;
++	au_fset_fillvdir(arg->flags, CALLED);
++	/* smp_mb(); */
++	if (nlen <= AUFS_WH_PFX_LEN
++	    || memcmp(name, AUFS_WH_PFX, AUFS_WH_PFX_LEN)) {
++		if (test_known(&arg->delist, name, nlen)
++		    || au_nhash_test_known_wh(&arg->whlist, name, nlen))
++			goto out; /* already exists or whiteouted */
++
++		sb = arg->file->f_dentry->d_sb;
++		arg->err = au_ino(sb, arg->bindex, h_ino, d_type, &ino);
++		if (!arg->err)
++			arg->err = append_de(arg->vdir, name, nlen, ino,
++					     d_type, &arg->delist);
++	} else if (au_ftest_fillvdir(arg->flags, WHABLE)) {
++		name += AUFS_WH_PFX_LEN;
++		nlen -= AUFS_WH_PFX_LEN;
++		if (au_nhash_test_known_wh(&arg->whlist, name, nlen))
++			goto out; /* already whiteouted */
++
++		if (shwh)
++			arg->err = au_wh_ino(sb, arg->bindex, h_ino, d_type,
++					     &ino);
++		if (!arg->err)
++			arg->err = au_nhash_append_wh
++				(&arg->whlist, name, nlen, ino, d_type,
++				 arg->bindex, shwh);
++	}
++
++ out:
++	if (!arg->err)
++		arg->vdir->vd_jiffy = jiffies;
++	/* smp_mb(); */
++	AuTraceErr(arg->err);
++	return arg->err;
++}
++
++static int au_handle_shwh(struct super_block *sb, struct au_vdir *vdir,
++			  struct au_nhash *whlist, struct au_nhash *delist)
++{
++#ifdef CONFIG_AUFS_SHWH
++	int err;
++	unsigned int nh, u;
++	struct hlist_head *head;
++	struct au_vdir_wh *tpos;
++	struct hlist_node *pos, *n;
++	char *p, *o;
++	struct au_vdir_destr *destr;
++
++	AuDebugOn(!au_opt_test(au_mntflags(sb), SHWH));
++
++	err = -ENOMEM;
++	o = p = __getname();
++	if (unlikely(!p))
++		goto out;
++
++	err = 0;
++	nh = whlist->nh_num;
++	memcpy(p, AUFS_WH_PFX, AUFS_WH_PFX_LEN);
++	p += AUFS_WH_PFX_LEN;
++	for (u = 0; u < nh; u++) {
++		head = whlist->nh_head + u;
++		hlist_for_each_entry_safe(tpos, pos, n, head, wh_hash) {
++			destr = &tpos->wh_str;
++			memcpy(p, destr->name, destr->len);
++			err = append_de(vdir, o, destr->len + AUFS_WH_PFX_LEN,
++					tpos->wh_ino, tpos->wh_type, delist);
++			if (unlikely(err))
++				break;
++		}
++	}
++
++	__putname(o);
++
++ out:
++	AuTraceErr(err);
++	return err;
++#else
++	return 0;
++#endif
++}
++
++static int au_do_read_vdir(struct fillvdir_arg *arg)
++{
++	int err;
++	unsigned int rdhash;
++	loff_t offset;
++	aufs_bindex_t bend, bindex, bstart;
++	unsigned char shwh;
++	struct file *hf, *file;
++	struct super_block *sb;
++
++	file = arg->file;
++	sb = file->f_dentry->d_sb;
++	SiMustAnyLock(sb);
++
++	rdhash = au_sbi(sb)->si_rdhash;
++	err = au_nhash_alloc(&arg->delist, rdhash, GFP_NOFS);
++	if (unlikely(err))
++		goto out;
++	err = au_nhash_alloc(&arg->whlist, rdhash, GFP_NOFS);
++	if (unlikely(err))
++		goto out_delist;
++
++	err = 0;
++	arg->flags = 0;
++	shwh = 0;
++	if (au_opt_test(au_mntflags(sb), SHWH)) {
++		shwh = 1;
++		au_fset_fillvdir(arg->flags, SHWH);
++	}
++	bstart = au_fbstart(file);
++	bend = au_fbend(file);
++	for (bindex = bstart; !err && bindex <= bend; bindex++) {
++		hf = au_h_fptr(file, bindex);
++		if (!hf)
++			continue;
++
++		offset = vfsub_llseek(hf, 0, SEEK_SET);
++		err = offset;
++		if (unlikely(offset))
++			break;
++
++		arg->bindex = bindex;
++		au_fclr_fillvdir(arg->flags, WHABLE);
++		if (shwh
++		    || (bindex != bend
++			&& au_br_whable(au_sbr_perm(sb, bindex))))
++			au_fset_fillvdir(arg->flags, WHABLE);
++		do {
++			arg->err = 0;
++			au_fclr_fillvdir(arg->flags, CALLED);
++			/* smp_mb(); */
++			err = vfsub_readdir(hf, fillvdir, arg);
++			if (err >= 0)
++				err = arg->err;
++		} while (!err && au_ftest_fillvdir(arg->flags, CALLED));
++	}
++
++	if (!err && shwh)
++		err = au_handle_shwh(sb, arg->vdir, &arg->whlist, &arg->delist);
++
++	au_nhash_wh_free(&arg->whlist);
++
++ out_delist:
++	au_nhash_de_free(&arg->delist);
++ out:
++	return err;
++}
++
++static int read_vdir(struct file *file, int may_read)
++{
++	int err;
++	unsigned long expire;
++	unsigned char do_read;
++	struct fillvdir_arg arg;
++	struct inode *inode;
++	struct au_vdir *vdir, *allocated;
++
++	err = 0;
++	inode = file->f_dentry->d_inode;
++	IMustLock(inode);
++	SiMustAnyLock(inode->i_sb);
++
++	allocated = NULL;
++	do_read = 0;
++	expire = au_sbi(inode->i_sb)->si_rdcache;
++	vdir = au_ivdir(inode);
++	if (!vdir) {
++		do_read = 1;
++		vdir = alloc_vdir(inode->i_sb);
++		err = PTR_ERR(vdir);
++		if (IS_ERR(vdir))
++			goto out;
++		err = 0;
++		allocated = vdir;
++	} else if (may_read
++		   && (inode->i_version != vdir->vd_version
++		       || time_after(jiffies, vdir->vd_jiffy + expire))) {
++		do_read = 1;
++		err = reinit_vdir(vdir);
++		if (unlikely(err))
++			goto out;
++	}
++
++	if (!do_read)
++		return 0; /* success */
++
++	arg.file = file;
++	arg.vdir = vdir;
++	err = au_do_read_vdir(&arg);
++	if (!err) {
++		/* file->f_pos = 0; */
++		vdir->vd_version = inode->i_version;
++		vdir->vd_last.ul = 0;
++		vdir->vd_last.p.deblk = vdir->vd_deblk[0];
++		if (allocated)
++			au_set_ivdir(inode, allocated);
++	} else if (allocated)
++		au_vdir_free(allocated);
++
++ out:
++	return err;
++}
++
++static int copy_vdir(struct au_vdir *tgt, struct au_vdir *src)
++{
++	int err, rerr;
++	unsigned long ul, n;
++	const unsigned int deblk_sz = src->vd_deblk_sz;
++
++	AuDebugOn(tgt->vd_nblk != 1);
++
++	err = -ENOMEM;
++	if (tgt->vd_nblk < src->vd_nblk) {
++		unsigned char **p;
++
++		p = krealloc(tgt->vd_deblk, sizeof(*p) * src->vd_nblk,
++			     GFP_NOFS);
++		if (unlikely(!p))
++			goto out;
++		tgt->vd_deblk = p;
++	}
++
++	tgt->vd_nblk = src->vd_nblk;
++	tgt->vd_deblk_sz = deblk_sz;
++	memcpy(tgt->vd_deblk[0], src->vd_deblk[0], deblk_sz);
++	/* tgt->vd_last.i = 0; */
++	/* tgt->vd_last.p.deblk = tgt->vd_deblk[0]; */
++	tgt->vd_version = src->vd_version;
++	tgt->vd_jiffy = src->vd_jiffy;
++
++	n = src->vd_nblk;
++	for (ul = 1; ul < n; ul++) {
++		tgt->vd_deblk[ul] = kmemdup(src->vd_deblk[ul], deblk_sz,
++					    GFP_NOFS);
++		if (unlikely(!tgt->vd_deblk[ul]))
++			goto out;
++	}
++	/* smp_mb(); */
++	return 0; /* success */
++
++ out:
++	rerr = reinit_vdir(tgt);
++	BUG_ON(rerr);
++	return err;
++}
++
++int au_vdir_init(struct file *file)
++{
++	int err;
++	struct inode *inode;
++	struct au_vdir *vdir_cache, *allocated;
++
++	err = read_vdir(file, !file->f_pos);
++	if (unlikely(err))
++		goto out;
++
++	allocated = NULL;
++	vdir_cache = au_fvdir_cache(file);
++	if (!vdir_cache) {
++		vdir_cache = alloc_vdir(file->f_dentry->d_sb);
++		err = PTR_ERR(vdir_cache);
++		if (IS_ERR(vdir_cache))
++			goto out;
++		allocated = vdir_cache;
++	} else if (!file->f_pos && vdir_cache->vd_version != file->f_version) {
++		err = reinit_vdir(vdir_cache);
++		if (unlikely(err))
++			goto out;
++	} else
++		return 0; /* success */
++
++	inode = file->f_dentry->d_inode;
++	err = copy_vdir(vdir_cache, au_ivdir(inode));
++	if (!err) {
++		file->f_version = inode->i_version;
++		if (allocated)
++			au_set_fvdir_cache(file, allocated);
++	} else if (allocated)
++		au_vdir_free(allocated);
++
++ out:
++	return err;
++}
++
++static loff_t calc_offset(struct au_vdir *vdir)
++{
++	loff_t offset;
++	union au_vdir_deblk_p p;
++
++	p.deblk = vdir->vd_deblk[vdir->vd_last.ul];
++	offset = vdir->vd_last.p.deblk - p.deblk;
++	offset += vdir->vd_deblk_sz * vdir->vd_last.ul;
++	return offset;
++}
++
++/* returns true or false */
++static int seek_vdir(struct file *file)
++{
++	int valid;
++	unsigned int deblk_sz;
++	unsigned long ul, n;
++	loff_t offset;
++	union au_vdir_deblk_p p, deblk_end;
++	struct au_vdir *vdir_cache;
++
++	valid = 1;
++	vdir_cache = au_fvdir_cache(file);
++	offset = calc_offset(vdir_cache);
++	AuDbg("offset %lld\n", offset);
++	if (file->f_pos == offset)
++		goto out;
++
++	vdir_cache->vd_last.ul = 0;
++	vdir_cache->vd_last.p.deblk = vdir_cache->vd_deblk[0];
++	if (!file->f_pos)
++		goto out;
++
++	valid = 0;
++	deblk_sz = vdir_cache->vd_deblk_sz;
++	ul = div64_u64(file->f_pos, deblk_sz);
++	AuDbg("ul %lu\n", ul);
++	if (ul >= vdir_cache->vd_nblk)
++		goto out;
++
++	n = vdir_cache->vd_nblk;
++	for (; ul < n; ul++) {
++		p.deblk = vdir_cache->vd_deblk[ul];
++		deblk_end.deblk = p.deblk + deblk_sz;
++		offset = ul;
++		offset *= deblk_sz;
++		while (!is_deblk_end(&p, &deblk_end) && offset < file->f_pos) {
++			unsigned int l;
++
++			l = calc_size(p.de->de_str.len);
++			offset += l;
++			p.deblk += l;
++		}
++		if (!is_deblk_end(&p, &deblk_end)) {
++			valid = 1;
++			vdir_cache->vd_last.ul = ul;
++			vdir_cache->vd_last.p = p;
++			break;
++		}
++	}
++
++ out:
++	/* smp_mb(); */
++	AuTraceErr(!valid);
++	return valid;
++}
++
++int au_vdir_fill_de(struct file *file, void *dirent, filldir_t filldir)
++{
++	int err;
++	unsigned int l, deblk_sz;
++	union au_vdir_deblk_p deblk_end;
++	struct au_vdir *vdir_cache;
++	struct au_vdir_de *de;
++
++	vdir_cache = au_fvdir_cache(file);
++	if (!seek_vdir(file))
++		return 0;
++
++	deblk_sz = vdir_cache->vd_deblk_sz;
++	while (1) {
++		deblk_end.deblk = vdir_cache->vd_deblk[vdir_cache->vd_last.ul];
++		deblk_end.deblk += deblk_sz;
++		while (!is_deblk_end(&vdir_cache->vd_last.p, &deblk_end)) {
++			de = vdir_cache->vd_last.p.de;
++			AuDbg("%.*s, off%lld, i%lu, dt%d\n",
++			      de->de_str.len, de->de_str.name, file->f_pos,
++			      (unsigned long)de->de_ino, de->de_type);
++			err = filldir(dirent, de->de_str.name, de->de_str.len,
++				      file->f_pos, de->de_ino, de->de_type);
++			if (unlikely(err)) {
++				AuTraceErr(err);
++				/* todo: ignore the error caused by udba? */
++				/* return err; */
++				return 0;
++			}
++
++			l = calc_size(de->de_str.len);
++			vdir_cache->vd_last.p.deblk += l;
++			file->f_pos += l;
++		}
++		if (vdir_cache->vd_last.ul < vdir_cache->vd_nblk - 1) {
++			vdir_cache->vd_last.ul++;
++			vdir_cache->vd_last.p.deblk
++				= vdir_cache->vd_deblk[vdir_cache->vd_last.ul];
++			file->f_pos = deblk_sz * vdir_cache->vd_last.ul;
++			continue;
++		}
++		break;
++	}
++
++	/* smp_mb(); */
++	return 0;
++}
+diff -Nur linux-2.6.31.5.orig/fs/aufs/vfsub.c linux-2.6.31.5/fs/aufs/vfsub.c
+--- linux-2.6.31.5.orig/fs/aufs/vfsub.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.31.5/fs/aufs/vfsub.c	2009-11-15 22:02:37.000000000 +0100
+@@ -0,0 +1,740 @@
++/*
++ * Copyright (C) 2005-2009 Junjiro R. Okajima
++ *
++ * This program, aufs is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
++ */
++
++/*
++ * sub-routines for VFS
++ */
++
++#include <linux/namei.h>
++#include <linux/security.h>
++#include <linux/splice.h>
++#include <linux/uaccess.h>
++#include "aufs.h"
++
++int vfsub_update_h_iattr(struct path *h_path, int *did)
++{
++	int err;
++	struct kstat st;
++	struct super_block *h_sb;
++
++	/* for remote fs, leave work for its getattr or d_revalidate */
++	/* for bad i_attr fs, handle them in aufs_getattr() */
++	/* still some fs may acquire i_mutex. we need to skip them */
++	err = 0;
++	if (!did)
++		did = &err;
++	h_sb = h_path->dentry->d_sb;
++	*did = (!au_test_fs_remote(h_sb) && au_test_fs_refresh_iattr(h_sb));
++	if (*did)
++		err = vfs_getattr(h_path->mnt, h_path->dentry, &st);
++
++	return err;
++}
++
++/* ---------------------------------------------------------------------- */
++
++#ifdef CONFIG_IMA
++#error IMA is not supported since it does not work well. Wait for their fixing.
++#endif
++
++struct file *vfsub_filp_open(const char *path, int oflags, int mode)
++{
++	struct file *file;
++
++	lockdep_off();
++	file = filp_open(path, oflags, mode);
++	lockdep_on();
++	if (IS_ERR(file))
++		goto out;
++	vfsub_update_h_iattr(&file->f_path, /*did*/NULL); /*ignore*/
++
++ out:
++	return file;
++}
++
++int vfsub_kern_path(const char *name, unsigned int flags, struct path *path)
++{
++	int err;
++
++	/* lockdep_off(); */
++	err = kern_path(name, flags, path);
++	/* lockdep_on(); */
++	if (!err && path->dentry->d_inode)
++		vfsub_update_h_iattr(path, /*did*/NULL); /*ignore*/
++	return err;
++}
++
++struct dentry *vfsub_lookup_one_len(const char *name, struct dentry *parent,
++				    int len)
++{
++	struct path path = {
++		.mnt = NULL
++	};
++
++	IMustLock(parent->d_inode);
++
++	path.dentry = lookup_one_len(name, parent, len);
++	if (IS_ERR(path.dentry))
++		goto out;
++	if (path.dentry->d_inode)
++		vfsub_update_h_iattr(&path, /*did*/NULL); /*ignore*/
++
++ out:
++	return path.dentry;
++}
++
++struct dentry *vfsub_lookup_hash(struct nameidata *nd)
++{
++	struct path path = {
++		.mnt = nd->path.mnt
++	};
++
++	IMustLock(nd->path.dentry->d_inode);
++
++	path.dentry = lookup_hash(nd);
++	if (!IS_ERR(path.dentry) && path.dentry->d_inode)
++		vfsub_update_h_iattr(&path, /*did*/NULL); /*ignore*/
++
++	return path.dentry;
++}
++
++/* ---------------------------------------------------------------------- */
++
++struct dentry *vfsub_lock_rename(struct dentry *d1, struct au_hinode *hdir1,
++				 struct dentry *d2, struct au_hinode *hdir2)
++{
++	struct dentry *d;
++
++	lockdep_off();
++	d = lock_rename(d1, d2);
++	lockdep_on();
++	au_hin_suspend(hdir1);
++	if (hdir1 != hdir2)
++		au_hin_suspend(hdir2);
++
++	return d;
++}
++
++void vfsub_unlock_rename(struct dentry *d1, struct au_hinode *hdir1,
++			 struct dentry *d2, struct au_hinode *hdir2)
++{
++	au_hin_resume(hdir1);
++	if (hdir1 != hdir2)
++		au_hin_resume(hdir2);
++	lockdep_off();
++	unlock_rename(d1, d2);
++	lockdep_on();
++}
++
++/* ---------------------------------------------------------------------- */
++
++int vfsub_create(struct inode *dir, struct path *path, int mode)
++{
++	int err;
++	struct dentry *d;
++
++	IMustLock(dir);
++
++	d = path->dentry;
++	path->dentry = d->d_parent;
++	err = security_path_mknod(path, path->dentry, mode, 0);
++	path->dentry = d;
++	if (unlikely(err))
++		goto out;
++
++	if (au_test_fs_null_nd(dir->i_sb))
++		err = vfs_create(dir, path->dentry, mode, NULL);
++	else {
++		struct nameidata h_nd;
++
++		memset(&h_nd, 0, sizeof(h_nd));
++		h_nd.flags = LOOKUP_CREATE;
++		h_nd.intent.open.flags = O_CREAT
++			| vfsub_fmode_to_uint(FMODE_READ);
++		h_nd.intent.open.create_mode = mode;
++		h_nd.path.dentry = path->dentry->d_parent;
++		h_nd.path.mnt = path->mnt;
++		path_get(&h_nd.path);
++		err = vfs_create(dir, path->dentry, mode, &h_nd);
++		path_put(&h_nd.path);
++	}
++
++	if (!err) {
++		struct path tmp = *path;
++		int did;
++
++		vfsub_update_h_iattr(&tmp, &did);
++		if (did) {
++			tmp.dentry = path->dentry->d_parent;
++			vfsub_update_h_iattr(&tmp, /*did*/NULL);
++		}
++		/*ignore*/
++	}
++
++ out:
++	return err;
++}
++
++int vfsub_symlink(struct inode *dir, struct path *path, const char *symname)
++{
++	int err;
++	struct dentry *d;
++
++	IMustLock(dir);
++
++	d = path->dentry;
++	path->dentry = d->d_parent;
++	err = security_path_symlink(path, path->dentry, symname);
++	path->dentry = d;
++	if (unlikely(err))
++		goto out;
++
++	err = vfs_symlink(dir, path->dentry, symname);
++	if (!err) {
++		struct path tmp = *path;
++		int did;
++
++		vfsub_update_h_iattr(&tmp, &did);
++		if (did) {
++			tmp.dentry = path->dentry->d_parent;
++			vfsub_update_h_iattr(&tmp, /*did*/NULL);
++		}
++		/*ignore*/
++	}
++
++ out:
++	return err;
++}
++
++int vfsub_mknod(struct inode *dir, struct path *path, int mode, dev_t dev)
++{
++	int err;
++	struct dentry *d;
++
++	IMustLock(dir);
++
++	d = path->dentry;
++	path->dentry = d->d_parent;
++	err = security_path_mknod(path, path->dentry, mode, dev);
++	path->dentry = d;
++	if (unlikely(err))
++		goto out;
++
++	err = vfs_mknod(dir, path->dentry, mode, dev);
++	if (!err) {
++		struct path tmp = *path;
++		int did;
++
++		vfsub_update_h_iattr(&tmp, &did);
++		if (did) {
++			tmp.dentry = path->dentry->d_parent;
++			vfsub_update_h_iattr(&tmp, /*did*/NULL);
++		}
++		/*ignore*/
++	}
++
++ out:
++	return err;
++}
++
++static int au_test_nlink(struct inode *inode)
++{
++	const unsigned int link_max = UINT_MAX >> 1; /* rough margin */
++
++	if (!au_test_fs_no_limit_nlink(inode->i_sb)
++	    || inode->i_nlink < link_max)
++		return 0;
++	return -EMLINK;
++}
++
++int vfsub_link(struct dentry *src_dentry, struct inode *dir, struct path *path)
++{
++	int err;
++	struct dentry *d;
++
++	IMustLock(dir);
++
++	err = au_test_nlink(src_dentry->d_inode);
++	if (unlikely(err))
++		return err;
++
++	d = path->dentry;
++	path->dentry = d->d_parent;
++	err = security_path_link(src_dentry, path, path->dentry);
++	path->dentry = d;
++	if (unlikely(err))
++		goto out;
++
++	lockdep_off();
++	err = vfs_link(src_dentry, dir, path->dentry);
++	lockdep_on();
++	if (!err) {
++		struct path tmp = *path;
++		int did;
++
++		/* fuse has different memory inode for the same inumber */
++		vfsub_update_h_iattr(&tmp, &did);
++		if (did) {
++			tmp.dentry = path->dentry->d_parent;
++			vfsub_update_h_iattr(&tmp, /*did*/NULL);
++			tmp.dentry = src_dentry;
++			vfsub_update_h_iattr(&tmp, /*did*/NULL);
++		}
++		/*ignore*/
++	}
++
++ out:
++	return err;
++}
++
++int vfsub_rename(struct inode *src_dir, struct dentry *src_dentry,
++		 struct inode *dir, struct path *path)
++{
++	int err;
++	struct path tmp = {
++		.mnt	= path->mnt
++	};
++	struct dentry *d;
++
++	IMustLock(dir);
++	IMustLock(src_dir);
++
++	d = path->dentry;
++	path->dentry = d->d_parent;
++	tmp.dentry = src_dentry->d_parent;
++	err = security_path_rename(&tmp, src_dentry, path, path->dentry);
++	path->dentry = d;
++	if (unlikely(err))
++		goto out;
++
++	lockdep_off();
++	err = vfs_rename(src_dir, src_dentry, dir, path->dentry);
++	lockdep_on();
++	if (!err) {
++		int did;
++
++		tmp.dentry = d->d_parent;
++		vfsub_update_h_iattr(&tmp, &did);
++		if (did) {
++			tmp.dentry = src_dentry;
++			vfsub_update_h_iattr(&tmp, /*did*/NULL);
++			tmp.dentry = src_dentry->d_parent;
++			vfsub_update_h_iattr(&tmp, /*did*/NULL);
++		}
++		/*ignore*/
++	}
++
++ out:
++	return err;
++}
++
++int vfsub_mkdir(struct inode *dir, struct path *path, int mode)
++{
++	int err;
++	struct dentry *d;
++
++	IMustLock(dir);
++
++	d = path->dentry;
++	path->dentry = d->d_parent;
++	err = security_path_mkdir(path, path->dentry, mode);
++	path->dentry = d;
++	if (unlikely(err))
++		goto out;
++
++	err = vfs_mkdir(dir, path->dentry, mode);
++	if (!err) {
++		struct path tmp = *path;
++		int did;
++
++		vfsub_update_h_iattr(&tmp, &did);
++		if (did) {
++			tmp.dentry = path->dentry->d_parent;
++			vfsub_update_h_iattr(&tmp, /*did*/NULL);
++		}
++		/*ignore*/
++	}
++
++ out:
++	return err;
++}
++
++int vfsub_rmdir(struct inode *dir, struct path *path)
++{
++	int err;
++	struct dentry *d;
++
++	IMustLock(dir);
++
++	d = path->dentry;
++	path->dentry = d->d_parent;
++	err = security_path_rmdir(path, path->dentry);
++	path->dentry = d;
++	if (unlikely(err))
++		goto out;
++
++	lockdep_off();
++	err = vfs_rmdir(dir, path->dentry);
++	lockdep_on();
++	if (!err) {
++		struct path tmp = {
++			.dentry	= path->dentry->d_parent,
++			.mnt	= path->mnt
++		};
++
++		vfsub_update_h_iattr(&tmp, /*did*/NULL); /*ignore*/
++	}
++
++ out:
++	return err;
++}
++
++/* ---------------------------------------------------------------------- */
++
++ssize_t vfsub_read_u(struct file *file, char __user *ubuf, size_t count,
++		     loff_t *ppos)
++{
++	ssize_t err;
++
++	err = vfs_read(file, ubuf, count, ppos);
++	if (err >= 0)
++		vfsub_update_h_iattr(&file->f_path, /*did*/NULL); /*ignore*/
++	return err;
++}
++
++/* todo: kernel_read()? */
++ssize_t vfsub_read_k(struct file *file, void *kbuf, size_t count,
++		     loff_t *ppos)
++{
++	ssize_t err;
++	mm_segment_t oldfs;
++
++	oldfs = get_fs();
++	set_fs(KERNEL_DS);
++	err = vfsub_read_u(file, (char __user *)kbuf, count, ppos);
++	set_fs(oldfs);
++	return err;
++}
++
++ssize_t vfsub_write_u(struct file *file, const char __user *ubuf, size_t count,
++		      loff_t *ppos)
++{
++	ssize_t err;
++
++	lockdep_off();
++	err = vfs_write(file, ubuf, count, ppos);
++	lockdep_on();
++	if (err >= 0)
++		vfsub_update_h_iattr(&file->f_path, /*did*/NULL); /*ignore*/
++	return err;
++}
++
++ssize_t vfsub_write_k(struct file *file, void *kbuf, size_t count, loff_t *ppos)
++{
++	ssize_t err;
++	mm_segment_t oldfs;
++
++	oldfs = get_fs();
++	set_fs(KERNEL_DS);
++	err = vfsub_write_u(file, (const char __user *)kbuf, count, ppos);
++	set_fs(oldfs);
++	return err;
++}
++
++int vfsub_readdir(struct file *file, filldir_t filldir, void *arg)
++{
++	int err;
++
++	lockdep_off();
++	err = vfs_readdir(file, filldir, arg);
++	lockdep_on();
++	if (err >= 0)
++		vfsub_update_h_iattr(&file->f_path, /*did*/NULL); /*ignore*/
++	return err;
++}
++
++long vfsub_splice_to(struct file *in, loff_t *ppos,
++		     struct pipe_inode_info *pipe, size_t len,
++		     unsigned int flags)
++{
++	long err;
++
++	lockdep_off();
++	err = do_splice_to(in, ppos, pipe, len, flags);
++	lockdep_on();
++	if (err >= 0)
++		vfsub_update_h_iattr(&in->f_path, /*did*/NULL); /*ignore*/
++	return err;
++}
++
++long vfsub_splice_from(struct pipe_inode_info *pipe, struct file *out,
++		       loff_t *ppos, size_t len, unsigned int flags)
++{
++	long err;
++
++	lockdep_off();
++	err = do_splice_from(pipe, out, ppos, len, flags);
++	lockdep_on();
++	if (err >= 0)
++		vfsub_update_h_iattr(&out->f_path, /*did*/NULL); /*ignore*/
++	return err;
++}
++
++/* cf. open.c:do_sys_truncate() and do_sys_ftruncate() */
++int vfsub_trunc(struct path *h_path, loff_t length, unsigned int attr,
++		struct file *h_file)
++{
++	int err;
++	struct inode *h_inode;
++
++	h_inode = h_path->dentry->d_inode;
++	if (!h_file) {
++		err = mnt_want_write(h_path->mnt);
++		if (err)
++			goto out;
++		err = inode_permission(h_inode, MAY_WRITE);
++		if (err)
++			goto out_mnt;
++		err = get_write_access(h_inode);
++		if (err)
++			goto out_mnt;
++		err = break_lease(h_inode, vfsub_fmode_to_uint(FMODE_WRITE));
++		if (err)
++			goto out_inode;
++	}
++
++	err = locks_verify_truncate(h_inode, h_file, length);
++	if (!err)
++		err = security_path_truncate(h_path, length, attr);
++	if (!err) {
++		lockdep_off();
++		err = do_truncate(h_path->dentry, length, attr, h_file);
++		lockdep_on();
++	}
++
++ out_inode:
++	if (!h_file)
++		put_write_access(h_inode);
++ out_mnt:
++	if (!h_file)
++		mnt_drop_write(h_path->mnt);
++ out:
++	return err;
++}
++
++/* ---------------------------------------------------------------------- */
++
++struct au_vfsub_mkdir_args {
++	int *errp;
++	struct inode *dir;
++	struct path *path;
++	int mode;
++};
++
++static void au_call_vfsub_mkdir(void *args)
++{
++	struct au_vfsub_mkdir_args *a = args;
++	*a->errp = vfsub_mkdir(a->dir, a->path, a->mode);
++}
++
++int vfsub_sio_mkdir(struct inode *dir, struct path *path, int mode)
++{
++	int err, do_sio, wkq_err;
++
++	do_sio = au_test_h_perm_sio(dir, MAY_EXEC | MAY_WRITE);
++	if (!do_sio)
++		err = vfsub_mkdir(dir, path, mode);
++	else {
++		struct au_vfsub_mkdir_args args = {
++			.errp	= &err,
++			.dir	= dir,
++			.path	= path,
++			.mode	= mode
++		};
++		wkq_err = au_wkq_wait(au_call_vfsub_mkdir, &args);
++		if (unlikely(wkq_err))
++			err = wkq_err;
++	}
++
++	return err;
++}
++
++struct au_vfsub_rmdir_args {
++	int *errp;
++	struct inode *dir;
++	struct path *path;
++};
++
++static void au_call_vfsub_rmdir(void *args)
++{
++	struct au_vfsub_rmdir_args *a = args;
++	*a->errp = vfsub_rmdir(a->dir, a->path);
++}
++
++int vfsub_sio_rmdir(struct inode *dir, struct path *path)
++{
++	int err, do_sio, wkq_err;
++
++	do_sio = au_test_h_perm_sio(dir, MAY_EXEC | MAY_WRITE);
++	if (!do_sio)
++		err = vfsub_rmdir(dir, path);
++	else {
++		struct au_vfsub_rmdir_args args = {
++			.errp	= &err,
++			.dir	= dir,
++			.path	= path
++		};
++		wkq_err = au_wkq_wait(au_call_vfsub_rmdir, &args);
++		if (unlikely(wkq_err))
++			err = wkq_err;
++	}
++
++	return err;
++}
++
++/* ---------------------------------------------------------------------- */
++
++struct notify_change_args {
++	int *errp;
++	struct path *path;
++	struct iattr *ia;
++};
++
++static void call_notify_change(void *args)
++{
++	struct notify_change_args *a = args;
++	struct inode *h_inode;
++
++	h_inode = a->path->dentry->d_inode;
++	IMustLock(h_inode);
++
++	*a->errp = -EPERM;
++	if (!IS_IMMUTABLE(h_inode) && !IS_APPEND(h_inode)) {
++		lockdep_off();
++		*a->errp = notify_change(a->path->dentry, a->ia);
++		lockdep_on();
++		if (!*a->errp)
++			vfsub_update_h_iattr(a->path, /*did*/NULL); /*ignore*/
++	}
++	AuTraceErr(*a->errp);
++}
++
++int vfsub_notify_change(struct path *path, struct iattr *ia)
++{
++	int err;
++	struct notify_change_args args = {
++		.errp	= &err,
++		.path	= path,
++		.ia	= ia
++	};
++
++	call_notify_change(&args);
++
++	return err;
++}
++
++int vfsub_sio_notify_change(struct path *path, struct iattr *ia)
++{
++	int err, wkq_err;
++	struct notify_change_args args = {
++		.errp	= &err,
++		.path	= path,
++		.ia	= ia
++	};
++
++	wkq_err = au_wkq_wait(call_notify_change, &args);
++	if (unlikely(wkq_err))
++		err = wkq_err;
++
++	return err;
++}
++
++/* ---------------------------------------------------------------------- */
++
++struct unlink_args {
++	int *errp;
++	struct inode *dir;
++	struct path *path;
++};
++
++static void call_unlink(void *args)
++{
++	struct unlink_args *a = args;
++	struct dentry *d = a->path->dentry;
++	struct inode *h_inode;
++	const int stop_sillyrename = (au_test_nfs(d->d_sb)
++				      && atomic_read(&d->d_count) == 1);
++
++	IMustLock(a->dir);
++
++	a->path->dentry = d->d_parent;
++	*a->errp = security_path_unlink(a->path, d);
++	a->path->dentry = d;
++	if (unlikely(*a->errp))
++		return;
++
++	if (!stop_sillyrename)
++		dget(d);
++	h_inode = d->d_inode;
++	if (h_inode)
++		atomic_inc(&h_inode->i_count);
++
++	lockdep_off();
++	*a->errp = vfs_unlink(a->dir, d);
++	lockdep_on();
++	if (!*a->errp) {
++		struct path tmp = {
++			.dentry = d->d_parent,
++			.mnt	= a->path->mnt
++		};
++		vfsub_update_h_iattr(&tmp, /*did*/NULL); /*ignore*/
++	}
++
++	if (!stop_sillyrename)
++		dput(d);
++	if (h_inode)
++		iput(h_inode);
++
++	AuTraceErr(*a->errp);
++}
++
++/*
++ * @dir: must be locked.
++ * @dentry: target dentry.
++ */
++int vfsub_unlink(struct inode *dir, struct path *path, int force)
++{
++	int err;
++	struct unlink_args args = {
++		.errp	= &err,
++		.dir	= dir,
++		.path	= path
++	};
++
++	if (!force)
++		call_unlink(&args);
++	else {
++		int wkq_err;
++
++		wkq_err = au_wkq_wait(call_unlink, &args);
++		if (unlikely(wkq_err))
++			err = wkq_err;
++	}
++
++	return err;
++}
+diff -Nur linux-2.6.31.5.orig/fs/aufs/vfsub.h linux-2.6.31.5/fs/aufs/vfsub.h
+--- linux-2.6.31.5.orig/fs/aufs/vfsub.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.31.5/fs/aufs/vfsub.h	2009-11-15 22:02:37.000000000 +0100
+@@ -0,0 +1,172 @@
++/*
++ * Copyright (C) 2005-2009 Junjiro R. Okajima
++ *
++ * This program, aufs is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
++ */
++
++/*
++ * sub-routines for VFS
++ */
++
++#ifndef __AUFS_VFSUB_H__
++#define __AUFS_VFSUB_H__
++
++#ifdef __KERNEL__
++
++#include <linux/fs.h>
++#include <linux/fs_stack.h>
++
++/* ---------------------------------------------------------------------- */
++
++/* lock subclass for lower inode */
++/* default MAX_LOCKDEP_SUBCLASSES(8) is not enough */
++/* reduce? gave up. */
++enum {
++	AuLsc_I_Begin = I_MUTEX_QUOTA, /* 4 */
++	AuLsc_I_PARENT,		/* lower inode, parent first */
++	AuLsc_I_PARENT2,	/* copyup dirs */
++	AuLsc_I_PARENT3,	/* copyup wh */
++	AuLsc_I_CHILD,
++	AuLsc_I_CHILD2,
++	AuLsc_I_End
++};
++
++/* to debug easier, do not make them inlined functions */
++#define MtxMustLock(mtx)	AuDebugOn(!mutex_is_locked(mtx))
++#define IMustLock(i)		MtxMustLock(&(i)->i_mutex)
++
++/* ---------------------------------------------------------------------- */
++
++static inline void vfsub_copy_inode_size(struct inode *inode,
++					 struct inode *h_inode)
++{
++	spin_lock(&inode->i_lock);
++	fsstack_copy_inode_size(inode, h_inode);
++	spin_unlock(&inode->i_lock);
++}
++
++int vfsub_update_h_iattr(struct path *h_path, int *did);
++struct file *vfsub_filp_open(const char *path, int oflags, int mode);
++int vfsub_kern_path(const char *name, unsigned int flags, struct path *path);
++struct dentry *vfsub_lookup_one_len(const char *name, struct dentry *parent,
++				    int len);
++struct dentry *vfsub_lookup_hash(struct nameidata *nd);
++
++/* ---------------------------------------------------------------------- */
++
++struct au_hinode;
++struct dentry *vfsub_lock_rename(struct dentry *d1, struct au_hinode *hdir1,
++				 struct dentry *d2, struct au_hinode *hdir2);
++void vfsub_unlock_rename(struct dentry *d1, struct au_hinode *hdir1,
++			 struct dentry *d2, struct au_hinode *hdir2);
++
++int vfsub_create(struct inode *dir, struct path *path, int mode);
++int vfsub_symlink(struct inode *dir, struct path *path,
++		  const char *symname);
++int vfsub_mknod(struct inode *dir, struct path *path, int mode, dev_t dev);
++int vfsub_link(struct dentry *src_dentry, struct inode *dir,
++	       struct path *path);
++int vfsub_rename(struct inode *src_hdir, struct dentry *src_dentry,
++		 struct inode *hdir, struct path *path);
++int vfsub_mkdir(struct inode *dir, struct path *path, int mode);
++int vfsub_rmdir(struct inode *dir, struct path *path);
++
++/* ---------------------------------------------------------------------- */
++
++ssize_t vfsub_read_u(struct file *file, char __user *ubuf, size_t count,
++		     loff_t *ppos);
++ssize_t vfsub_read_k(struct file *file, void *kbuf, size_t count,
++			loff_t *ppos);
++ssize_t vfsub_write_u(struct file *file, const char __user *ubuf, size_t count,
++		      loff_t *ppos);
++ssize_t vfsub_write_k(struct file *file, void *kbuf, size_t count,
++		      loff_t *ppos);
++int vfsub_readdir(struct file *file, filldir_t filldir, void *arg);
++
++static inline void vfsub_file_accessed(struct file *h_file)
++{
++	file_accessed(h_file);
++	vfsub_update_h_iattr(&h_file->f_path, /*did*/NULL); /*ignore*/
++}
++
++static inline void vfsub_touch_atime(struct vfsmount *h_mnt,
++				     struct dentry *h_dentry)
++{
++	struct path h_path = {
++		.dentry	= h_dentry,
++		.mnt	= h_mnt
++	};
++	touch_atime(h_mnt, h_dentry);
++	vfsub_update_h_iattr(&h_path, /*did*/NULL); /*ignore*/
++}
++
++long vfsub_splice_to(struct file *in, loff_t *ppos,
++		     struct pipe_inode_info *pipe, size_t len,
++		     unsigned int flags);
++long vfsub_splice_from(struct pipe_inode_info *pipe, struct file *out,
++		       loff_t *ppos, size_t len, unsigned int flags);
++int vfsub_trunc(struct path *h_path, loff_t length, unsigned int attr,
++		struct file *h_file);
++
++/* ---------------------------------------------------------------------- */
++
++static inline loff_t vfsub_llseek(struct file *file, loff_t offset, int origin)
++{
++	loff_t err;
++
++	lockdep_off();
++	err = vfs_llseek(file, offset, origin);
++	lockdep_on();
++	return err;
++}
++
++/* ---------------------------------------------------------------------- */
++
++/* dirty workaround for strict type of fmode_t */
++union vfsub_fmu {
++	fmode_t fm;
++	unsigned int ui;
++};
++
++static inline unsigned int vfsub_fmode_to_uint(fmode_t fm)
++{
++	union vfsub_fmu u = {
++		.fm = fm
++	};
++
++	BUILD_BUG_ON(sizeof(u.fm) != sizeof(u.ui));
++
++	return u.ui;
++}
++
++static inline fmode_t vfsub_uint_to_fmode(unsigned int ui)
++{
++	union vfsub_fmu u = {
++		.ui = ui
++	};
++
++	return u.fm;
++}
++
++/* ---------------------------------------------------------------------- */
++
++int vfsub_sio_mkdir(struct inode *dir, struct path *path, int mode);
++int vfsub_sio_rmdir(struct inode *dir, struct path *path);
++int vfsub_sio_notify_change(struct path *path, struct iattr *ia);
++int vfsub_notify_change(struct path *path, struct iattr *ia);
++int vfsub_unlink(struct inode *dir, struct path *path, int force);
++
++#endif /* __KERNEL__ */
++#endif /* __AUFS_VFSUB_H__ */
+diff -Nur linux-2.6.31.5.orig/fs/aufs/wbr_policy.c linux-2.6.31.5/fs/aufs/wbr_policy.c
+--- linux-2.6.31.5.orig/fs/aufs/wbr_policy.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.31.5/fs/aufs/wbr_policy.c	2009-11-15 22:02:37.000000000 +0100
+@@ -0,0 +1,641 @@
++/*
++ * Copyright (C) 2005-2009 Junjiro R. Okajima
++ *
++ * This program, aufs is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
++ */
++
++/*
++ * policies for selecting one among multiple writable branches
++ */
++
++#include <linux/statfs.h>
++#include "aufs.h"
++
++/* subset of cpup_attr() */
++static noinline_for_stack
++int au_cpdown_attr(struct path *h_path, struct dentry *h_src)
++{
++	int err, sbits;
++	struct iattr ia;
++	struct inode *h_isrc;
++
++	h_isrc = h_src->d_inode;
++	ia.ia_valid = ATTR_FORCE | ATTR_MODE | ATTR_UID | ATTR_GID;
++	ia.ia_mode = h_isrc->i_mode;
++	ia.ia_uid = h_isrc->i_uid;
++	ia.ia_gid = h_isrc->i_gid;
++	sbits = !!(ia.ia_mode & (S_ISUID | S_ISGID));
++	au_cpup_attr_flags(h_path->dentry->d_inode, h_isrc);
++	err = vfsub_sio_notify_change(h_path, &ia);
++
++	/* is this nfs only? */
++	if (!err && sbits && au_test_nfs(h_path->dentry->d_sb)) {
++		ia.ia_valid = ATTR_FORCE | ATTR_MODE;
++		ia.ia_mode = h_isrc->i_mode;
++		err = vfsub_sio_notify_change(h_path, &ia);
++	}
++
++	return err;
++}
++
++#define AuCpdown_PARENT_OPQ	1
++#define AuCpdown_WHED		(1 << 1)
++#define AuCpdown_MADE_DIR	(1 << 2)
++#define AuCpdown_DIROPQ		(1 << 3)
++#define au_ftest_cpdown(flags, name)	((flags) & AuCpdown_##name)
++#define au_fset_cpdown(flags, name)	{ (flags) |= AuCpdown_##name; }
++#define au_fclr_cpdown(flags, name)	{ (flags) &= ~AuCpdown_##name; }
++
++struct au_cpdown_dir_args {
++	struct dentry *parent;
++	unsigned int flags;
++};
++
++static int au_cpdown_dir_opq(struct dentry *dentry, aufs_bindex_t bdst,
++			     struct au_cpdown_dir_args *a)
++{
++	int err;
++	struct dentry *opq_dentry;
++
++	opq_dentry = au_diropq_create(dentry, bdst);
++	err = PTR_ERR(opq_dentry);
++	if (IS_ERR(opq_dentry))
++		goto out;
++	dput(opq_dentry);
++	au_fset_cpdown(a->flags, DIROPQ);
++
++ out:
++	return err;
++}
++
++static int au_cpdown_dir_wh(struct dentry *dentry, struct dentry *h_parent,
++			    struct inode *dir, aufs_bindex_t bdst)
++{
++	int err;
++	struct path h_path;
++	struct au_branch *br;
++
++	br = au_sbr(dentry->d_sb, bdst);
++	h_path.dentry = au_wh_lkup(h_parent, &dentry->d_name, br);
++	err = PTR_ERR(h_path.dentry);
++	if (IS_ERR(h_path.dentry))
++		goto out;
++
++	err = 0;
++	if (h_path.dentry->d_inode) {
++		h_path.mnt = br->br_mnt;
++		err = au_wh_unlink_dentry(au_h_iptr(dir, bdst), &h_path,
++					  dentry);
++	}
++	dput(h_path.dentry);
++
++ out:
++	return err;
++}
++
++static int au_cpdown_dir(struct dentry *dentry, aufs_bindex_t bdst,
++			 struct dentry *h_parent, void *arg)
++{
++	int err, rerr;
++	aufs_bindex_t bend, bopq, bstart;
++	unsigned char parent_opq;
++	struct path h_path;
++	struct dentry *parent;
++	struct inode *h_dir, *h_inode, *inode, *dir;
++	struct au_cpdown_dir_args *args = arg;
++
++	bstart = au_dbstart(dentry);
++	/* dentry is di-locked */
++	parent = dget_parent(dentry);
++	dir = parent->d_inode;
++	h_dir = h_parent->d_inode;
++	AuDebugOn(h_dir != au_h_iptr(dir, bdst));
++	IMustLock(h_dir);
++
++	err = au_lkup_neg(dentry, bdst);
++	if (unlikely(err < 0))
++		goto out;
++	h_path.dentry = au_h_dptr(dentry, bdst);
++	h_path.mnt = au_sbr_mnt(dentry->d_sb, bdst);
++	err = vfsub_sio_mkdir(au_h_iptr(dir, bdst), &h_path,
++			      S_IRWXU | S_IRUGO | S_IXUGO);
++	if (unlikely(err))
++		goto out_put;
++	au_fset_cpdown(args->flags, MADE_DIR);
++
++	bend = au_dbend(dentry);
++	bopq = au_dbdiropq(dentry);
++	au_fclr_cpdown(args->flags, WHED);
++	au_fclr_cpdown(args->flags, DIROPQ);
++	if (au_dbwh(dentry) == bdst)
++		au_fset_cpdown(args->flags, WHED);
++	if (!au_ftest_cpdown(args->flags, PARENT_OPQ) && bopq <= bdst)
++		au_fset_cpdown(args->flags, PARENT_OPQ);
++	parent_opq = (au_ftest_cpdown(args->flags, PARENT_OPQ)
++		      && args->parent == dentry);
++	h_inode = h_path.dentry->d_inode;
++	mutex_lock_nested(&h_inode->i_mutex, AuLsc_I_CHILD);
++	if (au_ftest_cpdown(args->flags, WHED)) {
++		err = au_cpdown_dir_opq(dentry, bdst, args);
++		if (unlikely(err)) {
++			mutex_unlock(&h_inode->i_mutex);
++			goto out_dir;
++		}
++	}
++
++	err = au_cpdown_attr(&h_path, au_h_dptr(dentry, bstart));
++	mutex_unlock(&h_inode->i_mutex);
++	if (unlikely(err))
++		goto out_opq;
++
++	if (au_ftest_cpdown(args->flags, WHED)) {
++		err = au_cpdown_dir_wh(dentry, h_parent, dir, bdst);
++		if (unlikely(err))
++			goto out_opq;
++	}
++
++	inode = dentry->d_inode;
++	if (au_ibend(inode) < bdst)
++		au_set_ibend(inode, bdst);
++	au_set_h_iptr(inode, bdst, au_igrab(h_inode),
++		      au_hi_flags(inode, /*isdir*/1));
++	goto out; /* success */
++
++	/* revert */
++ out_opq:
++	if (au_ftest_cpdown(args->flags, DIROPQ)) {
++		mutex_lock_nested(&h_inode->i_mutex, AuLsc_I_CHILD);
++		rerr = au_diropq_remove(dentry, bdst);
++		mutex_unlock(&h_inode->i_mutex);
++		if (unlikely(rerr)) {
++			AuIOErr("failed removing diropq for %.*s b%d (%d)\n",
++				AuDLNPair(dentry), bdst, rerr);
++			err = -EIO;
++			goto out;
++		}
++	}
++ out_dir:
++	if (au_ftest_cpdown(args->flags, MADE_DIR)) {
++		rerr = vfsub_sio_rmdir(au_h_iptr(dir, bdst), &h_path);
++		if (unlikely(rerr)) {
++			AuIOErr("failed removing %.*s b%d (%d)\n",
++				AuDLNPair(dentry), bdst, rerr);
++			err = -EIO;
++		}
++	}
++ out_put:
++	au_set_h_dptr(dentry, bdst, NULL);
++	if (au_dbend(dentry) == bdst)
++		au_update_dbend(dentry);
++ out:
++	dput(parent);
++	return err;
++}
++
++int au_cpdown_dirs(struct dentry *dentry, aufs_bindex_t bdst)
++{
++	int err;
++	struct au_cpdown_dir_args args = {
++		.parent	= dget_parent(dentry),
++		.flags	= 0
++	};
++
++	err = au_cp_dirs(dentry, bdst, au_cpdown_dir, &args);
++	dput(args.parent);
++
++	return err;
++}
++
++/* ---------------------------------------------------------------------- */
++
++/* policies for create */
++
++static int au_wbr_bu(struct super_block *sb, aufs_bindex_t bindex)
++{
++	for (; bindex >= 0; bindex--)
++		if (!au_br_rdonly(au_sbr(sb, bindex)))
++			return bindex;
++	return -EROFS;
++}
++
++/* top down parent */
++static int au_wbr_create_tdp(struct dentry *dentry, int isdir __maybe_unused)
++{
++	int err;
++	aufs_bindex_t bstart, bindex;
++	struct super_block *sb;
++	struct dentry *parent, *h_parent;
++
++	sb = dentry->d_sb;
++	bstart = au_dbstart(dentry);
++	err = bstart;
++	if (!au_br_rdonly(au_sbr(sb, bstart)))
++		goto out;
++
++	err = -EROFS;
++	parent = dget_parent(dentry);
++	for (bindex = au_dbstart(parent); bindex < bstart; bindex++) {
++		h_parent = au_h_dptr(parent, bindex);
++		if (!h_parent || !h_parent->d_inode)
++			continue;
++
++		if (!au_br_rdonly(au_sbr(sb, bindex))) {
++			err = bindex;
++			break;
++		}
++	}
++	dput(parent);
++
++	/* bottom up here */
++	if (unlikely(err < 0))
++		err = au_wbr_bu(sb, bstart - 1);
++
++ out:
++	AuDbg("b%d\n", err);
++	return err;
++}
++
++/* ---------------------------------------------------------------------- */
++
++/* an exception for the policy other than tdp */
++static int au_wbr_create_exp(struct dentry *dentry)
++{
++	int err;
++	aufs_bindex_t bwh, bdiropq;
++	struct dentry *parent;
++
++	err = -1;
++	bwh = au_dbwh(dentry);
++	parent = dget_parent(dentry);
++	bdiropq = au_dbdiropq(parent);
++	if (bwh >= 0) {
++		if (bdiropq >= 0)
++			err = min(bdiropq, bwh);
++		else
++			err = bwh;
++		AuDbg("%d\n", err);
++	} else if (bdiropq >= 0) {
++		err = bdiropq;
++		AuDbg("%d\n", err);
++	}
++	dput(parent);
++
++	if (err >= 0 && au_br_rdonly(au_sbr(dentry->d_sb, err)))
++		err = -1;
++
++	AuDbg("%d\n", err);
++	return err;
++}
++
++/* ---------------------------------------------------------------------- */
++
++/* round robin */
++static int au_wbr_create_init_rr(struct super_block *sb)
++{
++	int err;
++
++	err = au_wbr_bu(sb, au_sbend(sb));
++	atomic_set(&au_sbi(sb)->si_wbr_rr_next, -err); /* less important */
++	/* smp_mb(); */
++
++	AuDbg("b%d\n", err);
++	return err;
++}
++
++static int au_wbr_create_rr(struct dentry *dentry, int isdir)
++{
++	int err, nbr;
++	unsigned int u;
++	aufs_bindex_t bindex, bend;
++	struct super_block *sb;
++	atomic_t *next;
++
++	err = au_wbr_create_exp(dentry);
++	if (err >= 0)
++		goto out;
++
++	sb = dentry->d_sb;
++	next = &au_sbi(sb)->si_wbr_rr_next;
++	bend = au_sbend(sb);
++	nbr = bend + 1;
++	for (bindex = 0; bindex <= bend; bindex++) {
++		if (!isdir) {
++			err = atomic_dec_return(next) + 1;
++			/* modulo for 0 is meaningless */
++			if (unlikely(!err))
++				err = atomic_dec_return(next) + 1;
++		} else
++			err = atomic_read(next);
++		AuDbg("%d\n", err);
++		u = err;
++		err = u % nbr;
++		AuDbg("%d\n", err);
++		if (!au_br_rdonly(au_sbr(sb, err)))
++			break;
++		err = -EROFS;
++	}
++
++ out:
++	AuDbg("%d\n", err);
++	return err;
++}
++
++/* ---------------------------------------------------------------------- */
++
++/* most free space */
++static void au_mfs(struct dentry *dentry)
++{
++	struct super_block *sb;
++	struct au_branch *br;
++	struct au_wbr_mfs *mfs;
++	aufs_bindex_t bindex, bend;
++	int err;
++	unsigned long long b, bavail;
++	/* reduce the stack usage */
++	struct kstatfs *st;
++
++	st = kmalloc(sizeof(*st), GFP_NOFS);
++	if (unlikely(!st)) {
++		AuWarn1("failed updating mfs(%d), ignored\n", -ENOMEM);
++		return;
++	}
++
++	bavail = 0;
++	sb = dentry->d_sb;
++	mfs = &au_sbi(sb)->si_wbr_mfs;
++	MtxMustLock(&mfs->mfs_lock);
++	mfs->mfs_bindex = -EROFS;
++	mfs->mfsrr_bytes = 0;
++	bend = au_sbend(sb);
++	for (bindex = 0; bindex <= bend; bindex++) {
++		br = au_sbr(sb, bindex);
++		if (au_br_rdonly(br))
++			continue;
++
++		/* sb->s_root for NFS is unreliable */
++		err = vfs_statfs(br->br_mnt->mnt_root, st);
++		if (unlikely(err)) {
++			AuWarn1("failed statfs, b%d, %d\n", bindex, err);
++			continue;
++		}
++
++		/* when the available size is equal, select the lower one */
++		BUILD_BUG_ON(sizeof(b) < sizeof(st->f_bavail)
++			     || sizeof(b) < sizeof(st->f_bsize));
++		b = st->f_bavail * st->f_bsize;
++		br->br_wbr->wbr_bytes = b;
++		if (b >= bavail) {
++			bavail = b;
++			mfs->mfs_bindex = bindex;
++			mfs->mfs_jiffy = jiffies;
++		}
++	}
++
++	mfs->mfsrr_bytes = bavail;
++	AuDbg("b%d\n", mfs->mfs_bindex);
++	kfree(st);
++}
++
++static int au_wbr_create_mfs(struct dentry *dentry, int isdir __maybe_unused)
++{
++	int err;
++	struct super_block *sb;
++	struct au_wbr_mfs *mfs;
++
++	err = au_wbr_create_exp(dentry);
++	if (err >= 0)
++		goto out;
++
++	sb = dentry->d_sb;
++	mfs = &au_sbi(sb)->si_wbr_mfs;
++	mutex_lock(&mfs->mfs_lock);
++	if (time_after(jiffies, mfs->mfs_jiffy + mfs->mfs_expire)
++	    || mfs->mfs_bindex < 0
++	    || au_br_rdonly(au_sbr(sb, mfs->mfs_bindex)))
++		au_mfs(dentry);
++	mutex_unlock(&mfs->mfs_lock);
++	err = mfs->mfs_bindex;
++
++ out:
++	AuDbg("b%d\n", err);
++	return err;
++}
++
++static int au_wbr_create_init_mfs(struct super_block *sb)
++{
++	struct au_wbr_mfs *mfs;
++
++	mfs = &au_sbi(sb)->si_wbr_mfs;
++	mutex_init(&mfs->mfs_lock);
++	mfs->mfs_jiffy = 0;
++	mfs->mfs_bindex = -EROFS;
++
++	return 0;
++}
++
++static int au_wbr_create_fin_mfs(struct super_block *sb __maybe_unused)
++{
++	mutex_destroy(&au_sbi(sb)->si_wbr_mfs.mfs_lock);
++	return 0;
++}
++
++/* ---------------------------------------------------------------------- */
++
++/* most free space and then round robin */
++static int au_wbr_create_mfsrr(struct dentry *dentry, int isdir)
++{
++	int err;
++	struct au_wbr_mfs *mfs;
++
++	err = au_wbr_create_mfs(dentry, isdir);
++	if (err >= 0) {
++		mfs = &au_sbi(dentry->d_sb)->si_wbr_mfs;
++		mutex_lock(&mfs->mfs_lock);
++		if (mfs->mfsrr_bytes < mfs->mfsrr_watermark)
++			err = au_wbr_create_rr(dentry, isdir);
++		mutex_unlock(&mfs->mfs_lock);
++	}
++
++	AuDbg("b%d\n", err);
++	return err;
++}
++
++static int au_wbr_create_init_mfsrr(struct super_block *sb)
++{
++	int err;
++
++	au_wbr_create_init_mfs(sb); /* ignore */
++	err = au_wbr_create_init_rr(sb);
++
++	return err;
++}
++
++/* ---------------------------------------------------------------------- */
++
++/* top down parent and most free space */
++static int au_wbr_create_pmfs(struct dentry *dentry, int isdir)
++{
++	int err, e2;
++	unsigned long long b;
++	aufs_bindex_t bindex, bstart, bend;
++	struct super_block *sb;
++	struct dentry *parent, *h_parent;
++	struct au_branch *br;
++
++	err = au_wbr_create_tdp(dentry, isdir);
++	if (unlikely(err < 0))
++		goto out;
++	parent = dget_parent(dentry);
++	bstart = au_dbstart(parent);
++	bend = au_dbtaildir(parent);
++	if (bstart == bend)
++		goto out_parent; /* success */
++
++	e2 = au_wbr_create_mfs(dentry, isdir);
++	if (e2 < 0)
++		goto out_parent; /* success */
++
++	/* when the available size is equal, select upper one */
++	sb = dentry->d_sb;
++	br = au_sbr(sb, err);
++	b = br->br_wbr->wbr_bytes;
++	AuDbg("b%d, %llu\n", err, b);
++
++	for (bindex = bstart; bindex <= bend; bindex++) {
++		h_parent = au_h_dptr(parent, bindex);
++		if (!h_parent || !h_parent->d_inode)
++			continue;
++
++		br = au_sbr(sb, bindex);
++		if (!au_br_rdonly(br) && br->br_wbr->wbr_bytes > b) {
++			b = br->br_wbr->wbr_bytes;
++			err = bindex;
++			AuDbg("b%d, %llu\n", err, b);
++		}
++	}
++
++ out_parent:
++	dput(parent);
++ out:
++	AuDbg("b%d\n", err);
++	return err;
++}
++
++/* ---------------------------------------------------------------------- */
++
++/* policies for copyup */
++
++/* top down parent */
++static int au_wbr_copyup_tdp(struct dentry *dentry)
++{
++	return au_wbr_create_tdp(dentry, /*isdir, anything is ok*/0);
++}
++
++/* bottom up parent */
++static int au_wbr_copyup_bup(struct dentry *dentry)
++{
++	int err;
++	aufs_bindex_t bindex, bstart;
++	struct dentry *parent, *h_parent;
++	struct super_block *sb;
++
++	err = -EROFS;
++	sb = dentry->d_sb;
++	parent = dget_parent(dentry);
++	bstart = au_dbstart(parent);
++	for (bindex = au_dbstart(dentry); bindex >= bstart; bindex--) {
++		h_parent = au_h_dptr(parent, bindex);
++		if (!h_parent || !h_parent->d_inode)
++			continue;
++
++		if (!au_br_rdonly(au_sbr(sb, bindex))) {
++			err = bindex;
++			break;
++		}
++	}
++	dput(parent);
++
++	/* bottom up here */
++	if (unlikely(err < 0))
++		err = au_wbr_bu(sb, bstart - 1);
++
++	AuDbg("b%d\n", err);
++	return err;
++}
++
++/* bottom up */
++static int au_wbr_copyup_bu(struct dentry *dentry)
++{
++	int err;
++
++	err = au_wbr_bu(dentry->d_sb, au_dbstart(dentry));
++
++	AuDbg("b%d\n", err);
++	return err;
++}
++
++/* ---------------------------------------------------------------------- */
++
++struct au_wbr_copyup_operations au_wbr_copyup_ops[] = {
++	[AuWbrCopyup_TDP] = {
++		.copyup	= au_wbr_copyup_tdp
++	},
++	[AuWbrCopyup_BUP] = {
++		.copyup	= au_wbr_copyup_bup
++	},
++	[AuWbrCopyup_BU] = {
++		.copyup	= au_wbr_copyup_bu
++	}
++};
++
++struct au_wbr_create_operations au_wbr_create_ops[] = {
++	[AuWbrCreate_TDP] = {
++		.create	= au_wbr_create_tdp
++	},
++	[AuWbrCreate_RR] = {
++		.create	= au_wbr_create_rr,
++		.init	= au_wbr_create_init_rr
++	},
++	[AuWbrCreate_MFS] = {
++		.create	= au_wbr_create_mfs,
++		.init	= au_wbr_create_init_mfs,
++		.fin	= au_wbr_create_fin_mfs
++	},
++	[AuWbrCreate_MFSV] = {
++		.create	= au_wbr_create_mfs,
++		.init	= au_wbr_create_init_mfs,
++		.fin	= au_wbr_create_fin_mfs
++	},
++	[AuWbrCreate_MFSRR] = {
++		.create	= au_wbr_create_mfsrr,
++		.init	= au_wbr_create_init_mfsrr,
++		.fin	= au_wbr_create_fin_mfs
++	},
++	[AuWbrCreate_MFSRRV] = {
++		.create	= au_wbr_create_mfsrr,
++		.init	= au_wbr_create_init_mfsrr,
++		.fin	= au_wbr_create_fin_mfs
++	},
++	[AuWbrCreate_PMFS] = {
++		.create	= au_wbr_create_pmfs,
++		.init	= au_wbr_create_init_mfs,
++		.fin	= au_wbr_create_fin_mfs
++	},
++	[AuWbrCreate_PMFSV] = {
++		.create	= au_wbr_create_pmfs,
++		.init	= au_wbr_create_init_mfs,
++		.fin	= au_wbr_create_fin_mfs
++	}
++};
+diff -Nur linux-2.6.31.5.orig/fs/aufs/whout.c linux-2.6.31.5/fs/aufs/whout.c
+--- linux-2.6.31.5.orig/fs/aufs/whout.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.31.5/fs/aufs/whout.c	2009-11-15 22:02:37.000000000 +0100
+@@ -0,0 +1,1048 @@
++/*
++ * Copyright (C) 2005-2009 Junjiro R. Okajima
++ *
++ * This program, aufs is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
++ */
++
++/*
++ * whiteout for logical deletion and opaque directory
++ */
++
++#include <linux/fs.h>
++#include "aufs.h"
++
++#define WH_MASK			S_IRUGO
++
++/*
++ * If a directory contains this file, then it is opaque.  We start with the
++ * .wh. flag so that it is blocked by lookup.
++ */
++static struct qstr diropq_name = {
++	.name = AUFS_WH_DIROPQ,
++	.len = sizeof(AUFS_WH_DIROPQ) - 1
++};
++
++/*
++ * generate whiteout name, which is NOT terminated by NULL.
++ * @name: original d_name.name
++ * @len: original d_name.len
++ * @wh: whiteout qstr
++ * returns zero when succeeds, otherwise error.
++ * succeeded value as wh->name should be freed by kfree().
++ */
++int au_wh_name_alloc(struct qstr *wh, const struct qstr *name)
++{
++	char *p;
++
++	if (unlikely(name->len > PATH_MAX - AUFS_WH_PFX_LEN))
++		return -ENAMETOOLONG;
++
++	wh->len = name->len + AUFS_WH_PFX_LEN;
++	p = kmalloc(wh->len, GFP_NOFS);
++	wh->name = p;
++	if (p) {
++		memcpy(p, AUFS_WH_PFX, AUFS_WH_PFX_LEN);
++		memcpy(p + AUFS_WH_PFX_LEN, name->name, name->len);
++		/* smp_mb(); */
++		return 0;
++	}
++	return -ENOMEM;
++}
++
++/* ---------------------------------------------------------------------- */
++
++/*
++ * test if the @wh_name exists under @h_parent.
++ * @try_sio specifies the necessary of super-io.
++ */
++int au_wh_test(struct dentry *h_parent, struct qstr *wh_name,
++	       struct au_branch *br, int try_sio)
++{
++	int err;
++	struct dentry *wh_dentry;
++	struct inode *h_dir;
++
++	h_dir = h_parent->d_inode;
++	if (!try_sio)
++		wh_dentry = au_lkup_one(wh_name, h_parent, br, /*nd*/NULL);
++	else
++		wh_dentry = au_sio_lkup_one(wh_name, h_parent, br);
++	err = PTR_ERR(wh_dentry);
++	if (IS_ERR(wh_dentry))
++		goto out;
++
++	err = 0;
++	if (!wh_dentry->d_inode)
++		goto out_wh; /* success */
++
++	err = 1;
++	if (S_ISREG(wh_dentry->d_inode->i_mode))
++		goto out_wh; /* success */
++
++	err = -EIO;
++	AuIOErr("%.*s Invalid whiteout entry type 0%o.\n",
++		AuDLNPair(wh_dentry), wh_dentry->d_inode->i_mode);
++
++ out_wh:
++	dput(wh_dentry);
++ out:
++	return err;
++}
++
++/*
++ * test if the @h_dentry sets opaque or not.
++ */
++int au_diropq_test(struct dentry *h_dentry, struct au_branch *br)
++{
++	int err;
++	struct inode *h_dir;
++
++	h_dir = h_dentry->d_inode;
++	err = au_wh_test(h_dentry, &diropq_name, br,
++			 au_test_h_perm_sio(h_dir, MAY_EXEC));
++	return err;
++}
++
++/*
++ * returns a negative dentry whose name is unique and temporary.
++ */
++struct dentry *au_whtmp_lkup(struct dentry *h_parent, struct au_branch *br,
++			     struct qstr *prefix)
++{
++#define HEX_LEN	4
++	struct dentry *dentry;
++	int i;
++	char defname[AUFS_WH_PFX_LEN * 2 + DNAME_INLINE_LEN_MIN + 1
++		     + HEX_LEN + 1], *name, *p;
++	static unsigned short cnt;
++	struct qstr qs;
++
++	name = defname;
++	qs.len = sizeof(defname) - DNAME_INLINE_LEN_MIN + prefix->len - 1;
++	if (unlikely(prefix->len > DNAME_INLINE_LEN_MIN)) {
++		dentry = ERR_PTR(-ENAMETOOLONG);
++		if (unlikely(qs.len >= PATH_MAX))
++			goto out;
++		dentry = ERR_PTR(-ENOMEM);
++		name = kmalloc(qs.len + 1, GFP_NOFS);
++		if (unlikely(!name))
++			goto out;
++	}
++
++	/* doubly whiteout-ed */
++	memcpy(name, AUFS_WH_PFX AUFS_WH_PFX, AUFS_WH_PFX_LEN * 2);
++	p = name + AUFS_WH_PFX_LEN * 2;
++	memcpy(p, prefix->name, prefix->len);
++	p += prefix->len;
++	*p++ = '.';
++	AuDebugOn(name + qs.len + 1 - p <= HEX_LEN);
++
++	qs.name = name;
++	for (i = 0; i < 3; i++) {
++		sprintf(p, "%.*d", HEX_LEN, cnt++);
++		dentry = au_sio_lkup_one(&qs, h_parent, br);
++		if (IS_ERR(dentry) || !dentry->d_inode)
++			goto out_name;
++		dput(dentry);
++	}
++	/* AuWarn("could not get random name\n"); */
++	dentry = ERR_PTR(-EEXIST);
++	AuDbg("%.*s\n", AuLNPair(&qs));
++	BUG();
++
++ out_name:
++	if (name != defname)
++		kfree(name);
++ out:
++	return dentry;
++#undef HEX_LEN
++}
++
++/*
++ * rename the @h_dentry on @br to the whiteouted temporary name.
++ */
++int au_whtmp_ren(struct dentry *h_dentry, struct au_branch *br)
++{
++	int err;
++	struct path h_path = {
++		.mnt = br->br_mnt
++	};
++	struct inode *h_dir;
++	struct dentry *h_parent;
++
++	h_parent = h_dentry->d_parent; /* dir inode is locked */
++	h_dir = h_parent->d_inode;
++	IMustLock(h_dir);
++
++	h_path.dentry = au_whtmp_lkup(h_parent, br, &h_dentry->d_name);
++	err = PTR_ERR(h_path.dentry);
++	if (IS_ERR(h_path.dentry))
++		goto out;
++
++	/* under the same dir, no need to lock_rename() */
++	err = vfsub_rename(h_dir, h_dentry, h_dir, &h_path);
++	AuTraceErr(err);
++	dput(h_path.dentry);
++
++ out:
++	return err;
++}
++
++/* ---------------------------------------------------------------------- */
++/*
++ * functions for removing a whiteout
++ */
++
++static int do_unlink_wh(struct inode *h_dir, struct path *h_path)
++{
++	int force;
++
++	/*
++	 * forces superio when the dir has a sticky bit.
++	 * this may be a violation of unix fs semantics.
++	 */
++	force = (h_dir->i_mode & S_ISVTX)
++		&& h_path->dentry->d_inode->i_uid != current_fsuid();
++	return vfsub_unlink(h_dir, h_path, force);
++}
++
++int au_wh_unlink_dentry(struct inode *h_dir, struct path *h_path,
++			struct dentry *dentry)
++{
++	int err;
++
++	err = do_unlink_wh(h_dir, h_path);
++	if (!err && dentry)
++		au_set_dbwh(dentry, -1);
++
++	return err;
++}
++
++static int unlink_wh_name(struct dentry *h_parent, struct qstr *wh,
++			  struct au_branch *br)
++{
++	int err;
++	struct path h_path = {
++		.mnt = br->br_mnt
++	};
++
++	err = 0;
++	h_path.dentry = au_lkup_one(wh, h_parent, br, /*nd*/NULL);
++	if (IS_ERR(h_path.dentry))
++		err = PTR_ERR(h_path.dentry);
++	else {
++		if (h_path.dentry->d_inode
++		    && S_ISREG(h_path.dentry->d_inode->i_mode))
++			err = do_unlink_wh(h_parent->d_inode, &h_path);
++		dput(h_path.dentry);
++	}
++
++	return err;
++}
++
++/* ---------------------------------------------------------------------- */
++/*
++ * initialize/clean whiteout for a branch
++ */
++
++static void au_wh_clean(struct inode *h_dir, struct path *whpath,
++			const int isdir)
++{
++	int err;
++
++	if (!whpath->dentry->d_inode)
++		return;
++
++	err = mnt_want_write(whpath->mnt);
++	if (!err) {
++		if (isdir)
++			err = vfsub_rmdir(h_dir, whpath);
++		else
++			err = vfsub_unlink(h_dir, whpath, /*force*/0);
++		mnt_drop_write(whpath->mnt);
++	}
++	if (unlikely(err))
++		AuWarn("failed removing %.*s (%d), ignored.\n",
++		       AuDLNPair(whpath->dentry), err);
++}
++
++static int test_linkable(struct dentry *h_root)
++{
++	struct inode *h_dir = h_root->d_inode;
++
++	if (h_dir->i_op->link)
++		return 0;
++
++	AuErr("%.*s (%s) doesn't support link(2), use noplink and rw+nolwh\n",
++	      AuDLNPair(h_root), au_sbtype(h_root->d_sb));
++	return -ENOSYS;
++}
++
++/* todo: should this mkdir be done in /sbin/mount.aufs helper? */
++static int au_whdir(struct inode *h_dir, struct path *path)
++{
++	int err;
++
++	err = -EEXIST;
++	if (!path->dentry->d_inode) {
++		int mode = S_IRWXU;
++
++		if (au_test_nfs(path->dentry->d_sb))
++			mode |= S_IXUGO;
++		err = mnt_want_write(path->mnt);
++		if (!err) {
++			err = vfsub_mkdir(h_dir, path, mode);
++			mnt_drop_write(path->mnt);
++		}
++	} else if (S_ISDIR(path->dentry->d_inode->i_mode))
++		err = 0;
++	else
++		AuErr("unknown %.*s exists\n", AuDLNPair(path->dentry));
++
++	return err;
++}
++
++struct au_wh_base {
++	const struct qstr *name;
++	struct dentry *dentry;
++};
++
++static void au_wh_init_ro(struct inode *h_dir, struct au_wh_base base[],
++			  struct path *h_path)
++{
++	h_path->dentry = base[AuBrWh_BASE].dentry;
++	au_wh_clean(h_dir, h_path, /*isdir*/0);
++	h_path->dentry = base[AuBrWh_PLINK].dentry;
++	au_wh_clean(h_dir, h_path, /*isdir*/1);
++	h_path->dentry = base[AuBrWh_ORPH].dentry;
++	au_wh_clean(h_dir, h_path, /*isdir*/1);
++}
++
++/*
++ * returns tri-state,
++ * minus: error, caller should print the message
++ * zero: success
++ * plus: error, caller should NOT print the message
++ */
++static int au_wh_init_rw_nolink(struct dentry *h_root, struct au_wbr *wbr,
++				int do_plink, struct au_wh_base base[],
++				struct path *h_path)
++{
++	int err;
++	struct inode *h_dir;
++
++	h_dir = h_root->d_inode;
++	h_path->dentry = base[AuBrWh_BASE].dentry;
++	au_wh_clean(h_dir, h_path, /*isdir*/0);
++	h_path->dentry = base[AuBrWh_PLINK].dentry;
++	if (do_plink) {
++		err = test_linkable(h_root);
++		if (unlikely(err)) {
++			err = 1;
++			goto out;
++		}
++
++		err = au_whdir(h_dir, h_path);
++		if (unlikely(err))
++			goto out;
++		wbr->wbr_plink = dget(base[AuBrWh_PLINK].dentry);
++	} else
++		au_wh_clean(h_dir, h_path, /*isdir*/1);
++	h_path->dentry = base[AuBrWh_ORPH].dentry;
++	err = au_whdir(h_dir, h_path);
++	if (unlikely(err))
++		goto out;
++	wbr->wbr_orph = dget(base[AuBrWh_ORPH].dentry);
++
++ out:
++	return err;
++}
++
++/*
++ * for the moment, aufs supports the branch filesystem which does not support
++ * link(2). testing on FAT which does not support i_op->setattr() fully either,
++ * copyup failed. finally, such filesystem will not be used as the writable
++ * branch.
++ *
++ * returns tri-state, see above.
++ */
++static int au_wh_init_rw(struct dentry *h_root, struct au_wbr *wbr,
++			 int do_plink, struct au_wh_base base[],
++			 struct path *h_path)
++{
++	int err;
++	struct inode *h_dir;
++
++	WbrWhMustWriteLock(wbr);
++
++	err = test_linkable(h_root);
++	if (unlikely(err)) {
++		err = 1;
++		goto out;
++	}
++
++	/*
++	 * todo: should this create be done in /sbin/mount.aufs helper?
++	 */
++	err = -EEXIST;
++	h_dir = h_root->d_inode;
++	if (!base[AuBrWh_BASE].dentry->d_inode) {
++		err = mnt_want_write(h_path->mnt);
++		if (!err) {
++			h_path->dentry = base[AuBrWh_BASE].dentry;
++			err = vfsub_create(h_dir, h_path, WH_MASK);
++			mnt_drop_write(h_path->mnt);
++		}
++	} else if (S_ISREG(base[AuBrWh_BASE].dentry->d_inode->i_mode))
++		err = 0;
++	else
++		AuErr("unknown %.*s/%.*s exists\n",
++		      AuDLNPair(h_root), AuDLNPair(base[AuBrWh_BASE].dentry));
++	if (unlikely(err))
++		goto out;
++
++	h_path->dentry = base[AuBrWh_PLINK].dentry;
++	if (do_plink) {
++		err = au_whdir(h_dir, h_path);
++		if (unlikely(err))
++			goto out;
++		wbr->wbr_plink = dget(base[AuBrWh_PLINK].dentry);
++	} else
++		au_wh_clean(h_dir, h_path, /*isdir*/1);
++	wbr->wbr_whbase = dget(base[AuBrWh_BASE].dentry);
++
++	h_path->dentry = base[AuBrWh_ORPH].dentry;
++	err = au_whdir(h_dir, h_path);
++	if (unlikely(err))
++		goto out;
++	wbr->wbr_orph = dget(base[AuBrWh_ORPH].dentry);
++
++ out:
++	return err;
++}
++
++/*
++ * initialize the whiteout base file/dir for @br.
++ */
++int au_wh_init(struct dentry *h_root, struct au_branch *br,
++	       struct super_block *sb)
++{
++	int err, i;
++	const unsigned char do_plink
++		= !!au_opt_test(au_mntflags(sb), PLINK);
++	struct path path = {
++		.mnt = br->br_mnt
++	};
++	struct inode *h_dir;
++	struct au_wbr *wbr = br->br_wbr;
++	static const struct qstr base_name[] = {
++		[AuBrWh_BASE] = {
++			.name	= AUFS_BASE_NAME,
++			.len	= sizeof(AUFS_BASE_NAME) - 1
++		},
++		[AuBrWh_PLINK] = {
++			.name	= AUFS_PLINKDIR_NAME,
++			.len	= sizeof(AUFS_PLINKDIR_NAME) - 1
++		},
++		[AuBrWh_ORPH] = {
++			.name	= AUFS_ORPHDIR_NAME,
++			.len	= sizeof(AUFS_ORPHDIR_NAME) - 1
++		}
++	};
++	struct au_wh_base base[] = {
++		[AuBrWh_BASE] = {
++			.name	= base_name + AuBrWh_BASE,
++			.dentry	= NULL
++		},
++		[AuBrWh_PLINK] = {
++			.name	= base_name + AuBrWh_PLINK,
++			.dentry	= NULL
++		},
++		[AuBrWh_ORPH] = {
++			.name	= base_name + AuBrWh_ORPH,
++			.dentry	= NULL
++		}
++	};
++
++	if (wbr)
++		WbrWhMustWriteLock(wbr);
++
++	h_dir = h_root->d_inode;
++	for (i = 0; i < AuBrWh_Last; i++) {
++		/* doubly whiteouted */
++		struct dentry *d;
++
++		d = au_wh_lkup(h_root, (void *)base[i].name, br);
++		err = PTR_ERR(d);
++		if (IS_ERR(d))
++			goto out;
++
++		base[i].dentry = d;
++		AuDebugOn(wbr
++			  && wbr->wbr_wh[i]
++			  && wbr->wbr_wh[i] != base[i].dentry);
++	}
++
++	if (wbr)
++		for (i = 0; i < AuBrWh_Last; i++) {
++			dput(wbr->wbr_wh[i]);
++			wbr->wbr_wh[i] = NULL;
++		}
++
++	err = 0;
++
++	switch (br->br_perm) {
++	case AuBrPerm_RO:
++	case AuBrPerm_ROWH:
++	case AuBrPerm_RR:
++	case AuBrPerm_RRWH:
++		au_wh_init_ro(h_dir, base, &path);
++		break;
++
++	case AuBrPerm_RWNoLinkWH:
++		err = au_wh_init_rw_nolink(h_root, wbr, do_plink, base, &path);
++		if (err > 0)
++			goto out;
++		else if (err)
++			goto out_err;
++		break;
++
++	case AuBrPerm_RW:
++		err = au_wh_init_rw(h_root, wbr, do_plink, base, &path);
++		if (err > 0)
++			goto out;
++		else if (err)
++			goto out_err;
++		break;
++
++	default:
++		BUG();
++	}
++	goto out; /* success */
++
++ out_err:
++	AuErr("an error(%d) on the writable branch %.*s(%s)\n",
++	      err, AuDLNPair(h_root), au_sbtype(h_root->d_sb));
++ out:
++	for (i = 0; i < AuBrWh_Last; i++)
++		dput(base[i].dentry);
++	return err;
++}
++
++/* ---------------------------------------------------------------------- */
++/*
++ * whiteouts are all hard-linked usually.
++ * when its link count reaches a ceiling, we create a new whiteout base
++ * asynchronously.
++ */
++
++struct reinit_br_wh {
++	struct super_block *sb;
++	struct au_branch *br;
++};
++
++static void reinit_br_wh(void *arg)
++{
++	int err;
++	aufs_bindex_t bindex;
++	struct path h_path;
++	struct reinit_br_wh *a = arg;
++	struct au_wbr *wbr;
++	struct inode *dir;
++	struct dentry *h_root;
++	struct au_hinode *hdir;
++
++	err = 0;
++	wbr = a->br->br_wbr;
++	/* big aufs lock */
++	si_noflush_write_lock(a->sb);
++	if (!au_br_writable(a->br->br_perm))
++		goto out;
++	bindex = au_br_index(a->sb, a->br->br_id);
++	if (unlikely(bindex < 0))
++		goto out;
++
++	di_read_lock_parent(a->sb->s_root, AuLock_IR);
++	dir = a->sb->s_root->d_inode;
++	hdir = au_hi(dir, bindex);
++	h_root = au_h_dptr(a->sb->s_root, bindex);
++
++	au_hin_imtx_lock_nested(hdir, AuLsc_I_PARENT);
++	wbr_wh_write_lock(wbr);
++	err = au_h_verify(wbr->wbr_whbase, au_opt_udba(a->sb), hdir->hi_inode,
++			  h_root, a->br);
++	if (!err) {
++		err = mnt_want_write(a->br->br_mnt);
++		if (!err) {
++			h_path.dentry = wbr->wbr_whbase;
++			h_path.mnt = a->br->br_mnt;
++			err = vfsub_unlink(hdir->hi_inode, &h_path, /*force*/0);
++			mnt_drop_write(a->br->br_mnt);
++		}
++	} else {
++		AuWarn("%.*s is moved, ignored\n", AuDLNPair(wbr->wbr_whbase));
++		err = 0;
++	}
++	dput(wbr->wbr_whbase);
++	wbr->wbr_whbase = NULL;
++	if (!err)
++		err = au_wh_init(h_root, a->br, a->sb);
++	wbr_wh_write_unlock(wbr);
++	au_hin_imtx_unlock(hdir);
++	di_read_unlock(a->sb->s_root, AuLock_IR);
++
++ out:
++	if (wbr)
++		atomic_dec(&wbr->wbr_wh_running);
++	atomic_dec(&a->br->br_count);
++	au_nwt_done(&au_sbi(a->sb)->si_nowait);
++	si_write_unlock(a->sb);
++	kfree(arg);
++	if (unlikely(err))
++		AuIOErr("err %d\n", err);
++}
++
++static void kick_reinit_br_wh(struct super_block *sb, struct au_branch *br)
++{
++	int do_dec, wkq_err;
++	struct reinit_br_wh *arg;
++
++	do_dec = 1;
++	if (atomic_inc_return(&br->br_wbr->wbr_wh_running) != 1)
++		goto out;
++
++	/* ignore ENOMEM */
++	arg = kmalloc(sizeof(*arg), GFP_NOFS);
++	if (arg) {
++		/*
++		 * dec(wh_running), kfree(arg) and dec(br_count)
++		 * in reinit function
++		 */
++		arg->sb = sb;
++		arg->br = br;
++		atomic_inc(&br->br_count);
++		wkq_err = au_wkq_nowait(reinit_br_wh, arg, sb);
++		if (unlikely(wkq_err)) {
++			atomic_dec(&br->br_wbr->wbr_wh_running);
++			atomic_dec(&br->br_count);
++			kfree(arg);
++		}
++		do_dec = 0;
++	}
++
++ out:
++	if (do_dec)
++		atomic_dec(&br->br_wbr->wbr_wh_running);
++}
++
++/* ---------------------------------------------------------------------- */
++
++/*
++ * create the whiteout @wh.
++ */
++static int link_or_create_wh(struct super_block *sb, aufs_bindex_t bindex,
++			     struct dentry *wh)
++{
++	int err;
++	struct path h_path = {
++		.dentry = wh
++	};
++	struct au_branch *br;
++	struct au_wbr *wbr;
++	struct dentry *h_parent;
++	struct inode *h_dir;
++
++	h_parent = wh->d_parent; /* dir inode is locked */
++	h_dir = h_parent->d_inode;
++	IMustLock(h_dir);
++
++	br = au_sbr(sb, bindex);
++	h_path.mnt = br->br_mnt;
++	wbr = br->br_wbr;
++	wbr_wh_read_lock(wbr);
++	if (wbr->wbr_whbase) {
++		err = vfsub_link(wbr->wbr_whbase, h_dir, &h_path);
++		if (!err || err != -EMLINK)
++			goto out;
++
++		/* link count full. re-initialize br_whbase. */
++		kick_reinit_br_wh(sb, br);
++	}
++
++	/* return this error in this context */
++	err = vfsub_create(h_dir, &h_path, WH_MASK);
++
++ out:
++	wbr_wh_read_unlock(wbr);
++	return err;
++}
++
++/* ---------------------------------------------------------------------- */
++
++/*
++ * create or remove the diropq.
++ */
++static struct dentry *do_diropq(struct dentry *dentry, aufs_bindex_t bindex,
++				unsigned int flags)
++{
++	struct dentry *opq_dentry, *h_dentry;
++	struct super_block *sb;
++	struct au_branch *br;
++	int err;
++
++	sb = dentry->d_sb;
++	br = au_sbr(sb, bindex);
++	h_dentry = au_h_dptr(dentry, bindex);
++	opq_dentry = au_lkup_one(&diropq_name, h_dentry, br, /*nd*/NULL);
++	if (IS_ERR(opq_dentry))
++		goto out;
++
++	if (au_ftest_diropq(flags, CREATE)) {
++		err = link_or_create_wh(sb, bindex, opq_dentry);
++		if (!err) {
++			au_set_dbdiropq(dentry, bindex);
++			goto out; /* success */
++		}
++	} else {
++		struct path tmp = {
++			.dentry = opq_dentry,
++			.mnt	= br->br_mnt
++		};
++		err = do_unlink_wh(au_h_iptr(dentry->d_inode, bindex), &tmp);
++		if (!err)
++			au_set_dbdiropq(dentry, -1);
++	}
++	dput(opq_dentry);
++	opq_dentry = ERR_PTR(err);
++
++ out:
++	return opq_dentry;
++}
++
++struct do_diropq_args {
++	struct dentry **errp;
++	struct dentry *dentry;
++	aufs_bindex_t bindex;
++	unsigned int flags;
++};
++
++static void call_do_diropq(void *args)
++{
++	struct do_diropq_args *a = args;
++	*a->errp = do_diropq(a->dentry, a->bindex, a->flags);
++}
++
++struct dentry *au_diropq_sio(struct dentry *dentry, aufs_bindex_t bindex,
++			     unsigned int flags)
++{
++	struct dentry *diropq, *h_dentry;
++
++	h_dentry = au_h_dptr(dentry, bindex);
++	if (!au_test_h_perm_sio(h_dentry->d_inode, MAY_EXEC | MAY_WRITE))
++		diropq = do_diropq(dentry, bindex, flags);
++	else {
++		int wkq_err;
++		struct do_diropq_args args = {
++			.errp		= &diropq,
++			.dentry		= dentry,
++			.bindex		= bindex,
++			.flags		= flags
++		};
++
++		wkq_err = au_wkq_wait(call_do_diropq, &args);
++		if (unlikely(wkq_err))
++			diropq = ERR_PTR(wkq_err);
++	}
++
++	return diropq;
++}
++
++/* ---------------------------------------------------------------------- */
++
++/*
++ * lookup whiteout dentry.
++ * @h_parent: lower parent dentry which must exist and be locked
++ * @base_name: name of dentry which will be whiteouted
++ * returns dentry for whiteout.
++ */
++struct dentry *au_wh_lkup(struct dentry *h_parent, struct qstr *base_name,
++			  struct au_branch *br)
++{
++	int err;
++	struct qstr wh_name;
++	struct dentry *wh_dentry;
++
++	err = au_wh_name_alloc(&wh_name, base_name);
++	wh_dentry = ERR_PTR(err);
++	if (!err) {
++		wh_dentry = au_lkup_one(&wh_name, h_parent, br, /*nd*/NULL);
++		kfree(wh_name.name);
++	}
++	return wh_dentry;
++}
++
++/*
++ * link/create a whiteout for @dentry on @bindex.
++ */
++struct dentry *au_wh_create(struct dentry *dentry, aufs_bindex_t bindex,
++			    struct dentry *h_parent)
++{
++	struct dentry *wh_dentry;
++	struct super_block *sb;
++	int err;
++
++	sb = dentry->d_sb;
++	wh_dentry = au_wh_lkup(h_parent, &dentry->d_name, au_sbr(sb, bindex));
++	if (!IS_ERR(wh_dentry) && !wh_dentry->d_inode) {
++		err = link_or_create_wh(sb, bindex, wh_dentry);
++		if (!err)
++			au_set_dbwh(dentry, bindex);
++		else {
++			dput(wh_dentry);
++			wh_dentry = ERR_PTR(err);
++		}
++	}
++
++	return wh_dentry;
++}
++
++/* ---------------------------------------------------------------------- */
++
++/* Delete all whiteouts in this directory on branch bindex. */
++static int del_wh_children(struct dentry *h_dentry, struct au_nhash *whlist,
++			   aufs_bindex_t bindex, struct au_branch *br)
++{
++	int err;
++	unsigned long ul, n;
++	struct qstr wh_name;
++	char *p;
++	struct hlist_head *head;
++	struct au_vdir_wh *tpos;
++	struct hlist_node *pos;
++	struct au_vdir_destr *str;
++
++	err = -ENOMEM;
++	p = __getname();
++	wh_name.name = p;
++	if (unlikely(!wh_name.name))
++		goto out;
++
++	err = 0;
++	memcpy(p, AUFS_WH_PFX, AUFS_WH_PFX_LEN);
++	p += AUFS_WH_PFX_LEN;
++	n = whlist->nh_num;
++	head = whlist->nh_head;
++	for (ul = 0; !err && ul < n; ul++, head++) {
++		hlist_for_each_entry(tpos, pos, head, wh_hash) {
++			if (tpos->wh_bindex != bindex)
++				continue;
++
++			str = &tpos->wh_str;
++			if (str->len + AUFS_WH_PFX_LEN <= PATH_MAX) {
++				memcpy(p, str->name, str->len);
++				wh_name.len = AUFS_WH_PFX_LEN + str->len;
++				err = unlink_wh_name(h_dentry, &wh_name, br);
++				if (!err)
++					continue;
++				break;
++			}
++			AuIOErr("whiteout name too long %.*s\n",
++				str->len, str->name);
++			err = -EIO;
++			break;
++		}
++	}
++	__putname(wh_name.name);
++
++ out:
++	return err;
++}
++
++struct del_wh_children_args {
++	int *errp;
++	struct dentry *h_dentry;
++	struct au_nhash whlist;
++	aufs_bindex_t bindex;
++	struct au_branch *br;
++};
++
++static void call_del_wh_children(void *args)
++{
++	struct del_wh_children_args *a = args;
++	*a->errp = del_wh_children(a->h_dentry, &a->whlist, a->bindex, a->br);
++}
++
++/* ---------------------------------------------------------------------- */
++
++struct au_whtmp_rmdir *au_whtmp_rmdir_alloc(struct super_block *sb, gfp_t gfp)
++{
++	struct au_whtmp_rmdir *whtmp;
++	int err;
++
++	SiMustAnyLock(sb);
++
++	whtmp = kmalloc(sizeof(*whtmp), gfp);
++	if (unlikely(!whtmp)) {
++		whtmp = ERR_PTR(-ENOMEM);
++		goto out;
++	}
++
++	whtmp->dir = NULL;
++	whtmp->wh_dentry = NULL;
++	err = au_nhash_alloc(&whtmp->whlist, au_sbi(sb)->si_rdhash, gfp);
++	if (!err)
++		return whtmp; /* success */
++
++	kfree(whtmp);
++	whtmp = ERR_PTR(err);
++
++ out:
++	return whtmp;
++}
++
++void au_whtmp_rmdir_free(struct au_whtmp_rmdir *whtmp)
++{
++	dput(whtmp->wh_dentry);
++	iput(whtmp->dir);
++	au_nhash_wh_free(&whtmp->whlist);
++	kfree(whtmp);
++}
++
++/*
++ * rmdir the whiteouted temporary named dir @h_dentry.
++ * @whlist: whiteouted children.
++ */
++int au_whtmp_rmdir(struct inode *dir, aufs_bindex_t bindex,
++		   struct dentry *wh_dentry, struct au_nhash *whlist)
++{
++	int err;
++	struct path h_tmp;
++	struct inode *wh_inode, *h_dir;
++	struct au_branch *br;
++
++	h_dir = wh_dentry->d_parent->d_inode; /* dir inode is locked */
++	IMustLock(h_dir);
++
++	br = au_sbr(dir->i_sb, bindex);
++	wh_inode = wh_dentry->d_inode;
++	mutex_lock_nested(&wh_inode->i_mutex, AuLsc_I_CHILD);
++
++	/*
++	 * someone else might change some whiteouts while we were sleeping.
++	 * it means this whlist may have an obsoleted entry.
++	 */
++	if (!au_test_h_perm_sio(wh_inode, MAY_EXEC | MAY_WRITE))
++		err = del_wh_children(wh_dentry, whlist, bindex, br);
++	else {
++		int wkq_err;
++		struct del_wh_children_args args = {
++			.errp		= &err,
++			.h_dentry	= wh_dentry,
++			.whlist		= *whlist,
++			.bindex		= bindex,
++			.br		= br
++		};
++
++		wkq_err = au_wkq_wait(call_del_wh_children, &args);
++		if (unlikely(wkq_err))
++			err = wkq_err;
++	}
++	mutex_unlock(&wh_inode->i_mutex);
++
++	if (!err) {
++		h_tmp.dentry = wh_dentry;
++		h_tmp.mnt = br->br_mnt;
++		err = vfsub_rmdir(h_dir, &h_tmp);
++		/* d_drop(h_dentry); */
++	}
++
++	if (!err) {
++		if (au_ibstart(dir) == bindex) {
++			au_cpup_attr_timesizes(dir);
++			drop_nlink(dir);
++		}
++		return 0; /* success */
++	}
++
++	AuWarn("failed removing %.*s(%d), ignored\n",
++	       AuDLNPair(wh_dentry), err);
++	return err;
++}
++
++static void call_rmdir_whtmp(void *args)
++{
++	int err;
++	struct au_whtmp_rmdir *a = args;
++	struct super_block *sb;
++	struct dentry *h_parent;
++	struct inode *h_dir;
++	struct au_branch *br;
++	struct au_hinode *hdir;
++
++	/* rmdir by nfsd may cause deadlock with this i_mutex */
++	/* mutex_lock(&a->dir->i_mutex); */
++	sb = a->dir->i_sb;
++	si_noflush_read_lock(sb);
++	err = au_test_ro(sb, a->bindex, NULL);
++	if (unlikely(err))
++		goto out;
++
++	err = -EIO;
++	br = au_sbr(sb, a->bindex);
++	ii_write_lock_parent(a->dir);
++	h_parent = dget_parent(a->wh_dentry);
++	h_dir = h_parent->d_inode;
++	hdir = au_hi(a->dir, a->bindex);
++	au_hin_imtx_lock_nested(hdir, AuLsc_I_PARENT);
++	err = au_h_verify(a->wh_dentry, au_opt_udba(sb), h_dir, h_parent, br);
++	if (!err) {
++		err = mnt_want_write(br->br_mnt);
++		if (!err) {
++			err = au_whtmp_rmdir(a->dir, a->bindex, a->wh_dentry,
++					     &a->whlist);
++			mnt_drop_write(br->br_mnt);
++		}
++	}
++	au_hin_imtx_unlock(hdir);
++	dput(h_parent);
++	ii_write_unlock(a->dir);
++
++ out:
++	/* mutex_unlock(&a->dir->i_mutex); */
++	au_nwt_done(&au_sbi(sb)->si_nowait);
++	si_read_unlock(sb);
++	au_whtmp_rmdir_free(a);
++	if (unlikely(err))
++		AuIOErr("err %d\n", err);
++}
++
++void au_whtmp_kick_rmdir(struct inode *dir, aufs_bindex_t bindex,
++			 struct dentry *wh_dentry, struct au_whtmp_rmdir *args)
++{
++	int wkq_err;
++
++	IMustLock(dir);
++
++	/* all post-process will be done in do_rmdir_whtmp(). */
++	args->dir = au_igrab(dir);
++	args->bindex = bindex;
++	args->wh_dentry = dget(wh_dentry);
++	wkq_err = au_wkq_nowait(call_rmdir_whtmp, args, dir->i_sb);
++	if (unlikely(wkq_err)) {
++		AuWarn("rmdir error %.*s (%d), ignored\n",
++		       AuDLNPair(wh_dentry), wkq_err);
++		au_whtmp_rmdir_free(args);
++	}
++}
+diff -Nur linux-2.6.31.5.orig/fs/aufs/whout.h linux-2.6.31.5/fs/aufs/whout.h
+--- linux-2.6.31.5.orig/fs/aufs/whout.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.31.5/fs/aufs/whout.h	2009-11-15 22:02:37.000000000 +0100
+@@ -0,0 +1,87 @@
++/*
++ * Copyright (C) 2005-2009 Junjiro R. Okajima
++ *
++ * This program, aufs is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
++ */
++
++/*
++ * whiteout for logical deletion and opaque directory
++ */
++
++#ifndef __AUFS_WHOUT_H__
++#define __AUFS_WHOUT_H__
++
++#ifdef __KERNEL__
++
++#include <linux/aufs_type.h>
++#include "dir.h"
++
++/* whout.c */
++int au_wh_name_alloc(struct qstr *wh, const struct qstr *name);
++struct au_branch;
++int au_wh_test(struct dentry *h_parent, struct qstr *wh_name,
++	       struct au_branch *br, int try_sio);
++int au_diropq_test(struct dentry *h_dentry, struct au_branch *br);
++struct dentry *au_whtmp_lkup(struct dentry *h_parent, struct au_branch *br,
++			     struct qstr *prefix);
++int au_whtmp_ren(struct dentry *h_dentry, struct au_branch *br);
++int au_wh_unlink_dentry(struct inode *h_dir, struct path *h_path,
++			struct dentry *dentry);
++int au_wh_init(struct dentry *h_parent, struct au_branch *br,
++	       struct super_block *sb);
++
++/* diropq flags */
++#define AuDiropq_CREATE	1
++#define au_ftest_diropq(flags, name)	((flags) & AuDiropq_##name)
++#define au_fset_diropq(flags, name)	{ (flags) |= AuDiropq_##name; }
++#define au_fclr_diropq(flags, name)	{ (flags) &= ~AuDiropq_##name; }
++
++struct dentry *au_diropq_sio(struct dentry *dentry, aufs_bindex_t bindex,
++			     unsigned int flags);
++struct dentry *au_wh_lkup(struct dentry *h_parent, struct qstr *base_name,
++			  struct au_branch *br);
++struct dentry *au_wh_create(struct dentry *dentry, aufs_bindex_t bindex,
++			    struct dentry *h_parent);
++
++/* real rmdir for the whiteout-ed dir */
++struct au_whtmp_rmdir {
++	struct inode *dir;
++	aufs_bindex_t bindex;
++	struct dentry *wh_dentry;
++	struct au_nhash whlist;
++};
++
++struct au_whtmp_rmdir *au_whtmp_rmdir_alloc(struct super_block *sb, gfp_t gfp);
++void au_whtmp_rmdir_free(struct au_whtmp_rmdir *whtmp);
++int au_whtmp_rmdir(struct inode *dir, aufs_bindex_t bindex,
++		   struct dentry *wh_dentry, struct au_nhash *whlist);
++void au_whtmp_kick_rmdir(struct inode *dir, aufs_bindex_t bindex,
++			 struct dentry *wh_dentry, struct au_whtmp_rmdir *args);
++
++/* ---------------------------------------------------------------------- */
++
++static inline struct dentry *au_diropq_create(struct dentry *dentry,
++					      aufs_bindex_t bindex)
++{
++	return au_diropq_sio(dentry, bindex, AuDiropq_CREATE);
++}
++
++static inline int au_diropq_remove(struct dentry *dentry, aufs_bindex_t bindex)
++{
++	return PTR_ERR(au_diropq_sio(dentry, bindex, !AuDiropq_CREATE));
++}
++
++#endif /* __KERNEL__ */
++#endif /* __AUFS_WHOUT_H__ */
+diff -Nur linux-2.6.31.5.orig/fs/aufs/wkq.c linux-2.6.31.5/fs/aufs/wkq.c
+--- linux-2.6.31.5.orig/fs/aufs/wkq.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.31.5/fs/aufs/wkq.c	2009-11-15 22:02:37.000000000 +0100
+@@ -0,0 +1,259 @@
++/*
++ * Copyright (C) 2005-2009 Junjiro R. Okajima
++ *
++ * This program, aufs is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
++ */
++
++/*
++ * workqueue for asynchronous/super-io operations
++ * todo: try new credential scheme
++ */
++
++#include <linux/module.h>
++#include "aufs.h"
++
++/* internal workqueue named AUFS_WKQ_NAME */
++static struct au_wkq {
++	struct workqueue_struct	*q;
++
++	/* balancing */
++	atomic_t		busy;
++} *au_wkq;
++
++struct au_wkinfo {
++	struct work_struct wk;
++	struct super_block *sb;
++
++	unsigned int flags; /* see wkq.h */
++
++	au_wkq_func_t func;
++	void *args;
++
++	atomic_t *busyp;
++	struct completion *comp;
++};
++
++/* ---------------------------------------------------------------------- */
++
++static int enqueue(struct au_wkq *wkq, struct au_wkinfo *wkinfo)
++{
++	wkinfo->busyp = &wkq->busy;
++	if (au_ftest_wkq(wkinfo->flags, WAIT))
++		return !queue_work(wkq->q, &wkinfo->wk);
++	else
++		return !schedule_work(&wkinfo->wk);
++}
++
++static void do_wkq(struct au_wkinfo *wkinfo)
++{
++	unsigned int idle, n;
++	int i, idle_idx;
++
++	while (1) {
++		if (au_ftest_wkq(wkinfo->flags, WAIT)) {
++			idle_idx = 0;
++			idle = UINT_MAX;
++			for (i = 0; i < aufs_nwkq; i++) {
++				n = atomic_inc_return(&au_wkq[i].busy);
++				if (n == 1 && !enqueue(au_wkq + i, wkinfo))
++					return; /* success */
++
++				if (n < idle) {
++					idle_idx = i;
++					idle = n;
++				}
++				atomic_dec(&au_wkq[i].busy);
++			}
++		} else
++			idle_idx = aufs_nwkq;
++
++		atomic_inc(&au_wkq[idle_idx].busy);
++		if (!enqueue(au_wkq + idle_idx, wkinfo))
++			return; /* success */
++
++		/* impossible? */
++		AuWarn1("failed to queue_work()\n");
++		yield();
++	}
++}
++
++static void wkq_func(struct work_struct *wk)
++{
++	struct au_wkinfo *wkinfo = container_of(wk, struct au_wkinfo, wk);
++
++	wkinfo->func(wkinfo->args);
++	atomic_dec_return(wkinfo->busyp);
++	if (au_ftest_wkq(wkinfo->flags, WAIT))
++		complete(wkinfo->comp);
++	else {
++		kobject_put(&au_sbi(wkinfo->sb)->si_kobj);
++		module_put(THIS_MODULE);
++		kfree(wkinfo);
++	}
++}
++
++/*
++ * Since struct completion is large, try allocating it dynamically.
++ */
++#if defined(CONFIG_4KSTACKS) || defined(AuTest4KSTACKS)
++#define AuWkqCompDeclare(name)	struct completion *comp = NULL
++
++static int au_wkq_comp_alloc(struct au_wkinfo *wkinfo, struct completion **comp)
++{
++	*comp = kmalloc(sizeof(**comp), GFP_NOFS);
++	if (*comp) {
++		init_completion(*comp);
++		wkinfo->comp = *comp;
++		return 0;
++	}
++	return -ENOMEM;
++}
++
++static void au_wkq_comp_free(struct completion *comp)
++{
++	kfree(comp);
++}
++
++#else
++
++/* no braces */
++#define AuWkqCompDeclare(name) \
++	DECLARE_COMPLETION_ONSTACK(_ ## name); \
++	struct completion *comp = &_ ## name
++
++static int au_wkq_comp_alloc(struct au_wkinfo *wkinfo, struct completion **comp)
++{
++	wkinfo->comp = *comp;
++	return 0;
++}
++
++static void au_wkq_comp_free(struct completion *comp __maybe_unused)
++{
++	/* empty */
++}
++#endif /* 4KSTACKS */
++
++static void au_wkq_run(struct au_wkinfo *wkinfo)
++{
++	au_dbg_verify_kthread();
++	INIT_WORK(&wkinfo->wk, wkq_func);
++	do_wkq(wkinfo);
++}
++
++int au_wkq_wait(au_wkq_func_t func, void *args)
++{
++	int err;
++	AuWkqCompDeclare(comp);
++	struct au_wkinfo wkinfo = {
++		.flags	= AuWkq_WAIT,
++		.func	= func,
++		.args	= args
++	};
++
++	err = au_wkq_comp_alloc(&wkinfo, &comp);
++	if (!err) {
++		au_wkq_run(&wkinfo);
++		/* no timeout, no interrupt */
++		wait_for_completion(wkinfo.comp);
++		au_wkq_comp_free(comp);
++	}
++
++	return err;
++
++}
++
++int au_wkq_nowait(au_wkq_func_t func, void *args, struct super_block *sb)
++{
++	int err;
++	struct au_wkinfo *wkinfo;
++
++	atomic_inc(&au_sbi(sb)->si_nowait.nw_len);
++
++	/*
++	 * wkq_func() must free this wkinfo.
++	 * it highly depends upon the implementation of workqueue.
++	 */
++	err = 0;
++	wkinfo = kmalloc(sizeof(*wkinfo), GFP_NOFS);
++	if (wkinfo) {
++		wkinfo->sb = sb;
++		wkinfo->flags = !AuWkq_WAIT;
++		wkinfo->func = func;
++		wkinfo->args = args;
++		wkinfo->comp = NULL;
++		kobject_get(&au_sbi(sb)->si_kobj);
++		__module_get(THIS_MODULE);
++
++		au_wkq_run(wkinfo);
++	} else {
++		err = -ENOMEM;
++		atomic_dec(&au_sbi(sb)->si_nowait.nw_len);
++	}
++
++	return err;
++}
++
++/* ---------------------------------------------------------------------- */
++
++void au_nwt_init(struct au_nowait_tasks *nwt)
++{
++	atomic_set(&nwt->nw_len, 0);
++	/* smp_mb();*/ /* atomic_set */
++	init_waitqueue_head(&nwt->nw_wq);
++}
++
++void au_wkq_fin(void)
++{
++	int i;
++
++	for (i = 0; i < aufs_nwkq; i++)
++		if (au_wkq[i].q && !IS_ERR(au_wkq[i].q))
++			destroy_workqueue(au_wkq[i].q);
++	kfree(au_wkq);
++}
++
++int __init au_wkq_init(void)
++{
++	int err, i;
++	struct au_wkq *nowaitq;
++
++	/* '+1' is for accounting of nowait queue */
++	err = -ENOMEM;
++	au_wkq = kcalloc(aufs_nwkq + 1, sizeof(*au_wkq), GFP_NOFS);
++	if (unlikely(!au_wkq))
++		goto out;
++
++	err = 0;
++	for (i = 0; i < aufs_nwkq; i++) {
++		au_wkq[i].q = create_singlethread_workqueue(AUFS_WKQ_NAME);
++		if (au_wkq[i].q && !IS_ERR(au_wkq[i].q)) {
++			atomic_set(&au_wkq[i].busy, 0);
++			continue;
++		}
++
++		err = PTR_ERR(au_wkq[i].q);
++		au_wkq_fin();
++		goto out;
++	}
++
++	/* nowait accounting */
++	nowaitq = au_wkq + aufs_nwkq;
++	atomic_set(&nowaitq->busy, 0);
++	nowaitq->q = NULL;
++	/* smp_mb(); */ /* atomic_set */
++
++ out:
++	return err;
++}
+diff -Nur linux-2.6.31.5.orig/fs/aufs/wkq.h linux-2.6.31.5/fs/aufs/wkq.h
+--- linux-2.6.31.5.orig/fs/aufs/wkq.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.31.5/fs/aufs/wkq.h	2009-11-15 22:02:37.000000000 +0100
+@@ -0,0 +1,82 @@
++/*
++ * Copyright (C) 2005-2009 Junjiro R. Okajima
++ *
++ * This program, aufs is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
++ */
++
++/*
++ * workqueue for asynchronous/super-io operations
++ * todo: try new credentials management scheme
++ */
++
++#ifndef __AUFS_WKQ_H__
++#define __AUFS_WKQ_H__
++
++#ifdef __KERNEL__
++
++#include <linux/sched.h>
++#include <linux/wait.h>
++#include <linux/aufs_type.h>
++
++struct super_block;
++
++/* ---------------------------------------------------------------------- */
++
++/*
++ * in the next operation, wait for the 'nowait' tasks in system-wide workqueue
++ */
++struct au_nowait_tasks {
++	atomic_t		nw_len;
++	wait_queue_head_t	nw_wq;
++};
++
++/* ---------------------------------------------------------------------- */
++
++typedef void (*au_wkq_func_t)(void *args);
++
++/* wkq flags */
++#define AuWkq_WAIT	1
++#define au_ftest_wkq(flags, name)	((flags) & AuWkq_##name)
++#define au_fset_wkq(flags, name)	{ (flags) |= AuWkq_##name; }
++#define au_fclr_wkq(flags, name)	{ (flags) &= ~AuWkq_##name; }
++
++/* wkq.c */
++int au_wkq_wait(au_wkq_func_t func, void *args);
++int au_wkq_nowait(au_wkq_func_t func, void *args, struct super_block *sb);
++void au_nwt_init(struct au_nowait_tasks *nwt);
++int __init au_wkq_init(void);
++void au_wkq_fin(void);
++
++/* ---------------------------------------------------------------------- */
++
++static inline int au_test_wkq(struct task_struct *tsk)
++{
++	return !tsk->mm && !strcmp(tsk->comm, AUFS_WKQ_NAME);
++}
++
++static inline void au_nwt_done(struct au_nowait_tasks *nwt)
++{
++	if (!atomic_dec_return(&nwt->nw_len))
++		wake_up_all(&nwt->nw_wq);
++}
++
++static inline int au_nwt_flush(struct au_nowait_tasks *nwt)
++{
++	wait_event(nwt->nw_wq, !atomic_read(&nwt->nw_len));
++	return 0;
++}
++
++#endif /* __KERNEL__ */
++#endif /* __AUFS_WKQ_H__ */
+diff -Nur linux-2.6.31.5.orig/fs/aufs/xino.c linux-2.6.31.5/fs/aufs/xino.c
+--- linux-2.6.31.5.orig/fs/aufs/xino.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.31.5/fs/aufs/xino.c	2009-11-15 22:02:37.000000000 +0100
+@@ -0,0 +1,1200 @@
++/*
++ * Copyright (C) 2005-2009 Junjiro R. Okajima
++ *
++ * This program, aufs is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
++ */
++
++/*
++ * external inode number translation table and bitmap
++ */
++
++#include <linux/file.h>
++#include <linux/seq_file.h>
++#include <linux/uaccess.h>
++#include "aufs.h"
++
++ssize_t xino_fread(au_readf_t func, struct file *file, void *buf, size_t size,
++		   loff_t *pos)
++{
++	ssize_t err;
++	mm_segment_t oldfs;
++
++	oldfs = get_fs();
++	set_fs(KERNEL_DS);
++	do {
++		/* todo: signal_pending? */
++		err = func(file, (char __user *)buf, size, pos);
++	} while (err == -EAGAIN || err == -EINTR);
++	set_fs(oldfs);
++
++#if 0 /* reserved for future use */
++	if (err > 0)
++		fsnotify_access(file->f_dentry);
++#endif
++
++	return err;
++}
++
++/* ---------------------------------------------------------------------- */
++
++static ssize_t do_xino_fwrite(au_writef_t func, struct file *file, void *buf,
++			      size_t size, loff_t *pos)
++{
++	ssize_t err;
++	mm_segment_t oldfs;
++
++	oldfs = get_fs();
++	set_fs(KERNEL_DS);
++	lockdep_off();
++	do {
++		/* todo: signal_pending? */
++		err = func(file, (const char __user *)buf, size, pos);
++	} while (err == -EAGAIN || err == -EINTR);
++	lockdep_on();
++	set_fs(oldfs);
++
++#if 0 /* reserved for future use */
++	if (err > 0)
++		fsnotify_modify(file->f_dentry);
++#endif
++
++	return err;
++}
++
++struct do_xino_fwrite_args {
++	ssize_t *errp;
++	au_writef_t func;
++	struct file *file;
++	void *buf;
++	size_t size;
++	loff_t *pos;
++};
++
++static void call_do_xino_fwrite(void *args)
++{
++	struct do_xino_fwrite_args *a = args;
++	*a->errp = do_xino_fwrite(a->func, a->file, a->buf, a->size, a->pos);
++}
++
++ssize_t xino_fwrite(au_writef_t func, struct file *file, void *buf, size_t size,
++		    loff_t *pos)
++{
++	ssize_t err;
++
++	/* todo: signal block and no wkq? */
++	/* todo: new credential scheme */
++	/*
++	 * it breaks RLIMIT_FSIZE and normal user's limit,
++	 * users should care about quota and real 'filesystem full.'
++	 */
++	if (!au_test_wkq(current)) {
++		int wkq_err;
++		struct do_xino_fwrite_args args = {
++			.errp	= &err,
++			.func	= func,
++			.file	= file,
++			.buf	= buf,
++			.size	= size,
++			.pos	= pos
++		};
++
++		wkq_err = au_wkq_wait(call_do_xino_fwrite, &args);
++		if (unlikely(wkq_err))
++			err = wkq_err;
++	} else
++		err = do_xino_fwrite(func, file, buf, size, pos);
++
++	return err;
++}
++
++/* ---------------------------------------------------------------------- */
++
++/*
++ * create a new xinofile at the same place/path as @base_file.
++ */
++struct file *au_xino_create2(struct file *base_file, struct file *copy_src)
++{
++	struct file *file;
++	struct dentry *base, *dentry, *parent;
++	struct inode *dir;
++	struct qstr *name;
++	int err;
++
++	base = base_file->f_dentry;
++	parent = base->d_parent; /* dir inode is locked */
++	dir = parent->d_inode;
++	IMustLock(dir);
++
++	file = ERR_PTR(-EINVAL);
++	name = &base->d_name;
++	dentry = vfsub_lookup_one_len(name->name, parent, name->len);
++	if (IS_ERR(dentry)) {
++		file = (void *)dentry;
++		AuErr("%.*s lookup err %ld\n", AuLNPair(name), PTR_ERR(dentry));
++		goto out;
++	}
++
++	/* no need to mnt_want_write() since we call dentry_open() later */
++	err = vfs_create(dir, dentry, S_IRUGO | S_IWUGO, NULL);
++	if (unlikely(err)) {
++		file = ERR_PTR(err);
++		AuErr("%.*s create err %d\n", AuLNPair(name), err);
++		goto out_dput;
++	}
++
++	file = dentry_open(dget(dentry), mntget(base_file->f_vfsmnt),
++			   O_RDWR | O_CREAT | O_EXCL | O_LARGEFILE,
++			   current_cred());
++	if (IS_ERR(file)) {
++		AuErr("%.*s open err %ld\n", AuLNPair(name), PTR_ERR(file));
++		goto out_dput;
++	}
++
++	err = vfsub_unlink(dir, &file->f_path, /*force*/0);
++	if (unlikely(err)) {
++		AuErr("%.*s unlink err %d\n", AuLNPair(name), err);
++		goto out_fput;
++	}
++
++	if (copy_src) {
++		/* no one can touch copy_src xino */
++		err = au_copy_file(file, copy_src,
++				   i_size_read(copy_src->f_dentry->d_inode));
++		if (unlikely(err)) {
++			AuErr("%.*s copy err %d\n", AuLNPair(name), err);
++			goto out_fput;
++		}
++	}
++	goto out_dput; /* success */
++
++ out_fput:
++	fput(file);
++	file = ERR_PTR(err);
++ out_dput:
++	dput(dentry);
++ out:
++	return file;
++}
++
++struct au_xino_lock_dir {
++	struct au_hinode *hdir;
++	struct dentry *parent;
++	struct mutex *mtx;
++};
++
++static void au_xino_lock_dir(struct super_block *sb, struct file *xino,
++			     struct au_xino_lock_dir *ldir)
++{
++	aufs_bindex_t brid, bindex;
++
++	ldir->hdir = NULL;
++	bindex = -1;
++	brid = au_xino_brid(sb);
++	if (brid >= 0)
++		bindex = au_br_index(sb, brid);
++	if (bindex >= 0) {
++		ldir->hdir = au_hi(sb->s_root->d_inode, bindex);
++		au_hin_imtx_lock_nested(ldir->hdir, AuLsc_I_PARENT);
++	} else {
++		ldir->parent = dget_parent(xino->f_dentry);
++		ldir->mtx = &ldir->parent->d_inode->i_mutex;
++		mutex_lock_nested(ldir->mtx, AuLsc_I_PARENT);
++	}
++}
++
++static void au_xino_unlock_dir(struct au_xino_lock_dir *ldir)
++{
++	if (ldir->hdir)
++		au_hin_imtx_unlock(ldir->hdir);
++	else {
++		mutex_unlock(ldir->mtx);
++		dput(ldir->parent);
++	}
++}
++
++/* ---------------------------------------------------------------------- */
++
++/* truncate xino files asynchronously */
++
++int au_xino_trunc(struct super_block *sb, aufs_bindex_t bindex)
++{
++	int err;
++	aufs_bindex_t bi, bend;
++	struct au_branch *br;
++	struct file *new_xino, *file;
++	struct super_block *h_sb;
++	struct au_xino_lock_dir ldir;
++
++	err = -EINVAL;
++	bend = au_sbend(sb);
++	if (unlikely(bindex < 0 || bend < bindex))
++		goto out;
++	br = au_sbr(sb, bindex);
++	file = br->br_xino.xi_file;
++	if (!file)
++		goto out;
++
++	au_xino_lock_dir(sb, file, &ldir);
++	/* mnt_want_write() is unnecessary here */
++	new_xino = au_xino_create2(file, file);
++	au_xino_unlock_dir(&ldir);
++	err = PTR_ERR(new_xino);
++	if (IS_ERR(new_xino))
++		goto out;
++	err = 0;
++	fput(file);
++	br->br_xino.xi_file = new_xino;
++
++	h_sb = br->br_mnt->mnt_sb;
++	for (bi = 0; bi <= bend; bi++) {
++		if (unlikely(bi == bindex))
++			continue;
++		br = au_sbr(sb, bi);
++		if (br->br_mnt->mnt_sb != h_sb)
++			continue;
++
++		fput(br->br_xino.xi_file);
++		br->br_xino.xi_file = new_xino;
++		get_file(new_xino);
++	}
++
++ out:
++	return err;
++}
++
++struct xino_do_trunc_args {
++	struct super_block *sb;
++	struct au_branch *br;
++};
++
++static void xino_do_trunc(void *_args)
++{
++	struct xino_do_trunc_args *args = _args;
++	struct super_block *sb;
++	struct au_branch *br;
++	struct inode *dir;
++	int err;
++	aufs_bindex_t bindex;
++
++	err = 0;
++	sb = args->sb;
++	dir = sb->s_root->d_inode;
++	br = args->br;
++
++	si_noflush_write_lock(sb);
++	ii_read_lock_parent(dir);
++	bindex = au_br_index(sb, br->br_id);
++	err = au_xino_trunc(sb, bindex);
++	if (!err
++	    && br->br_xino.xi_file->f_dentry->d_inode->i_blocks
++	    >= br->br_xino_upper)
++		br->br_xino_upper += AUFS_XINO_TRUNC_STEP;
++
++	ii_read_unlock(dir);
++	if (unlikely(err))
++		AuWarn("err b%d, (%d)\n", bindex, err);
++	atomic_dec(&br->br_xino_running);
++	atomic_dec(&br->br_count);
++	au_nwt_done(&au_sbi(sb)->si_nowait);
++	si_write_unlock(sb);
++	kfree(args);
++}
++
++static void xino_try_trunc(struct super_block *sb, struct au_branch *br)
++{
++	struct xino_do_trunc_args *args;
++	int wkq_err;
++
++	if (br->br_xino.xi_file->f_dentry->d_inode->i_blocks
++	    < br->br_xino_upper)
++		return;
++
++	if (atomic_inc_return(&br->br_xino_running) > 1)
++		goto out;
++
++	/* lock and kfree() will be called in trunc_xino() */
++	args = kmalloc(sizeof(*args), GFP_NOFS);
++	if (unlikely(!args)) {
++		AuErr1("no memory\n");
++		goto out_args;
++	}
++
++	atomic_inc_return(&br->br_count);
++	args->sb = sb;
++	args->br = br;
++	wkq_err = au_wkq_nowait(xino_do_trunc, args, sb);
++	if (!wkq_err)
++		return; /* success */
++
++	AuErr("wkq %d\n", wkq_err);
++	atomic_dec_return(&br->br_count);
++
++ out_args:
++	kfree(args);
++ out:
++	atomic_dec_return(&br->br_xino_running);
++}
++
++/* ---------------------------------------------------------------------- */
++
++static int au_xino_do_write(au_writef_t write, struct file *file,
++			    ino_t h_ino, ino_t ino)
++{
++	loff_t pos;
++	ssize_t sz;
++
++	pos = h_ino;
++	if (unlikely(au_loff_max / sizeof(ino) - 1 < pos)) {
++		AuIOErr1("too large hi%lu\n", (unsigned long)h_ino);
++		return -EFBIG;
++	}
++	pos *= sizeof(ino);
++	sz = xino_fwrite(write, file, &ino, sizeof(ino), &pos);
++	if (sz == sizeof(ino))
++		return 0; /* success */
++
++	AuIOErr("write failed (%zd)\n", sz);
++	return -EIO;
++}
++
++/*
++ * write @ino to the xinofile for the specified branch{@sb, @bindex}
++ * at the position of @h_ino.
++ * even if @ino is zero, it is written to the xinofile and means no entry.
++ * if the size of the xino file on a specific filesystem exceeds the watermark,
++ * try truncating it.
++ */
++int au_xino_write(struct super_block *sb, aufs_bindex_t bindex, ino_t h_ino,
++		  ino_t ino)
++{
++	int err;
++	unsigned int mnt_flags;
++	struct au_branch *br;
++
++	BUILD_BUG_ON(sizeof(long long) != sizeof(au_loff_max)
++		     || ((loff_t)-1) > 0);
++	SiMustAnyLock(sb);
++
++	mnt_flags = au_mntflags(sb);
++	if (!au_opt_test(mnt_flags, XINO))
++		return 0;
++
++	br = au_sbr(sb, bindex);
++	err = au_xino_do_write(au_sbi(sb)->si_xwrite, br->br_xino.xi_file,
++			       h_ino, ino);
++	if (!err) {
++		if (au_opt_test(mnt_flags, TRUNC_XINO)
++		    && au_test_fs_trunc_xino(br->br_mnt->mnt_sb))
++			xino_try_trunc(sb, br);
++		return 0; /* success */
++	}
++
++	AuIOErr("write failed (%d)\n", err);
++	return -EIO;
++}
++
++/* ---------------------------------------------------------------------- */
++
++/* aufs inode number bitmap */
++
++static const int page_bits = (int)PAGE_SIZE * BITS_PER_BYTE;
++static ino_t xib_calc_ino(unsigned long pindex, int bit)
++{
++	ino_t ino;
++
++	AuDebugOn(bit < 0 || page_bits <= bit);
++	ino = AUFS_FIRST_INO + pindex * page_bits + bit;
++	return ino;
++}
++
++static void xib_calc_bit(ino_t ino, unsigned long *pindex, int *bit)
++{
++	AuDebugOn(ino < AUFS_FIRST_INO);
++	ino -= AUFS_FIRST_INO;
++	*pindex = ino / page_bits;
++	*bit = ino % page_bits;
++}
++
++static int xib_pindex(struct super_block *sb, unsigned long pindex)
++{
++	int err;
++	loff_t pos;
++	ssize_t sz;
++	struct au_sbinfo *sbinfo;
++	struct file *xib;
++	unsigned long *p;
++
++	sbinfo = au_sbi(sb);
++	MtxMustLock(&sbinfo->si_xib_mtx);
++	AuDebugOn(pindex > ULONG_MAX / PAGE_SIZE
++		  || !au_opt_test(sbinfo->si_mntflags, XINO));
++
++	if (pindex == sbinfo->si_xib_last_pindex)
++		return 0;
++
++	xib = sbinfo->si_xib;
++	p = sbinfo->si_xib_buf;
++	pos = sbinfo->si_xib_last_pindex;
++	pos *= PAGE_SIZE;
++	sz = xino_fwrite(sbinfo->si_xwrite, xib, p, PAGE_SIZE, &pos);
++	if (unlikely(sz != PAGE_SIZE))
++		goto out;
++
++	pos = pindex;
++	pos *= PAGE_SIZE;
++	if (i_size_read(xib->f_dentry->d_inode) >= pos + PAGE_SIZE)
++		sz = xino_fread(sbinfo->si_xread, xib, p, PAGE_SIZE, &pos);
++	else {
++		memset(p, 0, PAGE_SIZE);
++		sz = xino_fwrite(sbinfo->si_xwrite, xib, p, PAGE_SIZE, &pos);
++	}
++	if (sz == PAGE_SIZE) {
++		sbinfo->si_xib_last_pindex = pindex;
++		return 0; /* success */
++	}
++
++ out:
++	AuIOErr1("write failed (%zd)\n", sz);
++	err = sz;
++	if (sz >= 0)
++		err = -EIO;
++	return err;
++}
++
++/* ---------------------------------------------------------------------- */
++
++int au_xino_write0(struct super_block *sb, aufs_bindex_t bindex, ino_t h_ino,
++		   ino_t ino)
++{
++	int err, bit;
++	unsigned long pindex;
++	struct au_sbinfo *sbinfo;
++
++	if (!au_opt_test(au_mntflags(sb), XINO))
++		return 0;
++
++	err = 0;
++	if (ino) {
++		sbinfo = au_sbi(sb);
++		xib_calc_bit(ino, &pindex, &bit);
++		AuDebugOn(page_bits <= bit);
++		mutex_lock(&sbinfo->si_xib_mtx);
++		err = xib_pindex(sb, pindex);
++		if (!err) {
++			clear_bit(bit, sbinfo->si_xib_buf);
++			sbinfo->si_xib_next_bit = bit;
++		}
++		mutex_unlock(&sbinfo->si_xib_mtx);
++	}
++
++	if (!err)
++		err = au_xino_write(sb, bindex, h_ino, 0);
++	return err;
++}
++
++/* get an unused inode number from bitmap */
++ino_t au_xino_new_ino(struct super_block *sb)
++{
++	ino_t ino;
++	unsigned long *p, pindex, ul, pend;
++	struct au_sbinfo *sbinfo;
++	struct file *file;
++	int free_bit, err;
++
++	if (!au_opt_test(au_mntflags(sb), XINO))
++		return iunique(sb, AUFS_FIRST_INO);
++
++	sbinfo = au_sbi(sb);
++	mutex_lock(&sbinfo->si_xib_mtx);
++	p = sbinfo->si_xib_buf;
++	free_bit = sbinfo->si_xib_next_bit;
++	if (free_bit < page_bits && !test_bit(free_bit, p))
++		goto out; /* success */
++	free_bit = find_first_zero_bit(p, page_bits);
++	if (free_bit < page_bits)
++		goto out; /* success */
++
++	pindex = sbinfo->si_xib_last_pindex;
++	for (ul = pindex - 1; ul < ULONG_MAX; ul--) {
++		err = xib_pindex(sb, ul);
++		if (unlikely(err))
++			goto out_err;
++		free_bit = find_first_zero_bit(p, page_bits);
++		if (free_bit < page_bits)
++			goto out; /* success */
++	}
++
++	file = sbinfo->si_xib;
++	pend = i_size_read(file->f_dentry->d_inode) / PAGE_SIZE;
++	for (ul = pindex + 1; ul <= pend; ul++) {
++		err = xib_pindex(sb, ul);
++		if (unlikely(err))
++			goto out_err;
++		free_bit = find_first_zero_bit(p, page_bits);
++		if (free_bit < page_bits)
++			goto out; /* success */
++	}
++	BUG();
++
++ out:
++	set_bit(free_bit, p);
++	sbinfo->si_xib_next_bit++;
++	pindex = sbinfo->si_xib_last_pindex;
++	mutex_unlock(&sbinfo->si_xib_mtx);
++	ino = xib_calc_ino(pindex, free_bit);
++	AuDbg("i%lu\n", (unsigned long)ino);
++	return ino;
++ out_err:
++	mutex_unlock(&sbinfo->si_xib_mtx);
++	AuDbg("i0\n");
++	return 0;
++}
++
++/*
++ * read @ino from xinofile for the specified branch{@sb, @bindex}
++ * at the position of @h_ino.
++ * if @ino does not exist and @do_new is true, get new one.
++ */
++int au_xino_read(struct super_block *sb, aufs_bindex_t bindex, ino_t h_ino,
++		 ino_t *ino)
++{
++	int err;
++	ssize_t sz;
++	loff_t pos;
++	struct file *file;
++	struct au_sbinfo *sbinfo;
++
++	*ino = 0;
++	if (!au_opt_test(au_mntflags(sb), XINO))
++		return 0; /* no xino */
++
++	err = 0;
++	sbinfo = au_sbi(sb);
++	pos = h_ino;
++	if (unlikely(au_loff_max / sizeof(*ino) - 1 < pos)) {
++		AuIOErr1("too large hi%lu\n", (unsigned long)h_ino);
++		return -EFBIG;
++	}
++	pos *= sizeof(*ino);
++
++	file = au_sbr(sb, bindex)->br_xino.xi_file;
++	if (i_size_read(file->f_dentry->d_inode) < pos + sizeof(*ino))
++		return 0; /* no ino */
++
++	sz = xino_fread(sbinfo->si_xread, file, ino, sizeof(*ino), &pos);
++	if (sz == sizeof(*ino))
++		return 0; /* success */
++
++	err = sz;
++	if (unlikely(sz >= 0)) {
++		err = -EIO;
++		AuIOErr("xino read error (%zd)\n", sz);
++	}
++
++	return err;
++}
++
++/* ---------------------------------------------------------------------- */
++
++/* create and set a new xino file */
++
++struct file *au_xino_create(struct super_block *sb, char *fname, int silent)
++{
++	struct file *file;
++	struct dentry *h_parent, *d;
++	struct inode *h_dir;
++	int err;
++
++	/*
++	 * at mount-time, and the xino file is the default path,
++	 * hinotify is disabled so we have no inotify events to ignore.
++	 * when a user specified the xino, we cannot get au_hdir to be ignored.
++	 */
++	file = vfsub_filp_open(fname, O_RDWR | O_CREAT | O_EXCL | O_LARGEFILE,
++			       S_IRUGO | S_IWUGO);
++	if (IS_ERR(file)) {
++		if (!silent)
++			AuErr("open %s(%ld)\n", fname, PTR_ERR(file));
++		return file;
++	}
++
++	/* keep file count */
++	h_parent = dget_parent(file->f_dentry);
++	h_dir = h_parent->d_inode;
++	mutex_lock_nested(&h_dir->i_mutex, AuLsc_I_PARENT);
++	/* mnt_want_write() is unnecessary here */
++	err = vfsub_unlink(h_dir, &file->f_path, /*force*/0);
++	mutex_unlock(&h_dir->i_mutex);
++	dput(h_parent);
++	if (unlikely(err)) {
++		if (!silent)
++			AuErr("unlink %s(%d)\n", fname, err);
++		goto out;
++	}
++
++	err = -EINVAL;
++	d = file->f_dentry;
++	if (unlikely(sb == d->d_sb)) {
++		if (!silent)
++			AuErr("%s must be outside\n", fname);
++		goto out;
++	}
++	if (unlikely(au_test_fs_bad_xino(d->d_sb))) {
++		if (!silent)
++			AuErr("xino doesn't support %s(%s)\n",
++			      fname, au_sbtype(d->d_sb));
++		goto out;
++	}
++	return file; /* success */
++
++ out:
++	fput(file);
++	file = ERR_PTR(err);
++	return file;
++}
++
++/*
++ * find another branch who is on the same filesystem of the specified
++ * branch{@btgt}. search until @bend.
++ */
++static int is_sb_shared(struct super_block *sb, aufs_bindex_t btgt,
++			aufs_bindex_t bend)
++{
++	aufs_bindex_t bindex;
++	struct super_block *tgt_sb = au_sbr_sb(sb, btgt);
++
++	for (bindex = 0; bindex < btgt; bindex++)
++		if (unlikely(tgt_sb == au_sbr_sb(sb, bindex)))
++			return bindex;
++	for (bindex++; bindex <= bend; bindex++)
++		if (unlikely(tgt_sb == au_sbr_sb(sb, bindex)))
++			return bindex;
++	return -1;
++}
++
++/* ---------------------------------------------------------------------- */
++
++/*
++ * initialize the xinofile for the specified branch @br
++ * at the place/path where @base_file indicates.
++ * test whether another branch is on the same filesystem or not,
++ * if @do_test is true.
++ */
++int au_xino_br(struct super_block *sb, struct au_branch *br, ino_t h_ino,
++	       struct file *base_file, int do_test)
++{
++	int err;
++	ino_t ino;
++	aufs_bindex_t bend, bindex;
++	struct au_branch *shared_br, *b;
++	struct file *file;
++	struct super_block *tgt_sb;
++
++	shared_br = NULL;
++	bend = au_sbend(sb);
++	if (do_test) {
++		tgt_sb = br->br_mnt->mnt_sb;
++		for (bindex = 0; bindex <= bend; bindex++) {
++			b = au_sbr(sb, bindex);
++			if (tgt_sb == b->br_mnt->mnt_sb) {
++				shared_br = b;
++				break;
++			}
++		}
++	}
++
++	if (!shared_br || !shared_br->br_xino.xi_file) {
++		struct au_xino_lock_dir ldir;
++
++		au_xino_lock_dir(sb, base_file, &ldir);
++		/* mnt_want_write() is unnecessary here */
++		file = au_xino_create2(base_file, NULL);
++		au_xino_unlock_dir(&ldir);
++		err = PTR_ERR(file);
++		if (IS_ERR(file))
++			goto out;
++		br->br_xino.xi_file = file;
++	} else {
++		br->br_xino.xi_file = shared_br->br_xino.xi_file;
++		get_file(br->br_xino.xi_file);
++	}
++
++	ino = AUFS_ROOT_INO;
++	err = au_xino_do_write(au_sbi(sb)->si_xwrite, br->br_xino.xi_file,
++			       h_ino, ino);
++	if (!err)
++		return 0; /* success */
++
++
++ out:
++	return err;
++}
++
++/* ---------------------------------------------------------------------- */
++
++/* truncate a xino bitmap file */
++
++/* todo: slow */
++static int do_xib_restore(struct super_block *sb, struct file *file, void *page)
++{
++	int err, bit;
++	ssize_t sz;
++	unsigned long pindex;
++	loff_t pos, pend;
++	struct au_sbinfo *sbinfo;
++	au_readf_t func;
++	ino_t *ino;
++	unsigned long *p;
++
++	err = 0;
++	sbinfo = au_sbi(sb);
++	MtxMustLock(&sbinfo->si_xib_mtx);
++	p = sbinfo->si_xib_buf;
++	func = sbinfo->si_xread;
++	pend = i_size_read(file->f_dentry->d_inode);
++	pos = 0;
++	while (pos < pend) {
++		sz = xino_fread(func, file, page, PAGE_SIZE, &pos);
++		err = sz;
++		if (unlikely(sz <= 0))
++			goto out;
++
++		err = 0;
++		for (ino = page; sz > 0; ino++, sz -= sizeof(ino)) {
++			if (unlikely(*ino < AUFS_FIRST_INO))
++				continue;
++
++			xib_calc_bit(*ino, &pindex, &bit);
++			AuDebugOn(page_bits <= bit);
++			err = xib_pindex(sb, pindex);
++			if (!err)
++				set_bit(bit, p);
++			else
++				goto out;
++		}
++	}
++
++ out:
++	return err;
++}
++
++static int xib_restore(struct super_block *sb)
++{
++	int err;
++	aufs_bindex_t bindex, bend;
++	void *page;
++
++	err = -ENOMEM;
++	page = (void *)__get_free_page(GFP_NOFS);
++	if (unlikely(!page))
++		goto out;
++
++	err = 0;
++	bend = au_sbend(sb);
++	for (bindex = 0; !err && bindex <= bend; bindex++)
++		if (!bindex || is_sb_shared(sb, bindex, bindex - 1) < 0)
++			err = do_xib_restore
++				(sb, au_sbr(sb, bindex)->br_xino.xi_file, page);
++		else
++			AuDbg("b%d\n", bindex);
++	free_page((unsigned long)page);
++
++ out:
++	return err;
++}
++
++int au_xib_trunc(struct super_block *sb)
++{
++	int err;
++	ssize_t sz;
++	loff_t pos;
++	struct au_xino_lock_dir ldir;
++	struct au_sbinfo *sbinfo;
++	unsigned long *p;
++	struct file *file;
++
++	SiMustWriteLock(sb);
++
++	err = 0;
++	sbinfo = au_sbi(sb);
++	if (!au_opt_test(sbinfo->si_mntflags, XINO))
++		goto out;
++
++	file = sbinfo->si_xib;
++	if (i_size_read(file->f_dentry->d_inode) <= PAGE_SIZE)
++		goto out;
++
++	au_xino_lock_dir(sb, file, &ldir);
++	/* mnt_want_write() is unnecessary here */
++	file = au_xino_create2(sbinfo->si_xib, NULL);
++	au_xino_unlock_dir(&ldir);
++	err = PTR_ERR(file);
++	if (IS_ERR(file))
++		goto out;
++	fput(sbinfo->si_xib);
++	sbinfo->si_xib = file;
++
++	p = sbinfo->si_xib_buf;
++	memset(p, 0, PAGE_SIZE);
++	pos = 0;
++	sz = xino_fwrite(sbinfo->si_xwrite, sbinfo->si_xib, p, PAGE_SIZE, &pos);
++	if (unlikely(sz != PAGE_SIZE)) {
++		err = sz;
++		AuIOErr("err %d\n", err);
++		if (sz >= 0)
++			err = -EIO;
++		goto out;
++	}
++
++	mutex_lock(&sbinfo->si_xib_mtx);
++	/* mnt_want_write() is unnecessary here */
++	err = xib_restore(sb);
++	mutex_unlock(&sbinfo->si_xib_mtx);
++
++out:
++	return err;
++}
++
++/* ---------------------------------------------------------------------- */
++
++/*
++ * xino mount option handlers
++ */
++static au_readf_t find_readf(struct file *h_file)
++{
++	const struct file_operations *fop = h_file->f_op;
++
++	if (fop) {
++		if (fop->read)
++			return fop->read;
++		if (fop->aio_read)
++			return do_sync_read;
++	}
++	return ERR_PTR(-ENOSYS);
++}
++
++static au_writef_t find_writef(struct file *h_file)
++{
++	const struct file_operations *fop = h_file->f_op;
++
++	if (fop) {
++		if (fop->write)
++			return fop->write;
++		if (fop->aio_write)
++			return do_sync_write;
++	}
++	return ERR_PTR(-ENOSYS);
++}
++
++/* xino bitmap */
++static void xino_clear_xib(struct super_block *sb)
++{
++	struct au_sbinfo *sbinfo;
++
++	SiMustWriteLock(sb);
++
++	sbinfo = au_sbi(sb);
++	sbinfo->si_xread = NULL;
++	sbinfo->si_xwrite = NULL;
++	if (sbinfo->si_xib)
++		fput(sbinfo->si_xib);
++	sbinfo->si_xib = NULL;
++	free_page((unsigned long)sbinfo->si_xib_buf);
++	sbinfo->si_xib_buf = NULL;
++}
++
++static int au_xino_set_xib(struct super_block *sb, struct file *base)
++{
++	int err;
++	loff_t pos;
++	struct au_sbinfo *sbinfo;
++	struct file *file;
++
++	SiMustWriteLock(sb);
++
++	sbinfo = au_sbi(sb);
++	file = au_xino_create2(base, sbinfo->si_xib);
++	err = PTR_ERR(file);
++	if (IS_ERR(file))
++		goto out;
++	if (sbinfo->si_xib)
++		fput(sbinfo->si_xib);
++	sbinfo->si_xib = file;
++	sbinfo->si_xread = find_readf(file);
++	sbinfo->si_xwrite = find_writef(file);
++
++	err = -ENOMEM;
++	if (!sbinfo->si_xib_buf)
++		sbinfo->si_xib_buf = (void *)get_zeroed_page(GFP_NOFS);
++	if (unlikely(!sbinfo->si_xib_buf))
++		goto out_unset;
++
++	sbinfo->si_xib_last_pindex = 0;
++	sbinfo->si_xib_next_bit = 0;
++	if (i_size_read(file->f_dentry->d_inode) < PAGE_SIZE) {
++		pos = 0;
++		err = xino_fwrite(sbinfo->si_xwrite, file, sbinfo->si_xib_buf,
++				  PAGE_SIZE, &pos);
++		if (unlikely(err != PAGE_SIZE))
++			goto out_free;
++	}
++	err = 0;
++	goto out; /* success */
++
++ out_free:
++	free_page((unsigned long)sbinfo->si_xib_buf);
++	sbinfo->si_xib_buf = NULL;
++	if (err >= 0)
++		err = -EIO;
++ out_unset:
++	fput(sbinfo->si_xib);
++	sbinfo->si_xib = NULL;
++	sbinfo->si_xread = NULL;
++	sbinfo->si_xwrite = NULL;
++ out:
++	return err;
++}
++
++/* xino for each branch */
++static void xino_clear_br(struct super_block *sb)
++{
++	aufs_bindex_t bindex, bend;
++	struct au_branch *br;
++
++	bend = au_sbend(sb);
++	for (bindex = 0; bindex <= bend; bindex++) {
++		br = au_sbr(sb, bindex);
++		if (!br || !br->br_xino.xi_file)
++			continue;
++
++		fput(br->br_xino.xi_file);
++		br->br_xino.xi_file = NULL;
++	}
++}
++
++static int au_xino_set_br(struct super_block *sb, struct file *base)
++{
++	int err;
++	ino_t ino;
++	aufs_bindex_t bindex, bend, bshared;
++	struct {
++		struct file *old, *new;
++	} *fpair, *p;
++	struct au_branch *br;
++	struct inode *inode;
++	au_writef_t writef;
++
++	SiMustWriteLock(sb);
++
++	err = -ENOMEM;
++	bend = au_sbend(sb);
++	fpair = kcalloc(bend + 1, sizeof(*fpair), GFP_NOFS);
++	if (unlikely(!fpair))
++		goto out;
++
++	inode = sb->s_root->d_inode;
++	ino = AUFS_ROOT_INO;
++	writef = au_sbi(sb)->si_xwrite;
++	for (bindex = 0, p = fpair; bindex <= bend; bindex++, p++) {
++		br = au_sbr(sb, bindex);
++		bshared = is_sb_shared(sb, bindex, bindex - 1);
++		if (bshared >= 0) {
++			/* shared xino */
++			*p = fpair[bshared];
++			get_file(p->new);
++		}
++
++		if (!p->new) {
++			/* new xino */
++			p->old = br->br_xino.xi_file;
++			p->new = au_xino_create2(base, br->br_xino.xi_file);
++			err = PTR_ERR(p->new);
++			if (IS_ERR(p->new)) {
++				p->new = NULL;
++				goto out_pair;
++			}
++		}
++
++		err = au_xino_do_write(writef, p->new,
++				       au_h_iptr(inode, bindex)->i_ino, ino);
++		if (unlikely(err))
++			goto out_pair;
++	}
++
++	for (bindex = 0, p = fpair; bindex <= bend; bindex++, p++) {
++		br = au_sbr(sb, bindex);
++		if (br->br_xino.xi_file)
++			fput(br->br_xino.xi_file);
++		get_file(p->new);
++		br->br_xino.xi_file = p->new;
++	}
++
++ out_pair:
++	for (bindex = 0, p = fpair; bindex <= bend; bindex++, p++)
++		if (p->new)
++			fput(p->new);
++		else
++			break;
++	kfree(fpair);
++ out:
++	return err;
++}
++
++void au_xino_clr(struct super_block *sb)
++{
++	struct au_sbinfo *sbinfo;
++
++	au_xigen_clr(sb);
++	xino_clear_xib(sb);
++	xino_clear_br(sb);
++	sbinfo = au_sbi(sb);
++	/* lvalue, do not call au_mntflags() */
++	au_opt_clr(sbinfo->si_mntflags, XINO);
++}
++
++int au_xino_set(struct super_block *sb, struct au_opt_xino *xino, int remount)
++{
++	int err, skip;
++	struct dentry *parent, *cur_parent;
++	struct qstr *dname, *cur_name;
++	struct file *cur_xino;
++	struct inode *dir;
++	struct au_sbinfo *sbinfo;
++
++	SiMustWriteLock(sb);
++
++	err = 0;
++	sbinfo = au_sbi(sb);
++	parent = dget_parent(xino->file->f_dentry);
++	if (remount) {
++		skip = 0;
++		dname = &xino->file->f_dentry->d_name;
++		cur_xino = sbinfo->si_xib;
++		if (cur_xino) {
++			cur_parent = dget_parent(cur_xino->f_dentry);
++			cur_name = &cur_xino->f_dentry->d_name;
++			skip = (cur_parent == parent
++				&& dname->len == cur_name->len
++				&& !memcmp(dname->name, cur_name->name,
++					   dname->len));
++			dput(cur_parent);
++		}
++		if (skip)
++			goto out;
++	}
++
++	au_opt_set(sbinfo->si_mntflags, XINO);
++	dir = parent->d_inode;
++	mutex_lock_nested(&dir->i_mutex, AuLsc_I_PARENT);
++	/* mnt_want_write() is unnecessary here */
++	err = au_xino_set_xib(sb, xino->file);
++	if (!err)
++		err = au_xigen_set(sb, xino->file);
++	if (!err)
++		err = au_xino_set_br(sb, xino->file);
++	mutex_unlock(&dir->i_mutex);
++	if (!err)
++		goto out; /* success */
++
++	/* reset all */
++	AuIOErr("failed creating xino(%d).\n", err);
++
++ out:
++	dput(parent);
++	return err;
++}
++
++/* ---------------------------------------------------------------------- */
++
++/*
++ * create a xinofile at the default place/path.
++ */
++struct file *au_xino_def(struct super_block *sb)
++{
++	struct file *file;
++	char *page, *p;
++	struct au_branch *br;
++	struct super_block *h_sb;
++	struct path path;
++	aufs_bindex_t bend, bindex, bwr;
++
++	br = NULL;
++	bend = au_sbend(sb);
++	bwr = -1;
++	for (bindex = 0; bindex <= bend; bindex++) {
++		br = au_sbr(sb, bindex);
++		if (au_br_writable(br->br_perm)
++		    && !au_test_fs_bad_xino(br->br_mnt->mnt_sb)) {
++			bwr = bindex;
++			break;
++		}
++	}
++
++	if (bwr >= 0) {
++		file = ERR_PTR(-ENOMEM);
++		page = __getname();
++		if (unlikely(!page))
++			goto out;
++		path.mnt = br->br_mnt;
++		path.dentry = au_h_dptr(sb->s_root, bwr);
++		p = d_path(&path, page, PATH_MAX - sizeof(AUFS_XINO_FNAME));
++		file = (void *)p;
++		if (!IS_ERR(p)) {
++			strcat(p, "/" AUFS_XINO_FNAME);
++			AuDbg("%s\n", p);
++			file = au_xino_create(sb, p, /*silent*/0);
++			if (!IS_ERR(file))
++				au_xino_brid_set(sb, br->br_id);
++		}
++		__putname(page);
++	} else {
++		file = au_xino_create(sb, AUFS_XINO_DEFPATH, /*silent*/0);
++		if (IS_ERR(file))
++			goto out;
++		h_sb = file->f_dentry->d_sb;
++		if (unlikely(au_test_fs_bad_xino(h_sb))) {
++			AuErr("xino doesn't support %s(%s)\n",
++			      AUFS_XINO_DEFPATH, au_sbtype(h_sb));
++			fput(file);
++			file = ERR_PTR(-EINVAL);
++		}
++		if (!IS_ERR(file))
++			au_xino_brid_set(sb, -1);
++	}
++
++ out:
++	return file;
++}
++
++/* ---------------------------------------------------------------------- */
++
++int au_xino_path(struct seq_file *seq, struct file *file)
++{
++	int err;
++
++	err = au_seq_path(seq, &file->f_path);
++	if (unlikely(err < 0))
++		goto out;
++
++	err = 0;
++#define Deleted "\\040(deleted)"
++	seq->count -= sizeof(Deleted) - 1;
++	AuDebugOn(memcmp(seq->buf + seq->count, Deleted,
++			 sizeof(Deleted) - 1));
++#undef Deleted
++
++ out:
++	return err;
++}
+diff -Nur linux-2.6.31.5.orig/fs/Kconfig linux-2.6.31.5/fs/Kconfig
+--- linux-2.6.31.5.orig/fs/Kconfig	2009-10-23 00:57:56.000000000 +0200
++++ linux-2.6.31.5/fs/Kconfig	2009-11-15 22:02:37.000000000 +0100
+@@ -187,6 +187,7 @@
+ source "fs/ufs/Kconfig"
+ source "fs/exofs/Kconfig"
+ source "fs/nilfs2/Kconfig"
++source "fs/aufs/Kconfig"
+ 
+ endif # MISC_FILESYSTEMS
+ 
+diff -Nur linux-2.6.31.5.orig/fs/Makefile linux-2.6.31.5/fs/Makefile
+--- linux-2.6.31.5.orig/fs/Makefile	2009-10-23 00:57:56.000000000 +0200
++++ linux-2.6.31.5/fs/Makefile	2009-11-15 22:02:37.000000000 +0100
+@@ -85,6 +85,7 @@
+ obj-$(CONFIG_HFS_FS)		+= hfs/
+ obj-$(CONFIG_ECRYPT_FS)		+= ecryptfs/
+ obj-$(CONFIG_VXFS_FS)		+= freevxfs/
++obj-$(CONFIG_AUFS_FS)           += aufs/
+ obj-$(CONFIG_NFS_FS)		+= nfs/
+ obj-$(CONFIG_EXPORTFS)		+= exportfs/
+ obj-$(CONFIG_NFSD)		+= nfsd/
+diff -Nur linux-2.6.31.5.orig/fs/namei.c linux-2.6.31.5/fs/namei.c
+--- linux-2.6.31.5.orig/fs/namei.c	2009-10-23 00:57:56.000000000 +0200
++++ linux-2.6.31.5/fs/namei.c	2009-11-15 22:02:37.000000000 +0100
+@@ -337,6 +337,7 @@
+ 
+ 	return 0;
+ }
++EXPORT_SYMBOL(deny_write_access);
+ 
+ /**
+  * path_get - get a reference to a path
+@@ -1219,7 +1220,7 @@
+  * needs parent already locked. Doesn't follow mounts.
+  * SMP-safe.
+  */
+-static struct dentry *lookup_hash(struct nameidata *nd)
++struct dentry *lookup_hash(struct nameidata *nd)
+ {
+ 	int err;
+ 
+@@ -1228,8 +1229,9 @@
+ 		return ERR_PTR(err);
+ 	return __lookup_hash(&nd->last, nd->path.dentry, nd);
+ }
++EXPORT_SYMBOL(lookup_hash);
+ 
+-static int __lookup_one_len(const char *name, struct qstr *this,
++int __lookup_one_len(const char *name, struct qstr *this,
+ 		struct dentry *base, int len)
+ {
+ 	unsigned long hash;
+@@ -1250,6 +1252,7 @@
+ 	this->hash = end_name_hash(hash);
+ 	return 0;
+ }
++EXPORT_SYMBOL(__lookup_one_len);
+ 
+ /**
+  * lookup_one_len - filesystem helper to lookup single pathname component
+diff -Nur linux-2.6.31.5.orig/fs/namespace.c linux-2.6.31.5/fs/namespace.c
+--- linux-2.6.31.5.orig/fs/namespace.c	2009-10-23 00:57:56.000000000 +0200
++++ linux-2.6.31.5/fs/namespace.c	2009-11-15 22:02:37.000000000 +0100
+@@ -39,6 +39,7 @@
+ 
+ /* spinlock for vfsmount related operations, inplace of dcache_lock */
+ __cacheline_aligned_in_smp DEFINE_SPINLOCK(vfsmount_lock);
++EXPORT_SYMBOL(vfsmount_lock);
+ 
+ static int event;
+ static DEFINE_IDA(mnt_id_ida);
+diff -Nur linux-2.6.31.5.orig/fs/open.c linux-2.6.31.5/fs/open.c
+--- linux-2.6.31.5.orig/fs/open.c	2009-10-23 00:57:56.000000000 +0200
++++ linux-2.6.31.5/fs/open.c	2009-11-15 22:02:37.000000000 +0100
+@@ -221,6 +221,7 @@
+ 	mutex_unlock(&dentry->d_inode->i_mutex);
+ 	return err;
+ }
++EXPORT_SYMBOL(do_truncate);
+ 
+ static long do_sys_truncate(const char __user *pathname, loff_t length)
+ {
+diff -Nur linux-2.6.31.5.orig/fs/splice.c linux-2.6.31.5/fs/splice.c
+--- linux-2.6.31.5.orig/fs/splice.c	2009-10-23 00:57:56.000000000 +0200
++++ linux-2.6.31.5/fs/splice.c	2009-11-15 22:02:37.000000000 +0100
+@@ -1057,8 +1057,8 @@
+ /*
+  * Attempt to initiate a splice from pipe to file.
+  */
+-static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
+-			   loff_t *ppos, size_t len, unsigned int flags)
++long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
++		    loff_t *ppos, size_t len, unsigned int flags)
+ {
+ 	ssize_t (*splice_write)(struct pipe_inode_info *, struct file *,
+ 				loff_t *, size_t, unsigned int);
+@@ -1080,13 +1080,14 @@
+ 
+ 	return splice_write(pipe, out, ppos, len, flags);
+ }
++EXPORT_SYMBOL(do_splice_from);
+ 
+ /*
+  * Attempt to initiate a splice from a file to a pipe.
+  */
+-static long do_splice_to(struct file *in, loff_t *ppos,
+-			 struct pipe_inode_info *pipe, size_t len,
+-			 unsigned int flags)
++long do_splice_to(struct file *in, loff_t *ppos,
++		  struct pipe_inode_info *pipe, size_t len,
++		  unsigned int flags)
+ {
+ 	ssize_t (*splice_read)(struct file *, loff_t *,
+ 			       struct pipe_inode_info *, size_t, unsigned int);
+@@ -1105,6 +1106,7 @@
+ 
+ 	return splice_read(in, ppos, pipe, len, flags);
+ }
++EXPORT_SYMBOL(do_splice_to);
+ 
+ /**
+  * splice_direct_to_actor - splices data directly between two non-pipes
+diff -Nur linux-2.6.31.5.orig/include/linux/aufs_type.h linux-2.6.31.5/include/linux/aufs_type.h
+--- linux-2.6.31.5.orig/include/linux/aufs_type.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.31.5/include/linux/aufs_type.h	2009-11-15 22:02:37.000000000 +0100
+@@ -0,0 +1,109 @@
++/*
++ * Copyright (C) 2005-2009 Junjiro R. Okajima
++ *
++ * This program, aufs is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
++ */
++
++#ifndef __AUFS_TYPE_H__
++#define __AUFS_TYPE_H__
++
++#include <linux/ioctl.h>
++
++#define AUFS_VERSION	"2-standalone.tree-30-20090803"
++
++/* todo? move this to linux-2.6.19/include/magic.h */
++#define AUFS_SUPER_MAGIC	('a' << 24 | 'u' << 16 | 'f' << 8 | 's')
++
++/* ---------------------------------------------------------------------- */
++
++#ifdef CONFIG_AUFS_BRANCH_MAX_127
++/* some environments treat 'char' as 'unsigned char' by default */
++typedef signed char aufs_bindex_t;
++#define AUFS_BRANCH_MAX 127
++#else
++typedef short aufs_bindex_t;
++#ifdef CONFIG_AUFS_BRANCH_MAX_511
++#define AUFS_BRANCH_MAX 511
++#elif defined(CONFIG_AUFS_BRANCH_MAX_1023)
++#define AUFS_BRANCH_MAX 1023
++#elif defined(CONFIG_AUFS_BRANCH_MAX_32767)
++#define AUFS_BRANCH_MAX 32767
++#endif
++#endif
++
++#ifdef __KERNEL__
++#ifndef AUFS_BRANCH_MAX
++#error unknown CONFIG_AUFS_BRANCH_MAX value
++#endif
++#endif /* __KERNEL__ */
++
++/* ---------------------------------------------------------------------- */
++
++#define AUFS_NAME		"aufs"
++#define AUFS_FSTYPE		AUFS_NAME
++
++#define AUFS_ROOT_INO		2
++#define AUFS_FIRST_INO		11
++
++#define AUFS_WH_PFX		".wh."
++#define AUFS_WH_PFX_LEN		((int)sizeof(AUFS_WH_PFX) - 1)
++#define AUFS_XINO_FNAME		"." AUFS_NAME ".xino"
++#define AUFS_XINO_DEFPATH	"/tmp/" AUFS_XINO_FNAME
++#define AUFS_XINO_TRUNC_INIT	64 /* blocks */
++#define AUFS_XINO_TRUNC_STEP	4  /* blocks */
++#define AUFS_DIRWH_DEF		3
++#define AUFS_RDCACHE_DEF	10 /* seconds */
++#define AUFS_RDBLK_DEF		512 /* bytes */
++#define AUFS_RDHASH_DEF		32
++#define AUFS_WKQ_NAME		AUFS_NAME "d"
++#define AUFS_NWKQ_DEF		4
++#define AUFS_MFS_SECOND_DEF	30 /* seconds */
++#define AUFS_PLINK_WARN		100 /* number of plinks */
++
++#define AUFS_DIROPQ_NAME	AUFS_WH_PFX ".opq" /* whiteouted doubly */
++#define AUFS_WH_DIROPQ		AUFS_WH_PFX AUFS_DIROPQ_NAME
++
++#define AUFS_BASE_NAME		AUFS_WH_PFX AUFS_NAME
++#define AUFS_PLINKDIR_NAME	AUFS_WH_PFX "plnk"
++#define AUFS_ORPHDIR_NAME	AUFS_WH_PFX "orph"
++
++/* doubly whiteouted */
++#define AUFS_WH_BASE		AUFS_WH_PFX AUFS_BASE_NAME
++#define AUFS_WH_PLINKDIR	AUFS_WH_PFX AUFS_PLINKDIR_NAME
++#define AUFS_WH_ORPHDIR		AUFS_WH_PFX AUFS_ORPHDIR_NAME
++
++/* branch permission */
++#define AUFS_BRPERM_RW		"rw"
++#define AUFS_BRPERM_RO		"ro"
++#define AUFS_BRPERM_RR		"rr"
++#define AUFS_BRPERM_WH		"wh"
++#define AUFS_BRPERM_NLWH	"nolwh"
++#define AUFS_BRPERM_ROWH	AUFS_BRPERM_RO "+" AUFS_BRPERM_WH
++#define AUFS_BRPERM_RRWH	AUFS_BRPERM_RR "+" AUFS_BRPERM_WH
++#define AUFS_BRPERM_RWNLWH	AUFS_BRPERM_RW "+" AUFS_BRPERM_NLWH
++
++/* ---------------------------------------------------------------------- */
++
++/* ioctl */
++enum {
++	AuCtl_PLINK_MAINT,
++	AuCtl_PLINK_CLEAN
++};
++
++#define AuCtlType		'A'
++#define AUFS_CTL_PLINK_MAINT	_IO(AuCtlType, AuCtl_PLINK_MAINT)
++#define AUFS_CTL_PLINK_CLEAN	_IO(AuCtlType, AuCtl_PLINK_CLEAN)
++
++#endif /* __AUFS_TYPE_H__ */
+diff -Nur linux-2.6.31.5.orig/include/linux/Kbuild linux-2.6.31.5/include/linux/Kbuild
+--- linux-2.6.31.5.orig/include/linux/Kbuild	2009-10-23 00:57:56.000000000 +0200
++++ linux-2.6.31.5/include/linux/Kbuild	2009-11-15 22:02:38.000000000 +0100
+@@ -34,6 +34,7 @@
+ header-y += atmsap.h
+ header-y += atmsvc.h
+ header-y += atm_zatm.h
++header-y += aufs_type.h
+ header-y += auto_fs4.h
+ header-y += ax25.h
+ header-y += b1lli.h
+diff -Nur linux-2.6.31.5.orig/include/linux/namei.h linux-2.6.31.5/include/linux/namei.h
+--- linux-2.6.31.5.orig/include/linux/namei.h	2009-10-23 00:57:56.000000000 +0200
++++ linux-2.6.31.5/include/linux/namei.h	2009-11-15 22:02:38.000000000 +0100
+@@ -75,6 +75,9 @@
+ extern struct file *nameidata_to_filp(struct nameidata *nd, int flags);
+ extern void release_open_intent(struct nameidata *);
+ 
++extern struct dentry *lookup_hash(struct nameidata *nd);
++extern int __lookup_one_len(const char *name, struct qstr *this,
++			    struct dentry *base, int len);
+ extern struct dentry *lookup_one_len(const char *, struct dentry *, int);
+ extern struct dentry *lookup_one_noperm(const char *, struct dentry *);
+ 
+diff -Nur linux-2.6.31.5.orig/include/linux/splice.h linux-2.6.31.5/include/linux/splice.h
+--- linux-2.6.31.5.orig/include/linux/splice.h	2009-10-23 00:57:56.000000000 +0200
++++ linux-2.6.31.5/include/linux/splice.h	2009-11-15 22:02:38.000000000 +0100
+@@ -82,4 +82,10 @@
+ extern ssize_t splice_direct_to_actor(struct file *, struct splice_desc *,
+ 				      splice_direct_actor *);
+ 
++extern long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
++			   loff_t *ppos, size_t len, unsigned int flags);
++extern long do_splice_to(struct file *in, loff_t *ppos,
++			 struct pipe_inode_info *pipe, size_t len,
++			 unsigned int flags);
++
+ #endif
+diff -Nur linux-2.6.31.5.orig/security/device_cgroup.c linux-2.6.31.5/security/device_cgroup.c
+--- linux-2.6.31.5.orig/security/device_cgroup.c	2009-10-23 00:57:56.000000000 +0200
++++ linux-2.6.31.5/security/device_cgroup.c	2009-11-15 22:02:38.000000000 +0100
+@@ -513,6 +513,7 @@
+ 
+ 	return -EPERM;
+ }
++EXPORT_SYMBOL(devcgroup_inode_permission);
+ 
+ int devcgroup_inode_mknod(int mode, dev_t dev)
+ {
+diff -Nur linux-2.6.31.5.orig/security/security.c linux-2.6.31.5/security/security.c
+--- linux-2.6.31.5.orig/security/security.c	2009-10-23 00:57:56.000000000 +0200
++++ linux-2.6.31.5/security/security.c	2009-11-15 22:02:38.000000000 +0100
+@@ -386,6 +386,7 @@
+ 		return 0;
+ 	return security_ops->path_mkdir(path, dentry, mode);
+ }
++EXPORT_SYMBOL(security_path_mkdir);
+ 
+ int security_path_rmdir(struct path *path, struct dentry *dentry)
+ {
+@@ -393,6 +394,7 @@
+ 		return 0;
+ 	return security_ops->path_rmdir(path, dentry);
+ }
++EXPORT_SYMBOL(security_path_rmdir);
+ 
+ int security_path_unlink(struct path *path, struct dentry *dentry)
+ {
+@@ -400,6 +402,7 @@
+ 		return 0;
+ 	return security_ops->path_unlink(path, dentry);
+ }
++EXPORT_SYMBOL(security_path_unlink);
+ 
+ int security_path_symlink(struct path *path, struct dentry *dentry,
+ 			  const char *old_name)
+@@ -408,6 +411,7 @@
+ 		return 0;
+ 	return security_ops->path_symlink(path, dentry, old_name);
+ }
++EXPORT_SYMBOL(security_path_symlink);
+ 
+ int security_path_link(struct dentry *old_dentry, struct path *new_dir,
+ 		       struct dentry *new_dentry)
+@@ -416,6 +420,7 @@
+ 		return 0;
+ 	return security_ops->path_link(old_dentry, new_dir, new_dentry);
+ }
++EXPORT_SYMBOL(security_path_link);
+ 
+ int security_path_rename(struct path *old_dir, struct dentry *old_dentry,
+ 			 struct path *new_dir, struct dentry *new_dentry)
+@@ -426,6 +431,7 @@
+ 	return security_ops->path_rename(old_dir, old_dentry, new_dir,
+ 					 new_dentry);
+ }
++EXPORT_SYMBOL(security_path_rename);
+ 
+ int security_path_truncate(struct path *path, loff_t length,
+ 			   unsigned int time_attrs)
+@@ -434,6 +440,7 @@
+ 		return 0;
+ 	return security_ops->path_truncate(path, length, time_attrs);
+ }
++EXPORT_SYMBOL(security_path_truncate);
+ #endif
+ 
+ int security_inode_create(struct inode *dir, struct dentry *dentry, int mode)
+@@ -505,6 +512,7 @@
+ 		return 0;
+ 	return security_ops->inode_readlink(dentry);
+ }
++EXPORT_SYMBOL(security_inode_readlink);
+ 
+ int security_inode_follow_link(struct dentry *dentry, struct nameidata *nd)
+ {
+@@ -519,6 +527,7 @@
+ 		return 0;
+ 	return security_ops->inode_permission(inode, mask);
+ }
++EXPORT_SYMBOL(security_inode_permission);
+ 
+ int security_inode_setattr(struct dentry *dentry, struct iattr *attr)
+ {
+@@ -619,6 +628,7 @@
+ {
+ 	return security_ops->file_permission(file, mask);
+ }
++EXPORT_SYMBOL(security_file_permission);
+ 
+ int security_file_alloc(struct file *file)
+ {

+ 2513 - 0
target/linux/patches/2.6.33/bsd-compatibility.patch

@@ -0,0 +1,2513 @@
+diff -Nur linux-2.6.32.orig/scripts/Makefile.lib linux-2.6.32/scripts/Makefile.lib
+--- linux-2.6.32.orig/scripts/Makefile.lib	Thu Dec  3 04:50:57 2009
++++ linux-2.6.32/scripts/Makefile.lib	Sat Dec 19 12:21:44 2009
+@@ -211,7 +211,12 @@
+ size_append = /bin/echo -ne $(shell					\
+ dec_size=0;								\
+ for F in $1; do								\
+-	fsize=$$(stat -c "%s" $$F);					\
++	if stat --help >/dev/null 2>&1; then				\
++		statcmd='stat -c %s';					\
++	else								\
++		statcmd='stat -f %z';					\
++	fi;								\
++	fsize=$$($$statcmd $$F);					\
+ 	dec_size=$$(expr $$dec_size + $$fsize);				\
+ done;									\
+ printf "%08x" $$dec_size |						\
+diff -Nur linux-2.6.32.orig/scripts/mod/mk_elfconfig.c linux-2.6.32/scripts/mod/mk_elfconfig.c
+--- linux-2.6.32.orig/scripts/mod/mk_elfconfig.c	Thu Dec  3 04:50:57 2009
++++ linux-2.6.32/scripts/mod/mk_elfconfig.c	Sat Dec 19 11:09:24 2009
+@@ -1,7 +1,18 @@
+ #include <stdio.h>
+ #include <stdlib.h>
+ #include <string.h>
+-#include <elf.h>
++
++#define EI_NIDENT (16)
++#define ELFMAG          "\177ELF"
++
++#define SELFMAG         4
++#define EI_CLASS        4
++#define ELFCLASS32      1               /* 32-bit objects */
++#define ELFCLASS64      2               /* 64-bit objects */
++
++#define EI_DATA         5               /* Data encoding byte index */
++#define ELFDATA2LSB     1               /* 2's complement, little endian */
++#define ELFDATA2MSB     2               /* 2's complement, big endian */
+ 
+ int
+ main(int argc, char **argv)
+diff -Nur linux-2.6.32.orig/scripts/mod/modpost.h linux-2.6.32/scripts/mod/modpost.h
+--- linux-2.6.32.orig/scripts/mod/modpost.h	Thu Dec  3 04:50:57 2009
++++ linux-2.6.32/scripts/mod/modpost.h	Sat Dec 19 11:55:02 2009
+@@ -7,8 +7,2454 @@
+ #include <sys/mman.h>
+ #include <fcntl.h>
+ #include <unistd.h>
+-#include <elf.h>
+ 
++
++/* This file defines standard ELF types, structures, and macros.
++   Copyright (C) 1995-1999,2000,2001,2002,2003 Free Software Foundation, Inc.
++   This file is part of the GNU C Library.
++
++   The GNU C Library is free software; you can redistribute it and/or
++   modify it under the terms of the GNU Lesser General Public
++   License as published by the Free Software Foundation; either
++   version 2.1 of the License, or (at your option) any later version.
++
++   The GNU C Library is distributed in the hope that it will be useful,
++   but WITHOUT ANY WARRANTY; without even the implied warranty of
++   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++   Lesser General Public License for more details.
++
++   You should have received a copy of the GNU Lesser General Public
++   License along with the GNU C Library; if not, write to the Free
++   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
++   02111-1307 USA.  */
++
++#ifndef _ELF_H
++#define	_ELF_H 1
++
++__BEGIN_DECLS
++
++/* Standard ELF types.  */
++
++#include <stdint.h>
++
++/* Type for a 16-bit quantity.  */
++typedef uint16_t Elf32_Half;
++typedef uint16_t Elf64_Half;
++
++/* Types for signed and unsigned 32-bit quantities.  */
++typedef uint32_t Elf32_Word;
++typedef	int32_t  Elf32_Sword;
++typedef uint32_t Elf64_Word;
++typedef	int32_t  Elf64_Sword;
++
++/* Types for signed and unsigned 64-bit quantities.  */
++typedef uint64_t Elf32_Xword;
++typedef	int64_t  Elf32_Sxword;
++typedef uint64_t Elf64_Xword;
++typedef	int64_t  Elf64_Sxword;
++
++/* Type of addresses.  */
++typedef uint32_t Elf32_Addr;
++typedef uint64_t Elf64_Addr;
++
++/* Type of file offsets.  */
++typedef uint32_t Elf32_Off;
++typedef uint64_t Elf64_Off;
++
++/* Type for section indices, which are 16-bit quantities.  */
++typedef uint16_t Elf32_Section;
++typedef uint16_t Elf64_Section;
++
++/* Type for version symbol information.  */
++typedef Elf32_Half Elf32_Versym;
++typedef Elf64_Half Elf64_Versym;
++
++
++/* The ELF file header.  This appears at the start of every ELF file.  */
++
++#define EI_NIDENT (16)
++
++typedef struct
++{
++  unsigned char	e_ident[EI_NIDENT];	/* Magic number and other info */
++  Elf32_Half	e_type;			/* Object file type */
++  Elf32_Half	e_machine;		/* Architecture */
++  Elf32_Word	e_version;		/* Object file version */
++  Elf32_Addr	e_entry;		/* Entry point virtual address */
++  Elf32_Off	e_phoff;		/* Program header table file offset */
++  Elf32_Off	e_shoff;		/* Section header table file offset */
++  Elf32_Word	e_flags;		/* Processor-specific flags */
++  Elf32_Half	e_ehsize;		/* ELF header size in bytes */
++  Elf32_Half	e_phentsize;		/* Program header table entry size */
++  Elf32_Half	e_phnum;		/* Program header table entry count */
++  Elf32_Half	e_shentsize;		/* Section header table entry size */
++  Elf32_Half	e_shnum;		/* Section header table entry count */
++  Elf32_Half	e_shstrndx;		/* Section header string table index */
++} Elf32_Ehdr;
++
++typedef struct
++{
++  unsigned char	e_ident[EI_NIDENT];	/* Magic number and other info */
++  Elf64_Half	e_type;			/* Object file type */
++  Elf64_Half	e_machine;		/* Architecture */
++  Elf64_Word	e_version;		/* Object file version */
++  Elf64_Addr	e_entry;		/* Entry point virtual address */
++  Elf64_Off	e_phoff;		/* Program header table file offset */
++  Elf64_Off	e_shoff;		/* Section header table file offset */
++  Elf64_Word	e_flags;		/* Processor-specific flags */
++  Elf64_Half	e_ehsize;		/* ELF header size in bytes */
++  Elf64_Half	e_phentsize;		/* Program header table entry size */
++  Elf64_Half	e_phnum;		/* Program header table entry count */
++  Elf64_Half	e_shentsize;		/* Section header table entry size */
++  Elf64_Half	e_shnum;		/* Section header table entry count */
++  Elf64_Half	e_shstrndx;		/* Section header string table index */
++} Elf64_Ehdr;
++
++/* Fields in the e_ident array.  The EI_* macros are indices into the
++   array.  The macros under each EI_* macro are the values the byte
++   may have.  */
++
++#define EI_MAG0		0		/* File identification byte 0 index */
++#define ELFMAG0		0x7f		/* Magic number byte 0 */
++
++#define EI_MAG1		1		/* File identification byte 1 index */
++#define ELFMAG1		'E'		/* Magic number byte 1 */
++
++#define EI_MAG2		2		/* File identification byte 2 index */
++#define ELFMAG2		'L'		/* Magic number byte 2 */
++
++#define EI_MAG3		3		/* File identification byte 3 index */
++#define ELFMAG3		'F'		/* Magic number byte 3 */
++
++/* Conglomeration of the identification bytes, for easy testing as a word.  */
++#define	ELFMAG		"\177ELF"
++#define	SELFMAG		4
++
++#define EI_CLASS	4		/* File class byte index */
++#define ELFCLASSNONE	0		/* Invalid class */
++#define ELFCLASS32	1		/* 32-bit objects */
++#define ELFCLASS64	2		/* 64-bit objects */
++#define ELFCLASSNUM	3
++
++#define EI_DATA		5		/* Data encoding byte index */
++#define ELFDATANONE	0		/* Invalid data encoding */
++#define ELFDATA2LSB	1		/* 2's complement, little endian */
++#define ELFDATA2MSB	2		/* 2's complement, big endian */
++#define ELFDATANUM	3
++
++#define EI_VERSION	6		/* File version byte index */
++					/* Value must be EV_CURRENT */
++
++#define EI_OSABI	7		/* OS ABI identification */
++#define ELFOSABI_NONE		0	/* UNIX System V ABI */
++#define ELFOSABI_SYSV		0	/* Alias.  */
++#define ELFOSABI_HPUX		1	/* HP-UX */
++#define ELFOSABI_NETBSD		2	/* NetBSD.  */
++#define ELFOSABI_LINUX		3	/* Linux.  */
++#define ELFOSABI_SOLARIS	6	/* Sun Solaris.  */
++#define ELFOSABI_AIX		7	/* IBM AIX.  */
++#define ELFOSABI_IRIX		8	/* SGI Irix.  */
++#define ELFOSABI_FREEBSD	9	/* FreeBSD.  */
++#define ELFOSABI_TRU64		10	/* Compaq TRU64 UNIX.  */
++#define ELFOSABI_MODESTO	11	/* Novell Modesto.  */
++#define ELFOSABI_OPENBSD	12	/* OpenBSD.  */
++#define ELFOSABI_ARM		97	/* ARM */
++#define ELFOSABI_STANDALONE	255	/* Standalone (embedded) application */
++
++#define EI_ABIVERSION	8		/* ABI version */
++
++#define EI_PAD		9		/* Byte index of padding bytes */
++
++/* Legal values for e_type (object file type).  */
++
++#define ET_NONE		0		/* No file type */
++#define ET_REL		1		/* Relocatable file */
++#define ET_EXEC		2		/* Executable file */
++#define ET_DYN		3		/* Shared object file */
++#define ET_CORE		4		/* Core file */
++#define	ET_NUM		5		/* Number of defined types */
++#define ET_LOOS		0xfe00		/* OS-specific range start */
++#define ET_HIOS		0xfeff		/* OS-specific range end */
++#define ET_LOPROC	0xff00		/* Processor-specific range start */
++#define ET_HIPROC	0xffff		/* Processor-specific range end */
++
++/* Legal values for e_machine (architecture).  */
++
++#define EM_NONE		 0		/* No machine */
++#define EM_M32		 1		/* AT&T WE 32100 */
++#define EM_SPARC	 2		/* SUN SPARC */
++#define EM_386		 3		/* Intel 80386 */
++#define EM_68K		 4		/* Motorola m68k family */
++#define EM_88K		 5		/* Motorola m88k family */
++#define EM_860		 7		/* Intel 80860 */
++#define EM_MIPS		 8		/* MIPS R3000 big-endian */
++#define EM_S370		 9		/* IBM System/370 */
++#define EM_MIPS_RS3_LE	10		/* MIPS R3000 little-endian */
++
++#define EM_PARISC	15		/* HPPA */
++#define EM_VPP500	17		/* Fujitsu VPP500 */
++#define EM_SPARC32PLUS	18		/* Sun's "v8plus" */
++#define EM_960		19		/* Intel 80960 */
++#define EM_PPC		20		/* PowerPC */
++#define EM_PPC64	21		/* PowerPC 64-bit */
++#define EM_S390		22		/* IBM S390 */
++
++#define EM_V800		36		/* NEC V800 series */
++#define EM_FR20		37		/* Fujitsu FR20 */
++#define EM_RH32		38		/* TRW RH-32 */
++#define EM_RCE		39		/* Motorola RCE */
++#define EM_ARM		40		/* ARM */
++#define EM_FAKE_ALPHA	41		/* Digital Alpha */
++#define EM_SH		42		/* Hitachi SH */
++#define EM_SPARCV9	43		/* SPARC v9 64-bit */
++#define EM_TRICORE	44		/* Siemens Tricore */
++#define EM_ARC		45		/* Argonaut RISC Core */
++#define EM_H8_300	46		/* Hitachi H8/300 */
++#define EM_H8_300H	47		/* Hitachi H8/300H */
++#define EM_H8S		48		/* Hitachi H8S */
++#define EM_H8_500	49		/* Hitachi H8/500 */
++#define EM_IA_64	50		/* Intel Merced */
++#define EM_MIPS_X	51		/* Stanford MIPS-X */
++#define EM_COLDFIRE	52		/* Motorola Coldfire */
++#define EM_68HC12	53		/* Motorola M68HC12 */
++#define EM_MMA		54		/* Fujitsu MMA Multimedia Accelerator*/
++#define EM_PCP		55		/* Siemens PCP */
++#define EM_NCPU		56		/* Sony nCPU embeeded RISC */
++#define EM_NDR1		57		/* Denso NDR1 microprocessor */
++#define EM_STARCORE	58		/* Motorola Start*Core processor */
++#define EM_ME16		59		/* Toyota ME16 processor */
++#define EM_ST100	60		/* STMicroelectronic ST100 processor */
++#define EM_TINYJ	61		/* Advanced Logic Corp. Tinyj emb.fam*/
++#define EM_X86_64	62		/* AMD x86-64 architecture */
++#define EM_PDSP		63		/* Sony DSP Processor */
++
++#define EM_FX66		66		/* Siemens FX66 microcontroller */
++#define EM_ST9PLUS	67		/* STMicroelectronics ST9+ 8/16 mc */
++#define EM_ST7		68		/* STmicroelectronics ST7 8 bit mc */
++#define EM_68HC16	69		/* Motorola MC68HC16 microcontroller */
++#define EM_68HC11	70		/* Motorola MC68HC11 microcontroller */
++#define EM_68HC08	71		/* Motorola MC68HC08 microcontroller */
++#define EM_68HC05	72		/* Motorola MC68HC05 microcontroller */
++#define EM_SVX		73		/* Silicon Graphics SVx */
++#define EM_ST19		74		/* STMicroelectronics ST19 8 bit mc */
++#define EM_VAX		75		/* Digital VAX */
++#define EM_CRIS		76		/* Axis Communications 32-bit embedded processor */
++#define EM_JAVELIN	77		/* Infineon Technologies 32-bit embedded processor */
++#define EM_FIREPATH	78		/* Element 14 64-bit DSP Processor */
++#define EM_ZSP		79		/* LSI Logic 16-bit DSP Processor */
++#define EM_MMIX		80		/* Donald Knuth's educational 64-bit processor */
++#define EM_HUANY	81		/* Harvard University machine-independent object files */
++#define EM_PRISM	82		/* SiTera Prism */
++#define EM_AVR		83		/* Atmel AVR 8-bit microcontroller */
++#define EM_FR30		84		/* Fujitsu FR30 */
++#define EM_D10V		85		/* Mitsubishi D10V */
++#define EM_D30V		86		/* Mitsubishi D30V */
++#define EM_V850		87		/* NEC v850 */
++#define EM_M32R		88		/* Mitsubishi M32R */
++#define EM_MN10300	89		/* Matsushita MN10300 */
++#define EM_MN10200	90		/* Matsushita MN10200 */
++#define EM_PJ		91		/* picoJava */
++#define EM_OPENRISC	92		/* OpenRISC 32-bit embedded processor */
++#define EM_ARC_A5	93		/* ARC Cores Tangent-A5 */
++#define EM_XTENSA	94		/* Tensilica Xtensa Architecture */
++#define EM_NUM		95
++
++/* If it is necessary to assign new unofficial EM_* values, please
++   pick large random numbers (0x8523, 0xa7f2, etc.) to minimize the
++   chances of collision with official or non-GNU unofficial values.  */
++
++#define EM_ALPHA	0x9026
++
++/* Legal values for e_version (version).  */
++
++#define EV_NONE		0		/* Invalid ELF version */
++#define EV_CURRENT	1		/* Current version */
++#define EV_NUM		2
++
++/* Section header.  */
++
++typedef struct
++{
++  Elf32_Word	sh_name;		/* Section name (string tbl index) */
++  Elf32_Word	sh_type;		/* Section type */
++  Elf32_Word	sh_flags;		/* Section flags */
++  Elf32_Addr	sh_addr;		/* Section virtual addr at execution */
++  Elf32_Off	sh_offset;		/* Section file offset */
++  Elf32_Word	sh_size;		/* Section size in bytes */
++  Elf32_Word	sh_link;		/* Link to another section */
++  Elf32_Word	sh_info;		/* Additional section information */
++  Elf32_Word	sh_addralign;		/* Section alignment */
++  Elf32_Word	sh_entsize;		/* Entry size if section holds table */
++} Elf32_Shdr;
++
++typedef struct
++{
++  Elf64_Word	sh_name;		/* Section name (string tbl index) */
++  Elf64_Word	sh_type;		/* Section type */
++  Elf64_Xword	sh_flags;		/* Section flags */
++  Elf64_Addr	sh_addr;		/* Section virtual addr at execution */
++  Elf64_Off	sh_offset;		/* Section file offset */
++  Elf64_Xword	sh_size;		/* Section size in bytes */
++  Elf64_Word	sh_link;		/* Link to another section */
++  Elf64_Word	sh_info;		/* Additional section information */
++  Elf64_Xword	sh_addralign;		/* Section alignment */
++  Elf64_Xword	sh_entsize;		/* Entry size if section holds table */
++} Elf64_Shdr;
++
++/* Special section indices.  */
++
++#define SHN_UNDEF	0		/* Undefined section */
++#define SHN_LORESERVE	0xff00		/* Start of reserved indices */
++#define SHN_LOPROC	0xff00		/* Start of processor-specific */
++#define SHN_HIPROC	0xff1f		/* End of processor-specific */
++#define SHN_LOOS	0xff20		/* Start of OS-specific */
++#define SHN_HIOS	0xff3f		/* End of OS-specific */
++#define SHN_ABS		0xfff1		/* Associated symbol is absolute */
++#define SHN_COMMON	0xfff2		/* Associated symbol is common */
++#define SHN_XINDEX	0xffff		/* Index is in extra table.  */
++#define SHN_HIRESERVE	0xffff		/* End of reserved indices */
++
++/* Legal values for sh_type (section type).  */
++
++#define SHT_NULL	  0		/* Section header table entry unused */
++#define SHT_PROGBITS	  1		/* Program data */
++#define SHT_SYMTAB	  2		/* Symbol table */
++#define SHT_STRTAB	  3		/* String table */
++#define SHT_RELA	  4		/* Relocation entries with addends */
++#define SHT_HASH	  5		/* Symbol hash table */
++#define SHT_DYNAMIC	  6		/* Dynamic linking information */
++#define SHT_NOTE	  7		/* Notes */
++#define SHT_NOBITS	  8		/* Program space with no data (bss) */
++#define SHT_REL		  9		/* Relocation entries, no addends */
++#define SHT_SHLIB	  10		/* Reserved */
++#define SHT_DYNSYM	  11		/* Dynamic linker symbol table */
++#define SHT_INIT_ARRAY	  14		/* Array of constructors */
++#define SHT_FINI_ARRAY	  15		/* Array of destructors */
++#define SHT_PREINIT_ARRAY 16		/* Array of pre-constructors */
++#define SHT_GROUP	  17		/* Section group */
++#define SHT_SYMTAB_SHNDX  18		/* Extended section indeces */
++#define	SHT_NUM		  19		/* Number of defined types.  */
++#define SHT_LOOS	  0x60000000	/* Start OS-specific */
++#define SHT_GNU_LIBLIST	  0x6ffffff7	/* Prelink library list */
++#define SHT_CHECKSUM	  0x6ffffff8	/* Checksum for DSO content.  */
++#define SHT_LOSUNW	  0x6ffffffa	/* Sun-specific low bound.  */
++#define SHT_SUNW_move	  0x6ffffffa
++#define SHT_SUNW_COMDAT   0x6ffffffb
++#define SHT_SUNW_syminfo  0x6ffffffc
++#define SHT_GNU_verdef	  0x6ffffffd	/* Version definition section.  */
++#define SHT_GNU_verneed	  0x6ffffffe	/* Version needs section.  */
++#define SHT_GNU_versym	  0x6fffffff	/* Version symbol table.  */
++#define SHT_HISUNW	  0x6fffffff	/* Sun-specific high bound.  */
++#define SHT_HIOS	  0x6fffffff	/* End OS-specific type */
++#define SHT_LOPROC	  0x70000000	/* Start of processor-specific */
++#define SHT_HIPROC	  0x7fffffff	/* End of processor-specific */
++#define SHT_LOUSER	  0x80000000	/* Start of application-specific */
++#define SHT_HIUSER	  0x8fffffff	/* End of application-specific */
++
++/* Legal values for sh_flags (section flags).  */
++
++#define SHF_WRITE	     (1 << 0)	/* Writable */
++#define SHF_ALLOC	     (1 << 1)	/* Occupies memory during execution */
++#define SHF_EXECINSTR	     (1 << 2)	/* Executable */
++#define SHF_MERGE	     (1 << 4)	/* Might be merged */
++#define SHF_STRINGS	     (1 << 5)	/* Contains nul-terminated strings */
++#define SHF_INFO_LINK	     (1 << 6)	/* `sh_info' contains SHT index */
++#define SHF_LINK_ORDER	     (1 << 7)	/* Preserve order after combining */
++#define SHF_OS_NONCONFORMING (1 << 8)	/* Non-standard OS specific handling
++					   required */
++#define SHF_GROUP	     (1 << 9)	/* Section is member of a group.  */
++#define SHF_TLS		     (1 << 10)	/* Section hold thread-local data.  */
++#define SHF_MASKOS	     0x0ff00000	/* OS-specific.  */
++#define SHF_MASKPROC	     0xf0000000	/* Processor-specific */
++
++/* Section group handling.  */
++#define GRP_COMDAT	0x1		/* Mark group as COMDAT.  */
++
++/* Symbol table entry.  */
++
++typedef struct
++{
++  Elf32_Word	st_name;		/* Symbol name (string tbl index) */
++  Elf32_Addr	st_value;		/* Symbol value */
++  Elf32_Word	st_size;		/* Symbol size */
++  unsigned char	st_info;		/* Symbol type and binding */
++  unsigned char	st_other;		/* Symbol visibility */
++  Elf32_Section	st_shndx;		/* Section index */
++} Elf32_Sym;
++
++typedef struct
++{
++  Elf64_Word	st_name;		/* Symbol name (string tbl index) */
++  unsigned char	st_info;		/* Symbol type and binding */
++  unsigned char st_other;		/* Symbol visibility */
++  Elf64_Section	st_shndx;		/* Section index */
++  Elf64_Addr	st_value;		/* Symbol value */
++  Elf64_Xword	st_size;		/* Symbol size */
++} Elf64_Sym;
++
++/* The syminfo section if available contains additional information about
++   every dynamic symbol.  */
++
++typedef struct
++{
++  Elf32_Half si_boundto;		/* Direct bindings, symbol bound to */
++  Elf32_Half si_flags;			/* Per symbol flags */
++} Elf32_Syminfo;
++
++typedef struct
++{
++  Elf64_Half si_boundto;		/* Direct bindings, symbol bound to */
++  Elf64_Half si_flags;			/* Per symbol flags */
++} Elf64_Syminfo;
++
++/* Possible values for si_boundto.  */
++#define SYMINFO_BT_SELF		0xffff	/* Symbol bound to self */
++#define SYMINFO_BT_PARENT	0xfffe	/* Symbol bound to parent */
++#define SYMINFO_BT_LOWRESERVE	0xff00	/* Beginning of reserved entries */
++
++/* Possible bitmasks for si_flags.  */
++#define SYMINFO_FLG_DIRECT	0x0001	/* Direct bound symbol */
++#define SYMINFO_FLG_PASSTHRU	0x0002	/* Pass-thru symbol for translator */
++#define SYMINFO_FLG_COPY	0x0004	/* Symbol is a copy-reloc */
++#define SYMINFO_FLG_LAZYLOAD	0x0008	/* Symbol bound to object to be lazy
++					   loaded */
++/* Syminfo version values.  */
++#define SYMINFO_NONE		0
++#define SYMINFO_CURRENT		1
++#define SYMINFO_NUM		2
++
++
++/* How to extract and insert information held in the st_info field.  */
++
++#define ELF32_ST_BIND(val)		(((unsigned char) (val)) >> 4)
++#define ELF32_ST_TYPE(val)		((val) & 0xf)
++#define ELF32_ST_INFO(bind, type)	(((bind) << 4) + ((type) & 0xf))
++
++/* Both Elf32_Sym and Elf64_Sym use the same one-byte st_info field.  */
++#define ELF64_ST_BIND(val)		ELF32_ST_BIND (val)
++#define ELF64_ST_TYPE(val)		ELF32_ST_TYPE (val)
++#define ELF64_ST_INFO(bind, type)	ELF32_ST_INFO ((bind), (type))
++
++/* Legal values for ST_BIND subfield of st_info (symbol binding).  */
++
++#define STB_LOCAL	0		/* Local symbol */
++#define STB_GLOBAL	1		/* Global symbol */
++#define STB_WEAK	2		/* Weak symbol */
++#define	STB_NUM		3		/* Number of defined types.  */
++#define STB_LOOS	10		/* Start of OS-specific */
++#define STB_HIOS	12		/* End of OS-specific */
++#define STB_LOPROC	13		/* Start of processor-specific */
++#define STB_HIPROC	15		/* End of processor-specific */
++
++/* Legal values for ST_TYPE subfield of st_info (symbol type).  */
++
++#define STT_NOTYPE	0		/* Symbol type is unspecified */
++#define STT_OBJECT	1		/* Symbol is a data object */
++#define STT_FUNC	2		/* Symbol is a code object */
++#define STT_SECTION	3		/* Symbol associated with a section */
++#define STT_FILE	4		/* Symbol's name is file name */
++#define STT_COMMON	5		/* Symbol is a common data object */
++#define STT_TLS		6		/* Symbol is thread-local data object*/
++#define	STT_NUM		7		/* Number of defined types.  */
++#define STT_LOOS	10		/* Start of OS-specific */
++#define STT_HIOS	12		/* End of OS-specific */
++#define STT_LOPROC	13		/* Start of processor-specific */
++#define STT_HIPROC	15		/* End of processor-specific */
++
++
++/* Symbol table indices are found in the hash buckets and chain table
++   of a symbol hash table section.  This special index value indicates
++   the end of a chain, meaning no further symbols are found in that bucket.  */
++
++#define STN_UNDEF	0		/* End of a chain.  */
++
++
++/* How to extract and insert information held in the st_other field.  */
++
++#define ELF32_ST_VISIBILITY(o)	((o) & 0x03)
++
++/* For ELF64 the definitions are the same.  */
++#define ELF64_ST_VISIBILITY(o)	ELF32_ST_VISIBILITY (o)
++
++/* Symbol visibility specification encoded in the st_other field.  */
++#define STV_DEFAULT	0		/* Default symbol visibility rules */
++#define STV_INTERNAL	1		/* Processor specific hidden class */
++#define STV_HIDDEN	2		/* Sym unavailable in other modules */
++#define STV_PROTECTED	3		/* Not preemptible, not exported */
++
++
++/* Relocation table entry without addend (in section of type SHT_REL).  */
++
++typedef struct
++{
++  Elf32_Addr	r_offset;		/* Address */
++  Elf32_Word	r_info;			/* Relocation type and symbol index */
++} Elf32_Rel;
++
++/* I have seen two different definitions of the Elf64_Rel and
++   Elf64_Rela structures, so we'll leave them out until Novell (or
++   whoever) gets their act together.  */
++/* The following, at least, is used on Sparc v9, MIPS, and Alpha.  */
++
++typedef struct
++{
++  Elf64_Addr	r_offset;		/* Address */
++  Elf64_Xword	r_info;			/* Relocation type and symbol index */
++} Elf64_Rel;
++
++/* Relocation table entry with addend (in section of type SHT_RELA).  */
++
++typedef struct
++{
++  Elf32_Addr	r_offset;		/* Address */
++  Elf32_Word	r_info;			/* Relocation type and symbol index */
++  Elf32_Sword	r_addend;		/* Addend */
++} Elf32_Rela;
++
++typedef struct
++{
++  Elf64_Addr	r_offset;		/* Address */
++  Elf64_Xword	r_info;			/* Relocation type and symbol index */
++  Elf64_Sxword	r_addend;		/* Addend */
++} Elf64_Rela;
++
++/* How to extract and insert information held in the r_info field.  */
++
++#define ELF32_R_SYM(val)		((val) >> 8)
++#define ELF32_R_TYPE(val)		((val) & 0xff)
++#define ELF32_R_INFO(sym, type)		(((sym) << 8) + ((type) & 0xff))
++
++#define ELF64_R_SYM(i)			((i) >> 32)
++#define ELF64_R_TYPE(i)			((i) & 0xffffffff)
++#define ELF64_R_INFO(sym,type)		((((Elf64_Xword) (sym)) << 32) + (type))
++
++/* Program segment header.  */
++
++typedef struct
++{
++  Elf32_Word	p_type;			/* Segment type */
++  Elf32_Off	p_offset;		/* Segment file offset */
++  Elf32_Addr	p_vaddr;		/* Segment virtual address */
++  Elf32_Addr	p_paddr;		/* Segment physical address */
++  Elf32_Word	p_filesz;		/* Segment size in file */
++  Elf32_Word	p_memsz;		/* Segment size in memory */
++  Elf32_Word	p_flags;		/* Segment flags */
++  Elf32_Word	p_align;		/* Segment alignment */
++} Elf32_Phdr;
++
++typedef struct
++{
++  Elf64_Word	p_type;			/* Segment type */
++  Elf64_Word	p_flags;		/* Segment flags */
++  Elf64_Off	p_offset;		/* Segment file offset */
++  Elf64_Addr	p_vaddr;		/* Segment virtual address */
++  Elf64_Addr	p_paddr;		/* Segment physical address */
++  Elf64_Xword	p_filesz;		/* Segment size in file */
++  Elf64_Xword	p_memsz;		/* Segment size in memory */
++  Elf64_Xword	p_align;		/* Segment alignment */
++} Elf64_Phdr;
++
++/* Legal values for p_type (segment type).  */
++
++#define	PT_NULL		0		/* Program header table entry unused */
++#define PT_LOAD		1		/* Loadable program segment */
++#define PT_DYNAMIC	2		/* Dynamic linking information */
++#define PT_INTERP	3		/* Program interpreter */
++#define PT_NOTE		4		/* Auxiliary information */
++#define PT_SHLIB	5		/* Reserved */
++#define PT_PHDR		6		/* Entry for header table itself */
++#define PT_TLS		7		/* Thread-local storage segment */
++#define	PT_NUM		8		/* Number of defined types */
++#define PT_LOOS		0x60000000	/* Start of OS-specific */
++#define PT_GNU_EH_FRAME	0x6474e550	/* GCC .eh_frame_hdr segment */
++#define PT_GNU_STACK	0x6474e551	/* Indicates stack executability */
++#define PT_LOSUNW	0x6ffffffa
++#define PT_SUNWBSS	0x6ffffffa	/* Sun Specific segment */
++#define PT_SUNWSTACK	0x6ffffffb	/* Stack segment */
++#define PT_HISUNW	0x6fffffff
++#define PT_HIOS		0x6fffffff	/* End of OS-specific */
++#define PT_LOPROC	0x70000000	/* Start of processor-specific */
++#define PT_HIPROC	0x7fffffff	/* End of processor-specific */
++
++/* Legal values for p_flags (segment flags).  */
++
++#define PF_X		(1 << 0)	/* Segment is executable */
++#define PF_W		(1 << 1)	/* Segment is writable */
++#define PF_R		(1 << 2)	/* Segment is readable */
++#define PF_MASKOS	0x0ff00000	/* OS-specific */
++#define PF_MASKPROC	0xf0000000	/* Processor-specific */
++
++/* Legal values for note segment descriptor types for core files. */
++
++#define NT_PRSTATUS	1		/* Contains copy of prstatus struct */
++#define NT_FPREGSET	2		/* Contains copy of fpregset struct */
++#define NT_PRPSINFO	3		/* Contains copy of prpsinfo struct */
++#define NT_PRXREG	4		/* Contains copy of prxregset struct */
++#define NT_TASKSTRUCT	4		/* Contains copy of task structure */
++#define NT_PLATFORM	5		/* String from sysinfo(SI_PLATFORM) */
++#define NT_AUXV		6		/* Contains copy of auxv array */
++#define NT_GWINDOWS	7		/* Contains copy of gwindows struct */
++#define NT_ASRS		8		/* Contains copy of asrset struct */
++#define NT_PSTATUS	10		/* Contains copy of pstatus struct */
++#define NT_PSINFO	13		/* Contains copy of psinfo struct */
++#define NT_PRCRED	14		/* Contains copy of prcred struct */
++#define NT_UTSNAME	15		/* Contains copy of utsname struct */
++#define NT_LWPSTATUS	16		/* Contains copy of lwpstatus struct */
++#define NT_LWPSINFO	17		/* Contains copy of lwpinfo struct */
++#define NT_PRFPXREG	20		/* Contains copy of fprxregset struct*/
++
++/* Legal values for the note segment descriptor types for object files.  */
++
++#define NT_VERSION	1		/* Contains a version string.  */
++
++
++/* Dynamic section entry.  */
++
++typedef struct
++{
++  Elf32_Sword	d_tag;			/* Dynamic entry type */
++  union
++    {
++      Elf32_Word d_val;			/* Integer value */
++      Elf32_Addr d_ptr;			/* Address value */
++    } d_un;
++} Elf32_Dyn;
++
++typedef struct
++{
++  Elf64_Sxword	d_tag;			/* Dynamic entry type */
++  union
++    {
++      Elf64_Xword d_val;		/* Integer value */
++      Elf64_Addr d_ptr;			/* Address value */
++    } d_un;
++} Elf64_Dyn;
++
++/* Legal values for d_tag (dynamic entry type).  */
++
++#define DT_NULL		0		/* Marks end of dynamic section */
++#define DT_NEEDED	1		/* Name of needed library */
++#define DT_PLTRELSZ	2		/* Size in bytes of PLT relocs */
++#define DT_PLTGOT	3		/* Processor defined value */
++#define DT_HASH		4		/* Address of symbol hash table */
++#define DT_STRTAB	5		/* Address of string table */
++#define DT_SYMTAB	6		/* Address of symbol table */
++#define DT_RELA		7		/* Address of Rela relocs */
++#define DT_RELASZ	8		/* Total size of Rela relocs */
++#define DT_RELAENT	9		/* Size of one Rela reloc */
++#define DT_STRSZ	10		/* Size of string table */
++#define DT_SYMENT	11		/* Size of one symbol table entry */
++#define DT_INIT		12		/* Address of init function */
++#define DT_FINI		13		/* Address of termination function */
++#define DT_SONAME	14		/* Name of shared object */
++#define DT_RPATH	15		/* Library search path (deprecated) */
++#define DT_SYMBOLIC	16		/* Start symbol search here */
++#define DT_REL		17		/* Address of Rel relocs */
++#define DT_RELSZ	18		/* Total size of Rel relocs */
++#define DT_RELENT	19		/* Size of one Rel reloc */
++#define DT_PLTREL	20		/* Type of reloc in PLT */
++#define DT_DEBUG	21		/* For debugging; unspecified */
++#define DT_TEXTREL	22		/* Reloc might modify .text */
++#define DT_JMPREL	23		/* Address of PLT relocs */
++#define	DT_BIND_NOW	24		/* Process relocations of object */
++#define	DT_INIT_ARRAY	25		/* Array with addresses of init fct */
++#define	DT_FINI_ARRAY	26		/* Array with addresses of fini fct */
++#define	DT_INIT_ARRAYSZ	27		/* Size in bytes of DT_INIT_ARRAY */
++#define	DT_FINI_ARRAYSZ	28		/* Size in bytes of DT_FINI_ARRAY */
++#define DT_RUNPATH	29		/* Library search path */
++#define DT_FLAGS	30		/* Flags for the object being loaded */
++#define DT_ENCODING	32		/* Start of encoded range */
++#define DT_PREINIT_ARRAY 32		/* Array with addresses of preinit fct*/
++#define DT_PREINIT_ARRAYSZ 33		/* size in bytes of DT_PREINIT_ARRAY */
++#define	DT_NUM		34		/* Number used */
++#define DT_LOOS		0x6000000d	/* Start of OS-specific */
++#define DT_HIOS		0x6ffff000	/* End of OS-specific */
++#define DT_LOPROC	0x70000000	/* Start of processor-specific */
++#define DT_HIPROC	0x7fffffff	/* End of processor-specific */
++#define	DT_PROCNUM	DT_MIPS_NUM	/* Most used by any processor */
++
++/* DT_* entries which fall between DT_VALRNGHI & DT_VALRNGLO use the
++   Dyn.d_un.d_val field of the Elf*_Dyn structure.  This follows Sun's
++   approach.  */
++#define DT_VALRNGLO	0x6ffffd00
++#define DT_GNU_PRELINKED 0x6ffffdf5	/* Prelinking timestamp */
++#define DT_GNU_CONFLICTSZ 0x6ffffdf6	/* Size of conflict section */
++#define DT_GNU_LIBLISTSZ 0x6ffffdf7	/* Size of library list */
++#define DT_CHECKSUM	0x6ffffdf8
++#define DT_PLTPADSZ	0x6ffffdf9
++#define DT_MOVEENT	0x6ffffdfa
++#define DT_MOVESZ	0x6ffffdfb
++#define DT_FEATURE_1	0x6ffffdfc	/* Feature selection (DTF_*).  */
++#define DT_POSFLAG_1	0x6ffffdfd	/* Flags for DT_* entries, effecting
++					   the following DT_* entry.  */
++#define DT_SYMINSZ	0x6ffffdfe	/* Size of syminfo table (in bytes) */
++#define DT_SYMINENT	0x6ffffdff	/* Entry size of syminfo */
++#define DT_VALRNGHI	0x6ffffdff
++#define DT_VALTAGIDX(tag)	(DT_VALRNGHI - (tag))	/* Reverse order! */
++#define DT_VALNUM 12
++
++/* DT_* entries which fall between DT_ADDRRNGHI & DT_ADDRRNGLO use the
++   Dyn.d_un.d_ptr field of the Elf*_Dyn structure.
++
++   If any adjustment is made to the ELF object after it has been
++   built these entries will need to be adjusted.  */
++#define DT_ADDRRNGLO	0x6ffffe00
++#define DT_GNU_CONFLICT	0x6ffffef8	/* Start of conflict section */
++#define DT_GNU_LIBLIST	0x6ffffef9	/* Library list */
++#define DT_CONFIG	0x6ffffefa	/* Configuration information.  */
++#define DT_DEPAUDIT	0x6ffffefb	/* Dependency auditing.  */
++#define DT_AUDIT	0x6ffffefc	/* Object auditing.  */
++#define	DT_PLTPAD	0x6ffffefd	/* PLT padding.  */
++#define	DT_MOVETAB	0x6ffffefe	/* Move table.  */
++#define DT_SYMINFO	0x6ffffeff	/* Syminfo table.  */
++#define DT_ADDRRNGHI	0x6ffffeff
++#define DT_ADDRTAGIDX(tag)	(DT_ADDRRNGHI - (tag))	/* Reverse order! */
++#define DT_ADDRNUM 10
++
++/* The versioning entry types.  The next are defined as part of the
++   GNU extension.  */
++#define DT_VERSYM	0x6ffffff0
++
++#define DT_RELACOUNT	0x6ffffff9
++#define DT_RELCOUNT	0x6ffffffa
++
++/* These were chosen by Sun.  */
++#define DT_FLAGS_1	0x6ffffffb	/* State flags, see DF_1_* below.  */
++#define	DT_VERDEF	0x6ffffffc	/* Address of version definition
++					   table */
++#define	DT_VERDEFNUM	0x6ffffffd	/* Number of version definitions */
++#define	DT_VERNEED	0x6ffffffe	/* Address of table with needed
++					   versions */
++#define	DT_VERNEEDNUM	0x6fffffff	/* Number of needed versions */
++#define DT_VERSIONTAGIDX(tag)	(DT_VERNEEDNUM - (tag))	/* Reverse order! */
++#define DT_VERSIONTAGNUM 16
++
++/* Sun added these machine-independent extensions in the "processor-specific"
++   range.  Be compatible.  */
++#define DT_AUXILIARY    0x7ffffffd      /* Shared object to load before self */
++#define DT_FILTER       0x7fffffff      /* Shared object to get values from */
++#define DT_EXTRATAGIDX(tag)	((Elf32_Word)-((Elf32_Sword) (tag) <<1>>1)-1)
++#define DT_EXTRANUM	3
++
++/* Values of `d_un.d_val' in the DT_FLAGS entry.  */
++#define DF_ORIGIN	0x00000001	/* Object may use DF_ORIGIN */
++#define DF_SYMBOLIC	0x00000002	/* Symbol resolutions starts here */
++#define DF_TEXTREL	0x00000004	/* Object contains text relocations */
++#define DF_BIND_NOW	0x00000008	/* No lazy binding for this object */
++#define DF_STATIC_TLS	0x00000010	/* Module uses the static TLS model */
++
++/* State flags selectable in the `d_un.d_val' element of the DT_FLAGS_1
++   entry in the dynamic section.  */
++#define DF_1_NOW	0x00000001	/* Set RTLD_NOW for this object.  */
++#define DF_1_GLOBAL	0x00000002	/* Set RTLD_GLOBAL for this object.  */
++#define DF_1_GROUP	0x00000004	/* Set RTLD_GROUP for this object.  */
++#define DF_1_NODELETE	0x00000008	/* Set RTLD_NODELETE for this object.*/
++#define DF_1_LOADFLTR	0x00000010	/* Trigger filtee loading at runtime.*/
++#define DF_1_INITFIRST	0x00000020	/* Set RTLD_INITFIRST for this object*/
++#define DF_1_NOOPEN	0x00000040	/* Set RTLD_NOOPEN for this object.  */
++#define DF_1_ORIGIN	0x00000080	/* $ORIGIN must be handled.  */
++#define DF_1_DIRECT	0x00000100	/* Direct binding enabled.  */
++#define DF_1_TRANS	0x00000200
++#define DF_1_INTERPOSE	0x00000400	/* Object is used to interpose.  */
++#define DF_1_NODEFLIB	0x00000800	/* Ignore default lib search path.  */
++#define DF_1_NODUMP	0x00001000	/* Object can't be dldump'ed.  */
++#define DF_1_CONFALT	0x00002000	/* Configuration alternative created.*/
++#define DF_1_ENDFILTEE	0x00004000	/* Filtee terminates filters search. */
++#define	DF_1_DISPRELDNE	0x00008000	/* Disp reloc applied at build time. */
++#define	DF_1_DISPRELPND	0x00010000	/* Disp reloc applied at run-time.  */
++
++/* Flags for the feature selection in DT_FEATURE_1.  */
++#define DTF_1_PARINIT	0x00000001
++#define DTF_1_CONFEXP	0x00000002
++
++/* Flags in the DT_POSFLAG_1 entry effecting only the next DT_* entry.  */
++#define DF_P1_LAZYLOAD	0x00000001	/* Lazyload following object.  */
++#define DF_P1_GROUPPERM	0x00000002	/* Symbols from next object are not
++					   generally available.  */
++
++/* Version definition sections.  */
++
++typedef struct
++{
++  Elf32_Half	vd_version;		/* Version revision */
++  Elf32_Half	vd_flags;		/* Version information */
++  Elf32_Half	vd_ndx;			/* Version Index */
++  Elf32_Half	vd_cnt;			/* Number of associated aux entries */
++  Elf32_Word	vd_hash;		/* Version name hash value */
++  Elf32_Word	vd_aux;			/* Offset in bytes to verdaux array */
++  Elf32_Word	vd_next;		/* Offset in bytes to next verdef
++					   entry */
++} Elf32_Verdef;
++
++typedef struct
++{
++  Elf64_Half	vd_version;		/* Version revision */
++  Elf64_Half	vd_flags;		/* Version information */
++  Elf64_Half	vd_ndx;			/* Version Index */
++  Elf64_Half	vd_cnt;			/* Number of associated aux entries */
++  Elf64_Word	vd_hash;		/* Version name hash value */
++  Elf64_Word	vd_aux;			/* Offset in bytes to verdaux array */
++  Elf64_Word	vd_next;		/* Offset in bytes to next verdef
++					   entry */
++} Elf64_Verdef;
++
++
++/* Legal values for vd_version (version revision).  */
++#define VER_DEF_NONE	0		/* No version */
++#define VER_DEF_CURRENT	1		/* Current version */
++#define VER_DEF_NUM	2		/* Given version number */
++
++/* Legal values for vd_flags (version information flags).  */
++#define VER_FLG_BASE	0x1		/* Version definition of file itself */
++#define VER_FLG_WEAK	0x2		/* Weak version identifier */
++
++/* Versym symbol index values.  */
++#define	VER_NDX_LOCAL		0	/* Symbol is local.  */
++#define	VER_NDX_GLOBAL		1	/* Symbol is global.  */
++#define	VER_NDX_LORESERVE	0xff00	/* Beginning of reserved entries.  */
++#define	VER_NDX_ELIMINATE	0xff01	/* Symbol is to be eliminated.  */
++
++/* Auxialiary version information.  */
++
++typedef struct
++{
++  Elf32_Word	vda_name;		/* Version or dependency names */
++  Elf32_Word	vda_next;		/* Offset in bytes to next verdaux
++					   entry */
++} Elf32_Verdaux;
++
++typedef struct
++{
++  Elf64_Word	vda_name;		/* Version or dependency names */
++  Elf64_Word	vda_next;		/* Offset in bytes to next verdaux
++					   entry */
++} Elf64_Verdaux;
++
++
++/* Version dependency section.  */
++
++typedef struct
++{
++  Elf32_Half	vn_version;		/* Version of structure */
++  Elf32_Half	vn_cnt;			/* Number of associated aux entries */
++  Elf32_Word	vn_file;		/* Offset of filename for this
++					   dependency */
++  Elf32_Word	vn_aux;			/* Offset in bytes to vernaux array */
++  Elf32_Word	vn_next;		/* Offset in bytes to next verneed
++					   entry */
++} Elf32_Verneed;
++
++typedef struct
++{
++  Elf64_Half	vn_version;		/* Version of structure */
++  Elf64_Half	vn_cnt;			/* Number of associated aux entries */
++  Elf64_Word	vn_file;		/* Offset of filename for this
++					   dependency */
++  Elf64_Word	vn_aux;			/* Offset in bytes to vernaux array */
++  Elf64_Word	vn_next;		/* Offset in bytes to next verneed
++					   entry */
++} Elf64_Verneed;
++
++
++/* Legal values for vn_version (version revision).  */
++#define VER_NEED_NONE	 0		/* No version */
++#define VER_NEED_CURRENT 1		/* Current version */
++#define VER_NEED_NUM	 2		/* Given version number */
++
++/* Auxiliary needed version information.  */
++
++typedef struct
++{
++  Elf32_Word	vna_hash;		/* Hash value of dependency name */
++  Elf32_Half	vna_flags;		/* Dependency specific information */
++  Elf32_Half	vna_other;		/* Unused */
++  Elf32_Word	vna_name;		/* Dependency name string offset */
++  Elf32_Word	vna_next;		/* Offset in bytes to next vernaux
++					   entry */
++} Elf32_Vernaux;
++
++typedef struct
++{
++  Elf64_Word	vna_hash;		/* Hash value of dependency name */
++  Elf64_Half	vna_flags;		/* Dependency specific information */
++  Elf64_Half	vna_other;		/* Unused */
++  Elf64_Word	vna_name;		/* Dependency name string offset */
++  Elf64_Word	vna_next;		/* Offset in bytes to next vernaux
++					   entry */
++} Elf64_Vernaux;
++
++
++/* Legal values for vna_flags.  */
++#define VER_FLG_WEAK	0x2		/* Weak version identifier */
++
++
++/* Auxiliary vector.  */
++
++/* This vector is normally only used by the program interpreter.  The
++   usual definition in an ABI supplement uses the name auxv_t.  The
++   vector is not usually defined in a standard <elf.h> file, but it
++   can't hurt.  We rename it to avoid conflicts.  The sizes of these
++   types are an arrangement between the exec server and the program
++   interpreter, so we don't fully specify them here.  */
++
++typedef struct
++{
++  int a_type;			/* Entry type */
++  union
++    {
++      long int a_val;		/* Integer value */
++      void *a_ptr;		/* Pointer value */
++      void (*a_fcn) (void);	/* Function pointer value */
++    } a_un;
++} Elf32_auxv_t;
++
++typedef struct
++{
++  long int a_type;		/* Entry type */
++  union
++    {
++      long int a_val;		/* Integer value */
++      void *a_ptr;		/* Pointer value */
++      void (*a_fcn) (void);	/* Function pointer value */
++    } a_un;
++} Elf64_auxv_t;
++
++/* Legal values for a_type (entry type).  */
++
++#define AT_NULL		0		/* End of vector */
++#define AT_IGNORE	1		/* Entry should be ignored */
++#define AT_EXECFD	2		/* File descriptor of program */
++#define AT_PHDR		3		/* Program headers for program */
++#define AT_PHENT	4		/* Size of program header entry */
++#define AT_PHNUM	5		/* Number of program headers */
++#define AT_PAGESZ	6		/* System page size */
++#define AT_BASE		7		/* Base address of interpreter */
++#define AT_FLAGS	8		/* Flags */
++#define AT_ENTRY	9		/* Entry point of program */
++#define AT_NOTELF	10		/* Program is not ELF */
++#define AT_UID		11		/* Real uid */
++#define AT_EUID		12		/* Effective uid */
++#define AT_GID		13		/* Real gid */
++#define AT_EGID		14		/* Effective gid */
++#define AT_CLKTCK	17		/* Frequency of times() */
++
++/* Some more special a_type values describing the hardware.  */
++#define AT_PLATFORM	15		/* String identifying platform.  */
++#define AT_HWCAP	16		/* Machine dependent hints about
++					   processor capabilities.  */
++
++/* This entry gives some information about the FPU initialization
++   performed by the kernel.  */
++#define AT_FPUCW	18		/* Used FPU control word.  */
++
++/* Cache block sizes.  */
++#define AT_DCACHEBSIZE	19		/* Data cache block size.  */
++#define AT_ICACHEBSIZE	20		/* Instruction cache block size.  */
++#define AT_UCACHEBSIZE	21		/* Unified cache block size.  */
++
++/* A special ignored value for PPC, used by the kernel to control the
++   interpretation of the AUXV. Must be > 16.  */
++#define AT_IGNOREPPC	22		/* Entry should be ignored.  */
++
++#define	AT_SECURE	23		/* Boolean, was exec setuid-like?  */
++
++/* Pointer to the global system page used for system calls and other
++   nice things.  */
++#define AT_SYSINFO	32
++#define AT_SYSINFO_EHDR	33
++
++
++/* Note section contents.  Each entry in the note section begins with
++   a header of a fixed form.  */
++
++typedef struct
++{
++  Elf32_Word n_namesz;			/* Length of the note's name.  */
++  Elf32_Word n_descsz;			/* Length of the note's descriptor.  */
++  Elf32_Word n_type;			/* Type of the note.  */
++} Elf32_Nhdr;
++
++typedef struct
++{
++  Elf64_Word n_namesz;			/* Length of the note's name.  */
++  Elf64_Word n_descsz;			/* Length of the note's descriptor.  */
++  Elf64_Word n_type;			/* Type of the note.  */
++} Elf64_Nhdr;
++
++/* Known names of notes.  */
++
++/* Solaris entries in the note section have this name.  */
++#define ELF_NOTE_SOLARIS	"SUNW Solaris"
++
++/* Note entries for GNU systems have this name.  */
++#define ELF_NOTE_GNU		"GNU"
++
++
++/* Defined types of notes for Solaris.  */
++
++/* Value of descriptor (one word) is desired pagesize for the binary.  */
++#define ELF_NOTE_PAGESIZE_HINT	1
++
++
++/* Defined note types for GNU systems.  */
++
++/* ABI information.  The descriptor consists of words:
++   word 0: OS descriptor
++   word 1: major version of the ABI
++   word 2: minor version of the ABI
++   word 3: subminor version of the ABI
++*/
++#define ELF_NOTE_ABI		1
++
++/* Known OSes.  These value can appear in word 0 of an ELF_NOTE_ABI
++   note section entry.  */
++#define ELF_NOTE_OS_LINUX	0
++#define ELF_NOTE_OS_GNU		1
++#define ELF_NOTE_OS_SOLARIS2	2
++#define ELF_NOTE_OS_FREEBSD	3
++
++
++/* Move records.  */
++typedef struct
++{
++  Elf32_Xword m_value;		/* Symbol value.  */
++  Elf32_Word m_info;		/* Size and index.  */
++  Elf32_Word m_poffset;		/* Symbol offset.  */
++  Elf32_Half m_repeat;		/* Repeat count.  */
++  Elf32_Half m_stride;		/* Stride info.  */
++} Elf32_Move;
++
++typedef struct
++{
++  Elf64_Xword m_value;		/* Symbol value.  */
++  Elf64_Xword m_info;		/* Size and index.  */
++  Elf64_Xword m_poffset;	/* Symbol offset.  */
++  Elf64_Half m_repeat;		/* Repeat count.  */
++  Elf64_Half m_stride;		/* Stride info.  */
++} Elf64_Move;
++
++/* Macro to construct move records.  */
++#define ELF32_M_SYM(info)	((info) >> 8)
++#define ELF32_M_SIZE(info)	((unsigned char) (info))
++#define ELF32_M_INFO(sym, size)	(((sym) << 8) + (unsigned char) (size))
++
++#define ELF64_M_SYM(info)	ELF32_M_SYM (info)
++#define ELF64_M_SIZE(info)	ELF32_M_SIZE (info)
++#define ELF64_M_INFO(sym, size)	ELF32_M_INFO (sym, size)
++
++
++/* Motorola 68k specific definitions.  */
++
++/* Values for Elf32_Ehdr.e_flags.  */
++#define EF_CPU32	0x00810000
++
++/* m68k relocs.  */
++
++#define R_68K_NONE	0		/* No reloc */
++#define R_68K_32	1		/* Direct 32 bit  */
++#define R_68K_16	2		/* Direct 16 bit  */
++#define R_68K_8		3		/* Direct 8 bit  */
++#define R_68K_PC32	4		/* PC relative 32 bit */
++#define R_68K_PC16	5		/* PC relative 16 bit */
++#define R_68K_PC8	6		/* PC relative 8 bit */
++#define R_68K_GOT32	7		/* 32 bit PC relative GOT entry */
++#define R_68K_GOT16	8		/* 16 bit PC relative GOT entry */
++#define R_68K_GOT8	9		/* 8 bit PC relative GOT entry */
++#define R_68K_GOT32O	10		/* 32 bit GOT offset */
++#define R_68K_GOT16O	11		/* 16 bit GOT offset */
++#define R_68K_GOT8O	12		/* 8 bit GOT offset */
++#define R_68K_PLT32	13		/* 32 bit PC relative PLT address */
++#define R_68K_PLT16	14		/* 16 bit PC relative PLT address */
++#define R_68K_PLT8	15		/* 8 bit PC relative PLT address */
++#define R_68K_PLT32O	16		/* 32 bit PLT offset */
++#define R_68K_PLT16O	17		/* 16 bit PLT offset */
++#define R_68K_PLT8O	18		/* 8 bit PLT offset */
++#define R_68K_COPY	19		/* Copy symbol at runtime */
++#define R_68K_GLOB_DAT	20		/* Create GOT entry */
++#define R_68K_JMP_SLOT	21		/* Create PLT entry */
++#define R_68K_RELATIVE	22		/* Adjust by program base */
++/* Keep this the last entry.  */
++#define R_68K_NUM	23
++
++/* Intel 80386 specific definitions.  */
++
++/* i386 relocs.  */
++
++#define R_386_NONE	   0		/* No reloc */
++#define R_386_32	   1		/* Direct 32 bit  */
++#define R_386_PC32	   2		/* PC relative 32 bit */
++#define R_386_GOT32	   3		/* 32 bit GOT entry */
++#define R_386_PLT32	   4		/* 32 bit PLT address */
++#define R_386_COPY	   5		/* Copy symbol at runtime */
++#define R_386_GLOB_DAT	   6		/* Create GOT entry */
++#define R_386_JMP_SLOT	   7		/* Create PLT entry */
++#define R_386_RELATIVE	   8		/* Adjust by program base */
++#define R_386_GOTOFF	   9		/* 32 bit offset to GOT */
++#define R_386_GOTPC	   10		/* 32 bit PC relative offset to GOT */
++#define R_386_32PLT	   11
++#define R_386_TLS_TPOFF	   14		/* Offset in static TLS block */
++#define R_386_TLS_IE	   15		/* Address of GOT entry for static TLS
++					   block offset */
++#define R_386_TLS_GOTIE	   16		/* GOT entry for static TLS block
++					   offset */
++#define R_386_TLS_LE	   17		/* Offset relative to static TLS
++					   block */
++#define R_386_TLS_GD	   18		/* Direct 32 bit for GNU version of
++					   general dynamic thread local data */
++#define R_386_TLS_LDM	   19		/* Direct 32 bit for GNU version of
++					   local dynamic thread local data
++					   in LE code */
++#define R_386_16	   20
++#define R_386_PC16	   21
++#define R_386_8		   22
++#define R_386_PC8	   23
++#define R_386_TLS_GD_32	   24		/* Direct 32 bit for general dynamic
++					   thread local data */
++#define R_386_TLS_GD_PUSH  25		/* Tag for pushl in GD TLS code */
++#define R_386_TLS_GD_CALL  26		/* Relocation for call to
++					   __tls_get_addr() */
++#define R_386_TLS_GD_POP   27		/* Tag for popl in GD TLS code */
++#define R_386_TLS_LDM_32   28		/* Direct 32 bit for local dynamic
++					   thread local data in LE code */
++#define R_386_TLS_LDM_PUSH 29		/* Tag for pushl in LDM TLS code */
++#define R_386_TLS_LDM_CALL 30		/* Relocation for call to
++					   __tls_get_addr() in LDM code */
++#define R_386_TLS_LDM_POP  31		/* Tag for popl in LDM TLS code */
++#define R_386_TLS_LDO_32   32		/* Offset relative to TLS block */
++#define R_386_TLS_IE_32	   33		/* GOT entry for negated static TLS
++					   block offset */
++#define R_386_TLS_LE_32	   34		/* Negated offset relative to static
++					   TLS block */
++#define R_386_TLS_DTPMOD32 35		/* ID of module containing symbol */
++#define R_386_TLS_DTPOFF32 36		/* Offset in TLS block */
++#define R_386_TLS_TPOFF32  37		/* Negated offset in static TLS block */
++/* Keep this the last entry.  */
++#define R_386_NUM	   38
++
++/* SUN SPARC specific definitions.  */
++
++/* Legal values for ST_TYPE subfield of st_info (symbol type).  */
++
++#define STT_REGISTER	13		/* Global register reserved to app. */
++
++/* Values for Elf64_Ehdr.e_flags.  */
++
++#define EF_SPARCV9_MM		3
++#define EF_SPARCV9_TSO		0
++#define EF_SPARCV9_PSO		1
++#define EF_SPARCV9_RMO		2
++#define EF_SPARC_LEDATA		0x800000 /* little endian data */
++#define EF_SPARC_EXT_MASK	0xFFFF00
++#define EF_SPARC_32PLUS		0x000100 /* generic V8+ features */
++#define EF_SPARC_SUN_US1	0x000200 /* Sun UltraSPARC1 extensions */
++#define EF_SPARC_HAL_R1		0x000400 /* HAL R1 extensions */
++#define EF_SPARC_SUN_US3	0x000800 /* Sun UltraSPARCIII extensions */
++
++/* SPARC relocs.  */
++
++#define R_SPARC_NONE		0	/* No reloc */
++#define R_SPARC_8		1	/* Direct 8 bit */
++#define R_SPARC_16		2	/* Direct 16 bit */
++#define R_SPARC_32		3	/* Direct 32 bit */
++#define R_SPARC_DISP8		4	/* PC relative 8 bit */
++#define R_SPARC_DISP16		5	/* PC relative 16 bit */
++#define R_SPARC_DISP32		6	/* PC relative 32 bit */
++#define R_SPARC_WDISP30		7	/* PC relative 30 bit shifted */
++#define R_SPARC_WDISP22		8	/* PC relative 22 bit shifted */
++#define R_SPARC_HI22		9	/* High 22 bit */
++#define R_SPARC_22		10	/* Direct 22 bit */
++#define R_SPARC_13		11	/* Direct 13 bit */
++#define R_SPARC_LO10		12	/* Truncated 10 bit */
++#define R_SPARC_GOT10		13	/* Truncated 10 bit GOT entry */
++#define R_SPARC_GOT13		14	/* 13 bit GOT entry */
++#define R_SPARC_GOT22		15	/* 22 bit GOT entry shifted */
++#define R_SPARC_PC10		16	/* PC relative 10 bit truncated */
++#define R_SPARC_PC22		17	/* PC relative 22 bit shifted */
++#define R_SPARC_WPLT30		18	/* 30 bit PC relative PLT address */
++#define R_SPARC_COPY		19	/* Copy symbol at runtime */
++#define R_SPARC_GLOB_DAT	20	/* Create GOT entry */
++#define R_SPARC_JMP_SLOT	21	/* Create PLT entry */
++#define R_SPARC_RELATIVE	22	/* Adjust by program base */
++#define R_SPARC_UA32		23	/* Direct 32 bit unaligned */
++
++/* Additional Sparc64 relocs.  */
++
++#define R_SPARC_PLT32		24	/* Direct 32 bit ref to PLT entry */
++#define R_SPARC_HIPLT22		25	/* High 22 bit PLT entry */
++#define R_SPARC_LOPLT10		26	/* Truncated 10 bit PLT entry */
++#define R_SPARC_PCPLT32		27	/* PC rel 32 bit ref to PLT entry */
++#define R_SPARC_PCPLT22		28	/* PC rel high 22 bit PLT entry */
++#define R_SPARC_PCPLT10		29	/* PC rel trunc 10 bit PLT entry */
++#define R_SPARC_10		30	/* Direct 10 bit */
++#define R_SPARC_11		31	/* Direct 11 bit */
++#define R_SPARC_64		32	/* Direct 64 bit */
++#define R_SPARC_OLO10		33	/* 10bit with secondary 13bit addend */
++#define R_SPARC_HH22		34	/* Top 22 bits of direct 64 bit */
++#define R_SPARC_HM10		35	/* High middle 10 bits of ... */
++#define R_SPARC_LM22		36	/* Low middle 22 bits of ... */
++#define R_SPARC_PC_HH22		37	/* Top 22 bits of pc rel 64 bit */
++#define R_SPARC_PC_HM10		38	/* High middle 10 bit of ... */
++#define R_SPARC_PC_LM22		39	/* Low miggle 22 bits of ... */
++#define R_SPARC_WDISP16		40	/* PC relative 16 bit shifted */
++#define R_SPARC_WDISP19		41	/* PC relative 19 bit shifted */
++#define R_SPARC_7		43	/* Direct 7 bit */
++#define R_SPARC_5		44	/* Direct 5 bit */
++#define R_SPARC_6		45	/* Direct 6 bit */
++#define R_SPARC_DISP64		46	/* PC relative 64 bit */
++#define R_SPARC_PLT64		47	/* Direct 64 bit ref to PLT entry */
++#define R_SPARC_HIX22		48	/* High 22 bit complemented */
++#define R_SPARC_LOX10		49	/* Truncated 11 bit complemented */
++#define R_SPARC_H44		50	/* Direct high 12 of 44 bit */
++#define R_SPARC_M44		51	/* Direct mid 22 of 44 bit */
++#define R_SPARC_L44		52	/* Direct low 10 of 44 bit */
++#define R_SPARC_REGISTER	53	/* Global register usage */
++#define R_SPARC_UA64		54	/* Direct 64 bit unaligned */
++#define R_SPARC_UA16		55	/* Direct 16 bit unaligned */
++#define R_SPARC_TLS_GD_HI22	56
++#define R_SPARC_TLS_GD_LO10	57
++#define R_SPARC_TLS_GD_ADD	58
++#define R_SPARC_TLS_GD_CALL	59
++#define R_SPARC_TLS_LDM_HI22	60
++#define R_SPARC_TLS_LDM_LO10	61
++#define R_SPARC_TLS_LDM_ADD	62
++#define R_SPARC_TLS_LDM_CALL	63
++#define R_SPARC_TLS_LDO_HIX22	64
++#define R_SPARC_TLS_LDO_LOX10	65
++#define R_SPARC_TLS_LDO_ADD	66
++#define R_SPARC_TLS_IE_HI22	67
++#define R_SPARC_TLS_IE_LO10	68
++#define R_SPARC_TLS_IE_LD	69
++#define R_SPARC_TLS_IE_LDX	70
++#define R_SPARC_TLS_IE_ADD	71
++#define R_SPARC_TLS_LE_HIX22	72
++#define R_SPARC_TLS_LE_LOX10	73
++#define R_SPARC_TLS_DTPMOD32	74
++#define R_SPARC_TLS_DTPMOD64	75
++#define R_SPARC_TLS_DTPOFF32	76
++#define R_SPARC_TLS_DTPOFF64	77
++#define R_SPARC_TLS_TPOFF32	78
++#define R_SPARC_TLS_TPOFF64	79
++/* Keep this the last entry.  */
++#define R_SPARC_NUM		80
++
++/* For Sparc64, legal values for d_tag of Elf64_Dyn.  */
++
++#define DT_SPARC_REGISTER 0x70000001
++#define DT_SPARC_NUM	2
++
++/* Bits present in AT_HWCAP, primarily for Sparc32.  */
++
++#define HWCAP_SPARC_FLUSH	1	/* The cpu supports flush insn.  */
++#define HWCAP_SPARC_STBAR	2
++#define HWCAP_SPARC_SWAP	4
++#define HWCAP_SPARC_MULDIV	8
++#define HWCAP_SPARC_V9		16	/* The cpu is v9, so v8plus is ok.  */
++#define HWCAP_SPARC_ULTRA3	32
++
++/* MIPS R3000 specific definitions.  */
++
++/* Legal values for e_flags field of Elf32_Ehdr.  */
++
++#define EF_MIPS_NOREORDER   1		/* A .noreorder directive was used */
++#define EF_MIPS_PIC	    2		/* Contains PIC code */
++#define EF_MIPS_CPIC	    4		/* Uses PIC calling sequence */
++#define EF_MIPS_XGOT	    8
++#define EF_MIPS_64BIT_WHIRL 16
++#define EF_MIPS_ABI2	    32
++#define EF_MIPS_ABI_ON32    64
++#define EF_MIPS_ARCH	    0xf0000000	/* MIPS architecture level */
++
++/* Legal values for MIPS architecture level.  */
++
++#define EF_MIPS_ARCH_1	    0x00000000	/* -mips1 code.  */
++#define EF_MIPS_ARCH_2	    0x10000000	/* -mips2 code.  */
++#define EF_MIPS_ARCH_3	    0x20000000	/* -mips3 code.  */
++#define EF_MIPS_ARCH_4	    0x30000000	/* -mips4 code.  */
++#define EF_MIPS_ARCH_5	    0x40000000	/* -mips5 code.  */
++#define EF_MIPS_ARCH_32	    0x60000000	/* MIPS32 code.  */
++#define EF_MIPS_ARCH_64	    0x70000000	/* MIPS64 code.  */
++
++/* The following are non-official names and should not be used.  */
++
++#define E_MIPS_ARCH_1	  0x00000000	/* -mips1 code.  */
++#define E_MIPS_ARCH_2	  0x10000000	/* -mips2 code.  */
++#define E_MIPS_ARCH_3	  0x20000000	/* -mips3 code.  */
++#define E_MIPS_ARCH_4	  0x30000000	/* -mips4 code.  */
++#define E_MIPS_ARCH_5	  0x40000000	/* -mips5 code.  */
++#define E_MIPS_ARCH_32	  0x60000000	/* MIPS32 code.  */
++#define E_MIPS_ARCH_64	  0x70000000	/* MIPS64 code.  */
++
++/* Special section indices.  */
++
++#define SHN_MIPS_ACOMMON    0xff00	/* Allocated common symbols */
++#define SHN_MIPS_TEXT	    0xff01	/* Allocated text symbols.  */
++#define SHN_MIPS_DATA	    0xff02	/* Allocated data symbols.  */
++#define SHN_MIPS_SCOMMON    0xff03	/* Small common symbols */
++#define SHN_MIPS_SUNDEFINED 0xff04	/* Small undefined symbols */
++
++/* Legal values for sh_type field of Elf32_Shdr.  */
++
++#define SHT_MIPS_LIBLIST       0x70000000 /* Shared objects used in link */
++#define SHT_MIPS_MSYM	       0x70000001
++#define SHT_MIPS_CONFLICT      0x70000002 /* Conflicting symbols */
++#define SHT_MIPS_GPTAB	       0x70000003 /* Global data area sizes */
++#define SHT_MIPS_UCODE	       0x70000004 /* Reserved for SGI/MIPS compilers */
++#define SHT_MIPS_DEBUG	       0x70000005 /* MIPS ECOFF debugging information*/
++#define SHT_MIPS_REGINFO       0x70000006 /* Register usage information */
++#define SHT_MIPS_PACKAGE       0x70000007
++#define SHT_MIPS_PACKSYM       0x70000008
++#define SHT_MIPS_RELD	       0x70000009
++#define SHT_MIPS_IFACE         0x7000000b
++#define SHT_MIPS_CONTENT       0x7000000c
++#define SHT_MIPS_OPTIONS       0x7000000d /* Miscellaneous options.  */
++#define SHT_MIPS_SHDR	       0x70000010
++#define SHT_MIPS_FDESC	       0x70000011
++#define SHT_MIPS_EXTSYM	       0x70000012
++#define SHT_MIPS_DENSE	       0x70000013
++#define SHT_MIPS_PDESC	       0x70000014
++#define SHT_MIPS_LOCSYM	       0x70000015
++#define SHT_MIPS_AUXSYM	       0x70000016
++#define SHT_MIPS_OPTSYM	       0x70000017
++#define SHT_MIPS_LOCSTR	       0x70000018
++#define SHT_MIPS_LINE	       0x70000019
++#define SHT_MIPS_RFDESC	       0x7000001a
++#define SHT_MIPS_DELTASYM      0x7000001b
++#define SHT_MIPS_DELTAINST     0x7000001c
++#define SHT_MIPS_DELTACLASS    0x7000001d
++#define SHT_MIPS_DWARF         0x7000001e /* DWARF debugging information.  */
++#define SHT_MIPS_DELTADECL     0x7000001f
++#define SHT_MIPS_SYMBOL_LIB    0x70000020
++#define SHT_MIPS_EVENTS	       0x70000021 /* Event section.  */
++#define SHT_MIPS_TRANSLATE     0x70000022
++#define SHT_MIPS_PIXIE	       0x70000023
++#define SHT_MIPS_XLATE	       0x70000024
++#define SHT_MIPS_XLATE_DEBUG   0x70000025
++#define SHT_MIPS_WHIRL	       0x70000026
++#define SHT_MIPS_EH_REGION     0x70000027
++#define SHT_MIPS_XLATE_OLD     0x70000028
++#define SHT_MIPS_PDR_EXCEPTION 0x70000029
++
++/* Legal values for sh_flags field of Elf32_Shdr.  */
++
++#define SHF_MIPS_GPREL	 0x10000000	/* Must be part of global data area */
++#define SHF_MIPS_MERGE	 0x20000000
++#define SHF_MIPS_ADDR	 0x40000000
++#define SHF_MIPS_STRINGS 0x80000000
++#define SHF_MIPS_NOSTRIP 0x08000000
++#define SHF_MIPS_LOCAL	 0x04000000
++#define SHF_MIPS_NAMES	 0x02000000
++#define SHF_MIPS_NODUPE	 0x01000000
++
++
++/* Symbol tables.  */
++
++/* MIPS specific values for `st_other'.  */
++#define STO_MIPS_DEFAULT		0x0
++#define STO_MIPS_INTERNAL		0x1
++#define STO_MIPS_HIDDEN			0x2
++#define STO_MIPS_PROTECTED		0x3
++#define STO_MIPS_SC_ALIGN_UNUSED	0xff
++
++/* MIPS specific values for `st_info'.  */
++#define STB_MIPS_SPLIT_COMMON		13
++
++/* Entries found in sections of type SHT_MIPS_GPTAB.  */
++
++typedef union
++{
++  struct
++    {
++      Elf32_Word gt_current_g_value;	/* -G value used for compilation */
++      Elf32_Word gt_unused;		/* Not used */
++    } gt_header;			/* First entry in section */
++  struct
++    {
++      Elf32_Word gt_g_value;		/* If this value were used for -G */
++      Elf32_Word gt_bytes;		/* This many bytes would be used */
++    } gt_entry;				/* Subsequent entries in section */
++} Elf32_gptab;
++
++/* Entry found in sections of type SHT_MIPS_REGINFO.  */
++
++typedef struct
++{
++  Elf32_Word	ri_gprmask;		/* General registers used */
++  Elf32_Word	ri_cprmask[4];		/* Coprocessor registers used */
++  Elf32_Sword	ri_gp_value;		/* $gp register value */
++} Elf32_RegInfo;
++
++/* Entries found in sections of type SHT_MIPS_OPTIONS.  */
++
++typedef struct
++{
++  unsigned char kind;		/* Determines interpretation of the
++				   variable part of descriptor.  */
++  unsigned char size;		/* Size of descriptor, including header.  */
++  Elf32_Section section;	/* Section header index of section affected,
++				   0 for global options.  */
++  Elf32_Word info;		/* Kind-specific information.  */
++} Elf_Options;
++
++/* Values for `kind' field in Elf_Options.  */
++
++#define ODK_NULL	0	/* Undefined.  */
++#define ODK_REGINFO	1	/* Register usage information.  */
++#define ODK_EXCEPTIONS	2	/* Exception processing options.  */
++#define ODK_PAD		3	/* Section padding options.  */
++#define ODK_HWPATCH	4	/* Hardware workarounds performed */
++#define ODK_FILL	5	/* record the fill value used by the linker. */
++#define ODK_TAGS	6	/* reserve space for desktop tools to write. */
++#define ODK_HWAND	7	/* HW workarounds.  'AND' bits when merging. */
++#define ODK_HWOR	8	/* HW workarounds.  'OR' bits when merging.  */
++
++/* Values for `info' in Elf_Options for ODK_EXCEPTIONS entries.  */
++
++#define OEX_FPU_MIN	0x1f	/* FPE's which MUST be enabled.  */
++#define OEX_FPU_MAX	0x1f00	/* FPE's which MAY be enabled.  */
++#define OEX_PAGE0	0x10000	/* page zero must be mapped.  */
++#define OEX_SMM		0x20000	/* Force sequential memory mode?  */
++#define OEX_FPDBUG	0x40000	/* Force floating point debug mode?  */
++#define OEX_PRECISEFP	OEX_FPDBUG
++#define OEX_DISMISS	0x80000	/* Dismiss invalid address faults?  */
++
++#define OEX_FPU_INVAL	0x10
++#define OEX_FPU_DIV0	0x08
++#define OEX_FPU_OFLO	0x04
++#define OEX_FPU_UFLO	0x02
++#define OEX_FPU_INEX	0x01
++
++/* Masks for `info' in Elf_Options for an ODK_HWPATCH entry.  */
++
++#define OHW_R4KEOP	0x1	/* R4000 end-of-page patch.  */
++#define OHW_R8KPFETCH	0x2	/* may need R8000 prefetch patch.  */
++#define OHW_R5KEOP	0x4	/* R5000 end-of-page patch.  */
++#define OHW_R5KCVTL	0x8	/* R5000 cvt.[ds].l bug.  clean=1.  */
++
++#define OPAD_PREFIX	0x1
++#define OPAD_POSTFIX	0x2
++#define OPAD_SYMBOL	0x4
++
++/* Entry found in `.options' section.  */
++
++typedef struct
++{
++  Elf32_Word hwp_flags1;	/* Extra flags.  */
++  Elf32_Word hwp_flags2;	/* Extra flags.  */
++} Elf_Options_Hw;
++
++/* Masks for `info' in ElfOptions for ODK_HWAND and ODK_HWOR entries.  */
++
++#define OHWA0_R4KEOP_CHECKED	0x00000001
++#define OHWA1_R4KEOP_CLEAN	0x00000002
++
++/* MIPS relocs.  */
++
++#define R_MIPS_NONE		0	/* No reloc */
++#define R_MIPS_16		1	/* Direct 16 bit */
++#define R_MIPS_32		2	/* Direct 32 bit */
++#define R_MIPS_REL32		3	/* PC relative 32 bit */
++#define R_MIPS_26		4	/* Direct 26 bit shifted */
++#define R_MIPS_HI16		5	/* High 16 bit */
++#define R_MIPS_LO16		6	/* Low 16 bit */
++#define R_MIPS_GPREL16		7	/* GP relative 16 bit */
++#define R_MIPS_LITERAL		8	/* 16 bit literal entry */
++#define R_MIPS_GOT16		9	/* 16 bit GOT entry */
++#define R_MIPS_PC16		10	/* PC relative 16 bit */
++#define R_MIPS_CALL16		11	/* 16 bit GOT entry for function */
++#define R_MIPS_GPREL32		12	/* GP relative 32 bit */
++
++#define R_MIPS_SHIFT5		16
++#define R_MIPS_SHIFT6		17
++#define R_MIPS_64		18
++#define R_MIPS_GOT_DISP		19
++#define R_MIPS_GOT_PAGE		20
++#define R_MIPS_GOT_OFST		21
++#define R_MIPS_GOT_HI16		22
++#define R_MIPS_GOT_LO16		23
++#define R_MIPS_SUB		24
++#define R_MIPS_INSERT_A		25
++#define R_MIPS_INSERT_B		26
++#define R_MIPS_DELETE		27
++#define R_MIPS_HIGHER		28
++#define R_MIPS_HIGHEST		29
++#define R_MIPS_CALL_HI16	30
++#define R_MIPS_CALL_LO16	31
++#define R_MIPS_SCN_DISP		32
++#define R_MIPS_REL16		33
++#define R_MIPS_ADD_IMMEDIATE	34
++#define R_MIPS_PJUMP		35
++#define R_MIPS_RELGOT		36
++#define R_MIPS_JALR		37
++/* Keep this the last entry.  */
++#define R_MIPS_NUM		38
++
++/* Legal values for p_type field of Elf32_Phdr.  */
++
++#define PT_MIPS_REGINFO	0x70000000	/* Register usage information */
++#define PT_MIPS_RTPROC  0x70000001	/* Runtime procedure table. */
++#define PT_MIPS_OPTIONS 0x70000002
++
++/* Special program header types.  */
++
++#define PF_MIPS_LOCAL	0x10000000
++
++/* Legal values for d_tag field of Elf32_Dyn.  */
++
++#define DT_MIPS_RLD_VERSION  0x70000001	/* Runtime linker interface version */
++#define DT_MIPS_TIME_STAMP   0x70000002	/* Timestamp */
++#define DT_MIPS_ICHECKSUM    0x70000003	/* Checksum */
++#define DT_MIPS_IVERSION     0x70000004	/* Version string (string tbl index) */
++#define DT_MIPS_FLAGS	     0x70000005	/* Flags */
++#define DT_MIPS_BASE_ADDRESS 0x70000006	/* Base address */
++#define DT_MIPS_MSYM	     0x70000007
++#define DT_MIPS_CONFLICT     0x70000008	/* Address of CONFLICT section */
++#define DT_MIPS_LIBLIST	     0x70000009	/* Address of LIBLIST section */
++#define DT_MIPS_LOCAL_GOTNO  0x7000000a	/* Number of local GOT entries */
++#define DT_MIPS_CONFLICTNO   0x7000000b	/* Number of CONFLICT entries */
++#define DT_MIPS_LIBLISTNO    0x70000010	/* Number of LIBLIST entries */
++#define DT_MIPS_SYMTABNO     0x70000011	/* Number of DYNSYM entries */
++#define DT_MIPS_UNREFEXTNO   0x70000012	/* First external DYNSYM */
++#define DT_MIPS_GOTSYM	     0x70000013	/* First GOT entry in DYNSYM */
++#define DT_MIPS_HIPAGENO     0x70000014	/* Number of GOT page table entries */
++#define DT_MIPS_RLD_MAP	     0x70000016	/* Address of run time loader map.  */
++#define DT_MIPS_DELTA_CLASS  0x70000017	/* Delta C++ class definition.  */
++#define DT_MIPS_DELTA_CLASS_NO    0x70000018 /* Number of entries in
++						DT_MIPS_DELTA_CLASS.  */
++#define DT_MIPS_DELTA_INSTANCE    0x70000019 /* Delta C++ class instances.  */
++#define DT_MIPS_DELTA_INSTANCE_NO 0x7000001a /* Number of entries in
++						DT_MIPS_DELTA_INSTANCE.  */
++#define DT_MIPS_DELTA_RELOC  0x7000001b /* Delta relocations.  */
++#define DT_MIPS_DELTA_RELOC_NO 0x7000001c /* Number of entries in
++					     DT_MIPS_DELTA_RELOC.  */
++#define DT_MIPS_DELTA_SYM    0x7000001d /* Delta symbols that Delta
++					   relocations refer to.  */
++#define DT_MIPS_DELTA_SYM_NO 0x7000001e /* Number of entries in
++					   DT_MIPS_DELTA_SYM.  */
++#define DT_MIPS_DELTA_CLASSSYM 0x70000020 /* Delta symbols that hold the
++					     class declaration.  */
++#define DT_MIPS_DELTA_CLASSSYM_NO 0x70000021 /* Number of entries in
++						DT_MIPS_DELTA_CLASSSYM.  */
++#define DT_MIPS_CXX_FLAGS    0x70000022 /* Flags indicating for C++ flavor.  */
++#define DT_MIPS_PIXIE_INIT   0x70000023
++#define DT_MIPS_SYMBOL_LIB   0x70000024
++#define DT_MIPS_LOCALPAGE_GOTIDX 0x70000025
++#define DT_MIPS_LOCAL_GOTIDX 0x70000026
++#define DT_MIPS_HIDDEN_GOTIDX 0x70000027
++#define DT_MIPS_PROTECTED_GOTIDX 0x70000028
++#define DT_MIPS_OPTIONS	     0x70000029 /* Address of .options.  */
++#define DT_MIPS_INTERFACE    0x7000002a /* Address of .interface.  */
++#define DT_MIPS_DYNSTR_ALIGN 0x7000002b
++#define DT_MIPS_INTERFACE_SIZE 0x7000002c /* Size of the .interface section. */
++#define DT_MIPS_RLD_TEXT_RESOLVE_ADDR 0x7000002d /* Address of rld_text_resolve
++						    function stored in GOT.  */
++#define DT_MIPS_PERF_SUFFIX  0x7000002e /* Default suffix of dso to be added
++					   by rld on dlopen() calls.  */
++#define DT_MIPS_COMPACT_SIZE 0x7000002f /* (O32)Size of compact rel section. */
++#define DT_MIPS_GP_VALUE     0x70000030 /* GP value for aux GOTs.  */
++#define DT_MIPS_AUX_DYNAMIC  0x70000031 /* Address of aux .dynamic.  */
++#define DT_MIPS_NUM	     0x32
++
++/* Legal values for DT_MIPS_FLAGS Elf32_Dyn entry.  */
++
++#define RHF_NONE		   0		/* No flags */
++#define RHF_QUICKSTART		   (1 << 0)	/* Use quickstart */
++#define RHF_NOTPOT		   (1 << 1)	/* Hash size not power of 2 */
++#define RHF_NO_LIBRARY_REPLACEMENT (1 << 2)	/* Ignore LD_LIBRARY_PATH */
++#define RHF_NO_MOVE		   (1 << 3)
++#define RHF_SGI_ONLY		   (1 << 4)
++#define RHF_GUARANTEE_INIT	   (1 << 5)
++#define RHF_DELTA_C_PLUS_PLUS	   (1 << 6)
++#define RHF_GUARANTEE_START_INIT   (1 << 7)
++#define RHF_PIXIE		   (1 << 8)
++#define RHF_DEFAULT_DELAY_LOAD	   (1 << 9)
++#define RHF_REQUICKSTART	   (1 << 10)
++#define RHF_REQUICKSTARTED	   (1 << 11)
++#define RHF_CORD		   (1 << 12)
++#define RHF_NO_UNRES_UNDEF	   (1 << 13)
++#define RHF_RLD_ORDER_SAFE	   (1 << 14)
++
++/* Entries found in sections of type SHT_MIPS_LIBLIST.  */
++
++typedef struct
++{
++  Elf32_Word l_name;		/* Name (string table index) */
++  Elf32_Word l_time_stamp;	/* Timestamp */
++  Elf32_Word l_checksum;	/* Checksum */
++  Elf32_Word l_version;		/* Interface version */
++  Elf32_Word l_flags;		/* Flags */
++} Elf32_Lib;
++
++typedef struct
++{
++  Elf64_Word l_name;		/* Name (string table index) */
++  Elf64_Word l_time_stamp;	/* Timestamp */
++  Elf64_Word l_checksum;	/* Checksum */
++  Elf64_Word l_version;		/* Interface version */
++  Elf64_Word l_flags;		/* Flags */
++} Elf64_Lib;
++
++
++/* Legal values for l_flags.  */
++
++#define LL_NONE		  0
++#define LL_EXACT_MATCH	  (1 << 0)	/* Require exact match */
++#define LL_IGNORE_INT_VER (1 << 1)	/* Ignore interface version */
++#define LL_REQUIRE_MINOR  (1 << 2)
++#define LL_EXPORTS	  (1 << 3)
++#define LL_DELAY_LOAD	  (1 << 4)
++#define LL_DELTA	  (1 << 5)
++
++/* Entries found in sections of type SHT_MIPS_CONFLICT.  */
++
++typedef Elf32_Addr Elf32_Conflict;
++
++
++/* HPPA specific definitions.  */
++
++/* Legal values for e_flags field of Elf32_Ehdr.  */
++
++#define EF_PARISC_TRAPNIL	0x00010000 /* Trap nil pointer dereference.  */
++#define EF_PARISC_EXT		0x00020000 /* Program uses arch. extensions. */
++#define EF_PARISC_LSB		0x00040000 /* Program expects little endian. */
++#define EF_PARISC_WIDE		0x00080000 /* Program expects wide mode.  */
++#define EF_PARISC_NO_KABP	0x00100000 /* No kernel assisted branch
++					      prediction.  */
++#define EF_PARISC_LAZYSWAP	0x00400000 /* Allow lazy swapping.  */
++#define EF_PARISC_ARCH		0x0000ffff /* Architecture version.  */
++
++/* Defined values for `e_flags & EF_PARISC_ARCH' are:  */
++
++#define EFA_PARISC_1_0		    0x020b /* PA-RISC 1.0 big-endian.  */
++#define EFA_PARISC_1_1		    0x0210 /* PA-RISC 1.1 big-endian.  */
++#define EFA_PARISC_2_0		    0x0214 /* PA-RISC 2.0 big-endian.  */
++
++/* Additional section indices.  */
++
++#define SHN_PARISC_ANSI_COMMON	0xff00	   /* Section for tentatively declared
++					      symbols in ANSI C.  */
++#define SHN_PARISC_HUGE_COMMON	0xff01	   /* Common blocks in huge model.  */
++
++/* Legal values for sh_type field of Elf32_Shdr.  */
++
++#define SHT_PARISC_EXT		0x70000000 /* Contains product specific ext. */
++#define SHT_PARISC_UNWIND	0x70000001 /* Unwind information.  */
++#define SHT_PARISC_DOC		0x70000002 /* Debug info for optimized code. */
++
++/* Legal values for sh_flags field of Elf32_Shdr.  */
++
++#define SHF_PARISC_SHORT	0x20000000 /* Section with short addressing. */
++#define SHF_PARISC_HUGE		0x40000000 /* Section far from gp.  */
++#define SHF_PARISC_SBP		0x80000000 /* Static branch prediction code. */
++
++/* Legal values for ST_TYPE subfield of st_info (symbol type).  */
++
++#define STT_PARISC_MILLICODE	13	/* Millicode function entry point.  */
++
++#define STT_HP_OPAQUE		(STT_LOOS + 0x1)
++#define STT_HP_STUB		(STT_LOOS + 0x2)
++
++/* HPPA relocs.  */
++
++#define R_PARISC_NONE		0	/* No reloc.  */
++#define R_PARISC_DIR32		1	/* Direct 32-bit reference.  */
++#define R_PARISC_DIR21L		2	/* Left 21 bits of eff. address.  */
++#define R_PARISC_DIR17R		3	/* Right 17 bits of eff. address.  */
++#define R_PARISC_DIR17F		4	/* 17 bits of eff. address.  */
++#define R_PARISC_DIR14R		6	/* Right 14 bits of eff. address.  */
++#define R_PARISC_PCREL32	9	/* 32-bit rel. address.  */
++#define R_PARISC_PCREL21L	10	/* Left 21 bits of rel. address.  */
++#define R_PARISC_PCREL17R	11	/* Right 17 bits of rel. address.  */
++#define R_PARISC_PCREL17F	12	/* 17 bits of rel. address.  */
++#define R_PARISC_PCREL14R	14	/* Right 14 bits of rel. address.  */
++#define R_PARISC_DPREL21L	18	/* Left 21 bits of rel. address.  */
++#define R_PARISC_DPREL14R	22	/* Right 14 bits of rel. address.  */
++#define R_PARISC_GPREL21L	26	/* GP-relative, left 21 bits.  */
++#define R_PARISC_GPREL14R	30	/* GP-relative, right 14 bits.  */
++#define R_PARISC_LTOFF21L	34	/* LT-relative, left 21 bits.  */
++#define R_PARISC_LTOFF14R	38	/* LT-relative, right 14 bits.  */
++#define R_PARISC_SECREL32	41	/* 32 bits section rel. address.  */
++#define R_PARISC_SEGBASE	48	/* No relocation, set segment base.  */
++#define R_PARISC_SEGREL32	49	/* 32 bits segment rel. address.  */
++#define R_PARISC_PLTOFF21L	50	/* PLT rel. address, left 21 bits.  */
++#define R_PARISC_PLTOFF14R	54	/* PLT rel. address, right 14 bits.  */
++#define R_PARISC_LTOFF_FPTR32	57	/* 32 bits LT-rel. function pointer. */
++#define R_PARISC_LTOFF_FPTR21L	58	/* LT-rel. fct ptr, left 21 bits. */
++#define R_PARISC_LTOFF_FPTR14R	62	/* LT-rel. fct ptr, right 14 bits. */
++#define R_PARISC_FPTR64		64	/* 64 bits function address.  */
++#define R_PARISC_PLABEL32	65	/* 32 bits function address.  */
++#define R_PARISC_PCREL64	72	/* 64 bits PC-rel. address.  */
++#define R_PARISC_PCREL22F	74	/* 22 bits PC-rel. address.  */
++#define R_PARISC_PCREL14WR	75	/* PC-rel. address, right 14 bits.  */
++#define R_PARISC_PCREL14DR	76	/* PC rel. address, right 14 bits.  */
++#define R_PARISC_PCREL16F	77	/* 16 bits PC-rel. address.  */
++#define R_PARISC_PCREL16WF	78	/* 16 bits PC-rel. address.  */
++#define R_PARISC_PCREL16DF	79	/* 16 bits PC-rel. address.  */
++#define R_PARISC_DIR64		80	/* 64 bits of eff. address.  */
++#define R_PARISC_DIR14WR	83	/* 14 bits of eff. address.  */
++#define R_PARISC_DIR14DR	84	/* 14 bits of eff. address.  */
++#define R_PARISC_DIR16F		85	/* 16 bits of eff. address.  */
++#define R_PARISC_DIR16WF	86	/* 16 bits of eff. address.  */
++#define R_PARISC_DIR16DF	87	/* 16 bits of eff. address.  */
++#define R_PARISC_GPREL64	88	/* 64 bits of GP-rel. address.  */
++#define R_PARISC_GPREL14WR	91	/* GP-rel. address, right 14 bits.  */
++#define R_PARISC_GPREL14DR	92	/* GP-rel. address, right 14 bits.  */
++#define R_PARISC_GPREL16F	93	/* 16 bits GP-rel. address.  */
++#define R_PARISC_GPREL16WF	94	/* 16 bits GP-rel. address.  */
++#define R_PARISC_GPREL16DF	95	/* 16 bits GP-rel. address.  */
++#define R_PARISC_LTOFF64	96	/* 64 bits LT-rel. address.  */
++#define R_PARISC_LTOFF14WR	99	/* LT-rel. address, right 14 bits.  */
++#define R_PARISC_LTOFF14DR	100	/* LT-rel. address, right 14 bits.  */
++#define R_PARISC_LTOFF16F	101	/* 16 bits LT-rel. address.  */
++#define R_PARISC_LTOFF16WF	102	/* 16 bits LT-rel. address.  */
++#define R_PARISC_LTOFF16DF	103	/* 16 bits LT-rel. address.  */
++#define R_PARISC_SECREL64	104	/* 64 bits section rel. address.  */
++#define R_PARISC_SEGREL64	112	/* 64 bits segment rel. address.  */
++#define R_PARISC_PLTOFF14WR	115	/* PLT-rel. address, right 14 bits.  */
++#define R_PARISC_PLTOFF14DR	116	/* PLT-rel. address, right 14 bits.  */
++#define R_PARISC_PLTOFF16F	117	/* 16 bits LT-rel. address.  */
++#define R_PARISC_PLTOFF16WF	118	/* 16 bits PLT-rel. address.  */
++#define R_PARISC_PLTOFF16DF	119	/* 16 bits PLT-rel. address.  */
++#define R_PARISC_LTOFF_FPTR64	120	/* 64 bits LT-rel. function ptr.  */
++#define R_PARISC_LTOFF_FPTR14WR	123	/* LT-rel. fct. ptr., right 14 bits. */
++#define R_PARISC_LTOFF_FPTR14DR	124	/* LT-rel. fct. ptr., right 14 bits. */
++#define R_PARISC_LTOFF_FPTR16F	125	/* 16 bits LT-rel. function ptr.  */
++#define R_PARISC_LTOFF_FPTR16WF	126	/* 16 bits LT-rel. function ptr.  */
++#define R_PARISC_LTOFF_FPTR16DF	127	/* 16 bits LT-rel. function ptr.  */
++#define R_PARISC_LORESERVE	128
++#define R_PARISC_COPY		128	/* Copy relocation.  */
++#define R_PARISC_IPLT		129	/* Dynamic reloc, imported PLT */
++#define R_PARISC_EPLT		130	/* Dynamic reloc, exported PLT */
++#define R_PARISC_TPREL32	153	/* 32 bits TP-rel. address.  */
++#define R_PARISC_TPREL21L	154	/* TP-rel. address, left 21 bits.  */
++#define R_PARISC_TPREL14R	158	/* TP-rel. address, right 14 bits.  */
++#define R_PARISC_LTOFF_TP21L	162	/* LT-TP-rel. address, left 21 bits. */
++#define R_PARISC_LTOFF_TP14R	166	/* LT-TP-rel. address, right 14 bits.*/
++#define R_PARISC_LTOFF_TP14F	167	/* 14 bits LT-TP-rel. address.  */
++#define R_PARISC_TPREL64	216	/* 64 bits TP-rel. address.  */
++#define R_PARISC_TPREL14WR	219	/* TP-rel. address, right 14 bits.  */
++#define R_PARISC_TPREL14DR	220	/* TP-rel. address, right 14 bits.  */
++#define R_PARISC_TPREL16F	221	/* 16 bits TP-rel. address.  */
++#define R_PARISC_TPREL16WF	222	/* 16 bits TP-rel. address.  */
++#define R_PARISC_TPREL16DF	223	/* 16 bits TP-rel. address.  */
++#define R_PARISC_LTOFF_TP64	224	/* 64 bits LT-TP-rel. address.  */
++#define R_PARISC_LTOFF_TP14WR	227	/* LT-TP-rel. address, right 14 bits.*/
++#define R_PARISC_LTOFF_TP14DR	228	/* LT-TP-rel. address, right 14 bits.*/
++#define R_PARISC_LTOFF_TP16F	229	/* 16 bits LT-TP-rel. address.  */
++#define R_PARISC_LTOFF_TP16WF	230	/* 16 bits LT-TP-rel. address.  */
++#define R_PARISC_LTOFF_TP16DF	231	/* 16 bits LT-TP-rel. address.  */
++#define R_PARISC_HIRESERVE	255
++
++/* Legal values for p_type field of Elf32_Phdr/Elf64_Phdr.  */
++
++#define PT_HP_TLS		(PT_LOOS + 0x0)
++#define PT_HP_CORE_NONE		(PT_LOOS + 0x1)
++#define PT_HP_CORE_VERSION	(PT_LOOS + 0x2)
++#define PT_HP_CORE_KERNEL	(PT_LOOS + 0x3)
++#define PT_HP_CORE_COMM		(PT_LOOS + 0x4)
++#define PT_HP_CORE_PROC		(PT_LOOS + 0x5)
++#define PT_HP_CORE_LOADABLE	(PT_LOOS + 0x6)
++#define PT_HP_CORE_STACK	(PT_LOOS + 0x7)
++#define PT_HP_CORE_SHM		(PT_LOOS + 0x8)
++#define PT_HP_CORE_MMF		(PT_LOOS + 0x9)
++#define PT_HP_PARALLEL		(PT_LOOS + 0x10)
++#define PT_HP_FASTBIND		(PT_LOOS + 0x11)
++#define PT_HP_OPT_ANNOT		(PT_LOOS + 0x12)
++#define PT_HP_HSL_ANNOT		(PT_LOOS + 0x13)
++#define PT_HP_STACK		(PT_LOOS + 0x14)
++
++#define PT_PARISC_ARCHEXT	0x70000000
++#define PT_PARISC_UNWIND	0x70000001
++
++/* Legal values for p_flags field of Elf32_Phdr/Elf64_Phdr.  */
++
++#define PF_PARISC_SBP		0x08000000
++
++#define PF_HP_PAGE_SIZE		0x00100000
++#define PF_HP_FAR_SHARED	0x00200000
++#define PF_HP_NEAR_SHARED	0x00400000
++#define PF_HP_CODE		0x01000000
++#define PF_HP_MODIFY		0x02000000
++#define PF_HP_LAZYSWAP		0x04000000
++#define PF_HP_SBP		0x08000000
++
++
++/* Alpha specific definitions.  */
++
++/* Legal values for e_flags field of Elf64_Ehdr.  */
++
++#define EF_ALPHA_32BIT		1	/* All addresses must be < 2GB.  */
++#define EF_ALPHA_CANRELAX	2	/* Relocations for relaxing exist.  */
++
++/* Legal values for sh_type field of Elf64_Shdr.  */
++
++/* These two are primarily concerned with ECOFF debugging info.  */
++#define SHT_ALPHA_DEBUG		0x70000001
++#define SHT_ALPHA_REGINFO	0x70000002
++
++/* Legal values for sh_flags field of Elf64_Shdr.  */
++
++#define SHF_ALPHA_GPREL		0x10000000
++
++/* Legal values for st_other field of Elf64_Sym.  */
++#define STO_ALPHA_NOPV		0x80	/* No PV required.  */
++#define STO_ALPHA_STD_GPLOAD	0x88	/* PV only used for initial ldgp.  */
++
++/* Alpha relocs.  */
++
++#define R_ALPHA_NONE		0	/* No reloc */
++#define R_ALPHA_REFLONG		1	/* Direct 32 bit */
++#define R_ALPHA_REFQUAD		2	/* Direct 64 bit */
++#define R_ALPHA_GPREL32		3	/* GP relative 32 bit */
++#define R_ALPHA_LITERAL		4	/* GP relative 16 bit w/optimization */
++#define R_ALPHA_LITUSE		5	/* Optimization hint for LITERAL */
++#define R_ALPHA_GPDISP		6	/* Add displacement to GP */
++#define R_ALPHA_BRADDR		7	/* PC+4 relative 23 bit shifted */
++#define R_ALPHA_HINT		8	/* PC+4 relative 16 bit shifted */
++#define R_ALPHA_SREL16		9	/* PC relative 16 bit */
++#define R_ALPHA_SREL32		10	/* PC relative 32 bit */
++#define R_ALPHA_SREL64		11	/* PC relative 64 bit */
++#define R_ALPHA_GPRELHIGH	17	/* GP relative 32 bit, high 16 bits */
++#define R_ALPHA_GPRELLOW	18	/* GP relative 32 bit, low 16 bits */
++#define R_ALPHA_GPREL16		19	/* GP relative 16 bit */
++#define R_ALPHA_COPY		24	/* Copy symbol at runtime */
++#define R_ALPHA_GLOB_DAT	25	/* Create GOT entry */
++#define R_ALPHA_JMP_SLOT	26	/* Create PLT entry */
++#define R_ALPHA_RELATIVE	27	/* Adjust by program base */
++#define R_ALPHA_TLS_GD_HI	28
++#define R_ALPHA_TLSGD		29
++#define R_ALPHA_TLS_LDM		30
++#define R_ALPHA_DTPMOD64	31
++#define R_ALPHA_GOTDTPREL	32
++#define R_ALPHA_DTPREL64	33
++#define R_ALPHA_DTPRELHI	34
++#define R_ALPHA_DTPRELLO	35
++#define R_ALPHA_DTPREL16	36
++#define R_ALPHA_GOTTPREL	37
++#define R_ALPHA_TPREL64		38
++#define R_ALPHA_TPRELHI		39
++#define R_ALPHA_TPRELLO		40
++#define R_ALPHA_TPREL16		41
++/* Keep this the last entry.  */
++#define R_ALPHA_NUM		46
++
++/* Magic values of the LITUSE relocation addend.  */
++#define LITUSE_ALPHA_ADDR	0
++#define LITUSE_ALPHA_BASE	1
++#define LITUSE_ALPHA_BYTOFF	2
++#define LITUSE_ALPHA_JSR	3
++#define LITUSE_ALPHA_TLS_GD	4
++#define LITUSE_ALPHA_TLS_LDM	5
++
++
++/* PowerPC specific declarations */
++
++/* Values for Elf32/64_Ehdr.e_flags.  */
++#define EF_PPC_EMB		0x80000000	/* PowerPC embedded flag */
++
++/* Cygnus local bits below */
++#define EF_PPC_RELOCATABLE	0x00010000	/* PowerPC -mrelocatable flag*/
++#define EF_PPC_RELOCATABLE_LIB	0x00008000	/* PowerPC -mrelocatable-lib
++						   flag */
++
++/* PowerPC relocations defined by the ABIs */
++#define R_PPC_NONE		0
++#define R_PPC_ADDR32		1	/* 32bit absolute address */
++#define R_PPC_ADDR24		2	/* 26bit address, 2 bits ignored.  */
++#define R_PPC_ADDR16		3	/* 16bit absolute address */
++#define R_PPC_ADDR16_LO		4	/* lower 16bit of absolute address */
++#define R_PPC_ADDR16_HI		5	/* high 16bit of absolute address */
++#define R_PPC_ADDR16_HA		6	/* adjusted high 16bit */
++#define R_PPC_ADDR14		7	/* 16bit address, 2 bits ignored */
++#define R_PPC_ADDR14_BRTAKEN	8
++#define R_PPC_ADDR14_BRNTAKEN	9
++#define R_PPC_REL24		10	/* PC relative 26 bit */
++#define R_PPC_REL14		11	/* PC relative 16 bit */
++#define R_PPC_REL14_BRTAKEN	12
++#define R_PPC_REL14_BRNTAKEN	13
++#define R_PPC_GOT16		14
++#define R_PPC_GOT16_LO		15
++#define R_PPC_GOT16_HI		16
++#define R_PPC_GOT16_HA		17
++#define R_PPC_PLTREL24		18
++#define R_PPC_COPY		19
++#define R_PPC_GLOB_DAT		20
++#define R_PPC_JMP_SLOT		21
++#define R_PPC_RELATIVE		22
++#define R_PPC_LOCAL24PC		23
++#define R_PPC_UADDR32		24
++#define R_PPC_UADDR16		25
++#define R_PPC_REL32		26
++#define R_PPC_PLT32		27
++#define R_PPC_PLTREL32		28
++#define R_PPC_PLT16_LO		29
++#define R_PPC_PLT16_HI		30
++#define R_PPC_PLT16_HA		31
++#define R_PPC_SDAREL16		32
++#define R_PPC_SECTOFF		33
++#define R_PPC_SECTOFF_LO	34
++#define R_PPC_SECTOFF_HI	35
++#define R_PPC_SECTOFF_HA	36
++
++/* PowerPC relocations defined for the TLS access ABI.  */
++#define R_PPC_TLS		67 /* none	(sym+add)@tls */
++#define R_PPC_DTPMOD32		68 /* word32	(sym+add)@dtpmod */
++#define R_PPC_TPREL16		69 /* half16*	(sym+add)@tprel */
++#define R_PPC_TPREL16_LO	70 /* half16	(sym+add)@tprel@l */
++#define R_PPC_TPREL16_HI	71 /* half16	(sym+add)@tprel@h */
++#define R_PPC_TPREL16_HA	72 /* half16	(sym+add)@tprel@ha */
++#define R_PPC_TPREL32		73 /* word32	(sym+add)@tprel */
++#define R_PPC_DTPREL16		74 /* half16*	(sym+add)@dtprel */
++#define R_PPC_DTPREL16_LO	75 /* half16	(sym+add)@dtprel@l */
++#define R_PPC_DTPREL16_HI	76 /* half16	(sym+add)@dtprel@h */
++#define R_PPC_DTPREL16_HA	77 /* half16	(sym+add)@dtprel@ha */
++#define R_PPC_DTPREL32		78 /* word32	(sym+add)@dtprel */
++#define R_PPC_GOT_TLSGD16	79 /* half16*	(sym+add)@got@tlsgd */
++#define R_PPC_GOT_TLSGD16_LO	80 /* half16	(sym+add)@got@tlsgd@l */
++#define R_PPC_GOT_TLSGD16_HI	81 /* half16	(sym+add)@got@tlsgd@h */
++#define R_PPC_GOT_TLSGD16_HA	82 /* half16	(sym+add)@got@tlsgd@ha */
++#define R_PPC_GOT_TLSLD16	83 /* half16*	(sym+add)@got@tlsld */
++#define R_PPC_GOT_TLSLD16_LO	84 /* half16	(sym+add)@got@tlsld@l */
++#define R_PPC_GOT_TLSLD16_HI	85 /* half16	(sym+add)@got@tlsld@h */
++#define R_PPC_GOT_TLSLD16_HA	86 /* half16	(sym+add)@got@tlsld@ha */
++#define R_PPC_GOT_TPREL16	87 /* half16*	(sym+add)@got@tprel */
++#define R_PPC_GOT_TPREL16_LO	88 /* half16	(sym+add)@got@tprel@l */
++#define R_PPC_GOT_TPREL16_HI	89 /* half16	(sym+add)@got@tprel@h */
++#define R_PPC_GOT_TPREL16_HA	90 /* half16	(sym+add)@got@tprel@ha */
++#define R_PPC_GOT_DTPREL16	91 /* half16*	(sym+add)@got@dtprel */
++#define R_PPC_GOT_DTPREL16_LO	92 /* half16*	(sym+add)@got@dtprel@l */
++#define R_PPC_GOT_DTPREL16_HI	93 /* half16*	(sym+add)@got@dtprel@h */
++#define R_PPC_GOT_DTPREL16_HA	94 /* half16*	(sym+add)@got@dtprel@ha */
++
++/* Keep this the last entry.  */
++#define R_PPC_NUM		95
++
++/* The remaining relocs are from the Embedded ELF ABI, and are not
++   in the SVR4 ELF ABI.  */
++#define R_PPC_EMB_NADDR32	101
++#define R_PPC_EMB_NADDR16	102
++#define R_PPC_EMB_NADDR16_LO	103
++#define R_PPC_EMB_NADDR16_HI	104
++#define R_PPC_EMB_NADDR16_HA	105
++#define R_PPC_EMB_SDAI16	106
++#define R_PPC_EMB_SDA2I16	107
++#define R_PPC_EMB_SDA2REL	108
++#define R_PPC_EMB_SDA21		109	/* 16 bit offset in SDA */
++#define R_PPC_EMB_MRKREF	110
++#define R_PPC_EMB_RELSEC16	111
++#define R_PPC_EMB_RELST_LO	112
++#define R_PPC_EMB_RELST_HI	113
++#define R_PPC_EMB_RELST_HA	114
++#define R_PPC_EMB_BIT_FLD	115
++#define R_PPC_EMB_RELSDA	116	/* 16 bit relative offset in SDA */
++
++/* Diab tool relocations.  */
++#define R_PPC_DIAB_SDA21_LO	180	/* like EMB_SDA21, but lower 16 bit */
++#define R_PPC_DIAB_SDA21_HI	181	/* like EMB_SDA21, but high 16 bit */
++#define R_PPC_DIAB_SDA21_HA	182	/* like EMB_SDA21, adjusted high 16 */
++#define R_PPC_DIAB_RELSDA_LO	183	/* like EMB_RELSDA, but lower 16 bit */
++#define R_PPC_DIAB_RELSDA_HI	184	/* like EMB_RELSDA, but high 16 bit */
++#define R_PPC_DIAB_RELSDA_HA	185	/* like EMB_RELSDA, adjusted high 16 */
++
++/* This is a phony reloc to handle any old fashioned TOC16 references
++   that may still be in object files.  */
++#define R_PPC_TOC16		255
++
++
++/* PowerPC64 relocations defined by the ABIs */
++#define R_PPC64_NONE		R_PPC_NONE
++#define R_PPC64_ADDR32		R_PPC_ADDR32 /* 32bit absolute address */
++#define R_PPC64_ADDR24		R_PPC_ADDR24 /* 26bit address, word aligned */
++#define R_PPC64_ADDR16		R_PPC_ADDR16 /* 16bit absolute address */
++#define R_PPC64_ADDR16_LO	R_PPC_ADDR16_LO	/* lower 16bits of address */
++#define R_PPC64_ADDR16_HI	R_PPC_ADDR16_HI	/* high 16bits of address. */
++#define R_PPC64_ADDR16_HA	R_PPC_ADDR16_HA /* adjusted high 16bits.  */
++#define R_PPC64_ADDR14		R_PPC_ADDR14 /* 16bit address, word aligned */
++#define R_PPC64_ADDR14_BRTAKEN	R_PPC_ADDR14_BRTAKEN
++#define R_PPC64_ADDR14_BRNTAKEN	R_PPC_ADDR14_BRNTAKEN
++#define R_PPC64_REL24		R_PPC_REL24 /* PC-rel. 26 bit, word aligned */
++#define R_PPC64_REL14		R_PPC_REL14 /* PC relative 16 bit */
++#define R_PPC64_REL14_BRTAKEN	R_PPC_REL14_BRTAKEN
++#define R_PPC64_REL14_BRNTAKEN	R_PPC_REL14_BRNTAKEN
++#define R_PPC64_GOT16		R_PPC_GOT16
++#define R_PPC64_GOT16_LO	R_PPC_GOT16_LO
++#define R_PPC64_GOT16_HI	R_PPC_GOT16_HI
++#define R_PPC64_GOT16_HA	R_PPC_GOT16_HA
++
++#define R_PPC64_COPY		R_PPC_COPY
++#define R_PPC64_GLOB_DAT	R_PPC_GLOB_DAT
++#define R_PPC64_JMP_SLOT	R_PPC_JMP_SLOT
++#define R_PPC64_RELATIVE	R_PPC_RELATIVE
++
++#define R_PPC64_UADDR32		R_PPC_UADDR32
++#define R_PPC64_UADDR16		R_PPC_UADDR16
++#define R_PPC64_REL32		R_PPC_REL32
++#define R_PPC64_PLT32		R_PPC_PLT32
++#define R_PPC64_PLTREL32	R_PPC_PLTREL32
++#define R_PPC64_PLT16_LO	R_PPC_PLT16_LO
++#define R_PPC64_PLT16_HI	R_PPC_PLT16_HI
++#define R_PPC64_PLT16_HA	R_PPC_PLT16_HA
++
++#define R_PPC64_SECTOFF		R_PPC_SECTOFF
++#define R_PPC64_SECTOFF_LO	R_PPC_SECTOFF_LO
++#define R_PPC64_SECTOFF_HI	R_PPC_SECTOFF_HI
++#define R_PPC64_SECTOFF_HA	R_PPC_SECTOFF_HA
++#define R_PPC64_ADDR30		37 /* word30 (S + A - P) >> 2 */
++#define R_PPC64_ADDR64		38 /* doubleword64 S + A */
++#define R_PPC64_ADDR16_HIGHER	39 /* half16 #higher(S + A) */
++#define R_PPC64_ADDR16_HIGHERA	40 /* half16 #highera(S + A) */
++#define R_PPC64_ADDR16_HIGHEST	41 /* half16 #highest(S + A) */
++#define R_PPC64_ADDR16_HIGHESTA	42 /* half16 #highesta(S + A) */
++#define R_PPC64_UADDR64		43 /* doubleword64 S + A */
++#define R_PPC64_REL64		44 /* doubleword64 S + A - P */
++#define R_PPC64_PLT64		45 /* doubleword64 L + A */
++#define R_PPC64_PLTREL64	46 /* doubleword64 L + A - P */
++#define R_PPC64_TOC16		47 /* half16* S + A - .TOC */
++#define R_PPC64_TOC16_LO	48 /* half16 #lo(S + A - .TOC.) */
++#define R_PPC64_TOC16_HI	49 /* half16 #hi(S + A - .TOC.) */
++#define R_PPC64_TOC16_HA	50 /* half16 #ha(S + A - .TOC.) */
++#define R_PPC64_TOC		51 /* doubleword64 .TOC */
++#define R_PPC64_PLTGOT16	52 /* half16* M + A */
++#define R_PPC64_PLTGOT16_LO	53 /* half16 #lo(M + A) */
++#define R_PPC64_PLTGOT16_HI	54 /* half16 #hi(M + A) */
++#define R_PPC64_PLTGOT16_HA	55 /* half16 #ha(M + A) */
++
++#define R_PPC64_ADDR16_DS	56 /* half16ds* (S + A) >> 2 */
++#define R_PPC64_ADDR16_LO_DS	57 /* half16ds  #lo(S + A) >> 2 */
++#define R_PPC64_GOT16_DS	58 /* half16ds* (G + A) >> 2 */
++#define R_PPC64_GOT16_LO_DS	59 /* half16ds  #lo(G + A) >> 2 */
++#define R_PPC64_PLT16_LO_DS	60 /* half16ds  #lo(L + A) >> 2 */
++#define R_PPC64_SECTOFF_DS	61 /* half16ds* (R + A) >> 2 */
++#define R_PPC64_SECTOFF_LO_DS	62 /* half16ds  #lo(R + A) >> 2 */
++#define R_PPC64_TOC16_DS	63 /* half16ds* (S + A - .TOC.) >> 2 */
++#define R_PPC64_TOC16_LO_DS	64 /* half16ds  #lo(S + A - .TOC.) >> 2 */
++#define R_PPC64_PLTGOT16_DS	65 /* half16ds* (M + A) >> 2 */
++#define R_PPC64_PLTGOT16_LO_DS	66 /* half16ds  #lo(M + A) >> 2 */
++
++/* PowerPC64 relocations defined for the TLS access ABI.  */
++#define R_PPC64_TLS		67 /* none	(sym+add)@tls */
++#define R_PPC64_DTPMOD64	68 /* doubleword64 (sym+add)@dtpmod */
++#define R_PPC64_TPREL16		69 /* half16*	(sym+add)@tprel */
++#define R_PPC64_TPREL16_LO	70 /* half16	(sym+add)@tprel@l */
++#define R_PPC64_TPREL16_HI	71 /* half16	(sym+add)@tprel@h */
++#define R_PPC64_TPREL16_HA	72 /* half16	(sym+add)@tprel@ha */
++#define R_PPC64_TPREL64		73 /* doubleword64 (sym+add)@tprel */
++#define R_PPC64_DTPREL16	74 /* half16*	(sym+add)@dtprel */
++#define R_PPC64_DTPREL16_LO	75 /* half16	(sym+add)@dtprel@l */
++#define R_PPC64_DTPREL16_HI	76 /* half16	(sym+add)@dtprel@h */
++#define R_PPC64_DTPREL16_HA	77 /* half16	(sym+add)@dtprel@ha */
++#define R_PPC64_DTPREL64	78 /* doubleword64 (sym+add)@dtprel */
++#define R_PPC64_GOT_TLSGD16	79 /* half16*	(sym+add)@got@tlsgd */
++#define R_PPC64_GOT_TLSGD16_LO	80 /* half16	(sym+add)@got@tlsgd@l */
++#define R_PPC64_GOT_TLSGD16_HI	81 /* half16	(sym+add)@got@tlsgd@h */
++#define R_PPC64_GOT_TLSGD16_HA	82 /* half16	(sym+add)@got@tlsgd@ha */
++#define R_PPC64_GOT_TLSLD16	83 /* half16*	(sym+add)@got@tlsld */
++#define R_PPC64_GOT_TLSLD16_LO	84 /* half16	(sym+add)@got@tlsld@l */
++#define R_PPC64_GOT_TLSLD16_HI	85 /* half16	(sym+add)@got@tlsld@h */
++#define R_PPC64_GOT_TLSLD16_HA	86 /* half16	(sym+add)@got@tlsld@ha */
++#define R_PPC64_GOT_TPREL16_DS	87 /* half16ds*	(sym+add)@got@tprel */
++#define R_PPC64_GOT_TPREL16_LO_DS 88 /* half16ds (sym+add)@got@tprel@l */
++#define R_PPC64_GOT_TPREL16_HI	89 /* half16	(sym+add)@got@tprel@h */
++#define R_PPC64_GOT_TPREL16_HA	90 /* half16	(sym+add)@got@tprel@ha */
++#define R_PPC64_GOT_DTPREL16_DS	91 /* half16ds*	(sym+add)@got@dtprel */
++#define R_PPC64_GOT_DTPREL16_LO_DS 92 /* half16ds (sym+add)@got@dtprel@l */
++#define R_PPC64_GOT_DTPREL16_HI	93 /* half16	(sym+add)@got@dtprel@h */
++#define R_PPC64_GOT_DTPREL16_HA	94 /* half16	(sym+add)@got@dtprel@ha */
++#define R_PPC64_TPREL16_DS	95 /* half16ds*	(sym+add)@tprel */
++#define R_PPC64_TPREL16_LO_DS	96 /* half16ds	(sym+add)@tprel@l */
++#define R_PPC64_TPREL16_HIGHER	97 /* half16	(sym+add)@tprel@higher */
++#define R_PPC64_TPREL16_HIGHERA	98 /* half16	(sym+add)@tprel@highera */
++#define R_PPC64_TPREL16_HIGHEST	99 /* half16	(sym+add)@tprel@highest */
++#define R_PPC64_TPREL16_HIGHESTA 100 /* half16	(sym+add)@tprel@highesta */
++#define R_PPC64_DTPREL16_DS	101 /* half16ds* (sym+add)@dtprel */
++#define R_PPC64_DTPREL16_LO_DS	102 /* half16ds	(sym+add)@dtprel@l */
++#define R_PPC64_DTPREL16_HIGHER	103 /* half16	(sym+add)@dtprel@higher */
++#define R_PPC64_DTPREL16_HIGHERA 104 /* half16	(sym+add)@dtprel@highera */
++#define R_PPC64_DTPREL16_HIGHEST 105 /* half16	(sym+add)@dtprel@highest */
++#define R_PPC64_DTPREL16_HIGHESTA 106 /* half16	(sym+add)@dtprel@highesta */
++
++/* Keep this the last entry.  */
++#define R_PPC64_NUM		107
++
++/* PowerPC64 specific values for the Dyn d_tag field.  */
++#define DT_PPC64_GLINK  (DT_LOPROC + 0)
++#define DT_PPC64_NUM    1
++
++
++/* ARM specific declarations */
++
++/* Processor specific flags for the ELF header e_flags field.  */
++#define EF_ARM_RELEXEC     0x01
++#define EF_ARM_HASENTRY    0x02
++#define EF_ARM_INTERWORK   0x04
++#define EF_ARM_APCS_26     0x08
++#define EF_ARM_APCS_FLOAT  0x10
++#define EF_ARM_PIC         0x20
++#define EF_ARM_ALIGN8      0x40		/* 8-bit structure alignment is in use */
++#define EF_ARM_NEW_ABI     0x80
++#define EF_ARM_OLD_ABI     0x100
++
++/* Other constants defined in the ARM ELF spec. version B-01.  */
++/* NB. These conflict with values defined above.  */
++#define EF_ARM_SYMSARESORTED	0x04
++#define EF_ARM_DYNSYMSUSESEGIDX 0x08
++#define EF_ARM_MAPSYMSFIRST	0x10
++#define EF_ARM_EABIMASK		0XFF000000
++
++#define EF_ARM_EABI_VERSION(flags) ((flags) & EF_ARM_EABIMASK)
++#define EF_ARM_EABI_UNKNOWN  0x00000000
++#define EF_ARM_EABI_VER1     0x01000000
++#define EF_ARM_EABI_VER2     0x02000000
++
++/* Additional symbol types for Thumb */
++#define STT_ARM_TFUNC      0xd
++
++/* ARM-specific values for sh_flags */
++#define SHF_ARM_ENTRYSECT  0x10000000   /* Section contains an entry point */
++#define SHF_ARM_COMDEF     0x80000000   /* Section may be multiply defined
++					   in the input to a link step */
++
++/* ARM-specific program header flags */
++#define PF_ARM_SB          0x10000000   /* Segment contains the location
++					   addressed by the static base */
++
++/* ARM relocs.  */
++#define R_ARM_NONE		0	/* No reloc */
++#define R_ARM_PC24		1	/* PC relative 26 bit branch */
++#define R_ARM_ABS32		2	/* Direct 32 bit  */
++#define R_ARM_REL32		3	/* PC relative 32 bit */
++#define R_ARM_PC13		4
++#define R_ARM_ABS16		5	/* Direct 16 bit */
++#define R_ARM_ABS12		6	/* Direct 12 bit */
++#define R_ARM_THM_ABS5		7
++#define R_ARM_ABS8		8	/* Direct 8 bit */
++#define R_ARM_SBREL32		9
++#define R_ARM_THM_PC22		10
++#define R_ARM_THM_PC8		11
++#define R_ARM_AMP_VCALL9	12
++#define R_ARM_SWI24		13
++#define R_ARM_THM_SWI8		14
++#define R_ARM_XPC25		15
++#define R_ARM_THM_XPC22		16
++#define R_ARM_COPY		20	/* Copy symbol at runtime */
++#define R_ARM_GLOB_DAT		21	/* Create GOT entry */
++#define R_ARM_JUMP_SLOT		22	/* Create PLT entry */
++#define R_ARM_RELATIVE		23	/* Adjust by program base */
++#define R_ARM_GOTOFF		24	/* 32 bit offset to GOT */
++#define R_ARM_GOTPC		25	/* 32 bit PC relative offset to GOT */
++#define R_ARM_GOT32		26	/* 32 bit GOT entry */
++#define R_ARM_PLT32		27	/* 32 bit PLT address */
++#define R_ARM_ALU_PCREL_7_0	32
++#define R_ARM_ALU_PCREL_15_8	33
++#define R_ARM_ALU_PCREL_23_15	34
++#define R_ARM_LDR_SBREL_11_0	35
++#define R_ARM_ALU_SBREL_19_12	36
++#define R_ARM_ALU_SBREL_27_20	37
++#define R_ARM_GNU_VTENTRY	100
++#define R_ARM_GNU_VTINHERIT	101
++#define R_ARM_THM_PC11		102	/* thumb unconditional branch */
++#define R_ARM_THM_PC9		103	/* thumb conditional branch */
++#define R_ARM_RXPC25		249
++#define R_ARM_RSBREL32		250
++#define R_ARM_THM_RPC22		251
++#define R_ARM_RREL32		252
++#define R_ARM_RABS22		253
++#define R_ARM_RPC24		254
++#define R_ARM_RBASE		255
++/* Keep this the last entry.  */
++#define R_ARM_NUM		256
++
++/* IA-64 specific declarations.  */
++
++/* Processor specific flags for the Ehdr e_flags field.  */
++#define EF_IA_64_MASKOS		0x0000000f	/* os-specific flags */
++#define EF_IA_64_ABI64		0x00000010	/* 64-bit ABI */
++#define EF_IA_64_ARCH		0xff000000	/* arch. version mask */
++
++/* Processor specific values for the Phdr p_type field.  */
++#define PT_IA_64_ARCHEXT	(PT_LOPROC + 0)	/* arch extension bits */
++#define PT_IA_64_UNWIND		(PT_LOPROC + 1)	/* ia64 unwind bits */
++
++/* Processor specific flags for the Phdr p_flags field.  */
++#define PF_IA_64_NORECOV	0x80000000	/* spec insns w/o recovery */
++
++/* Processor specific values for the Shdr sh_type field.  */
++#define SHT_IA_64_EXT		(SHT_LOPROC + 0) /* extension bits */
++#define SHT_IA_64_UNWIND	(SHT_LOPROC + 1) /* unwind bits */
++
++/* Processor specific flags for the Shdr sh_flags field.  */
++#define SHF_IA_64_SHORT		0x10000000	/* section near gp */
++#define SHF_IA_64_NORECOV	0x20000000	/* spec insns w/o recovery */
++
++/* Processor specific values for the Dyn d_tag field.  */
++#define DT_IA_64_PLT_RESERVE	(DT_LOPROC + 0)
++#define DT_IA_64_NUM		1
++
++/* IA-64 relocations.  */
++#define R_IA64_NONE		0x00	/* none */
++#define R_IA64_IMM14		0x21	/* symbol + addend, add imm14 */
++#define R_IA64_IMM22		0x22	/* symbol + addend, add imm22 */
++#define R_IA64_IMM64		0x23	/* symbol + addend, mov imm64 */
++#define R_IA64_DIR32MSB		0x24	/* symbol + addend, data4 MSB */
++#define R_IA64_DIR32LSB		0x25	/* symbol + addend, data4 LSB */
++#define R_IA64_DIR64MSB		0x26	/* symbol + addend, data8 MSB */
++#define R_IA64_DIR64LSB		0x27	/* symbol + addend, data8 LSB */
++#define R_IA64_GPREL22		0x2a	/* @gprel(sym + add), add imm22 */
++#define R_IA64_GPREL64I		0x2b	/* @gprel(sym + add), mov imm64 */
++#define R_IA64_GPREL32MSB	0x2c	/* @gprel(sym + add), data4 MSB */
++#define R_IA64_GPREL32LSB	0x2d	/* @gprel(sym + add), data4 LSB */
++#define R_IA64_GPREL64MSB	0x2e	/* @gprel(sym + add), data8 MSB */
++#define R_IA64_GPREL64LSB	0x2f	/* @gprel(sym + add), data8 LSB */
++#define R_IA64_LTOFF22		0x32	/* @ltoff(sym + add), add imm22 */
++#define R_IA64_LTOFF64I		0x33	/* @ltoff(sym + add), mov imm64 */
++#define R_IA64_PLTOFF22		0x3a	/* @pltoff(sym + add), add imm22 */
++#define R_IA64_PLTOFF64I	0x3b	/* @pltoff(sym + add), mov imm64 */
++#define R_IA64_PLTOFF64MSB	0x3e	/* @pltoff(sym + add), data8 MSB */
++#define R_IA64_PLTOFF64LSB	0x3f	/* @pltoff(sym + add), data8 LSB */
++#define R_IA64_FPTR64I		0x43	/* @fptr(sym + add), mov imm64 */
++#define R_IA64_FPTR32MSB	0x44	/* @fptr(sym + add), data4 MSB */
++#define R_IA64_FPTR32LSB	0x45	/* @fptr(sym + add), data4 LSB */
++#define R_IA64_FPTR64MSB	0x46	/* @fptr(sym + add), data8 MSB */
++#define R_IA64_FPTR64LSB	0x47	/* @fptr(sym + add), data8 LSB */
++#define R_IA64_PCREL60B		0x48	/* @pcrel(sym + add), brl */
++#define R_IA64_PCREL21B		0x49	/* @pcrel(sym + add), ptb, call */
++#define R_IA64_PCREL21M		0x4a	/* @pcrel(sym + add), chk.s */
++#define R_IA64_PCREL21F		0x4b	/* @pcrel(sym + add), fchkf */
++#define R_IA64_PCREL32MSB	0x4c	/* @pcrel(sym + add), data4 MSB */
++#define R_IA64_PCREL32LSB	0x4d	/* @pcrel(sym + add), data4 LSB */
++#define R_IA64_PCREL64MSB	0x4e	/* @pcrel(sym + add), data8 MSB */
++#define R_IA64_PCREL64LSB	0x4f	/* @pcrel(sym + add), data8 LSB */
++#define R_IA64_LTOFF_FPTR22	0x52	/* @ltoff(@fptr(s+a)), imm22 */
++#define R_IA64_LTOFF_FPTR64I	0x53	/* @ltoff(@fptr(s+a)), imm64 */
++#define R_IA64_LTOFF_FPTR32MSB	0x54	/* @ltoff(@fptr(s+a)), data4 MSB */
++#define R_IA64_LTOFF_FPTR32LSB	0x55	/* @ltoff(@fptr(s+a)), data4 LSB */
++#define R_IA64_LTOFF_FPTR64MSB	0x56	/* @ltoff(@fptr(s+a)), data8 MSB */
++#define R_IA64_LTOFF_FPTR64LSB	0x57	/* @ltoff(@fptr(s+a)), data8 LSB */
++#define R_IA64_SEGREL32MSB	0x5c	/* @segrel(sym + add), data4 MSB */
++#define R_IA64_SEGREL32LSB	0x5d	/* @segrel(sym + add), data4 LSB */
++#define R_IA64_SEGREL64MSB	0x5e	/* @segrel(sym + add), data8 MSB */
++#define R_IA64_SEGREL64LSB	0x5f	/* @segrel(sym + add), data8 LSB */
++#define R_IA64_SECREL32MSB	0x64	/* @secrel(sym + add), data4 MSB */
++#define R_IA64_SECREL32LSB	0x65	/* @secrel(sym + add), data4 LSB */
++#define R_IA64_SECREL64MSB	0x66	/* @secrel(sym + add), data8 MSB */
++#define R_IA64_SECREL64LSB	0x67	/* @secrel(sym + add), data8 LSB */
++#define R_IA64_REL32MSB		0x6c	/* data 4 + REL */
++#define R_IA64_REL32LSB		0x6d	/* data 4 + REL */
++#define R_IA64_REL64MSB		0x6e	/* data 8 + REL */
++#define R_IA64_REL64LSB		0x6f	/* data 8 + REL */
++#define R_IA64_LTV32MSB		0x74	/* symbol + addend, data4 MSB */
++#define R_IA64_LTV32LSB		0x75	/* symbol + addend, data4 LSB */
++#define R_IA64_LTV64MSB		0x76	/* symbol + addend, data8 MSB */
++#define R_IA64_LTV64LSB		0x77	/* symbol + addend, data8 LSB */
++#define R_IA64_PCREL21BI	0x79	/* @pcrel(sym + add), 21bit inst */
++#define R_IA64_PCREL22		0x7a	/* @pcrel(sym + add), 22bit inst */
++#define R_IA64_PCREL64I		0x7b	/* @pcrel(sym + add), 64bit inst */
++#define R_IA64_IPLTMSB		0x80	/* dynamic reloc, imported PLT, MSB */
++#define R_IA64_IPLTLSB		0x81	/* dynamic reloc, imported PLT, LSB */
++#define R_IA64_COPY		0x84	/* copy relocation */
++#define R_IA64_SUB		0x85	/* Addend and symbol difference */
++#define R_IA64_LTOFF22X		0x86	/* LTOFF22, relaxable.  */
++#define R_IA64_LDXMOV		0x87	/* Use of LTOFF22X.  */
++#define R_IA64_TPREL14		0x91	/* @tprel(sym + add), imm14 */
++#define R_IA64_TPREL22		0x92	/* @tprel(sym + add), imm22 */
++#define R_IA64_TPREL64I		0x93	/* @tprel(sym + add), imm64 */
++#define R_IA64_TPREL64MSB	0x96	/* @tprel(sym + add), data8 MSB */
++#define R_IA64_TPREL64LSB	0x97	/* @tprel(sym + add), data8 LSB */
++#define R_IA64_LTOFF_TPREL22	0x9a	/* @ltoff(@tprel(s+a)), imm2 */
++#define R_IA64_DTPMOD64MSB	0xa6	/* @dtpmod(sym + add), data8 MSB */
++#define R_IA64_DTPMOD64LSB	0xa7	/* @dtpmod(sym + add), data8 LSB */
++#define R_IA64_LTOFF_DTPMOD22	0xaa	/* @ltoff(@dtpmod(sym + add)), imm22 */
++#define R_IA64_DTPREL14		0xb1	/* @dtprel(sym + add), imm14 */
++#define R_IA64_DTPREL22		0xb2	/* @dtprel(sym + add), imm22 */
++#define R_IA64_DTPREL64I	0xb3	/* @dtprel(sym + add), imm64 */
++#define R_IA64_DTPREL32MSB	0xb4	/* @dtprel(sym + add), data4 MSB */
++#define R_IA64_DTPREL32LSB	0xb5	/* @dtprel(sym + add), data4 LSB */
++#define R_IA64_DTPREL64MSB	0xb6	/* @dtprel(sym + add), data8 MSB */
++#define R_IA64_DTPREL64LSB	0xb7	/* @dtprel(sym + add), data8 LSB */
++#define R_IA64_LTOFF_DTPREL22	0xba	/* @ltoff(@dtprel(s+a)), imm22 */
++
++/* SH specific declarations */
++
++/* SH relocs.  */
++#define	R_SH_NONE		0
++#define	R_SH_DIR32		1
++#define	R_SH_REL32		2
++#define	R_SH_DIR8WPN		3
++#define	R_SH_IND12W		4
++#define	R_SH_DIR8WPL		5
++#define	R_SH_DIR8WPZ		6
++#define	R_SH_DIR8BP		7
++#define	R_SH_DIR8W		8
++#define	R_SH_DIR8L		9
++#define	R_SH_SWITCH16		25
++#define	R_SH_SWITCH32		26
++#define	R_SH_USES		27
++#define	R_SH_COUNT		28
++#define	R_SH_ALIGN		29
++#define	R_SH_CODE		30
++#define	R_SH_DATA		31
++#define	R_SH_LABEL		32
++#define	R_SH_SWITCH8		33
++#define	R_SH_GNU_VTINHERIT	34
++#define	R_SH_GNU_VTENTRY	35
++#define	R_SH_TLS_GD_32		144
++#define	R_SH_TLS_LD_32		145
++#define	R_SH_TLS_LDO_32		146
++#define	R_SH_TLS_IE_32		147
++#define	R_SH_TLS_LE_32		148
++#define	R_SH_TLS_DTPMOD32	149
++#define	R_SH_TLS_DTPOFF32	150
++#define	R_SH_TLS_TPOFF32	151
++#define	R_SH_GOT32		160
++#define	R_SH_PLT32		161
++#define	R_SH_COPY		162
++#define	R_SH_GLOB_DAT		163
++#define	R_SH_JMP_SLOT		164
++#define	R_SH_RELATIVE		165
++#define	R_SH_GOTOFF		166
++#define	R_SH_GOTPC		167
++/* Keep this the last entry.  */
++#define	R_SH_NUM		256
++
++/* Additional s390 relocs */
++
++#define R_390_NONE		0	/* No reloc.  */
++#define R_390_8			1	/* Direct 8 bit.  */
++#define R_390_12		2	/* Direct 12 bit.  */
++#define R_390_16		3	/* Direct 16 bit.  */
++#define R_390_32		4	/* Direct 32 bit.  */
++#define R_390_PC32		5	/* PC relative 32 bit.	*/
++#define R_390_GOT12		6	/* 12 bit GOT offset.  */
++#define R_390_GOT32		7	/* 32 bit GOT offset.  */
++#define R_390_PLT32		8	/* 32 bit PC relative PLT address.  */
++#define R_390_COPY		9	/* Copy symbol at runtime.  */
++#define R_390_GLOB_DAT		10	/* Create GOT entry.  */
++#define R_390_JMP_SLOT		11	/* Create PLT entry.  */
++#define R_390_RELATIVE		12	/* Adjust by program base.  */
++#define R_390_GOTOFF32		13	/* 32 bit offset to GOT.	 */
++#define R_390_GOTPC		14	/* 32 bit PC relative offset to GOT.  */
++#define R_390_GOT16		15	/* 16 bit GOT offset.  */
++#define R_390_PC16		16	/* PC relative 16 bit.	*/
++#define R_390_PC16DBL		17	/* PC relative 16 bit shifted by 1.  */
++#define R_390_PLT16DBL		18	/* 16 bit PC rel. PLT shifted by 1.  */
++#define R_390_PC32DBL		19	/* PC relative 32 bit shifted by 1.  */
++#define R_390_PLT32DBL		20	/* 32 bit PC rel. PLT shifted by 1.  */
++#define R_390_GOTPCDBL		21	/* 32 bit PC rel. GOT shifted by 1.  */
++#define R_390_64		22	/* Direct 64 bit.  */
++#define R_390_PC64		23	/* PC relative 64 bit.	*/
++#define R_390_GOT64		24	/* 64 bit GOT offset.  */
++#define R_390_PLT64		25	/* 64 bit PC relative PLT address.  */
++#define R_390_GOTENT		26	/* 32 bit PC rel. to GOT entry >> 1. */
++#define R_390_GOTOFF16		27	/* 16 bit offset to GOT. */
++#define R_390_GOTOFF64		28	/* 64 bit offset to GOT. */
++#define R_390_GOTPLT12		29	/* 12 bit offset to jump slot.	*/
++#define R_390_GOTPLT16		30	/* 16 bit offset to jump slot.	*/
++#define R_390_GOTPLT32		31	/* 32 bit offset to jump slot.	*/
++#define R_390_GOTPLT64		32	/* 64 bit offset to jump slot.	*/
++#define R_390_GOTPLTENT		33	/* 32 bit rel. offset to jump slot.  */
++#define R_390_PLTOFF16		34	/* 16 bit offset from GOT to PLT. */
++#define R_390_PLTOFF32		35	/* 32 bit offset from GOT to PLT. */
++#define R_390_PLTOFF64		36	/* 16 bit offset from GOT to PLT. */
++#define R_390_TLS_LOAD		37	/* Tag for load insn in TLS code.  */
++#define R_390_TLS_GDCALL	38	/* Tag for function call in general
++					   dynamic TLS code. */
++#define R_390_TLS_LDCALL	39	/* Tag for function call in local
++					   dynamic TLS code. */
++#define R_390_TLS_GD32		40	/* Direct 32 bit for general dynamic
++					   thread local data.  */
++#define R_390_TLS_GD64		41	/* Direct 64 bit for general dynamic
++					  thread local data.  */
++#define R_390_TLS_GOTIE12	42	/* 12 bit GOT offset for static TLS
++					   block offset.  */
++#define R_390_TLS_GOTIE32	43	/* 32 bit GOT offset for static TLS
++					   block offset.  */
++#define R_390_TLS_GOTIE64	44	/* 64 bit GOT offset for static TLS
++					   block offset. */
++#define R_390_TLS_LDM32		45	/* Direct 32 bit for local dynamic
++					   thread local data in LE code.  */
++#define R_390_TLS_LDM64		46	/* Direct 64 bit for local dynamic
++					   thread local data in LE code.  */
++#define R_390_TLS_IE32		47	/* 32 bit address of GOT entry for
++					   negated static TLS block offset.  */
++#define R_390_TLS_IE64		48	/* 64 bit address of GOT entry for
++					   negated static TLS block offset.  */
++#define R_390_TLS_IEENT		49	/* 32 bit rel. offset to GOT entry for
++					   negated static TLS block offset.  */
++#define R_390_TLS_LE32		50	/* 32 bit negated offset relative to
++					   static TLS block.  */
++#define R_390_TLS_LE64		51	/* 64 bit negated offset relative to
++					   static TLS block.  */
++#define R_390_TLS_LDO32		52	/* 32 bit offset relative to TLS
++					   block.  */
++#define R_390_TLS_LDO64		53	/* 64 bit offset relative to TLS
++					   block.  */
++#define R_390_TLS_DTPMOD	54	/* ID of module containing symbol.  */
++#define R_390_TLS_DTPOFF	55	/* Offset in TLS block.	 */
++#define R_390_TLS_TPOFF		56	/* Negated offset in static TLS
++					   block.  */
++
++/* Keep this the last entry.  */
++#define R_390_NUM		57
++
++/* CRIS relocations.  */
++#define R_CRIS_NONE		0
++#define R_CRIS_8		1
++#define R_CRIS_16		2
++#define R_CRIS_32		3
++#define R_CRIS_8_PCREL		4
++#define R_CRIS_16_PCREL		5
++#define R_CRIS_32_PCREL		6
++#define R_CRIS_GNU_VTINHERIT	7
++#define R_CRIS_GNU_VTENTRY	8
++#define R_CRIS_COPY		9
++#define R_CRIS_GLOB_DAT		10
++#define R_CRIS_JUMP_SLOT	11
++#define R_CRIS_RELATIVE		12
++#define R_CRIS_16_GOT		13
++#define R_CRIS_32_GOT		14
++#define R_CRIS_16_GOTPLT	15
++#define R_CRIS_32_GOTPLT	16
++#define R_CRIS_32_GOTREL	17
++#define R_CRIS_32_PLT_GOTREL	18
++#define R_CRIS_32_PLT_PCREL	19
++
++#define R_CRIS_NUM		20
++
++/* AMD x86-64 relocations.  */
++#define R_X86_64_NONE		0	/* No reloc */
++#define R_X86_64_64		1	/* Direct 64 bit  */
++#define R_X86_64_PC32		2	/* PC relative 32 bit signed */
++#define R_X86_64_GOT32		3	/* 32 bit GOT entry */
++#define R_X86_64_PLT32		4	/* 32 bit PLT address */
++#define R_X86_64_COPY		5	/* Copy symbol at runtime */
++#define R_X86_64_GLOB_DAT	6	/* Create GOT entry */
++#define R_X86_64_JUMP_SLOT	7	/* Create PLT entry */
++#define R_X86_64_RELATIVE	8	/* Adjust by program base */
++#define R_X86_64_GOTPCREL	9	/* 32 bit signed PC relative
++					   offset to GOT */
++#define R_X86_64_32		10	/* Direct 32 bit zero extended */
++#define R_X86_64_32S		11	/* Direct 32 bit sign extended */
++#define R_X86_64_16		12	/* Direct 16 bit zero extended */
++#define R_X86_64_PC16		13	/* 16 bit sign extended pc relative */
++#define R_X86_64_8		14	/* Direct 8 bit sign extended  */
++#define R_X86_64_PC8		15	/* 8 bit sign extended pc relative */
++#define R_X86_64_DTPMOD64	16	/* ID of module containing symbol */
++#define R_X86_64_DTPOFF64	17	/* Offset in module's TLS block */
++#define R_X86_64_TPOFF64	18	/* Offset in initial TLS block */
++#define R_X86_64_TLSGD		19	/* 32 bit signed PC relative offset
++					   to two GOT entries for GD symbol */
++#define R_X86_64_TLSLD		20	/* 32 bit signed PC relative offset
++					   to two GOT entries for LD symbol */
++#define R_X86_64_DTPOFF32	21	/* Offset in TLS block */
++#define R_X86_64_GOTTPOFF	22	/* 32 bit signed PC relative offset
++					   to GOT entry for IE symbol */
++#define R_X86_64_TPOFF32	23	/* Offset in initial TLS block */
++
++#define R_X86_64_NUM		24
++
++__END_DECLS
++
++#endif	/* elf.h */
++
+ #include "elfconfig.h"
+ 
+ #if KERNEL_ELFCLASS == ELFCLASS32
+@@ -155,3 +2601,4 @@
+ void fatal(const char *fmt, ...);
+ void warn(const char *fmt, ...);
+ void merror(const char *fmt, ...);
++
+diff -Nur linux-2.6.32.orig/scripts/mod/sumversion.c linux-2.6.32/scripts/mod/sumversion.c
+--- linux-2.6.32.orig/scripts/mod/sumversion.c	Thu Dec  3 04:50:57 2009
++++ linux-2.6.32/scripts/mod/sumversion.c	Sat Dec 19 12:01:07 2009
+@@ -1,4 +1,4 @@
+-#include <netinet/in.h>
++/* #include <netinet/in.h> */
+ #ifdef __sun__
+ #include <inttypes.h>
+ #else

+ 14 - 0
target/linux/patches/2.6.33/cc-abstract.patch

@@ -0,0 +1,14 @@
+diff -Nur linux-2.6.32.orig/Makefile linux-2.6.32/Makefile
+--- linux-2.6.32.orig/Makefile	Thu Dec  3 04:50:57 2009
++++ linux-2.6.32/Makefile	Fri Dec 18 20:53:57 2009
+@@ -219,8 +219,8 @@
+ 	  else if [ -x /bin/bash ]; then echo /bin/bash; \
+ 	  else echo sh; fi ; fi)
+ 
+-HOSTCC       = gcc
+-HOSTCXX      = g++
++HOSTCC       ?= gcc
++HOSTCXX      ?= g++
+ HOSTCFLAGS   = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
+ HOSTCXXFLAGS = -O2
+ 

+ 14 - 0
target/linux/patches/2.6.33/cris-initrd.patch

@@ -0,0 +1,14 @@
+diff -Nur linux-2.6.31.4.orig/arch/cris/mm/init.c linux-2.6.31.4/arch/cris/mm/init.c
+--- linux-2.6.31.4.orig/arch/cris/mm/init.c	2009-10-12 22:15:40.000000000 +0200
++++ linux-2.6.31.4/arch/cris/mm/init.c	2009-10-25 12:59:24.418546156 +0100
+@@ -80,3 +80,10 @@
+         printk (KERN_INFO "Freeing unused kernel memory: %luk freed\n",
+ 		(unsigned long)((&__init_end - &__init_begin) >> 10));
+ }
++
++#ifdef CONFIG_BLK_DEV_INITRD
++void free_initrd_mem(unsigned long start, unsigned long end)
++{
++	return 0;
++}
++#endif

+ 14 - 0
target/linux/patches/2.6.33/cygwin-compat.patch

@@ -0,0 +1,14 @@
+diff -Nur linux-2.6.30.orig/scripts/mod/file2alias.c linux-2.6.30/scripts/mod/file2alias.c
+--- linux-2.6.30.orig/scripts/mod/file2alias.c	2009-06-10 05:05:27.000000000 +0200
++++ linux-2.6.30/scripts/mod/file2alias.c	2009-06-11 09:17:10.000000000 +0200
+@@ -29,7 +29,11 @@
+ 
+ #include <ctype.h>
+ 
++#ifdef __CYGWIN__
++typedef __uint32_t      __u32;
++#else
+ typedef uint32_t	__u32;
++#endif
+ typedef uint16_t	__u16;
+ typedef unsigned char	__u8;

+ 11 - 0
target/linux/patches/2.6.33/exmap.patch

@@ -0,0 +1,11 @@
+diff -Nur linux-2.6.32.orig/kernel/pid.c linux-2.6.32/kernel/pid.c
+--- linux-2.6.32.orig/kernel/pid.c	2009-12-03 04:51:21.000000000 +0100
++++ linux-2.6.32/kernel/pid.c	2009-12-06 01:04:41.000000000 +0100
+@@ -387,6 +387,7 @@
+ {
+ 	return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
+ }
++EXPORT_SYMBOL(find_task_by_vpid);
+ 
+ struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
+ {

+ 11 - 0
target/linux/patches/2.6.33/freebsd-compat.patch

@@ -0,0 +1,11 @@
+diff -Nur linux-2.6.30.orig/arch/x86/boot/tools/build.c linux-2.6.30/arch/x86/boot/tools/build.c
+--- linux-2.6.30.orig/arch/x86/boot/tools/build.c	2009-06-10 05:05:27.000000000 +0200
++++ linux-2.6.30/arch/x86/boot/tools/build.c	2009-06-11 09:18:50.000000000 +0200
+@@ -29,7 +29,6 @@
+ #include <stdarg.h>
+ #include <sys/types.h>
+ #include <sys/stat.h>
+-#include <sys/sysmacros.h>
+ #include <unistd.h>
+ #include <fcntl.h>
+ #include <sys/mman.h>

+ 62 - 0
target/linux/patches/2.6.33/mtd-root.patch

@@ -0,0 +1,62 @@
+diff -Nur linux-2.6.31.4.orig/drivers/mtd/Kconfig linux-2.6.31.4/drivers/mtd/Kconfig
+--- linux-2.6.31.4.orig/drivers/mtd/Kconfig	2009-10-12 22:15:40.000000000 +0200
++++ linux-2.6.31.4/drivers/mtd/Kconfig	2009-10-21 11:45:10.557679089 +0200
+@@ -53,6 +53,11 @@
+ 	  should normally be compiled as kernel modules. The modules perform
+ 	  various checks and verifications when loaded.
+ 
++config MTD_ROOTFS_ROOT_DEV
++	bool "Automatically set 'rootfs' partition to be root filesystem"
++	depends on MTD_PARTITIONS
++	default y
++
+ config MTD_REDBOOT_PARTS
+ 	tristate "RedBoot partition table parsing"
+ 	depends on MTD_PARTITIONS
+diff -Nur linux-2.6.31.4.orig/drivers/mtd/mtdpart.c linux-2.6.31.4/drivers/mtd/mtdpart.c
+--- linux-2.6.31.4.orig/drivers/mtd/mtdpart.c	2009-10-12 22:15:40.000000000 +0200
++++ linux-2.6.31.4/drivers/mtd/mtdpart.c	2009-10-21 11:46:39.593679219 +0200
+@@ -18,6 +18,7 @@
+ #include <linux/mtd/mtd.h>
+ #include <linux/mtd/partitions.h>
+ #include <linux/mtd/compatmac.h>
++#include <linux/root_dev.h>
+ 
+ /* Our partition linked list */
+ static LIST_HEAD(mtd_partitions);
+@@ -35,7 +36,7 @@
+  * the pointer to that structure with this macro.
+  */
+ #define PART(x)  ((struct mtd_part *)(x))
+-
++#define IS_PART(mtd) (mtd->read == part_read)
+ 
+ /*
+  * MTD methods which simply translate the effective address and pass through
+@@ -517,14 +518,23 @@
+ {
+ 	struct mtd_part *slave;
+ 	uint64_t cur_offset = 0;
+-	int i;
++	int i, j;
+ 
+ 	printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);
+ 
+-	for (i = 0; i < nbparts; i++) {
+-		slave = add_one_partition(master, parts + i, i, cur_offset);
++ 	for (i = 0, j = 0; i < nbparts; i++) {
++ 		slave = add_one_partition(master, parts + i, j++, cur_offset);
+ 		if (!slave)
+ 			return -ENOMEM;
++ 		if (!strcmp(parts[i].name, "rootfs")) {
++#ifdef CONFIG_MTD_ROOTFS_ROOT_DEV
++			if (ROOT_DEV == 0) {
++				printk(KERN_NOTICE "mtd: partition \"rootfs\" "
++					"set to be root filesystem\n");
++				ROOT_DEV = MKDEV(MTD_BLOCK_MAJOR, slave->mtd.index);
++			}
++#endif
++		}
+ 		cur_offset = slave->offset + slave->mtd.size;
+ 	}
+ 

+ 23653 - 0
target/linux/patches/2.6.33/ocf.patch

@@ -0,0 +1,23653 @@
+diff -Nur linux-2.6.30.orig/crypto/Kconfig linux-2.6.30/crypto/Kconfig
+--- linux-2.6.30.orig/crypto/Kconfig	2009-06-10 05:05:27.000000000 +0200
++++ linux-2.6.30/crypto/Kconfig	2009-06-11 10:55:27.000000000 +0200
+@@ -781,3 +781,5 @@
+ source "drivers/crypto/Kconfig"
+ 
+ endif	# if CRYPTO
++
++source "crypto/ocf/Kconfig"
+diff -Nur linux-2.6.30.orig/crypto/Makefile linux-2.6.30/crypto/Makefile
+--- linux-2.6.30.orig/crypto/Makefile	2009-06-10 05:05:27.000000000 +0200
++++ linux-2.6.30/crypto/Makefile	2009-06-11 10:55:27.000000000 +0200
+@@ -84,6 +84,8 @@
+ obj-$(CONFIG_CRYPTO_ANSI_CPRNG) += ansi_cprng.o
+ obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o
+ 
++obj-$(CONFIG_OCF_OCF) += ocf/
++
+ #
+ # generic algorithms and the async_tx api
+ #
+diff -Nur linux-2.6.30.orig/crypto/ocf/Config.in linux-2.6.30/crypto/ocf/Config.in
+--- linux-2.6.30.orig/crypto/ocf/Config.in	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.30/crypto/ocf/Config.in	2009-06-11 10:55:27.000000000 +0200
+@@ -0,0 +1,34 @@
++#############################################################################
++
++mainmenu_option next_comment
++comment 'OCF Configuration'
+tristate 'OCF (Open Cryptographic Framework)' CONFIG_OCF_OCF
++dep_mbool '  enable fips RNG checks (fips check on RNG data before use)' \
++				CONFIG_OCF_FIPS $CONFIG_OCF_OCF
++dep_mbool '  enable harvesting entropy for /dev/random' \
++				CONFIG_OCF_RANDOMHARVEST $CONFIG_OCF_OCF
++dep_tristate '  cryptodev (user space support)' \
++				CONFIG_OCF_CRYPTODEV $CONFIG_OCF_OCF
++dep_tristate '  cryptosoft (software crypto engine)' \
++				CONFIG_OCF_CRYPTOSOFT $CONFIG_OCF_OCF
++dep_tristate '  safenet (HW crypto engine)' \
++				CONFIG_OCF_SAFE $CONFIG_OCF_OCF
++dep_tristate '  IXP4xx (HW crypto engine)' \
++				CONFIG_OCF_IXP4XX $CONFIG_OCF_OCF
++dep_mbool    '  Enable IXP4xx HW to perform SHA1 and MD5 hashing (very slow)' \
++				CONFIG_OCF_IXP4XX_SHA1_MD5 $CONFIG_OCF_IXP4XX
++dep_tristate '  hifn (HW crypto engine)' \
++				CONFIG_OCF_HIFN $CONFIG_OCF_OCF
++dep_tristate '  talitos (HW crypto engine)' \
++				CONFIG_OCF_TALITOS $CONFIG_OCF_OCF
++dep_tristate '  pasemi (HW crypto engine)' \
++				CONFIG_OCF_PASEMI $CONFIG_OCF_OCF
++dep_tristate '  ep80579 (HW crypto engine)' \
++				CONFIG_OCF_EP80579 $CONFIG_OCF_OCF
++dep_tristate '  ocfnull (does no crypto)' \
++				CONFIG_OCF_OCFNULL $CONFIG_OCF_OCF
++dep_tristate '  ocf-bench (HW crypto in-kernel benchmark)' \
++				CONFIG_OCF_BENCH $CONFIG_OCF_OCF
++endmenu
++
++#############################################################################
+diff -Nur linux-2.6.30.orig/crypto/ocf/criov.c linux-2.6.30/crypto/ocf/criov.c
+--- linux-2.6.30.orig/crypto/ocf/criov.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.30/crypto/ocf/criov.c	2009-06-11 10:55:27.000000000 +0200
+@@ -0,0 +1,215 @@
++/*      $OpenBSD: criov.c,v 1.9 2002/01/29 15:48:29 jason Exp $	*/
++
++/*
++ * Linux port done by David McCullough <david_mccullough@securecomputing.com>
++ * Copyright (C) 2006-2007 David McCullough
++ * Copyright (C) 2004-2005 Intel Corporation.
++ * The license and original author are listed below.
++ *
++ * Copyright (c) 1999 Theo de Raadt
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ *
++ * 1. Redistributions of source code must retain the above copyright
++ *   notice, this list of conditions and the following disclaimer.
++ * 2. Redistributions in binary form must reproduce the above copyright
++ *   notice, this list of conditions and the following disclaimer in the
++ *   documentation and/or other materials provided with the distribution.
++ * 3. The name of the author may not be used to endorse or promote products
++ *   derived from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
++ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
++ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
++ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
++ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
++ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ *
++__FBSDID("$FreeBSD: src/sys/opencrypto/criov.c,v 1.5 2006/06/04 22:15:13 pjd Exp $");
++ */
++
++#ifndef AUTOCONF_INCLUDED
++#include <linux/config.h>
++#endif
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/uio.h>
++#include <linux/skbuff.h>
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <asm/io.h>
++
++#include <uio.h>
++#include <cryptodev.h>
++
++/*
++ * This macro is only for avoiding code duplication, as we need to skip
++ * given number of bytes in the same way in three functions below.
++ */
++#define	CUIO_SKIP()	do {						\
++	KASSERT(off >= 0, ("%s: off %d < 0", __func__, off));		\
++	KASSERT(len >= 0, ("%s: len %d < 0", __func__, len));		\
++	while (off > 0) {						\
++		KASSERT(iol >= 0, ("%s: empty in skip", __func__));	\
++		if (off < iov->iov_len)					\
++			break;						\
++		off -= iov->iov_len;					\
++		iol--;							\
++		iov++;							\
++	}								\
++} while (0)
++
++void
++cuio_copydata(struct uio* uio, int off, int len, caddr_t cp)
++{
++	struct iovec *iov = uio->uio_iov;
++	int iol = uio->uio_iovcnt;
++	unsigned count;
++
++	CUIO_SKIP();
++	while (len > 0) {
++		KASSERT(iol >= 0, ("%s: empty", __func__));
++		count = min((int)(iov->iov_len - off), len);
++		memcpy(cp, ((caddr_t)iov->iov_base) + off, count);
++		len -= count;
++		cp += count;
++		off = 0;
++		iol--;
++		iov++;
++	}
++}
++
++void
++cuio_copyback(struct uio* uio, int off, int len, caddr_t cp)
++{
++	struct iovec *iov = uio->uio_iov;
++	int iol = uio->uio_iovcnt;
++	unsigned count;
++
++	CUIO_SKIP();
++	while (len > 0) {
++		KASSERT(iol >= 0, ("%s: empty", __func__));
++		count = min((int)(iov->iov_len - off), len);
++		memcpy(((caddr_t)iov->iov_base) + off, cp, count);
++		len -= count;
++		cp += count;
++		off = 0;
++		iol--;
++		iov++;
++	}
++}
++
++/*
++ * Return a pointer to iov/offset of location in iovec list.
++ */
++struct iovec *
++cuio_getptr(struct uio *uio, int loc, int *off)
++{
++	struct iovec *iov = uio->uio_iov;
++	int iol = uio->uio_iovcnt;
++
++	while (loc >= 0) {
++		/* Normal end of search */
++		if (loc < iov->iov_len) {
++	    		*off = loc;
++	    		return (iov);
++		}
++
++		loc -= iov->iov_len;
++		if (iol == 0) {
++			if (loc == 0) {
++				/* Point at the end of valid data */
++				*off = iov->iov_len;
++				return (iov);
++			} else
++				return (NULL);
++		} else {
++			iov++, iol--;
++		}
++    	}
++
++	return (NULL);
++}
++
++EXPORT_SYMBOL(cuio_copyback);
++EXPORT_SYMBOL(cuio_copydata);
++EXPORT_SYMBOL(cuio_getptr);
++
++
++static void
++skb_copy_bits_back(struct sk_buff *skb, int offset, caddr_t cp, int len)
++{
++	int i;
++	if (offset < skb_headlen(skb)) {
++		memcpy(skb->data + offset, cp, min_t(int, skb_headlen(skb), len));
++		len -= skb_headlen(skb);
++		cp += skb_headlen(skb);
++	}
++	offset -= skb_headlen(skb);
++	for (i = 0; len > 0 && i < skb_shinfo(skb)->nr_frags; i++) {
++		if (offset < skb_shinfo(skb)->frags[i].size) {
++			memcpy(page_address(skb_shinfo(skb)->frags[i].page) +
++					skb_shinfo(skb)->frags[i].page_offset,
++					cp, min_t(int, skb_shinfo(skb)->frags[i].size, len));
++			len -= skb_shinfo(skb)->frags[i].size;
++			cp += skb_shinfo(skb)->frags[i].size;
++		}
++		offset -= skb_shinfo(skb)->frags[i].size;
++	}
++}
++
++void
++crypto_copyback(int flags, caddr_t buf, int off, int size, caddr_t in)
++{
++
++	if ((flags & CRYPTO_F_SKBUF) != 0)
++		skb_copy_bits_back((struct sk_buff *)buf, off, in, size);
++	else if ((flags & CRYPTO_F_IOV) != 0)
++		cuio_copyback((struct uio *)buf, off, size, in);
++	else
++		bcopy(in, buf + off, size);
++}
++
++void
++crypto_copydata(int flags, caddr_t buf, int off, int size, caddr_t out)
++{
++
++	if ((flags & CRYPTO_F_SKBUF) != 0)
++		skb_copy_bits((struct sk_buff *)buf, off, out, size);
++	else if ((flags & CRYPTO_F_IOV) != 0)
++		cuio_copydata((struct uio *)buf, off, size, out);
++	else
++		bcopy(buf + off, out, size);
++}
++
++int
++crypto_apply(int flags, caddr_t buf, int off, int len,
++    int (*f)(void *, void *, u_int), void *arg)
++{
++#if 0
++	int error;
++
++	if ((flags & CRYPTO_F_SKBUF) != 0)
++		error = XXXXXX((struct mbuf *)buf, off, len, f, arg);
++	else if ((flags & CRYPTO_F_IOV) != 0)
++		error = cuio_apply((struct uio *)buf, off, len, f, arg);
++	else
++		error = (*f)(arg, buf + off, len);
++	return (error);
++#else
++	KASSERT(0, ("crypto_apply not implemented!\n"));
++#endif
++	return 0;
++}
++
++EXPORT_SYMBOL(crypto_copyback);
++EXPORT_SYMBOL(crypto_copydata);
++EXPORT_SYMBOL(crypto_apply);
++
+diff -Nur linux-2.6.30.orig/crypto/ocf/crypto.c linux-2.6.30/crypto/ocf/crypto.c
+--- linux-2.6.30.orig/crypto/ocf/crypto.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.30/crypto/ocf/crypto.c	2009-06-11 10:55:27.000000000 +0200
+@@ -0,0 +1,1741 @@
++/*-
++ * Linux port done by David McCullough <david_mccullough@securecomputing.com>
++ * Copyright (C) 2006-2007 David McCullough
++ * Copyright (C) 2004-2005 Intel Corporation.
++ * The license and original author are listed below.
++ *
++ * Copyright (c) 2002-2006 Sam Leffler.  All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ * 1. Redistributions of source code must retain the above copyright
++ *    notice, this list of conditions and the following disclaimer.
++ * 2. Redistributions in binary form must reproduce the above copyright
++ *    notice, this list of conditions and the following disclaimer in the
++ *    documentation and/or other materials provided with the distribution.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
++ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
++ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
++ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
++ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
++ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#if 0
++#include <sys/cdefs.h>
++__FBSDID("$FreeBSD: src/sys/opencrypto/crypto.c,v 1.27 2007/03/21 03:42:51 sam Exp $");
++#endif
++
++/*
++ * Cryptographic Subsystem.
++ *
++ * This code is derived from the Openbsd Cryptographic Framework (OCF)
++ * that has the copyright shown below.  Very little of the original
++ * code remains.
++ */
++/*-
++ * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
++ *
++ * This code was written by Angelos D. Keromytis in Athens, Greece, in
++ * February 2000. Network Security Technologies Inc. (NSTI) kindly
++ * supported the development of this code.
++ *
++ * Copyright (c) 2000, 2001 Angelos D. Keromytis
++ *
++ * Permission to use, copy, and modify this software with or without fee
++ * is hereby granted, provided that this entire notice is included in
++ * all source code copies of any software which is or includes a copy or
++ * modification of this software.
++ *
++ * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
++ * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
++ * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
++ * PURPOSE.
++ *
++__FBSDID("$FreeBSD: src/sys/opencrypto/crypto.c,v 1.16 2005/01/07 02:29:16 imp Exp $");
++ */
++
++
++#ifndef AUTOCONF_INCLUDED
++#include <linux/config.h>
++#endif
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/list.h>
++#include <linux/slab.h>
++#include <linux/wait.h>
++#include <linux/sched.h>
++#include <linux/spinlock.h>
++#include <linux/version.h>
++#include <cryptodev.h>
++
++/*
++ * keep track of whether or not we have been initialised, a big
++ * issue if we are linked into the kernel and a driver gets started before
++ * us
++ */
++static int crypto_initted = 0;
++
++/*
++ * Crypto drivers register themselves by allocating a slot in the
++ * crypto_drivers table with crypto_get_driverid() and then registering
++ * each algorithm they support with crypto_register() and crypto_kregister().
++ */
++
++/*
++ * lock on driver table
++ * we track its state as spin_is_locked does not do anything on non-SMP boxes
++ */
++static spinlock_t	crypto_drivers_lock;
++static int			crypto_drivers_locked;		/* for non-SMP boxes */
++
++#define	CRYPTO_DRIVER_LOCK() \
++			({ \
++				spin_lock_irqsave(&crypto_drivers_lock, d_flags); \
++			 	crypto_drivers_locked = 1; \
++				dprintk("%s,%d: DRIVER_LOCK()\n", __FILE__, __LINE__); \
++			 })
++#define	CRYPTO_DRIVER_UNLOCK() \
++			({ \
++			 	dprintk("%s,%d: DRIVER_UNLOCK()\n", __FILE__, __LINE__); \
++			 	crypto_drivers_locked = 0; \
++				spin_unlock_irqrestore(&crypto_drivers_lock, d_flags); \
++			 })
++#define	CRYPTO_DRIVER_ASSERT() \
++			({ \
++			 	if (!crypto_drivers_locked) { \
++					dprintk("%s,%d: DRIVER_ASSERT!\n", __FILE__, __LINE__); \
++			 	} \
++			 })
++
++/*
++ * Crypto device/driver capabilities structure.
++ *
++ * Synchronization:
++ * (d) - protected by CRYPTO_DRIVER_LOCK()
++ * (q) - protected by CRYPTO_Q_LOCK()
++ * Not tagged fields are read-only.
++ */
++struct cryptocap {
++	device_t	cc_dev;			/* (d) device/driver */
++	u_int32_t	cc_sessions;		/* (d) # of sessions */
++	u_int32_t	cc_koperations;		/* (d) # of asym operations */
++	/*
++	 * Largest possible operator length (in bits) for each type of
++	 * encryption algorithm. XXX not used
++	 */
++	u_int16_t	cc_max_op_len[CRYPTO_ALGORITHM_MAX + 1];
++	u_int8_t	cc_alg[CRYPTO_ALGORITHM_MAX + 1];
++	u_int8_t	cc_kalg[CRK_ALGORITHM_MAX + 1];
++
++	int		cc_flags;		/* (d) flags */
++#define CRYPTOCAP_F_CLEANUP	0x80000000	/* needs resource cleanup */
++	int		cc_qblocked;		/* (q) symmetric q blocked */
++	int		cc_kqblocked;		/* (q) asymmetric q blocked */
++};
++static struct cryptocap *crypto_drivers = NULL;
++static int crypto_drivers_num = 0;
++
++/*
++ * There are two queues for crypto requests; one for symmetric (e.g.
++ * cipher) operations and one for asymmetric (e.g. MOD)operations.
++ * A single mutex is used to lock access to both queues.  We could
++ * have one per-queue but having one simplifies handling of block/unblock
++ * operations.
++ */
++static	int crp_sleep = 0;
++static LIST_HEAD(crp_q);		/* request queues */
++static LIST_HEAD(crp_kq);
++
++static spinlock_t crypto_q_lock;
++
++int crypto_all_qblocked = 0;  /* protect with Q_LOCK */
++module_param(crypto_all_qblocked, int, 0444);
++MODULE_PARM_DESC(crypto_all_qblocked, "Are all crypto queues blocked");
++
++int crypto_all_kqblocked = 0; /* protect with Q_LOCK */
++module_param(crypto_all_kqblocked, int, 0444);
++MODULE_PARM_DESC(crypto_all_kqblocked, "Are all asym crypto queues blocked");
++
++#define	CRYPTO_Q_LOCK() \
++			({ \
++				spin_lock_irqsave(&crypto_q_lock, q_flags); \
++			 	dprintk("%s,%d: Q_LOCK()\n", __FILE__, __LINE__); \
++			 })
++#define	CRYPTO_Q_UNLOCK() \
++			({ \
++			 	dprintk("%s,%d: Q_UNLOCK()\n", __FILE__, __LINE__); \
++				spin_unlock_irqrestore(&crypto_q_lock, q_flags); \
++			 })
++
++/*
++ * There are two queues for processing completed crypto requests; one
++ * for the symmetric and one for the asymmetric ops.  We only need one
++ * but have two to avoid type futzing (cryptop vs. cryptkop).  A single
++ * mutex is used to lock access to both queues.  Note that this lock
++ * must be separate from the lock on request queues to insure driver
++ * callbacks don't generate lock order reversals.
++ */
++static LIST_HEAD(crp_ret_q);		/* callback queues */
++static LIST_HEAD(crp_ret_kq);
++
++static spinlock_t crypto_ret_q_lock;
++#define	CRYPTO_RETQ_LOCK() \
++			({ \
++				spin_lock_irqsave(&crypto_ret_q_lock, r_flags); \
++				dprintk("%s,%d: RETQ_LOCK\n", __FILE__, __LINE__); \
++			 })
++#define	CRYPTO_RETQ_UNLOCK() \
++			({ \
++			 	dprintk("%s,%d: RETQ_UNLOCK\n", __FILE__, __LINE__); \
++				spin_unlock_irqrestore(&crypto_ret_q_lock, r_flags); \
++			 })
++#define	CRYPTO_RETQ_EMPTY()	(list_empty(&crp_ret_q) && list_empty(&crp_ret_kq))
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
++static kmem_cache_t *cryptop_zone;
++static kmem_cache_t *cryptodesc_zone;
++#else
++static struct kmem_cache *cryptop_zone;
++static struct kmem_cache *cryptodesc_zone;
++#endif
++
++#define debug crypto_debug
++int crypto_debug = 0;
++module_param(crypto_debug, int, 0644);
++MODULE_PARM_DESC(crypto_debug, "Enable debug");
++EXPORT_SYMBOL(crypto_debug);
++
++/*
++ * Maximum number of outstanding crypto requests before we start
++ * failing requests.  We need this to prevent DOS when too many
++ * requests are arriving for us to keep up.  Otherwise we will
++ * run the system out of memory.  Since crypto is slow,  we are
++ * usually the bottleneck that needs to say, enough is enough.
++ *
++ * We cannot print errors when this condition occurs,  we are already too
++ * slow,  printing anything will just kill us
++ */
++
++static int crypto_q_cnt = 0;
++module_param(crypto_q_cnt, int, 0444);
++MODULE_PARM_DESC(crypto_q_cnt,
++		"Current number of outstanding crypto requests");
++
++static int crypto_q_max = 1000;
++module_param(crypto_q_max, int, 0644);
++MODULE_PARM_DESC(crypto_q_max,
++		"Maximum number of outstanding crypto requests");
++
++#define bootverbose crypto_verbose
++static int crypto_verbose = 0;
++module_param(crypto_verbose, int, 0644);
++MODULE_PARM_DESC(crypto_verbose,
++		"Enable verbose crypto startup");
++
++int	crypto_usercrypto = 1;	/* userland may do crypto reqs */
++module_param(crypto_usercrypto, int, 0644);
++MODULE_PARM_DESC(crypto_usercrypto,
++	   "Enable/disable user-mode access to crypto support");
++
++int	crypto_userasymcrypto = 1;	/* userland may do asym crypto reqs */
++module_param(crypto_userasymcrypto, int, 0644);
++MODULE_PARM_DESC(crypto_userasymcrypto,
++	   "Enable/disable user-mode access to asymmetric crypto support");
++
++int	crypto_devallowsoft = 0;	/* only use hardware crypto */
++module_param(crypto_devallowsoft, int, 0644);
++MODULE_PARM_DESC(crypto_devallowsoft,
++	   "Enable/disable use of software crypto support");
++
++static pid_t	cryptoproc = (pid_t) -1;
++static struct	completion cryptoproc_exited;
++static DECLARE_WAIT_QUEUE_HEAD(cryptoproc_wait);
++static pid_t	cryptoretproc = (pid_t) -1;
++static struct	completion cryptoretproc_exited;
++static DECLARE_WAIT_QUEUE_HEAD(cryptoretproc_wait);
++
++static	int crypto_proc(void *arg);
++static	int crypto_ret_proc(void *arg);
++static	int crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint);
++static	int crypto_kinvoke(struct cryptkop *krp, int flags);
++static	void crypto_exit(void);
++static  int crypto_init(void);
++
++static	struct cryptostats cryptostats;
++
++static struct cryptocap *
++crypto_checkdriver(u_int32_t hid)
++{
++	if (crypto_drivers == NULL)
++		return NULL;
++	return (hid >= crypto_drivers_num ? NULL : &crypto_drivers[hid]);
++}
++
++/*
++ * Compare a driver's list of supported algorithms against another
++ * list; return non-zero if all algorithms are supported.
++ */
++static int
++driver_suitable(const struct cryptocap *cap, const struct cryptoini *cri)
++{
++	const struct cryptoini *cr;
++
++	/* See if all the algorithms are supported. */
++	for (cr = cri; cr; cr = cr->cri_next)
++		if (cap->cc_alg[cr->cri_alg] == 0)
++			return 0;
++	return 1;
++}
++
++/*
++ * Select a driver for a new session that supports the specified
++ * algorithms and, optionally, is constrained according to the flags.
++ * The algorithm we use here is pretty stupid; just use the
++ * first driver that supports all the algorithms we need. If there
++ * are multiple drivers we choose the driver with the fewest active
++ * sessions.  We prefer hardware-backed drivers to software ones.
++ *
++ * XXX We need more smarts here (in real life too, but that's
++ * XXX another story altogether).
++ */
++static struct cryptocap *
++crypto_select_driver(const struct cryptoini *cri, int flags)
++{
++	struct cryptocap *cap, *best;
++	int match, hid;
++
++	CRYPTO_DRIVER_ASSERT();
++
++	/*
++	 * Look first for hardware crypto devices if permitted.
++	 */
++	if (flags & CRYPTOCAP_F_HARDWARE)
++		match = CRYPTOCAP_F_HARDWARE;
++	else
++		match = CRYPTOCAP_F_SOFTWARE;
++	best = NULL;
++again:
++	for (hid = 0; hid < crypto_drivers_num; hid++) {
++		cap = &crypto_drivers[hid];
++		/*
++		 * If it's not initialized, is in the process of
++		 * going away, or is not appropriate (hardware
++		 * or software based on match), then skip.
++		 */
++		if (cap->cc_dev == NULL ||
++		    (cap->cc_flags & CRYPTOCAP_F_CLEANUP) ||
++		    (cap->cc_flags & match) == 0)
++			continue;
++
++		/* verify all the algorithms are supported. */
++		if (driver_suitable(cap, cri)) {
++			if (best == NULL ||
++			    cap->cc_sessions < best->cc_sessions)
++				best = cap;
++		}
++	}
++	if (best != NULL)
++		return best;
++	if (match == CRYPTOCAP_F_HARDWARE && (flags & CRYPTOCAP_F_SOFTWARE)) {
++		/* sort of an Algol 68-style for loop */
++		match = CRYPTOCAP_F_SOFTWARE;
++		goto again;
++	}
++	return best;
++}
++
++/*
++ * Create a new session.  The crid argument specifies a crypto
++ * driver to use or constraints on a driver to select (hardware
++ * only, software only, either).  Whatever driver is selected
++ * must be capable of the requested crypto algorithms.
++ */
++int
++crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int crid)
++{
++	struct cryptocap *cap;
++	u_int32_t hid, lid;
++	int err;
++	unsigned long d_flags;
++
++	CRYPTO_DRIVER_LOCK();
++	if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
++		/*
++		 * Use specified driver; verify it is capable.
++		 */
++		cap = crypto_checkdriver(crid);
++		if (cap != NULL && !driver_suitable(cap, cri))
++			cap = NULL;
++	} else {
++		/*
++		 * No requested driver; select based on crid flags.
++		 */
++		cap = crypto_select_driver(cri, crid);
++		/*
++		 * if NULL then can't do everything in one session.
++		 * XXX Fix this. We need to inject a "virtual" session
++		 * XXX layer right about here.
++		 */
++	}
++	if (cap != NULL) {
++		/* Call the driver initialization routine. */
++		hid = cap - crypto_drivers;
++		lid = hid;		/* Pass the driver ID. */
++		cap->cc_sessions++;
++		CRYPTO_DRIVER_UNLOCK();
++		err = CRYPTODEV_NEWSESSION(cap->cc_dev, &lid, cri);
++		CRYPTO_DRIVER_LOCK();
++		if (err == 0) {
++			(*sid) = (cap->cc_flags & 0xff000000)
++			       | (hid & 0x00ffffff);
++			(*sid) <<= 32;
++			(*sid) |= (lid & 0xffffffff);
++		} else
++			cap->cc_sessions--;
++	} else
++		err = EINVAL;
++	CRYPTO_DRIVER_UNLOCK();
++	return err;
++}
++
++static void
++crypto_remove(struct cryptocap *cap)
++{
++	CRYPTO_DRIVER_ASSERT();
++	if (cap->cc_sessions == 0 && cap->cc_koperations == 0)
++		bzero(cap, sizeof(*cap));
++}
++
++/*
++ * Delete an existing session (or a reserved session on an unregistered
++ * driver).
++ */
++int
++crypto_freesession(u_int64_t sid)
++{
++	struct cryptocap *cap;
++	u_int32_t hid;
++	int err = 0;
++	unsigned long d_flags;
++
++	dprintk("%s()\n", __FUNCTION__);
++	CRYPTO_DRIVER_LOCK();
++
++	if (crypto_drivers == NULL) {
++		err = EINVAL;
++		goto done;
++	}
++
++	/* Determine two IDs. */
++	hid = CRYPTO_SESID2HID(sid);
++
++	if (hid >= crypto_drivers_num) {
++		dprintk("%s - INVALID DRIVER NUM %d\n", __FUNCTION__, hid);
++		err = ENOENT;
++		goto done;
++	}
++	cap = &crypto_drivers[hid];
++
++	if (cap->cc_dev) {
++		CRYPTO_DRIVER_UNLOCK();
++		/* Call the driver cleanup routine, if available, unlocked. */
++		err = CRYPTODEV_FREESESSION(cap->cc_dev, sid);
++		CRYPTO_DRIVER_LOCK();
++	}
++
++	if (cap->cc_sessions)
++		cap->cc_sessions--;
++
++	if (cap->cc_flags & CRYPTOCAP_F_CLEANUP)
++		crypto_remove(cap);
++
++done:
++	CRYPTO_DRIVER_UNLOCK();
++	return err;
++}
++
++/*
++ * Return an unused driver id.  Used by drivers prior to registering
++ * support for the algorithms they handle.
++ */
++int32_t
++crypto_get_driverid(device_t dev, int flags)
++{
++	struct cryptocap *newdrv;
++	int i;
++	unsigned long d_flags;
++
++	if ((flags & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
++		printf("%s: no flags specified when registering driver\n",
++		    device_get_nameunit(dev));
++		return -1;
++	}
++
++	CRYPTO_DRIVER_LOCK();
++
++	for (i = 0; i < crypto_drivers_num; i++) {
++		if (crypto_drivers[i].cc_dev == NULL &&
++		    (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP) == 0) {
++			break;
++		}
++	}
++
++	/* Out of entries, allocate some more. */
++	if (i == crypto_drivers_num) {
++		/* Be careful about wrap-around. */
++		if (2 * crypto_drivers_num <= crypto_drivers_num) {
++			CRYPTO_DRIVER_UNLOCK();
++			printk("crypto: driver count wraparound!\n");
++			return -1;
++		}
++
++		newdrv = kmalloc(2 * crypto_drivers_num * sizeof(struct cryptocap),
++				GFP_KERNEL);
++		if (newdrv == NULL) {
++			CRYPTO_DRIVER_UNLOCK();
++			printk("crypto: no space to expand driver table!\n");
++			return -1;
++		}
++
++		memcpy(newdrv, crypto_drivers,
++				crypto_drivers_num * sizeof(struct cryptocap));
++		memset(&newdrv[crypto_drivers_num], 0,
++				crypto_drivers_num * sizeof(struct cryptocap));
++
++		crypto_drivers_num *= 2;
++
++		kfree(crypto_drivers);
++		crypto_drivers = newdrv;
++	}
++
++	/* NB: state is zero'd on free */
++	crypto_drivers[i].cc_sessions = 1;	/* Mark */
++	crypto_drivers[i].cc_dev = dev;
++	crypto_drivers[i].cc_flags = flags;
++	if (bootverbose)
++		printf("crypto: assign %s driver id %u, flags %u\n",
++		    device_get_nameunit(dev), i, flags);
++
++	CRYPTO_DRIVER_UNLOCK();
++
++	return i;
++}
++
++/*
++ * Lookup a driver by name.  We match against the full device
++ * name and unit, and against just the name.  The latter gives
++ * us a simple wildcarding by device name.  On success return the
++ * driver/hardware identifier; otherwise return -1.
++ */
++int
++crypto_find_driver(const char *match)
++{
++	int i, len = strlen(match);
++	unsigned long d_flags;
++
++	CRYPTO_DRIVER_LOCK();
++	for (i = 0; i < crypto_drivers_num; i++) {
++		device_t dev = crypto_drivers[i].cc_dev;
++		if (dev == NULL ||
++		    (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP))
++			continue;
++		if (strncmp(match, device_get_nameunit(dev), len) == 0 ||
++		    strncmp(match, device_get_name(dev), len) == 0)
++			break;
++	}
++	CRYPTO_DRIVER_UNLOCK();
++	return i < crypto_drivers_num ? i : -1;
++}
++
++/*
++ * Return the device_t for the specified driver or NULL
++ * if the driver identifier is invalid.
++ */
++device_t
++crypto_find_device_byhid(int hid)
++{
++	struct cryptocap *cap = crypto_checkdriver(hid);
++	return cap != NULL ? cap->cc_dev : NULL;
++}
++
++/*
++ * Return the device/driver capabilities.
++ */
++int
++crypto_getcaps(int hid)
++{
++	struct cryptocap *cap = crypto_checkdriver(hid);
++	return cap != NULL ? cap->cc_flags : 0;
++}
++
++/*
++ * Register support for a key-related algorithm.  This routine
++ * is called once for each algorithm supported a driver.
++ */
++int
++crypto_kregister(u_int32_t driverid, int kalg, u_int32_t flags)
++{
++	struct cryptocap *cap;
++	int err;
++	unsigned long d_flags;
++
++	dprintk("%s()\n", __FUNCTION__);
++	CRYPTO_DRIVER_LOCK();
++
++	cap = crypto_checkdriver(driverid);
++	if (cap != NULL &&
++	    (CRK_ALGORITM_MIN <= kalg && kalg <= CRK_ALGORITHM_MAX)) {
++		/*
++		 * XXX Do some performance testing to determine placing.
++		 * XXX We probably need an auxiliary data structure that
++		 * XXX describes relative performances.
++		 */
++
++		cap->cc_kalg[kalg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
++		if (bootverbose)
++			printf("crypto: %s registers key alg %u flags %u\n"
++				, device_get_nameunit(cap->cc_dev)
++				, kalg
++				, flags
++			);
++		err = 0;
++	} else
++		err = EINVAL;
++
++	CRYPTO_DRIVER_UNLOCK();
++	return err;
++}
++
++/*
++ * Register support for a non-key-related algorithm.  This routine
++ * is called once for each such algorithm supported by a driver.
++ */
++int
++crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen,
++    u_int32_t flags)
++{
++	struct cryptocap *cap;
++	int err;
++	unsigned long d_flags;
++
++	dprintk("%s(id=0x%x, alg=%d, maxoplen=%d, flags=0x%x)\n", __FUNCTION__,
++			driverid, alg, maxoplen, flags);
++
++	CRYPTO_DRIVER_LOCK();
++
++	cap = crypto_checkdriver(driverid);
++	/* NB: algorithms are in the range [1..max] */
++	if (cap != NULL &&
++	    (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX)) {
++		/*
++		 * XXX Do some performance testing to determine placing.
++		 * XXX We probably need an auxiliary data structure that
++		 * XXX describes relative performances.
++		 */
++
++		cap->cc_alg[alg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
++		cap->cc_max_op_len[alg] = maxoplen;
++		if (bootverbose)
++			printf("crypto: %s registers alg %u flags %u maxoplen %u\n"
++				, device_get_nameunit(cap->cc_dev)
++				, alg
++				, flags
++				, maxoplen
++			);
++		cap->cc_sessions = 0;		/* Unmark */
++		err = 0;
++	} else
++		err = EINVAL;
++
++	CRYPTO_DRIVER_UNLOCK();
++	return err;
++}
++
++static void
++driver_finis(struct cryptocap *cap)
++{
++	u_int32_t ses, kops;
++
++	CRYPTO_DRIVER_ASSERT();
++
++	ses = cap->cc_sessions;
++	kops = cap->cc_koperations;
++	bzero(cap, sizeof(*cap));
++	if (ses != 0 || kops != 0) {
++		/*
++		 * If there are pending sessions,
++		 * just mark as invalid.
++		 */
++		cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
++		cap->cc_sessions = ses;
++		cap->cc_koperations = kops;
++	}
++}
++
++/*
++ * Unregister a crypto driver. If there are pending sessions using it,
++ * leave enough information around so that subsequent calls using those
++ * sessions will correctly detect the driver has been unregistered and
++ * reroute requests.
++ */
++int
++crypto_unregister(u_int32_t driverid, int alg)
++{
++	struct cryptocap *cap;
++	int i, err;
++	unsigned long d_flags;
++
++	dprintk("%s()\n", __FUNCTION__);
++	CRYPTO_DRIVER_LOCK();
++
++	cap = crypto_checkdriver(driverid);
++	if (cap != NULL &&
++	    (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX) &&
++	    cap->cc_alg[alg] != 0) {
++		cap->cc_alg[alg] = 0;
++		cap->cc_max_op_len[alg] = 0;
++
++		/* Was this the last algorithm ? */
++		for (i = 1; i <= CRYPTO_ALGORITHM_MAX; i++)
++			if (cap->cc_alg[i] != 0)
++				break;
++
++		if (i == CRYPTO_ALGORITHM_MAX + 1)
++			driver_finis(cap);
++		err = 0;
++	} else
++		err = EINVAL;
++	CRYPTO_DRIVER_UNLOCK();
++	return err;
++}
++
++/*
++ * Unregister all algorithms associated with a crypto driver.
++ * If there are pending sessions using it, leave enough information
++ * around so that subsequent calls using those sessions will
++ * correctly detect the driver has been unregistered and reroute
++ * requests.
++ */
++int
++crypto_unregister_all(u_int32_t driverid)
++{
++	struct cryptocap *cap;
++	int err;
++	unsigned long d_flags;
++
++	dprintk("%s()\n", __FUNCTION__);
++	CRYPTO_DRIVER_LOCK();
++	cap = crypto_checkdriver(driverid);
++	if (cap != NULL) {
++		driver_finis(cap);
++		err = 0;
++	} else
++		err = EINVAL;
++	CRYPTO_DRIVER_UNLOCK();
++
++	return err;
++}
++
++/*
++ * Clear blockage on a driver.  The what parameter indicates whether
++ * the driver is now ready for cryptop's and/or cryptokop's.
++ */
++int
++crypto_unblock(u_int32_t driverid, int what)
++{
++	struct cryptocap *cap;
++	int err;
++	unsigned long q_flags;
++
++	CRYPTO_Q_LOCK();
++	cap = crypto_checkdriver(driverid);
++	if (cap != NULL) {
++		if (what & CRYPTO_SYMQ) {
++			cap->cc_qblocked = 0;
++			crypto_all_qblocked = 0;
++		}
++		if (what & CRYPTO_ASYMQ) {
++			cap->cc_kqblocked = 0;
++			crypto_all_kqblocked = 0;
++		}
++		if (crp_sleep)
++			wake_up_interruptible(&cryptoproc_wait);
++		err = 0;
++	} else
++		err = EINVAL;
++	CRYPTO_Q_UNLOCK(); //DAVIDM should this be a driver lock
++
++	return err;
++}
++
++/*
++ * Add a crypto request to a queue, to be processed by the kernel thread.
++ */
++int
++crypto_dispatch(struct cryptop *crp)
++{
++	struct cryptocap *cap;
++	int result = -1;
++	unsigned long q_flags;
++
++	dprintk("%s()\n", __FUNCTION__);
++
++	cryptostats.cs_ops++;
++
++	CRYPTO_Q_LOCK();
++	if (crypto_q_cnt >= crypto_q_max) {
++		CRYPTO_Q_UNLOCK();
++		cryptostats.cs_drops++;
++		return ENOMEM;
++	}
++	crypto_q_cnt++;
++
++	/*
++	 * Caller marked the request to be processed immediately; dispatch
++	 * it directly to the driver unless the driver is currently blocked.
++	 */
++	if ((crp->crp_flags & CRYPTO_F_BATCH) == 0) {
++		int hid = CRYPTO_SESID2HID(crp->crp_sid);
++		cap = crypto_checkdriver(hid);
++		/* Driver cannot disappear when there is an active session. */
++		KASSERT(cap != NULL, ("%s: Driver disappeared.", __func__));
++		if (!cap->cc_qblocked) {
++			crypto_all_qblocked = 0;
++			crypto_drivers[hid].cc_qblocked = 1;
++			CRYPTO_Q_UNLOCK();
++			result = crypto_invoke(cap, crp, 0);
++			CRYPTO_Q_LOCK();
++			if (result != ERESTART)
++				crypto_drivers[hid].cc_qblocked = 0;
++		}
++	}
++	if (result == ERESTART) {
++		/*
++		 * The driver ran out of resources, mark the
++		 * driver ``blocked'' for cryptop's and put
++		 * the request back in the queue.  It would
++		 * best to put the request back where we got
++		 * it but that's hard so for now we put it
++		 * at the front.  This should be ok; putting
++		 * it at the end does not work.
++		 */
++		list_add(&crp->crp_next, &crp_q);
++		cryptostats.cs_blocks++;
++	} else if (result == -1) {
++		TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
++	}
++	if (crp_sleep)
++		wake_up_interruptible(&cryptoproc_wait);
++	CRYPTO_Q_UNLOCK();
++	return 0;
++}
++
++/*
++ * Add an asymetric crypto request to a queue,
++ * to be processed by the kernel thread.
++ */
++int
++crypto_kdispatch(struct cryptkop *krp)
++{
++	int error;
++	unsigned long q_flags;
++
++	cryptostats.cs_kops++;
++
++	error = crypto_kinvoke(krp, krp->krp_crid);
++	if (error == ERESTART) {
++		CRYPTO_Q_LOCK();
++		TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next);
++		if (crp_sleep)
++			wake_up_interruptible(&cryptoproc_wait);
++		CRYPTO_Q_UNLOCK();
++		error = 0;
++	}
++	return error;
++}
++
++/*
++ * Verify a driver is suitable for the specified operation.
++ */
++static __inline int
++kdriver_suitable(const struct cryptocap *cap, const struct cryptkop *krp)
++{
++	return (cap->cc_kalg[krp->krp_op] & CRYPTO_ALG_FLAG_SUPPORTED) != 0;
++}
++
++/*
++ * Select a driver for an asym operation.  The driver must
++ * support the necessary algorithm.  The caller can constrain
++ * which device is selected with the flags parameter.  The
++ * algorithm we use here is pretty stupid; just use the first
++ * driver that supports the algorithms we need. If there are
++ * multiple suitable drivers we choose the driver with the
++ * fewest active operations.  We prefer hardware-backed
++ * drivers to software ones when either may be used.
++ */
++static struct cryptocap *
++crypto_select_kdriver(const struct cryptkop *krp, int flags)
++{
++	struct cryptocap *cap, *best, *blocked;
++	int match, hid;
++
++	CRYPTO_DRIVER_ASSERT();
++
++	/*
++	 * Look first for hardware crypto devices if permitted.
++	 */
++	if (flags & CRYPTOCAP_F_HARDWARE)
++		match = CRYPTOCAP_F_HARDWARE;
++	else
++		match = CRYPTOCAP_F_SOFTWARE;
++	best = NULL;
++	blocked = NULL;
++again:
++	for (hid = 0; hid < crypto_drivers_num; hid++) {
++		cap = &crypto_drivers[hid];
++		/*
++		 * If it's not initialized, is in the process of
++		 * going away, or is not appropriate (hardware
++		 * or software based on match), then skip.
++		 */
++		if (cap->cc_dev == NULL ||
++		    (cap->cc_flags & CRYPTOCAP_F_CLEANUP) ||
++		    (cap->cc_flags & match) == 0)
++			continue;
++
++		/* verify all the algorithms are supported. */
++		if (kdriver_suitable(cap, krp)) {
++			if (best == NULL ||
++			    cap->cc_koperations < best->cc_koperations)
++				best = cap;
++		}
++	}
++	if (best != NULL)
++		return best;
++	if (match == CRYPTOCAP_F_HARDWARE && (flags & CRYPTOCAP_F_SOFTWARE)) {
++		/* sort of an Algol 68-style for loop */
++		match = CRYPTOCAP_F_SOFTWARE;
++		goto again;
++	}
++	return best;
++}
++
++/*
++ * Dispatch an assymetric crypto request.
++ */
++static int
++crypto_kinvoke(struct cryptkop *krp, int crid)
++{
++	struct cryptocap *cap = NULL;
++	int error;
++	unsigned long d_flags;
++
++	KASSERT(krp != NULL, ("%s: krp == NULL", __func__));
++	KASSERT(krp->krp_callback != NULL,
++	    ("%s: krp->crp_callback == NULL", __func__));
++
++	CRYPTO_DRIVER_LOCK();
++	if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
++		cap = crypto_checkdriver(crid);
++		if (cap != NULL) {
++			/*
++			 * Driver present, it must support the necessary
++			 * algorithm and, if s/w drivers are excluded,
++			 * it must be registered as hardware-backed.
++			 */
++			if (!kdriver_suitable(cap, krp) ||
++			    (!crypto_devallowsoft &&
++			     (cap->cc_flags & CRYPTOCAP_F_HARDWARE) == 0))
++				cap = NULL;
++		}
++	} else {
++		/*
++		 * No requested driver; select based on crid flags.
++		 */
++		if (!crypto_devallowsoft)	/* NB: disallow s/w drivers */
++			crid &= ~CRYPTOCAP_F_SOFTWARE;
++		cap = crypto_select_kdriver(krp, crid);
++	}
++	if (cap != NULL && !cap->cc_kqblocked) {
++		krp->krp_hid = cap - crypto_drivers;
++		cap->cc_koperations++;
++		CRYPTO_DRIVER_UNLOCK();
++		error = CRYPTODEV_KPROCESS(cap->cc_dev, krp, 0);
++		CRYPTO_DRIVER_LOCK();
++		if (error == ERESTART) {
++			cap->cc_koperations--;
++			CRYPTO_DRIVER_UNLOCK();
++			return (error);
++		}
++		/* return the actual device used */
++		krp->krp_crid = krp->krp_hid;
++	} else {
++		/*
++		 * NB: cap is !NULL if device is blocked; in
++		 *     that case return ERESTART so the operation
++		 *     is resubmitted if possible.
++		 */
++		error = (cap == NULL) ? ENODEV : ERESTART;
++	}
++	CRYPTO_DRIVER_UNLOCK();
++
++	if (error) {
++		krp->krp_status = error;
++		crypto_kdone(krp);
++	}
++	return 0;
++}
++
++
++/*
++ * Dispatch a crypto request to the appropriate crypto devices.
++ */
++static int
++crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint)
++{
++	KASSERT(crp != NULL, ("%s: crp == NULL", __func__));
++	KASSERT(crp->crp_callback != NULL,
++	    ("%s: crp->crp_callback == NULL", __func__));
++	KASSERT(crp->crp_desc != NULL, ("%s: crp->crp_desc == NULL", __func__));
++
++	dprintk("%s()\n", __FUNCTION__);
++
++#ifdef CRYPTO_TIMING
++	if (crypto_timing)
++		crypto_tstat(&cryptostats.cs_invoke, &crp->crp_tstamp);
++#endif
++	if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) {
++		struct cryptodesc *crd;
++		u_int64_t nid;
++
++		/*
++		 * Driver has unregistered; migrate the session and return
++		 * an error to the caller so they'll resubmit the op.
++		 *
++		 * XXX: What if there are more already queued requests for this
++		 *      session?
++		 */
++		crypto_freesession(crp->crp_sid);
++
++		for (crd = crp->crp_desc; crd->crd_next; crd = crd->crd_next)
++			crd->CRD_INI.cri_next = &(crd->crd_next->CRD_INI);
++
++		/* XXX propagate flags from initial session? */
++		if (crypto_newsession(&nid, &(crp->crp_desc->CRD_INI),
++		    CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE) == 0)
++			crp->crp_sid = nid;
++
++		crp->crp_etype = EAGAIN;
++		crypto_done(crp);
++		return 0;
++	} else {
++		/*
++		 * Invoke the driver to process the request.
++		 */
++		return CRYPTODEV_PROCESS(cap->cc_dev, crp, hint);
++	}
++}
++
++/*
++ * Release a set of crypto descriptors.
++ */
++void
++crypto_freereq(struct cryptop *crp)
++{
++	struct cryptodesc *crd;
++
++	if (crp == NULL)
++		return;
++
++#ifdef DIAGNOSTIC
++	{
++		struct cryptop *crp2;
++		unsigned long q_flags;
++
++		CRYPTO_Q_LOCK();
++		TAILQ_FOREACH(crp2, &crp_q, crp_next) {
++			KASSERT(crp2 != crp,
++			    ("Freeing cryptop from the crypto queue (%p).",
++			    crp));
++		}
++		CRYPTO_Q_UNLOCK();
++		CRYPTO_RETQ_LOCK();
++		TAILQ_FOREACH(crp2, &crp_ret_q, crp_next) {
++			KASSERT(crp2 != crp,
++			    ("Freeing cryptop from the return queue (%p).",
++			    crp));
++		}
++		CRYPTO_RETQ_UNLOCK();
++	}
++#endif
++
++	while ((crd = crp->crp_desc) != NULL) {
++		crp->crp_desc = crd->crd_next;
++		kmem_cache_free(cryptodesc_zone, crd);
++	}
++	kmem_cache_free(cryptop_zone, crp);
++}
++
++/*
++ * Acquire a set of crypto descriptors.
++ */
++struct cryptop *
++crypto_getreq(int num)
++{
++	struct cryptodesc *crd;
++	struct cryptop *crp;
++
++	crp = kmem_cache_alloc(cryptop_zone, SLAB_ATOMIC);
++	if (crp != NULL) {
++		memset(crp, 0, sizeof(*crp));
++		INIT_LIST_HEAD(&crp->crp_next);
++		init_waitqueue_head(&crp->crp_waitq);
++		while (num--) {
++			crd = kmem_cache_alloc(cryptodesc_zone, SLAB_ATOMIC);
++			if (crd == NULL) {
++				crypto_freereq(crp);
++				return NULL;
++			}
++			memset(crd, 0, sizeof(*crd));
++			crd->crd_next = crp->crp_desc;
++			crp->crp_desc = crd;
++		}
++	}
++	return crp;
++}
++
++/*
++ * Invoke the callback on behalf of the driver.
++ */
++void
++crypto_done(struct cryptop *crp)
++{
++	unsigned long q_flags;
++
++	dprintk("%s()\n", __FUNCTION__);
++	if ((crp->crp_flags & CRYPTO_F_DONE) == 0) {
++		crp->crp_flags |= CRYPTO_F_DONE;
++		CRYPTO_Q_LOCK();
++		crypto_q_cnt--;
++		CRYPTO_Q_UNLOCK();
++	} else
++		printk("crypto: crypto_done op already done, flags 0x%x",
++				crp->crp_flags);
++	if (crp->crp_etype != 0)
++		cryptostats.cs_errs++;
++	/*
++	 * CBIMM means unconditionally do the callback immediately;
++	 * CBIFSYNC means do the callback immediately only if the
++	 * operation was done synchronously.  Both are used to avoid
++	 * doing extraneous context switches; the latter is mostly
++	 * used with the software crypto driver.
++	 */
++	if ((crp->crp_flags & CRYPTO_F_CBIMM) ||
++	    ((crp->crp_flags & CRYPTO_F_CBIFSYNC) &&
++	     (CRYPTO_SESID2CAPS(crp->crp_sid) & CRYPTOCAP_F_SYNC))) {
++		/*
++		 * Do the callback directly.  This is ok when the
++		 * callback routine does very little (e.g. the
++		 * /dev/crypto callback method just does a wakeup).
++		 */
++		crp->crp_callback(crp);
++	} else {
++		unsigned long r_flags;
++		/*
++		 * Normal case; queue the callback for the thread.
++		 */
++		CRYPTO_RETQ_LOCK();
++		if (CRYPTO_RETQ_EMPTY())
++			wake_up_interruptible(&cryptoretproc_wait);/* shared wait channel */
++		TAILQ_INSERT_TAIL(&crp_ret_q, crp, crp_next);
++		CRYPTO_RETQ_UNLOCK();
++	}
++}
++
++/*
++ * Invoke the callback on behalf of the driver.
++ */
++void
++crypto_kdone(struct cryptkop *krp)
++{
++	struct cryptocap *cap;
++	unsigned long d_flags;
++
++	if ((krp->krp_flags & CRYPTO_KF_DONE) != 0)
++		printk("crypto: crypto_kdone op already done, flags 0x%x",
++				krp->krp_flags);
++	krp->krp_flags |= CRYPTO_KF_DONE;
++	if (krp->krp_status != 0)
++		cryptostats.cs_kerrs++;
++
++	CRYPTO_DRIVER_LOCK();
++	/* XXX: What if driver is loaded in the meantime? */
++	if (krp->krp_hid < crypto_drivers_num) {
++		cap = &crypto_drivers[krp->krp_hid];
++		cap->cc_koperations--;
++		KASSERT(cap->cc_koperations >= 0, ("cc_koperations < 0"));
++		if (cap->cc_flags & CRYPTOCAP_F_CLEANUP)
++			crypto_remove(cap);
++	}
++	CRYPTO_DRIVER_UNLOCK();
++
++	/*
++	 * CBIMM means unconditionally do the callback immediately;
++	 * This is used to avoid doing extraneous context switches
++	 */
++	if ((krp->krp_flags & CRYPTO_KF_CBIMM)) {
++		/*
++		 * Do the callback directly.  This is ok when the
++		 * callback routine does very little (e.g. the
++		 * /dev/crypto callback method just does a wakeup).
++		 */
++		krp->krp_callback(krp);
++	} else {
++		unsigned long r_flags;
++		/*
++		 * Normal case; queue the callback for the thread.
++		 */
++		CRYPTO_RETQ_LOCK();
++		if (CRYPTO_RETQ_EMPTY())
++			wake_up_interruptible(&cryptoretproc_wait);/* shared wait channel */
++		TAILQ_INSERT_TAIL(&crp_ret_kq, krp, krp_next);
++		CRYPTO_RETQ_UNLOCK();
++	}
++}
++
++int
++crypto_getfeat(int *featp)
++{
++	int hid, kalg, feat = 0;
++	unsigned long d_flags;
++
++	CRYPTO_DRIVER_LOCK();
++	for (hid = 0; hid < crypto_drivers_num; hid++) {
++		const struct cryptocap *cap = &crypto_drivers[hid];
++
++		if ((cap->cc_flags & CRYPTOCAP_F_SOFTWARE) &&
++		    !crypto_devallowsoft) {
++			continue;
++		}
++		for (kalg = 0; kalg < CRK_ALGORITHM_MAX; kalg++)
++			if (cap->cc_kalg[kalg] & CRYPTO_ALG_FLAG_SUPPORTED)
++				feat |=  1 << kalg;
++	}
++	CRYPTO_DRIVER_UNLOCK();
++	*featp = feat;
++	return (0);
++}
++
++/*
++ * Crypto thread, dispatches crypto requests.
++ */
++static int
++crypto_proc(void *arg)
++{
++	struct cryptop *crp, *submit;
++	struct cryptkop *krp, *krpp;
++	struct cryptocap *cap;
++	u_int32_t hid;
++	int result, hint;
++	unsigned long q_flags;
++
++	ocf_daemonize("crypto");
++
++	CRYPTO_Q_LOCK();
++	for (;;) {
++		/*
++		 * we need to make sure we don't get into a busy loop with nothing
++		 * to do,  the two crypto_all_*blocked vars help us find out when
++		 * we are all full and can do nothing on any driver or Q.  If so we
++		 * wait for an unblock.
++		 */
++		crypto_all_qblocked  = !list_empty(&crp_q);
++
++		/*
++		 * Find the first element in the queue that can be
++		 * processed and look-ahead to see if multiple ops
++		 * are ready for the same driver.
++		 */
++		submit = NULL;
++		hint = 0;
++		list_for_each_entry(crp, &crp_q, crp_next) {
++			hid = CRYPTO_SESID2HID(crp->crp_sid);
++			cap = crypto_checkdriver(hid);
++			/*
++			 * Driver cannot disappear when there is an active
++			 * session.
++			 */
++			KASSERT(cap != NULL, ("%s:%u Driver disappeared.",
++			    __func__, __LINE__));
++			if (cap == NULL || cap->cc_dev == NULL) {
++				/* Op needs to be migrated, process it. */
++				if (submit == NULL)
++					submit = crp;
++				break;
++			}
++			if (!cap->cc_qblocked) {
++				if (submit != NULL) {
++					/*
++					 * We stop on finding another op,
++					 * regardless whether its for the same
++					 * driver or not.  We could keep
++					 * searching the queue but it might be
++					 * better to just use a per-driver
++					 * queue instead.
++					 */
++					if (CRYPTO_SESID2HID(submit->crp_sid) == hid)
++						hint = CRYPTO_HINT_MORE;
++					break;
++				} else {
++					submit = crp;
++					if ((submit->crp_flags & CRYPTO_F_BATCH) == 0)
++						break;
++					/* keep scanning for more are q'd */
++				}
++			}
++		}
++		if (submit != NULL) {
++			hid = CRYPTO_SESID2HID(submit->crp_sid);
++			crypto_all_qblocked = 0;
++			list_del(&submit->crp_next);
++			crypto_drivers[hid].cc_qblocked = 1;
++			cap = crypto_checkdriver(hid);
++			CRYPTO_Q_UNLOCK();
++			KASSERT(cap != NULL, ("%s:%u Driver disappeared.",
++			    __func__, __LINE__));
++			result = crypto_invoke(cap, submit, hint);
++			CRYPTO_Q_LOCK();
++			if (result == ERESTART) {
++				/*
++				 * The driver ran out of resources, mark the
++				 * driver ``blocked'' for cryptop's and put
++				 * the request back in the queue.  It would
++				 * best to put the request back where we got
++				 * it but that's hard so for now we put it
++				 * at the front.  This should be ok; putting
++				 * it at the end does not work.
++				 */
++				/* XXX validate sid again? */
++				list_add(&submit->crp_next, &crp_q);
++				cryptostats.cs_blocks++;
++			} else
++				crypto_drivers[hid].cc_qblocked=0;
++		}
++
++		crypto_all_kqblocked = !list_empty(&crp_kq);
++
++		/* As above, but for key ops */
++		krp = NULL;
++		list_for_each_entry(krpp, &crp_kq, krp_next) {
++			cap = crypto_checkdriver(krpp->krp_hid);
++			if (cap == NULL || cap->cc_dev == NULL) {
++				/*
++				 * Operation needs to be migrated, invalidate
++				 * the assigned device so it will reselect a
++				 * new one below.  Propagate the original
++				 * crid selection flags if supplied.
++				 */
++				krp->krp_hid = krp->krp_crid &
++				    (CRYPTOCAP_F_SOFTWARE|CRYPTOCAP_F_HARDWARE);
++				if (krp->krp_hid == 0)
++					krp->krp_hid =
++				    CRYPTOCAP_F_SOFTWARE|CRYPTOCAP_F_HARDWARE;
++				break;
++			}
++			if (!cap->cc_kqblocked) {
++				krp = krpp;
++				break;
++			}
++		}
++		if (krp != NULL) {
++			crypto_all_kqblocked = 0;
++			list_del(&krp->krp_next);
++			crypto_drivers[krp->krp_hid].cc_kqblocked = 1;
++			CRYPTO_Q_UNLOCK();
++			result = crypto_kinvoke(krp, krp->krp_hid);
++			CRYPTO_Q_LOCK();
++			if (result == ERESTART) {
++				/*
++				 * The driver ran out of resources, mark the
++				 * driver ``blocked'' for cryptkop's and put
++				 * the request back in the queue.  It would
++				 * best to put the request back where we got
++				 * it but that's hard so for now we put it
++				 * at the front.  This should be ok; putting
++				 * it at the end does not work.
++				 */
++				/* XXX validate sid again? */
++				list_add(&krp->krp_next, &crp_kq);
++				cryptostats.cs_kblocks++;
++			} else
++				crypto_drivers[krp->krp_hid].cc_kqblocked = 0;
++		}
++
++		if (submit == NULL && krp == NULL) {
++			/*
++			 * Nothing more to be processed.  Sleep until we're
++			 * woken because there are more ops to process.
++			 * This happens either by submission or by a driver
++			 * becoming unblocked and notifying us through
++			 * crypto_unblock.  Note that when we wakeup we
++			 * start processing each queue again from the
++			 * front. It's not clear that it's important to
++			 * preserve this ordering since ops may finish
++			 * out of order if dispatched to different devices
++			 * and some become blocked while others do not.
++			 */
++			dprintk("%s - sleeping (qe=%d qb=%d kqe=%d kqb=%d)\n",
++					__FUNCTION__,
++					list_empty(&crp_q), crypto_all_qblocked,
++					list_empty(&crp_kq), crypto_all_kqblocked);
++			CRYPTO_Q_UNLOCK();
++			crp_sleep = 1;
++			wait_event_interruptible(cryptoproc_wait,
++					!(list_empty(&crp_q) || crypto_all_qblocked) ||
++					!(list_empty(&crp_kq) || crypto_all_kqblocked) ||
++					cryptoproc == (pid_t) -1);
++			crp_sleep = 0;
++			if (signal_pending (current)) {
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
++				spin_lock_irq(&current->sigmask_lock);
++#endif
++				flush_signals(current);
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
++				spin_unlock_irq(&current->sigmask_lock);
++#endif
++			}
++			CRYPTO_Q_LOCK();
++			dprintk("%s - awake\n", __FUNCTION__);
++			if (cryptoproc == (pid_t) -1)
++				break;
++			cryptostats.cs_intrs++;
++		}
++	}
++	CRYPTO_Q_UNLOCK();
++	complete_and_exit(&cryptoproc_exited, 0);
++}
++
++/*
++ * Crypto returns thread, does callbacks for processed crypto requests.
++ * Callbacks are done here, rather than in the crypto drivers, because
++ * callbacks typically are expensive and would slow interrupt handling.
++ */
++static int
++crypto_ret_proc(void *arg)
++{
++	struct cryptop *crpt;
++	struct cryptkop *krpt;
++	unsigned long  r_flags;
++
++	ocf_daemonize("crypto_ret");
++
++	CRYPTO_RETQ_LOCK();
++	for (;;) {
++		/* Harvest return q's for completed ops */
++		crpt = NULL;
++		if (!list_empty(&crp_ret_q))
++			crpt = list_entry(crp_ret_q.next, typeof(*crpt), crp_next);
++		if (crpt != NULL)
++			list_del(&crpt->crp_next);
++
++		krpt = NULL;
++		if (!list_empty(&crp_ret_kq))
++			krpt = list_entry(crp_ret_kq.next, typeof(*krpt), krp_next);
++		if (krpt != NULL)
++			list_del(&krpt->krp_next);
++
++		if (crpt != NULL || krpt != NULL) {
++			CRYPTO_RETQ_UNLOCK();
++			/*
++			 * Run callbacks unlocked.
++			 */
++			if (crpt != NULL)
++				crpt->crp_callback(crpt);
++			if (krpt != NULL)
++				krpt->krp_callback(krpt);
++			CRYPTO_RETQ_LOCK();
++		} else {
++			/*
++			 * Nothing more to be processed.  Sleep until we're
++			 * woken because there are more returns to process.
++			 */
++			dprintk("%s - sleeping\n", __FUNCTION__);
++			CRYPTO_RETQ_UNLOCK();
++			wait_event_interruptible(cryptoretproc_wait,
++					cryptoretproc == (pid_t) -1 ||
++					!list_empty(&crp_ret_q) ||
++					!list_empty(&crp_ret_kq));
++			if (signal_pending (current)) {
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
++				spin_lock_irq(&current->sigmask_lock);
++#endif
++				flush_signals(current);
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
++				spin_unlock_irq(&current->sigmask_lock);
++#endif
++			}
++			CRYPTO_RETQ_LOCK();
++			dprintk("%s - awake\n", __FUNCTION__);
++			if (cryptoretproc == (pid_t) -1) {
++				dprintk("%s - EXITING!\n", __FUNCTION__);
++				break;
++			}
++			cryptostats.cs_rets++;
++		}
++	}
++	CRYPTO_RETQ_UNLOCK();
++	complete_and_exit(&cryptoretproc_exited, 0);
++}
++
++
++#if 0 /* should put this into /proc or something */
++static void
++db_show_drivers(void)
++{
++	int hid;
++
++	db_printf("%12s %4s %4s %8s %2s %2s\n"
++		, "Device"
++		, "Ses"
++		, "Kops"
++		, "Flags"
++		, "QB"
++		, "KB"
++	);
++	for (hid = 0; hid < crypto_drivers_num; hid++) {
++		const struct cryptocap *cap = &crypto_drivers[hid];
++		if (cap->cc_dev == NULL)
++			continue;
++		db_printf("%-12s %4u %4u %08x %2u %2u\n"
++		    , device_get_nameunit(cap->cc_dev)
++		    , cap->cc_sessions
++		    , cap->cc_koperations
++		    , cap->cc_flags
++		    , cap->cc_qblocked
++		    , cap->cc_kqblocked
++		);
++	}
++}
++
++DB_SHOW_COMMAND(crypto, db_show_crypto)
++{
++	struct cryptop *crp;
++
++	db_show_drivers();
++	db_printf("\n");
++
++	db_printf("%4s %8s %4s %4s %4s %4s %8s %8s\n",
++	    "HID", "Caps", "Ilen", "Olen", "Etype", "Flags",
++	    "Desc", "Callback");
++	TAILQ_FOREACH(crp, &crp_q, crp_next) {
++		db_printf("%4u %08x %4u %4u %4u %04x %8p %8p\n"
++		    , (int) CRYPTO_SESID2HID(crp->crp_sid)
++		    , (int) CRYPTO_SESID2CAPS(crp->crp_sid)
++		    , crp->crp_ilen, crp->crp_olen
++		    , crp->crp_etype
++		    , crp->crp_flags
++		    , crp->crp_desc
++		    , crp->crp_callback
++		);
++	}
++	if (!TAILQ_EMPTY(&crp_ret_q)) {
++		db_printf("\n%4s %4s %4s %8s\n",
++		    "HID", "Etype", "Flags", "Callback");
++		TAILQ_FOREACH(crp, &crp_ret_q, crp_next) {
++			db_printf("%4u %4u %04x %8p\n"
++			    , (int) CRYPTO_SESID2HID(crp->crp_sid)
++			    , crp->crp_etype
++			    , crp->crp_flags
++			    , crp->crp_callback
++			);
++		}
++	}
++}
++
++DB_SHOW_COMMAND(kcrypto, db_show_kcrypto)
++{
++	struct cryptkop *krp;
++
++	db_show_drivers();
++	db_printf("\n");
++
++	db_printf("%4s %5s %4s %4s %8s %4s %8s\n",
++	    "Op", "Status", "#IP", "#OP", "CRID", "HID", "Callback");
++	TAILQ_FOREACH(krp, &crp_kq, krp_next) {
++		db_printf("%4u %5u %4u %4u %08x %4u %8p\n"
++		    , krp->krp_op
++		    , krp->krp_status
++		    , krp->krp_iparams, krp->krp_oparams
++		    , krp->krp_crid, krp->krp_hid
++		    , krp->krp_callback
++		);
++	}
++	if (!TAILQ_EMPTY(&crp_ret_q)) {
++		db_printf("%4s %5s %8s %4s %8s\n",
++		    "Op", "Status", "CRID", "HID", "Callback");
++		TAILQ_FOREACH(krp, &crp_ret_kq, krp_next) {
++			db_printf("%4u %5u %08x %4u %8p\n"
++			    , krp->krp_op
++			    , krp->krp_status
++			    , krp->krp_crid, krp->krp_hid
++			    , krp->krp_callback
++			);
++		}
++	}
++}
++#endif
++
++
++static int
++crypto_init(void)
++{
++	int error;
++
++	dprintk("%s(0x%x)\n", __FUNCTION__, (int) crypto_init);
++
++	if (crypto_initted)
++		return 0;
++	crypto_initted = 1;
++
++	spin_lock_init(&crypto_drivers_lock);
++	spin_lock_init(&crypto_q_lock);
++	spin_lock_init(&crypto_ret_q_lock);
++
++	cryptop_zone = kmem_cache_create("cryptop", sizeof(struct cryptop),
++				       0, SLAB_HWCACHE_ALIGN, NULL
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
++				       , NULL
++#endif
++					);
++
++	cryptodesc_zone = kmem_cache_create("cryptodesc", sizeof(struct cryptodesc),
++				       0, SLAB_HWCACHE_ALIGN, NULL
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
++				       , NULL
++#endif
++					);
++
++	if (cryptodesc_zone == NULL || cryptop_zone == NULL) {
++		printk("crypto: crypto_init cannot setup crypto zones\n");
++		error = ENOMEM;
++		goto bad;
++	}
++
++	crypto_drivers_num = CRYPTO_DRIVERS_INITIAL;
++	crypto_drivers = kmalloc(crypto_drivers_num * sizeof(struct cryptocap),
++			GFP_KERNEL);
++	if (crypto_drivers == NULL) {
++		printk("crypto: crypto_init cannot setup crypto drivers\n");
++		error = ENOMEM;
++		goto bad;
++	}
++
++	memset(crypto_drivers, 0, crypto_drivers_num * sizeof(struct cryptocap));
++
++	init_completion(&cryptoproc_exited);
++	init_completion(&cryptoretproc_exited);
++
++	cryptoproc = 0; /* to avoid race condition where proc runs first */
++	cryptoproc = kernel_thread(crypto_proc, NULL, CLONE_FS|CLONE_FILES);
++	if (cryptoproc < 0) {
++		error = cryptoproc;
++		printk("crypto: crypto_init cannot start crypto thread; error %d",
++			error);
++		goto bad;
++	}
++
++	cryptoretproc = 0; /* to avoid race condition where proc runs first */
++	cryptoretproc = kernel_thread(crypto_ret_proc, NULL, CLONE_FS|CLONE_FILES);
++	if (cryptoretproc < 0) {
++		error = cryptoretproc;
++		printk("crypto: crypto_init cannot start cryptoret thread; error %d",
++				error);
++		goto bad;
++	}
++
++	return 0;
++bad:
++	crypto_exit();
++	return error;
++}
++
++
++static void
++crypto_exit(void)
++{
++	pid_t p;
++	unsigned long d_flags;
++
++	dprintk("%s()\n", __FUNCTION__);
++
++	/*
++	 * Terminate any crypto threads.
++	 */
++
++	CRYPTO_DRIVER_LOCK();
++	p = cryptoproc;
++	cryptoproc = (pid_t) -1;
++	kill_pid(p, SIGTERM, 1);
++	wake_up_interruptible(&cryptoproc_wait);
++	CRYPTO_DRIVER_UNLOCK();
++
++	wait_for_completion(&cryptoproc_exited);
++
++	CRYPTO_DRIVER_LOCK();
++	p = cryptoretproc;
++	cryptoretproc = (pid_t) -1;
++	kill_pid(p, SIGTERM, 1);
++	wake_up_interruptible(&cryptoretproc_wait);
++	CRYPTO_DRIVER_UNLOCK();
++
++	wait_for_completion(&cryptoretproc_exited);
++
++	/* XXX flush queues??? */
++
++	/* 
++	 * Reclaim dynamically allocated resources.
++	 */
++	if (crypto_drivers != NULL)
++		kfree(crypto_drivers);
++
++	if (cryptodesc_zone != NULL)
++		kmem_cache_destroy(cryptodesc_zone);
++	if (cryptop_zone != NULL)
++		kmem_cache_destroy(cryptop_zone);
++}
++
++
++EXPORT_SYMBOL(crypto_newsession);
++EXPORT_SYMBOL(crypto_freesession);
++EXPORT_SYMBOL(crypto_get_driverid);
++EXPORT_SYMBOL(crypto_kregister);
++EXPORT_SYMBOL(crypto_register);
++EXPORT_SYMBOL(crypto_unregister);
++EXPORT_SYMBOL(crypto_unregister_all);
++EXPORT_SYMBOL(crypto_unblock);
++EXPORT_SYMBOL(crypto_dispatch);
++EXPORT_SYMBOL(crypto_kdispatch);
++EXPORT_SYMBOL(crypto_freereq);
++EXPORT_SYMBOL(crypto_getreq);
++EXPORT_SYMBOL(crypto_done);
++EXPORT_SYMBOL(crypto_kdone);
++EXPORT_SYMBOL(crypto_getfeat);
++EXPORT_SYMBOL(crypto_userasymcrypto);
++EXPORT_SYMBOL(crypto_getcaps);
++EXPORT_SYMBOL(crypto_find_driver);
++EXPORT_SYMBOL(crypto_find_device_byhid);
++
++module_init(crypto_init);
++module_exit(crypto_exit);
++
++MODULE_LICENSE("BSD");
++MODULE_AUTHOR("David McCullough <david_mccullough@securecomputing.com>");
++MODULE_DESCRIPTION("OCF (OpenBSD Cryptographic Framework)");
+diff -Nur linux-2.6.30.orig/crypto/ocf/cryptodev.c linux-2.6.30/crypto/ocf/cryptodev.c
+--- linux-2.6.30.orig/crypto/ocf/cryptodev.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.30/crypto/ocf/cryptodev.c	2009-06-11 10:55:27.000000000 +0200
+@@ -0,0 +1,1048 @@
++/*	$OpenBSD: cryptodev.c,v 1.52 2002/06/19 07:22:46 deraadt Exp $	*/
++
++/*-
++ * Linux port done by David McCullough <david_mccullough@securecomputing.com>
++ * Copyright (C) 2006-2007 David McCullough
++ * Copyright (C) 2004-2005 Intel Corporation.
++ * The license and original author are listed below.
++ *
++ * Copyright (c) 2001 Theo de Raadt
++ * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ *
++ * 1. Redistributions of source code must retain the above copyright
++ *   notice, this list of conditions and the following disclaimer.
++ * 2. Redistributions in binary form must reproduce the above copyright
++ *   notice, this list of conditions and the following disclaimer in the
++ *   documentation and/or other materials provided with the distribution.
++ * 3. The name of the author may not be used to endorse or promote products
++ *   derived from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
++ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
++ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
++ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
++ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
++ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ *
++ * Effort sponsored in part by the Defense Advanced Research Projects
++ * Agency (DARPA) and Air Force Research Laboratory, Air Force
++ * Materiel Command, USAF, under agreement number F30602-01-2-0537.
++ *
++__FBSDID("$FreeBSD: src/sys/opencrypto/cryptodev.c,v 1.34 2007/05/09 19:37:02 gnn Exp $");
++ */
++
++#ifndef AUTOCONF_INCLUDED
++#include <linux/config.h>
++#endif
++#include <linux/types.h>
++#include <linux/time.h>
++#include <linux/delay.h>
++#include <linux/list.h>
++#include <linux/init.h>
++#include <linux/sched.h>
++#include <linux/unistd.h>
++#include <linux/module.h>
++#include <linux/wait.h>
++#include <linux/slab.h>
++#include <linux/fs.h>
++#include <linux/dcache.h>
++#include <linux/file.h>
++#include <linux/mount.h>
++#include <linux/miscdevice.h>
++#include <linux/version.h>
++#include <asm/uaccess.h>
++
++#include <cryptodev.h>
++#include <uio.h>
++
++extern asmlinkage long sys_dup(unsigned int fildes);
++
++#define debug cryptodev_debug
++int cryptodev_debug = 0;
++module_param(cryptodev_debug, int, 0644);
++MODULE_PARM_DESC(cryptodev_debug, "Enable cryptodev debug");
++
++struct csession_info {
++	u_int16_t	blocksize;
++	u_int16_t	minkey, maxkey;
++
++	u_int16_t	keysize;
++	/* u_int16_t	hashsize;  */
++	u_int16_t	authsize;
++	/* u_int16_t	ctxsize; */
++};
++
++struct csession {
++	struct list_head	list;
++	u_int64_t	sid;
++	u_int32_t	ses;
++
++	wait_queue_head_t waitq;
++
++	u_int32_t	cipher;
++
++	u_int32_t	mac;
++
++	caddr_t		key;
++	int		keylen;
++	u_char		tmp_iv[EALG_MAX_BLOCK_LEN];
++
++	caddr_t		mackey;
++	int		mackeylen;
++
++	struct csession_info info;
++
++	struct iovec	iovec;
++	struct uio	uio;
++	int		error;
++};
++
++struct fcrypt {
++	struct list_head	csessions;
++	int		sesn;
++};
++
++static struct csession *csefind(struct fcrypt *, u_int);
++static int csedelete(struct fcrypt *, struct csession *);
++static struct csession *cseadd(struct fcrypt *, struct csession *);
++static struct csession *csecreate(struct fcrypt *, u_int64_t,
++		struct cryptoini *crie, struct cryptoini *cria, struct csession_info *);
++static int csefree(struct csession *);
++
++static	int cryptodev_op(struct csession *, struct crypt_op *);
++static	int cryptodev_key(struct crypt_kop *);
++static	int cryptodev_find(struct crypt_find_op *);
++
++static int cryptodev_cb(void *);
++static int cryptodev_open(struct inode *inode, struct file *filp);
++
++/*
++ * Check a crypto identifier to see if it requested
++ * a valid crid and its capabilities match.
++ */
++static int
++checkcrid(int crid)
++{
++	int hid = crid & ~(CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_HARDWARE);
++	int typ = crid & (CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_HARDWARE);
++	int caps = 0;
++	
++	/* if the user hasn't selected a driver, then just call newsession */
++	if (hid == 0 && typ != 0)
++		return 0;
++
++	caps = crypto_getcaps(hid);
++
++	/* didn't find anything with capabilities */
++	if (caps == 0) {
++		dprintk("%s: hid=%x typ=%x not matched\n", __FUNCTION__, hid, typ);
++		return EINVAL;
++	}
++	
++	/* the user didn't specify SW or HW, so the driver is ok */
++	if (typ == 0)
++		return 0;
++
++	/* if the type specified didn't match */
++	if (typ != (caps & (CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_HARDWARE))) {
++		dprintk("%s: hid=%x typ=%x caps=%x not matched\n", __FUNCTION__,
++				hid, typ, caps);
++		return EINVAL;
++	}
++
++	return 0;
++}
++
++static int
++cryptodev_op(struct csession *cse, struct crypt_op *cop)
++{
++	struct cryptop *crp = NULL;
++	struct cryptodesc *crde = NULL, *crda = NULL;
++	int error = 0;
++
++	dprintk("%s()\n", __FUNCTION__);
++	if (cop->len > CRYPTO_MAX_DATA_LEN) {
++		dprintk("%s: %d > %d\n", __FUNCTION__, cop->len, CRYPTO_MAX_DATA_LEN);
++		return (E2BIG);
++	}
++
++	if (cse->info.blocksize && (cop->len % cse->info.blocksize) != 0) {
++		dprintk("%s: blocksize=%d len=%d\n", __FUNCTION__, cse->info.blocksize,
++				cop->len);
++		return (EINVAL);
++	}
++
++	cse->uio.uio_iov = &cse->iovec;
++	cse->uio.uio_iovcnt = 1;
++	cse->uio.uio_offset = 0;
++#if 0
++	cse->uio.uio_resid = cop->len;
++	cse->uio.uio_segflg = UIO_SYSSPACE;
++	cse->uio.uio_rw = UIO_WRITE;
++	cse->uio.uio_td = td;
++#endif
++	cse->uio.uio_iov[0].iov_len = cop->len;
++	if (cse->info.authsize)
++		cse->uio.uio_iov[0].iov_len += cse->info.authsize;
++	cse->uio.uio_iov[0].iov_base = kmalloc(cse->uio.uio_iov[0].iov_len,
++			GFP_KERNEL);
++
++	if (cse->uio.uio_iov[0].iov_base == NULL) {
++		dprintk("%s: iov_base kmalloc(%d) failed\n", __FUNCTION__,
++				cse->uio.uio_iov[0].iov_len);
++		return (ENOMEM);
++	}
++
++	crp = crypto_getreq((cse->info.blocksize != 0) + (cse->info.authsize != 0));
++	if (crp == NULL) {
++		dprintk("%s: ENOMEM\n", __FUNCTION__);
++		error = ENOMEM;
++		goto bail;
++	}
++
++	if (cse->info.authsize) {
++		crda = crp->crp_desc;
++		if (cse->info.blocksize)
++			crde = crda->crd_next;
++	} else {
++		if (cse->info.blocksize)
++			crde = crp->crp_desc;
++		else {
++			dprintk("%s: bad request\n", __FUNCTION__);
++			error = EINVAL;
++			goto bail;
++		}
++	}
++
++	if ((error = copy_from_user(cse->uio.uio_iov[0].iov_base, cop->src,
++					cop->len))) {
++		dprintk("%s: bad copy\n", __FUNCTION__);
++		goto bail;
++	}
++
++	if (crda) {
++		crda->crd_skip = 0;
++		crda->crd_len = cop->len;
++		crda->crd_inject = cop->len;
++
++		crda->crd_alg = cse->mac;
++		crda->crd_key = cse->mackey;
++		crda->crd_klen = cse->mackeylen * 8;
++	}
++
++	if (crde) {
++		if (cop->op == COP_ENCRYPT)
++			crde->crd_flags |= CRD_F_ENCRYPT;
++		else
++			crde->crd_flags &= ~CRD_F_ENCRYPT;
++		crde->crd_len = cop->len;
++		crde->crd_inject = 0;
++
++		crde->crd_alg = cse->cipher;
++		crde->crd_key = cse->key;
++		crde->crd_klen = cse->keylen * 8;
++	}
++
++	crp->crp_ilen = cse->uio.uio_iov[0].iov_len;
++	crp->crp_flags = CRYPTO_F_IOV | CRYPTO_F_CBIMM
++		       | (cop->flags & COP_F_BATCH);
++	crp->crp_buf = (caddr_t)&cse->uio;
++	crp->crp_callback = (int (*) (struct cryptop *)) cryptodev_cb;
++	crp->crp_sid = cse->sid;
++	crp->crp_opaque = (void *)cse;
++
++	if (cop->iv) {
++		if (crde == NULL) {
++			error = EINVAL;
++			dprintk("%s no crde\n", __FUNCTION__);
++			goto bail;
++		}
++		if (cse->cipher == CRYPTO_ARC4) { /* XXX use flag? */
++			error = EINVAL;
++			dprintk("%s arc4 with IV\n", __FUNCTION__);
++			goto bail;
++		}
++		if ((error = copy_from_user(cse->tmp_iv, cop->iv,
++						cse->info.blocksize))) {
++			dprintk("%s bad iv copy\n", __FUNCTION__);
++			goto bail;
++		}
++		memcpy(crde->crd_iv, cse->tmp_iv, cse->info.blocksize);
++		crde->crd_flags |= CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT;
++		crde->crd_skip = 0;
++	} else if (cse->cipher == CRYPTO_ARC4) { /* XXX use flag? */
++		crde->crd_skip = 0;
++	} else if (crde) {
++		crde->crd_flags |= CRD_F_IV_PRESENT;
++		crde->crd_skip = cse->info.blocksize;
++		crde->crd_len -= cse->info.blocksize;
++	}
++
++	if (cop->mac && crda == NULL) {
++		error = EINVAL;
++		dprintk("%s no crda\n", __FUNCTION__);
++		goto bail;
++	}
++
++	/*
++	 * Let the dispatch run unlocked, then, interlock against the
++	 * callback before checking if the operation completed and going
++	 * to sleep.  This ensures drivers don't inherit our lock which
++	 * results in a lock order reversal between crypto_dispatch forced
++	 * entry and the crypto_done callback into us.
++	 */
++	error = crypto_dispatch(crp);
++	if (error == 0) {
++		dprintk("%s about to WAIT\n", __FUNCTION__);
++		/*
++		 * we really need to wait for driver to complete to maintain
++		 * state,  luckily interrupts will be remembered
++		 */
++		do {
++			error = wait_event_interruptible(crp->crp_waitq,
++					((crp->crp_flags & CRYPTO_F_DONE) != 0));
++			/*
++			 * we can't break out of this loop or we will leave behind
++			 * a huge mess,  however,  staying here means if your driver
++			 * is broken user applications can hang and not be killed.
++			 * The solution,  fix your driver :-)
++			 */
++			if (error) {
++				schedule();
++				error = 0;
++			}
++		} while ((crp->crp_flags & CRYPTO_F_DONE) == 0);
++		dprintk("%s finished WAITING error=%d\n", __FUNCTION__, error);
++	}
++
++	if (crp->crp_etype != 0) {
++		error = crp->crp_etype;
++		dprintk("%s error in crp processing\n", __FUNCTION__);
++		goto bail;
++	}
++
++	if (cse->error) {
++		error = cse->error;
++		dprintk("%s error in cse processing\n", __FUNCTION__);
++		goto bail;
++	}
++
++	if (cop->dst && (error = copy_to_user(cop->dst,
++					cse->uio.uio_iov[0].iov_base, cop->len))) {
++		dprintk("%s bad dst copy\n", __FUNCTION__);
++		goto bail;
++	}
++
++	if (cop->mac &&
++			(error=copy_to_user(cop->mac,
++				(caddr_t)cse->uio.uio_iov[0].iov_base + cop->len,
++				cse->info.authsize))) {
++		dprintk("%s bad mac copy\n", __FUNCTION__);
++		goto bail;
++	}
++
++bail:
++	if (crp)
++		crypto_freereq(crp);
++	if (cse->uio.uio_iov[0].iov_base)
++		kfree(cse->uio.uio_iov[0].iov_base);
++
++	return (error);
++}
++
++static int
++cryptodev_cb(void *op)
++{
++	struct cryptop *crp = (struct cryptop *) op;
++	struct csession *cse = (struct csession *)crp->crp_opaque;
++	int error;
++
++	dprintk("%s()\n", __FUNCTION__);
++	error = crp->crp_etype;
++	if (error == EAGAIN) {
++		crp->crp_flags &= ~CRYPTO_F_DONE;
++#ifdef NOTYET
++		/*
++		 * DAVIDM I am fairly sure that we should turn this into a batch
++		 * request to stop bad karma/lockup, revisit
++		 */
++		crp->crp_flags |= CRYPTO_F_BATCH;
++#endif
++		return crypto_dispatch(crp);
++	}
++	if (error != 0 || (crp->crp_flags & CRYPTO_F_DONE)) {
++		cse->error = error;
++		wake_up_interruptible(&crp->crp_waitq);
++	}
++	return (0);
++}
++
++static int
++cryptodevkey_cb(void *op)
++{
++	struct cryptkop *krp = (struct cryptkop *) op;
++	dprintk("%s()\n", __FUNCTION__);
++	wake_up_interruptible(&krp->krp_waitq);
++	return (0);
++}
++
++static int
++cryptodev_key(struct crypt_kop *kop)
++{
++	struct cryptkop *krp = NULL;
++	int error = EINVAL;
++	int in, out, size, i;
++
++	dprintk("%s()\n", __FUNCTION__);
++	if (kop->crk_iparams + kop->crk_oparams > CRK_MAXPARAM) {
++		dprintk("%s params too big\n", __FUNCTION__);
++		return (EFBIG);
++	}
++
++	in = kop->crk_iparams;
++	out = kop->crk_oparams;
++	switch (kop->crk_op) {
++	case CRK_MOD_EXP:
++		if (in == 3 && out == 1)
++			break;
++		return (EINVAL);
++	case CRK_MOD_EXP_CRT:
++		if (in == 6 && out == 1)
++			break;
++		return (EINVAL);
++	case CRK_DSA_SIGN:
++		if (in == 5 && out == 2)
++			break;
++		return (EINVAL);
++	case CRK_DSA_VERIFY:
++		if (in == 7 && out == 0)
++			break;
++		return (EINVAL);
++	case CRK_DH_COMPUTE_KEY:
++		if (in == 3 && out == 1)
++			break;
++		return (EINVAL);
++	default:
++		return (EINVAL);
++	}
++
++	krp = (struct cryptkop *)kmalloc(sizeof *krp, GFP_KERNEL);
++	if (!krp)
++		return (ENOMEM);
++	bzero(krp, sizeof *krp);
++	krp->krp_op = kop->crk_op;
++	krp->krp_status = kop->crk_status;
++	krp->krp_iparams = kop->crk_iparams;
++	krp->krp_oparams = kop->crk_oparams;
++	krp->krp_crid = kop->crk_crid;
++	krp->krp_status = 0;
++	krp->krp_flags = CRYPTO_KF_CBIMM;
++	krp->krp_callback = (int (*) (struct cryptkop *)) cryptodevkey_cb;
++	init_waitqueue_head(&krp->krp_waitq);
++
++	for (i = 0; i < CRK_MAXPARAM; i++)
++		krp->krp_param[i].crp_nbits = kop->crk_param[i].crp_nbits;
++	for (i = 0; i < krp->krp_iparams + krp->krp_oparams; i++) {
++		size = (krp->krp_param[i].crp_nbits + 7) / 8;
++		if (size == 0)
++			continue;
++		krp->krp_param[i].crp_p = (caddr_t) kmalloc(size, GFP_KERNEL);
++		if (i >= krp->krp_iparams)
++			continue;
++		error = copy_from_user(krp->krp_param[i].crp_p,
++				kop->crk_param[i].crp_p, size);
++		if (error)
++			goto fail;
++	}
++
++	error = crypto_kdispatch(krp);
++	if (error)
++		goto fail;
++
++	do {
++		error = wait_event_interruptible(krp->krp_waitq,
++				((krp->krp_flags & CRYPTO_KF_DONE) != 0));
++		/*
++		 * we can't break out of this loop or we will leave behind
++		 * a huge mess,  however,  staying here means if your driver
++		 * is broken user applications can hang and not be killed.
++		 * The solution,  fix your driver :-)
++		 */
++		if (error) {
++			schedule();
++			error = 0;
++		}
++	} while ((krp->krp_flags & CRYPTO_KF_DONE) == 0);
++
++	dprintk("%s finished WAITING error=%d\n", __FUNCTION__, error);
++	
++	kop->crk_crid = krp->krp_crid;		/* device that did the work */
++	if (krp->krp_status != 0) {
++		error = krp->krp_status;
++		goto fail;
++	}
++
++	for (i = krp->krp_iparams; i < krp->krp_iparams + krp->krp_oparams; i++) {
++		size = (krp->krp_param[i].crp_nbits + 7) / 8;
++		if (size == 0)
++			continue;
++		error = copy_to_user(kop->crk_param[i].crp_p, krp->krp_param[i].crp_p,
++				size);
++		if (error)
++			goto fail;
++	}
++
++fail:
++	if (krp) {
++		kop->crk_status = krp->krp_status;
++		for (i = 0; i < CRK_MAXPARAM; i++) {
++			if (krp->krp_param[i].crp_p)
++				kfree(krp->krp_param[i].crp_p);
++		}
++		kfree(krp);
++	}
++	return (error);
++}
++
++static int
++cryptodev_find(struct crypt_find_op *find)
++{
++	device_t dev;
++
++	if (find->crid != -1) {
++		dev = crypto_find_device_byhid(find->crid);
++		if (dev == NULL)
++			return (ENOENT);
++		strlcpy(find->name, device_get_nameunit(dev),
++		    sizeof(find->name));
++	} else {
++		find->crid = crypto_find_driver(find->name);
++		if (find->crid == -1)
++			return (ENOENT);
++	}
++	return (0);
++}
++
++static struct csession *
++csefind(struct fcrypt *fcr, u_int ses)
++{
++	struct csession *cse;
++
++	dprintk("%s()\n", __FUNCTION__);
++	list_for_each_entry(cse, &fcr->csessions, list)
++		if (cse->ses == ses)
++			return (cse);
++	return (NULL);
++}
++
++static int
++csedelete(struct fcrypt *fcr, struct csession *cse_del)
++{
++	struct csession *cse;
++
++	dprintk("%s()\n", __FUNCTION__);
++	list_for_each_entry(cse, &fcr->csessions, list) {
++		if (cse == cse_del) {
++			list_del(&cse->list);
++			return (1);
++		}
++	}
++	return (0);
++}
++	
++static struct csession *
++cseadd(struct fcrypt *fcr, struct csession *cse)
++{
++	dprintk("%s()\n", __FUNCTION__);
++	list_add_tail(&cse->list, &fcr->csessions);
++	cse->ses = fcr->sesn++;
++	return (cse);
++}
++
++static struct csession *
++csecreate(struct fcrypt *fcr, u_int64_t sid, struct cryptoini *crie,
++	struct cryptoini *cria, struct csession_info *info)
++{
++	struct csession *cse;
++
++	dprintk("%s()\n", __FUNCTION__);
++	cse = (struct csession *) kmalloc(sizeof(struct csession), GFP_KERNEL);
++	if (cse == NULL)
++		return NULL;
++	memset(cse, 0, sizeof(struct csession));
++
++	INIT_LIST_HEAD(&cse->list);
++	init_waitqueue_head(&cse->waitq);
++
++	cse->key = crie->cri_key;
++	cse->keylen = crie->cri_klen/8;
++	cse->mackey = cria->cri_key;
++	cse->mackeylen = cria->cri_klen/8;
++	cse->sid = sid;
++	cse->cipher = crie->cri_alg;
++	cse->mac = cria->cri_alg;
++	cse->info = *info;
++	cseadd(fcr, cse);
++	return (cse);
++}
++
++static int
++csefree(struct csession *cse)
++{
++	int error;
++
++	dprintk("%s()\n", __FUNCTION__);
++	error = crypto_freesession(cse->sid);
++	if (cse->key)
++		kfree(cse->key);
++	if (cse->mackey)
++		kfree(cse->mackey);
++	kfree(cse);
++	return(error);
++}
++
++static int
++cryptodev_ioctl(
++	struct inode *inode,
++	struct file *filp,
++	unsigned int cmd,
++	unsigned long arg)
++{
++	struct cryptoini cria, crie;
++	struct fcrypt *fcr = filp->private_data;
++	struct csession *cse;
++	struct csession_info info;
++	struct session2_op sop;
++	struct crypt_op cop;
++	struct crypt_kop kop;
++	struct crypt_find_op fop;
++	u_int64_t sid;
++	u_int32_t ses;
++	int feat, fd, error = 0, crid;
++	mm_segment_t fs;
++
++	dprintk("%s(cmd=%x arg=%lx)\n", __FUNCTION__, cmd, arg);
++
++	switch (cmd) {
++
++	case CRIOGET: {
++		dprintk("%s(CRIOGET)\n", __FUNCTION__);
++		fs = get_fs();
++		set_fs(get_ds());
++		for (fd = 0; fd < files_fdtable(current->files)->max_fds; fd++)
++			if (files_fdtable(current->files)->fd[fd] == filp)
++				break;
++		fd = sys_dup(fd);
++		set_fs(fs);
++		put_user(fd, (int *) arg);
++		return IS_ERR_VALUE(fd) ? fd : 0;
++		}
++
++#define	CIOCGSESSSTR	(cmd == CIOCGSESSION ? "CIOCGSESSION" : "CIOCGSESSION2")
++	case CIOCGSESSION:
++	case CIOCGSESSION2:
++		dprintk("%s(%s)\n", __FUNCTION__, CIOCGSESSSTR);
++		memset(&crie, 0, sizeof(crie));
++		memset(&cria, 0, sizeof(cria));
++		memset(&info, 0, sizeof(info));
++		memset(&sop, 0, sizeof(sop));
++
++		if (copy_from_user(&sop, (void*)arg, (cmd == CIOCGSESSION) ?
++					sizeof(struct session_op) : sizeof(sop))) {
++			dprintk("%s(%s) - bad copy\n", __FUNCTION__, CIOCGSESSSTR);
++			error = EFAULT;
++			goto bail;
++		}
++
++		switch (sop.cipher) {
++		case 0:
++			dprintk("%s(%s) - no cipher\n", __FUNCTION__, CIOCGSESSSTR);
++			break;
++		case CRYPTO_NULL_CBC:
++			info.blocksize = NULL_BLOCK_LEN;
++			info.minkey = NULL_MIN_KEY_LEN;
++			info.maxkey = NULL_MAX_KEY_LEN;
++			break;
++		case CRYPTO_DES_CBC:
++			info.blocksize = DES_BLOCK_LEN;
++			info.minkey = DES_MIN_KEY_LEN;
++			info.maxkey = DES_MAX_KEY_LEN;
++			break;
++		case CRYPTO_3DES_CBC:
++			info.blocksize = DES3_BLOCK_LEN;
++			info.minkey = DES3_MIN_KEY_LEN;
++			info.maxkey = DES3_MAX_KEY_LEN;
++			break;
++		case CRYPTO_BLF_CBC:
++			info.blocksize = BLOWFISH_BLOCK_LEN;
++			info.minkey = BLOWFISH_MIN_KEY_LEN;
++			info.maxkey = BLOWFISH_MAX_KEY_LEN;
++			break;
++		case CRYPTO_CAST_CBC:
++			info.blocksize = CAST128_BLOCK_LEN;
++			info.minkey = CAST128_MIN_KEY_LEN;
++			info.maxkey = CAST128_MAX_KEY_LEN;
++			break;
++		case CRYPTO_SKIPJACK_CBC:
++			info.blocksize = SKIPJACK_BLOCK_LEN;
++			info.minkey = SKIPJACK_MIN_KEY_LEN;
++			info.maxkey = SKIPJACK_MAX_KEY_LEN;
++			break;
++		case CRYPTO_AES_CBC:
++			info.blocksize = AES_BLOCK_LEN;
++			info.minkey = AES_MIN_KEY_LEN;
++			info.maxkey = AES_MAX_KEY_LEN;
++			break;
++		case CRYPTO_ARC4:
++			info.blocksize = ARC4_BLOCK_LEN;
++			info.minkey = ARC4_MIN_KEY_LEN;
++			info.maxkey = ARC4_MAX_KEY_LEN;
++			break;
++		case CRYPTO_CAMELLIA_CBC:
++			info.blocksize = CAMELLIA_BLOCK_LEN;
++			info.minkey = CAMELLIA_MIN_KEY_LEN;
++			info.maxkey = CAMELLIA_MAX_KEY_LEN;
++			break;
++		default:
++			dprintk("%s(%s) - bad cipher\n", __FUNCTION__, CIOCGSESSSTR);
++			error = EINVAL;
++			goto bail;
++		}
++
++		switch (sop.mac) {
++		case 0:
++			dprintk("%s(%s) - no mac\n", __FUNCTION__, CIOCGSESSSTR);
++			break;
++		case CRYPTO_NULL_HMAC:
++			info.authsize = NULL_HASH_LEN;
++			break;
++		case CRYPTO_MD5:
++			info.authsize = MD5_HASH_LEN;
++			break;
++		case CRYPTO_SHA1:
++			info.authsize = SHA1_HASH_LEN;
++			break;
++		case CRYPTO_SHA2_256:
++			info.authsize = SHA2_256_HASH_LEN;
++			break;
++		case CRYPTO_SHA2_384:
++			info.authsize = SHA2_384_HASH_LEN;
++  			break;
++		case CRYPTO_SHA2_512:
++			info.authsize = SHA2_512_HASH_LEN;
++			break;
++		case CRYPTO_RIPEMD160:
++			info.authsize = RIPEMD160_HASH_LEN;
++			break;
++		case CRYPTO_MD5_HMAC:
++			info.authsize = MD5_HASH_LEN;
++			break;
++		case CRYPTO_SHA1_HMAC:
++			info.authsize = SHA1_HASH_LEN;
++			break;
++		case CRYPTO_SHA2_256_HMAC:
++			info.authsize = SHA2_256_HASH_LEN;
++			break;
++		case CRYPTO_SHA2_384_HMAC:
++			info.authsize = SHA2_384_HASH_LEN;
++  			break;
++		case CRYPTO_SHA2_512_HMAC:
++			info.authsize = SHA2_512_HASH_LEN;
++			break;
++		case CRYPTO_RIPEMD160_HMAC:
++			info.authsize = RIPEMD160_HASH_LEN;
++			break;
++		default:
++			dprintk("%s(%s) - bad mac\n", __FUNCTION__, CIOCGSESSSTR);
++			error = EINVAL;
++			goto bail;
++		}
++
++		if (info.blocksize) {
++			crie.cri_alg = sop.cipher;
++			crie.cri_klen = sop.keylen * 8;
++			if ((info.maxkey && sop.keylen > info.maxkey) ||
++				   	sop.keylen < info.minkey) {
++				dprintk("%s(%s) - bad key\n", __FUNCTION__, CIOCGSESSSTR);
++				error = EINVAL;
++				goto bail;
++			}
++
++			crie.cri_key = (u_int8_t *) kmalloc(crie.cri_klen/8+1, GFP_KERNEL);
++			if (copy_from_user(crie.cri_key, sop.key,
++							crie.cri_klen/8)) {
++				dprintk("%s(%s) - bad copy\n", __FUNCTION__, CIOCGSESSSTR);
++				error = EFAULT;
++				goto bail;
++			}
++			if (info.authsize)
++				crie.cri_next = &cria;
++		}
++
++		if (info.authsize) {
++			cria.cri_alg = sop.mac;
++			cria.cri_klen = sop.mackeylen * 8;
++			if ((info.maxkey && sop.mackeylen > info.maxkey) ||
++					sop.keylen < info.minkey) {
++				dprintk("%s(%s) - mackeylen %d\n", __FUNCTION__, CIOCGSESSSTR,
++						sop.mackeylen);
++				error = EINVAL;
++				goto bail;
++			}
++
++			if (cria.cri_klen) {
++				cria.cri_key = (u_int8_t *) kmalloc(cria.cri_klen/8,GFP_KERNEL);
++				if (copy_from_user(cria.cri_key, sop.mackey,
++								cria.cri_klen / 8)) {
++					dprintk("%s(%s) - bad copy\n", __FUNCTION__, CIOCGSESSSTR);
++					error = EFAULT;
++					goto bail;
++				}
++			}
++		}
++
++		/* NB: CIOCGSESSION2 has the crid */
++		if (cmd == CIOCGSESSION2) {
++			crid = sop.crid;
++			error = checkcrid(crid);
++			if (error) {
++				dprintk("%s(%s) - checkcrid %x\n", __FUNCTION__,
++						CIOCGSESSSTR, error);
++				goto bail;
++			}
++		} else {
++			/* allow either HW or SW to be used */
++			crid = CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE;
++		}
++		error = crypto_newsession(&sid, (info.blocksize ? &crie : &cria), crid);
++		if (error) {
++			dprintk("%s(%s) - newsession %d\n",__FUNCTION__,CIOCGSESSSTR,error);
++			goto bail;
++		}
++
++		cse = csecreate(fcr, sid, &crie, &cria, &info);
++		if (cse == NULL) {
++			crypto_freesession(sid);
++			error = EINVAL;
++			dprintk("%s(%s) - csecreate failed\n", __FUNCTION__, CIOCGSESSSTR);
++			goto bail;
++		}
++		sop.ses = cse->ses;
++
++		if (cmd == CIOCGSESSION2) {
++			/* return hardware/driver id */
++			sop.crid = CRYPTO_SESID2HID(cse->sid);
++		}
++
++		if (copy_to_user((void*)arg, &sop, (cmd == CIOCGSESSION) ?
++					sizeof(struct session_op) : sizeof(sop))) {
++			dprintk("%s(%s) - bad copy\n", __FUNCTION__, CIOCGSESSSTR);
++			error = EFAULT;
++		}
++bail:
++		if (error) {
++			dprintk("%s(%s) - bail %d\n", __FUNCTION__, CIOCGSESSSTR, error);
++			if (crie.cri_key)
++				kfree(crie.cri_key);
++			if (cria.cri_key)
++				kfree(cria.cri_key);
++		}
++		break;
++	case CIOCFSESSION:
++		dprintk("%s(CIOCFSESSION)\n", __FUNCTION__);
++		get_user(ses, (uint32_t*)arg);
++		cse = csefind(fcr, ses);
++		if (cse == NULL) {
++			error = EINVAL;
++			dprintk("%s(CIOCFSESSION) - Fail %d\n", __FUNCTION__, error);
++			break;
++		}
++		csedelete(fcr, cse);
++		error = csefree(cse);
++		break;
++	case CIOCCRYPT:
++		dprintk("%s(CIOCCRYPT)\n", __FUNCTION__);
++		if(copy_from_user(&cop, (void*)arg, sizeof(cop))) {
++			dprintk("%s(CIOCCRYPT) - bad copy\n", __FUNCTION__);
++			error = EFAULT;
++			goto bail;
++		}
++		cse = csefind(fcr, cop.ses);
++		if (cse == NULL) {
++			error = EINVAL;
++			dprintk("%s(CIOCCRYPT) - Fail %d\n", __FUNCTION__, error);
++			break;
++		}
++		error = cryptodev_op(cse, &cop);
++		if(copy_to_user((void*)arg, &cop, sizeof(cop))) {
++			dprintk("%s(CIOCCRYPT) - bad return copy\n", __FUNCTION__);
++			error = EFAULT;
++			goto bail;
++		}
++		break;
++	case CIOCKEY:
++	case CIOCKEY2:
++		dprintk("%s(CIOCKEY)\n", __FUNCTION__);
++		if (!crypto_userasymcrypto)
++			return (EPERM);		/* XXX compat? */
++		if(copy_from_user(&kop, (void*)arg, sizeof(kop))) {
++			dprintk("%s(CIOCKEY) - bad copy\n", __FUNCTION__);
++			error = EFAULT;
++			goto bail;
++		}
++		if (cmd == CIOCKEY) {
++			/* NB: crypto core enforces s/w driver use */
++			kop.crk_crid =
++			    CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE;
++		}
++		error = cryptodev_key(&kop);
++		if(copy_to_user((void*)arg, &kop, sizeof(kop))) {
++			dprintk("%s(CIOCGKEY) - bad return copy\n", __FUNCTION__);
++			error = EFAULT;
++			goto bail;
++		}
++		break;
++	case CIOCASYMFEAT:
++		dprintk("%s(CIOCASYMFEAT)\n", __FUNCTION__);
++		if (!crypto_userasymcrypto) {
++			/*
++			 * NB: if user asym crypto operations are
++			 * not permitted return "no algorithms"
++			 * so well-behaved applications will just
++			 * fallback to doing them in software.
++			 */
++			feat = 0;
++		} else
++			error = crypto_getfeat(&feat);
++		if (!error) {
++		  error = copy_to_user((void*)arg, &feat, sizeof(feat));
++		}
++		break;
++	case CIOCFINDDEV:
++		if (copy_from_user(&fop, (void*)arg, sizeof(fop))) {
++			dprintk("%s(CIOCFINDDEV) - bad copy\n", __FUNCTION__);
++			error = EFAULT;
++			goto bail;
++		}
++		error = cryptodev_find(&fop);
++		if (copy_to_user((void*)arg, &fop, sizeof(fop))) {
++			dprintk("%s(CIOCFINDDEV) - bad return copy\n", __FUNCTION__);
++			error = EFAULT;
++			goto bail;
++		}
++		break;
++	default:
++		dprintk("%s(unknown ioctl 0x%x)\n", __FUNCTION__, cmd);
++		error = EINVAL;
++		break;
++	}
++	return(-error);
++}
++
++#ifdef HAVE_UNLOCKED_IOCTL
++static long
++cryptodev_unlocked_ioctl(
++	struct file *filp,
++	unsigned int cmd,
++	unsigned long arg)
++{
++	return cryptodev_ioctl(NULL, filp, cmd, arg);
++}
++#endif
++
++static int
++cryptodev_open(struct inode *inode, struct file *filp)
++{
++	struct fcrypt *fcr;
++
++	dprintk("%s()\n", __FUNCTION__);
++	if (filp->private_data) {
++		printk("cryptodev: Private data already exists !\n");
++		return(0);
++	}
++
++	fcr = kmalloc(sizeof(*fcr), GFP_KERNEL);
++	if (!fcr) {
++		dprintk("%s() - malloc failed\n", __FUNCTION__);
++		return(-ENOMEM);
++	}
++	memset(fcr, 0, sizeof(*fcr));
++
++	INIT_LIST_HEAD(&fcr->csessions);
++	filp->private_data = fcr;
++	return(0);
++}
++
++static int
++cryptodev_release(struct inode *inode, struct file *filp)
++{
++	struct fcrypt *fcr = filp->private_data;
++	struct csession *cse, *tmp;
++
++	dprintk("%s()\n", __FUNCTION__);
++	if (!filp) {
++		printk("cryptodev: No private data on release\n");
++		return(0);
++	}
++
++	list_for_each_entry_safe(cse, tmp, &fcr->csessions, list) {
++		list_del(&cse->list);
++		(void)csefree(cse);
++	}
++	filp->private_data = NULL;
++	kfree(fcr);
++	return(0);
++}
++
++static struct file_operations cryptodev_fops = {
++	.owner = THIS_MODULE,
++	.open = cryptodev_open,
++	.release = cryptodev_release,
++	.ioctl = cryptodev_ioctl,
++#ifdef HAVE_UNLOCKED_IOCTL
++	.unlocked_ioctl = cryptodev_unlocked_ioctl,
++#endif
++};
++
++static struct miscdevice cryptodev = {
++	.minor = CRYPTODEV_MINOR,
++	.name = "crypto",
++	.fops = &cryptodev_fops,
++};
++
++static int __init
++cryptodev_init(void)
++{
++	int rc;
++
++	dprintk("%s(%p)\n", __FUNCTION__, cryptodev_init);
++	rc = misc_register(&cryptodev);
++	if (rc) {
++		printk(KERN_ERR "cryptodev: registration of /dev/crypto failed\n");
++		return(rc);
++	}
++
++	return(0);
++}
++
++static void __exit
++cryptodev_exit(void)
++{
++	dprintk("%s()\n", __FUNCTION__);
++	misc_deregister(&cryptodev);
++}
++
++module_init(cryptodev_init);
++module_exit(cryptodev_exit);
++
++MODULE_LICENSE("BSD");
++MODULE_AUTHOR("David McCullough <david_mccullough@securecomputing.com>");
++MODULE_DESCRIPTION("Cryptodev (user interface to OCF)");
+diff -Nur linux-2.6.30.orig/crypto/ocf/cryptodev.h linux-2.6.30/crypto/ocf/cryptodev.h
+--- linux-2.6.30.orig/crypto/ocf/cryptodev.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.30/crypto/ocf/cryptodev.h	2009-06-11 10:55:27.000000000 +0200
+@@ -0,0 +1,478 @@
++/*	$FreeBSD: src/sys/opencrypto/cryptodev.h,v 1.25 2007/05/09 19:37:02 gnn Exp $	*/
++/*	$OpenBSD: cryptodev.h,v 1.31 2002/06/11 11:14:29 beck Exp $	*/
++
++/*-
++ * Linux port done by David McCullough <david_mccullough@securecomputing.com>
++ * Copyright (C) 2006-2007 David McCullough
++ * Copyright (C) 2004-2005 Intel Corporation.
++ * The license and original author are listed below.
++ *
++ * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
++ * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
++ *
++ * This code was written by Angelos D. Keromytis in Athens, Greece, in
++ * February 2000. Network Security Technologies Inc. (NSTI) kindly
++ * supported the development of this code.
++ *
++ * Copyright (c) 2000 Angelos D. Keromytis
++ *
++ * Permission to use, copy, and modify this software with or without fee
++ * is hereby granted, provided that this entire notice is included in
++ * all source code copies of any software which is or includes a copy or
++ * modification of this software.
++ *
++ * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
++ * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
++ * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
++ * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
++ * PURPOSE.
++ *
++ * Copyright (c) 2001 Theo de Raadt
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ *
++ * 1. Redistributions of source code must retain the above copyright
++ *   notice, this list of conditions and the following disclaimer.
++ * 2. Redistributions in binary form must reproduce the above copyright
++ *   notice, this list of conditions and the following disclaimer in the
++ *   documentation and/or other materials provided with the distribution.
++ * 3. The name of the author may not be used to endorse or promote products
++ *   derived from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
++ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
++ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
++ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
++ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
++ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ *
++ * Effort sponsored in part by the Defense Advanced Research Projects
++ * Agency (DARPA) and Air Force Research Laboratory, Air Force
++ * Materiel Command, USAF, under agreement number F30602-01-2-0537.
++ *
++ */
++
++#ifndef _CRYPTO_CRYPTO_H_
++#define _CRYPTO_CRYPTO_H_
++
++/* Some initial values */
++#define CRYPTO_DRIVERS_INITIAL	4
++#define CRYPTO_SW_SESSIONS	32
++
++/* Hash values */
++#define NULL_HASH_LEN		0
++#define MD5_HASH_LEN		16
++#define SHA1_HASH_LEN		20
++#define RIPEMD160_HASH_LEN	20
++#define SHA2_256_HASH_LEN	32
++#define SHA2_384_HASH_LEN	48
++#define SHA2_512_HASH_LEN	64
++#define MD5_KPDK_HASH_LEN	16
++#define SHA1_KPDK_HASH_LEN	20
++/* Maximum hash algorithm result length */
++#define HASH_MAX_LEN		SHA2_512_HASH_LEN /* Keep this updated */
++
++/* HMAC values */
++#define NULL_HMAC_BLOCK_LEN			1
++#define MD5_HMAC_BLOCK_LEN			64
++#define SHA1_HMAC_BLOCK_LEN			64
++#define RIPEMD160_HMAC_BLOCK_LEN	64
++#define SHA2_256_HMAC_BLOCK_LEN		64
++#define SHA2_384_HMAC_BLOCK_LEN		128
++#define SHA2_512_HMAC_BLOCK_LEN		128
++/* Maximum HMAC block length */
++#define HMAC_MAX_BLOCK_LEN		SHA2_512_HMAC_BLOCK_LEN /* Keep this updated */
++#define HMAC_IPAD_VAL			0x36
++#define HMAC_OPAD_VAL			0x5C
++
++/* Encryption algorithm block sizes */
++#define NULL_BLOCK_LEN			1
++#define DES_BLOCK_LEN			8
++#define DES3_BLOCK_LEN			8
++#define BLOWFISH_BLOCK_LEN		8
++#define SKIPJACK_BLOCK_LEN		8
++#define CAST128_BLOCK_LEN		8
++#define RIJNDAEL128_BLOCK_LEN	16
++#define AES_BLOCK_LEN			RIJNDAEL128_BLOCK_LEN
++#define CAMELLIA_BLOCK_LEN		16
++#define ARC4_BLOCK_LEN			1
++#define EALG_MAX_BLOCK_LEN		AES_BLOCK_LEN /* Keep this updated */
++
++/* Encryption algorithm min and max key sizes */
++#define NULL_MIN_KEY_LEN		0
++#define NULL_MAX_KEY_LEN		0
++#define DES_MIN_KEY_LEN			8
++#define DES_MAX_KEY_LEN			8
++#define DES3_MIN_KEY_LEN		24
++#define DES3_MAX_KEY_LEN		24
++#define BLOWFISH_MIN_KEY_LEN	4
++#define BLOWFISH_MAX_KEY_LEN	56
++#define SKIPJACK_MIN_KEY_LEN	10
++#define SKIPJACK_MAX_KEY_LEN	10
++#define CAST128_MIN_KEY_LEN		5
++#define CAST128_MAX_KEY_LEN		16
++#define RIJNDAEL128_MIN_KEY_LEN	16
++#define RIJNDAEL128_MAX_KEY_LEN	32
++#define AES_MIN_KEY_LEN			RIJNDAEL128_MIN_KEY_LEN
++#define AES_MAX_KEY_LEN			RIJNDAEL128_MAX_KEY_LEN
++#define CAMELLIA_MIN_KEY_LEN	16
++#define CAMELLIA_MAX_KEY_LEN	32
++#define ARC4_MIN_KEY_LEN		1
++#define ARC4_MAX_KEY_LEN		256
++
++/* Max size of data that can be processed */
++#define CRYPTO_MAX_DATA_LEN		64*1024 - 1
++
++#define CRYPTO_ALGORITHM_MIN	1
++#define CRYPTO_DES_CBC			1
++#define CRYPTO_3DES_CBC			2
++#define CRYPTO_BLF_CBC			3
++#define CRYPTO_CAST_CBC			4
++#define CRYPTO_SKIPJACK_CBC		5
++#define CRYPTO_MD5_HMAC			6
++#define CRYPTO_SHA1_HMAC		7
++#define CRYPTO_RIPEMD160_HMAC	8
++#define CRYPTO_MD5_KPDK			9
++#define CRYPTO_SHA1_KPDK		10
++#define CRYPTO_RIJNDAEL128_CBC	11 /* 128 bit blocksize */
++#define CRYPTO_AES_CBC			11 /* 128 bit blocksize -- the same as above */
++#define CRYPTO_ARC4				12
++#define CRYPTO_MD5				13
++#define CRYPTO_SHA1				14
++#define CRYPTO_NULL_HMAC		15
++#define CRYPTO_NULL_CBC			16
++#define CRYPTO_DEFLATE_COMP		17 /* Deflate compression algorithm */
++#define CRYPTO_SHA2_256_HMAC	18
++#define CRYPTO_SHA2_384_HMAC	19
++#define CRYPTO_SHA2_512_HMAC	20
++#define CRYPTO_CAMELLIA_CBC		21
++#define CRYPTO_SHA2_256			22
++#define CRYPTO_SHA2_384			23
++#define CRYPTO_SHA2_512			24
++#define CRYPTO_RIPEMD160		25
++#define CRYPTO_ALGORITHM_MAX	25 /* Keep updated - see below */
++
++/* Algorithm flags */
++#define CRYPTO_ALG_FLAG_SUPPORTED	0x01 /* Algorithm is supported */
++#define CRYPTO_ALG_FLAG_RNG_ENABLE	0x02 /* Has HW RNG for DH/DSA */
++#define CRYPTO_ALG_FLAG_DSA_SHA		0x04 /* Can do SHA on msg */
++
++/*
++ * Crypto driver/device flags.  They can set in the crid
++ * parameter when creating a session or submitting a key
++ * op to affect the device/driver assigned.  If neither
++ * of these are specified then the crid is assumed to hold
++ * the driver id of an existing (and suitable) device that
++ * must be used to satisfy the request.
++ */
++#define CRYPTO_FLAG_HARDWARE	0x01000000	/* hardware accelerated */
++#define CRYPTO_FLAG_SOFTWARE	0x02000000	/* software implementation */
++
++/* NB: deprecated */
++struct session_op {
++	u_int32_t	cipher;		/* ie. CRYPTO_DES_CBC */
++	u_int32_t	mac;		/* ie. CRYPTO_MD5_HMAC */
++
++	u_int32_t	keylen;		/* cipher key */
++	caddr_t		key;
++	int		mackeylen;	/* mac key */
++	caddr_t		mackey;
++
++  	u_int32_t	ses;		/* returns: session # */ 
++};
++
++struct session2_op {
++	u_int32_t	cipher;		/* ie. CRYPTO_DES_CBC */
++	u_int32_t	mac;		/* ie. CRYPTO_MD5_HMAC */
++
++	u_int32_t	keylen;		/* cipher key */
++	caddr_t		key;
++	int		mackeylen;	/* mac key */
++	caddr_t		mackey;
++
++  	u_int32_t	ses;		/* returns: session # */ 
++	int		crid;		/* driver id + flags (rw) */
++	int		pad[4];		/* for future expansion */
++};
++
++struct crypt_op {
++	u_int32_t	ses;
++	u_int16_t	op;		/* i.e. COP_ENCRYPT */
++#define COP_NONE	0
++#define COP_ENCRYPT	1
++#define COP_DECRYPT	2
++	u_int16_t	flags;
++#define	COP_F_BATCH	0x0008		/* Batch op if possible */
++	u_int		len;
++	caddr_t		src, dst;	/* become iov[] inside kernel */
++	caddr_t		mac;		/* must be big enough for chosen MAC */
++	caddr_t		iv;
++};
++
++/*
++ * Parameters for looking up a crypto driver/device by
++ * device name or by id.  The latter are returned for
++ * created sessions (crid) and completed key operations.
++ */
++struct crypt_find_op {
++	int		crid;		/* driver id + flags */
++	char		name[32];	/* device/driver name */
++};
++
++/* bignum parameter, in packed bytes, ... */
++struct crparam {
++	caddr_t		crp_p;
++	u_int		crp_nbits;
++};
++
++#define CRK_MAXPARAM	8
++
++struct crypt_kop {
++	u_int		crk_op;		/* ie. CRK_MOD_EXP or other */
++	u_int		crk_status;	/* return status */
++	u_short		crk_iparams;	/* # of input parameters */
++	u_short		crk_oparams;	/* # of output parameters */
++	u_int		crk_crid;	/* NB: only used by CIOCKEY2 (rw) */
++	struct crparam	crk_param[CRK_MAXPARAM];
++};
++#define CRK_ALGORITM_MIN	0
++#define CRK_MOD_EXP		0
++#define CRK_MOD_EXP_CRT		1
++#define CRK_DSA_SIGN		2
++#define CRK_DSA_VERIFY		3
++#define CRK_DH_COMPUTE_KEY	4
++#define CRK_ALGORITHM_MAX	4 /* Keep updated - see below */
++
++#define CRF_MOD_EXP		(1 << CRK_MOD_EXP)
++#define CRF_MOD_EXP_CRT		(1 << CRK_MOD_EXP_CRT)
++#define CRF_DSA_SIGN		(1 << CRK_DSA_SIGN)
++#define CRF_DSA_VERIFY		(1 << CRK_DSA_VERIFY)
++#define CRF_DH_COMPUTE_KEY	(1 << CRK_DH_COMPUTE_KEY)
++
++/*
++ * done against open of /dev/crypto, to get a cloned descriptor.
++ * Please use F_SETFD against the cloned descriptor.
++ */
++#define CRIOGET		_IOWR('c', 100, u_int32_t)
++#define CRIOASYMFEAT	CIOCASYMFEAT
++#define CRIOFINDDEV	CIOCFINDDEV
++
++/* the following are done against the cloned descriptor */
++#define CIOCGSESSION	_IOWR('c', 101, struct session_op)
++#define CIOCFSESSION	_IOW('c', 102, u_int32_t)
++#define CIOCCRYPT	_IOWR('c', 103, struct crypt_op)
++#define CIOCKEY		_IOWR('c', 104, struct crypt_kop)
++#define CIOCASYMFEAT	_IOR('c', 105, u_int32_t)
++#define CIOCGSESSION2	_IOWR('c', 106, struct session2_op)
++#define CIOCKEY2	_IOWR('c', 107, struct crypt_kop)
++#define CIOCFINDDEV	_IOWR('c', 108, struct crypt_find_op)
++
++struct cryptotstat {
++	struct timespec	acc;		/* total accumulated time */
++	struct timespec	min;		/* min time */
++	struct timespec	max;		/* max time */
++	u_int32_t	count;		/* number of observations */
++};
++
++struct cryptostats {
++	u_int32_t	cs_ops;		/* symmetric crypto ops submitted */
++	u_int32_t	cs_errs;	/* symmetric crypto ops that failed */
++	u_int32_t	cs_kops;	/* asymetric/key ops submitted */
++	u_int32_t	cs_kerrs;	/* asymetric/key ops that failed */
++	u_int32_t	cs_intrs;	/* crypto swi thread activations */
++	u_int32_t	cs_rets;	/* crypto return thread activations */
++	u_int32_t	cs_blocks;	/* symmetric op driver block */
++	u_int32_t	cs_kblocks;	/* symmetric op driver block */
++	/*
++	 * When CRYPTO_TIMING is defined at compile time and the
++	 * sysctl debug.crypto is set to 1, the crypto system will
++	 * accumulate statistics about how long it takes to process
++	 * crypto requests at various points during processing.
++	 */
++	struct cryptotstat cs_invoke;	/* crypto_dipsatch -> crypto_invoke */
++	struct cryptotstat cs_done;	/* crypto_invoke -> crypto_done */
++	struct cryptotstat cs_cb;	/* crypto_done -> callback */
++	struct cryptotstat cs_finis;	/* callback -> callback return */
++
++	u_int32_t	cs_drops;		/* crypto ops dropped due to congestion */
++};
++
++#ifdef __KERNEL__
++
++/* Standard initialization structure beginning */
++struct cryptoini {
++	int		cri_alg;	/* Algorithm to use */
++	int		cri_klen;	/* Key length, in bits */
++	int		cri_mlen;	/* Number of bytes we want from the
++					   entire hash. 0 means all. */
++	caddr_t		cri_key;	/* key to use */
++	u_int8_t	cri_iv[EALG_MAX_BLOCK_LEN];	/* IV to use */
++	struct cryptoini *cri_next;
++};
++
++/* Describe boundaries of a single crypto operation */
++struct cryptodesc {
++	int		crd_skip;	/* How many bytes to ignore from start */
++	int		crd_len;	/* How many bytes to process */
++	int		crd_inject;	/* Where to inject results, if applicable */
++	int		crd_flags;
++
++#define CRD_F_ENCRYPT		0x01	/* Set when doing encryption */
++#define CRD_F_IV_PRESENT	0x02	/* When encrypting, IV is already in
++					   place, so don't copy. */
++#define CRD_F_IV_EXPLICIT	0x04	/* IV explicitly provided */
++#define CRD_F_DSA_SHA_NEEDED	0x08	/* Compute SHA-1 of buffer for DSA */
++#define CRD_F_KEY_EXPLICIT	0x10	/* Key explicitly provided */
++#define CRD_F_COMP		0x0f    /* Set when doing compression */
++
++	struct cryptoini	CRD_INI; /* Initialization/context data */
++#define crd_iv		CRD_INI.cri_iv
++#define crd_key		CRD_INI.cri_key
++#define crd_alg		CRD_INI.cri_alg
++#define crd_klen	CRD_INI.cri_klen
++
++	struct cryptodesc *crd_next;
++};
++
++/* Structure describing complete operation */
++struct cryptop {
++	struct list_head crp_next;
++	wait_queue_head_t crp_waitq;
++
++	u_int64_t	crp_sid;	/* Session ID */
++	int		crp_ilen;	/* Input data total length */
++	int		crp_olen;	/* Result total length */
++
++	int		crp_etype;	/*
++					 * Error type (zero means no error).
++					 * All error codes except EAGAIN
++					 * indicate possible data corruption (as in,
++					 * the data have been touched). On all
++					 * errors, the crp_sid may have changed
++					 * (reset to a new one), so the caller
++					 * should always check and use the new
++					 * value on future requests.
++					 */
++	int		crp_flags;
++
++#define CRYPTO_F_SKBUF		0x0001	/* Input/output are skbuf chains */
++#define CRYPTO_F_IOV		0x0002	/* Input/output are uio */
++#define CRYPTO_F_REL		0x0004	/* Must return data in same place */
++#define CRYPTO_F_BATCH		0x0008	/* Batch op if possible */
++#define CRYPTO_F_CBIMM		0x0010	/* Do callback immediately */
++#define CRYPTO_F_DONE		0x0020	/* Operation completed */
++#define CRYPTO_F_CBIFSYNC	0x0040	/* Do CBIMM if op is synchronous */
++
++	caddr_t		crp_buf;	/* Data to be processed */
++	caddr_t		crp_opaque;	/* Opaque pointer, passed along */
++	struct cryptodesc *crp_desc;	/* Linked list of processing descriptors */
++
++	int (*crp_callback)(struct cryptop *); /* Callback function */
++};
++
++#define CRYPTO_BUF_CONTIG	0x0
++#define CRYPTO_BUF_IOV		0x1
++#define CRYPTO_BUF_SKBUF		0x2
++
++#define CRYPTO_OP_DECRYPT	0x0
++#define CRYPTO_OP_ENCRYPT	0x1
++
++/*
++ * Hints passed to process methods.
++ */
++#define CRYPTO_HINT_MORE	0x1	/* more ops coming shortly */
++
++struct cryptkop {
++	struct list_head krp_next;
++	wait_queue_head_t krp_waitq;
++
++	int		krp_flags;
++#define CRYPTO_KF_DONE		0x0001	/* Operation completed */
++#define CRYPTO_KF_CBIMM		0x0002	/* Do callback immediately */
++
++	u_int		krp_op;		/* ie. CRK_MOD_EXP or other */
++	u_int		krp_status;	/* return status */
++	u_short		krp_iparams;	/* # of input parameters */
++	u_short		krp_oparams;	/* # of output parameters */
++	u_int		krp_crid;	/* desired device, etc. */
++	u_int32_t	krp_hid;
++	struct crparam	krp_param[CRK_MAXPARAM];	/* kvm */
++	int		(*krp_callback)(struct cryptkop *);
++};
++
++#include <ocf-compat.h>
++
++/*
++ * Session ids are 64 bits.  The lower 32 bits contain a "local id" which
++ * is a driver-private session identifier.  The upper 32 bits contain a
++ * "hardware id" used by the core crypto code to identify the driver and
++ * a copy of the driver's capabilities that can be used by client code to
++ * optimize operation.
++ */
++#define CRYPTO_SESID2HID(_sid)	(((_sid) >> 32) & 0x00ffffff)
++#define CRYPTO_SESID2CAPS(_sid)	(((_sid) >> 32) & 0xff000000)
++#define CRYPTO_SESID2LID(_sid)	(((u_int32_t) (_sid)) & 0xffffffff)
++
++extern	int crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int hard);
++extern	int crypto_freesession(u_int64_t sid);
++#define CRYPTOCAP_F_HARDWARE	CRYPTO_FLAG_HARDWARE
++#define CRYPTOCAP_F_SOFTWARE	CRYPTO_FLAG_SOFTWARE
++#define CRYPTOCAP_F_SYNC	0x04000000	/* operates synchronously */
++extern	int32_t crypto_get_driverid(device_t dev, int flags);
++extern	int crypto_find_driver(const char *);
++extern	device_t crypto_find_device_byhid(int hid);
++extern	int crypto_getcaps(int hid);
++extern	int crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen,
++	    u_int32_t flags);
++extern	int crypto_kregister(u_int32_t, int, u_int32_t);
++extern	int crypto_unregister(u_int32_t driverid, int alg);
++extern	int crypto_unregister_all(u_int32_t driverid);
++extern	int crypto_dispatch(struct cryptop *crp);
++extern	int crypto_kdispatch(struct cryptkop *);
++#define CRYPTO_SYMQ	0x1
++#define CRYPTO_ASYMQ	0x2
++extern	int crypto_unblock(u_int32_t, int);
++extern	void crypto_done(struct cryptop *crp);
++extern	void crypto_kdone(struct cryptkop *);
++extern	int crypto_getfeat(int *);
++
++extern	void crypto_freereq(struct cryptop *crp);
++extern	struct cryptop *crypto_getreq(int num);
++
++extern  int crypto_usercrypto;      /* userland may do crypto requests */
++extern  int crypto_userasymcrypto;  /* userland may do asym crypto reqs */
++extern  int crypto_devallowsoft;    /* only use hardware crypto */
++
++/*
++ * random number support,  crypto_unregister_all will unregister
++ */
++extern int crypto_rregister(u_int32_t driverid,
++		int (*read_random)(void *arg, u_int32_t *buf, int len), void *arg);
++extern int crypto_runregister_all(u_int32_t driverid);
++
++/*
++ * Crypto-related utility routines used mainly by drivers.
++ *
++ * XXX these don't really belong here; but for now they're
++ *     kept apart from the rest of the system.
++ */
++struct uio;
++extern	void cuio_copydata(struct uio* uio, int off, int len, caddr_t cp);
++extern	void cuio_copyback(struct uio* uio, int off, int len, caddr_t cp);
++extern	struct iovec *cuio_getptr(struct uio *uio, int loc, int *off);
++
++extern	void crypto_copyback(int flags, caddr_t buf, int off, int size,
++	    caddr_t in);
++extern	void crypto_copydata(int flags, caddr_t buf, int off, int size,
++	    caddr_t out);
++extern	int crypto_apply(int flags, caddr_t buf, int off, int len,
++	    int (*f)(void *, void *, u_int), void *arg);
++
++#endif /* __KERNEL__ */
++#endif /* _CRYPTO_CRYPTO_H_ */
+diff -Nur linux-2.6.30.orig/crypto/ocf/cryptosoft.c linux-2.6.30/crypto/ocf/cryptosoft.c
+--- linux-2.6.30.orig/crypto/ocf/cryptosoft.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.30/crypto/ocf/cryptosoft.c	2009-06-11 10:55:27.000000000 +0200
+@@ -0,0 +1,898 @@
++/*
++ * An OCF module that uses the linux kernel cryptoapi, based on the
++ * original cryptosoft for BSD by Angelos D. Keromytis (angelos@cis.upenn.edu)
++ * but is mostly unrecognisable,
++ *
++ * Written by David McCullough <david_mccullough@securecomputing.com>
++ * Copyright (C) 2004-2007 David McCullough
++ * Copyright (C) 2004-2005 Intel Corporation.
++ *
++ * LICENSE TERMS
++ *
++ * The free distribution and use of this software in both source and binary
++ * form is allowed (with or without changes) provided that:
++ *
++ *   1. distributions of this source code include the above copyright
++ *      notice, this list of conditions and the following disclaimer;
++ *
++ *   2. distributions in binary form include the above copyright
++ *      notice, this list of conditions and the following disclaimer
++ *      in the documentation and/or other associated materials;
++ *
++ *   3. the copyright holder's name is not used to endorse products
++ *      built using this software without specific written permission.
++ *
++ * ALTERNATIVELY, provided that this notice is retained in full, this product
++ * may be distributed under the terms of the GNU General Public License (GPL),
++ * in which case the provisions of the GPL apply INSTEAD OF those given above.
++ *
++ * DISCLAIMER
++ *
++ * This software is provided 'as is' with no explicit or implied warranties
++ * in respect of its properties, including, but not limited to, correctness
++ * and/or fitness for purpose.
++ * ---------------------------------------------------------------------------
++ */
++
++#ifndef AUTOCONF_INCLUDED
++#include <linux/config.h>
++#endif
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/list.h>
++#include <linux/slab.h>
++#include <linux/sched.h>
++#include <linux/wait.h>
++#include <linux/crypto.h>
++#include <linux/mm.h>
++#include <linux/skbuff.h>
++#include <linux/random.h>
++#include <linux/scatterlist.h>
++
++#include <cryptodev.h>
++#include <uio.h>
++
++struct {
++	softc_device_decl	sc_dev;
++} swcr_softc;
++
++#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)
++
++/* Software session entry */
++
++#define SW_TYPE_CIPHER		0
++#define SW_TYPE_HMAC		1
++#define SW_TYPE_AUTH2		2
++#define SW_TYPE_HASH		3
++#define SW_TYPE_COMP		4
++#define SW_TYPE_BLKCIPHER	5
++
++struct swcr_data {
++	int					sw_type;
++	int					sw_alg;
++	struct crypto_tfm	*sw_tfm;
++	union {
++		struct {
++			char *sw_key;
++			int  sw_klen;
++			int  sw_mlen;
++		} hmac;
++		void *sw_comp_buf;
++	} u;
++	struct swcr_data	*sw_next;
++};
++
++#ifndef CRYPTO_TFM_MODE_CBC
++/*
++ * As of linux-2.6.21 this is no longer defined, and presumably no longer
++ * needed to be passed into the crypto core code.
++ */
++#define	CRYPTO_TFM_MODE_CBC	0
++#define	CRYPTO_TFM_MODE_ECB	0
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++	/*
++	 * Linux 2.6.19 introduced a new Crypto API, setup macro's to convert new
++	 * API into old API.
++	 */
++
++	/* Symmetric/Block Cipher */
++	struct blkcipher_desc
++	{
++		struct crypto_tfm *tfm;
++		void *info;
++	};
++	#define ecb(X)								#X
++	#define cbc(X)								#X
++	#define crypto_has_blkcipher(X, Y, Z)		crypto_alg_available(X, 0)
++	#define crypto_blkcipher_cast(X)			X
++	#define crypto_blkcipher_tfm(X)				X
++	#define crypto_alloc_blkcipher(X, Y, Z)		crypto_alloc_tfm(X, mode)
++	#define crypto_blkcipher_ivsize(X)			crypto_tfm_alg_ivsize(X)
++	#define crypto_blkcipher_blocksize(X)		crypto_tfm_alg_blocksize(X)
++	#define crypto_blkcipher_setkey(X, Y, Z)	crypto_cipher_setkey(X, Y, Z)
++	#define crypto_blkcipher_encrypt_iv(W, X, Y, Z)	\
++				crypto_cipher_encrypt_iv((W)->tfm, X, Y, Z, (u8 *)((W)->info))
++	#define crypto_blkcipher_decrypt_iv(W, X, Y, Z)	\
++				crypto_cipher_decrypt_iv((W)->tfm, X, Y, Z, (u8 *)((W)->info))
++
++	/* Hash/HMAC/Digest */
++	struct hash_desc
++	{
++		struct crypto_tfm *tfm;
++	};
++	#define hmac(X)							#X
++	#define crypto_has_hash(X, Y, Z)		crypto_alg_available(X, 0)
++	#define crypto_hash_cast(X)				X
++	#define crypto_hash_tfm(X)				X
++	#define crypto_alloc_hash(X, Y, Z)		crypto_alloc_tfm(X, mode)
++	#define crypto_hash_digestsize(X)		crypto_tfm_alg_digestsize(X)
++	#define crypto_hash_digest(W, X, Y, Z)	\
++				crypto_digest_digest((W)->tfm, X, sg_num, Z)
++
++	/* Asymmetric Cipher */
++	#define crypto_has_cipher(X, Y, Z)		crypto_alg_available(X, 0)
++
++	/* Compression */
++	#define crypto_has_comp(X, Y, Z)		crypto_alg_available(X, 0)
++	#define crypto_comp_tfm(X)				X
++	#define crypto_comp_cast(X)				X
++	#define crypto_alloc_comp(X, Y, Z)		crypto_alloc_tfm(X, mode)
++#else
++	#define ecb(X)	"ecb(" #X ")"
++	#define cbc(X)	"cbc(" #X ")"
++	#define hmac(X)	"hmac(" #X ")"
++#endif /* if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) */
++
++struct crypto_details
++{
++	char *alg_name;
++	int mode;
++	int sw_type;
++};
++
++/*
++ * This needs to be kept updated with CRYPTO_xxx list (cryptodev.h).
++ * If the Algorithm is not supported, then insert a {NULL, 0, 0} entry.
++ *
++ * IMPORTANT: The index to the array IS CRYPTO_xxx.
++ */
++static struct crypto_details crypto_details[CRYPTO_ALGORITHM_MAX + 1] = {
++	{ NULL,              0,                   0 },
++	/* CRYPTO_xxx index starts at 1 */
++	{ cbc(des),          CRYPTO_TFM_MODE_CBC, SW_TYPE_BLKCIPHER },
++	{ cbc(des3_ede),     CRYPTO_TFM_MODE_CBC, SW_TYPE_BLKCIPHER },
++	{ cbc(blowfish),     CRYPTO_TFM_MODE_CBC, SW_TYPE_BLKCIPHER },
++	{ cbc(cast5),        CRYPTO_TFM_MODE_CBC, SW_TYPE_BLKCIPHER },
++	{ cbc(skipjack),     CRYPTO_TFM_MODE_CBC, SW_TYPE_BLKCIPHER },
++	{ hmac(md5),         0,                   SW_TYPE_HMAC },
++	{ hmac(sha1),        0,                   SW_TYPE_HMAC },
++	{ hmac(ripemd160),   0,                   SW_TYPE_HMAC },
++	{ "md5-kpdk??",      0,                   SW_TYPE_HASH },
++	{ "sha1-kpdk??",     0,                   SW_TYPE_HASH },
++	{ cbc(aes),          CRYPTO_TFM_MODE_CBC, SW_TYPE_BLKCIPHER },
++	{ ecb(arc4),         CRYPTO_TFM_MODE_ECB, SW_TYPE_BLKCIPHER },
++	{ "md5",             0,                   SW_TYPE_HASH },
++	{ "sha1",            0,                   SW_TYPE_HASH },
++	{ hmac(digest_null), 0,                   SW_TYPE_HMAC },
++	{ cbc(cipher_null),  CRYPTO_TFM_MODE_CBC, SW_TYPE_BLKCIPHER },
++	{ "deflate",         0,                   SW_TYPE_COMP },
++	{ hmac(sha256),      0,                   SW_TYPE_HMAC },
++	{ hmac(sha384),      0,                   SW_TYPE_HMAC },
++	{ hmac(sha512),      0,                   SW_TYPE_HMAC },
++	{ cbc(camellia),     CRYPTO_TFM_MODE_CBC, SW_TYPE_BLKCIPHER },
++	{ "sha256",          0,                   SW_TYPE_HASH },
++	{ "sha384",          0,                   SW_TYPE_HASH },
++	{ "sha512",          0,                   SW_TYPE_HASH },
++	{ "ripemd160",       0,                   SW_TYPE_HASH },
++};
++
++int32_t swcr_id = -1;
++module_param(swcr_id, int, 0444);
++MODULE_PARM_DESC(swcr_id, "Read-Only OCF ID for cryptosoft driver");
++
++int swcr_fail_if_compression_grows = 1;
++module_param(swcr_fail_if_compression_grows, int, 0644);
++MODULE_PARM_DESC(swcr_fail_if_compression_grows,
++                "Treat compression that results in more data as a failure");
++
++static struct swcr_data **swcr_sessions = NULL;
++static u_int32_t swcr_sesnum = 0;
++
++static	int swcr_process(device_t, struct cryptop *, int);
++static	int swcr_newsession(device_t, u_int32_t *, struct cryptoini *);
++static	int swcr_freesession(device_t, u_int64_t);
++
++static device_method_t swcr_methods = {
++	/* crypto device methods */
++	DEVMETHOD(cryptodev_newsession,	swcr_newsession),
++	DEVMETHOD(cryptodev_freesession,swcr_freesession),
++	DEVMETHOD(cryptodev_process,	swcr_process),
++};
++
++#define debug swcr_debug
++int swcr_debug = 0;
++module_param(swcr_debug, int, 0644);
++MODULE_PARM_DESC(swcr_debug, "Enable debug");
++
++/*
++ * Generate a new software session.
++ */
++static int
++swcr_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
++{
++	struct swcr_data **swd;
++	u_int32_t i;
++	int error;
++	char *algo;
++	int mode, sw_type;
++
++	dprintk("%s()\n", __FUNCTION__);
++	if (sid == NULL || cri == NULL) {
++		dprintk("%s,%d - EINVAL\n", __FILE__, __LINE__);
++		return EINVAL;
++	}
++
++	if (swcr_sessions) {
++		for (i = 1; i < swcr_sesnum; i++)
++			if (swcr_sessions[i] == NULL)
++				break;
++	} else
++		i = 1;		/* NB: to silence compiler warning */
++
++	if (swcr_sessions == NULL || i == swcr_sesnum) {
++		if (swcr_sessions == NULL) {
++			i = 1; /* We leave swcr_sessions[0] empty */
++			swcr_sesnum = CRYPTO_SW_SESSIONS;
++		} else
++			swcr_sesnum *= 2;
++
++		swd = kmalloc(swcr_sesnum * sizeof(struct swcr_data *), SLAB_ATOMIC);
++		if (swd == NULL) {
++			/* Reset session number */
++			if (swcr_sesnum == CRYPTO_SW_SESSIONS)
++				swcr_sesnum = 0;
++			else
++				swcr_sesnum /= 2;
++			dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
++			return ENOBUFS;
++		}
++		memset(swd, 0, swcr_sesnum * sizeof(struct swcr_data *));
++
++		/* Copy existing sessions */
++		if (swcr_sessions) {
++			memcpy(swd, swcr_sessions,
++			    (swcr_sesnum / 2) * sizeof(struct swcr_data *));
++			kfree(swcr_sessions);
++		}
++
++		swcr_sessions = swd;
++	}
++
++	swd = &swcr_sessions[i];
++	*sid = i;
++
++	while (cri) {
++		*swd = (struct swcr_data *) kmalloc(sizeof(struct swcr_data),
++				SLAB_ATOMIC);
++		if (*swd == NULL) {
++			swcr_freesession(NULL, i);
++			dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
++			return ENOBUFS;
++		}
++		memset(*swd, 0, sizeof(struct swcr_data));
++
++		if (cri->cri_alg > CRYPTO_ALGORITHM_MAX) {
++			printk("cryptosoft: Unknown algorithm 0x%x\n", cri->cri_alg);
++			swcr_freesession(NULL, i);
++			return EINVAL;
++		}
++
++		algo = crypto_details[cri->cri_alg].alg_name;
++		if (!algo || !*algo) {
++			printk("cryptosoft: Unsupported algorithm 0x%x\n", cri->cri_alg);
++			swcr_freesession(NULL, i);
++			return EINVAL;
++		}
++
++		mode = crypto_details[cri->cri_alg].mode;
++		sw_type = crypto_details[cri->cri_alg].sw_type;
++
++		/* Algorithm specific configuration */
++		switch (cri->cri_alg) {
++		case CRYPTO_NULL_CBC:
++			cri->cri_klen = 0; /* make it work with crypto API */
++			break;
++		default:
++			break;
++		}
++
++		if (sw_type == SW_TYPE_BLKCIPHER) {
++			dprintk("%s crypto_alloc_blkcipher(%s, 0x%x)\n", __FUNCTION__,
++					algo, mode);
++
++			(*swd)->sw_tfm = crypto_blkcipher_tfm(
++								crypto_alloc_blkcipher(algo, 0,
++									CRYPTO_ALG_ASYNC));
++			if (!(*swd)->sw_tfm) {
++				dprintk("cryptosoft: crypto_alloc_blkcipher failed(%s,0x%x)\n",
++						algo,mode);
++				swcr_freesession(NULL, i);
++				return EINVAL;
++			}
++
++			if (debug) {
++				dprintk("%s key:cri->cri_klen=%d,(cri->cri_klen + 7)/8=%d",
++						__FUNCTION__,cri->cri_klen,(cri->cri_klen + 7)/8);
++				for (i = 0; i < (cri->cri_klen + 7) / 8; i++)
++				{
++					dprintk("%s0x%x", (i % 8) ? " " : "\n    ",cri->cri_key[i]);
++				}
++				dprintk("\n");
++			}
++			error = crypto_blkcipher_setkey(
++						crypto_blkcipher_cast((*swd)->sw_tfm), cri->cri_key,
++							(cri->cri_klen + 7) / 8);
++			if (error) {
++				printk("cryptosoft: setkey failed %d (crt_flags=0x%x)\n", error,
++						(*swd)->sw_tfm->crt_flags);
++				swcr_freesession(NULL, i);
++				return error;
++			}
++		} else if (sw_type == SW_TYPE_HMAC || sw_type == SW_TYPE_HASH) {
++			dprintk("%s crypto_alloc_hash(%s, 0x%x)\n", __FUNCTION__,
++					algo, mode);
++
++			(*swd)->sw_tfm = crypto_hash_tfm(
++								crypto_alloc_hash(algo, 0, CRYPTO_ALG_ASYNC));
++
++			if (!(*swd)->sw_tfm) {
++				dprintk("cryptosoft: crypto_alloc_hash failed(%s,0x%x)\n",
++						algo, mode);
++				swcr_freesession(NULL, i);
++				return EINVAL;
++			}
++
++			(*swd)->u.hmac.sw_klen = (cri->cri_klen + 7) / 8;
++			(*swd)->u.hmac.sw_key = (char *)kmalloc((*swd)->u.hmac.sw_klen,
++				SLAB_ATOMIC);
++			if ((*swd)->u.hmac.sw_key == NULL) {
++				swcr_freesession(NULL, i);
++				dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
++				return ENOBUFS;
++			}
++			memcpy((*swd)->u.hmac.sw_key, cri->cri_key, (*swd)->u.hmac.sw_klen);
++			if (cri->cri_mlen) {
++				(*swd)->u.hmac.sw_mlen = cri->cri_mlen;
++			} else {
++				(*swd)->u.hmac.sw_mlen =
++						crypto_hash_digestsize(
++								crypto_hash_cast((*swd)->sw_tfm));
++			}
++		} else if (sw_type == SW_TYPE_COMP) {
++			(*swd)->sw_tfm = crypto_comp_tfm(
++					crypto_alloc_comp(algo, 0, CRYPTO_ALG_ASYNC));
++			if (!(*swd)->sw_tfm) {
++				dprintk("cryptosoft: crypto_alloc_comp failed(%s,0x%x)\n",
++						algo, mode);
++				swcr_freesession(NULL, i);
++				return EINVAL;
++			}
++			(*swd)->u.sw_comp_buf = kmalloc(CRYPTO_MAX_DATA_LEN, SLAB_ATOMIC);
++			if ((*swd)->u.sw_comp_buf == NULL) {
++				swcr_freesession(NULL, i);
++				dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
++				return ENOBUFS;
++			}
++		} else {
++			printk("cryptosoft: Unhandled sw_type %d\n", sw_type);
++			swcr_freesession(NULL, i);
++			return EINVAL;
++		}
++
++		(*swd)->sw_alg = cri->cri_alg;
++		(*swd)->sw_type = sw_type;
++
++		cri = cri->cri_next;
++		swd = &((*swd)->sw_next);
++	}
++	return 0;
++}
++
++/*
++ * Free a session.
++ */
++static int
++swcr_freesession(device_t dev, u_int64_t tid)
++{
++	struct swcr_data *swd;
++	u_int32_t sid = CRYPTO_SESID2LID(tid);
++
++	dprintk("%s()\n", __FUNCTION__);
++	if (sid > swcr_sesnum || swcr_sessions == NULL ||
++			swcr_sessions[sid] == NULL) {
++		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
++		return(EINVAL);
++	}
++
++	/* Silently accept and return */
++	if (sid == 0)
++		return(0);
++
++	while ((swd = swcr_sessions[sid]) != NULL) {
++		swcr_sessions[sid] = swd->sw_next;
++		if (swd->sw_tfm)
++			crypto_free_tfm(swd->sw_tfm);
++		if (swd->sw_type == SW_TYPE_COMP) {
++			if (swd->u.sw_comp_buf)
++				kfree(swd->u.sw_comp_buf);
++		} else {
++			if (swd->u.hmac.sw_key)
++				kfree(swd->u.hmac.sw_key);
++		}
++		kfree(swd);
++	}
++	return 0;
++}
++
++/*
++ * Process a software request.
++ */
++static int
++swcr_process(device_t dev, struct cryptop *crp, int hint)
++{
++	struct cryptodesc *crd;
++	struct swcr_data *sw;
++	u_int32_t lid;
++#define SCATTERLIST_MAX 16
++	struct scatterlist sg[SCATTERLIST_MAX];
++	int sg_num, sg_len, skip;
++	struct sk_buff *skb = NULL;
++	struct uio *uiop = NULL;
++
++	dprintk("%s()\n", __FUNCTION__);
++	/* Sanity check */
++	if (crp == NULL) {
++		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
++		return EINVAL;
++	}
++
++	crp->crp_etype = 0;
++
++	if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
++		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
++		crp->crp_etype = EINVAL;
++		goto done;
++	}
++
++	lid = crp->crp_sid & 0xffffffff;
++	if (lid >= swcr_sesnum || lid == 0 || swcr_sessions == NULL ||
++			swcr_sessions[lid] == NULL) {
++		crp->crp_etype = ENOENT;
++		dprintk("%s,%d: ENOENT\n", __FILE__, __LINE__);
++		goto done;
++	}
++
++	/*
++	 * do some error checking outside of the loop for SKB and IOV processing
++	 * this leaves us with valid skb or uiop pointers for later
++	 */
++	if (crp->crp_flags & CRYPTO_F_SKBUF) {
++		skb = (struct sk_buff *) crp->crp_buf;
++		if (skb_shinfo(skb)->nr_frags >= SCATTERLIST_MAX) {
++			printk("%s,%d: %d nr_frags > SCATTERLIST_MAX", __FILE__, __LINE__,
++					skb_shinfo(skb)->nr_frags);
++			goto done;
++		}
++	} else if (crp->crp_flags & CRYPTO_F_IOV) {
++		uiop = (struct uio *) crp->crp_buf;
++		if (uiop->uio_iovcnt > SCATTERLIST_MAX) {
++			printk("%s,%d: %d uio_iovcnt > SCATTERLIST_MAX", __FILE__, __LINE__,
++					uiop->uio_iovcnt);
++			goto done;
++		}
++	}
++
++	/* Go through crypto descriptors, processing as we go */
++	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
++		/*
++		 * Find the crypto context.
++		 *
++		 * XXX Note that the logic here prevents us from having
++		 * XXX the same algorithm multiple times in a session
++		 * XXX (or rather, we can but it won't give us the right
++		 * XXX results). To do that, we'd need some way of differentiating
++		 * XXX between the various instances of an algorithm (so we can
++		 * XXX locate the correct crypto context).
++		 */
++		for (sw = swcr_sessions[lid]; sw && sw->sw_alg != crd->crd_alg;
++				sw = sw->sw_next)
++			;
++
++		/* No such context ? */
++		if (sw == NULL) {
++			crp->crp_etype = EINVAL;
++			dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
++			goto done;
++		}
++
++		skip = crd->crd_skip;
++
++		/*
++		 * setup the SG list skip from the start of the buffer
++		 */
++		memset(sg, 0, sizeof(sg));
++		if (crp->crp_flags & CRYPTO_F_SKBUF) {
++			int i, len;
++
++			sg_num = 0;
++			sg_len = 0;
++
++			if (skip < skb_headlen(skb)) {
++				len = skb_headlen(skb) - skip;
++				if (len + sg_len > crd->crd_len)
++					len = crd->crd_len - sg_len;
++				sg_set_page(&sg[sg_num],
++					virt_to_page(skb->data + skip), len,
++					offset_in_page(skb->data + skip));
++				sg_len += len;
++				sg_num++;
++				skip = 0;
++			} else
++				skip -= skb_headlen(skb);
++
++			for (i = 0; sg_len < crd->crd_len &&
++						i < skb_shinfo(skb)->nr_frags &&
++						sg_num < SCATTERLIST_MAX; i++) {
++				if (skip < skb_shinfo(skb)->frags[i].size) {
++					len = skb_shinfo(skb)->frags[i].size - skip;
++					if (len + sg_len > crd->crd_len)
++						len = crd->crd_len - sg_len;
++					sg_set_page(&sg[sg_num],
++						skb_shinfo(skb)->frags[i].page,
++						len,
++						skb_shinfo(skb)->frags[i].page_offset + skip);
++					sg_len += len;
++					sg_num++;
++					skip = 0;
++				} else
++					skip -= skb_shinfo(skb)->frags[i].size;
++			}
++		} else if (crp->crp_flags & CRYPTO_F_IOV) {
++			int len;
++
++			sg_len = 0;
++			for (sg_num = 0; sg_len <= crd->crd_len &&
++					sg_num < uiop->uio_iovcnt &&
++					sg_num < SCATTERLIST_MAX; sg_num++) {
++				if (skip <= uiop->uio_iov[sg_num].iov_len) {
++					len = uiop->uio_iov[sg_num].iov_len - skip;
++					if (len + sg_len > crd->crd_len)
++						len = crd->crd_len - sg_len;
++					sg_set_page(&sg[sg_num],
++						virt_to_page(uiop->uio_iov[sg_num].iov_base+skip),
++						len,
++						offset_in_page(uiop->uio_iov[sg_num].iov_base+skip));
++					sg_len += len;
++					skip = 0;
++				} else 
++					skip -= uiop->uio_iov[sg_num].iov_len;
++			}
++		} else {
++			sg_len = (crp->crp_ilen - skip);
++			if (sg_len > crd->crd_len)
++				sg_len = crd->crd_len;
++			sg_set_page(&sg[0], virt_to_page(crp->crp_buf + skip),
++				sg_len, offset_in_page(crp->crp_buf + skip));
++			sg_num = 1;
++		}
++
++
++		switch (sw->sw_type) {
++		case SW_TYPE_BLKCIPHER: {
++			unsigned char iv[EALG_MAX_BLOCK_LEN];
++			unsigned char *ivp = iv;
++			int ivsize = 
++				crypto_blkcipher_ivsize(crypto_blkcipher_cast(sw->sw_tfm));
++			struct blkcipher_desc desc;
++
++			if (sg_len < crypto_blkcipher_blocksize(
++					crypto_blkcipher_cast(sw->sw_tfm))) {
++				crp->crp_etype = EINVAL;
++				dprintk("%s,%d: EINVAL len %d < %d\n", __FILE__, __LINE__,
++						sg_len, crypto_blkcipher_blocksize(
++							crypto_blkcipher_cast(sw->sw_tfm)));
++				goto done;
++			}
++
++			if (ivsize > sizeof(iv)) {
++				crp->crp_etype = EINVAL;
++				dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
++				goto done;
++			}
++
++			if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
++				int i, error;
++
++				if (debug) {
++					dprintk("%s key:", __FUNCTION__);
++					for (i = 0; i < (crd->crd_klen + 7) / 8; i++)
++						dprintk("%s0x%x", (i % 8) ? " " : "\n    ",
++								crd->crd_key[i]);
++					dprintk("\n");
++				}
++				error = crypto_blkcipher_setkey(
++							crypto_blkcipher_cast(sw->sw_tfm), crd->crd_key,
++							(crd->crd_klen + 7) / 8);
++				if (error) {
++					dprintk("cryptosoft: setkey failed %d (crt_flags=0x%x)\n",
++							error, sw->sw_tfm->crt_flags);
++					crp->crp_etype = -error;
++				}
++			}
++
++			memset(&desc, 0, sizeof(desc));
++			desc.tfm = crypto_blkcipher_cast(sw->sw_tfm);
++
++			if (crd->crd_flags & CRD_F_ENCRYPT) { /* encrypt */
++
++				if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
++					ivp = crd->crd_iv;
++				} else {
++					get_random_bytes(ivp, ivsize);
++				}
++				/*
++				 * do we have to copy the IV back to the buffer ?
++				 */
++				if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0) {
++					crypto_copyback(crp->crp_flags, crp->crp_buf,
++							crd->crd_inject, ivsize, (caddr_t)ivp);
++				}
++				desc.info = ivp;
++				crypto_blkcipher_encrypt_iv(&desc, sg, sg, sg_len);
++
++			} else { /*decrypt */
++
++				if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
++					ivp = crd->crd_iv;
++				} else {
++					crypto_copydata(crp->crp_flags, crp->crp_buf,
++							crd->crd_inject, ivsize, (caddr_t)ivp);
++				}
++				desc.info = ivp;
++				crypto_blkcipher_decrypt_iv(&desc, sg, sg, sg_len);
++			}
++			} break;
++		case SW_TYPE_HMAC:
++		case SW_TYPE_HASH:
++			{
++			char result[HASH_MAX_LEN];
++			struct hash_desc desc;
++
++			/* check we have room for the result */
++			if (crp->crp_ilen - crd->crd_inject < sw->u.hmac.sw_mlen) {
++				dprintk(
++			"cryptosoft: EINVAL crp_ilen=%d, len=%d, inject=%d digestsize=%d\n",
++						crp->crp_ilen, crd->crd_skip + sg_len, crd->crd_inject,
++						sw->u.hmac.sw_mlen);
++				crp->crp_etype = EINVAL;
++				goto done;
++			}
++
++			memset(&desc, 0, sizeof(desc));
++			desc.tfm = crypto_hash_cast(sw->sw_tfm);
++
++			memset(result, 0, sizeof(result));
++
++			if (sw->sw_type == SW_TYPE_HMAC) {
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++				crypto_hmac(sw->sw_tfm, sw->u.hmac.sw_key, &sw->u.hmac.sw_klen,
++						sg, sg_num, result);
++#else
++				crypto_hash_setkey(desc.tfm, sw->u.hmac.sw_key,
++						sw->u.hmac.sw_klen);
++				crypto_hash_digest(&desc, sg, sg_len, result);
++#endif /* #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) */
++				
++			} else { /* SW_TYPE_HASH */
++				crypto_hash_digest(&desc, sg, sg_len, result);
++			}
++
++			crypto_copyback(crp->crp_flags, crp->crp_buf,
++					crd->crd_inject, sw->u.hmac.sw_mlen, result);
++			}
++			break;
++
++		case SW_TYPE_COMP: {
++			void *ibuf = NULL;
++			void *obuf = sw->u.sw_comp_buf;
++			int ilen = sg_len, olen = CRYPTO_MAX_DATA_LEN;
++			int ret = 0;
++
++			/*
++			 * we need to use an additional copy if there is more than one
++			 * input chunk since the kernel comp routines do not handle
++			 * SG yet.  Otherwise we just use the input buffer as is.
++			 * Rather than allocate another buffer we just split the tmp
++			 * buffer we already have.
++			 * Perhaps we should just use zlib directly?
++			 */
++			if (sg_num > 1) {
++				int blk;
++
++				ibuf = obuf;
++				for (blk = 0; blk < sg_num; blk++) {
++					memcpy(obuf, sg_virt(&sg[blk]),
++							sg[blk].length);
++					obuf += sg[blk].length;
++				}
++				olen -= sg_len;
++			} else
++				ibuf = sg_virt(&sg[0]);
++
++			if (crd->crd_flags & CRD_F_ENCRYPT) { /* compress */
++				ret = crypto_comp_compress(crypto_comp_cast(sw->sw_tfm),
++						ibuf, ilen, obuf, &olen);
++				if (!ret && olen > crd->crd_len) {
++					dprintk("cryptosoft: ERANGE compress %d into %d\n",
++							crd->crd_len, olen);
++					if (swcr_fail_if_compression_grows)
++						ret = ERANGE;
++				}
++			} else { /* decompress */
++				ret = crypto_comp_decompress(crypto_comp_cast(sw->sw_tfm),
++						ibuf, ilen, obuf, &olen);
++				if (!ret && (olen + crd->crd_inject) > crp->crp_olen) {
++					dprintk("cryptosoft: ETOOSMALL decompress %d into %d, "
++							"space for %d,at offset %d\n",
++							crd->crd_len, olen, crp->crp_olen, crd->crd_inject);
++					ret = ETOOSMALL;
++				}
++			}
++			if (ret)
++				dprintk("%s,%d: ret = %d\n", __FILE__, __LINE__, ret);
++
++			/*
++			 * on success copy result back,
++			 * linux crypto API returns -errno, we need to fix that
++			 */
++			crp->crp_etype = ret < 0 ? -ret : ret;
++			if (ret == 0) {
++				/* copy back the result and return its size */
++				crypto_copyback(crp->crp_flags, crp->crp_buf,
++						crd->crd_inject, olen, obuf);
++				crp->crp_olen = olen;
++			}
++
++
++			} break;
++
++		default:
++			/* Unknown/unsupported algorithm */
++			dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
++			crp->crp_etype = EINVAL;
++			goto done;
++		}
++	}
++
++done:
++	crypto_done(crp);
++	return 0;
++}
++
++static int
++cryptosoft_init(void)
++{
++	int i, sw_type, mode;
++	char *algo;
++
++	dprintk("%s(%p)\n", __FUNCTION__, cryptosoft_init);
++
++	softc_device_init(&swcr_softc, "cryptosoft", 0, swcr_methods);
++
++	swcr_id = crypto_get_driverid(softc_get_device(&swcr_softc),
++			CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC);
++	if (swcr_id < 0) {
++		printk("Software crypto device cannot initialize!");
++		return -ENODEV;
++	}
++
++#define	REGISTER(alg) \
++		crypto_register(swcr_id, alg, 0,0);
++
++	for (i = CRYPTO_ALGORITHM_MIN; i <= CRYPTO_ALGORITHM_MAX; ++i)
++	{
++		
++		algo = crypto_details[i].alg_name;
++		if (!algo || !*algo)
++		{
++			dprintk("%s:Algorithm %d not supported\n", __FUNCTION__, i);
++			continue;
++		}
++
++		mode = crypto_details[i].mode;
++		sw_type = crypto_details[i].sw_type;
++
++		switch (sw_type)
++		{
++			case SW_TYPE_CIPHER:
++				if (crypto_has_cipher(algo, 0, CRYPTO_ALG_ASYNC))
++				{
++					REGISTER(i);
++				}
++				else
++				{
++					dprintk("%s:CIPHER algorithm %d:'%s' not supported\n",
++								__FUNCTION__, i, algo);
++				}
++				break;
++			case SW_TYPE_HMAC:
++				if (crypto_has_hash(algo, 0, CRYPTO_ALG_ASYNC))
++				{
++					REGISTER(i);
++				}
++				else
++				{
++					dprintk("%s:HMAC algorithm %d:'%s' not supported\n",
++								__FUNCTION__, i, algo);
++				}
++				break;
++			case SW_TYPE_HASH:
++				if (crypto_has_hash(algo, 0, CRYPTO_ALG_ASYNC))
++				{
++					REGISTER(i);
++				}
++				else
++				{
++					dprintk("%s:HASH algorithm %d:'%s' not supported\n",
++								__FUNCTION__, i, algo);
++				}
++				break;
++			case SW_TYPE_COMP:
++				if (crypto_has_comp(algo, 0, CRYPTO_ALG_ASYNC))
++				{
++					REGISTER(i);
++				}
++				else
++				{
++					dprintk("%s:COMP algorithm %d:'%s' not supported\n",
++								__FUNCTION__, i, algo);
++				}
++				break;
++			case SW_TYPE_BLKCIPHER:
++				if (crypto_has_blkcipher(algo, 0, CRYPTO_ALG_ASYNC))
++				{
++					REGISTER(i);
++				}
++				else
++				{
++					dprintk("%s:BLKCIPHER algorithm %d:'%s' not supported\n",
++								__FUNCTION__, i, algo);
++				}
++				break;
++			default:
++				dprintk(
++				"%s:Algorithm Type %d not supported (algorithm %d:'%s')\n",
++					__FUNCTION__, sw_type, i, algo);
++				break;
++		}
++	}
++
++	return(0);
++}
++
++static void
++cryptosoft_exit(void)
++{
++	dprintk("%s()\n", __FUNCTION__);
++	crypto_unregister_all(swcr_id);
++	swcr_id = -1;
++}
++
++module_init(cryptosoft_init);
++module_exit(cryptosoft_exit);
++
++MODULE_LICENSE("Dual BSD/GPL");
++MODULE_AUTHOR("David McCullough <david_mccullough@securecomputing.com>");
++MODULE_DESCRIPTION("Cryptosoft (OCF module for kernel crypto)");
+diff -Nur linux-2.6.30.orig/crypto/ocf/ep80579/icp_asym.c linux-2.6.30/crypto/ocf/ep80579/icp_asym.c
+--- linux-2.6.30.orig/crypto/ocf/ep80579/icp_asym.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.30/crypto/ocf/ep80579/icp_asym.c	2009-06-11 10:55:27.000000000 +0200
+@@ -0,0 +1,1375 @@
++/***************************************************************************
++ *
++ * This file is provided under a dual BSD/GPLv2 license.  When using or 
++ *   redistributing this file, you may do so under either license.
++ * 
++ *   GPL LICENSE SUMMARY
++ * 
++ *   Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
++ * 
++ *   This program is free software; you can redistribute it and/or modify 
++ *   it under the terms of version 2 of the GNU General Public License as
++ *   published by the Free Software Foundation.
++ * 
++ *   This program is distributed in the hope that it will be useful, but 
++ *   WITHOUT ANY WARRANTY; without even the implied warranty of 
++ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU 
++ *   General Public License for more details.
++ * 
++ *   You should have received a copy of the GNU General Public License 
++ *   along with this program; if not, write to the Free Software 
++ *   Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *   The full GNU General Public License is included in this distribution 
++ *   in the file called LICENSE.GPL.
++ * 
++ *   Contact Information:
++ *   Intel Corporation
++ * 
++ *   BSD LICENSE 
++ * 
++ *   Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
++ *   All rights reserved.
++ * 
++ *   Redistribution and use in source and binary forms, with or without 
++ *   modification, are permitted provided that the following conditions 
++ *   are met:
++ * 
++ *     * Redistributions of source code must retain the above copyright 
++ *       notice, this list of conditions and the following disclaimer.
++ *     * Redistributions in binary form must reproduce the above copyright 
++ *       notice, this list of conditions and the following disclaimer in 
++ *       the documentation and/or other materials provided with the 
++ *       distribution.
++ *     * Neither the name of Intel Corporation nor the names of its 
++ *       contributors may be used to endorse or promote products derived 
++ *       from this software without specific prior written permission.
++ * 
++ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
++ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
++ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
++ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
++ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
++ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
++ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
++ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
++ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
++ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
++ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ * 
++ * 
++ *  version: Security.L.1.0.130
++ *
++ ***************************************************************************/
++
++#include "icp_ocf.h"
++
++/*The following define values (containing the word 'INDEX') are used to find
++the index of each input buffer of the crypto_kop struct (see OCF cryptodev.h).
++These values were found through analysis of the OCF OpenSSL patch. If the
++calling program uses different input buffer positions, these defines will have
++to be changed.*/
++
++/*DIFFIE HELLMAN buffer index values*/
++#define ICP_DH_KRP_PARAM_PRIME_INDEX				(0)
++#define ICP_DH_KRP_PARAM_BASE_INDEX				(1)
++#define ICP_DH_KRP_PARAM_PRIVATE_VALUE_INDEX			(2)
++#define ICP_DH_KRP_PARAM_RESULT_INDEX				(3)
++
++/*MOD EXP buffer index values*/
++#define ICP_MOD_EXP_KRP_PARAM_BASE_INDEX			(0)
++#define ICP_MOD_EXP_KRP_PARAM_EXPONENT_INDEX			(1)
++#define ICP_MOD_EXP_KRP_PARAM_MODULUS_INDEX			(2)
++#define ICP_MOD_EXP_KRP_PARAM_RESULT_INDEX			(3)
++
++#define SINGLE_BYTE_VALUE					(4)
++
++/*MOD EXP CRT buffer index values*/
++#define ICP_MOD_EXP_CRT_KRP_PARAM_PRIME_P_INDEX			(0)
++#define ICP_MOD_EXP_CRT_KRP_PARAM_PRIME_Q_INDEX			(1)
++#define ICP_MOD_EXP_CRT_KRP_PARAM_I_INDEX			(2)
++#define ICP_MOD_EXP_CRT_KRP_PARAM_EXPONENT_DP_INDEX		(3)
++#define ICP_MOD_EXP_CRT_KRP_PARAM_EXPONENT_DQ_INDEX		(4)
++#define ICP_MOD_EXP_CRT_KRP_PARAM_COEFF_QINV_INDEX		(5)
++#define ICP_MOD_EXP_CRT_KRP_PARAM_RESULT_INDEX			(6)
++
++/*DSA sign buffer index values*/
++#define ICP_DSA_SIGN_KRP_PARAM_DGST_INDEX			(0)
++#define ICP_DSA_SIGN_KRP_PARAM_PRIME_P_INDEX			(1)
++#define ICP_DSA_SIGN_KRP_PARAM_PRIME_Q_INDEX			(2)
++#define ICP_DSA_SIGN_KRP_PARAM_G_INDEX				(3)
++#define ICP_DSA_SIGN_KRP_PARAM_X_INDEX				(4)
++#define ICP_DSA_SIGN_KRP_PARAM_R_RESULT_INDEX			(5)
++#define ICP_DSA_SIGN_KRP_PARAM_S_RESULT_INDEX			(6)
++
++/*DSA verify buffer index values*/
++#define ICP_DSA_VERIFY_KRP_PARAM_DGST_INDEX			(0)
++#define ICP_DSA_VERIFY_KRP_PARAM_PRIME_P_INDEX			(1)
++#define ICP_DSA_VERIFY_KRP_PARAM_PRIME_Q_INDEX			(2)
++#define ICP_DSA_VERIFY_KRP_PARAM_G_INDEX			(3)
++#define ICP_DSA_VERIFY_KRP_PARAM_PUBKEY_INDEX			(4)
++#define ICP_DSA_VERIFY_KRP_PARAM_SIG_R_INDEX			(5)
++#define ICP_DSA_VERIFY_KRP_PARAM_SIG_S_INDEX			(6)
++
++/*DSA sign prime Q vs random number K size check values*/
++#define DONT_RUN_LESS_THAN_CHECK				(0)
++#define FAIL_A_IS_GREATER_THAN_B				(1)
++#define FAIL_A_IS_EQUAL_TO_B					(1)
++#define SUCCESS_A_IS_LESS_THAN_B				(0)
++#define DSA_SIGN_RAND_GEN_VAL_CHECK_MAX_ITERATIONS		(500)
++
++/* We need to set a cryptokp success value just in case it is set or allocated
++   and not set to zero outside of this module */
++#define CRYPTO_OP_SUCCESS					(0)
++
++static int icp_ocfDrvDHComputeKey(struct cryptkop *krp);
++
++static int icp_ocfDrvModExp(struct cryptkop *krp);
++
++static int icp_ocfDrvModExpCRT(struct cryptkop *krp);
++
++static int
++icp_ocfDrvCheckALessThanB(CpaFlatBuffer * pK, CpaFlatBuffer * pQ, int *doCheck);
++
++static int icp_ocfDrvDsaSign(struct cryptkop *krp);
++
++static int icp_ocfDrvDsaVerify(struct cryptkop *krp);
++
++static void
++icp_ocfDrvDhP1CallBack(void *callbackTag,
++		       CpaStatus status,
++		       void *pOpData, CpaFlatBuffer * pLocalOctetStringPV);
++
++static void
++icp_ocfDrvModExpCallBack(void *callbackTag,
++			 CpaStatus status,
++			 void *pOpData, CpaFlatBuffer * pResult);
++
++static void
++icp_ocfDrvModExpCRTCallBack(void *callbackTag,
++			    CpaStatus status,
++			    void *pOpData, CpaFlatBuffer * pOutputData);
++
++static void
++icp_ocfDrvDsaVerifyCallBack(void *callbackTag,
++			    CpaStatus status,
++			    void *pOpData, CpaBoolean verifyStatus);
++
++static void
++icp_ocfDrvDsaRSSignCallBack(void *callbackTag,
++			    CpaStatus status,
++			    void *pOpData,
++			    CpaBoolean protocolStatus,
++			    CpaFlatBuffer * pR, CpaFlatBuffer * pS);
++
++/* Name        : icp_ocfDrvPkeProcess
++ *
++ * Description : This function will choose which PKE process to follow
++ * based on the input arguments
++ */
++int icp_ocfDrvPkeProcess(device_t dev, struct cryptkop *krp, int hint)
++{
++	CpaStatus lacStatus = CPA_STATUS_SUCCESS;
++
++	if (NULL == krp) {
++		DPRINTK("%s(): Invalid input parameters, cryptkop = %p\n",
++			__FUNCTION__, krp);
++		return EINVAL;
++	}
++
++	if (CPA_TRUE == atomic_read(&icp_ocfDrvIsExiting)) {
++		krp->krp_status = ECANCELED;
++		return ECANCELED;
++	}
++
++	switch (krp->krp_op) {
++	case CRK_DH_COMPUTE_KEY:
++		DPRINTK("%s() doing DH_COMPUTE_KEY\n", __FUNCTION__);
++		lacStatus = icp_ocfDrvDHComputeKey(krp);
++		if (CPA_STATUS_SUCCESS != lacStatus) {
++			EPRINTK("%s(): icp_ocfDrvDHComputeKey failed "
++				"(%d).\n", __FUNCTION__, lacStatus);
++			krp->krp_status = ECANCELED;
++			return ECANCELED;
++		}
++
++		break;
++
++	case CRK_MOD_EXP:
++		DPRINTK("%s() doing MOD_EXP \n", __FUNCTION__);
++		lacStatus = icp_ocfDrvModExp(krp);
++		if (CPA_STATUS_SUCCESS != lacStatus) {
++			EPRINTK("%s(): icp_ocfDrvModExp failed (%d).\n",
++				__FUNCTION__, lacStatus);
++			krp->krp_status = ECANCELED;
++			return ECANCELED;
++		}
++
++		break;
++
++	case CRK_MOD_EXP_CRT:
++		DPRINTK("%s() doing MOD_EXP_CRT \n", __FUNCTION__);
++		lacStatus = icp_ocfDrvModExpCRT(krp);
++		if (CPA_STATUS_SUCCESS != lacStatus) {
++			EPRINTK("%s(): icp_ocfDrvModExpCRT "
++				"failed (%d).\n", __FUNCTION__, lacStatus);
++			krp->krp_status = ECANCELED;
++			return ECANCELED;
++		}
++
++		break;
++
++	case CRK_DSA_SIGN:
++		DPRINTK("%s() doing DSA_SIGN \n", __FUNCTION__);
++		lacStatus = icp_ocfDrvDsaSign(krp);
++		if (CPA_STATUS_SUCCESS != lacStatus) {
++			EPRINTK("%s(): icp_ocfDrvDsaSign "
++				"failed (%d).\n", __FUNCTION__, lacStatus);
++			krp->krp_status = ECANCELED;
++			return ECANCELED;
++		}
++
++		break;
++
++	case CRK_DSA_VERIFY:
++		DPRINTK("%s() doing DSA_VERIFY \n", __FUNCTION__);
++		lacStatus = icp_ocfDrvDsaVerify(krp);
++		if (CPA_STATUS_SUCCESS != lacStatus) {
++			EPRINTK("%s(): icp_ocfDrvDsaVerify "
++				"failed (%d).\n", __FUNCTION__, lacStatus);
++			krp->krp_status = ECANCELED;
++			return ECANCELED;
++		}
++
++		break;
++
++	default:
++		EPRINTK("%s(): Asymettric function not "
++			"supported (%d).\n", __FUNCTION__, krp->krp_op);
++		krp->krp_status = EOPNOTSUPP;
++		return EOPNOTSUPP;
++	}
++
++	return ICP_OCF_DRV_STATUS_SUCCESS;
++}
++
++/* Name        : icp_ocfDrvSwapBytes
++ *
++ * Description : This function is used to swap the byte order of a buffer.
++ * It has been seen that in general we are passed little endian byte order
++ * buffers, but LAC only accepts big endian byte order buffers.
++ */
++static void inline
++icp_ocfDrvSwapBytes(u_int8_t * num, u_int32_t buff_len_bytes)
++{
++
++	int i;
++	u_int8_t *end_ptr;
++	u_int8_t hold_val;
++
++	end_ptr = num + (buff_len_bytes - 1);
++	buff_len_bytes = buff_len_bytes >> 1;
++	for (i = 0; i < buff_len_bytes; i++) {
++		hold_val = *num;
++		*num = *end_ptr;
++		num++;
++		*end_ptr = hold_val;
++		end_ptr--;
++	}
++}
++
++/* Name        : icp_ocfDrvDHComputeKey
++ *
++ * Description : This function will map Diffie Hellman calls from OCF
++ * to the LAC API. OCF uses this function for Diffie Hellman Phase1 and
++ * Phase2. LAC has a separate Diffie Hellman Phase2 call, however both phases
++ * break down to a modular exponentiation.
++ */
++static int icp_ocfDrvDHComputeKey(struct cryptkop *krp)
++{
++	CpaStatus lacStatus = CPA_STATUS_SUCCESS;
++	void *callbackTag = NULL;
++	CpaCyDhPhase1KeyGenOpData *pPhase1OpData = NULL;
++	CpaFlatBuffer *pLocalOctetStringPV = NULL;
++	uint32_t dh_prime_len_bytes = 0, dh_prime_len_bits = 0;
++
++	/* Input checks - check prime is a multiple of 8 bits to allow for
++	   allocation later */
++	dh_prime_len_bits =
++	    (krp->krp_param[ICP_DH_KRP_PARAM_PRIME_INDEX].crp_nbits);
++
++	/* LAC can reject prime lengths based on prime key sizes, we just
++	   need to make sure we can allocate space for the base and
++	   exponent buffers correctly */
++	if ((dh_prime_len_bits % NUM_BITS_IN_BYTE) != 0) {
++		APRINTK("%s(): Warning Prime number buffer size is not a "
++			"multiple of 8 bits\n", __FUNCTION__);
++	}
++
++	/* Result storage space should be the same size as the prime as this
++	   value can take up the same amount of storage space */
++	if (dh_prime_len_bits !=
++	    krp->krp_param[ICP_DH_KRP_PARAM_RESULT_INDEX].crp_nbits) {
++		DPRINTK("%s(): Return Buffer must be the same size "
++			"as the Prime buffer\n", __FUNCTION__);
++		krp->krp_status = EINVAL;
++		return EINVAL;
++	}
++	/* Switch to size in bytes */
++	BITS_TO_BYTES(dh_prime_len_bytes, dh_prime_len_bits);
++
++	callbackTag = krp;
++
++	pPhase1OpData = kmem_cache_zalloc(drvDH_zone, GFP_KERNEL);
++	if (NULL == pPhase1OpData) {
++		APRINTK("%s():Failed to get memory for key gen data\n",
++			__FUNCTION__);
++		krp->krp_status = ENOMEM;
++		return ENOMEM;
++	}
++
++	pLocalOctetStringPV = kmem_cache_zalloc(drvFlatBuffer_zone, GFP_KERNEL);
++	if (NULL == pLocalOctetStringPV) {
++		APRINTK("%s():Failed to get memory for pLocalOctetStringPV\n",
++			__FUNCTION__);
++		kmem_cache_free(drvDH_zone, pPhase1OpData);
++		krp->krp_status = ENOMEM;
++		return ENOMEM;
++	}
++
++	/* Link parameters */
++	pPhase1OpData->primeP.pData =
++	    krp->krp_param[ICP_DH_KRP_PARAM_PRIME_INDEX].crp_p;
++
++	pPhase1OpData->primeP.dataLenInBytes = dh_prime_len_bytes;
++
++	icp_ocfDrvSwapBytes(pPhase1OpData->primeP.pData, dh_prime_len_bytes);
++
++	pPhase1OpData->baseG.pData =
++	    krp->krp_param[ICP_DH_KRP_PARAM_BASE_INDEX].crp_p;
++
++	BITS_TO_BYTES(pPhase1OpData->baseG.dataLenInBytes,
++		      krp->krp_param[ICP_DH_KRP_PARAM_BASE_INDEX].crp_nbits);
++
++	icp_ocfDrvSwapBytes(pPhase1OpData->baseG.pData,
++			    pPhase1OpData->baseG.dataLenInBytes);
++
++	pPhase1OpData->privateValueX.pData =
++	    krp->krp_param[ICP_DH_KRP_PARAM_PRIVATE_VALUE_INDEX].crp_p;
++
++	BITS_TO_BYTES(pPhase1OpData->privateValueX.dataLenInBytes,
++		      krp->krp_param[ICP_DH_KRP_PARAM_PRIVATE_VALUE_INDEX].
++		      crp_nbits);
++
++	icp_ocfDrvSwapBytes(pPhase1OpData->privateValueX.pData,
++			    pPhase1OpData->privateValueX.dataLenInBytes);
++
++	/* Output parameters */
++	pLocalOctetStringPV->pData =
++	    krp->krp_param[ICP_DH_KRP_PARAM_RESULT_INDEX].crp_p;
++
++	BITS_TO_BYTES(pLocalOctetStringPV->dataLenInBytes,
++		      krp->krp_param[ICP_DH_KRP_PARAM_RESULT_INDEX].crp_nbits);
++
++	lacStatus = cpaCyDhKeyGenPhase1(CPA_INSTANCE_HANDLE_SINGLE,
++					icp_ocfDrvDhP1CallBack,
++					callbackTag, pPhase1OpData,
++					pLocalOctetStringPV);
++
++	if (CPA_STATUS_SUCCESS != lacStatus) {
++		EPRINTK("%s(): DH Phase 1 Key Gen failed (%d).\n",
++			__FUNCTION__, lacStatus);
++		icp_ocfDrvFreeFlatBuffer(pLocalOctetStringPV);
++		kmem_cache_free(drvDH_zone, pPhase1OpData);
++	}
++
++	return lacStatus;
++}
++
++/* Name        : icp_ocfDrvModExp
++ *
++ * Description : This function will map ordinary Modular Exponentiation calls
++ * from OCF to the LAC API.
++ *
++ */
++static int icp_ocfDrvModExp(struct cryptkop *krp)
++{
++	CpaStatus lacStatus = CPA_STATUS_SUCCESS;
++	void *callbackTag = NULL;
++	CpaCyLnModExpOpData *pModExpOpData = NULL;
++	CpaFlatBuffer *pResult = NULL;
++
++	if ((krp->krp_param[ICP_MOD_EXP_KRP_PARAM_MODULUS_INDEX].crp_nbits %
++	     NUM_BITS_IN_BYTE) != 0) {
++		DPRINTK("%s(): Warning - modulus buffer size (%d) is not a "
++			"multiple of 8 bits\n", __FUNCTION__,
++			krp->krp_param[ICP_MOD_EXP_KRP_PARAM_MODULUS_INDEX].
++			crp_nbits);
++	}
++
++	/* Result storage space should be the same size as the prime as this
++	   value can take up the same amount of storage space */
++	if (krp->krp_param[ICP_MOD_EXP_KRP_PARAM_MODULUS_INDEX].crp_nbits >
++	    krp->krp_param[ICP_MOD_EXP_KRP_PARAM_RESULT_INDEX].crp_nbits) {
++		APRINTK("%s(): Return Buffer size must be the same or"
++			" greater than the Modulus buffer\n", __FUNCTION__);
++		krp->krp_status = EINVAL;
++		return EINVAL;
++	}
++
++	callbackTag = krp;
++
++	pModExpOpData = kmem_cache_zalloc(drvLnModExp_zone, GFP_KERNEL);
++	if (NULL == pModExpOpData) {
++		APRINTK("%s():Failed to get memory for key gen data\n",
++			__FUNCTION__);
++		krp->krp_status = ENOMEM;
++		return ENOMEM;
++	}
++
++	pResult = kmem_cache_zalloc(drvFlatBuffer_zone, GFP_KERNEL);
++	if (NULL == pResult) {
++		APRINTK("%s():Failed to get memory for ModExp result\n",
++			__FUNCTION__);
++		kmem_cache_free(drvLnModExp_zone, pModExpOpData);
++		krp->krp_status = ENOMEM;
++		return ENOMEM;
++	}
++
++	/* Link parameters */
++	pModExpOpData->modulus.pData =
++	    krp->krp_param[ICP_MOD_EXP_KRP_PARAM_MODULUS_INDEX].crp_p;
++	BITS_TO_BYTES(pModExpOpData->modulus.dataLenInBytes,
++		      krp->krp_param[ICP_MOD_EXP_KRP_PARAM_MODULUS_INDEX].
++		      crp_nbits);
++
++	icp_ocfDrvSwapBytes(pModExpOpData->modulus.pData,
++			    pModExpOpData->modulus.dataLenInBytes);
++
++	/*OCF patch to Openswan Pluto regularly sends the base value as 2
++	   bits in size. In this case, it has been found it is better to
++	   use the base size memory space as the input buffer (if the number
++	   is in bits is less than a byte, the number of bits is the input
++	   value) */
++	if (krp->krp_param[ICP_MOD_EXP_KRP_PARAM_BASE_INDEX].crp_nbits <
++	    NUM_BITS_IN_BYTE) {
++		DPRINTK("%s : base is small (%d)\n", __FUNCTION__, krp->
++			krp_param[ICP_MOD_EXP_KRP_PARAM_BASE_INDEX].crp_nbits);
++		pModExpOpData->base.dataLenInBytes = SINGLE_BYTE_VALUE;
++		pModExpOpData->base.pData =
++		    (uint8_t *) & (krp->
++				   krp_param[ICP_MOD_EXP_KRP_PARAM_BASE_INDEX].
++				   crp_nbits);
++		*((uint32_t *) pModExpOpData->base.pData) =
++		    htonl(*((uint32_t *) pModExpOpData->base.pData));
++
++	} else {
++
++		DPRINTK("%s : base is big (%d)\n", __FUNCTION__, krp->
++			krp_param[ICP_MOD_EXP_KRP_PARAM_BASE_INDEX].crp_nbits);
++		pModExpOpData->base.pData =
++		    krp->krp_param[ICP_MOD_EXP_KRP_PARAM_BASE_INDEX].crp_p;
++		BITS_TO_BYTES(pModExpOpData->base.dataLenInBytes,
++			      krp->krp_param[ICP_MOD_EXP_KRP_PARAM_BASE_INDEX].
++			      crp_nbits);
++		icp_ocfDrvSwapBytes(pModExpOpData->base.pData,
++				    pModExpOpData->base.dataLenInBytes);
++	}
++
++	pModExpOpData->exponent.pData =
++	    krp->krp_param[ICP_MOD_EXP_KRP_PARAM_EXPONENT_INDEX].crp_p;
++	BITS_TO_BYTES(pModExpOpData->exponent.dataLenInBytes,
++		      krp->krp_param[ICP_MOD_EXP_KRP_PARAM_EXPONENT_INDEX].
++		      crp_nbits);
++
++	icp_ocfDrvSwapBytes(pModExpOpData->exponent.pData,
++			    pModExpOpData->exponent.dataLenInBytes);
++	/* Output parameters */
++	pResult->pData =
++	    krp->krp_param[ICP_MOD_EXP_KRP_PARAM_RESULT_INDEX].crp_p,
++	    BITS_TO_BYTES(pResult->dataLenInBytes,
++			  krp->krp_param[ICP_MOD_EXP_KRP_PARAM_RESULT_INDEX].
++			  crp_nbits);
++
++	lacStatus = cpaCyLnModExp(CPA_INSTANCE_HANDLE_SINGLE,
++				  icp_ocfDrvModExpCallBack,
++				  callbackTag, pModExpOpData, pResult);
++
++	if (CPA_STATUS_SUCCESS != lacStatus) {
++		EPRINTK("%s(): Mod Exp Operation failed (%d).\n",
++			__FUNCTION__, lacStatus);
++		krp->krp_status = ECANCELED;
++		icp_ocfDrvFreeFlatBuffer(pResult);
++		kmem_cache_free(drvLnModExp_zone, pModExpOpData);
++	}
++
++	return lacStatus;
++}
++
++/* Name        : icp_ocfDrvModExpCRT
++ *
++ * Description : This function will map ordinary Modular Exponentiation Chinese
++ * Remainder Theorem implementation calls from OCF to the LAC API.
++ *
++ * Note : Mod Exp CRT for this driver is accelerated through LAC RSA type 2
++ * decrypt operation. Therefore P and Q input values must always be prime
++ * numbers. Although basic primality checks are done in LAC, it is up to the
++ * user to do any correct prime number checking before passing the inputs.
++ */
++
++static int icp_ocfDrvModExpCRT(struct cryptkop *krp)
++{
++	CpaStatus lacStatus = CPA_STATUS_SUCCESS;
++	CpaCyRsaDecryptOpData *rsaDecryptOpData = NULL;
++	void *callbackTag = NULL;
++	CpaFlatBuffer *pOutputData = NULL;
++
++	/*Parameter input checks are all done by LAC, no need to repeat
++	   them here. */
++	callbackTag = krp;
++
++	rsaDecryptOpData = kmem_cache_zalloc(drvRSADecrypt_zone, GFP_KERNEL);
++	if (NULL == rsaDecryptOpData) {
++		APRINTK("%s():Failed to get memory"
++			" for MOD EXP CRT Op data struct\n", __FUNCTION__);
++		krp->krp_status = ENOMEM;
++		return ENOMEM;
++	}
++
++	rsaDecryptOpData->pRecipientPrivateKey
++	    = kmem_cache_zalloc(drvRSAPrivateKey_zone, GFP_KERNEL);
++	if (NULL == rsaDecryptOpData->pRecipientPrivateKey) {
++		APRINTK("%s():Failed to get memory for MOD EXP CRT"
++			" private key values struct\n", __FUNCTION__);
++		kmem_cache_free(drvRSADecrypt_zone, rsaDecryptOpData);
++		krp->krp_status = ENOMEM;
++		return ENOMEM;
++	}
++
++	rsaDecryptOpData->pRecipientPrivateKey->
++	    version = CPA_CY_RSA_VERSION_TWO_PRIME;
++	rsaDecryptOpData->pRecipientPrivateKey->
++	    privateKeyRepType = CPA_CY_RSA_PRIVATE_KEY_REP_TYPE_2;
++
++	pOutputData = kmem_cache_zalloc(drvFlatBuffer_zone, GFP_KERNEL);
++	if (NULL == pOutputData) {
++		APRINTK("%s():Failed to get memory"
++			" for MOD EXP CRT output data\n", __FUNCTION__);
++		kmem_cache_free(drvRSAPrivateKey_zone,
++				rsaDecryptOpData->pRecipientPrivateKey);
++		kmem_cache_free(drvRSADecrypt_zone, rsaDecryptOpData);
++		krp->krp_status = ENOMEM;
++		return ENOMEM;
++	}
++
++	rsaDecryptOpData->pRecipientPrivateKey->
++	    version = CPA_CY_RSA_VERSION_TWO_PRIME;
++	rsaDecryptOpData->pRecipientPrivateKey->
++	    privateKeyRepType = CPA_CY_RSA_PRIVATE_KEY_REP_TYPE_2;
++
++	/* Link parameters */
++	rsaDecryptOpData->inputData.pData =
++	    krp->krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_I_INDEX].crp_p;
++	BITS_TO_BYTES(rsaDecryptOpData->inputData.dataLenInBytes,
++		      krp->krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_I_INDEX].
++		      crp_nbits);
++
++	icp_ocfDrvSwapBytes(rsaDecryptOpData->inputData.pData,
++			    rsaDecryptOpData->inputData.dataLenInBytes);
++
++	rsaDecryptOpData->pRecipientPrivateKey->privateKeyRep2.prime1P.pData =
++	    krp->krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_PRIME_P_INDEX].crp_p;
++	BITS_TO_BYTES(rsaDecryptOpData->pRecipientPrivateKey->privateKeyRep2.
++		      prime1P.dataLenInBytes,
++		      krp->krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_PRIME_P_INDEX].
++		      crp_nbits);
++
++	icp_ocfDrvSwapBytes(rsaDecryptOpData->pRecipientPrivateKey->
++			    privateKeyRep2.prime1P.pData,
++			    rsaDecryptOpData->pRecipientPrivateKey->
++			    privateKeyRep2.prime1P.dataLenInBytes);
++
++	rsaDecryptOpData->pRecipientPrivateKey->privateKeyRep2.prime2Q.pData =
++	    krp->krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_PRIME_Q_INDEX].crp_p;
++	BITS_TO_BYTES(rsaDecryptOpData->pRecipientPrivateKey->privateKeyRep2.
++		      prime2Q.dataLenInBytes,
++		      krp->krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_PRIME_Q_INDEX].
++		      crp_nbits);
++
++	icp_ocfDrvSwapBytes(rsaDecryptOpData->pRecipientPrivateKey->
++			    privateKeyRep2.prime2Q.pData,
++			    rsaDecryptOpData->pRecipientPrivateKey->
++			    privateKeyRep2.prime2Q.dataLenInBytes);
++
++	rsaDecryptOpData->pRecipientPrivateKey->
++	    privateKeyRep2.exponent1Dp.pData =
++	    krp->krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_EXPONENT_DP_INDEX].crp_p;
++	BITS_TO_BYTES(rsaDecryptOpData->pRecipientPrivateKey->privateKeyRep2.
++		      exponent1Dp.dataLenInBytes,
++		      krp->
++		      krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_EXPONENT_DP_INDEX].
++		      crp_nbits);
++
++	icp_ocfDrvSwapBytes(rsaDecryptOpData->pRecipientPrivateKey->
++			    privateKeyRep2.exponent1Dp.pData,
++			    rsaDecryptOpData->pRecipientPrivateKey->
++			    privateKeyRep2.exponent1Dp.dataLenInBytes);
++
++	rsaDecryptOpData->pRecipientPrivateKey->
++	    privateKeyRep2.exponent2Dq.pData =
++	    krp->krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_EXPONENT_DQ_INDEX].crp_p;
++	BITS_TO_BYTES(rsaDecryptOpData->pRecipientPrivateKey->
++		      privateKeyRep2.exponent2Dq.dataLenInBytes,
++		      krp->
++		      krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_EXPONENT_DQ_INDEX].
++		      crp_nbits);
++
++	icp_ocfDrvSwapBytes(rsaDecryptOpData->pRecipientPrivateKey->
++			    privateKeyRep2.exponent2Dq.pData,
++			    rsaDecryptOpData->pRecipientPrivateKey->
++			    privateKeyRep2.exponent2Dq.dataLenInBytes);
++
++	rsaDecryptOpData->pRecipientPrivateKey->
++	    privateKeyRep2.coefficientQInv.pData =
++	    krp->krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_COEFF_QINV_INDEX].crp_p;
++	BITS_TO_BYTES(rsaDecryptOpData->pRecipientPrivateKey->
++		      privateKeyRep2.coefficientQInv.dataLenInBytes,
++		      krp->
++		      krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_COEFF_QINV_INDEX].
++		      crp_nbits);
++
++	icp_ocfDrvSwapBytes(rsaDecryptOpData->pRecipientPrivateKey->
++			    privateKeyRep2.coefficientQInv.pData,
++			    rsaDecryptOpData->pRecipientPrivateKey->
++			    privateKeyRep2.coefficientQInv.dataLenInBytes);
++
++	/* Output Parameter */
++	pOutputData->pData =
++	    krp->krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_RESULT_INDEX].crp_p;
++	BITS_TO_BYTES(pOutputData->dataLenInBytes,
++		      krp->krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_RESULT_INDEX].
++		      crp_nbits);
++
++	lacStatus = cpaCyRsaDecrypt(CPA_INSTANCE_HANDLE_SINGLE,
++				    icp_ocfDrvModExpCRTCallBack,
++				    callbackTag, rsaDecryptOpData, pOutputData);
++
++	if (CPA_STATUS_SUCCESS != lacStatus) {
++		EPRINTK("%s(): Mod Exp CRT Operation failed (%d).\n",
++			__FUNCTION__, lacStatus);
++		krp->krp_status = ECANCELED;
++		icp_ocfDrvFreeFlatBuffer(pOutputData);
++		kmem_cache_free(drvRSAPrivateKey_zone,
++				rsaDecryptOpData->pRecipientPrivateKey);
++		kmem_cache_free(drvRSADecrypt_zone, rsaDecryptOpData);
++	}
++
++	return lacStatus;
++}
++
++/* Name        : icp_ocfDrvCheckALessThanB
++ *
++ * Description : This function will check whether the first argument is less
++ * than the second. It is used to check whether the DSA RS sign Random K
++ * value is less than the Prime Q value (as defined in the specification)
++ *
++ */
++static int
++icp_ocfDrvCheckALessThanB(CpaFlatBuffer * pK, CpaFlatBuffer * pQ, int *doCheck)
++{
++
++	uint8_t *MSB_K = pK->pData;
++	uint8_t *MSB_Q = pQ->pData;
++	uint32_t buffer_lengths_in_bytes = pQ->dataLenInBytes;
++
++	if (DONT_RUN_LESS_THAN_CHECK == *doCheck) {
++		return FAIL_A_IS_GREATER_THAN_B;
++	}
++
++/*Check MSBs
++if A == B, check next MSB
++if A > B, return A_IS_GREATER_THAN_B
++if A < B, return A_IS_LESS_THAN_B (success)
++*/
++	while (*MSB_K == *MSB_Q) {
++		MSB_K++;
++		MSB_Q++;
++
++		buffer_lengths_in_bytes--;
++		if (0 == buffer_lengths_in_bytes) {
++			DPRINTK("%s() Buffers have equal value!!\n",
++				__FUNCTION__);
++			return FAIL_A_IS_EQUAL_TO_B;
++		}
++
++	}
++
++	if (*MSB_K < *MSB_Q) {
++		return SUCCESS_A_IS_LESS_THAN_B;
++	} else {
++		return FAIL_A_IS_GREATER_THAN_B;
++	}
++
++}
++
++/* Name        : icp_ocfDrvDsaSign
++ *
++ * Description : This function will map DSA RS Sign from OCF to the LAC API.
++ *
++ * NOTE: From looking at OCF patch to OpenSSL and even the number of input
++ * parameters, OCF expects us to generate the random seed value. This value
++ * is generated and passed to LAC, however the number is discarded in the
++ * callback and not returned to the user.
++ */
++static int icp_ocfDrvDsaSign(struct cryptkop *krp)
++{
++	CpaStatus lacStatus = CPA_STATUS_SUCCESS;
++	CpaCyDsaRSSignOpData *dsaRsSignOpData = NULL;
++	void *callbackTag = NULL;
++	CpaCyRandGenOpData randGenOpData;
++	int primeQSizeInBytes = 0;
++	int doCheck = 0;
++	CpaFlatBuffer randData;
++	CpaBoolean protocolStatus = CPA_FALSE;
++	CpaFlatBuffer *pR = NULL;
++	CpaFlatBuffer *pS = NULL;
++
++	callbackTag = krp;
++
++	BITS_TO_BYTES(primeQSizeInBytes,
++		      krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_PRIME_Q_INDEX].
++		      crp_nbits);
++
++	if (DSA_RS_SIGN_PRIMEQ_SIZE_IN_BYTES != primeQSizeInBytes) {
++		APRINTK("%s(): DSA PRIME Q size not equal to the "
++			"FIPS defined 20bytes, = %d\n",
++			__FUNCTION__, primeQSizeInBytes);
++		krp->krp_status = EDOM;
++		return EDOM;
++	}
++
++	dsaRsSignOpData = kmem_cache_zalloc(drvDSARSSign_zone, GFP_KERNEL);
++	if (NULL == dsaRsSignOpData) {
++		APRINTK("%s():Failed to get memory"
++			" for DSA RS Sign Op data struct\n", __FUNCTION__);
++		krp->krp_status = ENOMEM;
++		return ENOMEM;
++	}
++
++	dsaRsSignOpData->K.pData =
++	    kmem_cache_alloc(drvDSARSSignKValue_zone, GFP_ATOMIC);
++
++	if (NULL == dsaRsSignOpData->K.pData) {
++		APRINTK("%s():Failed to get memory"
++			" for DSA RS Sign Op Random value\n", __FUNCTION__);
++		kmem_cache_free(drvDSARSSign_zone, dsaRsSignOpData);
++		krp->krp_status = ENOMEM;
++		return ENOMEM;
++	}
++
++	pR = kmem_cache_zalloc(drvFlatBuffer_zone, GFP_KERNEL);
++	if (NULL == pR) {
++		APRINTK("%s():Failed to get memory"
++			" for DSA signature R\n", __FUNCTION__);
++		kmem_cache_free(drvDSARSSignKValue_zone,
++				dsaRsSignOpData->K.pData);
++		kmem_cache_free(drvDSARSSign_zone, dsaRsSignOpData);
++		krp->krp_status = ENOMEM;
++		return ENOMEM;
++	}
++
++	pS = kmem_cache_zalloc(drvFlatBuffer_zone, GFP_KERNEL);
++	if (NULL == pS) {
++		APRINTK("%s():Failed to get memory"
++			" for DSA signature S\n", __FUNCTION__);
++		icp_ocfDrvFreeFlatBuffer(pR);
++		kmem_cache_free(drvDSARSSignKValue_zone,
++				dsaRsSignOpData->K.pData);
++		kmem_cache_free(drvDSARSSign_zone, dsaRsSignOpData);
++		krp->krp_status = ENOMEM;
++		return ENOMEM;
++	}
++
++	/*link prime number parameter for ease of processing */
++	dsaRsSignOpData->P.pData =
++	    krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_PRIME_P_INDEX].crp_p;
++	BITS_TO_BYTES(dsaRsSignOpData->P.dataLenInBytes,
++		      krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_PRIME_P_INDEX].
++		      crp_nbits);
++
++	icp_ocfDrvSwapBytes(dsaRsSignOpData->P.pData,
++			    dsaRsSignOpData->P.dataLenInBytes);
++
++	dsaRsSignOpData->Q.pData =
++	    krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_PRIME_Q_INDEX].crp_p;
++	BITS_TO_BYTES(dsaRsSignOpData->Q.dataLenInBytes,
++		      krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_PRIME_Q_INDEX].
++		      crp_nbits);
++
++	icp_ocfDrvSwapBytes(dsaRsSignOpData->Q.pData,
++			    dsaRsSignOpData->Q.dataLenInBytes);
++
++	/*generate random number with equal buffer size to Prime value Q,
++	   but value less than Q */
++	dsaRsSignOpData->K.dataLenInBytes = dsaRsSignOpData->Q.dataLenInBytes;
++
++	randGenOpData.generateBits = CPA_TRUE;
++	randGenOpData.lenInBytes = dsaRsSignOpData->K.dataLenInBytes;
++
++	icp_ocfDrvPtrAndLenToFlatBuffer(dsaRsSignOpData->K.pData,
++					dsaRsSignOpData->K.dataLenInBytes,
++					&randData);
++
++	doCheck = 0;
++	while (icp_ocfDrvCheckALessThanB(&(dsaRsSignOpData->K),
++					 &(dsaRsSignOpData->Q), &doCheck)) {
++
++		if (CPA_STATUS_SUCCESS
++		    != cpaCyRandGen(CPA_INSTANCE_HANDLE_SINGLE,
++				    NULL, NULL, &randGenOpData, &randData)) {
++			APRINTK("%s(): ERROR - Failed to generate DSA RS Sign K"
++				"value\n", __FUNCTION__);
++			icp_ocfDrvFreeFlatBuffer(pS);
++			icp_ocfDrvFreeFlatBuffer(pR);
++			kmem_cache_free(drvDSARSSignKValue_zone,
++					dsaRsSignOpData->K.pData);
++			kmem_cache_free(drvDSARSSign_zone, dsaRsSignOpData);
++			krp->krp_status = EAGAIN;
++			return EAGAIN;
++		}
++
++		doCheck++;
++		if (DSA_SIGN_RAND_GEN_VAL_CHECK_MAX_ITERATIONS == doCheck) {
++			APRINTK("%s(): ERROR - Failed to find DSA RS Sign K "
++				"value less than Q value\n", __FUNCTION__);
++			icp_ocfDrvFreeFlatBuffer(pS);
++			icp_ocfDrvFreeFlatBuffer(pR);
++			kmem_cache_free(drvDSARSSignKValue_zone,
++					dsaRsSignOpData->K.pData);
++			kmem_cache_free(drvDSARSSign_zone, dsaRsSignOpData);
++			krp->krp_status = EAGAIN;
++			return EAGAIN;
++		}
++
++	}
++	/*Rand Data - no need to swap bytes for pK */
++
++	/* Link parameters */
++	dsaRsSignOpData->G.pData =
++	    krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_G_INDEX].crp_p;
++	BITS_TO_BYTES(dsaRsSignOpData->G.dataLenInBytes,
++		      krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_G_INDEX].crp_nbits);
++
++	icp_ocfDrvSwapBytes(dsaRsSignOpData->G.pData,
++			    dsaRsSignOpData->G.dataLenInBytes);
++
++	dsaRsSignOpData->X.pData =
++	    krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_X_INDEX].crp_p;
++	BITS_TO_BYTES(dsaRsSignOpData->X.dataLenInBytes,
++		      krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_X_INDEX].crp_nbits);
++	icp_ocfDrvSwapBytes(dsaRsSignOpData->X.pData,
++			    dsaRsSignOpData->X.dataLenInBytes);
++
++	dsaRsSignOpData->M.pData =
++	    krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_DGST_INDEX].crp_p;
++	BITS_TO_BYTES(dsaRsSignOpData->M.dataLenInBytes,
++		      krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_DGST_INDEX].
++		      crp_nbits);
++	icp_ocfDrvSwapBytes(dsaRsSignOpData->M.pData,
++			    dsaRsSignOpData->M.dataLenInBytes);
++
++	/* Output Parameters */
++	pS->pData = krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_S_RESULT_INDEX].crp_p;
++	BITS_TO_BYTES(pS->dataLenInBytes,
++		      krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_S_RESULT_INDEX].
++		      crp_nbits);
++
++	pR->pData = krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_R_RESULT_INDEX].crp_p;
++	BITS_TO_BYTES(pR->dataLenInBytes,
++		      krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_R_RESULT_INDEX].
++		      crp_nbits);
++
++	lacStatus = cpaCyDsaSignRS(CPA_INSTANCE_HANDLE_SINGLE,
++				   icp_ocfDrvDsaRSSignCallBack,
++				   callbackTag, dsaRsSignOpData,
++				   &protocolStatus, pR, pS);
++
++	if (CPA_STATUS_SUCCESS != lacStatus) {
++		EPRINTK("%s(): DSA RS Sign Operation failed (%d).\n",
++			__FUNCTION__, lacStatus);
++		krp->krp_status = ECANCELED;
++		icp_ocfDrvFreeFlatBuffer(pS);
++		icp_ocfDrvFreeFlatBuffer(pR);
++		kmem_cache_free(drvDSARSSignKValue_zone,
++				dsaRsSignOpData->K.pData);
++		kmem_cache_free(drvDSARSSign_zone, dsaRsSignOpData);
++	}
++
++	return lacStatus;
++}
++
++/* Name        : icp_ocfDrvDsaVerify
++ *
++ * Description : This function will map DSA RS Verify from OCF to the LAC API.
++ *
++ */
++static int icp_ocfDrvDsaVerify(struct cryptkop *krp)
++{
++	CpaStatus lacStatus = CPA_STATUS_SUCCESS;
++	CpaCyDsaVerifyOpData *dsaVerifyOpData = NULL;
++	void *callbackTag = NULL;
++	CpaBoolean verifyStatus = CPA_FALSE;
++
++	callbackTag = krp;
++
++	dsaVerifyOpData = kmem_cache_zalloc(drvDSAVerify_zone, GFP_KERNEL);
++	if (NULL == dsaVerifyOpData) {
++		APRINTK("%s():Failed to get memory"
++			" for DSA Verify Op data struct\n", __FUNCTION__);
++		krp->krp_status = ENOMEM;
++		return ENOMEM;
++	}
++
++	/* Link parameters */
++	dsaVerifyOpData->P.pData =
++	    krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_PRIME_P_INDEX].crp_p;
++	BITS_TO_BYTES(dsaVerifyOpData->P.dataLenInBytes,
++		      krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_PRIME_P_INDEX].
++		      crp_nbits);
++	icp_ocfDrvSwapBytes(dsaVerifyOpData->P.pData,
++			    dsaVerifyOpData->P.dataLenInBytes);
++
++	dsaVerifyOpData->Q.pData =
++	    krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_PRIME_Q_INDEX].crp_p;
++	BITS_TO_BYTES(dsaVerifyOpData->Q.dataLenInBytes,
++		      krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_PRIME_Q_INDEX].
++		      crp_nbits);
++	icp_ocfDrvSwapBytes(dsaVerifyOpData->Q.pData,
++			    dsaVerifyOpData->Q.dataLenInBytes);
++
++	dsaVerifyOpData->G.pData =
++	    krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_G_INDEX].crp_p;
++	BITS_TO_BYTES(dsaVerifyOpData->G.dataLenInBytes,
++		      krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_G_INDEX].
++		      crp_nbits);
++	icp_ocfDrvSwapBytes(dsaVerifyOpData->G.pData,
++			    dsaVerifyOpData->G.dataLenInBytes);
++
++	dsaVerifyOpData->Y.pData =
++	    krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_PUBKEY_INDEX].crp_p;
++	BITS_TO_BYTES(dsaVerifyOpData->Y.dataLenInBytes,
++		      krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_PUBKEY_INDEX].
++		      crp_nbits);
++	icp_ocfDrvSwapBytes(dsaVerifyOpData->Y.pData,
++			    dsaVerifyOpData->Y.dataLenInBytes);
++
++	dsaVerifyOpData->M.pData =
++	    krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_DGST_INDEX].crp_p;
++	BITS_TO_BYTES(dsaVerifyOpData->M.dataLenInBytes,
++		      krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_DGST_INDEX].
++		      crp_nbits);
++	icp_ocfDrvSwapBytes(dsaVerifyOpData->M.pData,
++			    dsaVerifyOpData->M.dataLenInBytes);
++
++	dsaVerifyOpData->R.pData =
++	    krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_SIG_R_INDEX].crp_p;
++	BITS_TO_BYTES(dsaVerifyOpData->R.dataLenInBytes,
++		      krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_SIG_R_INDEX].
++		      crp_nbits);
++	icp_ocfDrvSwapBytes(dsaVerifyOpData->R.pData,
++			    dsaVerifyOpData->R.dataLenInBytes);
++
++	dsaVerifyOpData->S.pData =
++	    krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_SIG_S_INDEX].crp_p;
++	BITS_TO_BYTES(dsaVerifyOpData->S.dataLenInBytes,
++		      krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_SIG_S_INDEX].
++		      crp_nbits);
++	icp_ocfDrvSwapBytes(dsaVerifyOpData->S.pData,
++			    dsaVerifyOpData->S.dataLenInBytes);
++
++	lacStatus = cpaCyDsaVerify(CPA_INSTANCE_HANDLE_SINGLE,
++				   icp_ocfDrvDsaVerifyCallBack,
++				   callbackTag, dsaVerifyOpData, &verifyStatus);
++
++	if (CPA_STATUS_SUCCESS != lacStatus) {
++		EPRINTK("%s(): DSA Verify Operation failed (%d).\n",
++			__FUNCTION__, lacStatus);
++		kmem_cache_free(drvDSAVerify_zone, dsaVerifyOpData);
++		krp->krp_status = ECANCELED;
++	}
++
++	return lacStatus;
++}
++
++/* Name        : icp_ocfDrvReadRandom
++ *
++ * Description : This function will map RNG functionality calls from OCF
++ * to the LAC API.
++ */
++int icp_ocfDrvReadRandom(void *arg, uint32_t * buf, int maxwords)
++{
++	CpaStatus lacStatus = CPA_STATUS_SUCCESS;
++	CpaCyRandGenOpData randGenOpData;
++	CpaFlatBuffer randData;
++
++	if (NULL == buf) {
++		APRINTK("%s(): Invalid input parameters\n", __FUNCTION__);
++		return EINVAL;
++	}
++
++	/* maxwords here is number of integers to generate data for */
++	randGenOpData.generateBits = CPA_TRUE;
++
++	randGenOpData.lenInBytes = maxwords * sizeof(uint32_t);
++
++	icp_ocfDrvPtrAndLenToFlatBuffer((Cpa8U *) buf,
++					randGenOpData.lenInBytes, &randData);
++
++	lacStatus = cpaCyRandGen(CPA_INSTANCE_HANDLE_SINGLE,
++				 NULL, NULL, &randGenOpData, &randData);
++	if (CPA_STATUS_SUCCESS != lacStatus) {
++		EPRINTK("%s(): icp_LacSymRandGen failed (%d). \n",
++			__FUNCTION__, lacStatus);
++		return RETURN_RAND_NUM_GEN_FAILED;
++	}
++
++	return randGenOpData.lenInBytes / sizeof(uint32_t);
++}
++
++/* Name        : icp_ocfDrvDhP1CallBack
++ *
++ * Description : When this function returns it signifies that the LAC
++ * component has completed the DH operation.
++ */
++static void
++icp_ocfDrvDhP1CallBack(void *callbackTag,
++		       CpaStatus status,
++		       void *pOpData, CpaFlatBuffer * pLocalOctetStringPV)
++{
++	struct cryptkop *krp = NULL;
++	CpaCyDhPhase1KeyGenOpData *pPhase1OpData = NULL;
++
++	if (NULL == callbackTag) {
++		DPRINTK("%s(): Invalid input parameters - "
++			"callbackTag data is NULL\n", __FUNCTION__);
++		return;
++	}
++	krp = (struct cryptkop *)callbackTag;
++
++	if (NULL == pOpData) {
++		DPRINTK("%s(): Invalid input parameters - "
++			"Operation Data is NULL\n", __FUNCTION__);
++		krp->krp_status = ECANCELED;
++		crypto_kdone(krp);
++		return;
++	}
++	pPhase1OpData = (CpaCyDhPhase1KeyGenOpData *) pOpData;
++
++	if (NULL == pLocalOctetStringPV) {
++		DPRINTK("%s(): Invalid input parameters - "
++			"pLocalOctetStringPV Data is NULL\n", __FUNCTION__);
++		memset(pPhase1OpData, 0, sizeof(CpaCyDhPhase1KeyGenOpData));
++		kmem_cache_free(drvDH_zone, pPhase1OpData);
++		krp->krp_status = ECANCELED;
++		crypto_kdone(krp);
++		return;
++	}
++
++	if (CPA_STATUS_SUCCESS == status) {
++		krp->krp_status = CRYPTO_OP_SUCCESS;
++	} else {
++		APRINTK("%s(): Diffie Hellman Phase1 Key Gen failed - "
++			"Operation Status = %d\n", __FUNCTION__, status);
++		krp->krp_status = ECANCELED;
++	}
++
++	icp_ocfDrvSwapBytes(pLocalOctetStringPV->pData,
++			    pLocalOctetStringPV->dataLenInBytes);
++
++	icp_ocfDrvFreeFlatBuffer(pLocalOctetStringPV);
++	memset(pPhase1OpData, 0, sizeof(CpaCyDhPhase1KeyGenOpData));
++	kmem_cache_free(drvDH_zone, pPhase1OpData);
++
++	crypto_kdone(krp);
++
++	return;
++}
++
++/* Name        : icp_ocfDrvModExpCallBack
++ *
++ * Description : When this function returns it signifies that the LAC
++ * component has completed the Mod Exp operation.
++ */
++static void
++icp_ocfDrvModExpCallBack(void *callbackTag,
++			 CpaStatus status,
++			 void *pOpdata, CpaFlatBuffer * pResult)
++{
++	struct cryptkop *krp = NULL;
++	CpaCyLnModExpOpData *pLnModExpOpData = NULL;
++
++	if (NULL == callbackTag) {
++		DPRINTK("%s(): Invalid input parameters - "
++			"callbackTag data is NULL\n", __FUNCTION__);
++		return;
++	}
++	krp = (struct cryptkop *)callbackTag;
++
++	if (NULL == pOpdata) {
++		DPRINTK("%s(): Invalid Mod Exp input parameters - "
++			"Operation Data is NULL\n", __FUNCTION__);
++		krp->krp_status = ECANCELED;
++		crypto_kdone(krp);
++		return;
++	}
++	pLnModExpOpData = (CpaCyLnModExpOpData *) pOpdata;
++
++	if (NULL == pResult) {
++		DPRINTK("%s(): Invalid input parameters - "
++			"pResult data is NULL\n", __FUNCTION__);
++		krp->krp_status = ECANCELED;
++		memset(pLnModExpOpData, 0, sizeof(CpaCyLnModExpOpData));
++		kmem_cache_free(drvLnModExp_zone, pLnModExpOpData);
++		crypto_kdone(krp);
++		return;
++	}
++
++	if (CPA_STATUS_SUCCESS == status) {
++		krp->krp_status = CRYPTO_OP_SUCCESS;
++	} else {
++		APRINTK("%s(): LAC Mod Exp Operation failed - "
++			"Operation Status = %d\n", __FUNCTION__, status);
++		krp->krp_status = ECANCELED;
++	}
++
++	icp_ocfDrvSwapBytes(pResult->pData, pResult->dataLenInBytes);
++
++	/*switch base size value back to original */
++	if (pLnModExpOpData->base.pData ==
++	    (uint8_t *) & (krp->
++			   krp_param[ICP_MOD_EXP_KRP_PARAM_BASE_INDEX].
++			   crp_nbits)) {
++		*((uint32_t *) pLnModExpOpData->base.pData) =
++		    ntohl(*((uint32_t *) pLnModExpOpData->base.pData));
++	}
++	icp_ocfDrvFreeFlatBuffer(pResult);
++	memset(pLnModExpOpData, 0, sizeof(CpaCyLnModExpOpData));
++	kmem_cache_free(drvLnModExp_zone, pLnModExpOpData);
++
++	crypto_kdone(krp);
++
++	return;
++
++}
++
++/* Name        : icp_ocfDrvModExpCRTCallBack
++ *
++ * Description : When this function returns it signifies that the LAC
++ * component has completed the Mod Exp CRT operation.
++ */
++static void
++icp_ocfDrvModExpCRTCallBack(void *callbackTag,
++			    CpaStatus status,
++			    void *pOpData, CpaFlatBuffer * pOutputData)
++{
++	struct cryptkop *krp = NULL;
++	CpaCyRsaDecryptOpData *pDecryptData = NULL;
++
++	if (NULL == callbackTag) {
++		DPRINTK("%s(): Invalid input parameters - "
++			"callbackTag data is NULL\n", __FUNCTION__);
++		return;
++	}
++
++	krp = (struct cryptkop *)callbackTag;
++
++	if (NULL == pOpData) {
++		DPRINTK("%s(): Invalid input parameters - "
++			"Operation Data is NULL\n", __FUNCTION__);
++		krp->krp_status = ECANCELED;
++		crypto_kdone(krp);
++		return;
++	}
++	pDecryptData = (CpaCyRsaDecryptOpData *) pOpData;
++
++	if (NULL == pOutputData) {
++		DPRINTK("%s(): Invalid input parameter - "
++			"pOutputData is NULL\n", __FUNCTION__);
++		memset(pDecryptData->pRecipientPrivateKey, 0,
++		       sizeof(CpaCyRsaPrivateKey));
++		kmem_cache_free(drvRSAPrivateKey_zone,
++				pDecryptData->pRecipientPrivateKey);
++		memset(pDecryptData, 0, sizeof(CpaCyRsaDecryptOpData));
++		kmem_cache_free(drvRSADecrypt_zone, pDecryptData);
++		krp->krp_status = ECANCELED;
++		crypto_kdone(krp);
++		return;
++	}
++
++	if (CPA_STATUS_SUCCESS == status) {
++		krp->krp_status = CRYPTO_OP_SUCCESS;
++	} else {
++		APRINTK("%s(): LAC Mod Exp CRT operation failed - "
++			"Operation Status = %d\n", __FUNCTION__, status);
++		krp->krp_status = ECANCELED;
++	}
++
++	icp_ocfDrvSwapBytes(pOutputData->pData, pOutputData->dataLenInBytes);
++
++	icp_ocfDrvFreeFlatBuffer(pOutputData);
++	memset(pDecryptData->pRecipientPrivateKey, 0,
++	       sizeof(CpaCyRsaPrivateKey));
++	kmem_cache_free(drvRSAPrivateKey_zone,
++			pDecryptData->pRecipientPrivateKey);
++	memset(pDecryptData, 0, sizeof(CpaCyRsaDecryptOpData));
++	kmem_cache_free(drvRSADecrypt_zone, pDecryptData);
++
++	crypto_kdone(krp);
++
++	return;
++}
++
++/* Name        : icp_ocfDrvDsaRSSignCallBack
++ *
++ * Description : When this function returns it signifies that the LAC
++ * component has completed the DSA RS sign operation.
++ */
++static void
++icp_ocfDrvDsaRSSignCallBack(void *callbackTag,
++			    CpaStatus status,
++			    void *pOpData,
++			    CpaBoolean protocolStatus,
++			    CpaFlatBuffer * pR, CpaFlatBuffer * pS)
++{
++	struct cryptkop *krp = NULL;
++	CpaCyDsaRSSignOpData *pSignData = NULL;
++
++	if (NULL == callbackTag) {
++		DPRINTK("%s(): Invalid input parameters - "
++			"callbackTag data is NULL\n", __FUNCTION__);
++		return;
++	}
++
++	krp = (struct cryptkop *)callbackTag;
++
++	if (NULL == pOpData) {
++		DPRINTK("%s(): Invalid input parameters - "
++			"Operation Data is NULL\n", __FUNCTION__);
++		krp->krp_status = ECANCELED;
++		crypto_kdone(krp);
++		return;
++	}
++	pSignData = (CpaCyDsaRSSignOpData *) pOpData;
++
++	if (NULL == pR) {
++		DPRINTK("%s(): Invalid input parameter - "
++			"pR sign is NULL\n", __FUNCTION__);
++		icp_ocfDrvFreeFlatBuffer(pS);
++		kmem_cache_free(drvDSARSSign_zone, pSignData);
++		krp->krp_status = ECANCELED;
++		crypto_kdone(krp);
++		return;
++	}
++
++	if (NULL == pS) {
++		DPRINTK("%s(): Invalid input parameter - "
++			"pS sign is NULL\n", __FUNCTION__);
++		icp_ocfDrvFreeFlatBuffer(pR);
++		kmem_cache_free(drvDSARSSign_zone, pSignData);
++		krp->krp_status = ECANCELED;
++		crypto_kdone(krp);
++		return;
++	}
++
++	if (CPA_STATUS_SUCCESS != status) {
++		APRINTK("%s(): LAC DSA RS Sign operation failed - "
++			"Operation Status = %d\n", __FUNCTION__, status);
++		krp->krp_status = ECANCELED;
++	} else {
++		krp->krp_status = CRYPTO_OP_SUCCESS;
++
++		if (CPA_TRUE != protocolStatus) {
++			DPRINTK("%s(): LAC DSA RS Sign operation failed due "
++				"to protocol error\n", __FUNCTION__);
++			krp->krp_status = EIO;
++		}
++	}
++
++	/* Swap bytes only when the callback status is successful and
++	   protocolStatus is set to true */
++	if (CPA_STATUS_SUCCESS == status && CPA_TRUE == protocolStatus) {
++		icp_ocfDrvSwapBytes(pR->pData, pR->dataLenInBytes);
++		icp_ocfDrvSwapBytes(pS->pData, pS->dataLenInBytes);
++	}
++
++	icp_ocfDrvFreeFlatBuffer(pR);
++	icp_ocfDrvFreeFlatBuffer(pS);
++	memset(pSignData->K.pData, 0, pSignData->K.dataLenInBytes);
++	kmem_cache_free(drvDSARSSignKValue_zone, pSignData->K.pData);
++	memset(pSignData, 0, sizeof(CpaCyDsaRSSignOpData));
++	kmem_cache_free(drvDSARSSign_zone, pSignData);
++	crypto_kdone(krp);
++
++	return;
++}
++
++/* Name        : icp_ocfDrvDsaVerifyCallBack
++ *
++ * Description : When this function returns it signifies that the LAC
++ * component has completed the DSA Verify operation.
++ */
++static void
++icp_ocfDrvDsaVerifyCallBack(void *callbackTag,
++			    CpaStatus status,
++			    void *pOpData, CpaBoolean verifyStatus)
++{
++
++	struct cryptkop *krp = NULL;
++	CpaCyDsaVerifyOpData *pVerData = NULL;
++
++	if (NULL == callbackTag) {
++		DPRINTK("%s(): Invalid input parameters - "
++			"callbackTag data is NULL\n", __FUNCTION__);
++		return;
++	}
++
++	krp = (struct cryptkop *)callbackTag;
++
++	if (NULL == pOpData) {
++		DPRINTK("%s(): Invalid input parameters - "
++			"Operation Data is NULL\n", __FUNCTION__);
++		krp->krp_status = ECANCELED;
++		crypto_kdone(krp);
++		return;
++	}
++	pVerData = (CpaCyDsaVerifyOpData *) pOpData;
++
++	if (CPA_STATUS_SUCCESS != status) {
++		APRINTK("%s(): LAC DSA Verify operation failed - "
++			"Operation Status = %d\n", __FUNCTION__, status);
++		krp->krp_status = ECANCELED;
++	} else {
++		krp->krp_status = CRYPTO_OP_SUCCESS;
++
++		if (CPA_TRUE != verifyStatus) {
++			DPRINTK("%s(): DSA signature invalid\n", __FUNCTION__);
++			krp->krp_status = EIO;
++		}
++	}
++
++	/* Swap bytes only when the callback status is successful and
++	   verifyStatus is set to true */
++	/*Just swapping back the key values for now. Possibly all
++	   swapped buffers need to be reverted */
++	if (CPA_STATUS_SUCCESS == status && CPA_TRUE == verifyStatus) {
++		icp_ocfDrvSwapBytes(pVerData->R.pData,
++				    pVerData->R.dataLenInBytes);
++		icp_ocfDrvSwapBytes(pVerData->S.pData,
++				    pVerData->S.dataLenInBytes);
++	}
++
++	memset(pVerData, 0, sizeof(CpaCyDsaVerifyOpData));
++	kmem_cache_free(drvDSAVerify_zone, pVerData);
++	crypto_kdone(krp);
++
++	return;
++}
+diff -Nur linux-2.6.30.orig/crypto/ocf/ep80579/icp_common.c linux-2.6.30/crypto/ocf/ep80579/icp_common.c
+--- linux-2.6.30.orig/crypto/ocf/ep80579/icp_common.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.30/crypto/ocf/ep80579/icp_common.c	2009-06-11 10:55:27.000000000 +0200
+@@ -0,0 +1,891 @@
++/***************************************************************************
++ *
++ * This file is provided under a dual BSD/GPLv2 license.  When using or 
++ *   redistributing this file, you may do so under either license.
++ * 
++ *   GPL LICENSE SUMMARY
++ * 
++ *   Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
++ * 
++ *   This program is free software; you can redistribute it and/or modify 
++ *   it under the terms of version 2 of the GNU General Public License as
++ *   published by the Free Software Foundation.
++ * 
++ *   This program is distributed in the hope that it will be useful, but 
++ *   WITHOUT ANY WARRANTY; without even the implied warranty of 
++ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU 
++ *   General Public License for more details.
++ * 
++ *   You should have received a copy of the GNU General Public License 
++ *   along with this program; if not, write to the Free Software 
++ *   Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *   The full GNU General Public License is included in this distribution 
++ *   in the file called LICENSE.GPL.
++ * 
++ *   Contact Information:
++ *   Intel Corporation
++ * 
++ *   BSD LICENSE 
++ * 
++ *   Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
++ *   All rights reserved.
++ * 
++ *   Redistribution and use in source and binary forms, with or without 
++ *   modification, are permitted provided that the following conditions 
++ *   are met:
++ * 
++ *     * Redistributions of source code must retain the above copyright 
++ *       notice, this list of conditions and the following disclaimer.
++ *     * Redistributions in binary form must reproduce the above copyright 
++ *       notice, this list of conditions and the following disclaimer in 
++ *       the documentation and/or other materials provided with the 
++ *       distribution.
++ *     * Neither the name of Intel Corporation nor the names of its 
++ *       contributors may be used to endorse or promote products derived 
++ *       from this software without specific prior written permission.
++ * 
++ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
++ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
++ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
++ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
++ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
++ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
++ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
++ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
++ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
++ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
++ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ * 
++ * 
++ *  version: Security.L.1.0.130
++ *
++ ***************************************************************************/
++
++/*
++ * An OCF module that uses Intel® QuickAssist Integrated Accelerator to do the 
++ * crypto.
++ *
++ * This driver requires the ICP Access Library that is available from Intel in
++ * order to operate.
++ */
++
++#include "icp_ocf.h"
++
++#define ICP_OCF_COMP_NAME 			"ICP_OCF"
++#define ICP_OCF_VER_MAIN			(2)
++#define ICP_OCF_VER_MJR				(0)
++#define ICP_OCF_VER_MNR 			(0)
++
++#define MAX_DEREG_RETRIES 			(100)
++#define DEFAULT_DEREG_RETRIES 			(10)
++#define DEFAULT_DEREG_DELAY_IN_JIFFIES		(10)
++
++/* This defines the maximum number of sessions possible between OCF
++   and the OCF Tolapai Driver. If set to zero, there is no limit. */
++#define DEFAULT_OCF_TO_DRV_MAX_SESSION_COUNT	(0)
++#define NUM_SUPPORTED_CAPABILITIES		(21)
++
++/*Slabs zones*/
++struct kmem_cache *drvSessionData_zone = NULL;
++struct kmem_cache *drvOpData_zone = NULL;
++struct kmem_cache *drvDH_zone = NULL;
++struct kmem_cache *drvLnModExp_zone = NULL;
++struct kmem_cache *drvRSADecrypt_zone = NULL;
++struct kmem_cache *drvRSAPrivateKey_zone = NULL;
++struct kmem_cache *drvDSARSSign_zone = NULL;
++struct kmem_cache *drvDSARSSignKValue_zone = NULL;
++struct kmem_cache *drvDSAVerify_zone = NULL;
++
++/*Slab zones for flatbuffers and bufferlist*/
++struct kmem_cache *drvFlatBuffer_zone = NULL;
++
++static int icp_ocfDrvInit(void);
++static void icp_ocfDrvExit(void);
++static void icp_ocfDrvFreeCaches(void);
++static void icp_ocfDrvDeferedFreeLacSessionProcess(void *arg);
++
++int32_t icp_ocfDrvDriverId = INVALID_DRIVER_ID;
++
++/* Module parameter - gives the number of times LAC deregistration shall be
++   re-tried */
++int num_dereg_retries = DEFAULT_DEREG_RETRIES;
++
++/* Module parameter - gives the delay time in jiffies before a LAC session 
++   shall be attempted to be deregistered again */
++int dereg_retry_delay_in_jiffies = DEFAULT_DEREG_DELAY_IN_JIFFIES;
++
++/* Module parameter - gives the maximum number of sessions possible between
++   OCF and the OCF Tolapai Driver. If set to zero, there is no limit.*/
++int max_sessions = DEFAULT_OCF_TO_DRV_MAX_SESSION_COUNT;
++
++/* This is set when the module is removed from the system, no further
++   processing can take place if this is set */
++atomic_t icp_ocfDrvIsExiting = ATOMIC_INIT(0);
++
++/* This is used to show how many lac sessions were not deregistered*/
++atomic_t lac_session_failed_dereg_count = ATOMIC_INIT(0);
++
++/* This is used to track the number of registered sessions between OCF and
++ * and the OCF Tolapai driver, when max_session is set to value other than
++ * zero. This ensures that the max_session set for the OCF and the driver
++ * is equal to the LAC registered sessions */
++atomic_t num_ocf_to_drv_registered_sessions = ATOMIC_INIT(0);
++
++/* Head of linked list used to store session data */
++struct list_head icp_ocfDrvGlobalSymListHead;
++struct list_head icp_ocfDrvGlobalSymListHead_FreeMemList;
++
++spinlock_t icp_ocfDrvSymSessInfoListSpinlock = SPIN_LOCK_UNLOCKED;
++rwlock_t icp_kmem_cache_destroy_alloc_lock = RW_LOCK_UNLOCKED;
++
++struct workqueue_struct *icp_ocfDrvFreeLacSessionWorkQ;
++
++struct icp_drvBuffListInfo defBuffListInfo;
++
++static struct {
++	softc_device_decl sc_dev;
++} icpDev;
++
++static device_method_t icp_methods = {
++	/* crypto device methods */
++	DEVMETHOD(cryptodev_newsession, icp_ocfDrvNewSession),
++	DEVMETHOD(cryptodev_freesession, icp_ocfDrvFreeLACSession),
++	DEVMETHOD(cryptodev_process, icp_ocfDrvSymProcess),
++	DEVMETHOD(cryptodev_kprocess, icp_ocfDrvPkeProcess),
++};
++
++module_param(num_dereg_retries, int, S_IRUGO);
++module_param(dereg_retry_delay_in_jiffies, int, S_IRUGO);
++module_param(max_sessions, int, S_IRUGO);
++
++MODULE_PARM_DESC(num_dereg_retries,
++		 "Number of times to retry LAC Sym Session Deregistration. "
++		 "Default 10, Max 100");
++MODULE_PARM_DESC(dereg_retry_delay_in_jiffies, "Delay in jiffies "
++		 "(added to a schedule() function call) before a LAC Sym "
++		 "Session Dereg is retried. Default 10");
++MODULE_PARM_DESC(max_sessions, "This sets the maximum number of sessions "
++		 "between OCF and this driver. If this value is set to zero, "
++		 "max session count checking is disabled. Default is zero(0)");
++
++/* Name        : icp_ocfDrvInit
++ *
++ * Description : This function will register all the symmetric and asymmetric
++ * functionality that will be accelerated by the hardware. It will also
++ * get a unique driver ID from the OCF and initialise all slab caches
++ */
++static int __init icp_ocfDrvInit(void)
++{
++	int ocfStatus = 0;
++
++	IPRINTK("=== %s ver %d.%d.%d ===\n", ICP_OCF_COMP_NAME,
++		ICP_OCF_VER_MAIN, ICP_OCF_VER_MJR, ICP_OCF_VER_MNR);
++
++	if (MAX_DEREG_RETRIES < num_dereg_retries) {
++		EPRINTK("Session deregistration retry count set to greater "
++			"than %d", MAX_DEREG_RETRIES);
++		return -1;
++	}
++
++	/* Initialize and Start the Cryptographic component */
++	if (CPA_STATUS_SUCCESS !=
++	    cpaCyStartInstance(CPA_INSTANCE_HANDLE_SINGLE)) {
++		EPRINTK("Failed to initialize and start the instance "
++			"of the Cryptographic component.\n");
++		return -1;
++	}
++
++	/* Set the default size of BufferList to allocate */
++	memset(&defBuffListInfo, 0, sizeof(struct icp_drvBuffListInfo));
++	if (ICP_OCF_DRV_STATUS_SUCCESS !=
++	    icp_ocfDrvBufferListMemInfo(ICP_OCF_DRV_DEFAULT_BUFFLIST_ARRAYS,
++					&defBuffListInfo)) {
++		EPRINTK("Failed to get bufferlist memory info.\n");
++		return -1;
++	}
++
++	/*Register OCF Tolapai Driver with OCF */
++	memset(&icpDev, 0, sizeof(icpDev));
++	softc_device_init(&icpDev, "icp", 0, icp_methods);
++
++	icp_ocfDrvDriverId = crypto_get_driverid(softc_get_device(&icpDev),
++						 CRYPTOCAP_F_HARDWARE);
++
++	if (icp_ocfDrvDriverId < 0) {
++		EPRINTK("%s : ICP driver failed to register with OCF!\n",
++			__FUNCTION__);
++		return -ENODEV;
++	}
++
++	/*Create all the slab caches used by the OCF Tolapai Driver */
++	drvSessionData_zone =
++	    ICP_CACHE_CREATE("ICP Session Data", struct icp_drvSessionData);
++	ICP_CACHE_NULL_CHECK(drvSessionData_zone);
++
++	/* 
++	 * Allocation of the OpData includes the allocation space for meta data.
++	 * The memory after the opData structure is reserved for this meta data.
++	 */
++	drvOpData_zone =
++	    kmem_cache_create("ICP Op Data", sizeof(struct icp_drvOpData) +
++	            defBuffListInfo.metaSize ,0, SLAB_HWCACHE_ALIGN, NULL, NULL);
++
++
++	ICP_CACHE_NULL_CHECK(drvOpData_zone);
++
++	drvDH_zone = ICP_CACHE_CREATE("ICP DH data", CpaCyDhPhase1KeyGenOpData);
++	ICP_CACHE_NULL_CHECK(drvDH_zone);
++
++	drvLnModExp_zone =
++	    ICP_CACHE_CREATE("ICP ModExp data", CpaCyLnModExpOpData);
++	ICP_CACHE_NULL_CHECK(drvLnModExp_zone);
++
++	drvRSADecrypt_zone =
++	    ICP_CACHE_CREATE("ICP RSA decrypt data", CpaCyRsaDecryptOpData);
++	ICP_CACHE_NULL_CHECK(drvRSADecrypt_zone);
++
++	drvRSAPrivateKey_zone =
++	    ICP_CACHE_CREATE("ICP RSA private key data", CpaCyRsaPrivateKey);
++	ICP_CACHE_NULL_CHECK(drvRSAPrivateKey_zone);
++
++	drvDSARSSign_zone =
++	    ICP_CACHE_CREATE("ICP DSA Sign", CpaCyDsaRSSignOpData);
++	ICP_CACHE_NULL_CHECK(drvDSARSSign_zone);
++
++	/*too awkward to use a macro here */
++	drvDSARSSignKValue_zone =
++	    kmem_cache_create("ICP DSA Sign Rand Val",
++			      DSA_RS_SIGN_PRIMEQ_SIZE_IN_BYTES, 0,
++			      SLAB_HWCACHE_ALIGN, NULL, NULL);
++	ICP_CACHE_NULL_CHECK(drvDSARSSignKValue_zone);
++
++	drvDSAVerify_zone =
++	    ICP_CACHE_CREATE("ICP DSA Verify", CpaCyDsaVerifyOpData);
++	ICP_CACHE_NULL_CHECK(drvDSAVerify_zone);
++
++	drvFlatBuffer_zone =
++	    ICP_CACHE_CREATE("ICP Flat Buffers", CpaFlatBuffer);
++	ICP_CACHE_NULL_CHECK(drvFlatBuffer_zone);
++
++	/* Register the ICP symmetric crypto support. */
++	ICP_REGISTER_SYM_FUNCTIONALITY_WITH_OCF(CRYPTO_NULL_CBC);
++	ICP_REGISTER_SYM_FUNCTIONALITY_WITH_OCF(CRYPTO_DES_CBC);
++	ICP_REGISTER_SYM_FUNCTIONALITY_WITH_OCF(CRYPTO_3DES_CBC);
++	ICP_REGISTER_SYM_FUNCTIONALITY_WITH_OCF(CRYPTO_AES_CBC);
++	ICP_REGISTER_SYM_FUNCTIONALITY_WITH_OCF(CRYPTO_ARC4);
++	ICP_REGISTER_SYM_FUNCTIONALITY_WITH_OCF(CRYPTO_MD5);
++	ICP_REGISTER_SYM_FUNCTIONALITY_WITH_OCF(CRYPTO_MD5_HMAC);
++	ICP_REGISTER_SYM_FUNCTIONALITY_WITH_OCF(CRYPTO_SHA1);
++	ICP_REGISTER_SYM_FUNCTIONALITY_WITH_OCF(CRYPTO_SHA1_HMAC);
++	ICP_REGISTER_SYM_FUNCTIONALITY_WITH_OCF(CRYPTO_SHA2_256);
++	ICP_REGISTER_SYM_FUNCTIONALITY_WITH_OCF(CRYPTO_SHA2_256_HMAC);
++	ICP_REGISTER_SYM_FUNCTIONALITY_WITH_OCF(CRYPTO_SHA2_384);
++	ICP_REGISTER_SYM_FUNCTIONALITY_WITH_OCF(CRYPTO_SHA2_384_HMAC);
++	ICP_REGISTER_SYM_FUNCTIONALITY_WITH_OCF(CRYPTO_SHA2_512);
++	ICP_REGISTER_SYM_FUNCTIONALITY_WITH_OCF(CRYPTO_SHA2_512_HMAC);
++
++	/* Register the ICP asymmetric algorithm support */
++	ICP_REGISTER_ASYM_FUNCTIONALITY_WITH_OCF(CRK_DH_COMPUTE_KEY);
++	ICP_REGISTER_ASYM_FUNCTIONALITY_WITH_OCF(CRK_MOD_EXP);
++	ICP_REGISTER_ASYM_FUNCTIONALITY_WITH_OCF(CRK_MOD_EXP_CRT);
++	ICP_REGISTER_ASYM_FUNCTIONALITY_WITH_OCF(CRK_DSA_SIGN);
++	ICP_REGISTER_ASYM_FUNCTIONALITY_WITH_OCF(CRK_DSA_VERIFY);
++
++	/* Register the ICP random number generator support */
++	if (OCF_REGISTRATION_STATUS_SUCCESS ==
++	    crypto_rregister(icp_ocfDrvDriverId, icp_ocfDrvReadRandom, NULL)) {
++		ocfStatus++;
++	}
++
++	if (OCF_ZERO_FUNCTIONALITY_REGISTERED == ocfStatus) {
++		DPRINTK("%s: Failed to register any device capabilities\n",
++			__FUNCTION__);
++		icp_ocfDrvFreeCaches();
++		icp_ocfDrvDriverId = INVALID_DRIVER_ID;
++		return -ECANCELED;
++	}
++
++	DPRINTK("%s: Registered %d of %d device capabilities\n",
++		__FUNCTION__, ocfStatus, NUM_SUPPORTED_CAPABILITIES);
++
++/*Session data linked list used during module exit*/
++	INIT_LIST_HEAD(&icp_ocfDrvGlobalSymListHead);
++	INIT_LIST_HEAD(&icp_ocfDrvGlobalSymListHead_FreeMemList);
++
++	icp_ocfDrvFreeLacSessionWorkQ =
++	    create_singlethread_workqueue("ocfLacDeregWorkQueue");
++
++	return 0;
++}
++
++/* Name        : icp_ocfDrvExit
++ *
++ * Description : This function will deregister all the symmetric sessions
++ * registered with the LAC component. It will also deregister all symmetric
++ * and asymmetric functionality that can be accelerated by the hardware via OCF
++ * and random number generation if it is enabled.
++ */
++static void icp_ocfDrvExit(void)
++{
++	CpaStatus lacStatus = CPA_STATUS_SUCCESS;
++	struct icp_drvSessionData *sessionData = NULL;
++	struct icp_drvSessionData *tempSessionData = NULL;
++	int i, remaining_delay_time_in_jiffies = 0;
++	/* There is a possibility of a process or new session command being   */
++	/* sent before this variable is incremented. The aim of this variable */
++	/* is to stop a loop of calls creating a deadlock situation which     */
++	/* would prevent the driver from exiting.                             */
++
++	atomic_inc(&icp_ocfDrvIsExiting);
++
++	/*Existing sessions will be routed to another driver after these calls */
++	crypto_unregister_all(icp_ocfDrvDriverId);
++	crypto_runregister_all(icp_ocfDrvDriverId);
++
++	/*If any sessions are waiting to be deregistered, do that. This also 
++	   flushes the work queue */
++	destroy_workqueue(icp_ocfDrvFreeLacSessionWorkQ);
++
++	/*ENTER CRITICAL SECTION */
++	spin_lock_bh(&icp_ocfDrvSymSessInfoListSpinlock);
++	list_for_each_entry_safe(tempSessionData, sessionData,
++				 &icp_ocfDrvGlobalSymListHead, listNode) {
++		for (i = 0; i < num_dereg_retries; i++) {
++			/*No harm if bad input - LAC will handle error cases */
++			if (ICP_SESSION_RUNNING == tempSessionData->inUse) {
++				lacStatus =
++				    cpaCySymRemoveSession
++				    (CPA_INSTANCE_HANDLE_SINGLE,
++				     tempSessionData->sessHandle);
++				if (CPA_STATUS_SUCCESS == lacStatus) {
++					/* Successfully deregistered */
++					break;
++				} else if (CPA_STATUS_RETRY != lacStatus) {
++					atomic_inc
++					    (&lac_session_failed_dereg_count);
++					break;
++				}
++
++				/*schedule_timeout returns the time left for completion if 
++				 * this task is set to TASK_INTERRUPTIBLE */
++				remaining_delay_time_in_jiffies =
++				    dereg_retry_delay_in_jiffies;
++				while (0 > remaining_delay_time_in_jiffies) {
++					remaining_delay_time_in_jiffies =
++					    schedule_timeout
++					    (remaining_delay_time_in_jiffies);
++				}
++
++				DPRINTK
++				    ("%s(): Retry %d to deregistrate the session\n",
++				     __FUNCTION__, i);
++			}
++		}
++
++		/*remove from current list */
++		list_del(&(tempSessionData->listNode));
++		/*add to free mem linked list */
++		list_add(&(tempSessionData->listNode),
++			 &icp_ocfDrvGlobalSymListHead_FreeMemList);
++
++	}
++
++	/*EXIT CRITICAL SECTION */
++	spin_unlock_bh(&icp_ocfDrvSymSessInfoListSpinlock);
++
++	/*set back to initial values */
++	sessionData = NULL;
++	/*still have a reference in our list! */
++	tempSessionData = NULL;
++	/*free memory */
++	list_for_each_entry_safe(tempSessionData, sessionData,
++				 &icp_ocfDrvGlobalSymListHead_FreeMemList,
++				 listNode) {
++
++		list_del(&(tempSessionData->listNode));
++		/* Free allocated CpaCySymSessionCtx */
++		if (NULL != tempSessionData->sessHandle) {
++			kfree(tempSessionData->sessHandle);
++		}
++		memset(tempSessionData, 0, sizeof(struct icp_drvSessionData));
++		kmem_cache_free(drvSessionData_zone, tempSessionData);
++	}
++
++	if (0 != atomic_read(&lac_session_failed_dereg_count)) {
++		DPRINTK("%s(): %d LAC sessions were not deregistered "
++			"correctly. This is not a clean exit! \n",
++			__FUNCTION__,
++			atomic_read(&lac_session_failed_dereg_count));
++	}
++
++	icp_ocfDrvFreeCaches();
++	icp_ocfDrvDriverId = INVALID_DRIVER_ID;
++
++	/* Shutdown the Cryptographic component */
++	lacStatus = cpaCyStopInstance(CPA_INSTANCE_HANDLE_SINGLE);
++	if (CPA_STATUS_SUCCESS != lacStatus) {
++		DPRINTK("%s(): Failed to stop instance of the "
++			"Cryptographic component.(status == %d)\n",
++			__FUNCTION__, lacStatus);
++	}
++
++}
++
++/* Name        : icp_ocfDrvFreeCaches
++ *
++ * Description : This function deregisters all slab caches
++ */
++static void icp_ocfDrvFreeCaches(void)
++{
++	if (atomic_read(&icp_ocfDrvIsExiting) != CPA_TRUE) {
++		atomic_set(&icp_ocfDrvIsExiting, 1);
++	}
++
++	/*Sym Zones */
++	ICP_CACHE_DESTROY(drvSessionData_zone);
++	ICP_CACHE_DESTROY(drvOpData_zone);
++
++	/*Asym zones */
++	ICP_CACHE_DESTROY(drvDH_zone);
++	ICP_CACHE_DESTROY(drvLnModExp_zone);
++	ICP_CACHE_DESTROY(drvRSADecrypt_zone);
++	ICP_CACHE_DESTROY(drvRSAPrivateKey_zone);
++	ICP_CACHE_DESTROY(drvDSARSSignKValue_zone);
++	ICP_CACHE_DESTROY(drvDSARSSign_zone);
++	ICP_CACHE_DESTROY(drvDSAVerify_zone);
++
++	/*FlatBuffer and BufferList Zones */
++	ICP_CACHE_DESTROY(drvFlatBuffer_zone);
++
++}
++
++/* Name        : icp_ocfDrvDeregRetry
++ *
++ * Description : This function will try to farm the session deregistration
++ * off to a work queue. If it fails, nothing more can be done and it
++ * returns an error
++ */
++
++int icp_ocfDrvDeregRetry(CpaCySymSessionCtx sessionToDeregister)
++{
++	struct icp_ocfDrvFreeLacSession *workstore = NULL;
++
++	DPRINTK("%s(): Retry - Deregistering session (%p)\n",
++		__FUNCTION__, sessionToDeregister);
++
++	/*make sure the session is not available to be allocated during this
++	   process */
++	atomic_inc(&lac_session_failed_dereg_count);
++
++	/*Farm off to work queue */
++	workstore =
++	    kmalloc(sizeof(struct icp_ocfDrvFreeLacSession), GFP_ATOMIC);
++	if (NULL == workstore) {
++		DPRINTK("%s(): unable to free session - no memory available "
++			"for work queue\n", __FUNCTION__);
++		return ENOMEM;
++	}
++
++	workstore->sessionToDeregister = sessionToDeregister;
++
++	INIT_WORK(&(workstore->work), icp_ocfDrvDeferedFreeLacSessionProcess,
++		  workstore);
++	queue_work(icp_ocfDrvFreeLacSessionWorkQ, &(workstore->work));
++
++	return ICP_OCF_DRV_STATUS_SUCCESS;
++
++}
++
++/* Name        : icp_ocfDrvDeferedFreeLacSessionProcess
++ *
++ * Description : This function will retry (module input parameter)
++ * 'num_dereg_retries' times to deregister any symmetric session that receives a
++ * CPA_STATUS_RETRY message from the LAC component. This function is run in
++ * Thread context because it is called from a worker thread
++ */
++static void icp_ocfDrvDeferedFreeLacSessionProcess(void *arg)
++{
++	struct icp_ocfDrvFreeLacSession *workstore = NULL;
++	CpaCySymSessionCtx sessionToDeregister = NULL;
++	int i = 0;
++	int remaining_delay_time_in_jiffies = 0;
++	CpaStatus lacStatus = CPA_STATUS_SUCCESS;
++
++	workstore = (struct icp_ocfDrvFreeLacSession *)arg;
++	if (NULL == workstore) {
++		DPRINTK("%s() function called with null parameter \n",
++			__FUNCTION__);
++		return;
++	}
++
++	sessionToDeregister = workstore->sessionToDeregister;
++	kfree(workstore);
++
++	/*if exiting, give deregistration one more blast only */
++	if (atomic_read(&icp_ocfDrvIsExiting) == CPA_TRUE) {
++		lacStatus = cpaCySymRemoveSession(CPA_INSTANCE_HANDLE_SINGLE,
++						  sessionToDeregister);
++
++		if (lacStatus != CPA_STATUS_SUCCESS) {
++			DPRINTK("%s() Failed to Dereg LAC session %p "
++				"during module exit\n", __FUNCTION__,
++				sessionToDeregister);
++			return;
++		}
++
++		atomic_dec(&lac_session_failed_dereg_count);
++		return;
++	}
++
++	for (i = 0; i <= num_dereg_retries; i++) {
++		lacStatus = cpaCySymRemoveSession(CPA_INSTANCE_HANDLE_SINGLE,
++						  sessionToDeregister);
++
++		if (lacStatus == CPA_STATUS_SUCCESS) {
++			atomic_dec(&lac_session_failed_dereg_count);
++			return;
++		}
++		if (lacStatus != CPA_STATUS_RETRY) {
++			DPRINTK("%s() Failed to deregister session - lacStatus "
++				" = %d", __FUNCTION__, lacStatus);
++			break;
++		}
++
++		/*schedule_timeout returns the time left for completion if this
++		   task is set to TASK_INTERRUPTIBLE */
++		remaining_delay_time_in_jiffies = dereg_retry_delay_in_jiffies;
++		while (0 > remaining_delay_time_in_jiffies) {
++			remaining_delay_time_in_jiffies =
++			    schedule_timeout(remaining_delay_time_in_jiffies);
++		}
++
++	}
++
++	DPRINTK("%s(): Unable to deregister session\n", __FUNCTION__);
++	DPRINTK("%s(): Number of unavailable LAC sessions = %d\n", __FUNCTION__,
++		atomic_read(&lac_session_failed_dereg_count));
++}
++
++/* Name        : icp_ocfDrvPtrAndLenToFlatBuffer 
++ *
++ * Description : This function converts a "pointer and length" buffer 
++ * structure to Fredericksburg Flat Buffer (CpaFlatBuffer) format.
++ *
++ * This function assumes that the data passed in are valid.
++ */
++inline void
++icp_ocfDrvPtrAndLenToFlatBuffer(void *pData, uint32_t len,
++				CpaFlatBuffer * pFlatBuffer)
++{
++	pFlatBuffer->pData = pData;
++	pFlatBuffer->dataLenInBytes = len;
++}
++
++/* Name        : icp_ocfDrvSingleSkBuffToFlatBuffer 
++ *
++ * Description : This function converts a single socket buffer (sk_buff)
++ * structure to a Fredericksburg Flat Buffer (CpaFlatBuffer) format.
++ *
++ * This function assumes that the data passed in are valid.
++ */
++static inline void
++icp_ocfDrvSingleSkBuffToFlatBuffer(struct sk_buff *pSkb,
++				   CpaFlatBuffer * pFlatBuffer)
++{
++	pFlatBuffer->pData = pSkb->data;
++	pFlatBuffer->dataLenInBytes = skb_headlen(pSkb);
++}
++
++/* Name        : icp_ocfDrvSkBuffToBufferList 
++ *
++ * Description : This function converts a socket buffer (sk_buff) structure to
++ * Fredericksburg Scatter/Gather (CpaBufferList) buffer format.
++ *
++ * This function assumes that the bufferlist has been allocated with the correct
++ * number of buffer arrays.
++ * 
++ */
++inline int
++icp_ocfDrvSkBuffToBufferList(struct sk_buff *pSkb, CpaBufferList * bufferList)
++{
++	CpaFlatBuffer *curFlatBuffer = NULL;
++	char *skbuffPageAddr = NULL;
++	struct sk_buff *pCurFrag = NULL;
++	struct skb_shared_info *pShInfo = NULL;
++	uint32_t page_offset = 0, i = 0;
++
++	DPRINTK("%s(): Entry Point\n", __FUNCTION__);
++
++	/*
++	 * In all cases, the first skb needs to be translated to FlatBuffer.
++	 * Perform a buffer translation for the first skbuff
++	 */
++	curFlatBuffer = bufferList->pBuffers;
++	icp_ocfDrvSingleSkBuffToFlatBuffer(pSkb, curFlatBuffer);
++
++	/* Set the userData to point to the original sk_buff */
++	bufferList->pUserData = (void *)pSkb;
++
++	/* We now know we'll have at least one element in the SGL */
++	bufferList->numBuffers = 1;
++
++	if (0 == skb_is_nonlinear(pSkb)) {
++		/* Is a linear buffer - therefore it's a single skbuff */
++		DPRINTK("%s(): Exit Point\n", __FUNCTION__);
++		return ICP_OCF_DRV_STATUS_SUCCESS;
++	}
++
++	curFlatBuffer++;
++	pShInfo = skb_shinfo(pSkb);
++	if (pShInfo->frag_list != NULL && pShInfo->nr_frags != 0) {
++		EPRINTK("%s():"
++			"Translation for a combination of frag_list "
++			"and frags[] array not supported!\n", __FUNCTION__);
++		return ICP_OCF_DRV_STATUS_FAIL;
++	} else if (pShInfo->frag_list != NULL) {
++		/*
++		 * Non linear skbuff supported through frag_list 
++		 * Perform translation for each fragment (sk_buff)
++		 * in the frag_list of the first sk_buff.
++		 */
++		for (pCurFrag = pShInfo->frag_list;
++		     pCurFrag != NULL; pCurFrag = pCurFrag->next) {
++			icp_ocfDrvSingleSkBuffToFlatBuffer(pCurFrag,
++							   curFlatBuffer);
++			curFlatBuffer++;
++			bufferList->numBuffers++;
++		}
++	} else if (pShInfo->nr_frags != 0) {
++		/*
++		 * Perform translation for each fragment in frags array
++		 * and add to the BufferList
++		 */
++		for (i = 0; i < pShInfo->nr_frags; i++) {
++			/* Get the page address and offset of this frag */
++			skbuffPageAddr = (char *)pShInfo->frags[i].page;
++			page_offset = pShInfo->frags[i].page_offset;
++
++			/* Convert a pointer and length to a flat buffer */
++			icp_ocfDrvPtrAndLenToFlatBuffer(skbuffPageAddr +
++							page_offset,
++							pShInfo->frags[i].size,
++							curFlatBuffer);
++			curFlatBuffer++;
++			bufferList->numBuffers++;
++		}
++	} else {
++		EPRINTK("%s():" "Could not recognize skbuff fragments!\n",
++			__FUNCTION__);
++		return ICP_OCF_DRV_STATUS_FAIL;
++	}
++
++	DPRINTK("%s(): Exit Point\n", __FUNCTION__);
++	return ICP_OCF_DRV_STATUS_SUCCESS;
++}
++
++/* Name        : icp_ocfDrvBufferListToSkBuff 
++ *
++ * Description : This function converts a Fredericksburg Scatter/Gather 
++ * (CpaBufferList) buffer format to socket buffer structure.
++ */
++inline int
++icp_ocfDrvBufferListToSkBuff(CpaBufferList * bufferList, struct sk_buff **skb)
++{
++	DPRINTK("%s(): Entry Point\n", __FUNCTION__);
++
++	/* Retrieve the original skbuff */
++	*skb = (struct sk_buff *)bufferList->pUserData;
++	if (NULL == *skb) {
++		EPRINTK("%s():"
++			"Error on converting from a BufferList. "
++			"The BufferList does not contain an sk_buff.\n",
++			__FUNCTION__);
++		return ICP_OCF_DRV_STATUS_FAIL;
++	}
++	DPRINTK("%s(): Exit Point\n", __FUNCTION__);
++	return ICP_OCF_DRV_STATUS_SUCCESS;
++}
++
++/* Name        : icp_ocfDrvPtrAndLenToBufferList
++ *
++ * Description : This function converts a "pointer and length" buffer
++ * structure to Fredericksburg Scatter/Gather Buffer (CpaBufferList) format.
++ *
++ * This function assumes that the data passed in are valid.
++ */
++inline void
++icp_ocfDrvPtrAndLenToBufferList(void *pDataIn, uint32_t length,
++				CpaBufferList * pBufferList)
++{
++	pBufferList->numBuffers = 1;
++	pBufferList->pBuffers->pData = pDataIn;
++	pBufferList->pBuffers->dataLenInBytes = length;
++}
++
++/* Name        : icp_ocfDrvBufferListToPtrAndLen
++ *
++ * Description : This function converts Fredericksburg Scatter/Gather Buffer
++ * (CpaBufferList) format to a "pointer and length" buffer structure.
++ *
++ * This function assumes that the data passed in are valid.
++ */
++inline void
++icp_ocfDrvBufferListToPtrAndLen(CpaBufferList * pBufferList,
++				void **ppDataOut, uint32_t * pLength)
++{
++	*ppDataOut = pBufferList->pBuffers->pData;
++	*pLength = pBufferList->pBuffers->dataLenInBytes;
++}
++
++/* Name        : icp_ocfDrvBufferListMemInfo
++ *
++ * Description : This function will set the number of flat buffers in 
++ * bufferlist, the size of memory to allocate for the pPrivateMetaData 
++ * member of the CpaBufferList.
++ */
++int
++icp_ocfDrvBufferListMemInfo(uint16_t numBuffers,
++			    struct icp_drvBuffListInfo *buffListInfo)
++{
++	buffListInfo->numBuffers = numBuffers;
++
++	if (CPA_STATUS_SUCCESS !=
++	    cpaCyBufferListGetMetaSize(CPA_INSTANCE_HANDLE_SINGLE,
++				       buffListInfo->numBuffers,
++				       &(buffListInfo->metaSize))) {
++		EPRINTK("%s() Failed to get buffer list meta size.\n",
++			__FUNCTION__);
++		return ICP_OCF_DRV_STATUS_FAIL;
++	}
++
++	return ICP_OCF_DRV_STATUS_SUCCESS;
++}
++
++/* Name        : icp_ocfDrvGetSkBuffFrags
++ *
++ * Description : This function will determine the number of 
++ * fragments in a socket buffer(sk_buff).
++ */
++inline uint16_t icp_ocfDrvGetSkBuffFrags(struct sk_buff * pSkb)
++{
++	uint16_t numFrags = 0;
++	struct sk_buff *pCurFrag = NULL;
++	struct skb_shared_info *pShInfo = NULL;
++
++	if (NULL == pSkb)
++		return 0;
++
++	numFrags = 1;
++	if (0 == skb_is_nonlinear(pSkb)) {
++		/* Linear buffer - it's a single skbuff */
++		return numFrags;
++	}
++
++	pShInfo = skb_shinfo(pSkb);
++	if (NULL != pShInfo->frag_list && 0 != pShInfo->nr_frags) {
++		EPRINTK("%s(): Combination of frag_list "
++			"and frags[] array not supported!\n", __FUNCTION__);
++		return 0;
++	} else if (0 != pShInfo->nr_frags) {
++		numFrags += pShInfo->nr_frags;
++		return numFrags;
++	} else if (NULL != pShInfo->frag_list) {
++		for (pCurFrag = pShInfo->frag_list;
++		     pCurFrag != NULL; pCurFrag = pCurFrag->next) {
++			numFrags++;
++		}
++		return numFrags;
++	} else {
++		return 0;
++	}
++}
++
++/* Name        : icp_ocfDrvFreeFlatBuffer
++ *
++ * Description : This function will deallocate flat buffer.
++ */
++inline void icp_ocfDrvFreeFlatBuffer(CpaFlatBuffer * pFlatBuffer)
++{
++	if (pFlatBuffer != NULL) {
++		memset(pFlatBuffer, 0, sizeof(CpaFlatBuffer));
++		kmem_cache_free(drvFlatBuffer_zone, pFlatBuffer);
++	}
++}
++
++/* Name        : icp_ocfDrvAllocMetaData
++ *
++ * Description : This function will allocate memory for the
++ * pPrivateMetaData member of CpaBufferList.
++ */
++inline int
++icp_ocfDrvAllocMetaData(CpaBufferList * pBufferList,
++        const struct icp_drvOpData *pOpData)
++{
++	Cpa32U metaSize = 0;
++
++	if (pBufferList->numBuffers <= ICP_OCF_DRV_DEFAULT_BUFFLIST_ARRAYS){
++	    void *pOpDataStartAddr = (void *)pOpData;
++
++	    if (0 == defBuffListInfo.metaSize) {
++			pBufferList->pPrivateMetaData = NULL;
++			return ICP_OCF_DRV_STATUS_SUCCESS;
++		}
++		/*
++		 * The meta data allocation has been included as part of the 
++		 * op data.  It has been pre-allocated in memory just after the
++		 * icp_drvOpData structure.
++		 */
++		pBufferList->pPrivateMetaData = pOpDataStartAddr +
++		        sizeof(struct icp_drvOpData);
++	} else {
++		if (CPA_STATUS_SUCCESS !=
++		    cpaCyBufferListGetMetaSize(CPA_INSTANCE_HANDLE_SINGLE,
++					       pBufferList->numBuffers,
++					       &metaSize)) {
++			EPRINTK("%s() Failed to get buffer list meta size.\n",
++				__FUNCTION__);
++			return ICP_OCF_DRV_STATUS_FAIL;
++		}
++
++		if (0 == metaSize) {
++			pBufferList->pPrivateMetaData = NULL;
++			return ICP_OCF_DRV_STATUS_SUCCESS;
++		}
++
++		pBufferList->pPrivateMetaData = kmalloc(metaSize, GFP_ATOMIC);
++	}
++	if (NULL == pBufferList->pPrivateMetaData) {
++		EPRINTK("%s() Failed to allocate pPrivateMetaData.\n",
++			__FUNCTION__);
++		return ICP_OCF_DRV_STATUS_FAIL;
++	}
++
++	return ICP_OCF_DRV_STATUS_SUCCESS;
++}
++
++/* Name        : icp_ocfDrvFreeMetaData
++ *
++ * Description : This function will deallocate pPrivateMetaData memory.
++ */
++inline void icp_ocfDrvFreeMetaData(CpaBufferList * pBufferList)
++{
++	if (NULL == pBufferList->pPrivateMetaData) {
++		return;
++	}
++
++	/*
++	 * Only free the meta data if the BufferList has more than 
++	 * ICP_OCF_DRV_DEFAULT_BUFFLIST_ARRAYS number of buffers.
++	 * Otherwise, the meta data shall be freed when the icp_drvOpData is
++	 * freed.
++	 */
++	if (ICP_OCF_DRV_DEFAULT_BUFFLIST_ARRAYS < pBufferList->numBuffers){
++		kfree(pBufferList->pPrivateMetaData);
++	}
++}
++
++module_init(icp_ocfDrvInit);
++module_exit(icp_ocfDrvExit);
++MODULE_LICENSE("Dual BSD/GPL");
++MODULE_AUTHOR("Intel");
++MODULE_DESCRIPTION("OCF Driver for Intel Quick Assist crypto acceleration");
+diff -Nur linux-2.6.30.orig/crypto/ocf/ep80579/icp_ocf.h linux-2.6.30/crypto/ocf/ep80579/icp_ocf.h
+--- linux-2.6.30.orig/crypto/ocf/ep80579/icp_ocf.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.30/crypto/ocf/ep80579/icp_ocf.h	2009-06-11 10:55:27.000000000 +0200
+@@ -0,0 +1,363 @@
++/***************************************************************************
++ *
++ * This file is provided under a dual BSD/GPLv2 license.  When using or 
++ *   redistributing this file, you may do so under either license.
++ * 
++ *   GPL LICENSE SUMMARY
++ * 
++ *   Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
++ * 
++ *   This program is free software; you can redistribute it and/or modify 
++ *   it under the terms of version 2 of the GNU General Public License as
++ *   published by the Free Software Foundation.
++ * 
++ *   This program is distributed in the hope that it will be useful, but 
++ *   WITHOUT ANY WARRANTY; without even the implied warranty of 
++ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU 
++ *   General Public License for more details.
++ * 
++ *   You should have received a copy of the GNU General Public License 
++ *   along with this program; if not, write to the Free Software 
++ *   Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *   The full GNU General Public License is included in this distribution 
++ *   in the file called LICENSE.GPL.
++ * 
++ *   Contact Information:
++ *   Intel Corporation
++ * 
++ *   BSD LICENSE 
++ * 
++ *   Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
++ *   All rights reserved.
++ * 
++ *   Redistribution and use in source and binary forms, with or without 
++ *   modification, are permitted provided that the following conditions 
++ *   are met:
++ * 
++ *     * Redistributions of source code must retain the above copyright 
++ *       notice, this list of conditions and the following disclaimer.
++ *     * Redistributions in binary form must reproduce the above copyright 
++ *       notice, this list of conditions and the following disclaimer in 
++ *       the documentation and/or other materials provided with the 
++ *       distribution.
++ *     * Neither the name of Intel Corporation nor the names of its 
++ *       contributors may be used to endorse or promote products derived 
++ *       from this software without specific prior written permission.
++ * 
++ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
++ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
++ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
++ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
++ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
++ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
++ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
++ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
++ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
++ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
++ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ * 
++ * 
++ *  version: Security.L.1.0.130
++ *
++ ***************************************************************************/
++
++/*
++ * OCF drv driver header file for the Intel ICP processor.
++ */
++
++#ifndef ICP_OCF_H
++#define ICP_OCF_H
++
++#include <linux/crypto.h>
++#include <linux/delay.h>
++#include <linux/skbuff.h>
++
++#include "cryptodev.h"
++#include "uio.h"
++
++#include "cpa.h"
++#include "cpa_cy_im.h"
++#include "cpa_cy_sym.h"
++#include "cpa_cy_rand.h"
++#include "cpa_cy_dh.h"
++#include "cpa_cy_rsa.h"
++#include "cpa_cy_ln.h"
++#include "cpa_cy_common.h"
++#include "cpa_cy_dsa.h"
++
++#define NUM_BITS_IN_BYTE (8)
++#define NUM_BITS_IN_BYTE_MINUS_ONE (NUM_BITS_IN_BYTE -1)
++#define INVALID_DRIVER_ID (-1)
++#define RETURN_RAND_NUM_GEN_FAILED (-1)
++
++/*This is define means only one operation can be chained to another
++(resulting in one chain of two operations)*/
++#define MAX_NUM_OF_CHAINED_OPS (1)
++/*This is the max block cipher initialisation vector*/
++#define MAX_IV_LEN_IN_BYTES (20)
++/*This is used to check whether the OCF to this driver session limit has
++  been disabled*/
++#define NO_OCF_TO_DRV_MAX_SESSIONS		(0)
++
++/*OCF values mapped here*/
++#define ICP_SHA1_DIGEST_SIZE_IN_BYTES 		(SHA1_HASH_LEN)
++#define ICP_SHA256_DIGEST_SIZE_IN_BYTES 	(SHA2_256_HASH_LEN)
++#define ICP_SHA384_DIGEST_SIZE_IN_BYTES 	(SHA2_384_HASH_LEN)
++#define ICP_SHA512_DIGEST_SIZE_IN_BYTES 	(SHA2_512_HASH_LEN)
++#define ICP_MD5_DIGEST_SIZE_IN_BYTES 		(MD5_HASH_LEN)
++#define ARC4_COUNTER_LEN 			(ARC4_BLOCK_LEN)
++
++#define OCF_REGISTRATION_STATUS_SUCCESS 	(0)
++#define OCF_ZERO_FUNCTIONALITY_REGISTERED 	(0)
++#define ICP_OCF_DRV_NO_CRYPTO_PROCESS_ERROR 	(0)
++#define ICP_OCF_DRV_STATUS_SUCCESS 		(0)
++#define ICP_OCF_DRV_STATUS_FAIL 		(1)
++
++/*Turn on/off debug options*/
++#define ICP_OCF_PRINT_DEBUG_MESSAGES		(0)
++#define ICP_OCF_PRINT_KERN_ALERT		(1)
++#define ICP_OCF_PRINT_KERN_ERRS			(1)
++
++/*DSA Prime Q size in bytes (as defined in the standard) */
++#define DSA_RS_SIGN_PRIMEQ_SIZE_IN_BYTES	(20)
++
++/*MACRO DEFINITIONS*/
++
++#define BITS_TO_BYTES(bytes, bits) 					\
++	bytes = (bits + NUM_BITS_IN_BYTE_MINUS_ONE) / NUM_BITS_IN_BYTE
++
++#define ICP_CACHE_CREATE(cache_ID, cache_name) 				\
++	kmem_cache_create(cache_ID, sizeof(cache_name),0, 		\
++		SLAB_HWCACHE_ALIGN, NULL, NULL);
++
++#define ICP_CACHE_NULL_CHECK(slab_zone)					\
++{									\
++	if(NULL == slab_zone){ 						\
++		icp_ocfDrvFreeCaches(); 				\
++		EPRINTK("%s() line %d: Not enough memory!\n", 		\
++			__FUNCTION__, __LINE__); 			\
++		return ENOMEM; 						\
++	}								\
++}
++
++#define ICP_CACHE_DESTROY(slab_zone) 	                                \
++{                                                                       \
++        if(NULL != slab_zone){						\
++                kmem_cache_destroy(slab_zone);				\
++                slab_zone = NULL;					\
++        }								\
++}
++
++#define ICP_REGISTER_SYM_FUNCTIONALITY_WITH_OCF(alg)			\
++{									\
++	if(OCF_REGISTRATION_STATUS_SUCCESS ==				\
++		crypto_register(icp_ocfDrvDriverId,			\
++				    alg,				\
++				    0,					\
++				    0)) {				\
++		ocfStatus++;						\
++	}								\
++}
++
++#define ICP_REGISTER_ASYM_FUNCTIONALITY_WITH_OCF(alg)			\
++{									\
++	if(OCF_REGISTRATION_STATUS_SUCCESS ==				\
++		crypto_kregister(icp_ocfDrvDriverId,			\
++				      alg,				\
++				      0)){				\
++		ocfStatus++;						\
++	}								\
++}
++
++#if ICP_OCF_PRINT_DEBUG_MESSAGES == 1
++#define DPRINTK(args...)      \
++{			      \
++                printk(args); \
++}
++
++#else				//ICP_OCF_PRINT_DEBUG_MESSAGES == 1
++
++#define DPRINTK(args...)
++
++#endif				//ICP_OCF_PRINT_DEBUG_MESSAGES == 1
++
++#if ICP_OCF_PRINT_KERN_ALERT == 1
++#define APRINTK(args...)      						\
++{			      						\
++       printk(KERN_ALERT args);						\
++}
++
++#else				//ICP_OCF_PRINT_KERN_ALERT == 1
++
++#define APRINTK(args...)
++
++#endif				//ICP_OCF_PRINT_KERN_ALERT == 1
++
++#if ICP_OCF_PRINT_KERN_ERRS == 1
++#define EPRINTK(args...)      \
++{			      \
++       printk(KERN_ERR args); \
++}
++
++#else				//ICP_OCF_PRINT_KERN_ERRS == 1
++
++#define EPRINTK(args...)
++
++#endif				//ICP_OCF_PRINT_KERN_ERRS == 1
++
++#define IPRINTK(args...)      \
++{			      \
++      printk(KERN_INFO args); \
++}
++
++/*END OF MACRO DEFINITIONS*/
++
++typedef enum {
++	ICP_OCF_DRV_ALG_CIPHER = 0,
++	ICP_OCF_DRV_ALG_HASH
++} icp_ocf_drv_alg_type_t;
++
++/* These are all defined in icp_common.c */
++extern atomic_t lac_session_failed_dereg_count;
++extern atomic_t icp_ocfDrvIsExiting;
++extern atomic_t num_ocf_to_drv_registered_sessions;
++
++/*These are use inputs used in icp_sym.c and icp_common.c
++  They are instantiated in icp_common.c*/
++extern int max_sessions;
++
++extern int32_t icp_ocfDrvDriverId;
++extern struct list_head icp_ocfDrvGlobalSymListHead;
++extern struct list_head icp_ocfDrvGlobalSymListHead_FreeMemList;
++extern struct workqueue_struct *icp_ocfDrvFreeLacSessionWorkQ;
++extern spinlock_t icp_ocfDrvSymSessInfoListSpinlock;
++extern rwlock_t icp_kmem_cache_destroy_alloc_lock;
++
++/*Slab zones for symmetric functionality, instantiated in icp_common.c*/
++extern struct kmem_cache *drvSessionData_zone;
++extern struct kmem_cache *drvOpData_zone;
++
++/*Slab zones for asymmetric functionality, instantiated in icp_common.c*/
++extern struct kmem_cache *drvDH_zone;
++extern struct kmem_cache *drvLnModExp_zone;
++extern struct kmem_cache *drvRSADecrypt_zone;
++extern struct kmem_cache *drvRSAPrivateKey_zone;
++extern struct kmem_cache *drvDSARSSign_zone;
++extern struct kmem_cache *drvDSARSSignKValue_zone;
++extern struct kmem_cache *drvDSAVerify_zone;
++
++/*Slab zones for flatbuffers and bufferlist*/
++extern struct kmem_cache *drvFlatBuffer_zone;
++
++#define ICP_OCF_DRV_DEFAULT_BUFFLIST_ARRAYS     (16)
++
++struct icp_drvBuffListInfo {
++	Cpa16U numBuffers;
++	Cpa32U metaSize;
++	Cpa32U metaOffset;
++	Cpa32U buffListSize;
++};
++extern struct icp_drvBuffListInfo defBuffListInfo;
++
++/*
++* This struct is used to keep a reference to the relevant node in the list
++* of sessionData structs, to the buffer type required by OCF and to the OCF
++* provided crp struct that needs to be returned. All this info is needed in
++* the callback function.
++*
++* IV can sometimes be stored in non-contiguous memory (e.g. skbuff
++* linked/frag list, therefore a contiguous memory space for the IV data must be
++* created and passed to LAC
++*
++*/
++struct icp_drvOpData {
++	CpaCySymOpData lacOpData;
++	uint32_t digestSizeInBytes;
++	struct cryptop *crp;
++	uint8_t bufferType;
++	uint8_t ivData[MAX_IV_LEN_IN_BYTES];
++	uint16_t numBufferListArray;
++	CpaBufferList srcBuffer;
++	CpaFlatBuffer bufferListArray[ICP_OCF_DRV_DEFAULT_BUFFLIST_ARRAYS];
++	CpaBoolean verifyResult;
++};
++/*Values used to derisk chances of performs being called against
++deregistered sessions (for which the slab page has been reclaimed)
++This is not a fix - since page frames are reclaimed from a slab, one cannot
++rely on that memory not being re-used by another app.*/
++typedef enum {
++	ICP_SESSION_INITIALISED = 0x5C5C5C,
++	ICP_SESSION_RUNNING = 0x005C00,
++	ICP_SESSION_DEREGISTERED = 0xC5C5C5
++} usage_derisk;
++
++/*
++This is the OCF<->OCF_DRV session object:
++
++1.The first member is a listNode. These session objects are added to a linked
++  list in order to make it easier to remove them all at session exit time.
++2.The second member is used to give the session object state and derisk the
++  possibility of OCF batch calls executing against a deregistered session (as
++  described above).
++3.The third member is a LAC<->OCF_DRV session handle (initialised with the first
++  perform request for that session).
++4.The fourth is the LAC session context. All the parameters for this structure
++  are only known when the first perform request for this session occurs. That is
++  why the OCF Tolapai Driver only registers a new LAC session at perform time
++*/
++struct icp_drvSessionData {
++	struct list_head listNode;
++	usage_derisk inUse;
++	CpaCySymSessionCtx sessHandle;
++	CpaCySymSessionSetupData lacSessCtx;
++};
++
++/* This struct is required for deferred session
++ deregistration as a work queue function can
++ only have one argument*/
++struct icp_ocfDrvFreeLacSession {
++	CpaCySymSessionCtx sessionToDeregister;
++	struct work_struct work;
++};
++
++int icp_ocfDrvNewSession(device_t dev, uint32_t * sild, struct cryptoini *cri);
++
++int icp_ocfDrvFreeLACSession(device_t dev, uint64_t sid);
++
++int icp_ocfDrvSymProcess(device_t dev, struct cryptop *crp, int hint);
++
++int icp_ocfDrvPkeProcess(device_t dev, struct cryptkop *krp, int hint);
++
++int icp_ocfDrvReadRandom(void *arg, uint32_t * buf, int maxwords);
++
++int icp_ocfDrvDeregRetry(CpaCySymSessionCtx sessionToDeregister);
++
++int icp_ocfDrvSkBuffToBufferList(struct sk_buff *skb,
++				 CpaBufferList * bufferList);
++
++int icp_ocfDrvBufferListToSkBuff(CpaBufferList * bufferList,
++				 struct sk_buff **skb);
++
++void icp_ocfDrvPtrAndLenToFlatBuffer(void *pData, uint32_t len,
++				     CpaFlatBuffer * pFlatBuffer);
++
++void icp_ocfDrvPtrAndLenToBufferList(void *pDataIn, uint32_t length,
++				     CpaBufferList * pBufferList);
++
++void icp_ocfDrvBufferListToPtrAndLen(CpaBufferList * pBufferList,
++				     void **ppDataOut, uint32_t * pLength);
++
++int icp_ocfDrvBufferListMemInfo(uint16_t numBuffers,
++				struct icp_drvBuffListInfo *buffListInfo);
++
++uint16_t icp_ocfDrvGetSkBuffFrags(struct sk_buff *pSkb);
++
++void icp_ocfDrvFreeFlatBuffer(CpaFlatBuffer * pFlatBuffer);
++
++int icp_ocfDrvAllocMetaData(CpaBufferList * pBufferList, 
++                const struct icp_drvOpData *pOpData);
++
++void icp_ocfDrvFreeMetaData(CpaBufferList * pBufferList);
++
++#endif
++/* ICP_OCF_H */
+diff -Nur linux-2.6.30.orig/crypto/ocf/ep80579/icp_sym.c linux-2.6.30/crypto/ocf/ep80579/icp_sym.c
+--- linux-2.6.30.orig/crypto/ocf/ep80579/icp_sym.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.30/crypto/ocf/ep80579/icp_sym.c	2009-06-11 10:55:27.000000000 +0200
+@@ -0,0 +1,1382 @@
++/***************************************************************************
++ *
++ * This file is provided under a dual BSD/GPLv2 license.  When using or 
++ *   redistributing this file, you may do so under either license.
++ * 
++ *   GPL LICENSE SUMMARY
++ * 
++ *   Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
++ * 
++ *   This program is free software; you can redistribute it and/or modify 
++ *   it under the terms of version 2 of the GNU General Public License as
++ *   published by the Free Software Foundation.
++ * 
++ *   This program is distributed in the hope that it will be useful, but 
++ *   WITHOUT ANY WARRANTY; without even the implied warranty of 
++ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU 
++ *   General Public License for more details.
++ * 
++ *   You should have received a copy of the GNU General Public License 
++ *   along with this program; if not, write to the Free Software 
++ *   Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *   The full GNU General Public License is included in this distribution 
++ *   in the file called LICENSE.GPL.
++ * 
++ *   Contact Information:
++ *   Intel Corporation
++ * 
++ *   BSD LICENSE 
++ * 
++ *   Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
++ *   All rights reserved.
++ * 
++ *   Redistribution and use in source and binary forms, with or without 
++ *   modification, are permitted provided that the following conditions 
++ *   are met:
++ * 
++ *     * Redistributions of source code must retain the above copyright 
++ *       notice, this list of conditions and the following disclaimer.
++ *     * Redistributions in binary form must reproduce the above copyright 
++ *       notice, this list of conditions and the following disclaimer in 
++ *       the documentation and/or other materials provided with the 
++ *       distribution.
++ *     * Neither the name of Intel Corporation nor the names of its 
++ *       contributors may be used to endorse or promote products derived 
++ *       from this software without specific prior written permission.
++ * 
++ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
++ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
++ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
++ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
++ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
++ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
++ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
++ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
++ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
++ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
++ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ * 
++ * 
++ *  version: Security.L.1.0.130
++ *
++ ***************************************************************************/
++/*
++ * An OCF module that uses the API for Intel® QuickAssist Technology to do the
++ * cryptography.
++ *
++ * This driver requires the ICP Access Library that is available from Intel in
++ * order to operate.
++ */
++
++#include "icp_ocf.h"
++
++/*This is the call back function for all symmetric cryptographic processes.
++  Its main functionality is to free driver crypto operation structure and to 
++  call back to OCF*/
++static void
++icp_ocfDrvSymCallBack(void *callbackTag,
++		      CpaStatus status,
++		      const CpaCySymOp operationType,
++		      void *pOpData,
++		      CpaBufferList * pDstBuffer, CpaBoolean verifyResult);
++
++/*This function is used to extract crypto processing information from the OCF
++  inputs, so as that it may be passed onto LAC*/
++static int
++icp_ocfDrvProcessDataSetup(struct icp_drvOpData *drvOpData,
++			   struct cryptodesc *crp_desc);
++
++/*This function checks whether the crp_desc argument pertains to a digest or a
++  cipher operation*/
++static int icp_ocfDrvAlgCheck(struct cryptodesc *crp_desc);
++
++/*This function copies all the passed in session context information and stores
++  it in a LAC context structure*/
++static int
++icp_ocfDrvAlgorithmSetup(struct cryptoini *cri,
++			 CpaCySymSessionSetupData * lacSessCtx);
++
++/*This top level function is used to find a pointer to where a digest is 
++  stored/needs to be inserted. */
++static uint8_t *icp_ocfDrvDigestPointerFind(struct icp_drvOpData *drvOpData,
++					    struct cryptodesc *crp_desc);
++
++/*This function is called when a digest pointer has to be found within a
++  SKBUFF.*/
++static inline uint8_t *icp_ocfDrvSkbuffDigestPointerFind(struct icp_drvOpData
++							 *drvOpData,
++							 int offsetInBytes,
++							 uint32_t
++							 digestSizeInBytes);
++
++/*The following two functions are called if the SKBUFF digest pointer is not 
++  positioned in the linear portion of the buffer (i.e. it is in a linked SKBUFF
++   or page fragment).*/
++/*This function takes care of the page fragment case.*/
++static inline uint8_t *icp_ocfDrvDigestSkbNRFragsCheck(struct sk_buff *skb,
++						       struct skb_shared_info
++						       *skb_shared,
++						       int offsetInBytes,
++						       uint32_t
++						       digestSizeInBytes);
++
++/*This function takes care of the linked list case.*/
++static inline uint8_t *icp_ocfDrvDigestSkbFragListCheck(struct sk_buff *skb,
++							struct skb_shared_info
++							*skb_shared,
++							int offsetInBytes,
++							uint32_t
++							digestSizeInBytes);
++
++/*This function is used to free an OCF->OCF_DRV session object*/
++static void icp_ocfDrvFreeOCFSession(struct icp_drvSessionData *sessionData);
++
++/*max IOV buffs supported in a UIO structure*/
++#define NUM_IOV_SUPPORTED		(1)
++
++/* Name        : icp_ocfDrvSymCallBack
++ *
++ * Description : When this function returns it signifies that the LAC
++ * component has completed the relevant symmetric operation. 
++ *
++ * Notes : The callbackTag is a pointer to an icp_drvOpData. This memory
++ * object was passed to LAC for the cryptographic processing and contains all
++ * the relevant information for cleaning up buffer handles etc. so that the
++ * OCF Tolapai Driver portion of this crypto operation can be fully completed.
++ */
++static void
++icp_ocfDrvSymCallBack(void *callbackTag,
++		      CpaStatus status,
++		      const CpaCySymOp operationType,
++		      void *pOpData,
++		      CpaBufferList * pDstBuffer, CpaBoolean verifyResult)
++{
++	struct cryptop *crp = NULL;
++	struct icp_drvOpData *temp_drvOpData =
++	    (struct icp_drvOpData *)callbackTag;
++	uint64_t *tempBasePtr = NULL;
++	uint32_t tempLen = 0;
++
++	if (NULL == temp_drvOpData) {
++		DPRINTK("%s(): The callback from the LAC component"
++			" has failed due to Null userOpaque data"
++			"(status == %d).\n", __FUNCTION__, status);
++		DPRINTK("%s(): Unable to call OCF back! \n", __FUNCTION__);
++		return;
++	}
++
++	crp = temp_drvOpData->crp;
++	crp->crp_etype = ICP_OCF_DRV_NO_CRYPTO_PROCESS_ERROR;
++
++	if (NULL == pOpData) {
++		DPRINTK("%s(): The callback from the LAC component"
++			" has failed due to Null Symmetric Op data"
++			"(status == %d).\n", __FUNCTION__, status);
++		crp->crp_etype = ECANCELED;
++		crypto_done(crp);
++		return;
++	}
++
++	if (NULL == pDstBuffer) {
++		DPRINTK("%s(): The callback from the LAC component"
++			" has failed due to Null Dst Bufferlist data"
++			"(status == %d).\n", __FUNCTION__, status);
++		crp->crp_etype = ECANCELED;
++		crypto_done(crp);
++		return;
++	}
++
++	if (CPA_STATUS_SUCCESS == status) {
++
++		if (temp_drvOpData->bufferType == CRYPTO_F_SKBUF) {
++			if (ICP_OCF_DRV_STATUS_SUCCESS !=
++			    icp_ocfDrvBufferListToSkBuff(pDstBuffer,
++							 (struct sk_buff **)
++							 &(crp->crp_buf))) {
++				EPRINTK("%s(): BufferList to SkBuff "
++					"conversion error.\n", __FUNCTION__);
++				crp->crp_etype = EPERM;
++			}
++		} else {
++			icp_ocfDrvBufferListToPtrAndLen(pDstBuffer,
++							(void **)&tempBasePtr,
++							&tempLen);
++			crp->crp_olen = (int)tempLen;
++		}
++
++	} else {
++		DPRINTK("%s(): The callback from the LAC component has failed"
++			"(status == %d).\n", __FUNCTION__, status);
++
++		crp->crp_etype = ECANCELED;
++	}
++
++	if (temp_drvOpData->numBufferListArray >
++	    ICP_OCF_DRV_DEFAULT_BUFFLIST_ARRAYS) {
++		kfree(pDstBuffer->pBuffers);
++	}
++	icp_ocfDrvFreeMetaData(pDstBuffer);
++	kmem_cache_free(drvOpData_zone, temp_drvOpData);
++
++	/* Invoke the OCF callback function */
++	crypto_done(crp);
++
++	return;
++}
++
++/* Name        : icp_ocfDrvNewSession 
++ *
++ * Description : This function will create a new Driver<->OCF session
++ *
++ * Notes : LAC session registration happens during the first perform call.
++ * That is the first time we know all information about a given session.
++ */
++int icp_ocfDrvNewSession(device_t dev, uint32_t * sid, struct cryptoini *cri)
++{
++	struct icp_drvSessionData *sessionData = NULL;
++	uint32_t delete_session = 0;
++
++	/* The SID passed in should be our driver ID. We can return the     */
++	/* local ID (LID) which is a unique identifier which we can use     */
++	/* to differentiate between the encrypt/decrypt LAC session handles */
++	if (NULL == sid) {
++		EPRINTK("%s(): Invalid input parameters - NULL sid.\n",
++			__FUNCTION__);
++		return EINVAL;
++	}
++
++	if (NULL == cri) {
++		EPRINTK("%s(): Invalid input parameters - NULL cryptoini.\n",
++			__FUNCTION__);
++		return EINVAL;
++	}
++
++	if (icp_ocfDrvDriverId != *sid) {
++		EPRINTK("%s(): Invalid input parameters - bad driver ID\n",
++			__FUNCTION__);
++		EPRINTK("\t sid = 0x08%p \n \t cri = 0x08%p \n", sid, cri);
++		return EINVAL;
++	}
++
++	sessionData = kmem_cache_zalloc(drvSessionData_zone, GFP_ATOMIC);
++	if (NULL == sessionData) {
++		DPRINTK("%s():No memory for Session Data\n", __FUNCTION__);
++		return ENOMEM;
++	}
++
++	/*ENTER CRITICAL SECTION */
++	spin_lock_bh(&icp_ocfDrvSymSessInfoListSpinlock);
++	/*put this check in the spinlock so no new sessions can be added to the
++	   linked list when we are exiting */
++	if (CPA_TRUE == atomic_read(&icp_ocfDrvIsExiting)) {
++		delete_session++;
++
++	} else if (NO_OCF_TO_DRV_MAX_SESSIONS != max_sessions) {
++		if (atomic_read(&num_ocf_to_drv_registered_sessions) >=
++		    (max_sessions -
++		     atomic_read(&lac_session_failed_dereg_count))) {
++			delete_session++;
++		} else {
++			atomic_inc(&num_ocf_to_drv_registered_sessions);
++			/* Add to session data linked list */
++			list_add(&(sessionData->listNode),
++				 &icp_ocfDrvGlobalSymListHead);
++		}
++
++	} else if (NO_OCF_TO_DRV_MAX_SESSIONS == max_sessions) {
++		list_add(&(sessionData->listNode),
++			 &icp_ocfDrvGlobalSymListHead);
++	}
++
++	sessionData->inUse = ICP_SESSION_INITIALISED;
++
++	/*EXIT CRITICAL SECTION */
++	spin_unlock_bh(&icp_ocfDrvSymSessInfoListSpinlock);
++
++	if (delete_session) {
++		DPRINTK("%s():No Session handles available\n", __FUNCTION__);
++		kmem_cache_free(drvSessionData_zone, sessionData);
++		return EPERM;
++	}
++
++	if (ICP_OCF_DRV_STATUS_SUCCESS !=
++	    icp_ocfDrvAlgorithmSetup(cri, &(sessionData->lacSessCtx))) {
++		DPRINTK("%s():algorithm not supported\n", __FUNCTION__);
++		icp_ocfDrvFreeOCFSession(sessionData);
++		return EINVAL;
++	}
++
++	if (cri->cri_next) {
++		if (cri->cri_next->cri_next != NULL) {
++			DPRINTK("%s():only two chained algorithms supported\n",
++				__FUNCTION__);
++			icp_ocfDrvFreeOCFSession(sessionData);
++			return EPERM;
++		}
++
++		if (ICP_OCF_DRV_STATUS_SUCCESS !=
++		    icp_ocfDrvAlgorithmSetup(cri->cri_next,
++					     &(sessionData->lacSessCtx))) {
++			DPRINTK("%s():second algorithm not supported\n",
++				__FUNCTION__);
++			icp_ocfDrvFreeOCFSession(sessionData);
++			return EINVAL;
++		}
++
++		sessionData->lacSessCtx.symOperation =
++		    CPA_CY_SYM_OP_ALGORITHM_CHAINING;
++	}
++
++	*sid = (uint32_t) sessionData;
++
++	return ICP_OCF_DRV_STATUS_SUCCESS;
++}
++
++/* Name        : icp_ocfDrvAlgorithmSetup
++ *
++ * Description : This function builds the session context data from the
++ * information supplied through OCF. Algorithm chain order and whether the
++ * session is Encrypt/Decrypt can only be found out at perform time however, so
++ * the session is registered with LAC at that time.
++ */
++static int
++icp_ocfDrvAlgorithmSetup(struct cryptoini *cri,
++			 CpaCySymSessionSetupData * lacSessCtx)
++{
++
++	lacSessCtx->sessionPriority = CPA_CY_PRIORITY_NORMAL;
++
++	switch (cri->cri_alg) {
++
++	case CRYPTO_NULL_CBC:
++		DPRINTK("%s(): NULL CBC\n", __FUNCTION__);
++		lacSessCtx->symOperation = CPA_CY_SYM_OP_CIPHER;
++		lacSessCtx->cipherSetupData.cipherAlgorithm =
++		    CPA_CY_SYM_CIPHER_NULL;
++		lacSessCtx->cipherSetupData.cipherKeyLenInBytes =
++		    cri->cri_klen / NUM_BITS_IN_BYTE;
++		lacSessCtx->cipherSetupData.pCipherKey = cri->cri_key;
++		break;
++
++	case CRYPTO_DES_CBC:
++		DPRINTK("%s(): DES CBC\n", __FUNCTION__);
++		lacSessCtx->symOperation = CPA_CY_SYM_OP_CIPHER;
++		lacSessCtx->cipherSetupData.cipherAlgorithm =
++		    CPA_CY_SYM_CIPHER_DES_CBC;
++		lacSessCtx->cipherSetupData.cipherKeyLenInBytes =
++		    cri->cri_klen / NUM_BITS_IN_BYTE;
++		lacSessCtx->cipherSetupData.pCipherKey = cri->cri_key;
++		break;
++
++	case CRYPTO_3DES_CBC:
++		DPRINTK("%s(): 3DES CBC\n", __FUNCTION__);
++		lacSessCtx->symOperation = CPA_CY_SYM_OP_CIPHER;
++		lacSessCtx->cipherSetupData.cipherAlgorithm =
++		    CPA_CY_SYM_CIPHER_3DES_CBC;
++		lacSessCtx->cipherSetupData.cipherKeyLenInBytes =
++		    cri->cri_klen / NUM_BITS_IN_BYTE;
++		lacSessCtx->cipherSetupData.pCipherKey = cri->cri_key;
++		break;
++
++	case CRYPTO_AES_CBC:
++		DPRINTK("%s(): AES CBC\n", __FUNCTION__);
++		lacSessCtx->symOperation = CPA_CY_SYM_OP_CIPHER;
++		lacSessCtx->cipherSetupData.cipherAlgorithm =
++		    CPA_CY_SYM_CIPHER_AES_CBC;
++		lacSessCtx->cipherSetupData.cipherKeyLenInBytes =
++		    cri->cri_klen / NUM_BITS_IN_BYTE;
++		lacSessCtx->cipherSetupData.pCipherKey = cri->cri_key;
++		break;
++
++	case CRYPTO_ARC4:
++		DPRINTK("%s(): ARC4\n", __FUNCTION__);
++		lacSessCtx->symOperation = CPA_CY_SYM_OP_CIPHER;
++		lacSessCtx->cipherSetupData.cipherAlgorithm =
++		    CPA_CY_SYM_CIPHER_ARC4;
++		lacSessCtx->cipherSetupData.cipherKeyLenInBytes =
++		    cri->cri_klen / NUM_BITS_IN_BYTE;
++		lacSessCtx->cipherSetupData.pCipherKey = cri->cri_key;
++		break;
++
++	case CRYPTO_SHA1:
++		DPRINTK("%s(): SHA1\n", __FUNCTION__);
++		lacSessCtx->symOperation = CPA_CY_SYM_OP_HASH;
++		lacSessCtx->hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_SHA1;
++		lacSessCtx->hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_PLAIN;
++		lacSessCtx->hashSetupData.digestResultLenInBytes =
++		    (cri->cri_mlen ?
++		     cri->cri_mlen : ICP_SHA1_DIGEST_SIZE_IN_BYTES);
++
++		break;
++
++	case CRYPTO_SHA1_HMAC:
++		DPRINTK("%s(): SHA1_HMAC\n", __FUNCTION__);
++		lacSessCtx->symOperation = CPA_CY_SYM_OP_HASH;
++		lacSessCtx->hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_SHA1;
++		lacSessCtx->hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_AUTH;
++		lacSessCtx->hashSetupData.digestResultLenInBytes =
++		    (cri->cri_mlen ?
++		     cri->cri_mlen : ICP_SHA1_DIGEST_SIZE_IN_BYTES);
++		lacSessCtx->hashSetupData.authModeSetupData.authKey =
++		    cri->cri_key;
++		lacSessCtx->hashSetupData.authModeSetupData.authKeyLenInBytes =
++		    cri->cri_klen / NUM_BITS_IN_BYTE;
++		lacSessCtx->hashSetupData.authModeSetupData.aadLenInBytes = 0;
++
++		break;
++
++	case CRYPTO_SHA2_256:
++		DPRINTK("%s(): SHA256\n", __FUNCTION__);
++		lacSessCtx->symOperation = CPA_CY_SYM_OP_HASH;
++		lacSessCtx->hashSetupData.hashAlgorithm =
++		    CPA_CY_SYM_HASH_SHA256;
++		lacSessCtx->hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_PLAIN;
++		lacSessCtx->hashSetupData.digestResultLenInBytes =
++		    (cri->cri_mlen ?
++		     cri->cri_mlen : ICP_SHA256_DIGEST_SIZE_IN_BYTES);
++
++		break;
++
++	case CRYPTO_SHA2_256_HMAC:
++		DPRINTK("%s(): SHA256_HMAC\n", __FUNCTION__);
++		lacSessCtx->symOperation = CPA_CY_SYM_OP_HASH;
++		lacSessCtx->hashSetupData.hashAlgorithm =
++		    CPA_CY_SYM_HASH_SHA256;
++		lacSessCtx->hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_AUTH;
++		lacSessCtx->hashSetupData.digestResultLenInBytes =
++		    (cri->cri_mlen ?
++		     cri->cri_mlen : ICP_SHA256_DIGEST_SIZE_IN_BYTES);
++		lacSessCtx->hashSetupData.authModeSetupData.authKey =
++		    cri->cri_key;
++		lacSessCtx->hashSetupData.authModeSetupData.authKeyLenInBytes =
++		    cri->cri_klen / NUM_BITS_IN_BYTE;
++		lacSessCtx->hashSetupData.authModeSetupData.aadLenInBytes = 0;
++
++		break;
++
++	case CRYPTO_SHA2_384:
++		DPRINTK("%s(): SHA384\n", __FUNCTION__);
++		lacSessCtx->symOperation = CPA_CY_SYM_OP_HASH;
++		lacSessCtx->hashSetupData.hashAlgorithm =
++		    CPA_CY_SYM_HASH_SHA384;
++		lacSessCtx->hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_PLAIN;
++		lacSessCtx->hashSetupData.digestResultLenInBytes =
++		    (cri->cri_mlen ?
++		     cri->cri_mlen : ICP_SHA384_DIGEST_SIZE_IN_BYTES);
++
++		break;
++
++	case CRYPTO_SHA2_384_HMAC:
++		DPRINTK("%s(): SHA384_HMAC\n", __FUNCTION__);
++		lacSessCtx->symOperation = CPA_CY_SYM_OP_HASH;
++		lacSessCtx->hashSetupData.hashAlgorithm =
++		    CPA_CY_SYM_HASH_SHA384;
++		lacSessCtx->hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_AUTH;
++		lacSessCtx->hashSetupData.digestResultLenInBytes =
++		    (cri->cri_mlen ?
++		     cri->cri_mlen : ICP_SHA384_DIGEST_SIZE_IN_BYTES);
++		lacSessCtx->hashSetupData.authModeSetupData.authKey =
++		    cri->cri_key;
++		lacSessCtx->hashSetupData.authModeSetupData.authKeyLenInBytes =
++		    cri->cri_klen / NUM_BITS_IN_BYTE;
++		lacSessCtx->hashSetupData.authModeSetupData.aadLenInBytes = 0;
++
++		break;
++
++	case CRYPTO_SHA2_512:
++		DPRINTK("%s(): SHA512\n", __FUNCTION__);
++		lacSessCtx->symOperation = CPA_CY_SYM_OP_HASH;
++		lacSessCtx->hashSetupData.hashAlgorithm =
++		    CPA_CY_SYM_HASH_SHA512;
++		lacSessCtx->hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_PLAIN;
++		lacSessCtx->hashSetupData.digestResultLenInBytes =
++		    (cri->cri_mlen ?
++		     cri->cri_mlen : ICP_SHA512_DIGEST_SIZE_IN_BYTES);
++
++		break;
++
++	case CRYPTO_SHA2_512_HMAC:
++		DPRINTK("%s(): SHA512_HMAC\n", __FUNCTION__);
++		lacSessCtx->symOperation = CPA_CY_SYM_OP_HASH;
++		lacSessCtx->hashSetupData.hashAlgorithm =
++		    CPA_CY_SYM_HASH_SHA512;
++		lacSessCtx->hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_AUTH;
++		lacSessCtx->hashSetupData.digestResultLenInBytes =
++		    (cri->cri_mlen ?
++		     cri->cri_mlen : ICP_SHA512_DIGEST_SIZE_IN_BYTES);
++		lacSessCtx->hashSetupData.authModeSetupData.authKey =
++		    cri->cri_key;
++		lacSessCtx->hashSetupData.authModeSetupData.authKeyLenInBytes =
++		    cri->cri_klen / NUM_BITS_IN_BYTE;
++		lacSessCtx->hashSetupData.authModeSetupData.aadLenInBytes = 0;
++
++		break;
++
++	case CRYPTO_MD5:
++		DPRINTK("%s(): MD5\n", __FUNCTION__);
++		lacSessCtx->symOperation = CPA_CY_SYM_OP_HASH;
++		lacSessCtx->hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_MD5;
++		lacSessCtx->hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_PLAIN;
++		lacSessCtx->hashSetupData.digestResultLenInBytes =
++		    (cri->cri_mlen ?
++		     cri->cri_mlen : ICP_MD5_DIGEST_SIZE_IN_BYTES);
++
++		break;
++
++	case CRYPTO_MD5_HMAC:
++		DPRINTK("%s(): MD5_HMAC\n", __FUNCTION__);
++		lacSessCtx->symOperation = CPA_CY_SYM_OP_HASH;
++		lacSessCtx->hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_MD5;
++		lacSessCtx->hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_AUTH;
++		lacSessCtx->hashSetupData.digestResultLenInBytes =
++		    (cri->cri_mlen ?
++		     cri->cri_mlen : ICP_MD5_DIGEST_SIZE_IN_BYTES);
++		lacSessCtx->hashSetupData.authModeSetupData.authKey =
++		    cri->cri_key;
++		lacSessCtx->hashSetupData.authModeSetupData.authKeyLenInBytes =
++		    cri->cri_klen / NUM_BITS_IN_BYTE;
++		lacSessCtx->hashSetupData.authModeSetupData.aadLenInBytes = 0;
++
++		break;
++
++	default:
++		DPRINTK("%s(): ALG Setup FAIL\n", __FUNCTION__);
++		return ICP_OCF_DRV_STATUS_FAIL;
++	}
++
++	return ICP_OCF_DRV_STATUS_SUCCESS;
++}
++
++/* Name        : icp_ocfDrvFreeOCFSession
++ *
++ * Description : This function deletes all existing Session data representing
++ * the Cryptographic session established between OCF and this driver. This
++ * also includes freeing the memory allocated for the session context. The
++ * session object is also removed from the session linked list.
++ */
++static void icp_ocfDrvFreeOCFSession(struct icp_drvSessionData *sessionData)
++{
++
++	sessionData->inUse = ICP_SESSION_DEREGISTERED;
++
++	/*ENTER CRITICAL SECTION */
++	spin_lock_bh(&icp_ocfDrvSymSessInfoListSpinlock);
++
++	if (CPA_TRUE == atomic_read(&icp_ocfDrvIsExiting)) {
++		/*If the Driver is exiting, allow that process to
++		   handle any deletions */
++		/*EXIT CRITICAL SECTION */
++		spin_unlock_bh(&icp_ocfDrvSymSessInfoListSpinlock);
++		return;
++	}
++
++	atomic_dec(&num_ocf_to_drv_registered_sessions);
++
++	list_del(&(sessionData->listNode));
++
++	/*EXIT CRITICAL SECTION */
++	spin_unlock_bh(&icp_ocfDrvSymSessInfoListSpinlock);
++
++	if (NULL != sessionData->sessHandle) {
++		kfree(sessionData->sessHandle);
++	}
++	kmem_cache_free(drvSessionData_zone, sessionData);
++}
++
++/* Name        : icp_ocfDrvFreeLACSession
++ *
++ * Description : This attempts to deregister a LAC session. If it fails, the
++ * deregistation retry function is called.
++ */
++int icp_ocfDrvFreeLACSession(device_t dev, uint64_t sid)
++{
++	CpaCySymSessionCtx sessionToDeregister = NULL;
++	struct icp_drvSessionData *sessionData = NULL;
++	CpaStatus lacStatus = CPA_STATUS_SUCCESS;
++	int retval = 0;
++
++	sessionData = (struct icp_drvSessionData *)CRYPTO_SESID2LID(sid);
++	if (NULL == sessionData) {
++		EPRINTK("%s(): OCF Free session called with Null Session ID.\n",
++			__FUNCTION__);
++		return EINVAL;
++	}
++
++	sessionToDeregister = sessionData->sessHandle;
++
++	if (ICP_SESSION_INITIALISED == sessionData->inUse) {
++		DPRINTK("%s() Session not registered with LAC\n", __FUNCTION__);
++	} else if (NULL == sessionData->sessHandle) {
++		EPRINTK
++		    ("%s(): OCF Free session called with Null Session Handle.\n",
++		     __FUNCTION__);
++		return EINVAL;
++	} else {
++		lacStatus = cpaCySymRemoveSession(CPA_INSTANCE_HANDLE_SINGLE,
++						  sessionToDeregister);
++		if (CPA_STATUS_RETRY == lacStatus) {
++			if (ICP_OCF_DRV_STATUS_SUCCESS !=
++			    icp_ocfDrvDeregRetry(&sessionToDeregister)) {
++				/* the retry function increments the 
++				   dereg failed count */
++				DPRINTK("%s(): LAC failed to deregister the "
++					"session. (localSessionId= %p)\n",
++					__FUNCTION__, sessionToDeregister);
++				retval = EPERM;
++			}
++
++		} else if (CPA_STATUS_SUCCESS != lacStatus) {
++			DPRINTK("%s(): LAC failed to deregister the session. "
++				"localSessionId= %p, lacStatus = %d\n",
++				__FUNCTION__, sessionToDeregister, lacStatus);
++			atomic_inc(&lac_session_failed_dereg_count);
++			retval = EPERM;
++		}
++	}
++
++	icp_ocfDrvFreeOCFSession(sessionData);
++	return retval;
++
++}
++
++/* Name        : icp_ocfDrvAlgCheck 
++ *
++ * Description : This function checks whether the cryptodesc argument pertains
++ * to a sym or hash function
++ */
++static int icp_ocfDrvAlgCheck(struct cryptodesc *crp_desc)
++{
++
++	if (crp_desc->crd_alg == CRYPTO_3DES_CBC ||
++	    crp_desc->crd_alg == CRYPTO_AES_CBC ||
++	    crp_desc->crd_alg == CRYPTO_DES_CBC ||
++	    crp_desc->crd_alg == CRYPTO_NULL_CBC ||
++	    crp_desc->crd_alg == CRYPTO_ARC4) {
++		return ICP_OCF_DRV_ALG_CIPHER;
++	}
++
++	return ICP_OCF_DRV_ALG_HASH;
++}
++
++/* Name        : icp_ocfDrvSymProcess 
++ *
++ * Description : This function will map symmetric functionality calls from OCF
++ * to the LAC API. It will also allocate memory to store the session context.
++ * 
++ * Notes: If it is the first perform call for a given session, then a LAC
++ * session is registered. After the session is registered, no checks as
++ * to whether session paramaters have changed (e.g. alg chain order) are
++ * done.
++ */
++int icp_ocfDrvSymProcess(device_t dev, struct cryptop *crp, int hint)
++{
++	struct icp_drvSessionData *sessionData = NULL;
++	struct icp_drvOpData *drvOpData = NULL;
++	CpaStatus lacStatus = CPA_STATUS_SUCCESS;
++	Cpa32U sessionCtxSizeInBytes = 0;
++	uint16_t numBufferListArray = 0;
++
++	if (NULL == crp) {
++		DPRINTK("%s(): Invalid input parameters, cryptop is NULL\n",
++			__FUNCTION__);
++		return EINVAL;
++	}
++
++	if (NULL == crp->crp_desc) {
++		DPRINTK("%s(): Invalid input parameters, no crp_desc attached "
++			"to crp\n", __FUNCTION__);
++		crp->crp_etype = EINVAL;
++		return EINVAL;
++	}
++
++	if (NULL == crp->crp_buf) {
++		DPRINTK("%s(): Invalid input parameters, no buffer attached "
++			"to crp\n", __FUNCTION__);
++		crp->crp_etype = EINVAL;
++		return EINVAL;
++	}
++
++	if (CPA_TRUE == atomic_read(&icp_ocfDrvIsExiting)) {
++		crp->crp_etype = EFAULT;
++		return EFAULT;
++	}
++
++	sessionData = (struct icp_drvSessionData *)
++	    (CRYPTO_SESID2LID(crp->crp_sid));
++	if (NULL == sessionData) {
++		DPRINTK("%s(): Invalid input parameters, Null Session ID \n",
++			__FUNCTION__);
++		crp->crp_etype = EINVAL;
++		return EINVAL;
++	}
++
++/*If we get a request against a deregisted session, cancel operation*/
++	if (ICP_SESSION_DEREGISTERED == sessionData->inUse) {
++		DPRINTK("%s(): Session ID %d was deregistered \n",
++			__FUNCTION__, (int)(CRYPTO_SESID2LID(crp->crp_sid)));
++		crp->crp_etype = EFAULT;
++		return EFAULT;
++	}
++
++/*If none of the session states are set, then the session structure was either
++  not initialised properly or we are reading from a freed memory area (possible
++  due to OCF batch mode not removing queued requests against deregistered 
++  sessions*/
++	if (ICP_SESSION_INITIALISED != sessionData->inUse &&
++	    ICP_SESSION_RUNNING != sessionData->inUse) {
++		DPRINTK("%s(): Session - ID %d - not properly initialised or "
++			"memory freed back to the kernel \n",
++			__FUNCTION__, (int)(CRYPTO_SESID2LID(crp->crp_sid)));
++		crp->crp_etype = EINVAL;
++		return EINVAL;
++	}
++
++	/*For the below checks, remember error checking is already done in LAC.
++	   We're not validating inputs subsequent to registration */
++	if (sessionData->inUse == ICP_SESSION_INITIALISED) {
++		DPRINTK("%s(): Initialising session\n", __FUNCTION__);
++
++		if (NULL != crp->crp_desc->crd_next) {
++			if (ICP_OCF_DRV_ALG_CIPHER ==
++			    icp_ocfDrvAlgCheck(crp->crp_desc)) {
++
++				sessionData->lacSessCtx.algChainOrder =
++				    CPA_CY_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH;
++
++				if (crp->crp_desc->crd_flags & CRD_F_ENCRYPT) {
++					sessionData->lacSessCtx.cipherSetupData.
++					    cipherDirection =
++					    CPA_CY_SYM_CIPHER_DIRECTION_ENCRYPT;
++				} else {
++					sessionData->lacSessCtx.cipherSetupData.
++					    cipherDirection =
++					    CPA_CY_SYM_CIPHER_DIRECTION_DECRYPT;
++				}
++			} else {
++				sessionData->lacSessCtx.algChainOrder =
++				    CPA_CY_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER;
++
++				if (crp->crp_desc->crd_next->crd_flags &
++				    CRD_F_ENCRYPT) {
++					sessionData->lacSessCtx.cipherSetupData.
++					    cipherDirection =
++					    CPA_CY_SYM_CIPHER_DIRECTION_ENCRYPT;
++				} else {
++					sessionData->lacSessCtx.cipherSetupData.
++					    cipherDirection =
++					    CPA_CY_SYM_CIPHER_DIRECTION_DECRYPT;
++				}
++
++			}
++
++		} else if (ICP_OCF_DRV_ALG_CIPHER ==
++			   icp_ocfDrvAlgCheck(crp->crp_desc)) {
++			if (crp->crp_desc->crd_flags & CRD_F_ENCRYPT) {
++				sessionData->lacSessCtx.cipherSetupData.
++				    cipherDirection =
++				    CPA_CY_SYM_CIPHER_DIRECTION_ENCRYPT;
++			} else {
++				sessionData->lacSessCtx.cipherSetupData.
++				    cipherDirection =
++				    CPA_CY_SYM_CIPHER_DIRECTION_DECRYPT;
++			}
++
++		}
++
++		/*No action required for standalone Auth here */
++
++		/* Allocate memory for SymSessionCtx before the Session Registration */
++		lacStatus =
++		    cpaCySymSessionCtxGetSize(CPA_INSTANCE_HANDLE_SINGLE,
++					      &(sessionData->lacSessCtx),
++					      &sessionCtxSizeInBytes);
++		if (CPA_STATUS_SUCCESS != lacStatus) {
++			EPRINTK("%s(): cpaCySymSessionCtxGetSize failed - %d\n",
++				__FUNCTION__, lacStatus);
++			return EINVAL;
++		}
++		sessionData->sessHandle =
++		    kmalloc(sessionCtxSizeInBytes, GFP_ATOMIC);
++		if (NULL == sessionData->sessHandle) {
++			EPRINTK
++			    ("%s(): Failed to get memory for SymSessionCtx\n",
++			     __FUNCTION__);
++			return ENOMEM;
++		}
++
++		lacStatus = cpaCySymInitSession(CPA_INSTANCE_HANDLE_SINGLE,
++						icp_ocfDrvSymCallBack,
++						&(sessionData->lacSessCtx),
++						sessionData->sessHandle);
++
++		if (CPA_STATUS_SUCCESS != lacStatus) {
++			EPRINTK("%s(): cpaCySymInitSession failed -%d \n",
++				__FUNCTION__, lacStatus);
++			return EFAULT;
++		}
++
++		sessionData->inUse = ICP_SESSION_RUNNING;
++	}
++
++	drvOpData = kmem_cache_zalloc(drvOpData_zone, GFP_ATOMIC);
++	if (NULL == drvOpData) {
++		EPRINTK("%s():Failed to get memory for drvOpData\n",
++			__FUNCTION__);
++		crp->crp_etype = ENOMEM;
++		return ENOMEM;
++	}
++
++	drvOpData->lacOpData.pSessionCtx = sessionData->sessHandle;
++	drvOpData->digestSizeInBytes = sessionData->lacSessCtx.hashSetupData.
++	    digestResultLenInBytes;
++	drvOpData->crp = crp;
++
++	/* Set the default buffer list array memory allocation */
++	drvOpData->srcBuffer.pBuffers = drvOpData->bufferListArray;
++	drvOpData->numBufferListArray = ICP_OCF_DRV_DEFAULT_BUFFLIST_ARRAYS;
++
++	/* 
++	 * Allocate buffer list array memory allocation if the
++	 * data fragment is more than the default allocation
++	 */
++	if (crp->crp_flags & CRYPTO_F_SKBUF) {
++		numBufferListArray = icp_ocfDrvGetSkBuffFrags((struct sk_buff *)
++							      crp->crp_buf);
++		if (ICP_OCF_DRV_DEFAULT_BUFFLIST_ARRAYS < numBufferListArray) {
++			DPRINTK("%s() numBufferListArray more than default\n",
++				__FUNCTION__);
++			drvOpData->srcBuffer.pBuffers = NULL;
++			drvOpData->srcBuffer.pBuffers =
++			    kmalloc(numBufferListArray *
++				    sizeof(CpaFlatBuffer), GFP_ATOMIC);
++			if (NULL == drvOpData->srcBuffer.pBuffers) {
++				EPRINTK("%s() Failed to get memory for "
++					"pBuffers\n", __FUNCTION__);
++				kmem_cache_free(drvOpData_zone, drvOpData);
++				crp->crp_etype = ENOMEM;
++				return ENOMEM;
++			}
++			drvOpData->numBufferListArray = numBufferListArray;
++		}
++	}
++
++	/*
++	 * Check the type of buffer structure we got and convert it into
++	 * CpaBufferList format.
++	 */
++	if (crp->crp_flags & CRYPTO_F_SKBUF) {
++		if (ICP_OCF_DRV_STATUS_SUCCESS !=
++		    icp_ocfDrvSkBuffToBufferList((struct sk_buff *)crp->crp_buf,
++						 &(drvOpData->srcBuffer))) {
++			EPRINTK("%s():Failed to translate from SK_BUF "
++				"to bufferlist\n", __FUNCTION__);
++			crp->crp_etype = EINVAL;
++			goto err;
++		}
++
++		drvOpData->bufferType = CRYPTO_F_SKBUF;
++	} else if (crp->crp_flags & CRYPTO_F_IOV) {
++		/* OCF only supports IOV of one entry. */
++		if (NUM_IOV_SUPPORTED ==
++		    ((struct uio *)(crp->crp_buf))->uio_iovcnt) {
++
++			icp_ocfDrvPtrAndLenToBufferList(((struct uio *)(crp->
++									crp_buf))->
++							uio_iov[0].iov_base,
++							((struct uio *)(crp->
++									crp_buf))->
++							uio_iov[0].iov_len,
++							&(drvOpData->
++							  srcBuffer));
++
++			drvOpData->bufferType = CRYPTO_F_IOV;
++
++		} else {
++			DPRINTK("%s():Unable to handle IOVs with lengths of "
++				"greater than one!\n", __FUNCTION__);
++			crp->crp_etype = EINVAL;
++			goto err;
++		}
++
++	} else {
++		icp_ocfDrvPtrAndLenToBufferList(crp->crp_buf,
++						crp->crp_ilen,
++						&(drvOpData->srcBuffer));
++
++		drvOpData->bufferType = CRYPTO_BUF_CONTIG;
++	}
++
++	if (ICP_OCF_DRV_STATUS_SUCCESS !=
++	    icp_ocfDrvProcessDataSetup(drvOpData, drvOpData->crp->crp_desc)) {
++		crp->crp_etype = EINVAL;
++		goto err;
++	}
++
++	if (drvOpData->crp->crp_desc->crd_next != NULL) {
++		if (icp_ocfDrvProcessDataSetup(drvOpData, drvOpData->crp->
++					       crp_desc->crd_next)) {
++			crp->crp_etype = EINVAL;
++			goto err;
++		}
++
++	}
++
++	/* Allocate srcBuffer's private meta data */
++	if (ICP_OCF_DRV_STATUS_SUCCESS !=
++	    icp_ocfDrvAllocMetaData(&(drvOpData->srcBuffer), drvOpData)) {
++		EPRINTK("%s() icp_ocfDrvAllocMetaData failed\n", __FUNCTION__);
++		memset(&(drvOpData->lacOpData), 0, sizeof(CpaCySymOpData));
++		crp->crp_etype = EINVAL;
++		goto err;
++	}
++
++	/* Perform "in-place" crypto operation */
++	lacStatus = cpaCySymPerformOp(CPA_INSTANCE_HANDLE_SINGLE,
++				      (void *)drvOpData,
++				      &(drvOpData->lacOpData),
++				      &(drvOpData->srcBuffer),
++				      &(drvOpData->srcBuffer),
++				      &(drvOpData->verifyResult));
++	if (CPA_STATUS_RETRY == lacStatus) {
++		DPRINTK("%s(): cpaCySymPerformOp retry, lacStatus = %d\n",
++			__FUNCTION__, lacStatus);
++		memset(&(drvOpData->lacOpData), 0, sizeof(CpaCySymOpData));
++		crp->crp_etype = EINVAL;
++		goto err;
++	}
++	if (CPA_STATUS_SUCCESS != lacStatus) {
++		EPRINTK("%s(): cpaCySymPerformOp failed, lacStatus = %d\n",
++			__FUNCTION__, lacStatus);
++		memset(&(drvOpData->lacOpData), 0, sizeof(CpaCySymOpData));
++		crp->crp_etype = EINVAL;
++		goto err;
++	}
++
++	return 0;		//OCF success status value
++
++      err:
++	if (drvOpData->numBufferListArray > ICP_OCF_DRV_DEFAULT_BUFFLIST_ARRAYS) {
++		kfree(drvOpData->srcBuffer.pBuffers);
++	}
++	icp_ocfDrvFreeMetaData(&(drvOpData->srcBuffer));
++	kmem_cache_free(drvOpData_zone, drvOpData);
++
++	return crp->crp_etype;
++}
++
++/* Name        : icp_ocfDrvProcessDataSetup
++ *
++ * Description : This function will setup all the cryptographic operation data
++ *               that is required by LAC to execute the operation.
++ */
++static int icp_ocfDrvProcessDataSetup(struct icp_drvOpData *drvOpData,
++				      struct cryptodesc *crp_desc)
++{
++	CpaCyRandGenOpData randGenOpData;
++	CpaFlatBuffer randData;
++
++	drvOpData->lacOpData.packetType = CPA_CY_SYM_PACKET_TYPE_FULL;
++
++	/* Convert from the cryptop to the ICP LAC crypto parameters */
++	switch (crp_desc->crd_alg) {
++	case CRYPTO_NULL_CBC:
++		drvOpData->lacOpData.
++		    cryptoStartSrcOffsetInBytes = crp_desc->crd_skip;
++		drvOpData->lacOpData.
++		    messageLenToCipherInBytes = crp_desc->crd_len;
++		drvOpData->verifyResult = CPA_FALSE;
++		drvOpData->lacOpData.ivLenInBytes = NULL_BLOCK_LEN;
++		break;
++	case CRYPTO_DES_CBC:
++		drvOpData->lacOpData.
++		    cryptoStartSrcOffsetInBytes = crp_desc->crd_skip;
++		drvOpData->lacOpData.
++		    messageLenToCipherInBytes = crp_desc->crd_len;
++		drvOpData->verifyResult = CPA_FALSE;
++		drvOpData->lacOpData.ivLenInBytes = DES_BLOCK_LEN;
++		break;
++	case CRYPTO_3DES_CBC:
++		drvOpData->lacOpData.
++		    cryptoStartSrcOffsetInBytes = crp_desc->crd_skip;
++		drvOpData->lacOpData.
++		    messageLenToCipherInBytes = crp_desc->crd_len;
++		drvOpData->verifyResult = CPA_FALSE;
++		drvOpData->lacOpData.ivLenInBytes = DES3_BLOCK_LEN;
++		break;
++	case CRYPTO_ARC4:
++		drvOpData->lacOpData.
++		    cryptoStartSrcOffsetInBytes = crp_desc->crd_skip;
++		drvOpData->lacOpData.
++		    messageLenToCipherInBytes = crp_desc->crd_len;
++		drvOpData->verifyResult = CPA_FALSE;
++		drvOpData->lacOpData.ivLenInBytes = ARC4_COUNTER_LEN;
++		break;
++	case CRYPTO_AES_CBC:
++		drvOpData->lacOpData.
++		    cryptoStartSrcOffsetInBytes = crp_desc->crd_skip;
++		drvOpData->lacOpData.
++		    messageLenToCipherInBytes = crp_desc->crd_len;
++		drvOpData->verifyResult = CPA_FALSE;
++		drvOpData->lacOpData.ivLenInBytes = RIJNDAEL128_BLOCK_LEN;
++		break;
++	case CRYPTO_SHA1:
++	case CRYPTO_SHA1_HMAC:
++	case CRYPTO_SHA2_256:
++	case CRYPTO_SHA2_256_HMAC:
++	case CRYPTO_SHA2_384:
++	case CRYPTO_SHA2_384_HMAC:
++	case CRYPTO_SHA2_512:
++	case CRYPTO_SHA2_512_HMAC:
++	case CRYPTO_MD5:
++	case CRYPTO_MD5_HMAC:
++		drvOpData->lacOpData.
++		    hashStartSrcOffsetInBytes = crp_desc->crd_skip;
++		drvOpData->lacOpData.
++		    messageLenToHashInBytes = crp_desc->crd_len;
++		drvOpData->lacOpData.
++		    pDigestResult =
++		    icp_ocfDrvDigestPointerFind(drvOpData, crp_desc);
++
++		if (NULL == drvOpData->lacOpData.pDigestResult) {
++			DPRINTK("%s(): ERROR - could not calculate "
++				"Digest Result memory address\n", __FUNCTION__);
++			return ICP_OCF_DRV_STATUS_FAIL;
++		}
++
++		drvOpData->lacOpData.digestVerify = CPA_FALSE;
++		break;
++	default:
++		DPRINTK("%s(): Crypto process error - algorithm not "
++			"found \n", __FUNCTION__);
++		return ICP_OCF_DRV_STATUS_FAIL;
++	}
++
++	/* Figure out what the IV is supposed to be */
++	if ((crp_desc->crd_alg == CRYPTO_DES_CBC) ||
++	    (crp_desc->crd_alg == CRYPTO_3DES_CBC) ||
++	    (crp_desc->crd_alg == CRYPTO_AES_CBC)) {
++		/*ARC4 doesn't use an IV */
++		if (crp_desc->crd_flags & CRD_F_IV_EXPLICIT) {
++			/* Explicit IV provided to OCF */
++			drvOpData->lacOpData.pIv = crp_desc->crd_iv;
++		} else {
++			/* IV is not explicitly provided to OCF */
++
++			/* Point the LAC OP Data IV pointer to our allocated
++			   storage location for this session. */
++			drvOpData->lacOpData.pIv = drvOpData->ivData;
++
++			if ((crp_desc->crd_flags & CRD_F_ENCRYPT) &&
++			    ((crp_desc->crd_flags & CRD_F_IV_PRESENT) == 0)) {
++
++				/* Encrypting - need to create IV */
++				randGenOpData.generateBits = CPA_TRUE;
++				randGenOpData.lenInBytes = MAX_IV_LEN_IN_BYTES;
++
++				icp_ocfDrvPtrAndLenToFlatBuffer((Cpa8U *)
++								drvOpData->
++								ivData,
++								MAX_IV_LEN_IN_BYTES,
++								&randData);
++
++				if (CPA_STATUS_SUCCESS !=
++				    cpaCyRandGen(CPA_INSTANCE_HANDLE_SINGLE,
++						 NULL, NULL,
++						 &randGenOpData, &randData)) {
++					DPRINTK("%s(): ERROR - Failed to"
++						" generate"
++						" Initialisation Vector\n",
++						__FUNCTION__);
++					return ICP_OCF_DRV_STATUS_FAIL;
++				}
++
++				crypto_copyback(drvOpData->crp->
++						crp_flags,
++						drvOpData->crp->crp_buf,
++						crp_desc->crd_inject,
++						drvOpData->lacOpData.
++						ivLenInBytes,
++						(caddr_t) (drvOpData->lacOpData.
++							   pIv));
++			} else {
++				/* Reading IV from buffer */
++				crypto_copydata(drvOpData->crp->
++						crp_flags,
++						drvOpData->crp->crp_buf,
++						crp_desc->crd_inject,
++						drvOpData->lacOpData.
++						ivLenInBytes,
++						(caddr_t) (drvOpData->lacOpData.
++							   pIv));
++			}
++
++		}
++
++	}
++
++	return ICP_OCF_DRV_STATUS_SUCCESS;
++}
++
++/* Name        : icp_ocfDrvDigestPointerFind
++ *
++ * Description : This function is used to find the memory address of where the
++ * digest information shall be stored in. Input buffer types are an skbuff, iov
++ * or flat buffer. The address is found using the buffer data start address and
++ * an offset.
++ *
++ * Note: For a Linux skbuff, the digest address may live in a memory area
++ * linked from the head buffer. Those linked areas must be walked, consuming
++ * the offset against each fragment's length, to locate the digest start
++ * address. It must also be verified that enough space remains for the digest.
++ */
++
++static uint8_t *icp_ocfDrvDigestPointerFind(struct icp_drvOpData *drvOpData,
++					    struct cryptodesc *crp_desc)
++{
++
++	int offsetInBytes = crp_desc->crd_inject;
++	uint32_t digestSizeInBytes = drvOpData->digestSizeInBytes;
++	uint8_t *flat_buffer_base = NULL;
++	int flat_buffer_length = 0;
++	struct sk_buff *skb;
++
++	if (drvOpData->crp->crp_flags & CRYPTO_F_SKBUF) {
++		/*check if enough overall space to store hash */
++		skb = (struct sk_buff *)(drvOpData->crp->crp_buf);
++
++		if (skb->len < (offsetInBytes + digestSizeInBytes)) {
++			DPRINTK("%s() Not enough space for Digest"
++				" payload after the offset (%d), "
++				"digest size (%d) \n", __FUNCTION__,
++				offsetInBytes, digestSizeInBytes);
++			return NULL;
++		}
++
++		return icp_ocfDrvSkbuffDigestPointerFind(drvOpData,
++							 offsetInBytes,
++							 digestSizeInBytes);
++
++	} else {
++		/* IOV or flat buffer */
++		if (drvOpData->crp->crp_flags & CRYPTO_F_IOV) {
++			/*single IOV check has already been done */
++			flat_buffer_base = ((struct uio *)
++					    (drvOpData->crp->crp_buf))->
++			    uio_iov[0].iov_base;
++			flat_buffer_length = ((struct uio *)
++					      (drvOpData->crp->crp_buf))->
++			    uio_iov[0].iov_len;
++		} else {
++			flat_buffer_base = (uint8_t *) drvOpData->crp->crp_buf;
++			flat_buffer_length = drvOpData->crp->crp_ilen;
++		}
++
++		if (flat_buffer_length < (offsetInBytes + digestSizeInBytes)) {
++			DPRINTK("%s() Not enough space for Digest "
++				"(IOV/Flat Buffer) \n", __FUNCTION__);
++			return NULL;
++		} else {
++			return (uint8_t *) (flat_buffer_base + offsetInBytes);
++		}
++	}
++	DPRINTK("%s() Should not reach this point\n", __FUNCTION__);
++	return NULL;
++}
++
++/* Name        : icp_ocfDrvSkbuffDigestPointerFind
++ *
++ * Description : This function is used by icp_ocfDrvDigestPointerFind to process
++ * the non-linear portion of the skbuff if the fragmentation type is a linked
++ * list (frag_list is not NULL in the skb_shared_info structure)
++ */
++static inline uint8_t *icp_ocfDrvSkbuffDigestPointerFind(struct icp_drvOpData
++							 *drvOpData,
++							 int offsetInBytes,
++							 uint32_t
++							 digestSizeInBytes)
++{
++
++	struct sk_buff *skb = NULL;
++	struct skb_shared_info *skb_shared = NULL;
++
++	uint32_t skbuffisnonlinear = 0;
++
++	uint32_t skbheadlen = 0;
++
++	skb = (struct sk_buff *)(drvOpData->crp->crp_buf);
++	skbuffisnonlinear = skb_is_nonlinear(skb);
++
++	skbheadlen = skb_headlen(skb);
++
++	/*Linear skb checks */
++	if (skbheadlen > offsetInBytes) {
++
++		if (skbheadlen >= (offsetInBytes + digestSizeInBytes)) {
++			return (uint8_t *) (skb->data + offsetInBytes);
++		} else {
++			DPRINTK("%s() Auth payload stretches "
++				"accross contiguous memory\n", __FUNCTION__);
++			return NULL;
++		}
++	} else {
++		if (skbuffisnonlinear) {
++			offsetInBytes -= skbheadlen;
++		} else {
++			DPRINTK("%s() Offset outside of buffer boundaries\n",
++				__FUNCTION__);
++			return NULL;
++		}
++	}
++
++	/*Non Linear checks */
++	skb_shared = (struct skb_shared_info *)(skb->end);
++	if (unlikely(NULL == skb_shared)) {
++		DPRINTK("%s() skbuff shared info stucture is NULL! \n",
++			__FUNCTION__);
++		return NULL;
++	} else if ((0 != skb_shared->nr_frags) &&
++		   (skb_shared->frag_list != NULL)) {
++		DPRINTK("%s() skbuff nr_frags AND "
++			"frag_list not supported \n", __FUNCTION__);
++		return NULL;
++	}
++
++	/*TCP segmentation more likely than IP fragmentation */
++	if (likely(0 != skb_shared->nr_frags)) {
++		return icp_ocfDrvDigestSkbNRFragsCheck(skb, skb_shared,
++						       offsetInBytes,
++						       digestSizeInBytes);
++	} else if (skb_shared->frag_list != NULL) {
++		return icp_ocfDrvDigestSkbFragListCheck(skb, skb_shared,
++							offsetInBytes,
++							digestSizeInBytes);
++	} else {
++		DPRINTK("%s() skbuff is non-linear but does not show any "
++			"linked data\n", __FUNCTION__);
++		return NULL;
++	}
++
++}
++
++/* Name        : icp_ocfDrvDigestSkbNRFragsCheck
++ *
++ * Description : This function is used by icp_ocfDrvSkbuffDigestPointerFind to
++ * process the non-linear portion of the skbuff, if the fragmentation type is
++ * page fragments
++ */
++static inline uint8_t *icp_ocfDrvDigestSkbNRFragsCheck(struct sk_buff *skb,
++						       struct skb_shared_info
++						       *skb_shared,
++						       int offsetInBytes,
++						       uint32_t
++						       digestSizeInBytes)
++{
++	int i = 0;
++	/*nr_frags starts from 1 */
++	if (MAX_SKB_FRAGS < skb_shared->nr_frags) {
++		DPRINTK("%s error processing skbuff "
++			"page frame -- MAX FRAGS exceeded \n", __FUNCTION__);
++		return NULL;
++	}
++
++	for (i = 0; i < skb_shared->nr_frags; i++) {
++
++		if (offsetInBytes >= skb_shared->frags[i].size) {
++			/*offset still greater than data position */
++			offsetInBytes -= skb_shared->frags[i].size;
++		} else {
++			/* found the page containing start of hash */
++
++			if (NULL == skb_shared->frags[i].page) {
++				DPRINTK("%s() Linked page is NULL!\n",
++					__FUNCTION__);
++				return NULL;
++			}
++
++			if (offsetInBytes + digestSizeInBytes >
++			    skb_shared->frags[i].size) {
++				DPRINTK("%s() Auth payload stretches accross "
++					"contiguous memory\n", __FUNCTION__);
++				return NULL;
++			} else {
++				return (uint8_t *) (skb_shared->frags[i].page +
++						    skb_shared->frags[i].
++						    page_offset +
++						    offsetInBytes);
++			}
++		}
++		/*only possible if internal page sizes are set wrong */
++		if (offsetInBytes < 0) {
++			DPRINTK("%s error processing skbuff page frame "
++				"-- offset calculation \n", __FUNCTION__);
++			return NULL;
++		}
++	}
++	/*only possible if internal page sizes are set wrong */
++	DPRINTK("%s error processing skbuff page frame "
++		"-- ran out of page fragments, remaining offset = %d \n",
++		__FUNCTION__, offsetInBytes);
++	return NULL;
++
++}
++
++/* Name        : icp_ocfDrvDigestSkbFragListCheck
++ *
++ * Description : This function is used by icp_ocfDrvSkbuffDigestPointerFind to
++ * process the non-linear portion of the skbuff, if the fragmentation type is
++ * a linked list
++ *
++ */
++static inline uint8_t *icp_ocfDrvDigestSkbFragListCheck(struct sk_buff *skb,
++							struct skb_shared_info
++							*skb_shared,
++							int offsetInBytes,
++							uint32_t
++							digestSizeInBytes)
++{
++
++	struct sk_buff *skb_list = skb_shared->frag_list;
++	/*check added for readability */
++	if (NULL == skb_list) {
++		DPRINTK("%s error processing skbuff "
++			"-- no more list! \n", __FUNCTION__);
++		return NULL;
++	}
++
++	for (; skb_list; skb_list = skb_list->next) {
++		if (NULL == skb_list) {
++			DPRINTK("%s error processing skbuff "
++				"-- no more list! \n", __FUNCTION__);
++			return NULL;
++		}
++
++		if (offsetInBytes >= skb_list->len) {
++			offsetInBytes -= skb_list->len;
++
++		} else {
++			if (offsetInBytes + digestSizeInBytes > skb_list->len) {
++				DPRINTK("%s() Auth payload stretches accross "
++					"contiguous memory\n", __FUNCTION__);
++				return NULL;
++			} else {
++				return (uint8_t *)
++				    (skb_list->data + offsetInBytes);
++			}
++
++		}
++
++		/*This check is only needed if internal skb_list length values
++		   are set wrong. */
++		if (0 > offsetInBytes) {
++			DPRINTK("%s() error processing skbuff object -- offset "
++				"calculation \n", __FUNCTION__);
++			return NULL;
++		}
++
++	}
++
++	/* Catch-all for an unexpected for-loop exit.
++	   This code should never be reached. */
++	DPRINTK("%s() Catch-All hit! Process error.\n", __FUNCTION__);
++	return NULL;
++}
+diff -Nur linux-2.6.30.orig/crypto/ocf/ep80579/Makefile linux-2.6.30/crypto/ocf/ep80579/Makefile
+--- linux-2.6.30.orig/crypto/ocf/ep80579/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.30/crypto/ocf/ep80579/Makefile	2009-06-11 10:55:27.000000000 +0200
+@@ -0,0 +1,107 @@
++#########################################################################
++#
++#  Targets supported
++#  all     - builds everything and installs
++#  install - identical to all
++#  depend  - build dependencies
++#  clean   - clears derived objects except the .depend files
++#  distclean- clears all derived objects and the .depend file
++#  
++# @par
++# This file is provided under a dual BSD/GPLv2 license.  When using or 
++#   redistributing this file, you may do so under either license.
++# 
++#   GPL LICENSE SUMMARY
++# 
++#   Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
++# 
++#   This program is free software; you can redistribute it and/or modify 
++#   it under the terms of version 2 of the GNU General Public License as
++#   published by the Free Software Foundation.
++# 
++#   This program is distributed in the hope that it will be useful, but 
++#   WITHOUT ANY WARRANTY; without even the implied warranty of 
++#   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU 
++#   General Public License for more details.
++# 
++#   You should have received a copy of the GNU General Public License 
++#   along with this program; if not, write to the Free Software 
++#   Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++#   The full GNU General Public License is included in this distribution 
++#   in the file called LICENSE.GPL.
++# 
++#   Contact Information:
++#   Intel Corporation
++# 
++#   BSD LICENSE 
++# 
++#   Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
++#   All rights reserved.
++# 
++#   Redistribution and use in source and binary forms, with or without 
++#   modification, are permitted provided that the following conditions 
++#   are met:
++# 
++#     * Redistributions of source code must retain the above copyright 
++#       notice, this list of conditions and the following disclaimer.
++#     * Redistributions in binary form must reproduce the above copyright 
++#       notice, this list of conditions and the following disclaimer in 
++#       the documentation and/or other materials provided with the 
++#       distribution.
++#     * Neither the name of Intel Corporation nor the names of its 
++#       contributors may be used to endorse or promote products derived 
++#       from this software without specific prior written permission.
++# 
++#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
++#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
++#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
++#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
++#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
++#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
++#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
++#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
++#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
++#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
++#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++# 
++# 
++#  version: Security.L.1.0.130
++############################################################################
++
++
++####################Common variables and definitions########################
++
++# Ensure The ENV_DIR environmental var is defined.
++ifndef ICP_ENV_DIR
++$(error ICP_ENV_DIR is undefined. Please set the path to your environment makefile \
++        "-> setenv ICP_ENV_DIR <path>")
++endif
++
++#Add your project environment Makefile
++include $(ICP_ENV_DIR)/environment.mk
++
++#include the makefile with all the default and common Make variable definitions
++include $(ICP_BUILDSYSTEM_PATH)/build_files/common.mk
++
++#Add the name for the executable, Library or Module output definitions
++OUTPUT_NAME= icp_ocf
++
++# List of Source Files to be compiled 
++SOURCES= icp_common.c icp_sym.c icp_asym.c
++
++#common includes between all supported OSes
++INCLUDES= -I $(ICP_API_DIR) -I$(ICP_LAC_API) \
++-I$(ICP_OCF_SRC_DIR)
++
++# The location of the os level makefile needs to be changed.
++include $(ICP_ENV_DIR)/$(ICP_OS)_$(ICP_OS_LEVEL).mk
++
++# On the line directly below list the outputs you wish to build for,
++# e.g "lib_static lib_shared exe module" as show below
++install: module
++
++###################Include rules makefiles########################
++include $(ICP_BUILDSYSTEM_PATH)/build_files/rules.mk
++###################End of Rules inclusion#########################
++
++
+diff -Nur linux-2.6.30.orig/crypto/ocf/hifn/hifn7751.c linux-2.6.30/crypto/ocf/hifn/hifn7751.c
+--- linux-2.6.30.orig/crypto/ocf/hifn/hifn7751.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.30/crypto/ocf/hifn/hifn7751.c	2009-06-11 10:55:27.000000000 +0200
+@@ -0,0 +1,2970 @@
++/*	$OpenBSD: hifn7751.c,v 1.120 2002/05/17 00:33:34 deraadt Exp $	*/
++
++/*-
++ * Invertex AEON / Hifn 7751 driver
++ * Copyright (c) 1999 Invertex Inc. All rights reserved.
++ * Copyright (c) 1999 Theo de Raadt
++ * Copyright (c) 2000-2001 Network Security Technologies, Inc.
++ *			http://www.netsec.net
++ * Copyright (c) 2003 Hifn Inc.
++ *
++ * This driver is based on a previous driver by Invertex, for which they
++ * requested:  Please send any comments, feedback, bug-fixes, or feature
++ * requests to software@invertex.com.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ *
++ * 1. Redistributions of source code must retain the above copyright
++ *   notice, this list of conditions and the following disclaimer.
++ * 2. Redistributions in binary form must reproduce the above copyright
++ *   notice, this list of conditions and the following disclaimer in the
++ *   documentation and/or other materials provided with the distribution.
++ * 3. The name of the author may not be used to endorse or promote products
++ *   derived from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
++ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
++ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
++ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
++ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
++ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ *
++ * Effort sponsored in part by the Defense Advanced Research Projects
++ * Agency (DARPA) and Air Force Research Laboratory, Air Force
++ * Materiel Command, USAF, under agreement number F30602-01-2-0537.
++ *
++ *
++__FBSDID("$FreeBSD: src/sys/dev/hifn/hifn7751.c,v 1.40 2007/03/21 03:42:49 sam Exp $");
++ */
++
++/*
++ * Driver for various Hifn encryption processors.
++ */
++#ifndef AUTOCONF_INCLUDED
++#include <linux/config.h>
++#endif
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/list.h>
++#include <linux/slab.h>
++#include <linux/wait.h>
++#include <linux/sched.h>
++#include <linux/pci.h>
++#include <linux/delay.h>
++#include <linux/interrupt.h>
++#include <linux/spinlock.h>
++#include <linux/random.h>
++#include <linux/version.h>
++#include <linux/skbuff.h>
++#include <asm/io.h>
++
++#include <cryptodev.h>
++#include <uio.h>
++#include <hifn/hifn7751reg.h>
++#include <hifn/hifn7751var.h>
++
++#if 1
++#define	DPRINTF(a...)	if (hifn_debug) { \
++							printk("%s: ", sc ? \
++								device_get_nameunit(sc->sc_dev) : "hifn"); \
++							printk(a); \
++						} else
++#else
++#define	DPRINTF(a...)
++#endif
++
++static inline int
++pci_get_revid(struct pci_dev *dev)
++{
++	u8 rid = 0;
++	pci_read_config_byte(dev, PCI_REVISION_ID, &rid);
++	return rid;
++}
++
++static	struct hifn_stats hifnstats;
++
++#define	debug hifn_debug
++int hifn_debug = 0;
++module_param(hifn_debug, int, 0644);
++MODULE_PARM_DESC(hifn_debug, "Enable debug");
++
++int hifn_maxbatch = 1;
++module_param(hifn_maxbatch, int, 0644);
++MODULE_PARM_DESC(hifn_maxbatch, "max ops to batch w/o interrupt");
++
++#ifdef MODULE_PARM
++char *hifn_pllconfig = NULL;
++MODULE_PARM(hifn_pllconfig, "s");
++#else
++char hifn_pllconfig[32]; /* This setting is RO after loading */
++module_param_string(hifn_pllconfig, hifn_pllconfig, 32, 0444);
++#endif
++MODULE_PARM_DESC(hifn_pllconfig, "PLL config, ie., pci66, ext33, ...");
++
++#ifdef HIFN_VULCANDEV
++#include <sys/conf.h>
++#include <sys/uio.h>
++
++static struct cdevsw vulcanpk_cdevsw; /* forward declaration */
++#endif
++
++/*
++ * Prototypes and count for the pci_device structure
++ */
++static	int  hifn_probe(struct pci_dev *dev, const struct pci_device_id *ent);
++static	void hifn_remove(struct pci_dev *dev);
++
++static	int hifn_newsession(device_t, u_int32_t *, struct cryptoini *);
++static	int hifn_freesession(device_t, u_int64_t);
++static	int hifn_process(device_t, struct cryptop *, int);
++
++static device_method_t hifn_methods = {
++	/* crypto device methods */
++	DEVMETHOD(cryptodev_newsession,	hifn_newsession),
++	DEVMETHOD(cryptodev_freesession,hifn_freesession),
++	DEVMETHOD(cryptodev_process,	hifn_process),
++};
++
++static	void hifn_reset_board(struct hifn_softc *, int);
++static	void hifn_reset_puc(struct hifn_softc *);
++static	void hifn_puc_wait(struct hifn_softc *);
++static	int hifn_enable_crypto(struct hifn_softc *);
++static	void hifn_set_retry(struct hifn_softc *sc);
++static	void hifn_init_dma(struct hifn_softc *);
++static	void hifn_init_pci_registers(struct hifn_softc *);
++static	int hifn_sramsize(struct hifn_softc *);
++static	int hifn_dramsize(struct hifn_softc *);
++static	int hifn_ramtype(struct hifn_softc *);
++static	void hifn_sessions(struct hifn_softc *);
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
++static irqreturn_t hifn_intr(int irq, void *arg);
++#else
++static irqreturn_t hifn_intr(int irq, void *arg, struct pt_regs *regs);
++#endif
++static	u_int hifn_write_command(struct hifn_command *, u_int8_t *);
++static	u_int32_t hifn_next_signature(u_int32_t a, u_int cnt);
++static	void hifn_callback(struct hifn_softc *, struct hifn_command *, u_int8_t *);
++static	int hifn_crypto(struct hifn_softc *, struct hifn_command *, struct cryptop *, int);
++static	int hifn_readramaddr(struct hifn_softc *, int, u_int8_t *);
++static	int hifn_writeramaddr(struct hifn_softc *, int, u_int8_t *);
++static	int hifn_dmamap_load_src(struct hifn_softc *, struct hifn_command *);
++static	int hifn_dmamap_load_dst(struct hifn_softc *, struct hifn_command *);
++static	int hifn_init_pubrng(struct hifn_softc *);
++static	void hifn_tick(unsigned long arg);
++static	void hifn_abort(struct hifn_softc *);
++static	void hifn_alloc_slot(struct hifn_softc *, int *, int *, int *, int *);
++
++static	void hifn_write_reg_0(struct hifn_softc *, bus_size_t, u_int32_t);
++static	void hifn_write_reg_1(struct hifn_softc *, bus_size_t, u_int32_t);
++
++#ifdef CONFIG_OCF_RANDOMHARVEST
++static	int hifn_read_random(void *arg, u_int32_t *buf, int len);
++#endif
++
++#define HIFN_MAX_CHIPS	8
++static struct hifn_softc *hifn_chip_idx[HIFN_MAX_CHIPS];
++
++static __inline u_int32_t
++READ_REG_0(struct hifn_softc *sc, bus_size_t reg)
++{
++	u_int32_t v = readl(sc->sc_bar0 + reg);
++	sc->sc_bar0_lastreg = (bus_size_t) -1;
++	return (v);
++}
++#define	WRITE_REG_0(sc, reg, val)	hifn_write_reg_0(sc, reg, val)
++
++static __inline u_int32_t
++READ_REG_1(struct hifn_softc *sc, bus_size_t reg)
++{
++	u_int32_t v = readl(sc->sc_bar1 + reg);
++	sc->sc_bar1_lastreg = (bus_size_t) -1;
++	return (v);
++}
++#define	WRITE_REG_1(sc, reg, val)	hifn_write_reg_1(sc, reg, val)
++
++/*
++ * map in a given buffer (great on some arches :-)
++ */
++
++static int
++pci_map_uio(struct hifn_softc *sc, struct hifn_operand *buf, struct uio *uio)
++{
++	struct iovec *iov = uio->uio_iov;
++
++	DPRINTF("%s()\n", __FUNCTION__);
++
++	buf->mapsize = 0;
++	for (buf->nsegs = 0; buf->nsegs < uio->uio_iovcnt; ) {
++		buf->segs[buf->nsegs].ds_addr = pci_map_single(sc->sc_pcidev,
++				iov->iov_base, iov->iov_len,
++				PCI_DMA_BIDIRECTIONAL);
++		buf->segs[buf->nsegs].ds_len = iov->iov_len;
++		buf->mapsize += iov->iov_len;
++		iov++;
++		buf->nsegs++;
++	}
++	/* identify this buffer by the first segment */
++	buf->map = (void *) buf->segs[0].ds_addr;
++	return(0);
++}
++
++/*
++ * map in a given sk_buff
++ */
++
++static int
++pci_map_skb(struct hifn_softc *sc,struct hifn_operand *buf,struct sk_buff *skb)
++{
++	int i;
++
++	DPRINTF("%s()\n", __FUNCTION__);
++
++	buf->mapsize = 0;
++
++	buf->segs[0].ds_addr = pci_map_single(sc->sc_pcidev,
++			skb->data, skb_headlen(skb), PCI_DMA_BIDIRECTIONAL);
++	buf->segs[0].ds_len = skb_headlen(skb);
++	buf->mapsize += buf->segs[0].ds_len;
++
++	buf->nsegs = 1;
++
++	for (i = 0; i < skb_shinfo(skb)->nr_frags; ) {
++		buf->segs[buf->nsegs].ds_len = skb_shinfo(skb)->frags[i].size;
++		buf->segs[buf->nsegs].ds_addr = pci_map_single(sc->sc_pcidev,
++				page_address(skb_shinfo(skb)->frags[i].page) +
++					skb_shinfo(skb)->frags[i].page_offset,
++				buf->segs[buf->nsegs].ds_len, PCI_DMA_BIDIRECTIONAL);
++		buf->mapsize += buf->segs[buf->nsegs].ds_len;
++		buf->nsegs++;
++	}
++
++	/* identify this buffer by the first segment */
++	buf->map = (void *) buf->segs[0].ds_addr;
++	return(0);
++}
++
++/*
++ * map in a given contiguous buffer
++ */
++
++static int
++pci_map_buf(struct hifn_softc *sc,struct hifn_operand *buf, void *b, int len)
++{
++	DPRINTF("%s()\n", __FUNCTION__);
++
++	buf->mapsize = 0;
++	buf->segs[0].ds_addr = pci_map_single(sc->sc_pcidev,
++			b, len, PCI_DMA_BIDIRECTIONAL);
++	buf->segs[0].ds_len = len;
++	buf->mapsize += buf->segs[0].ds_len;
++	buf->nsegs = 1;
++
++	/* identify this buffer by the first segment */
++	buf->map = (void *) buf->segs[0].ds_addr;
++	return(0);
++}
++
++#if 0 /* not needed at this time */
++static void
++pci_sync_iov(struct hifn_softc *sc, struct hifn_operand *buf)
++{
++	int i;
++
++	DPRINTF("%s()\n", __FUNCTION__);
++	for (i = 0; i < buf->nsegs; i++)
++		pci_dma_sync_single_for_cpu(sc->sc_pcidev, buf->segs[i].ds_addr,
++				buf->segs[i].ds_len, PCI_DMA_BIDIRECTIONAL);
++}
++#endif
++
++static void
++pci_unmap_buf(struct hifn_softc *sc, struct hifn_operand *buf)
++{
++	int i;
++	DPRINTF("%s()\n", __FUNCTION__);
++	for (i = 0; i < buf->nsegs; i++) {
++		pci_unmap_single(sc->sc_pcidev, buf->segs[i].ds_addr,
++				buf->segs[i].ds_len, PCI_DMA_BIDIRECTIONAL);
++		buf->segs[i].ds_addr = 0;
++		buf->segs[i].ds_len = 0;
++	}
++	buf->nsegs = 0;
++	buf->mapsize = 0;
++	buf->map = 0;
++}
++
++static const char*
++hifn_partname(struct hifn_softc *sc)
++{
++	/* XXX sprintf numbers when not decoded */
++	switch (pci_get_vendor(sc->sc_pcidev)) {
++	case PCI_VENDOR_HIFN:
++		switch (pci_get_device(sc->sc_pcidev)) {
++		case PCI_PRODUCT_HIFN_6500:	return "Hifn 6500";
++		case PCI_PRODUCT_HIFN_7751:	return "Hifn 7751";
++		case PCI_PRODUCT_HIFN_7811:	return "Hifn 7811";
++		case PCI_PRODUCT_HIFN_7951:	return "Hifn 7951";
++		case PCI_PRODUCT_HIFN_7955:	return "Hifn 7955";
++		case PCI_PRODUCT_HIFN_7956:	return "Hifn 7956";
++		}
++		return "Hifn unknown-part";
++	case PCI_VENDOR_INVERTEX:
++		switch (pci_get_device(sc->sc_pcidev)) {
++		case PCI_PRODUCT_INVERTEX_AEON:	return "Invertex AEON";
++		}
++		return "Invertex unknown-part";
++	case PCI_VENDOR_NETSEC:
++		switch (pci_get_device(sc->sc_pcidev)) {
++		case PCI_PRODUCT_NETSEC_7751:	return "NetSec 7751";
++		}
++		return "NetSec unknown-part";
++	}
++	return "Unknown-vendor unknown-part";
++}
++
++static u_int
++checkmaxmin(struct pci_dev *dev, const char *what, u_int v, u_int min, u_int max)
++{
++	struct hifn_softc *sc = pci_get_drvdata(dev);
++	if (v > max) {
++		device_printf(sc->sc_dev, "Warning, %s %u out of range, "
++			"using max %u\n", what, v, max);
++		v = max;
++	} else if (v < min) {
++		device_printf(sc->sc_dev, "Warning, %s %u out of range, "
++			"using min %u\n", what, v, min);
++		v = min;
++	}
++	return v;
++}
++
++/*
++ * Select PLL configuration for 795x parts.  This is complicated in
++ * that we cannot determine the optimal parameters without user input.
++ * The reference clock is derived from an external clock through a
++ * multiplier.  The external clock is either the host bus (i.e. PCI)
++ * or an external clock generator.  When using the PCI bus we assume
++ * the clock is either 33 or 66 MHz; for an external source we cannot
++ * tell the speed.
++ *
++ * PLL configuration is done with a string: "pci" for PCI bus, or "ext"
++ * for an external source, followed by the frequency.  We calculate
++ * the appropriate multiplier and PLL register contents accordingly.
++ * When no configuration is given we default to "pci66" since that
++ * always will allow the card to work.  If a card is using the PCI
++ * bus clock and in a 33MHz slot then it will be operating at half
++ * speed until the correct information is provided.
++ *
++ * We use a default setting of "ext66" because according to Mike Ham
++ * of HiFn, almost every board in existence has an external crystal
++ * populated at 66Mhz. Using PCI can be a problem on modern motherboards,
++ * because PCI33 can have clocks from 0 to 33Mhz, and some have
++ * non-PCI-compliant spread-spectrum clocks, which can confuse the pll.
++ */
++static void
++hifn_getpllconfig(struct pci_dev *dev, u_int *pll)
++{
++	const char *pllspec = hifn_pllconfig;
++	u_int freq, mul, fl, fh;
++	u_int32_t pllconfig;
++	char *nxt;
++
++	if (pllspec == NULL)
++		pllspec = "ext66";
++	fl = 33, fh = 66;
++	pllconfig = 0;
++	if (strncmp(pllspec, "ext", 3) == 0) {
++		pllspec += 3;
++		pllconfig |= HIFN_PLL_REF_SEL;
++		switch (pci_get_device(dev)) {
++		case PCI_PRODUCT_HIFN_7955:
++		case PCI_PRODUCT_HIFN_7956:
++			fl = 20, fh = 100;
++			break;
++#ifdef notyet
++		case PCI_PRODUCT_HIFN_7954:
++			fl = 20, fh = 66;
++			break;
++#endif
++		}
++	} else if (strncmp(pllspec, "pci", 3) == 0)
++		pllspec += 3;
++	freq = strtoul(pllspec, &nxt, 10);
++	if (nxt == pllspec)
++		freq = 66;
++	else
++		freq = checkmaxmin(dev, "frequency", freq, fl, fh);
++	/*
++	 * Calculate multiplier.  We target a Fck of 266 MHz,
++	 * allowing only even values, possibly rounded down.
++	 * Multipliers > 8 must set the charge pump current.
++	 */
++	mul = checkmaxmin(dev, "PLL divisor", (266 / freq) &~ 1, 2, 12);
++	pllconfig |= (mul / 2 - 1) << HIFN_PLL_ND_SHIFT;
++	if (mul > 8)
++		pllconfig |= HIFN_PLL_IS;
++	*pll = pllconfig;
++}
++
++/*
++ * Attach an interface that successfully probed.
++ */
++static int
++hifn_probe(struct pci_dev *dev, const struct pci_device_id *ent)
++{
++	struct hifn_softc *sc = NULL;
++	char rbase;
++	u_int16_t ena, rev;
++	int rseg, rc;
++	unsigned long mem_start, mem_len;
++	static int num_chips = 0;
++
++	DPRINTF("%s()\n", __FUNCTION__);
++
++	if (pci_enable_device(dev) < 0)
++		return(-ENODEV);
++
++	if (pci_set_mwi(dev))
++		return(-ENODEV);
++
++	if (!dev->irq) {
++		printk("hifn: found device with no IRQ assigned. check BIOS settings!");
++		pci_disable_device(dev);
++		return(-ENODEV);
++	}
++
++	sc = (struct hifn_softc *) kmalloc(sizeof(*sc), GFP_KERNEL);
++	if (!sc)
++		return(-ENOMEM);
++	memset(sc, 0, sizeof(*sc));
++
++	softc_device_init(sc, "hifn", num_chips, hifn_methods);
++
++	sc->sc_pcidev = dev;
++	sc->sc_irq = -1;
++	sc->sc_cid = -1;
++	sc->sc_num = num_chips++;
++	if (sc->sc_num < HIFN_MAX_CHIPS)
++		hifn_chip_idx[sc->sc_num] = sc;
++
++	pci_set_drvdata(sc->sc_pcidev, sc);
++
++	spin_lock_init(&sc->sc_mtx);
++
++	/* XXX handle power management */
++
++	/*
++	 * The 7951 and 795x have a random number generator and
++	 * public key support; note this.
++	 */
++	if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
++	    (pci_get_device(dev) == PCI_PRODUCT_HIFN_7951 ||
++	     pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
++	     pci_get_device(dev) == PCI_PRODUCT_HIFN_7956))
++		sc->sc_flags = HIFN_HAS_RNG | HIFN_HAS_PUBLIC;
++	/*
++	 * The 7811 has a random number generator and
++	 * we also note it's identity 'cuz of some quirks.
++	 */
++	if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
++	    pci_get_device(dev) == PCI_PRODUCT_HIFN_7811)
++		sc->sc_flags |= HIFN_IS_7811 | HIFN_HAS_RNG;
++
++	/*
++	 * The 795x parts support AES.
++	 */
++	if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
++	    (pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
++	     pci_get_device(dev) == PCI_PRODUCT_HIFN_7956)) {
++		sc->sc_flags |= HIFN_IS_7956 | HIFN_HAS_AES;
++		/*
++		 * Select PLL configuration.  This depends on the
++		 * bus and board design and must be manually configured
++		 * if the default setting is unacceptable.
++		 */
++		hifn_getpllconfig(dev, &sc->sc_pllconfig);
++	}
++
++	/*
++	 * Setup PCI resources. Note that we record the bus
++	 * tag and handle for each register mapping, this is
++	 * used by the READ_REG_0, WRITE_REG_0, READ_REG_1,
++	 * and WRITE_REG_1 macros throughout the driver.
++	 */
++	mem_start = pci_resource_start(sc->sc_pcidev, 0);
++	mem_len   = pci_resource_len(sc->sc_pcidev, 0);
++	sc->sc_bar0 = (ocf_iomem_t) ioremap(mem_start, mem_len);
++	if (!sc->sc_bar0) {
++		device_printf(sc->sc_dev, "cannot map bar%d register space\n", 0);
++		goto fail;
++	}
++	sc->sc_bar0_lastreg = (bus_size_t) -1;
++
++	mem_start = pci_resource_start(sc->sc_pcidev, 1);
++	mem_len   = pci_resource_len(sc->sc_pcidev, 1);
++	sc->sc_bar1 = (ocf_iomem_t) ioremap(mem_start, mem_len);
++	if (!sc->sc_bar1) {
++		device_printf(sc->sc_dev, "cannot map bar%d register space\n", 1);
++		goto fail;
++	}
++	sc->sc_bar1_lastreg = (bus_size_t) -1;
++
++	/* fix up the bus size */
++	if (pci_set_dma_mask(dev, DMA_32BIT_MASK)) {
++		device_printf(sc->sc_dev, "No usable DMA configuration, aborting.\n");
++		goto fail;
++	}
++	if (pci_set_consistent_dma_mask(dev, DMA_32BIT_MASK)) {
++		device_printf(sc->sc_dev,
++				"No usable consistent DMA configuration, aborting.\n");
++		goto fail;
++	}
++
++	hifn_set_retry(sc);
++
++	/*
++	 * Setup the area where the Hifn DMA's descriptors
++	 * and associated data structures.
++	 */
++	sc->sc_dma = (struct hifn_dma *) pci_alloc_consistent(dev,
++			sizeof(*sc->sc_dma),
++			&sc->sc_dma_physaddr);
++	if (!sc->sc_dma) {
++		device_printf(sc->sc_dev, "cannot alloc sc_dma\n");
++		goto fail;
++	}
++	bzero(sc->sc_dma, sizeof(*sc->sc_dma));
++
++	/*
++	 * Reset the board and do the ``secret handshake''
++	 * to enable the crypto support.  Then complete the
++	 * initialization procedure by setting up the interrupt
++	 * and hooking in to the system crypto support so we'll
++	 * get used for system services like the crypto device,
++	 * IPsec, RNG device, etc.
++	 */
++	hifn_reset_board(sc, 0);
++
++	if (hifn_enable_crypto(sc) != 0) {
++		device_printf(sc->sc_dev, "crypto enabling failed\n");
++		goto fail;
++	}
++	hifn_reset_puc(sc);
++
++	hifn_init_dma(sc);
++	hifn_init_pci_registers(sc);
++
++	pci_set_master(sc->sc_pcidev);
++
++	/* XXX can't dynamically determine ram type for 795x; force dram */
++	if (sc->sc_flags & HIFN_IS_7956)
++		sc->sc_drammodel = 1;
++	else if (hifn_ramtype(sc))
++		goto fail;
++
++	if (sc->sc_drammodel == 0)
++		hifn_sramsize(sc);
++	else
++		hifn_dramsize(sc);
++
++	/*
++	 * Workaround for NetSec 7751 rev A: half ram size because two
++	 * of the address lines were left floating
++	 */
++	if (pci_get_vendor(dev) == PCI_VENDOR_NETSEC &&
++	    pci_get_device(dev) == PCI_PRODUCT_NETSEC_7751 &&
++	    pci_get_revid(dev) == 0x61)	/*XXX???*/
++		sc->sc_ramsize >>= 1;
++
++	/*
++	 * Arrange the interrupt line.
++	 */
++	rc = request_irq(dev->irq, hifn_intr, IRQF_SHARED, "hifn", sc);
++	if (rc) {
++		device_printf(sc->sc_dev, "could not map interrupt: %d\n", rc);
++		goto fail;
++	}
++	sc->sc_irq = dev->irq;
++
++	hifn_sessions(sc);
++
++	/*
++	 * NB: Keep only the low 16 bits; this masks the chip id
++	 *     from the 7951.
++	 */
++	rev = READ_REG_1(sc, HIFN_1_REVID) & 0xffff;
++
++	rseg = sc->sc_ramsize / 1024;
++	rbase = 'K';
++	if (sc->sc_ramsize >= (1024 * 1024)) {
++		rbase = 'M';
++		rseg /= 1024;
++	}
++	device_printf(sc->sc_dev, "%s, rev %u, %d%cB %cram",
++		hifn_partname(sc), rev,
++		rseg, rbase, sc->sc_drammodel ? 'd' : 's');
++	if (sc->sc_flags & HIFN_IS_7956)
++		printf(", pll=0x%x<%s clk, %ux mult>",
++			sc->sc_pllconfig,
++			sc->sc_pllconfig & HIFN_PLL_REF_SEL ? "ext" : "pci",
++			2 + 2*((sc->sc_pllconfig & HIFN_PLL_ND) >> 11));
++	printf("\n");
++
++	sc->sc_cid = crypto_get_driverid(softc_get_device(sc),CRYPTOCAP_F_HARDWARE);
++	if (sc->sc_cid < 0) {
++		device_printf(sc->sc_dev, "could not get crypto driver id\n");
++		goto fail;
++	}
++
++	WRITE_REG_0(sc, HIFN_0_PUCNFG,
++	    READ_REG_0(sc, HIFN_0_PUCNFG) | HIFN_PUCNFG_CHIPID);
++	ena = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;
++
++	switch (ena) {
++	case HIFN_PUSTAT_ENA_2:
++		crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
++		crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0);
++		if (sc->sc_flags & HIFN_HAS_AES)
++			crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
++		/*FALLTHROUGH*/
++	case HIFN_PUSTAT_ENA_1:
++		crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0);
++		crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0);
++		crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
++		crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
++		crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
++		break;
++	}
++
++	if (sc->sc_flags & (HIFN_HAS_PUBLIC | HIFN_HAS_RNG))
++		hifn_init_pubrng(sc);
++
++	init_timer(&sc->sc_tickto);
++	sc->sc_tickto.function = hifn_tick;
++	sc->sc_tickto.data = (unsigned long) sc->sc_num;
++	mod_timer(&sc->sc_tickto, jiffies + HZ);
++
++	return (0);
++
++fail:
++    if (sc->sc_cid >= 0)
++        crypto_unregister_all(sc->sc_cid);
++    if (sc->sc_irq != -1)
++        free_irq(sc->sc_irq, sc);
++    if (sc->sc_dma) {
++		/* Turn off DMA polling */
++		WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
++			HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
++
++        pci_free_consistent(sc->sc_pcidev,
++				sizeof(*sc->sc_dma),
++                sc->sc_dma, sc->sc_dma_physaddr);
++	}
++    kfree(sc);
++	return (-ENXIO);
++}
++
++/*
++ * Detach an interface that successfully probed.
++ */
++static void
++hifn_remove(struct pci_dev *dev)
++{
++	struct hifn_softc *sc = pci_get_drvdata(dev);
++	unsigned long l_flags;
++
++	DPRINTF("%s()\n", __FUNCTION__);
++
++	KASSERT(sc != NULL, ("hifn_detach: null software carrier!"));
++
++	/* disable interrupts */
++	HIFN_LOCK(sc);
++	WRITE_REG_1(sc, HIFN_1_DMA_IER, 0);
++	HIFN_UNLOCK(sc);
++
++	/*XXX other resources */
++	del_timer_sync(&sc->sc_tickto);
++
++	/* Turn off DMA polling */
++	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
++	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
++
++	crypto_unregister_all(sc->sc_cid);
++
++	free_irq(sc->sc_irq, sc);
++
++	pci_free_consistent(sc->sc_pcidev, sizeof(*sc->sc_dma),
++                sc->sc_dma, sc->sc_dma_physaddr);
++}
++
++
++static int
++hifn_init_pubrng(struct hifn_softc *sc)
++{
++	int i;
++
++	DPRINTF("%s()\n", __FUNCTION__);
++
++	if ((sc->sc_flags & HIFN_IS_7811) == 0) {
++		/* Reset 7951 public key/rng engine */
++		WRITE_REG_1(sc, HIFN_1_PUB_RESET,
++		    READ_REG_1(sc, HIFN_1_PUB_RESET) | HIFN_PUBRST_RESET);
++
++		for (i = 0; i < 100; i++) {
++			DELAY(1000);
++			if ((READ_REG_1(sc, HIFN_1_PUB_RESET) &
++			    HIFN_PUBRST_RESET) == 0)
++				break;
++		}
++
++		if (i == 100) {
++			device_printf(sc->sc_dev, "public key init failed\n");
++			return (1);
++		}
++	}
++
++	/* Enable the rng, if available */
++#ifdef CONFIG_OCF_RANDOMHARVEST
++	if (sc->sc_flags & HIFN_HAS_RNG) {
++		if (sc->sc_flags & HIFN_IS_7811) {
++			u_int32_t r;
++			r = READ_REG_1(sc, HIFN_1_7811_RNGENA);
++			if (r & HIFN_7811_RNGENA_ENA) {
++				r &= ~HIFN_7811_RNGENA_ENA;
++				WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
++			}
++			WRITE_REG_1(sc, HIFN_1_7811_RNGCFG,
++			    HIFN_7811_RNGCFG_DEFL);
++			r |= HIFN_7811_RNGENA_ENA;
++			WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
++		} else
++			WRITE_REG_1(sc, HIFN_1_RNG_CONFIG,
++			    READ_REG_1(sc, HIFN_1_RNG_CONFIG) |
++			    HIFN_RNGCFG_ENA);
++
++		sc->sc_rngfirst = 1;
++		crypto_rregister(sc->sc_cid, hifn_read_random, sc);
++	}
++#endif
++
++	/* Enable public key engine, if available */
++	if (sc->sc_flags & HIFN_HAS_PUBLIC) {
++		WRITE_REG_1(sc, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE);
++		sc->sc_dmaier |= HIFN_DMAIER_PUBDONE;
++		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
++#ifdef HIFN_VULCANDEV
++		sc->sc_pkdev = make_dev(&vulcanpk_cdevsw, 0, 
++					UID_ROOT, GID_WHEEL, 0666,
++					"vulcanpk");
++		sc->sc_pkdev->si_drv1 = sc;
++#endif
++	}
++
++	return (0);
++}
++
++#ifdef CONFIG_OCF_RANDOMHARVEST
++static int
++hifn_read_random(void *arg, u_int32_t *buf, int len)
++{
++	struct hifn_softc *sc = (struct hifn_softc *) arg;
++	u_int32_t sts;
++	int i, rc = 0;
++
++	if (len <= 0)
++		return rc;
++
++	if (sc->sc_flags & HIFN_IS_7811) {
++		/* ONLY VALID ON 7811!!!! */
++		for (i = 0; i < 5; i++) {
++			sts = READ_REG_1(sc, HIFN_1_7811_RNGSTS);
++			if (sts & HIFN_7811_RNGSTS_UFL) {
++				device_printf(sc->sc_dev,
++					      "RNG underflow: disabling\n");
++				/* DAVIDM perhaps return -1 */
++				break;
++			}
++			if ((sts & HIFN_7811_RNGSTS_RDY) == 0)
++				break;
++
++			/*
++			 * There are at least two words in the RNG FIFO
++			 * at this point.
++			 */
++			if (rc < len)
++				buf[rc++] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
++			if (rc < len)
++				buf[rc++] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
++		}
++	} else
++		buf[rc++] = READ_REG_1(sc, HIFN_1_RNG_DATA);
++
++	/* NB: discard first data read */
++	if (sc->sc_rngfirst) {
++		sc->sc_rngfirst = 0;
++		rc = 0;
++	}
++
++	return(rc);
++}
++#endif /* CONFIG_OCF_RANDOMHARVEST */
++
++static void
++hifn_puc_wait(struct hifn_softc *sc)
++{
++	int i;
++	int reg = HIFN_0_PUCTRL;
++
++	if (sc->sc_flags & HIFN_IS_7956) {
++		reg = HIFN_0_PUCTRL2;
++	}
++
++	for (i = 5000; i > 0; i--) {
++		DELAY(1);
++		if (!(READ_REG_0(sc, reg) & HIFN_PUCTRL_RESET))
++			break;
++	}
++	if (!i)
++		device_printf(sc->sc_dev, "proc unit did not reset(0x%x)\n",
++				READ_REG_0(sc, HIFN_0_PUCTRL));
++}
++
++/*
++ * Reset the processing unit.
++ */
++static void
++hifn_reset_puc(struct hifn_softc *sc)
++{
++	/* Reset processing unit */
++	int reg = HIFN_0_PUCTRL;
++
++	if (sc->sc_flags & HIFN_IS_7956) {
++		reg = HIFN_0_PUCTRL2;
++	}
++	WRITE_REG_0(sc, reg, HIFN_PUCTRL_DMAENA);
++
++	hifn_puc_wait(sc);
++}
++
++/*
++ * Set the Retry and TRDY registers; note that we set them to
++ * zero because the 7811 locks up when forced to retry (section
++ * 3.6 of "Specification Update SU-0014-04".  Not clear if we
++ * should do this for all Hifn parts, but it doesn't seem to hurt.
++ */
++static void
++hifn_set_retry(struct hifn_softc *sc)
++{
++	DPRINTF("%s()\n", __FUNCTION__);
++	/* NB: RETRY only responds to 8-bit reads/writes */
++	pci_write_config_byte(sc->sc_pcidev, HIFN_RETRY_TIMEOUT, 0);
++	pci_write_config_dword(sc->sc_pcidev, HIFN_TRDY_TIMEOUT, 0);
++}
++
++/*
++ * Resets the board.  Values in the regesters are left as is
++ * from the reset (i.e. initial values are assigned elsewhere).
++ */
++static void
++hifn_reset_board(struct hifn_softc *sc, int full)
++{
++	u_int32_t reg;
++
++	DPRINTF("%s()\n", __FUNCTION__);
++	/*
++	 * Set polling in the DMA configuration register to zero.  0x7 avoids
++	 * resetting the board and zeros out the other fields.
++	 */
++	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
++	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
++
++	/*
++	 * Now that polling has been disabled, we have to wait 1 ms
++	 * before resetting the board.
++	 */
++	DELAY(1000);
++
++	/* Reset the DMA unit */
++	if (full) {
++		WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE);
++		DELAY(1000);
++	} else {
++		WRITE_REG_1(sc, HIFN_1_DMA_CNFG,
++		    HIFN_DMACNFG_MODE | HIFN_DMACNFG_MSTRESET);
++		hifn_reset_puc(sc);
++	}
++
++	KASSERT(sc->sc_dma != NULL, ("hifn_reset_board: null DMA tag!"));
++	bzero(sc->sc_dma, sizeof(*sc->sc_dma));
++
++	/* Bring dma unit out of reset */
++	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
++	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
++
++	hifn_puc_wait(sc);
++	hifn_set_retry(sc);
++
++	if (sc->sc_flags & HIFN_IS_7811) {
++		for (reg = 0; reg < 1000; reg++) {
++			if (READ_REG_1(sc, HIFN_1_7811_MIPSRST) &
++			    HIFN_MIPSRST_CRAMINIT)
++				break;
++			DELAY(1000);
++		}
++		if (reg == 1000)
++			device_printf(sc->sc_dev, ": cram init timeout\n");
++	} else {
++	  /* set up DMA configuration register #2 */
++	  /* turn off all PK and BAR0 swaps */
++	  WRITE_REG_1(sc, HIFN_1_DMA_CNFG2,
++		      (3 << HIFN_DMACNFG2_INIT_WRITE_BURST_SHIFT)|
++		      (3 << HIFN_DMACNFG2_INIT_READ_BURST_SHIFT)|
++		      (2 << HIFN_DMACNFG2_TGT_WRITE_BURST_SHIFT)|
++		      (2 << HIFN_DMACNFG2_TGT_READ_BURST_SHIFT));
++	}
++}
++
++static u_int32_t
++hifn_next_signature(u_int32_t a, u_int cnt)
++{
++	int i;
++	u_int32_t v;
++
++	for (i = 0; i < cnt; i++) {
++
++		/* get the parity */
++		v = a & 0x80080125;
++		v ^= v >> 16;
++		v ^= v >> 8;
++		v ^= v >> 4;
++		v ^= v >> 2;
++		v ^= v >> 1;
++
++		a = (v & 1) ^ (a << 1);
++	}
++
++	return a;
++}
++
++
++/*
++ * Checks to see if crypto is already enabled.  If crypto isn't enable,
++ * "hifn_enable_crypto" is called to enable it.  The check is important,
++ * as enabling crypto twice will lock the board.
++ */
++static int 
++hifn_enable_crypto(struct hifn_softc *sc)
++{
++	u_int32_t dmacfg, ramcfg, encl, addr, i;
++	char offtbl[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++					  0x00, 0x00, 0x00, 0x00 };
++
++	DPRINTF("%s()\n", __FUNCTION__);
++
++	ramcfg = READ_REG_0(sc, HIFN_0_PUCNFG);
++	dmacfg = READ_REG_1(sc, HIFN_1_DMA_CNFG);
++
++	/*
++	 * The RAM config register's encrypt level bit needs to be set before
++	 * every read performed on the encryption level register.
++	 */
++	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
++
++	encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;
++
++	/*
++	 * Make sure we don't re-unlock.  Two unlocks kills chip until the
++	 * next reboot.
++	 */
++	if (encl == HIFN_PUSTAT_ENA_1 || encl == HIFN_PUSTAT_ENA_2) {
++#ifdef HIFN_DEBUG
++		if (hifn_debug)
++			device_printf(sc->sc_dev,
++			    "Strong crypto already enabled!\n");
++#endif
++		goto report;
++	}
++
++	if (encl != 0 && encl != HIFN_PUSTAT_ENA_0) {
++#ifdef HIFN_DEBUG
++		if (hifn_debug)
++			device_printf(sc->sc_dev,
++			      "Unknown encryption level 0x%x\n", encl);
++#endif
++		return 1;
++	}
++
++	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_UNLOCK |
++	    HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
++	DELAY(1000);
++	addr = READ_REG_1(sc, HIFN_UNLOCK_SECRET1);
++	DELAY(1000);
++	WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, 0);
++	DELAY(1000);
++
++	for (i = 0; i <= 12; i++) {
++		addr = hifn_next_signature(addr, offtbl[i] + 0x101);
++		WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, addr);
++
++		DELAY(1000);
++	}
++
++	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
++	encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;
++
++#ifdef HIFN_DEBUG
++	if (hifn_debug) {
++		if (encl != HIFN_PUSTAT_ENA_1 && encl != HIFN_PUSTAT_ENA_2)
++			device_printf(sc->sc_dev, "Engine is permanently "
++				"locked until next system reset!\n");
++		else
++			device_printf(sc->sc_dev, "Engine enabled "
++				"successfully!\n");
++	}
++#endif
++
++report:
++	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg);
++	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, dmacfg);
++
++	switch (encl) {
++	case HIFN_PUSTAT_ENA_1:
++	case HIFN_PUSTAT_ENA_2:
++		break;
++	case HIFN_PUSTAT_ENA_0:
++	default:
++		device_printf(sc->sc_dev, "disabled\n");
++		break;
++	}
++
++	return 0;
++}
++
++/*
++ * Give initial values to the registers listed in the "Register Space"
++ * section of the HIFN Software Development reference manual.
++ */
++static void 
++hifn_init_pci_registers(struct hifn_softc *sc)
++{
++	DPRINTF("%s()\n", __FUNCTION__);
++
++	/* write fixed values needed by the Initialization registers */
++	WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
++	WRITE_REG_0(sc, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD);
++	WRITE_REG_0(sc, HIFN_0_PUIER, HIFN_PUIER_DSTOVER);
++
++	/* write all 4 ring address registers */
++	WRITE_REG_1(sc, HIFN_1_DMA_CRAR, sc->sc_dma_physaddr +
++	    offsetof(struct hifn_dma, cmdr[0]));
++	WRITE_REG_1(sc, HIFN_1_DMA_SRAR, sc->sc_dma_physaddr +
++	    offsetof(struct hifn_dma, srcr[0]));
++	WRITE_REG_1(sc, HIFN_1_DMA_DRAR, sc->sc_dma_physaddr +
++	    offsetof(struct hifn_dma, dstr[0]));
++	WRITE_REG_1(sc, HIFN_1_DMA_RRAR, sc->sc_dma_physaddr +
++	    offsetof(struct hifn_dma, resr[0]));
++
++	DELAY(2000);
++
++	/* write status register */
++	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
++	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
++	    HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS |
++	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
++	    HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
++	    HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
++	    HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
++	    HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
++	    HIFN_DMACSR_S_WAIT |
++	    HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
++	    HIFN_DMACSR_C_WAIT |
++	    HIFN_DMACSR_ENGINE |
++	    ((sc->sc_flags & HIFN_HAS_PUBLIC) ?
++		HIFN_DMACSR_PUBDONE : 0) |
++	    ((sc->sc_flags & HIFN_IS_7811) ?
++		HIFN_DMACSR_ILLW | HIFN_DMACSR_ILLR : 0));
++
++	sc->sc_d_busy = sc->sc_r_busy = sc->sc_s_busy = sc->sc_c_busy = 0;
++	sc->sc_dmaier |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT |
++	    HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER |
++	    HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT |
++	    ((sc->sc_flags & HIFN_IS_7811) ?
++		HIFN_DMAIER_ILLW | HIFN_DMAIER_ILLR : 0);
++	sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
++	WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
++
++
++	if (sc->sc_flags & HIFN_IS_7956) {
++		u_int32_t pll;
++
++		WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
++		    HIFN_PUCNFG_TCALLPHASES |
++		    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32);
++
++		/* turn off the clocks and insure bypass is set */
++		pll = READ_REG_1(sc, HIFN_1_PLL);
++		pll = (pll &~ (HIFN_PLL_PK_CLK_SEL | HIFN_PLL_PE_CLK_SEL))
++		  | HIFN_PLL_BP | HIFN_PLL_MBSET;
++		WRITE_REG_1(sc, HIFN_1_PLL, pll);
++		DELAY(10*1000);		/* 10ms */
++
++		/* change configuration */
++		pll = (pll &~ HIFN_PLL_CONFIG) | sc->sc_pllconfig;
++		WRITE_REG_1(sc, HIFN_1_PLL, pll);
++		DELAY(10*1000);		/* 10ms */
++
++		/* disable bypass */
++		pll &= ~HIFN_PLL_BP;
++		WRITE_REG_1(sc, HIFN_1_PLL, pll);
++		/* enable clocks with new configuration */
++		pll |= HIFN_PLL_PK_CLK_SEL | HIFN_PLL_PE_CLK_SEL;
++		WRITE_REG_1(sc, HIFN_1_PLL, pll);
++	} else {
++		WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
++		    HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES |
++		    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 |
++		    (sc->sc_drammodel ? HIFN_PUCNFG_DRAM : HIFN_PUCNFG_SRAM));
++	}
++
++	WRITE_REG_0(sc, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
++	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
++	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST |
++	    ((HIFN_POLL_FREQUENCY << 16 ) & HIFN_DMACNFG_POLLFREQ) |
++	    ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL));
++}
++
++/*
++ * The maximum number of sessions supported by the card
++ * is dependent on the amount of context ram, which
++ * encryption algorithms are enabled, and how compression
++ * is configured.  This should be configured before this
++ * routine is called.
++ */
++static void
++hifn_sessions(struct hifn_softc *sc)
++{
++	u_int32_t pucnfg;
++	int ctxsize;
++
++	DPRINTF("%s()\n", __FUNCTION__);
++
++	pucnfg = READ_REG_0(sc, HIFN_0_PUCNFG);
++
++	if (pucnfg & HIFN_PUCNFG_COMPSING) {
++		if (pucnfg & HIFN_PUCNFG_ENCCNFG)
++			ctxsize = 128;
++		else
++			ctxsize = 512;
++		/*
++		 * 7955/7956 has internal context memory of 32K
++		 */
++		if (sc->sc_flags & HIFN_IS_7956)
++			sc->sc_maxses = 32768 / ctxsize;
++		else
++			sc->sc_maxses = 1 +
++			    ((sc->sc_ramsize - 32768) / ctxsize);
++	} else
++		sc->sc_maxses = sc->sc_ramsize / 16384;
++
++	if (sc->sc_maxses > 2048)
++		sc->sc_maxses = 2048;
++}
++
++/*
++ * Determine ram type (sram or dram).  Board should be just out of a reset
++ * state when this is called.
++ */
++static int
++hifn_ramtype(struct hifn_softc *sc)
++{
++	u_int8_t data[8], dataexpect[8];
++	int i;
++
++	for (i = 0; i < sizeof(data); i++)
++		data[i] = dataexpect[i] = 0x55;
++	if (hifn_writeramaddr(sc, 0, data))
++		return (-1);
++	if (hifn_readramaddr(sc, 0, data))
++		return (-1);
++	if (bcmp(data, dataexpect, sizeof(data)) != 0) {
++		sc->sc_drammodel = 1;
++		return (0);
++	}
++
++	for (i = 0; i < sizeof(data); i++)
++		data[i] = dataexpect[i] = 0xaa;
++	if (hifn_writeramaddr(sc, 0, data))
++		return (-1);
++	if (hifn_readramaddr(sc, 0, data))
++		return (-1);
++	if (bcmp(data, dataexpect, sizeof(data)) != 0) {
++		sc->sc_drammodel = 1;
++		return (0);
++	}
++
++	return (0);
++}
++
++#define	HIFN_SRAM_MAX		(32 << 20)
++#define	HIFN_SRAM_STEP_SIZE	16384
++#define	HIFN_SRAM_GRANULARITY	(HIFN_SRAM_MAX / HIFN_SRAM_STEP_SIZE)
++
++static int
++hifn_sramsize(struct hifn_softc *sc)
++{
++	u_int32_t a;
++	u_int8_t data[8];
++	u_int8_t dataexpect[sizeof(data)];
++	int32_t i;
++
++	for (i = 0; i < sizeof(data); i++)
++		data[i] = dataexpect[i] = i ^ 0x5a;
++
++	for (i = HIFN_SRAM_GRANULARITY - 1; i >= 0; i--) {
++		a = i * HIFN_SRAM_STEP_SIZE;
++		bcopy(&i, data, sizeof(i));
++		hifn_writeramaddr(sc, a, data);
++	}
++
++	for (i = 0; i < HIFN_SRAM_GRANULARITY; i++) {
++		a = i * HIFN_SRAM_STEP_SIZE;
++		bcopy(&i, dataexpect, sizeof(i));
++		if (hifn_readramaddr(sc, a, data) < 0)
++			return (0);
++		if (bcmp(data, dataexpect, sizeof(data)) != 0)
++			return (0);
++		sc->sc_ramsize = a + HIFN_SRAM_STEP_SIZE;
++	}
++
++	return (0);
++}
++
++/*
++ * XXX For dram boards, one should really try all of the
++ * HIFN_PUCNFG_DSZ_*'s.  This just assumes that PUCNFG
++ * is already set up correctly.
++ */
++static int
++hifn_dramsize(struct hifn_softc *sc)
++{
++	u_int32_t cnfg;
++
++	if (sc->sc_flags & HIFN_IS_7956) {
++		/*
++		 * 7955/7956 have a fixed internal ram of only 32K.
++		 */
++		sc->sc_ramsize = 32768;
++	} else {
++		cnfg = READ_REG_0(sc, HIFN_0_PUCNFG) &
++		    HIFN_PUCNFG_DRAMMASK;
++		sc->sc_ramsize = 1 << ((cnfg >> 13) + 18);
++	}
++	return (0);
++}
++
++static void
++hifn_alloc_slot(struct hifn_softc *sc, int *cmdp, int *srcp, int *dstp, int *resp)
++{
++	struct hifn_dma *dma = sc->sc_dma;
++
++	DPRINTF("%s()\n", __FUNCTION__);
++
++	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
++		dma->cmdi = 0;
++		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_JUMP|HIFN_D_MASKDONEIRQ);
++		wmb();
++		dma->cmdr[HIFN_D_CMD_RSIZE].l |= htole32(HIFN_D_VALID);
++		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
++		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
++	}
++	*cmdp = dma->cmdi++;
++	dma->cmdk = dma->cmdi;
++
++	if (dma->srci == HIFN_D_SRC_RSIZE) {
++		dma->srci = 0;
++		dma->srcr[HIFN_D_SRC_RSIZE].l = htole32(HIFN_D_JUMP|HIFN_D_MASKDONEIRQ);
++		wmb();
++		dma->srcr[HIFN_D_SRC_RSIZE].l |= htole32(HIFN_D_VALID);
++		HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
++		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
++	}
++	*srcp = dma->srci++;
++	dma->srck = dma->srci;
++
++	if (dma->dsti == HIFN_D_DST_RSIZE) {
++		dma->dsti = 0;
++		dma->dstr[HIFN_D_DST_RSIZE].l = htole32(HIFN_D_JUMP|HIFN_D_MASKDONEIRQ);
++		wmb();
++		dma->dstr[HIFN_D_DST_RSIZE].l |= htole32(HIFN_D_VALID);
++		HIFN_DSTR_SYNC(sc, HIFN_D_DST_RSIZE,
++		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
++	}
++	*dstp = dma->dsti++;
++	dma->dstk = dma->dsti;
++
++	if (dma->resi == HIFN_D_RES_RSIZE) {
++		dma->resi = 0;
++		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_JUMP|HIFN_D_MASKDONEIRQ);
++		wmb();
++		dma->resr[HIFN_D_RES_RSIZE].l |= htole32(HIFN_D_VALID);
++		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
++		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
++	}
++	*resp = dma->resi++;
++	dma->resk = dma->resi;
++}
++
++static int
++hifn_writeramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
++{
++	struct hifn_dma *dma = sc->sc_dma;
++	hifn_base_command_t wc;
++	const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
++	int r, cmdi, resi, srci, dsti;
++
++	DPRINTF("%s()\n", __FUNCTION__);
++
++	wc.masks = htole16(3 << 13);
++	wc.session_num = htole16(addr >> 14);
++	wc.total_source_count = htole16(8);
++	wc.total_dest_count = htole16(addr & 0x3fff);
++
++	hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
++
++	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
++	    HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
++	    HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
++
++	/* build write command */
++	bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND);
++	*(hifn_base_command_t *)dma->command_bufs[cmdi] = wc;
++	bcopy(data, &dma->test_src, sizeof(dma->test_src));
++
++	dma->srcr[srci].p = htole32(sc->sc_dma_physaddr
++	    + offsetof(struct hifn_dma, test_src));
++	dma->dstr[dsti].p = htole32(sc->sc_dma_physaddr
++	    + offsetof(struct hifn_dma, test_dst));
++
++	dma->cmdr[cmdi].l = htole32(16 | masks);
++	dma->srcr[srci].l = htole32(8 | masks);
++	dma->dstr[dsti].l = htole32(4 | masks);
++	dma->resr[resi].l = htole32(4 | masks);
++
++	for (r = 10000; r >= 0; r--) {
++		DELAY(10);
++		if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
++			break;
++	}
++	if (r == 0) {
++		device_printf(sc->sc_dev, "writeramaddr -- "
++		    "result[%d](addr %d) still valid\n", resi, addr);
++		r = -1;
++		return (-1);
++	} else
++		r = 0;
++
++	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
++	    HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
++	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
++
++	return (r);
++}
++
++static int
++hifn_readramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
++{
++	struct hifn_dma *dma = sc->sc_dma;
++	hifn_base_command_t rc;
++	const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
++	int r, cmdi, srci, dsti, resi;
++
++	DPRINTF("%s()\n", __FUNCTION__);
++
++	rc.masks = htole16(2 << 13);
++	rc.session_num = htole16(addr >> 14);
++	rc.total_source_count = htole16(addr & 0x3fff);
++	rc.total_dest_count = htole16(8);
++
++	hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
++
++	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
++	    HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
++	    HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
++
++	bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND);
++	*(hifn_base_command_t *)dma->command_bufs[cmdi] = rc;
++
++	dma->srcr[srci].p = htole32(sc->sc_dma_physaddr +
++	    offsetof(struct hifn_dma, test_src));
++	dma->test_src = 0;
++	dma->dstr[dsti].p =  htole32(sc->sc_dma_physaddr +
++	    offsetof(struct hifn_dma, test_dst));
++	dma->test_dst = 0;
++	dma->cmdr[cmdi].l = htole32(8 | masks);
++	dma->srcr[srci].l = htole32(8 | masks);
++	dma->dstr[dsti].l = htole32(8 | masks);
++	dma->resr[resi].l = htole32(HIFN_MAX_RESULT | masks);
++
++	for (r = 10000; r >= 0; r--) {
++		DELAY(10);
++		if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
++			break;
++	}
++	if (r == 0) {
++		device_printf(sc->sc_dev, "readramaddr -- "
++		    "result[%d](addr %d) still valid\n", resi, addr);
++		r = -1;
++	} else {
++		r = 0;
++		bcopy(&dma->test_dst, data, sizeof(dma->test_dst));
++	}
++
++	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
++	    HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
++	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
++
++	return (r);
++}
++
++/*
++ * Initialize the descriptor rings.
++ */
++static void 
++hifn_init_dma(struct hifn_softc *sc)
++{
++	struct hifn_dma *dma = sc->sc_dma;
++	int i;
++
++	DPRINTF("%s()\n", __FUNCTION__);
++
++	hifn_set_retry(sc);
++
++	/* initialize static pointer values */
++	for (i = 0; i < HIFN_D_CMD_RSIZE; i++)
++		dma->cmdr[i].p = htole32(sc->sc_dma_physaddr +
++		    offsetof(struct hifn_dma, command_bufs[i][0]));
++	for (i = 0; i < HIFN_D_RES_RSIZE; i++)
++		dma->resr[i].p = htole32(sc->sc_dma_physaddr +
++		    offsetof(struct hifn_dma, result_bufs[i][0]));
++
++	dma->cmdr[HIFN_D_CMD_RSIZE].p =
++	    htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, cmdr[0]));
++	dma->srcr[HIFN_D_SRC_RSIZE].p =
++	    htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, srcr[0]));
++	dma->dstr[HIFN_D_DST_RSIZE].p =
++	    htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, dstr[0]));
++	dma->resr[HIFN_D_RES_RSIZE].p =
++	    htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, resr[0]));
++
++	dma->cmdu = dma->srcu = dma->dstu = dma->resu = 0;
++	dma->cmdi = dma->srci = dma->dsti = dma->resi = 0;
++	dma->cmdk = dma->srck = dma->dstk = dma->resk = 0;
++}
++
++/*
++ * Writes out the raw command buffer space.  Returns the
++ * command buffer size.
++ */
++static u_int
++hifn_write_command(struct hifn_command *cmd, u_int8_t *buf)
++{
++	struct hifn_softc *sc = NULL;
++	u_int8_t *buf_pos;
++	hifn_base_command_t *base_cmd;
++	hifn_mac_command_t *mac_cmd;
++	hifn_crypt_command_t *cry_cmd;
++	int using_mac, using_crypt, len, ivlen;
++	u_int32_t dlen, slen;
++
++	DPRINTF("%s()\n", __FUNCTION__);
++
++	buf_pos = buf;
++	using_mac = cmd->base_masks & HIFN_BASE_CMD_MAC;
++	using_crypt = cmd->base_masks & HIFN_BASE_CMD_CRYPT;
++
++	base_cmd = (hifn_base_command_t *)buf_pos;
++	base_cmd->masks = htole16(cmd->base_masks);
++	slen = cmd->src_mapsize;
++	if (cmd->sloplen)
++		dlen = cmd->dst_mapsize - cmd->sloplen + sizeof(u_int32_t);
++	else
++		dlen = cmd->dst_mapsize;
++	base_cmd->total_source_count = htole16(slen & HIFN_BASE_CMD_LENMASK_LO);
++	base_cmd->total_dest_count = htole16(dlen & HIFN_BASE_CMD_LENMASK_LO);
++	dlen >>= 16;
++	slen >>= 16;
++	base_cmd->session_num = htole16(
++	    ((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) |
++	    ((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M));
++	buf_pos += sizeof(hifn_base_command_t);
++
++	if (using_mac) {
++		mac_cmd = (hifn_mac_command_t *)buf_pos;
++		dlen = cmd->maccrd->crd_len;
++		mac_cmd->source_count = htole16(dlen & 0xffff);
++		dlen >>= 16;
++		mac_cmd->masks = htole16(cmd->mac_masks |
++		    ((dlen << HIFN_MAC_CMD_SRCLEN_S) & HIFN_MAC_CMD_SRCLEN_M));
++		mac_cmd->header_skip = htole16(cmd->maccrd->crd_skip);
++		mac_cmd->reserved = 0;
++		buf_pos += sizeof(hifn_mac_command_t);
++	}
++
++	if (using_crypt) {
++		cry_cmd = (hifn_crypt_command_t *)buf_pos;
++		dlen = cmd->enccrd->crd_len;
++		cry_cmd->source_count = htole16(dlen & 0xffff);
++		dlen >>= 16;
++		cry_cmd->masks = htole16(cmd->cry_masks |
++		    ((dlen << HIFN_CRYPT_CMD_SRCLEN_S) & HIFN_CRYPT_CMD_SRCLEN_M));
++		cry_cmd->header_skip = htole16(cmd->enccrd->crd_skip);
++		cry_cmd->reserved = 0;
++		buf_pos += sizeof(hifn_crypt_command_t);
++	}
++
++	if (using_mac && cmd->mac_masks & HIFN_MAC_CMD_NEW_KEY) {
++		bcopy(cmd->mac, buf_pos, HIFN_MAC_KEY_LENGTH);
++		buf_pos += HIFN_MAC_KEY_LENGTH;
++	}
++
++	if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_KEY) {
++		switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
++		case HIFN_CRYPT_CMD_ALG_3DES:
++			bcopy(cmd->ck, buf_pos, HIFN_3DES_KEY_LENGTH);
++			buf_pos += HIFN_3DES_KEY_LENGTH;
++			break;
++		case HIFN_CRYPT_CMD_ALG_DES:
++			bcopy(cmd->ck, buf_pos, HIFN_DES_KEY_LENGTH);
++			buf_pos += HIFN_DES_KEY_LENGTH;
++			break;
++		case HIFN_CRYPT_CMD_ALG_RC4:
++			len = 256;
++			do {
++				int clen;
++
++				clen = MIN(cmd->cklen, len);
++				bcopy(cmd->ck, buf_pos, clen);
++				len -= clen;
++				buf_pos += clen;
++			} while (len > 0);
++			bzero(buf_pos, 4);
++			buf_pos += 4;
++			break;
++		case HIFN_CRYPT_CMD_ALG_AES:
++			/*
++			 * AES keys are variable 128, 192 and
++			 * 256 bits (16, 24 and 32 bytes).
++			 */
++			bcopy(cmd->ck, buf_pos, cmd->cklen);
++			buf_pos += cmd->cklen;
++			break;
++		}
++	}
++
++	if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_IV) {
++		switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
++		case HIFN_CRYPT_CMD_ALG_AES:
++			ivlen = HIFN_AES_IV_LENGTH;
++			break;
++		default:
++			ivlen = HIFN_IV_LENGTH;
++			break;
++		}
++		bcopy(cmd->iv, buf_pos, ivlen);
++		buf_pos += ivlen;
++	}
++
++	if ((cmd->base_masks & (HIFN_BASE_CMD_MAC|HIFN_BASE_CMD_CRYPT)) == 0) {
++		bzero(buf_pos, 8);
++		buf_pos += 8;
++	}
++
++	return (buf_pos - buf);
++}
++
++static int
++hifn_dmamap_aligned(struct hifn_operand *op)
++{
++	struct hifn_softc *sc = NULL;
++	int i;
++
++	DPRINTF("%s()\n", __FUNCTION__);
++
++	for (i = 0; i < op->nsegs; i++) {
++		if (op->segs[i].ds_addr & 3)
++			return (0);
++		if ((i != (op->nsegs - 1)) && (op->segs[i].ds_len & 3))
++			return (0);
++	}
++	return (1);
++}
++
++static __inline int
++hifn_dmamap_dstwrap(struct hifn_softc *sc, int idx)
++{
++	struct hifn_dma *dma = sc->sc_dma;
++
++	if (++idx == HIFN_D_DST_RSIZE) {
++		dma->dstr[idx].l = htole32(HIFN_D_VALID | HIFN_D_JUMP |
++		    HIFN_D_MASKDONEIRQ);
++		HIFN_DSTR_SYNC(sc, idx,
++		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
++		idx = 0;
++	}
++	return (idx);
++}
++
++static int
++hifn_dmamap_load_dst(struct hifn_softc *sc, struct hifn_command *cmd)
++{
++	struct hifn_dma *dma = sc->sc_dma;
++	struct hifn_operand *dst = &cmd->dst;
++	u_int32_t p, l;
++	int idx, used = 0, i;
++
++	DPRINTF("%s()\n", __FUNCTION__);
++
++	idx = dma->dsti;
++	for (i = 0; i < dst->nsegs - 1; i++) {
++		dma->dstr[idx].p = htole32(dst->segs[i].ds_addr);
++		dma->dstr[idx].l = htole32(HIFN_D_MASKDONEIRQ | dst->segs[i].ds_len);
++		wmb();
++		dma->dstr[idx].l |= htole32(HIFN_D_VALID);
++		HIFN_DSTR_SYNC(sc, idx,
++		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
++		used++;
++
++		idx = hifn_dmamap_dstwrap(sc, idx);
++	}
++
++	if (cmd->sloplen == 0) {
++		p = dst->segs[i].ds_addr;
++		l = HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
++		    dst->segs[i].ds_len;
++	} else {
++		p = sc->sc_dma_physaddr +
++		    offsetof(struct hifn_dma, slop[cmd->slopidx]);
++		l = HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
++		    sizeof(u_int32_t);
++
++		if ((dst->segs[i].ds_len - cmd->sloplen) != 0) {
++			dma->dstr[idx].p = htole32(dst->segs[i].ds_addr);
++			dma->dstr[idx].l = htole32(HIFN_D_MASKDONEIRQ |
++			    (dst->segs[i].ds_len - cmd->sloplen));
++			wmb();
++			dma->dstr[idx].l |= htole32(HIFN_D_VALID);
++			HIFN_DSTR_SYNC(sc, idx,
++			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
++			used++;
++
++			idx = hifn_dmamap_dstwrap(sc, idx);
++		}
++	}
++	dma->dstr[idx].p = htole32(p);
++	dma->dstr[idx].l = htole32(l);
++	wmb();
++	dma->dstr[idx].l |= htole32(HIFN_D_VALID);
++	HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
++	used++;
++
++	idx = hifn_dmamap_dstwrap(sc, idx);
++
++	dma->dsti = idx;
++	dma->dstu += used;
++	return (idx);
++}
++
++static __inline int
++hifn_dmamap_srcwrap(struct hifn_softc *sc, int idx)
++{
++	struct hifn_dma *dma = sc->sc_dma;
++
++	if (++idx == HIFN_D_SRC_RSIZE) {
++		dma->srcr[idx].l = htole32(HIFN_D_VALID |
++		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
++		HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
++		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
++		idx = 0;
++	}
++	return (idx);
++}
++
++static int
++hifn_dmamap_load_src(struct hifn_softc *sc, struct hifn_command *cmd)
++{
++	struct hifn_dma *dma = sc->sc_dma;
++	struct hifn_operand *src = &cmd->src;
++	int idx, i;
++	u_int32_t last = 0;
++
++	DPRINTF("%s()\n", __FUNCTION__);
++
++	idx = dma->srci;
++	for (i = 0; i < src->nsegs; i++) {
++		if (i == src->nsegs - 1)
++			last = HIFN_D_LAST;
++
++		dma->srcr[idx].p = htole32(src->segs[i].ds_addr);
++		dma->srcr[idx].l = htole32(src->segs[i].ds_len |
++		    HIFN_D_MASKDONEIRQ | last);
++		wmb();
++		dma->srcr[idx].l |= htole32(HIFN_D_VALID);
++		HIFN_SRCR_SYNC(sc, idx,
++		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
++
++		idx = hifn_dmamap_srcwrap(sc, idx);
++	}
++	dma->srci = idx;
++	dma->srcu += src->nsegs;
++	return (idx);
++} 
++
++
++static int 
++hifn_crypto(
++	struct hifn_softc *sc,
++	struct hifn_command *cmd,
++	struct cryptop *crp,
++	int hint)
++{
++	struct	hifn_dma *dma = sc->sc_dma;
++	u_int32_t cmdlen, csr;
++	int cmdi, resi, err = 0;
++	unsigned long l_flags;
++
++	DPRINTF("%s()\n", __FUNCTION__);
++
++	/*
++	 * need 1 cmd, and 1 res
++	 *
++	 * NB: check this first since it's easy.
++	 */
++	HIFN_LOCK(sc);
++	if ((dma->cmdu + 1) > HIFN_D_CMD_RSIZE ||
++	    (dma->resu + 1) > HIFN_D_RES_RSIZE) {
++#ifdef HIFN_DEBUG
++		if (hifn_debug) {
++			device_printf(sc->sc_dev,
++				"cmd/result exhaustion, cmdu %u resu %u\n",
++				dma->cmdu, dma->resu);
++		}
++#endif
++		hifnstats.hst_nomem_cr++;
++		sc->sc_needwakeup |= CRYPTO_SYMQ;
++		HIFN_UNLOCK(sc);
++		return (ERESTART);
++	}
++
++	if (crp->crp_flags & CRYPTO_F_SKBUF) {
++		if (pci_map_skb(sc, &cmd->src, cmd->src_skb)) {
++			hifnstats.hst_nomem_load++;
++			err = ENOMEM;
++			goto err_srcmap1;
++		}
++	} else if (crp->crp_flags & CRYPTO_F_IOV) {
++		if (pci_map_uio(sc, &cmd->src, cmd->src_io)) {
++			hifnstats.hst_nomem_load++;
++			err = ENOMEM;
++			goto err_srcmap1;
++		}
++	} else {
++		if (pci_map_buf(sc, &cmd->src, cmd->src_buf, crp->crp_ilen)) {
++			hifnstats.hst_nomem_load++;
++			err = ENOMEM;
++			goto err_srcmap1;
++		}
++	}
++
++	if (hifn_dmamap_aligned(&cmd->src)) {
++		cmd->sloplen = cmd->src_mapsize & 3;
++		cmd->dst = cmd->src;
++	} else {
++		if (crp->crp_flags & CRYPTO_F_IOV) {
++			DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
++			err = EINVAL;
++			goto err_srcmap;
++		} else if (crp->crp_flags & CRYPTO_F_SKBUF) {
++#ifdef NOTYET
++			int totlen, len;
++			struct mbuf *m, *m0, *mlast;
++
++			KASSERT(cmd->dst_m == cmd->src_m,
++				("hifn_crypto: dst_m initialized improperly"));
++			hifnstats.hst_unaligned++;
++			/*
++			 * Source is not aligned on a longword boundary.
++			 * Copy the data to insure alignment.  If we fail
++			 * to allocate mbufs or clusters while doing this
++			 * we return ERESTART so the operation is requeued
++			 * at the crypto later, but only if there are
++			 * ops already posted to the hardware; otherwise we
++			 * have no guarantee that we'll be re-entered.
++			 */
++			totlen = cmd->src_mapsize;
++			if (cmd->src_m->m_flags & M_PKTHDR) {
++				len = MHLEN;
++				MGETHDR(m0, M_DONTWAIT, MT_DATA);
++				if (m0 && !m_dup_pkthdr(m0, cmd->src_m, M_DONTWAIT)) {
++					m_free(m0);
++					m0 = NULL;
++				}
++			} else {
++				len = MLEN;
++				MGET(m0, M_DONTWAIT, MT_DATA);
++			}
++			if (m0 == NULL) {
++				hifnstats.hst_nomem_mbuf++;
++				err = dma->cmdu ? ERESTART : ENOMEM;
++				goto err_srcmap;
++			}
++			if (totlen >= MINCLSIZE) {
++				MCLGET(m0, M_DONTWAIT);
++				if ((m0->m_flags & M_EXT) == 0) {
++					hifnstats.hst_nomem_mcl++;
++					err = dma->cmdu ? ERESTART : ENOMEM;
++					m_freem(m0);
++					goto err_srcmap;
++				}
++				len = MCLBYTES;
++			}
++			totlen -= len;
++			m0->m_pkthdr.len = m0->m_len = len;
++			mlast = m0;
++
++			while (totlen > 0) {
++				MGET(m, M_DONTWAIT, MT_DATA);
++				if (m == NULL) {
++					hifnstats.hst_nomem_mbuf++;
++					err = dma->cmdu ? ERESTART : ENOMEM;
++					m_freem(m0);
++					goto err_srcmap;
++				}
++				len = MLEN;
++				if (totlen >= MINCLSIZE) {
++					MCLGET(m, M_DONTWAIT);
++					if ((m->m_flags & M_EXT) == 0) {
++						hifnstats.hst_nomem_mcl++;
++						err = dma->cmdu ? ERESTART : ENOMEM;
++						mlast->m_next = m;
++						m_freem(m0);
++						goto err_srcmap;
++					}
++					len = MCLBYTES;
++				}
++
++				m->m_len = len;
++				m0->m_pkthdr.len += len;
++				totlen -= len;
++
++				mlast->m_next = m;
++				mlast = m;
++			}
++			cmd->dst_m = m0;
++#else
++			device_printf(sc->sc_dev,
++					"%s,%d: CRYPTO_F_SKBUF unaligned not implemented\n",
++					__FILE__, __LINE__);
++			err = EINVAL;
++			goto err_srcmap;
++#endif
++		} else {
++			device_printf(sc->sc_dev,
++					"%s,%d: unaligned contig buffers not implemented\n",
++					__FILE__, __LINE__);
++			err = EINVAL;
++			goto err_srcmap;
++		}
++	}
++
++	if (cmd->dst_map == NULL) {
++		if (crp->crp_flags & CRYPTO_F_SKBUF) {
++			if (pci_map_skb(sc, &cmd->dst, cmd->dst_skb)) {
++				hifnstats.hst_nomem_map++;
++				err = ENOMEM;
++				goto err_dstmap1;
++			}
++		} else if (crp->crp_flags & CRYPTO_F_IOV) {
++			if (pci_map_uio(sc, &cmd->dst, cmd->dst_io)) {
++				hifnstats.hst_nomem_load++;
++				err = ENOMEM;
++				goto err_dstmap1;
++			}
++		} else {
++			if (pci_map_buf(sc, &cmd->dst, cmd->dst_buf, crp->crp_ilen)) {
++				hifnstats.hst_nomem_load++;
++				err = ENOMEM;
++				goto err_dstmap1;
++			}
++		}
++	}
++
++#ifdef HIFN_DEBUG
++	if (hifn_debug) {
++		device_printf(sc->sc_dev,
++		    "Entering cmd: stat %8x ien %8x u %d/%d/%d/%d n %d/%d\n",
++		    READ_REG_1(sc, HIFN_1_DMA_CSR),
++		    READ_REG_1(sc, HIFN_1_DMA_IER),
++		    dma->cmdu, dma->srcu, dma->dstu, dma->resu,
++		    cmd->src_nsegs, cmd->dst_nsegs);
++	}
++#endif
++
++#if 0
++	if (cmd->src_map == cmd->dst_map) {
++		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
++		    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
++	} else {
++		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
++		    BUS_DMASYNC_PREWRITE);
++		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
++		    BUS_DMASYNC_PREREAD);
++	}
++#endif
++
++	/*
++	 * need N src, and N dst
++	 */
++	if ((dma->srcu + cmd->src_nsegs) > HIFN_D_SRC_RSIZE ||
++	    (dma->dstu + cmd->dst_nsegs + 1) > HIFN_D_DST_RSIZE) {
++#ifdef HIFN_DEBUG
++		if (hifn_debug) {
++			device_printf(sc->sc_dev,
++				"src/dst exhaustion, srcu %u+%u dstu %u+%u\n",
++				dma->srcu, cmd->src_nsegs,
++				dma->dstu, cmd->dst_nsegs);
++		}
++#endif
++		hifnstats.hst_nomem_sd++;
++		err = ERESTART;
++		goto err_dstmap;
++	}
++
++	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
++		dma->cmdi = 0;
++		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_JUMP|HIFN_D_MASKDONEIRQ);
++		wmb();
++		dma->cmdr[HIFN_D_CMD_RSIZE].l |= htole32(HIFN_D_VALID);
++		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
++		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
++	}
++	cmdi = dma->cmdi++;
++	cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]);
++	HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE);
++
++	/* .p for command/result already set */
++	dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_LAST |
++	    HIFN_D_MASKDONEIRQ);
++	wmb();
++	dma->cmdr[cmdi].l |= htole32(HIFN_D_VALID);
++	HIFN_CMDR_SYNC(sc, cmdi,
++	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
++	dma->cmdu++;
++
++	/*
++	 * We don't worry about missing an interrupt (which a "command wait"
++	 * interrupt salvages us from), unless there is more than one command
++	 * in the queue.
++	 */
++	if (dma->cmdu > 1) {
++		sc->sc_dmaier |= HIFN_DMAIER_C_WAIT;
++		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
++	}
++
++	hifnstats.hst_ipackets++;
++	hifnstats.hst_ibytes += cmd->src_mapsize;
++
++	hifn_dmamap_load_src(sc, cmd);
++
++	/*
++	 * Unlike other descriptors, we don't mask done interrupt from
++	 * result descriptor.
++	 */
++#ifdef HIFN_DEBUG
++	if (hifn_debug)
++		device_printf(sc->sc_dev, "load res\n");
++#endif
++	if (dma->resi == HIFN_D_RES_RSIZE) {
++		dma->resi = 0;
++		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_JUMP|HIFN_D_MASKDONEIRQ);
++		wmb();
++		dma->resr[HIFN_D_RES_RSIZE].l |= htole32(HIFN_D_VALID);
++		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
++		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
++	}
++	resi = dma->resi++;
++	KASSERT(dma->hifn_commands[resi] == NULL,
++		("hifn_crypto: command slot %u busy", resi));
++	dma->hifn_commands[resi] = cmd;
++	HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD);
++	if ((hint & CRYPTO_HINT_MORE) && sc->sc_curbatch < hifn_maxbatch) {
++		dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
++		    HIFN_D_LAST | HIFN_D_MASKDONEIRQ);
++		wmb();
++		dma->resr[resi].l |= htole32(HIFN_D_VALID);
++		sc->sc_curbatch++;
++		if (sc->sc_curbatch > hifnstats.hst_maxbatch)
++			hifnstats.hst_maxbatch = sc->sc_curbatch;
++		hifnstats.hst_totbatch++;
++	} else {
++		dma->resr[resi].l = htole32(HIFN_MAX_RESULT | HIFN_D_LAST);
++		wmb();
++		dma->resr[resi].l |= htole32(HIFN_D_VALID);
++		sc->sc_curbatch = 0;
++	}
++	HIFN_RESR_SYNC(sc, resi,
++	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
++	dma->resu++;
++
++	if (cmd->sloplen)
++		cmd->slopidx = resi;
++
++	hifn_dmamap_load_dst(sc, cmd);
++
++	csr = 0;
++	if (sc->sc_c_busy == 0) {
++		csr |= HIFN_DMACSR_C_CTRL_ENA;
++		sc->sc_c_busy = 1;
++	}
++	if (sc->sc_s_busy == 0) {
++		csr |= HIFN_DMACSR_S_CTRL_ENA;
++		sc->sc_s_busy = 1;
++	}
++	if (sc->sc_r_busy == 0) {
++		csr |= HIFN_DMACSR_R_CTRL_ENA;
++		sc->sc_r_busy = 1;
++	}
++	if (sc->sc_d_busy == 0) {
++		csr |= HIFN_DMACSR_D_CTRL_ENA;
++		sc->sc_d_busy = 1;
++	}
++	if (csr)
++		WRITE_REG_1(sc, HIFN_1_DMA_CSR, csr);
++
++#ifdef HIFN_DEBUG
++	if (hifn_debug) {
++		device_printf(sc->sc_dev, "command: stat %8x ier %8x\n",
++		    READ_REG_1(sc, HIFN_1_DMA_CSR),
++		    READ_REG_1(sc, HIFN_1_DMA_IER));
++	}
++#endif
++
++	sc->sc_active = 5;
++	HIFN_UNLOCK(sc);
++	KASSERT(err == 0, ("hifn_crypto: success with error %u", err));
++	return (err);		/* success */
++
++err_dstmap:
++	if (cmd->src_map != cmd->dst_map)
++		pci_unmap_buf(sc, &cmd->dst);
++err_dstmap1:
++err_srcmap:
++	if (crp->crp_flags & CRYPTO_F_SKBUF) {
++		if (cmd->src_skb != cmd->dst_skb)
++#ifdef NOTYET
++			m_freem(cmd->dst_m);
++#else
++			device_printf(sc->sc_dev,
++					"%s,%d: CRYPTO_F_SKBUF src != dst not implemented\n",
++					__FILE__, __LINE__);
++#endif
++	}
++	pci_unmap_buf(sc, &cmd->src);
++err_srcmap1:
++	HIFN_UNLOCK(sc);
++	return (err);
++}
++
++static void
++hifn_tick(unsigned long arg)
++{
++	struct hifn_softc *sc;
++	unsigned long l_flags;
++
++	if (arg >= HIFN_MAX_CHIPS)
++		return;
++	sc = hifn_chip_idx[arg];
++	if (!sc)
++		return;
++
++	HIFN_LOCK(sc);
++	if (sc->sc_active == 0) {
++		struct hifn_dma *dma = sc->sc_dma;
++		u_int32_t r = 0;
++
++		if (dma->cmdu == 0 && sc->sc_c_busy) {
++			sc->sc_c_busy = 0;
++			r |= HIFN_DMACSR_C_CTRL_DIS;
++		}
++		if (dma->srcu == 0 && sc->sc_s_busy) {
++			sc->sc_s_busy = 0;
++			r |= HIFN_DMACSR_S_CTRL_DIS;
++		}
++		if (dma->dstu == 0 && sc->sc_d_busy) {
++			sc->sc_d_busy = 0;
++			r |= HIFN_DMACSR_D_CTRL_DIS;
++		}
++		if (dma->resu == 0 && sc->sc_r_busy) {
++			sc->sc_r_busy = 0;
++			r |= HIFN_DMACSR_R_CTRL_DIS;
++		}
++		if (r)
++			WRITE_REG_1(sc, HIFN_1_DMA_CSR, r);
++	} else
++		sc->sc_active--;
++	HIFN_UNLOCK(sc);
++	mod_timer(&sc->sc_tickto, jiffies + HZ);
++}
++
++static irqreturn_t
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
++hifn_intr(int irq, void *arg)
++#else
++hifn_intr(int irq, void *arg, struct pt_regs *regs)
++#endif
++{
++	struct hifn_softc *sc = arg;
++	struct hifn_dma *dma;
++	u_int32_t dmacsr, restart;
++	int i, u;
++	unsigned long l_flags;
++
++	dmacsr = READ_REG_1(sc, HIFN_1_DMA_CSR);
++
++	/* Nothing in the DMA unit interrupted */
++	if ((dmacsr & sc->sc_dmaier) == 0)
++		return IRQ_NONE;
++
++	HIFN_LOCK(sc);
++
++	dma = sc->sc_dma;
++
++#ifdef HIFN_DEBUG
++	if (hifn_debug) {
++		device_printf(sc->sc_dev,
++		    "irq: stat %08x ien %08x damier %08x i %d/%d/%d/%d k %d/%d/%d/%d u %d/%d/%d/%d\n",
++		    dmacsr, READ_REG_1(sc, HIFN_1_DMA_IER), sc->sc_dmaier,
++		    dma->cmdi, dma->srci, dma->dsti, dma->resi,
++		    dma->cmdk, dma->srck, dma->dstk, dma->resk,
++		    dma->cmdu, dma->srcu, dma->dstu, dma->resu);
++	}
++#endif
++
++	WRITE_REG_1(sc, HIFN_1_DMA_CSR, dmacsr & sc->sc_dmaier);
++
++	if ((sc->sc_flags & HIFN_HAS_PUBLIC) &&
++	    (dmacsr & HIFN_DMACSR_PUBDONE))
++		WRITE_REG_1(sc, HIFN_1_PUB_STATUS,
++		    READ_REG_1(sc, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE);
++
++	restart = dmacsr & (HIFN_DMACSR_D_OVER | HIFN_DMACSR_R_OVER);
++	if (restart)
++		device_printf(sc->sc_dev, "overrun %x\n", dmacsr);
++
++	if (sc->sc_flags & HIFN_IS_7811) {
++		if (dmacsr & HIFN_DMACSR_ILLR)
++			device_printf(sc->sc_dev, "illegal read\n");
++		if (dmacsr & HIFN_DMACSR_ILLW)
++			device_printf(sc->sc_dev, "illegal write\n");
++	}
++
++	restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT |
++	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT);
++	if (restart) {
++		device_printf(sc->sc_dev, "abort, resetting.\n");
++		hifnstats.hst_abort++;
++		hifn_abort(sc);
++		HIFN_UNLOCK(sc);
++		return IRQ_HANDLED;
++	}
++
++	if ((dmacsr & HIFN_DMACSR_C_WAIT) && (dma->cmdu == 0)) {
++		/*
++		 * If no slots to process and we receive a "waiting on
++		 * command" interrupt, we disable the "waiting on command"
++		 * (by clearing it).
++		 */
++		sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
++		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
++	}
++
++	/* clear the rings */
++	i = dma->resk; u = dma->resu;
++	while (u != 0) {
++		HIFN_RESR_SYNC(sc, i,
++		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
++		if (dma->resr[i].l & htole32(HIFN_D_VALID)) {
++			HIFN_RESR_SYNC(sc, i,
++			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
++			break;
++		}
++
++		if (i != HIFN_D_RES_RSIZE) {
++			struct hifn_command *cmd;
++			u_int8_t *macbuf = NULL;
++
++			HIFN_RES_SYNC(sc, i, BUS_DMASYNC_POSTREAD);
++			cmd = dma->hifn_commands[i];
++			KASSERT(cmd != NULL,
++				("hifn_intr: null command slot %u", i));
++			dma->hifn_commands[i] = NULL;
++
++			if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
++				macbuf = dma->result_bufs[i];
++				macbuf += 12;
++			}
++
++			hifn_callback(sc, cmd, macbuf);
++			hifnstats.hst_opackets++;
++			u--;
++		}
++
++		if (++i == (HIFN_D_RES_RSIZE + 1))
++			i = 0;
++	}
++	dma->resk = i; dma->resu = u;
++
++	i = dma->srck; u = dma->srcu;
++	while (u != 0) {
++		if (i == HIFN_D_SRC_RSIZE)
++			i = 0;
++		HIFN_SRCR_SYNC(sc, i,
++		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
++		if (dma->srcr[i].l & htole32(HIFN_D_VALID)) {
++			HIFN_SRCR_SYNC(sc, i,
++			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
++			break;
++		}
++		i++, u--;
++	}
++	dma->srck = i; dma->srcu = u;
++
++	i = dma->cmdk; u = dma->cmdu;
++	while (u != 0) {
++		HIFN_CMDR_SYNC(sc, i,
++		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
++		if (dma->cmdr[i].l & htole32(HIFN_D_VALID)) {
++			HIFN_CMDR_SYNC(sc, i,
++			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
++			break;
++		}
++		if (i != HIFN_D_CMD_RSIZE) {
++			u--;
++			HIFN_CMD_SYNC(sc, i, BUS_DMASYNC_POSTWRITE);
++		}
++		if (++i == (HIFN_D_CMD_RSIZE + 1))
++			i = 0;
++	}
++	dma->cmdk = i; dma->cmdu = u;
++
++	HIFN_UNLOCK(sc);
++
++	if (sc->sc_needwakeup) {		/* XXX check high watermark */
++		int wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ);
++#ifdef HIFN_DEBUG
++		if (hifn_debug)
++			device_printf(sc->sc_dev,
++				"wakeup crypto (%x) u %d/%d/%d/%d\n",
++				sc->sc_needwakeup,
++				dma->cmdu, dma->srcu, dma->dstu, dma->resu);
++#endif
++		sc->sc_needwakeup &= ~wakeup;
++		crypto_unblock(sc->sc_cid, wakeup);
++	}
++
++	return IRQ_HANDLED;
++}
++
++/*
++ * Allocate a new 'session' and return an encoded session id.  'sidp'
++ * contains our registration id, and should contain an encoded session
++ * id on successful allocation.
++ */
++static int
++hifn_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
++{
++	struct hifn_softc *sc = device_get_softc(dev);
++	struct cryptoini *c;
++	int mac = 0, cry = 0, sesn;
++	struct hifn_session *ses = NULL;
++	unsigned long l_flags;
++
++	DPRINTF("%s()\n", __FUNCTION__);
++
++	KASSERT(sc != NULL, ("hifn_newsession: null softc"));
++	if (sidp == NULL || cri == NULL || sc == NULL) {
++		DPRINTF("%s,%d: %s - EINVAL\n", __FILE__, __LINE__, __FUNCTION__);
++		return (EINVAL);
++	}
++
++	HIFN_LOCK(sc);
++	if (sc->sc_sessions == NULL) {
++		ses = sc->sc_sessions = (struct hifn_session *)kmalloc(sizeof(*ses),
++				SLAB_ATOMIC);
++		if (ses == NULL) {
++			HIFN_UNLOCK(sc);
++			return (ENOMEM);
++		}
++		sesn = 0;
++		sc->sc_nsessions = 1;
++	} else {
++		for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
++			if (!sc->sc_sessions[sesn].hs_used) {
++				ses = &sc->sc_sessions[sesn];
++				break;
++			}
++		}
++
++		if (ses == NULL) {
++			sesn = sc->sc_nsessions;
++			ses = (struct hifn_session *)kmalloc((sesn + 1) * sizeof(*ses),
++					SLAB_ATOMIC);
++			if (ses == NULL) {
++				HIFN_UNLOCK(sc);
++				return (ENOMEM);
++			}
++			bcopy(sc->sc_sessions, ses, sesn * sizeof(*ses));
++			bzero(sc->sc_sessions, sesn * sizeof(*ses));
++			kfree(sc->sc_sessions);
++			sc->sc_sessions = ses;
++			ses = &sc->sc_sessions[sesn];
++			sc->sc_nsessions++;
++		}
++	}
++	HIFN_UNLOCK(sc);
++
++	bzero(ses, sizeof(*ses));
++	ses->hs_used = 1;
++
++	for (c = cri; c != NULL; c = c->cri_next) {
++		switch (c->cri_alg) {
++		case CRYPTO_MD5:
++		case CRYPTO_SHA1:
++		case CRYPTO_MD5_HMAC:
++		case CRYPTO_SHA1_HMAC:
++			if (mac) {
++				DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
++				return (EINVAL);
++			}
++			mac = 1;
++			ses->hs_mlen = c->cri_mlen;
++			if (ses->hs_mlen == 0) {
++				switch (c->cri_alg) {
++				case CRYPTO_MD5:
++				case CRYPTO_MD5_HMAC:
++					ses->hs_mlen = 16;
++					break;
++				case CRYPTO_SHA1:
++				case CRYPTO_SHA1_HMAC:
++					ses->hs_mlen = 20;
++					break;
++				}
++			}
++			break;
++		case CRYPTO_DES_CBC:
++		case CRYPTO_3DES_CBC:
++		case CRYPTO_AES_CBC:
++			/* XXX this may read fewer, does it matter? */
++			read_random(ses->hs_iv,
++				c->cri_alg == CRYPTO_AES_CBC ?
++					HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
++			/*FALLTHROUGH*/
++		case CRYPTO_ARC4:
++			if (cry) {
++				DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
++				return (EINVAL);
++			}
++			cry = 1;
++			break;
++		default:
++			DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
++			return (EINVAL);
++		}
++	}
++	if (mac == 0 && cry == 0) {
++		DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
++		return (EINVAL);
++	}
++
++	*sidp = HIFN_SID(device_get_unit(sc->sc_dev), sesn);
++
++	return (0);
++}
++
++/*
++ * Deallocate a session.
++ * XXX this routine should run a zero'd mac/encrypt key into context ram.
++ * XXX to blow away any keys already stored there.
++ */
++static int
++hifn_freesession(device_t dev, u_int64_t tid)
++{
++	struct hifn_softc *sc = device_get_softc(dev);
++	int session, error;
++	u_int32_t sid = CRYPTO_SESID2LID(tid);
++	unsigned long l_flags;
++
++	DPRINTF("%s()\n", __FUNCTION__);
++
++	KASSERT(sc != NULL, ("hifn_freesession: null softc"));
++	if (sc == NULL) {
++		DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
++		return (EINVAL);
++	}
++
++	HIFN_LOCK(sc);
++	session = HIFN_SESSION(sid);
++	if (session < sc->sc_nsessions) {
++		bzero(&sc->sc_sessions[session], sizeof(struct hifn_session));
++		error = 0;
++	} else {
++		DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
++		error = EINVAL;
++	}
++	HIFN_UNLOCK(sc);
++
++	return (error);
++}
++
++static int
++hifn_process(device_t dev, struct cryptop *crp, int hint)
++{
++	struct hifn_softc *sc = device_get_softc(dev);
++	struct hifn_command *cmd = NULL;
++	int session, err, ivlen;
++	struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
++
++	DPRINTF("%s()\n", __FUNCTION__);
++
++	if (crp == NULL || crp->crp_callback == NULL) {
++		hifnstats.hst_invalid++;
++		DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
++		return (EINVAL);
++	}
++	session = HIFN_SESSION(crp->crp_sid);
++
++	if (sc == NULL || session >= sc->sc_nsessions) {
++		DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
++		err = EINVAL;
++		goto errout;
++	}
++
++	cmd = kmalloc(sizeof(struct hifn_command), SLAB_ATOMIC);
++	if (cmd == NULL) {
++		hifnstats.hst_nomem++;
++		err = ENOMEM;
++		goto errout;
++	}
++	memset(cmd, 0, sizeof(*cmd));
++
++	if (crp->crp_flags & CRYPTO_F_SKBUF) {
++		cmd->src_skb = (struct sk_buff *)crp->crp_buf;
++		cmd->dst_skb = (struct sk_buff *)crp->crp_buf;
++	} else if (crp->crp_flags & CRYPTO_F_IOV) {
++		cmd->src_io = (struct uio *)crp->crp_buf;
++		cmd->dst_io = (struct uio *)crp->crp_buf;
++	} else {
++		cmd->src_buf = crp->crp_buf;
++		cmd->dst_buf = crp->crp_buf;
++	}
++
++	crd1 = crp->crp_desc;
++	if (crd1 == NULL) {
++		DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
++		err = EINVAL;
++		goto errout;
++	}
++	crd2 = crd1->crd_next;
++
++	if (crd2 == NULL) {
++		if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
++		    crd1->crd_alg == CRYPTO_SHA1_HMAC ||
++		    crd1->crd_alg == CRYPTO_SHA1 ||
++		    crd1->crd_alg == CRYPTO_MD5) {
++			maccrd = crd1;
++			enccrd = NULL;
++		} else if (crd1->crd_alg == CRYPTO_DES_CBC ||
++		    crd1->crd_alg == CRYPTO_3DES_CBC ||
++		    crd1->crd_alg == CRYPTO_AES_CBC ||
++		    crd1->crd_alg == CRYPTO_ARC4) {
++			if ((crd1->crd_flags & CRD_F_ENCRYPT) == 0)
++				cmd->base_masks |= HIFN_BASE_CMD_DECODE;
++			maccrd = NULL;
++			enccrd = crd1;
++		} else {
++			DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
++			err = EINVAL;
++			goto errout;
++		}
++	} else {
++		if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
++                     crd1->crd_alg == CRYPTO_SHA1_HMAC ||
++                     crd1->crd_alg == CRYPTO_MD5 ||
++                     crd1->crd_alg == CRYPTO_SHA1) &&
++		    (crd2->crd_alg == CRYPTO_DES_CBC ||
++		     crd2->crd_alg == CRYPTO_3DES_CBC ||
++		     crd2->crd_alg == CRYPTO_AES_CBC ||
++		     crd2->crd_alg == CRYPTO_ARC4) &&
++		    ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
++			cmd->base_masks = HIFN_BASE_CMD_DECODE;
++			maccrd = crd1;
++			enccrd = crd2;
++		} else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
++		     crd1->crd_alg == CRYPTO_ARC4 ||
++		     crd1->crd_alg == CRYPTO_3DES_CBC ||
++		     crd1->crd_alg == CRYPTO_AES_CBC) &&
++		    (crd2->crd_alg == CRYPTO_MD5_HMAC ||
++                     crd2->crd_alg == CRYPTO_SHA1_HMAC ||
++                     crd2->crd_alg == CRYPTO_MD5 ||
++                     crd2->crd_alg == CRYPTO_SHA1) &&
++		    (crd1->crd_flags & CRD_F_ENCRYPT)) {
++			enccrd = crd1;
++			maccrd = crd2;
++		} else {
++			/*
++			 * We cannot order the 7751 as requested
++			 */
++			DPRINTF("%s,%d: %s %d,%d,%d - EINVAL\n",__FILE__,__LINE__,__FUNCTION__, crd1->crd_alg, crd2->crd_alg, crd1->crd_flags & CRD_F_ENCRYPT);
++			err = EINVAL;
++			goto errout;
++		}
++	}
++
++	if (enccrd) {
++		cmd->enccrd = enccrd;
++		cmd->base_masks |= HIFN_BASE_CMD_CRYPT;
++		switch (enccrd->crd_alg) {
++		case CRYPTO_ARC4:
++			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_RC4;
++			break;
++		case CRYPTO_DES_CBC:
++			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_DES |
++			    HIFN_CRYPT_CMD_MODE_CBC |
++			    HIFN_CRYPT_CMD_NEW_IV;
++			break;
++		case CRYPTO_3DES_CBC:
++			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_3DES |
++			    HIFN_CRYPT_CMD_MODE_CBC |
++			    HIFN_CRYPT_CMD_NEW_IV;
++			break;
++		case CRYPTO_AES_CBC:
++			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_AES |
++			    HIFN_CRYPT_CMD_MODE_CBC |
++			    HIFN_CRYPT_CMD_NEW_IV;
++			break;
++		default:
++			DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
++			err = EINVAL;
++			goto errout;
++		}
++		if (enccrd->crd_alg != CRYPTO_ARC4) {
++			ivlen = ((enccrd->crd_alg == CRYPTO_AES_CBC) ?
++				HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
++			if (enccrd->crd_flags & CRD_F_ENCRYPT) {
++				if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
++					bcopy(enccrd->crd_iv, cmd->iv, ivlen);
++				else
++					bcopy(sc->sc_sessions[session].hs_iv,
++					    cmd->iv, ivlen);
++
++				if ((enccrd->crd_flags & CRD_F_IV_PRESENT)
++				    == 0) {
++					crypto_copyback(crp->crp_flags,
++					    crp->crp_buf, enccrd->crd_inject,
++					    ivlen, cmd->iv);
++				}
++			} else {
++				if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
++					bcopy(enccrd->crd_iv, cmd->iv, ivlen);
++				else {
++					crypto_copydata(crp->crp_flags,
++					    crp->crp_buf, enccrd->crd_inject,
++					    ivlen, cmd->iv);
++				}
++			}
++		}
++
++		if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT)
++			cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;
++		cmd->ck = enccrd->crd_key;
++		cmd->cklen = enccrd->crd_klen >> 3;
++		cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;
++
++		/* 
++		 * Need to specify the size for the AES key in the masks.
++		 */
++		if ((cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) ==
++		    HIFN_CRYPT_CMD_ALG_AES) {
++			switch (cmd->cklen) {
++			case 16:
++				cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_128;
++				break;
++			case 24:
++				cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_192;
++				break;
++			case 32:
++				cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_256;
++				break;
++			default:
++				DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
++				err = EINVAL;
++				goto errout;
++			}
++		}
++	}
++
++	if (maccrd) {
++		cmd->maccrd = maccrd;
++		cmd->base_masks |= HIFN_BASE_CMD_MAC;
++
++		switch (maccrd->crd_alg) {
++		case CRYPTO_MD5:
++			cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
++			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
++			    HIFN_MAC_CMD_POS_IPSEC;
++                       break;
++		case CRYPTO_MD5_HMAC:
++			cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
++			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
++			    HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
++			break;
++		case CRYPTO_SHA1:
++			cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
++			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
++			    HIFN_MAC_CMD_POS_IPSEC;
++			break;
++		case CRYPTO_SHA1_HMAC:
++			cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
++			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
++			    HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
++			break;
++		}
++
++		if (maccrd->crd_alg == CRYPTO_SHA1_HMAC ||
++		     maccrd->crd_alg == CRYPTO_MD5_HMAC) {
++			cmd->mac_masks |= HIFN_MAC_CMD_NEW_KEY;
++			bcopy(maccrd->crd_key, cmd->mac, maccrd->crd_klen >> 3);
++			bzero(cmd->mac + (maccrd->crd_klen >> 3),
++			    HIFN_MAC_KEY_LENGTH - (maccrd->crd_klen >> 3));
++		}
++	}
++
++	cmd->crp = crp;
++	cmd->session_num = session;
++	cmd->softc = sc;
++
++	err = hifn_crypto(sc, cmd, crp, hint);
++	if (!err) {
++		return 0;
++	} else if (err == ERESTART) {
++		/*
++		 * There weren't enough resources to dispatch the request
++		 * to the part.  Notify the caller so they'll requeue this
++		 * request and resubmit it again soon.
++		 */
++#ifdef HIFN_DEBUG
++		if (hifn_debug)
++			device_printf(sc->sc_dev, "requeue request\n");
++#endif
++		kfree(cmd);
++		sc->sc_needwakeup |= CRYPTO_SYMQ;
++		return (err);
++	}
++
++errout:
++	if (cmd != NULL)
++		kfree(cmd);
++	if (err == EINVAL)
++		hifnstats.hst_invalid++;
++	else
++		hifnstats.hst_nomem++;
++	crp->crp_etype = err;
++	crypto_done(crp);
++	return (err);
++}
++
++static void
++hifn_abort(struct hifn_softc *sc)
++{
++	struct hifn_dma *dma = sc->sc_dma;
++	struct hifn_command *cmd;
++	struct cryptop *crp;
++	int i, u;
++
++	DPRINTF("%s()\n", __FUNCTION__);
++
++	i = dma->resk; u = dma->resu;
++	while (u != 0) {
++		cmd = dma->hifn_commands[i];
++		KASSERT(cmd != NULL, ("hifn_abort: null command slot %u", i));
++		dma->hifn_commands[i] = NULL;
++		crp = cmd->crp;
++
++		if ((dma->resr[i].l & htole32(HIFN_D_VALID)) == 0) {
++			/* Salvage what we can. */
++			u_int8_t *macbuf;
++
++			if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
++				macbuf = dma->result_bufs[i];
++				macbuf += 12;
++			} else
++				macbuf = NULL;
++			hifnstats.hst_opackets++;
++			hifn_callback(sc, cmd, macbuf);
++		} else {
++#if 0
++			if (cmd->src_map == cmd->dst_map) {
++				bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
++				    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
++			} else {
++				bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
++				    BUS_DMASYNC_POSTWRITE);
++				bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
++				    BUS_DMASYNC_POSTREAD);
++			}
++#endif
++
++			if (cmd->src_skb != cmd->dst_skb) {
++#ifdef NOTYET
++				m_freem(cmd->src_m);
++				crp->crp_buf = (caddr_t)cmd->dst_m;
++#else
++				device_printf(sc->sc_dev,
++						"%s,%d: CRYPTO_F_SKBUF src != dst not implemented\n",
++						__FILE__, __LINE__);
++#endif
++			}
++
++			/* non-shared buffers cannot be restarted */
++			if (cmd->src_map != cmd->dst_map) {
++				/*
++				 * XXX should be EAGAIN, delayed until
++				 * after the reset.
++				 */
++				crp->crp_etype = ENOMEM;
++				pci_unmap_buf(sc, &cmd->dst);
++			} else
++				crp->crp_etype = ENOMEM;
++
++			pci_unmap_buf(sc, &cmd->src);
++
++			kfree(cmd);
++			if (crp->crp_etype != EAGAIN)
++				crypto_done(crp);
++		}
++
++		if (++i == HIFN_D_RES_RSIZE)
++			i = 0;
++		u--;
++	}
++	dma->resk = i; dma->resu = u;
++
++	hifn_reset_board(sc, 1);
++	hifn_init_dma(sc);
++	hifn_init_pci_registers(sc);
++}
++
++static void
++hifn_callback(struct hifn_softc *sc, struct hifn_command *cmd, u_int8_t *macbuf)
++{
++	struct hifn_dma *dma = sc->sc_dma;
++	struct cryptop *crp = cmd->crp;
++	struct cryptodesc *crd;
++	int i, u, ivlen;
++
++	DPRINTF("%s()\n", __FUNCTION__);
++
++#if 0
++	if (cmd->src_map == cmd->dst_map) {
++		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
++		    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
++	} else {
++		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
++		    BUS_DMASYNC_POSTWRITE);
++		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
++		    BUS_DMASYNC_POSTREAD);
++	}
++#endif
++
++	if (crp->crp_flags & CRYPTO_F_SKBUF) {
++		if (cmd->src_skb != cmd->dst_skb) {
++#ifdef NOTYET
++			crp->crp_buf = (caddr_t)cmd->dst_m;
++			totlen = cmd->src_mapsize;
++			for (m = cmd->dst_m; m != NULL; m = m->m_next) {
++				if (totlen < m->m_len) {
++					m->m_len = totlen;
++					totlen = 0;
++				} else
++					totlen -= m->m_len;
++			}
++			cmd->dst_m->m_pkthdr.len = cmd->src_m->m_pkthdr.len;
++			m_freem(cmd->src_m);
++#else
++			device_printf(sc->sc_dev,
++					"%s,%d: CRYPTO_F_SKBUF src != dst not implemented\n",
++					__FILE__, __LINE__);
++#endif
++		}
++	}
++
++	if (cmd->sloplen != 0) {
++		crypto_copyback(crp->crp_flags, crp->crp_buf,
++		    cmd->src_mapsize - cmd->sloplen, cmd->sloplen,
++		    (caddr_t)&dma->slop[cmd->slopidx]);
++	}
++
++	i = dma->dstk; u = dma->dstu;
++	while (u != 0) {
++		if (i == HIFN_D_DST_RSIZE)
++			i = 0;
++#if 0
++		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
++		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
++#endif
++		if (dma->dstr[i].l & htole32(HIFN_D_VALID)) {
++#if 0
++			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
++			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
++#endif
++			break;
++		}
++		i++, u--;
++	}
++	dma->dstk = i; dma->dstu = u;
++
++	hifnstats.hst_obytes += cmd->dst_mapsize;
++
++	if ((cmd->base_masks & (HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE)) ==
++	    HIFN_BASE_CMD_CRYPT) {
++		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
++			if (crd->crd_alg != CRYPTO_DES_CBC &&
++			    crd->crd_alg != CRYPTO_3DES_CBC &&
++			    crd->crd_alg != CRYPTO_AES_CBC)
++				continue;
++			ivlen = ((crd->crd_alg == CRYPTO_AES_CBC) ?
++				HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
++			crypto_copydata(crp->crp_flags, crp->crp_buf,
++			    crd->crd_skip + crd->crd_len - ivlen, ivlen,
++			    cmd->softc->sc_sessions[cmd->session_num].hs_iv);
++			break;
++		}
++	}
++
++	if (macbuf != NULL) {
++		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
++                        int len;
++
++			if (crd->crd_alg != CRYPTO_MD5 &&
++			    crd->crd_alg != CRYPTO_SHA1 &&
++			    crd->crd_alg != CRYPTO_MD5_HMAC &&
++			    crd->crd_alg != CRYPTO_SHA1_HMAC) {
++				continue;
++			}
++			len = cmd->softc->sc_sessions[cmd->session_num].hs_mlen;
++			crypto_copyback(crp->crp_flags, crp->crp_buf,
++			    crd->crd_inject, len, macbuf);
++			break;
++		}
++	}
++
++	if (cmd->src_map != cmd->dst_map)
++		pci_unmap_buf(sc, &cmd->dst);
++	pci_unmap_buf(sc, &cmd->src);
++	kfree(cmd);
++	crypto_done(crp);
++}
++
++/*
++ * 7811 PB3 rev/2 parts lock-up on burst writes to Group 0
++ * and Group 1 registers; avoid conditions that could create
++ * burst writes by doing a read in between the writes.
++ *
++ * NB: The read we interpose is always to the same register;
++ *     we do this because reading from an arbitrary (e.g. last)
++ *     register may not always work.
++ */
++static void
++hifn_write_reg_0(struct hifn_softc *sc, bus_size_t reg, u_int32_t val)
++{
++	if (sc->sc_flags & HIFN_IS_7811) {
++		if (sc->sc_bar0_lastreg == reg - 4)
++			readl(sc->sc_bar0 + HIFN_0_PUCNFG);
++		sc->sc_bar0_lastreg = reg;
++	}
++	writel(val, sc->sc_bar0 + reg);
++}
++
++static void
++hifn_write_reg_1(struct hifn_softc *sc, bus_size_t reg, u_int32_t val)
++{
++	if (sc->sc_flags & HIFN_IS_7811) {
++		if (sc->sc_bar1_lastreg == reg - 4)
++			readl(sc->sc_bar1 + HIFN_1_REVID);
++		sc->sc_bar1_lastreg = reg;
++	}
++	writel(val, sc->sc_bar1 + reg);
++}
++
++
++static struct pci_device_id hifn_pci_tbl[] = {
++	{ PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7951,
++	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
++	{ PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7955,
++	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
++	{ PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7956,
++	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
++	{ PCI_VENDOR_NETSEC, PCI_PRODUCT_NETSEC_7751,
++	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
++	{ PCI_VENDOR_INVERTEX, PCI_PRODUCT_INVERTEX_AEON,
++	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
++	{ PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7811,
++	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
++	/*
++	 * Other vendors share this PCI ID as well, such as
++	 * http://www.powercrypt.com, and obviously they also
++	 * use the same key.
++	 */
++	{ PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7751,
++	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
++	{ 0, 0, 0, 0, 0, 0, }
++};
++MODULE_DEVICE_TABLE(pci, hifn_pci_tbl);
++
++static struct pci_driver hifn_driver = {
++	.name         = "hifn",
++	.id_table     = hifn_pci_tbl,
++	.probe        =	hifn_probe,
++	.remove       = hifn_remove,
++	/* add PM stuff here one day */
++};
++
++static int __init hifn_init (void)
++{
++	struct hifn_softc *sc = NULL;
++	int rc;
++
++	DPRINTF("%s(%p)\n", __FUNCTION__, hifn_init);
++
++	rc = pci_register_driver(&hifn_driver);
++	pci_register_driver_compat(&hifn_driver, rc);
++
++	return rc;
++}
++
++static void __exit hifn_exit (void)
++{
++	pci_unregister_driver(&hifn_driver);
++}
++
++module_init(hifn_init);
++module_exit(hifn_exit);
++
++MODULE_LICENSE("BSD");
++MODULE_AUTHOR("David McCullough <david_mccullough@securecomputing.com>");
++MODULE_DESCRIPTION("OCF driver for hifn PCI crypto devices");
+diff -Nur linux-2.6.30.orig/crypto/ocf/hifn/hifn7751reg.h linux-2.6.30/crypto/ocf/hifn/hifn7751reg.h
+--- linux-2.6.30.orig/crypto/ocf/hifn/hifn7751reg.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.30/crypto/ocf/hifn/hifn7751reg.h	2009-06-11 10:55:27.000000000 +0200
+@@ -0,0 +1,540 @@
++/* $FreeBSD: src/sys/dev/hifn/hifn7751reg.h,v 1.7 2007/03/21 03:42:49 sam Exp $ */
++/*	$OpenBSD: hifn7751reg.h,v 1.35 2002/04/08 17:49:42 jason Exp $	*/
++
++/*-
++ * Invertex AEON / Hifn 7751 driver
++ * Copyright (c) 1999 Invertex Inc. All rights reserved.
++ * Copyright (c) 1999 Theo de Raadt
++ * Copyright (c) 2000-2001 Network Security Technologies, Inc.
++ *			http://www.netsec.net
++ *
++ * Please send any comments, feedback, bug-fixes, or feature requests to
++ * software@invertex.com.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ *
++ * 1. Redistributions of source code must retain the above copyright
++ *    notice, this list of conditions and the following disclaimer.
++ * 2. Redistributions in binary form must reproduce the above copyright
++ *    notice, this list of conditions and the following disclaimer in the
++ *    documentation and/or other materials provided with the distribution.
++ * 3. The name of the author may not be used to endorse or promote products
++ *    derived from this software without specific prior written permission.
++ *
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
++ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
++ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
++ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
++ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
++ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ *
++ * Effort sponsored in part by the Defense Advanced Research Projects
++ * Agency (DARPA) and Air Force Research Laboratory, Air Force
++ * Materiel Command, USAF, under agreement number F30602-01-2-0537.
++ *
++ */
++#ifndef __HIFN_H__
++#define	__HIFN_H__
++
++/*
++ * Some PCI configuration space offset defines.  The names were made
++ * identical to the names used by the Linux kernel.
++ */
++#define	HIFN_BAR0		PCIR_BAR(0)	/* PUC register map */
++#define	HIFN_BAR1		PCIR_BAR(1)	/* DMA register map */
++#define	HIFN_TRDY_TIMEOUT	0x40
++#define	HIFN_RETRY_TIMEOUT	0x41
++
++/*
++ * PCI vendor and device identifiers
++ * (the names are preserved from their OpenBSD source).
++ */
++#define	PCI_VENDOR_HIFN		0x13a3		/* Hifn */
++#define	PCI_PRODUCT_HIFN_7751	0x0005		/* 7751 */
++#define	PCI_PRODUCT_HIFN_6500	0x0006		/* 6500 */
++#define	PCI_PRODUCT_HIFN_7811	0x0007		/* 7811 */
++#define	PCI_PRODUCT_HIFN_7855	0x001f		/* 7855 */
++#define	PCI_PRODUCT_HIFN_7951	0x0012		/* 7951 */
++#define	PCI_PRODUCT_HIFN_7955	0x0020		/* 7954/7955 */
++#define	PCI_PRODUCT_HIFN_7956	0x001d		/* 7956 */
++
++#define	PCI_VENDOR_INVERTEX	0x14e1		/* Invertex */
++#define	PCI_PRODUCT_INVERTEX_AEON 0x0005	/* AEON */
++
++#define	PCI_VENDOR_NETSEC	0x1660		/* NetSec */
++#define	PCI_PRODUCT_NETSEC_7751	0x7751		/* 7751 */
++
++/*
++ * The values below should multiple of 4 -- and be large enough to handle
++ * any command the driver implements.
++ *
++ * MAX_COMMAND = base command + mac command + encrypt command +
++ *			mac-key + rc4-key
++ * MAX_RESULT  = base result + mac result + mac + encrypt result
++ *			
++ *
++ */
++#define	HIFN_MAX_COMMAND	(8 + 8 + 8 + 64 + 260)
++#define	HIFN_MAX_RESULT		(8 + 4 + 20 + 4)
++
++/*
++ * hifn_desc_t
++ *
++ * Holds an individual descriptor for any of the rings.
++ */
++typedef struct hifn_desc {
++	volatile u_int32_t l;		/* length and status bits */
++	volatile u_int32_t p;
++} hifn_desc_t;
++
++/*
++ * Masks for the "length" field of struct hifn_desc.
++ */
++#define	HIFN_D_LENGTH		0x0000ffff	/* length bit mask */
++#define	HIFN_D_MASKDONEIRQ	0x02000000	/* mask the done interrupt */
++#define	HIFN_D_DESTOVER		0x04000000	/* destination overflow */
++#define	HIFN_D_OVER		0x08000000	/* overflow */
++#define	HIFN_D_LAST		0x20000000	/* last descriptor in chain */
++#define	HIFN_D_JUMP		0x40000000	/* jump descriptor */
++#define	HIFN_D_VALID		0x80000000	/* valid bit */
++
++
++/*
++ * Processing Unit Registers (offset from BASEREG0)
++ */
++#define	HIFN_0_PUDATA		0x00	/* Processing Unit Data */
++#define	HIFN_0_PUCTRL		0x04	/* Processing Unit Control */
++#define	HIFN_0_PUISR		0x08	/* Processing Unit Interrupt Status */
++#define	HIFN_0_PUCNFG		0x0c	/* Processing Unit Configuration */
++#define	HIFN_0_PUIER		0x10	/* Processing Unit Interrupt Enable */
++#define	HIFN_0_PUSTAT		0x14	/* Processing Unit Status/Chip ID */
++#define	HIFN_0_FIFOSTAT		0x18	/* FIFO Status */
++#define	HIFN_0_FIFOCNFG		0x1c	/* FIFO Configuration */
++#define	HIFN_0_PUCTRL2		0x28	/* Processing Unit Control (2nd map) */
++#define	HIFN_0_MUTE1		0x80
++#define	HIFN_0_MUTE2		0x90
++#define	HIFN_0_SPACESIZE	0x100	/* Register space size */
++
++/* Processing Unit Control Register (HIFN_0_PUCTRL) */
++#define	HIFN_PUCTRL_CLRSRCFIFO	0x0010	/* clear source fifo */
++#define	HIFN_PUCTRL_STOP	0x0008	/* stop pu */
++#define	HIFN_PUCTRL_LOCKRAM	0x0004	/* lock ram */
++#define	HIFN_PUCTRL_DMAENA	0x0002	/* enable dma */
++#define	HIFN_PUCTRL_RESET	0x0001	/* Reset processing unit */
++
++/* Processing Unit Interrupt Status Register (HIFN_0_PUISR) */
++#define	HIFN_PUISR_CMDINVAL	0x8000	/* Invalid command interrupt */
++#define	HIFN_PUISR_DATAERR	0x4000	/* Data error interrupt */
++#define	HIFN_PUISR_SRCFIFO	0x2000	/* Source FIFO ready interrupt */
++#define	HIFN_PUISR_DSTFIFO	0x1000	/* Destination FIFO ready interrupt */
++#define	HIFN_PUISR_DSTOVER	0x0200	/* Destination overrun interrupt */
++#define	HIFN_PUISR_SRCCMD	0x0080	/* Source command interrupt */
++#define	HIFN_PUISR_SRCCTX	0x0040	/* Source context interrupt */
++#define	HIFN_PUISR_SRCDATA	0x0020	/* Source data interrupt */
++#define	HIFN_PUISR_DSTDATA	0x0010	/* Destination data interrupt */
++#define	HIFN_PUISR_DSTRESULT	0x0004	/* Destination result interrupt */
++
++/* Processing Unit Configuration Register (HIFN_0_PUCNFG) */
++#define	HIFN_PUCNFG_DRAMMASK	0xe000	/* DRAM size mask */
++#define	HIFN_PUCNFG_DSZ_256K	0x0000	/* 256k dram */
++#define	HIFN_PUCNFG_DSZ_512K	0x2000	/* 512k dram */
++#define	HIFN_PUCNFG_DSZ_1M	0x4000	/* 1m dram */
++#define	HIFN_PUCNFG_DSZ_2M	0x6000	/* 2m dram */
++#define	HIFN_PUCNFG_DSZ_4M	0x8000	/* 4m dram */
++#define	HIFN_PUCNFG_DSZ_8M	0xa000	/* 8m dram */
++#define	HIFN_PUNCFG_DSZ_16M	0xc000	/* 16m dram */
++#define	HIFN_PUCNFG_DSZ_32M	0xe000	/* 32m dram */
++#define	HIFN_PUCNFG_DRAMREFRESH	0x1800	/* DRAM refresh rate mask */
++#define	HIFN_PUCNFG_DRFR_512	0x0000	/* 512 divisor of ECLK */
++#define	HIFN_PUCNFG_DRFR_256	0x0800	/* 256 divisor of ECLK */
++#define	HIFN_PUCNFG_DRFR_128	0x1000	/* 128 divisor of ECLK */
++#define	HIFN_PUCNFG_TCALLPHASES	0x0200	/* your guess is as good as mine... */
++#define	HIFN_PUCNFG_TCDRVTOTEM	0x0100	/* your guess is as good as mine... */
++#define	HIFN_PUCNFG_BIGENDIAN	0x0080	/* DMA big endian mode */
++#define	HIFN_PUCNFG_BUS32	0x0040	/* Bus width 32bits */
++#define	HIFN_PUCNFG_BUS16	0x0000	/* Bus width 16 bits */
++#define	HIFN_PUCNFG_CHIPID	0x0020	/* Allow chipid from PUSTAT */
++#define	HIFN_PUCNFG_DRAM	0x0010	/* Context RAM is DRAM */
++#define	HIFN_PUCNFG_SRAM	0x0000	/* Context RAM is SRAM */
++#define	HIFN_PUCNFG_COMPSING	0x0004	/* Enable single compression context */
++#define	HIFN_PUCNFG_ENCCNFG	0x0002	/* Encryption configuration */
++
++/* Processing Unit Interrupt Enable Register (HIFN_0_PUIER) */
++#define	HIFN_PUIER_CMDINVAL	0x8000	/* Invalid command interrupt */
++#define	HIFN_PUIER_DATAERR	0x4000	/* Data error interrupt */
++#define	HIFN_PUIER_SRCFIFO	0x2000	/* Source FIFO ready interrupt */
++#define	HIFN_PUIER_DSTFIFO	0x1000	/* Destination FIFO ready interrupt */
++#define	HIFN_PUIER_DSTOVER	0x0200	/* Destination overrun interrupt */
++#define	HIFN_PUIER_SRCCMD	0x0080	/* Source command interrupt */
++#define	HIFN_PUIER_SRCCTX	0x0040	/* Source context interrupt */
++#define	HIFN_PUIER_SRCDATA	0x0020	/* Source data interrupt */
++#define	HIFN_PUIER_DSTDATA	0x0010	/* Destination data interrupt */
++#define	HIFN_PUIER_DSTRESULT	0x0004	/* Destination result interrupt */
++
++/* Processing Unit Status Register/Chip ID (HIFN_0_PUSTAT) */
++#define	HIFN_PUSTAT_CMDINVAL	0x8000	/* Invalid command interrupt */
++#define	HIFN_PUSTAT_DATAERR	0x4000	/* Data error interrupt */
++#define	HIFN_PUSTAT_SRCFIFO	0x2000	/* Source FIFO ready interrupt */
++#define	HIFN_PUSTAT_DSTFIFO	0x1000	/* Destination FIFO ready interrupt */
++#define	HIFN_PUSTAT_DSTOVER	0x0200	/* Destination overrun interrupt */
++#define	HIFN_PUSTAT_SRCCMD	0x0080	/* Source command interrupt */
++#define	HIFN_PUSTAT_SRCCTX	0x0040	/* Source context interrupt */
++#define	HIFN_PUSTAT_SRCDATA	0x0020	/* Source data interrupt */
++#define	HIFN_PUSTAT_DSTDATA	0x0010	/* Destination data interrupt */
++#define	HIFN_PUSTAT_DSTRESULT	0x0004	/* Destination result interrupt */
++#define	HIFN_PUSTAT_CHIPREV	0x00ff	/* Chip revision mask */
++#define	HIFN_PUSTAT_CHIPENA	0xff00	/* Chip enabled mask */
++#define	HIFN_PUSTAT_ENA_2	0x1100	/* Level 2 enabled */
++#define	HIFN_PUSTAT_ENA_1	0x1000	/* Level 1 enabled */
++#define	HIFN_PUSTAT_ENA_0	0x3000	/* Level 0 enabled */
++#define	HIFN_PUSTAT_REV_2	0x0020	/* 7751 PT6/2 */
++#define	HIFN_PUSTAT_REV_3	0x0030	/* 7751 PT6/3 */
++
++/* FIFO Status Register (HIFN_0_FIFOSTAT) */
++#define	HIFN_FIFOSTAT_SRC	0x7f00	/* Source FIFO available */
++#define	HIFN_FIFOSTAT_DST	0x007f	/* Destination FIFO available */
++
++/* FIFO Configuration Register (HIFN_0_FIFOCNFG) */
++#define	HIFN_FIFOCNFG_THRESHOLD	0x0400	/* must be written as this value */
++
++/*
++ * DMA Interface Registers (offset from BASEREG1)
++ */
++#define	HIFN_1_DMA_CRAR		0x0c	/* DMA Command Ring Address */
++#define	HIFN_1_DMA_SRAR		0x1c	/* DMA Source Ring Address */
++#define	HIFN_1_DMA_RRAR		0x2c	/* DMA Result Ring Address */
++#define	HIFN_1_DMA_DRAR		0x3c	/* DMA Destination Ring Address */
++#define	HIFN_1_DMA_CSR		0x40	/* DMA Status and Control */
++#define	HIFN_1_DMA_IER		0x44	/* DMA Interrupt Enable */
++#define	HIFN_1_DMA_CNFG		0x48	/* DMA Configuration */
++#define	HIFN_1_PLL		0x4c	/* 7955/7956: PLL config */
++#define	HIFN_1_7811_RNGENA	0x60	/* 7811: rng enable */
++#define	HIFN_1_7811_RNGCFG	0x64	/* 7811: rng config */
++#define	HIFN_1_7811_RNGDAT	0x68	/* 7811: rng data */
++#define	HIFN_1_7811_RNGSTS	0x6c	/* 7811: rng status */
++#define	HIFN_1_DMA_CNFG2	0x6c	/* 7955/7956: dma config #2 */
++#define	HIFN_1_7811_MIPSRST	0x94	/* 7811: MIPS reset */
++#define	HIFN_1_REVID		0x98	/* Revision ID */
++
++#define	HIFN_1_PUB_RESET	0x204	/* Public/RNG Reset */
++#define	HIFN_1_PUB_BASE		0x300	/* Public Base Address */
++#define	HIFN_1_PUB_OPLEN	0x304	/* 7951-compat Public Operand Length */
++#define	HIFN_1_PUB_OP		0x308	/* 7951-compat Public Operand */
++#define	HIFN_1_PUB_STATUS	0x30c	/* 7951-compat Public Status */
++#define	HIFN_1_PUB_IEN		0x310	/* Public Interrupt enable */
++#define	HIFN_1_RNG_CONFIG	0x314	/* RNG config */
++#define	HIFN_1_RNG_DATA		0x318	/* RNG data */
++#define	HIFN_1_PUB_MODE		0x320	/* PK mode */
++#define	HIFN_1_PUB_FIFO_OPLEN	0x380	/* first element of oplen fifo */
++#define	HIFN_1_PUB_FIFO_OP	0x384	/* first element of op fifo */
++#define	HIFN_1_PUB_MEM		0x400	/* start of Public key memory */
++#define	HIFN_1_PUB_MEMEND	0xbff	/* end of Public key memory */
++
++/* DMA Status and Control Register (HIFN_1_DMA_CSR) */
++#define	HIFN_DMACSR_D_CTRLMASK	0xc0000000	/* Destinition Ring Control */
++#define	HIFN_DMACSR_D_CTRL_NOP	0x00000000	/* Dest. Control: no-op */
++#define	HIFN_DMACSR_D_CTRL_DIS	0x40000000	/* Dest. Control: disable */
++#define	HIFN_DMACSR_D_CTRL_ENA	0x80000000	/* Dest. Control: enable */
++#define	HIFN_DMACSR_D_ABORT	0x20000000	/* Destinition Ring PCIAbort */
++#define	HIFN_DMACSR_D_DONE	0x10000000	/* Destinition Ring Done */
++#define	HIFN_DMACSR_D_LAST	0x08000000	/* Destinition Ring Last */
++#define	HIFN_DMACSR_D_WAIT	0x04000000	/* Destinition Ring Waiting */
++#define	HIFN_DMACSR_D_OVER	0x02000000	/* Destinition Ring Overflow */
++#define	HIFN_DMACSR_R_CTRL	0x00c00000	/* Result Ring Control */
++#define	HIFN_DMACSR_R_CTRL_NOP	0x00000000	/* Result Control: no-op */
++#define	HIFN_DMACSR_R_CTRL_DIS	0x00400000	/* Result Control: disable */
++#define	HIFN_DMACSR_R_CTRL_ENA	0x00800000	/* Result Control: enable */
++#define	HIFN_DMACSR_R_ABORT	0x00200000	/* Result Ring PCI Abort */
++#define	HIFN_DMACSR_R_DONE	0x00100000	/* Result Ring Done */
++#define	HIFN_DMACSR_R_LAST	0x00080000	/* Result Ring Last */
++#define	HIFN_DMACSR_R_WAIT	0x00040000	/* Result Ring Waiting */
++#define	HIFN_DMACSR_R_OVER	0x00020000	/* Result Ring Overflow */
++#define	HIFN_DMACSR_S_CTRL	0x0000c000	/* Source Ring Control */
++#define	HIFN_DMACSR_S_CTRL_NOP	0x00000000	/* Source Control: no-op */
++#define	HIFN_DMACSR_S_CTRL_DIS	0x00004000	/* Source Control: disable */
++#define	HIFN_DMACSR_S_CTRL_ENA	0x00008000	/* Source Control: enable */
++#define	HIFN_DMACSR_S_ABORT	0x00002000	/* Source Ring PCI Abort */
++#define	HIFN_DMACSR_S_DONE	0x00001000	/* Source Ring Done */
++#define	HIFN_DMACSR_S_LAST	0x00000800	/* Source Ring Last */
++#define	HIFN_DMACSR_S_WAIT	0x00000400	/* Source Ring Waiting */
++#define	HIFN_DMACSR_ILLW	0x00000200	/* Illegal write (7811 only) */
++#define	HIFN_DMACSR_ILLR	0x00000100	/* Illegal read (7811 only) */
++#define	HIFN_DMACSR_C_CTRL	0x000000c0	/* Command Ring Control */
++#define	HIFN_DMACSR_C_CTRL_NOP	0x00000000	/* Command Control: no-op */
++#define	HIFN_DMACSR_C_CTRL_DIS	0x00000040	/* Command Control: disable */
++#define	HIFN_DMACSR_C_CTRL_ENA	0x00000080	/* Command Control: enable */
++#define	HIFN_DMACSR_C_ABORT	0x00000020	/* Command Ring PCI Abort */
++#define	HIFN_DMACSR_C_DONE	0x00000010	/* Command Ring Done */
++#define	HIFN_DMACSR_C_LAST	0x00000008	/* Command Ring Last */
++#define	HIFN_DMACSR_C_WAIT	0x00000004	/* Command Ring Waiting */
++#define	HIFN_DMACSR_PUBDONE	0x00000002	/* Public op done (7951 only) */
++#define	HIFN_DMACSR_ENGINE	0x00000001	/* Command Ring Engine IRQ */
++
++/* DMA Interrupt Enable Register (HIFN_1_DMA_IER) */
++#define	HIFN_DMAIER_D_ABORT	0x20000000	/* Destination Ring PCIAbort */
++#define	HIFN_DMAIER_D_DONE	0x10000000	/* Destination Ring Done */
++#define	HIFN_DMAIER_D_LAST	0x08000000	/* Destination Ring Last */
++#define	HIFN_DMAIER_D_WAIT	0x04000000	/* Destination Ring Waiting */
++#define	HIFN_DMAIER_D_OVER	0x02000000	/* Destination Ring Overflow */
++#define	HIFN_DMAIER_R_ABORT	0x00200000	/* Result Ring PCI Abort */
++#define	HIFN_DMAIER_R_DONE	0x00100000	/* Result Ring Done */
++#define	HIFN_DMAIER_R_LAST	0x00080000	/* Result Ring Last */
++#define	HIFN_DMAIER_R_WAIT	0x00040000	/* Result Ring Waiting */
++#define	HIFN_DMAIER_R_OVER	0x00020000	/* Result Ring Overflow */
++#define	HIFN_DMAIER_S_ABORT	0x00002000	/* Source Ring PCI Abort */
++#define	HIFN_DMAIER_S_DONE	0x00001000	/* Source Ring Done */
++#define	HIFN_DMAIER_S_LAST	0x00000800	/* Source Ring Last */
++#define	HIFN_DMAIER_S_WAIT	0x00000400	/* Source Ring Waiting */
++#define	HIFN_DMAIER_ILLW	0x00000200	/* Illegal write (7811 only) */
++#define	HIFN_DMAIER_ILLR	0x00000100	/* Illegal read (7811 only) */
++#define	HIFN_DMAIER_C_ABORT	0x00000020	/* Command Ring PCI Abort */
++#define	HIFN_DMAIER_C_DONE	0x00000010	/* Command Ring Done */
++#define	HIFN_DMAIER_C_LAST	0x00000008	/* Command Ring Last */
++#define	HIFN_DMAIER_C_WAIT	0x00000004	/* Command Ring Waiting */
++#define	HIFN_DMAIER_PUBDONE	0x00000002	/* public op done (7951 only) */
++#define	HIFN_DMAIER_ENGINE	0x00000001	/* Engine IRQ */
++
++/* DMA Configuration Register (HIFN_1_DMA_CNFG) */
++#define	HIFN_DMACNFG_BIGENDIAN	0x10000000	/* big endian mode */
++#define	HIFN_DMACNFG_POLLFREQ	0x00ff0000	/* Poll frequency mask */
++#define	HIFN_DMACNFG_UNLOCK	0x00000800
++#define	HIFN_DMACNFG_POLLINVAL	0x00000700	/* Invalid Poll Scalar */
++#define	HIFN_DMACNFG_LAST	0x00000010	/* Host control LAST bit */
++#define	HIFN_DMACNFG_MODE	0x00000004	/* DMA mode */
++#define	HIFN_DMACNFG_DMARESET	0x00000002	/* DMA Reset # */
++#define	HIFN_DMACNFG_MSTRESET	0x00000001	/* Master Reset # */
++
++/* DMA Configuration Register (HIFN_1_DMA_CNFG2) */
++#define	HIFN_DMACNFG2_PKSWAP32	(1 << 19)	/* swap the OPLEN/OP reg */
++#define	HIFN_DMACNFG2_PKSWAP8	(1 << 18)	/* swap the bits of OPLEN/OP */
++#define	HIFN_DMACNFG2_BAR0_SWAP32 (1<<17)	/* swap the bytes of BAR0 */
++#define	HIFN_DMACNFG2_BAR1_SWAP8 (1<<16)	/* swap the bits  of BAR0 */
++#define	HIFN_DMACNFG2_INIT_WRITE_BURST_SHIFT 12
++#define	HIFN_DMACNFG2_INIT_READ_BURST_SHIFT 8
++#define	HIFN_DMACNFG2_TGT_WRITE_BURST_SHIFT 4
++#define	HIFN_DMACNFG2_TGT_READ_BURST_SHIFT  0
++
++/* 7811 RNG Enable Register (HIFN_1_7811_RNGENA) */
++#define	HIFN_7811_RNGENA_ENA	0x00000001	/* enable RNG */
++
++/* 7811 RNG Config Register (HIFN_1_7811_RNGCFG) */
++#define	HIFN_7811_RNGCFG_PRE1	0x00000f00	/* first prescalar */
++#define	HIFN_7811_RNGCFG_OPRE	0x00000080	/* output prescalar */
++#define	HIFN_7811_RNGCFG_DEFL	0x00000f80	/* 2 words/ 1/100 sec */
++
++/* 7811 RNG Status Register (HIFN_1_7811_RNGSTS) */
++#define	HIFN_7811_RNGSTS_RDY	0x00004000	/* two numbers in FIFO */
++#define	HIFN_7811_RNGSTS_UFL	0x00001000	/* rng underflow */
++
++/* 7811 MIPS Reset Register (HIFN_1_7811_MIPSRST) */
++#define	HIFN_MIPSRST_BAR2SIZE	0xffff0000	/* sdram size */
++#define	HIFN_MIPSRST_GPRAMINIT	0x00008000	/* gpram can be accessed */
++#define	HIFN_MIPSRST_CRAMINIT	0x00004000	/* ctxram can be accessed */
++#define	HIFN_MIPSRST_LED2	0x00000400	/* external LED2 */
++#define	HIFN_MIPSRST_LED1	0x00000200	/* external LED1 */
++#define	HIFN_MIPSRST_LED0	0x00000100	/* external LED0 */
++#define	HIFN_MIPSRST_MIPSDIS	0x00000004	/* disable MIPS */
++#define	HIFN_MIPSRST_MIPSRST	0x00000002	/* warm reset MIPS */
++#define	HIFN_MIPSRST_MIPSCOLD	0x00000001	/* cold reset MIPS */
++
++/* Public key reset register (HIFN_1_PUB_RESET) */
++#define	HIFN_PUBRST_RESET	0x00000001	/* reset public/rng unit */
++
++/* Public operation register (HIFN_1_PUB_OP) */
++#define	HIFN_PUBOP_AOFFSET	0x0000003e	/* A offset */
++#define	HIFN_PUBOP_BOFFSET	0x00000fc0	/* B offset */
++#define	HIFN_PUBOP_MOFFSET	0x0003f000	/* M offset */
++#define	HIFN_PUBOP_OP_MASK	0x003c0000	/* Opcode: */
++#define	HIFN_PUBOP_OP_NOP	0x00000000	/*  NOP */
++#define	HIFN_PUBOP_OP_ADD	0x00040000	/*  ADD */
++#define	HIFN_PUBOP_OP_ADDC	0x00080000	/*  ADD w/carry */
++#define	HIFN_PUBOP_OP_SUB	0x000c0000	/*  SUB */
++#define	HIFN_PUBOP_OP_SUBC	0x00100000	/*  SUB w/carry */
++#define	HIFN_PUBOP_OP_MODADD	0x00140000	/*  Modular ADD */
++#define	HIFN_PUBOP_OP_MODSUB	0x00180000	/*  Modular SUB */
++#define	HIFN_PUBOP_OP_INCA	0x001c0000	/*  INC A */
++#define	HIFN_PUBOP_OP_DECA	0x00200000	/*  DEC A */
++#define	HIFN_PUBOP_OP_MULT	0x00240000	/*  MULT */
++#define	HIFN_PUBOP_OP_MODMULT	0x00280000	/*  Modular MULT */
++#define	HIFN_PUBOP_OP_MODRED	0x002c0000	/*  Modular Red */
++#define	HIFN_PUBOP_OP_MODEXP	0x00300000	/*  Modular Exp */
++
++/* Public operand length register (HIFN_1_PUB_OPLEN) */
++#define	HIFN_PUBOPLEN_MODLEN	0x0000007f
++#define	HIFN_PUBOPLEN_EXPLEN	0x0003ff80
++#define	HIFN_PUBOPLEN_REDLEN	0x003c0000
++
++/* Public status register (HIFN_1_PUB_STATUS) */
++#define	HIFN_PUBSTS_DONE	0x00000001	/* operation done */
++#define	HIFN_PUBSTS_CARRY	0x00000002	/* carry */
++#define	HIFN_PUBSTS_FIFO_EMPTY	0x00000100	/* fifo empty */
++#define	HIFN_PUBSTS_FIFO_FULL	0x00000200	/* fifo full */
++#define	HIFN_PUBSTS_FIFO_OVFL	0x00000400	/* fifo overflow */
++#define	HIFN_PUBSTS_FIFO_WRITE	0x000f0000	/* fifo write */
++#define	HIFN_PUBSTS_FIFO_READ	0x0f000000	/* fifo read */
++
++/* Public interrupt enable register (HIFN_1_PUB_IEN) */
++#define	HIFN_PUBIEN_DONE	0x00000001	/* operation done interrupt */
++
++/* Random number generator config register (HIFN_1_RNG_CONFIG) */
++#define	HIFN_RNGCFG_ENA		0x00000001	/* enable rng */
++
++/*
++ * Register offsets in register set 1
++ */
++
++#define	HIFN_UNLOCK_SECRET1	0xf4
++#define	HIFN_UNLOCK_SECRET2	0xfc
++
++/*
++ * PLL config register
++ *
++ * This register is present only on 7954/7955/7956 parts. It must be
++ * programmed according to the bus interface method used by the h/w.
++ * Note that the parts require a stable clock.  Since the PCI clock
++ * may vary, the reference clock must usually be used.  To avoid
++ * overclocking the core logic, setup must be done carefully, refer
++ * to the driver for details.  The exact multiplier required varies
++ * by part and system configuration; refer to the Hifn documentation.
++ */
++#define	HIFN_PLL_REF_SEL	0x00000001	/* REF/HBI clk selection */
++#define	HIFN_PLL_BP		0x00000002	/* bypass (used during setup) */
++/* bit 2 reserved */
++#define	HIFN_PLL_PK_CLK_SEL	0x00000008	/* public key clk select */
++#define	HIFN_PLL_PE_CLK_SEL	0x00000010	/* packet engine clk select */
++/* bits 5-9 reserved */
++#define	HIFN_PLL_MBSET		0x00000400	/* must be set to 1 */
++#define	HIFN_PLL_ND		0x00003800	/* Fpll_ref multiplier select */
++#define	HIFN_PLL_ND_SHIFT	11
++#define	HIFN_PLL_ND_2		0x00000000	/* 2x */
++#define	HIFN_PLL_ND_4		0x00000800	/* 4x */
++#define	HIFN_PLL_ND_6		0x00001000	/* 6x */
++#define	HIFN_PLL_ND_8		0x00001800	/* 8x */
++#define	HIFN_PLL_ND_10		0x00002000	/* 10x */
++#define	HIFN_PLL_ND_12		0x00002800	/* 12x */
++/* bits 14-15 reserved */
++#define	HIFN_PLL_IS		0x00010000	/* charge pump current select */
++/* bits 17-31 reserved */
++
++/*
++ * Board configuration specifies only these bits.
++ */
++#define	HIFN_PLL_CONFIG		(HIFN_PLL_IS|HIFN_PLL_ND|HIFN_PLL_REF_SEL)
++
++/*
++ * Public Key Engine Mode Register
++ */
++#define	HIFN_PKMODE_HOSTINVERT	(1 << 0)	/* HOST INVERT */
++#define	HIFN_PKMODE_ENHANCED	(1 << 1)	/* Enable enhanced mode */
++
++
++/*********************************************************************
++ * Structs for board commands 
++ *
++ *********************************************************************/
++
++/*
++ * Structure to help build up the command data structure.
++ */
++typedef struct hifn_base_command {
++	volatile u_int16_t masks;
++	volatile u_int16_t session_num;
++	volatile u_int16_t total_source_count;
++	volatile u_int16_t total_dest_count;
++} hifn_base_command_t;
++
++#define	HIFN_BASE_CMD_MAC		0x0400
++#define	HIFN_BASE_CMD_CRYPT		0x0800
++#define	HIFN_BASE_CMD_DECODE		0x2000
++#define	HIFN_BASE_CMD_SRCLEN_M		0xc000
++#define	HIFN_BASE_CMD_SRCLEN_S		14
++#define	HIFN_BASE_CMD_DSTLEN_M		0x3000
++#define	HIFN_BASE_CMD_DSTLEN_S		12
++#define	HIFN_BASE_CMD_LENMASK_HI	0x30000
++#define	HIFN_BASE_CMD_LENMASK_LO	0x0ffff
++
++/*
++ * Structure to help build up the command data structure.
++ */
++typedef struct hifn_crypt_command {
++	volatile u_int16_t masks;
++	volatile u_int16_t header_skip;
++	volatile u_int16_t source_count;
++	volatile u_int16_t reserved;
++} hifn_crypt_command_t;
++
++#define	HIFN_CRYPT_CMD_ALG_MASK		0x0003		/* algorithm: */
++#define	HIFN_CRYPT_CMD_ALG_DES		0x0000		/*   DES */
++#define	HIFN_CRYPT_CMD_ALG_3DES		0x0001		/*   3DES */
++#define	HIFN_CRYPT_CMD_ALG_RC4		0x0002		/*   RC4 */
++#define	HIFN_CRYPT_CMD_ALG_AES		0x0003		/*   AES */
++#define	HIFN_CRYPT_CMD_MODE_MASK	0x0018		/* Encrypt mode: */
++#define	HIFN_CRYPT_CMD_MODE_ECB		0x0000		/*   ECB */
++#define	HIFN_CRYPT_CMD_MODE_CBC		0x0008		/*   CBC */
++#define	HIFN_CRYPT_CMD_MODE_CFB		0x0010		/*   CFB */
++#define	HIFN_CRYPT_CMD_MODE_OFB		0x0018		/*   OFB */
++#define	HIFN_CRYPT_CMD_CLR_CTX		0x0040		/* clear context */
++#define	HIFN_CRYPT_CMD_NEW_KEY		0x0800		/* expect new key */
++#define	HIFN_CRYPT_CMD_NEW_IV		0x1000		/* expect new iv */
++
++#define	HIFN_CRYPT_CMD_SRCLEN_M		0xc000
++#define	HIFN_CRYPT_CMD_SRCLEN_S		14
++
++#define	HIFN_CRYPT_CMD_KSZ_MASK		0x0600		/* AES key size: */
++#define	HIFN_CRYPT_CMD_KSZ_128		0x0000		/*   128 bit */
++#define	HIFN_CRYPT_CMD_KSZ_192		0x0200		/*   192 bit */
++#define	HIFN_CRYPT_CMD_KSZ_256		0x0400		/*   256 bit */
++
++/*
++ * Structure to help build up the command data structure.
++ */
++typedef struct hifn_mac_command {
++	volatile u_int16_t masks;
++	volatile u_int16_t header_skip;
++	volatile u_int16_t source_count;
++	volatile u_int16_t reserved;
++} hifn_mac_command_t;
++
++#define	HIFN_MAC_CMD_ALG_MASK		0x0001
++#define	HIFN_MAC_CMD_ALG_SHA1		0x0000
++#define	HIFN_MAC_CMD_ALG_MD5		0x0001
++#define	HIFN_MAC_CMD_MODE_MASK		0x000c
++#define	HIFN_MAC_CMD_MODE_HMAC		0x0000
++#define	HIFN_MAC_CMD_MODE_SSL_MAC	0x0004
++#define	HIFN_MAC_CMD_MODE_HASH		0x0008
++#define	HIFN_MAC_CMD_MODE_FULL		0x0004
++#define	HIFN_MAC_CMD_TRUNC		0x0010
++#define	HIFN_MAC_CMD_RESULT		0x0020
++#define	HIFN_MAC_CMD_APPEND		0x0040
++#define	HIFN_MAC_CMD_SRCLEN_M		0xc000
++#define	HIFN_MAC_CMD_SRCLEN_S		14
++
++/*
++ * MAC POS IPsec initiates authentication after encryption on encodes
++ * and before decryption on decodes.
++ */
++#define	HIFN_MAC_CMD_POS_IPSEC		0x0200
++#define	HIFN_MAC_CMD_NEW_KEY		0x0800
++
++/*
++ * The poll frequency and poll scalar defines are unshifted values used
++ * to set fields in the DMA Configuration Register.
++ */
++#ifndef HIFN_POLL_FREQUENCY
++#define	HIFN_POLL_FREQUENCY	0x1
++#endif
++
++#ifndef HIFN_POLL_SCALAR
++#define	HIFN_POLL_SCALAR	0x0
++#endif
++
++#define	HIFN_MAX_SEGLEN 	0xffff		/* maximum dma segment len */
++#define	HIFN_MAX_DMALEN		0x3ffff		/* maximum dma length */
++#endif /* __HIFN_H__ */
+diff -Nur linux-2.6.30.orig/crypto/ocf/hifn/hifn7751var.h linux-2.6.30/crypto/ocf/hifn/hifn7751var.h
+--- linux-2.6.30.orig/crypto/ocf/hifn/hifn7751var.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.30/crypto/ocf/hifn/hifn7751var.h	2009-06-11 10:55:27.000000000 +0200
+@@ -0,0 +1,369 @@
++/* $FreeBSD: src/sys/dev/hifn/hifn7751var.h,v 1.9 2007/03/21 03:42:49 sam Exp $ */
++/*	$OpenBSD: hifn7751var.h,v 1.42 2002/04/08 17:49:42 jason Exp $	*/
++
++/*-
++ * Invertex AEON / Hifn 7751 driver
++ * Copyright (c) 1999 Invertex Inc. All rights reserved.
++ * Copyright (c) 1999 Theo de Raadt
++ * Copyright (c) 2000-2001 Network Security Technologies, Inc.
++ *			http://www.netsec.net
++ *
++ * Please send any comments, feedback, bug-fixes, or feature requests to
++ * software@invertex.com.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ *
++ * 1. Redistributions of source code must retain the above copyright
++ *    notice, this list of conditions and the following disclaimer.
++ * 2. Redistributions in binary form must reproduce the above copyright
++ *    notice, this list of conditions and the following disclaimer in the
++ *    documentation and/or other materials provided with the distribution.
++ * 3. The name of the author may not be used to endorse or promote products
++ *    derived from this software without specific prior written permission.
++ *
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
++ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
++ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
++ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
++ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
++ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ *
++ * Effort sponsored in part by the Defense Advanced Research Projects
++ * Agency (DARPA) and Air Force Research Laboratory, Air Force
++ * Materiel Command, USAF, under agreement number F30602-01-2-0537.
++ *
++ */
++
++#ifndef __HIFN7751VAR_H__
++#define __HIFN7751VAR_H__
++
++#ifdef __KERNEL__
++
++/*
++ * Some configurable values for the driver.  By default command+result
++ * descriptor rings are the same size.  The src+dst descriptor rings
++ * are sized at 3.5x the number of potential commands.  Slower parts
++ * (e.g. 7951) tend to run out of src descriptors; faster parts (7811)
++ * src+cmd/result descriptors.  It's not clear that increasing the size
++ * of the descriptor rings helps performance significantly as other
++ * factors tend to come into play (e.g. copying misaligned packets).
++ */
++#define	HIFN_D_CMD_RSIZE	24	/* command descriptors */
++#define	HIFN_D_SRC_RSIZE	((HIFN_D_CMD_RSIZE * 7) / 2)	/* source descriptors */
++#define	HIFN_D_RES_RSIZE	HIFN_D_CMD_RSIZE	/* result descriptors */
++#define	HIFN_D_DST_RSIZE	HIFN_D_SRC_RSIZE	/* destination descriptors */
++
++/*
++ *  Length values for cryptography
++ */
++#define HIFN_DES_KEY_LENGTH		8
++#define HIFN_3DES_KEY_LENGTH		24
++#define HIFN_MAX_CRYPT_KEY_LENGTH	HIFN_3DES_KEY_LENGTH
++#define HIFN_IV_LENGTH			8
++#define	HIFN_AES_IV_LENGTH		16
++#define HIFN_MAX_IV_LENGTH		HIFN_AES_IV_LENGTH
++
++/*
++ *  Length values for authentication
++ */
++#define HIFN_MAC_KEY_LENGTH		64
++#define HIFN_MD5_LENGTH			16
++#define HIFN_SHA1_LENGTH		20
++#define HIFN_MAC_TRUNC_LENGTH		12
++
++#define MAX_SCATTER 64
++
++/*
++ * Data structure to hold all 4 rings and any other ring related data.
++ */
++struct hifn_dma {
++	/*
++	 *  Descriptor rings.  We add +1 to the size to accommodate the
++	 *  jump descriptor.
++	 */
++	struct hifn_desc	cmdr[HIFN_D_CMD_RSIZE+1];
++	struct hifn_desc	srcr[HIFN_D_SRC_RSIZE+1];
++	struct hifn_desc	dstr[HIFN_D_DST_RSIZE+1];
++	struct hifn_desc	resr[HIFN_D_RES_RSIZE+1];
++
++	struct hifn_command	*hifn_commands[HIFN_D_RES_RSIZE];
++
++	u_char			command_bufs[HIFN_D_CMD_RSIZE][HIFN_MAX_COMMAND];
++	u_char			result_bufs[HIFN_D_CMD_RSIZE][HIFN_MAX_RESULT];
++	u_int32_t		slop[HIFN_D_CMD_RSIZE];
++
++	u_int64_t		test_src, test_dst;
++
++	/*
++	 *  Our current positions for insertion and removal from the descriptor
++	 *  rings. 
++	 */
++	int			cmdi, srci, dsti, resi;
++	volatile int		cmdu, srcu, dstu, resu;
++	int			cmdk, srck, dstk, resk;
++};
++
++struct hifn_session {
++	int hs_used;
++	int hs_mlen;
++	u_int8_t hs_iv[HIFN_MAX_IV_LENGTH];
++};
++
++#define	HIFN_RING_SYNC(sc, r, i, f)					\
++	/* DAVIDM bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_dmamap, (f)) */
++
++#define	HIFN_CMDR_SYNC(sc, i, f)	HIFN_RING_SYNC((sc), cmdr, (i), (f))
++#define	HIFN_RESR_SYNC(sc, i, f)	HIFN_RING_SYNC((sc), resr, (i), (f))
++#define	HIFN_SRCR_SYNC(sc, i, f)	HIFN_RING_SYNC((sc), srcr, (i), (f))
++#define	HIFN_DSTR_SYNC(sc, i, f)	HIFN_RING_SYNC((sc), dstr, (i), (f))
++
++#define	HIFN_CMD_SYNC(sc, i, f)						\
++	/* DAVIDM bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_dmamap, (f)) */
++
++#define	HIFN_RES_SYNC(sc, i, f)						\
++	/* DAVIDM bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_dmamap, (f)) */
++
++typedef int bus_size_t;
++
++/*
++ * Holds data specific to a single HIFN board.
++ */
++struct hifn_softc {
++	softc_device_decl		 sc_dev;
++
++	struct pci_dev		*sc_pcidev;	/* PCI device pointer */
++	spinlock_t		sc_mtx;		/* per-instance lock */
++
++	int			sc_num;		/* for multiple devs */
++
++	ocf_iomem_t		sc_bar0;
++	bus_size_t		sc_bar0_lastreg;/* bar0 last reg written */
++	ocf_iomem_t		sc_bar1;
++	bus_size_t		sc_bar1_lastreg;/* bar1 last reg written */
++
++	int			sc_irq;
++
++	u_int32_t		sc_dmaier;
++	u_int32_t		sc_drammodel;	/* 1=dram, 0=sram */
++	u_int32_t		sc_pllconfig;	/* 7954/7955/7956 PLL config */
++
++	struct hifn_dma		*sc_dma;
++	dma_addr_t		sc_dma_physaddr;/* physical address of sc_dma */
++
++	int			sc_dmansegs;
++	int32_t			sc_cid;
++	int			sc_maxses;
++	int			sc_nsessions;
++	struct hifn_session	*sc_sessions;
++	int			sc_ramsize;
++	int			sc_flags;
++#define	HIFN_HAS_RNG		0x1	/* includes random number generator */
++#define	HIFN_HAS_PUBLIC		0x2	/* includes public key support */
++#define	HIFN_HAS_AES		0x4	/* includes AES support */
++#define	HIFN_IS_7811		0x8	/* Hifn 7811 part */
++#define	HIFN_IS_7956		0x10	/* Hifn 7956/7955 don't have SDRAM */
++
++	struct timer_list	sc_tickto;	/* for managing DMA */
++
++	int			sc_rngfirst;
++	int			sc_rnghz;	/* RNG polling frequency */
++
++	int			sc_c_busy;	/* command ring busy */
++	int			sc_s_busy;	/* source data ring busy */
++	int			sc_d_busy;	/* destination data ring busy */
++	int			sc_r_busy;	/* result ring busy */
++	int			sc_active;	/* for initial countdown */
++	int			sc_needwakeup;	/* ops q'd waiting on resources */
++	int			sc_curbatch;	/* # ops submitted w/o int */
++	int			sc_suspended;
++#ifdef HIFN_VULCANDEV
++	struct cdev            *sc_pkdev;
++#endif
++};
++
++#define	HIFN_LOCK(_sc)		spin_lock_irqsave(&(_sc)->sc_mtx, l_flags)
++#define	HIFN_UNLOCK(_sc)	spin_unlock_irqrestore(&(_sc)->sc_mtx, l_flags)
++
++/*
++ *  hifn_command_t
++ *
++ *  This is the control structure used to pass commands to hifn_encrypt().
++ *
++ *  flags
++ *  -----
++ *  Flags is the bitwise "or" values for command configuration.  A single
++ *  encrypt direction needs to be set:
++ *
++ *	HIFN_ENCODE or HIFN_DECODE
++ *
++ *  To use cryptography, a single crypto algorithm must be included:
++ *
++ *	HIFN_CRYPT_3DES or HIFN_CRYPT_DES
++ *
++ *  To use authentication, a single MAC algorithm must be included:
++ *
++ *	HIFN_MAC_MD5 or HIFN_MAC_SHA1
++ *
++ *  By default MD5 uses a 16 byte hash and SHA-1 uses a 20 byte hash.
++ *  If the value below is set, hash values are truncated or assumed
++ *  truncated to 12 bytes:
++ *
++ *	HIFN_MAC_TRUNC
++ *
++ *  Keys for encryption and authentication can be sent as part of a command,
++ *  or the last key value used with a particular session can be retrieved
++ *  and used again if either of these flags are not specified.
++ *
++ *	HIFN_CRYPT_NEW_KEY, HIFN_MAC_NEW_KEY
++ *
++ *  session_num
++ *  -----------
++ *  A number between 0 and 2048 (for DRAM models) or a number between 
++ *  0 and 768 (for SRAM models).  Those who don't want to use session
++ *  numbers should leave value at zero and send a new crypt key and/or
++ *  new MAC key on every command.  If you use session numbers and
++ *  don't send a key with a command, the last key sent for that same
++ *  session number will be used.
++ *
++ *  Warning:  Using session numbers and multiboard at the same time
++ *            is currently broken.
++ *
++ *  mbuf
++ *  ----
++ *  Either fill in the mbuf pointer and npa=0 or
++ *	 fill packp[] and packl[] and set npa to > 0
++ * 
++ *  mac_header_skip
++ *  ---------------
++ *  The number of bytes of the source_buf that are skipped over before
++ *  authentication begins.  This must be a number between 0 and 2^16-1
++ *  and can be used by IPsec implementers to skip over IP headers.
++ *  *** Value ignored if authentication not used ***
++ *
++ *  crypt_header_skip
++ *  -----------------
++ *  The number of bytes of the source_buf that are skipped over before
++ *  the cryptographic operation begins.  This must be a number between 0
++ *  and 2^16-1.  For IPsec, this number will always be 8 bytes larger
++ *  than the auth_header_skip (to skip over the ESP header).
++ *  *** Value ignored if cryptography not used ***
++ *
++ */
++struct hifn_operand {
++	union {
++		struct sk_buff *skb;
++		struct uio *io;
++		unsigned char *buf;
++	} u;
++	void		*map;
++	bus_size_t	mapsize;
++	int		nsegs;
++	struct {
++	    dma_addr_t  ds_addr;
++	    int         ds_len;
++	} segs[MAX_SCATTER];
++};
++
++struct hifn_command {
++	u_int16_t session_num;
++	u_int16_t base_masks, cry_masks, mac_masks;
++	u_int8_t iv[HIFN_MAX_IV_LENGTH], *ck, mac[HIFN_MAC_KEY_LENGTH];
++	int cklen;
++	int sloplen, slopidx;
++
++	struct hifn_operand src;
++	struct hifn_operand dst;
++
++	struct hifn_softc *softc;
++	struct cryptop *crp;
++	struct cryptodesc *enccrd, *maccrd;
++};
++
++#define	src_skb		src.u.skb
++#define	src_io		src.u.io
++#define	src_map		src.map
++#define	src_mapsize	src.mapsize
++#define	src_segs	src.segs
++#define	src_nsegs	src.nsegs
++#define	src_buf		src.u.buf
++
++#define	dst_skb		dst.u.skb
++#define	dst_io		dst.u.io
++#define	dst_map		dst.map
++#define	dst_mapsize	dst.mapsize
++#define	dst_segs	dst.segs
++#define	dst_nsegs	dst.nsegs
++#define	dst_buf		dst.u.buf
++
++/*
++ *  Return values for hifn_crypto()
++ */
++#define HIFN_CRYPTO_SUCCESS	0
++#define HIFN_CRYPTO_BAD_INPUT	(-1)
++#define HIFN_CRYPTO_RINGS_FULL	(-2)
++
++/**************************************************************************
++ *
++ *  Function:  hifn_crypto
++ *
++ *  Purpose:   Called by external drivers to begin an encryption on the
++ *             HIFN board.
++ *
++ *  Blocking/Non-blocking Issues
++ *  ============================
++ *  The driver cannot block in hifn_crypto (no calls to tsleep) currently.
++ *  hifn_crypto() returns HIFN_CRYPTO_RINGS_FULL if there is not enough
++ *  room in any of the rings for the request to proceed.
++ *
++ *  Return Values
++ *  =============
++ *  0 for success, negative values on error
++ *
++ *  Defines for negative error codes are:
++ *  
++ *    HIFN_CRYPTO_BAD_INPUT  :  The passed in command had invalid settings.
++ *    HIFN_CRYPTO_RINGS_FULL :  All DMA rings were full and non-blocking
++ *                              behaviour was requested.
++ *
++ *************************************************************************/
++
++/*
++ * Convert back and forth from 'sid' to 'card' and 'session'
++ */
++#define HIFN_CARD(sid)		(((sid) & 0xf0000000) >> 28)
++#define HIFN_SESSION(sid)	((sid) & 0x000007ff)
++#define HIFN_SID(crd,ses)	(((crd) << 28) | ((ses) & 0x7ff))
++
++#endif /* __KERNEL__ */
++
++struct hifn_stats {
++	u_int64_t hst_ibytes;
++	u_int64_t hst_obytes;
++	u_int32_t hst_ipackets;
++	u_int32_t hst_opackets;
++	u_int32_t hst_invalid;
++	u_int32_t hst_nomem;		/* malloc or one of hst_nomem_* */
++	u_int32_t hst_abort;
++	u_int32_t hst_noirq;		/* IRQ for no reason */
++	u_int32_t hst_totbatch;		/* ops submitted w/o interrupt */
++	u_int32_t hst_maxbatch;		/* max ops submitted together */
++	u_int32_t hst_unaligned;	/* unaligned src caused copy */
++	/*
++	 * The following divides hst_nomem into more specific buckets.
++	 */
++	u_int32_t hst_nomem_map;	/* bus_dmamap_create failed */
++	u_int32_t hst_nomem_load;	/* bus_dmamap_load_* failed */
++	u_int32_t hst_nomem_mbuf;	/* MGET* failed */
++	u_int32_t hst_nomem_mcl;	/* MCLGET* failed */
++	u_int32_t hst_nomem_cr;		/* out of command/result descriptor */
++	u_int32_t hst_nomem_sd;		/* out of src/dst descriptors */
++};
++
++#endif /* __HIFN7751VAR_H__ */
+diff -Nur linux-2.6.30.orig/crypto/ocf/hifn/hifnHIPP.c linux-2.6.30/crypto/ocf/hifn/hifnHIPP.c
+--- linux-2.6.30.orig/crypto/ocf/hifn/hifnHIPP.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.30/crypto/ocf/hifn/hifnHIPP.c	2009-06-11 10:55:27.000000000 +0200
+@@ -0,0 +1,420 @@
++/*-
++ * Driver for Hifn HIPP-I/II chipset
++ * Copyright (c) 2006 Michael Richardson <mcr@xelerance.com>
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ *
++ * 1. Redistributions of source code must retain the above copyright
++ *   notice, this list of conditions and the following disclaimer.
++ * 2. Redistributions in binary form must reproduce the above copyright
++ *   notice, this list of conditions and the following disclaimer in the
++ *   documentation and/or other materials provided with the distribution.
++ * 3. The name of the author may not be used to endorse or promote products
++ *   derived from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
++ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
++ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
++ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
++ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
++ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ *
++ * Effort sponsored by Hifn Inc.
++ *
++ */
++
++/*
++ * Driver for various Hifn encryption processors.
++ */
++#ifndef AUTOCONF_INCLUDED
++#include <linux/config.h>
++#endif
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/list.h>
++#include <linux/slab.h>
++#include <linux/wait.h>
++#include <linux/sched.h>
++#include <linux/pci.h>
++#include <linux/delay.h>
++#include <linux/interrupt.h>
++#include <linux/spinlock.h>
++#include <linux/random.h>
++#include <linux/version.h>
++#include <linux/skbuff.h>
++#include <linux/uio.h>
++#include <linux/sysfs.h>
++#include <linux/miscdevice.h>
++#include <asm/io.h>
++
++#include <cryptodev.h>
++
++#include "hifnHIPPreg.h"
++#include "hifnHIPPvar.h"
++
++#if 1
++#define	DPRINTF(a...)	if (hipp_debug) { \
++							printk("%s: ", sc ? \
++								device_get_nameunit(sc->sc_dev) : "hifn"); \
++							printk(a); \
++						} else
++#else
++#define	DPRINTF(a...)
++#endif
++
++typedef int bus_size_t;
++
++static inline int
++pci_get_revid(struct pci_dev *dev)
++{
++	u8 rid = 0;
++	pci_read_config_byte(dev, PCI_REVISION_ID, &rid);
++	return rid;
++}
++
++#define debug hipp_debug
++int hipp_debug = 0;
++module_param(hipp_debug, int, 0644);
++MODULE_PARM_DESC(hipp_debug, "Enable debug");
++
++int hipp_maxbatch = 1;
++module_param(hipp_maxbatch, int, 0644);
++MODULE_PARM_DESC(hipp_maxbatch, "max ops to batch w/o interrupt");
++
++static	int  hipp_probe(struct pci_dev *dev, const struct pci_device_id *ent);
++static	void hipp_remove(struct pci_dev *dev);
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
++static irqreturn_t hipp_intr(int irq, void *arg);
++#else
++static irqreturn_t hipp_intr(int irq, void *arg, struct pt_regs *regs);
++#endif
++
++static int hipp_num_chips = 0;
++static struct hipp_softc *hipp_chip_idx[HIPP_MAX_CHIPS];
++
++static	int hipp_newsession(device_t, u_int32_t *, struct cryptoini *);
++static	int hipp_freesession(device_t, u_int64_t);
++static	int hipp_process(device_t, struct cryptop *, int);
++
++static device_method_t hipp_methods = {
++	/* crypto device methods */
++	DEVMETHOD(cryptodev_newsession,	hipp_newsession),
++	DEVMETHOD(cryptodev_freesession,hipp_freesession),
++	DEVMETHOD(cryptodev_process,	hipp_process),
++};
++
++static __inline u_int32_t
++READ_REG(struct hipp_softc *sc, unsigned int barno, bus_size_t reg)
++{
++	u_int32_t v = readl(sc->sc_bar[barno] + reg);
++	//sc->sc_bar0_lastreg = (bus_size_t) -1;
++	return (v);
++}
++static __inline void
++WRITE_REG(struct hipp_softc *sc, unsigned int barno, bus_size_t reg, u_int32_t val)
++{
++	writel(val, sc->sc_bar[barno] + reg);
++}
++
++#define READ_REG_0(sc, reg)         READ_REG(sc, 0, reg)
++#define WRITE_REG_0(sc, reg, val)   WRITE_REG(sc,0, reg, val)
++#define READ_REG_1(sc, reg)         READ_REG(sc, 1, reg)
++#define WRITE_REG_1(sc, reg, val)   WRITE_REG(sc,1, reg, val)
++
++static int
++hipp_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
++{
++	return EINVAL;
++}
++
++static int
++hipp_freesession(device_t dev, u_int64_t tid)
++{
++	return EINVAL;
++}
++
++static int
++hipp_process(device_t dev, struct cryptop *crp, int hint)
++{
++	return EINVAL;
++}
++
++static const char*
++hipp_partname(struct hipp_softc *sc, char buf[128], size_t blen)
++{
++	char *n = NULL;
++
++	switch (pci_get_vendor(sc->sc_pcidev)) {
++	case PCI_VENDOR_HIFN:
++		switch (pci_get_device(sc->sc_pcidev)) {
++		case PCI_PRODUCT_HIFN_7855:	n = "Hifn 7855";
++		case PCI_PRODUCT_HIFN_8155:	n = "Hifn 8155";
++		case PCI_PRODUCT_HIFN_6500:	n = "Hifn 6500";
++		}
++	}
++
++	if(n==NULL) {
++		snprintf(buf, blen, "VID=%02x,PID=%02x",
++			 pci_get_vendor(sc->sc_pcidev),
++			 pci_get_device(sc->sc_pcidev));
++	} else {
++		buf[0]='\0';
++		strncat(buf, n, blen);
++	}
++	return buf;
++}
++
++struct hipp_fs_entry {
++	struct attribute attr;
++	/* other stuff */
++};
++
++
++static ssize_t
++cryptoid_show(struct device *dev,
++	      struct device_attribute *attr,
++	      char *buf)						
++{								
++	struct hipp_softc *sc;					
++
++	sc = pci_get_drvdata(to_pci_dev (dev));
++	return sprintf (buf, "%d\n", sc->sc_cid);
++}
++
++struct device_attribute hipp_dev_cryptoid = __ATTR_RO(cryptoid);
++
++/*
++ * Attach an interface that successfully probed.
++ */
++static int
++hipp_probe(struct pci_dev *dev, const struct pci_device_id *ent)
++{
++	struct hipp_softc *sc = NULL;
++	int i;
++	//char rbase;
++	//u_int16_t ena;
++	int rev;
++	//int rseg;
++	int rc;
++
++	DPRINTF("%s()\n", __FUNCTION__);
++
++	if (pci_enable_device(dev) < 0)
++		return(-ENODEV);
++
++	if (pci_set_mwi(dev))
++		return(-ENODEV);
++
++	if (!dev->irq) {
++		printk("hifn: found device with no IRQ assigned. check BIOS settings!");
++		pci_disable_device(dev);
++		return(-ENODEV);
++	}
++
++	sc = (struct hipp_softc *) kmalloc(sizeof(*sc), GFP_KERNEL);
++	if (!sc)
++		return(-ENOMEM);
++	memset(sc, 0, sizeof(*sc));
++
++	softc_device_init(sc, "hifn-hipp", hipp_num_chips, hipp_methods);
++
++	sc->sc_pcidev = dev;
++	sc->sc_irq = -1;
++	sc->sc_cid = -1;
++	sc->sc_num = hipp_num_chips++;
++
++	if (sc->sc_num < HIPP_MAX_CHIPS)
++		hipp_chip_idx[sc->sc_num] = sc;
++
++	pci_set_drvdata(sc->sc_pcidev, sc);
++
++	spin_lock_init(&sc->sc_mtx);
++
++	/*
++	 * Setup PCI resources.
++	 * The READ_REG_0, WRITE_REG_0, READ_REG_1,
++	 * and WRITE_REG_1 macros throughout the driver are used
++	 * to permit better debugging.
++	 */
++	for(i=0; i<4; i++) {
++		unsigned long mem_start, mem_len;
++		mem_start = pci_resource_start(sc->sc_pcidev, i);
++		mem_len   = pci_resource_len(sc->sc_pcidev, i);
++		sc->sc_barphy[i] = (caddr_t)mem_start;
++		sc->sc_bar[i] = (ocf_iomem_t) ioremap(mem_start, mem_len);
++		if (!sc->sc_bar[i]) {
++			device_printf(sc->sc_dev, "cannot map bar%d register space\n", i);
++			goto fail;
++		}
++	}
++
++	//hipp_reset_board(sc, 0);
++	pci_set_master(sc->sc_pcidev);
++
++	/*
++	 * Arrange the interrupt line.
++	 */
++	rc = request_irq(dev->irq, hipp_intr, IRQF_SHARED, "hifn", sc);
++	if (rc) {
++		device_printf(sc->sc_dev, "could not map interrupt: %d\n", rc);
++		goto fail;
++	}
++	sc->sc_irq = dev->irq;
++
++	rev = READ_REG_1(sc, HIPP_1_REVID) & 0xffff;
++
++	{
++		char b[32];
++		device_printf(sc->sc_dev, "%s, rev %u",
++			      hipp_partname(sc, b, sizeof(b)), rev);
++	}
++
++#if 0
++	if (sc->sc_flags & HIFN_IS_7956)
++		printf(", pll=0x%x<%s clk, %ux mult>",
++			sc->sc_pllconfig,
++			sc->sc_pllconfig & HIFN_PLL_REF_SEL ? "ext" : "pci",
++			2 + 2*((sc->sc_pllconfig & HIFN_PLL_ND) >> 11));
++#endif
++	printf("\n");
++
++	sc->sc_cid = crypto_get_driverid(softc_get_device(sc),CRYPTOCAP_F_HARDWARE);
++	if (sc->sc_cid < 0) {
++		device_printf(sc->sc_dev, "could not get crypto driver id\n");
++		goto fail;
++	}
++
++#if 0 /* cannot work with a non-GPL module */
++	/* make a sysfs entry to let the world know what entry we got */
++	sysfs_create_file(&sc->sc_pcidev->dev.kobj, &hipp_dev_cryptoid.attr);
++#endif
++
++#if 0
++	init_timer(&sc->sc_tickto);
++	sc->sc_tickto.function = hifn_tick;
++	sc->sc_tickto.data = (unsigned long) sc->sc_num;
++	mod_timer(&sc->sc_tickto, jiffies + HZ);
++#endif
++
++#if 0 /* no code here yet ?? */
++	crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
++#endif
++
++	return (0);
++
++fail:
++	if (sc->sc_cid >= 0)
++		crypto_unregister_all(sc->sc_cid);
++	if (sc->sc_irq != -1)
++		free_irq(sc->sc_irq, sc);
++	
++#if 0
++	if (sc->sc_dma) {
++		/* Turn off DMA polling */
++		WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
++			    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
++		
++		pci_free_consistent(sc->sc_pcidev,
++				    sizeof(*sc->sc_dma),
++				    sc->sc_dma, sc->sc_dma_physaddr);
++	}
++#endif
++	kfree(sc);
++	return (-ENXIO);
++}
++
++/*
++ * Detach an interface that successfully probed.
++ */
++static void
++hipp_remove(struct pci_dev *dev)
++{
++	struct hipp_softc *sc = pci_get_drvdata(dev);
++	unsigned long l_flags;
++
++	DPRINTF("%s()\n", __FUNCTION__);
++
++	/* disable interrupts */
++	HIPP_LOCK(sc);
++
++#if 0
++	WRITE_REG_1(sc, HIFN_1_DMA_IER, 0);
++	HIFN_UNLOCK(sc);
++
++	/*XXX other resources */
++	del_timer_sync(&sc->sc_tickto);
++
++	/* Turn off DMA polling */
++	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
++	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
++#endif
++
++	crypto_unregister_all(sc->sc_cid);
++
++	free_irq(sc->sc_irq, sc);
++
++#if 0
++	pci_free_consistent(sc->sc_pcidev, sizeof(*sc->sc_dma),
++                sc->sc_dma, sc->sc_dma_physaddr);
++#endif
++}
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
++static irqreturn_t hipp_intr(int irq, void *arg)
++#else
++static irqreturn_t hipp_intr(int irq, void *arg, struct pt_regs *regs)
++#endif
++{
++	struct hipp_softc *sc = arg;
++
++	sc = sc; /* shut up compiler */
++
++	return IRQ_HANDLED;
++}
++
++static struct pci_device_id hipp_pci_tbl[] = {
++	{ PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7855,
++	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
++	{ PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_8155,
++	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
++};
++MODULE_DEVICE_TABLE(pci, hipp_pci_tbl);
++
++static struct pci_driver hipp_driver = {
++	.name         = "hipp",
++	.id_table     = hipp_pci_tbl,
++	.probe        =	hipp_probe,
++	.remove       = hipp_remove,
++	/* add PM stuff here one day */
++};
++
++static int __init hipp_init (void)
++{
++	struct hipp_softc *sc = NULL;
++	int rc;
++
++	DPRINTF("%s(%p)\n", __FUNCTION__, hipp_init);
++
++	rc = pci_register_driver(&hipp_driver);
++	pci_register_driver_compat(&hipp_driver, rc);
++
++	return rc;
++}
++
++static void __exit hipp_exit (void)
++{
++	pci_unregister_driver(&hipp_driver);
++}
++
++module_init(hipp_init);
++module_exit(hipp_exit);
++
++MODULE_LICENSE("BSD");
++MODULE_AUTHOR("Michael Richardson <mcr@xelerance.com>");
++MODULE_DESCRIPTION("OCF driver for hifn HIPP-I/II PCI crypto devices");
+diff -Nur linux-2.6.30.orig/crypto/ocf/hifn/hifnHIPPreg.h linux-2.6.30/crypto/ocf/hifn/hifnHIPPreg.h
+--- linux-2.6.30.orig/crypto/ocf/hifn/hifnHIPPreg.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.30/crypto/ocf/hifn/hifnHIPPreg.h	2009-06-11 10:55:27.000000000 +0200
+@@ -0,0 +1,46 @@
++/*-
++ * Hifn HIPP-I/HIPP-II (7855/8155) driver.
++ * Copyright (c) 2006 Michael Richardson <mcr@xelerance.com>
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ *
++ * 1. Redistributions of source code must retain the above copyright
++ *    notice, this list of conditions and the following disclaimer.
++ * 2. Redistributions in binary form must reproduce the above copyright
++ *    notice, this list of conditions and the following disclaimer in the
++ *    documentation and/or other materials provided with the distribution.
++ * 3. The name of the author may not be used to endorse or promote products
++ *    derived from this software without specific prior written permission.
++ *
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
++ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
++ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
++ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
++ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
++ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ *
++ * Effort sponsored by Hifn inc.
++ *
++ */
++
++#ifndef __HIFNHIPP_H__
++#define	__HIFNHIPP_H__
++
++/*
++ * PCI vendor and device identifiers
++ */
++#define	PCI_VENDOR_HIFN		0x13a3		/* Hifn */
++#define	PCI_PRODUCT_HIFN_6500	0x0006		/* 6500 */
++#define	PCI_PRODUCT_HIFN_7855	0x001f		/* 7855 */
++#define	PCI_PRODUCT_HIFN_8155	0x999		/* XXX 8155 */
++
++#define HIPP_1_REVID            0x01 /* BOGUS */
++
++#endif /* __HIFNHIPP_H__ */
+diff -Nur linux-2.6.30.orig/crypto/ocf/hifn/hifnHIPPvar.h linux-2.6.30/crypto/ocf/hifn/hifnHIPPvar.h
+--- linux-2.6.30.orig/crypto/ocf/hifn/hifnHIPPvar.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.30/crypto/ocf/hifn/hifnHIPPvar.h	2009-06-11 10:55:27.000000000 +0200
+@@ -0,0 +1,93 @@
++/*
++ * Hifn HIPP-I/HIPP-II (7855/8155) driver.
++ * Copyright (c) 2006 Michael Richardson <mcr@xelerance.com>
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ *
++ * 1. Redistributions of source code must retain the above copyright
++ *    notice, this list of conditions and the following disclaimer.
++ * 2. Redistributions in binary form must reproduce the above copyright
++ *    notice, this list of conditions and the following disclaimer in the
++ *    documentation and/or other materials provided with the distribution.
++ * 3. The name of the author may not be used to endorse or promote products
++ *    derived from this software without specific prior written permission.
++ *
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
++ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
++ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
++ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
++ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
++ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ *
++ * Effort sponsored by Hifn inc.
++ *
++ */
++
++#ifndef __HIFNHIPPVAR_H__
++#define __HIFNHIPPVAR_H__
++
++#define HIPP_MAX_CHIPS 8
++
++/*
++ * Holds data specific to a single Hifn HIPP-I board.
++ */
++struct hipp_softc {
++	softc_device_decl		 sc_dev;
++
++	struct pci_dev		*sc_pcidev;	/* device backpointer */
++	ocf_iomem_t             sc_bar[5];
++	caddr_t		        sc_barphy[5];   /* physical address */
++	int			sc_num;		/* for multiple devs */
++	spinlock_t		sc_mtx;		/* per-instance lock */
++	int32_t			sc_cid;
++	int			sc_irq;
++
++#if 0
++
++	u_int32_t		sc_dmaier;
++	u_int32_t		sc_drammodel;	/* 1=dram, 0=sram */
++	u_int32_t		sc_pllconfig;	/* 7954/7955/7956 PLL config */
++
++	struct hifn_dma		*sc_dma;
++	dma_addr_t		sc_dma_physaddr;/* physical address of sc_dma */
++
++	int			sc_dmansegs;
++	int			sc_maxses;
++	int			sc_nsessions;
++	struct hifn_session	*sc_sessions;
++	int			sc_ramsize;
++	int			sc_flags;
++#define	HIFN_HAS_RNG		0x1	/* includes random number generator */
++#define	HIFN_HAS_PUBLIC		0x2	/* includes public key support */
++#define	HIFN_HAS_AES		0x4	/* includes AES support */
++#define	HIFN_IS_7811		0x8	/* Hifn 7811 part */
++#define	HIFN_IS_7956		0x10	/* Hifn 7956/7955 don't have SDRAM */
++
++	struct timer_list	sc_tickto;	/* for managing DMA */
++
++	int			sc_rngfirst;
++	int			sc_rnghz;	/* RNG polling frequency */
++
++	int			sc_c_busy;	/* command ring busy */
++	int			sc_s_busy;	/* source data ring busy */
++	int			sc_d_busy;	/* destination data ring busy */
++	int			sc_r_busy;	/* result ring busy */
++	int			sc_active;	/* for initial countdown */
++	int			sc_needwakeup;	/* ops q'd wating on resources */
++	int			sc_curbatch;	/* # ops submitted w/o int */
++	int			sc_suspended;
++	struct miscdevice       sc_miscdev;
++#endif
++};
++
++#define	HIPP_LOCK(_sc)		spin_lock_irqsave(&(_sc)->sc_mtx, l_flags)
++#define	HIPP_UNLOCK(_sc)	spin_unlock_irqrestore(&(_sc)->sc_mtx, l_flags)
++
++#endif /* __HIFNHIPPVAR_H__ */
+diff -Nur linux-2.6.30.orig/crypto/ocf/hifn/Makefile linux-2.6.30/crypto/ocf/hifn/Makefile
+--- linux-2.6.30.orig/crypto/ocf/hifn/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.30/crypto/ocf/hifn/Makefile	2009-06-11 10:55:27.000000000 +0200
+@@ -0,0 +1,13 @@
++# for SGlinux builds
++-include $(ROOTDIR)/modules/.config
++
++obj-$(CONFIG_OCF_HIFN)     += hifn7751.o
++obj-$(CONFIG_OCF_HIFNHIPP) += hifnHIPP.o
++
++obj ?= .
++EXTRA_CFLAGS += -I$(obj)/.. -I$(obj)/
++
++ifdef TOPDIR
++-include $(TOPDIR)/Rules.make
++endif
++
+diff -Nur linux-2.6.30.orig/crypto/ocf/ixp4xx/ixp4xx.c linux-2.6.30/crypto/ocf/ixp4xx/ixp4xx.c
+--- linux-2.6.30.orig/crypto/ocf/ixp4xx/ixp4xx.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.30/crypto/ocf/ixp4xx/ixp4xx.c	2009-06-11 10:55:27.000000000 +0200
+@@ -0,0 +1,1328 @@
++/*
++ * An OCF module that uses Intels IXP CryptACC API to do the crypto.
++ * This driver requires the IXP400 Access Library that is available
++ * from Intel in order to operate (or compile).
++ *
++ * Written by David McCullough <david_mccullough@securecomputing.com>
++ * Copyright (C) 2006-2007 David McCullough
++ * Copyright (C) 2004-2005 Intel Corporation.
++ *
++ * LICENSE TERMS
++ *
++ * The free distribution and use of this software in both source and binary
++ * form is allowed (with or without changes) provided that:
++ *
++ *   1. distributions of this source code include the above copyright
++ *      notice, this list of conditions and the following disclaimer;
++ *
++ *   2. distributions in binary form include the above copyright
++ *      notice, this list of conditions and the following disclaimer
++ *      in the documentation and/or other associated materials;
++ *
++ *   3. the copyright holder's name is not used to endorse products
++ *      built using this software without specific written permission.
++ *
++ * ALTERNATIVELY, provided that this notice is retained in full, this product
++ * may be distributed under the terms of the GNU General Public License (GPL),
++ * in which case the provisions of the GPL apply INSTEAD OF those given above.
++ *
++ * DISCLAIMER
++ *
++ * This software is provided 'as is' with no explicit or implied warranties
++ * in respect of its properties, including, but not limited to, correctness
++ * and/or fitness for purpose.
++ */
++
++#ifndef AUTOCONF_INCLUDED
++#include <linux/config.h>
++#endif
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/list.h>
++#include <linux/slab.h>
++#include <linux/sched.h>
++#include <linux/wait.h>
++#include <linux/crypto.h>
++#include <linux/interrupt.h>
++#include <asm/scatterlist.h>
++
++#include <IxTypes.h>
++#include <IxOsBuffMgt.h>
++#include <IxNpeDl.h>
++#include <IxCryptoAcc.h>
++#include <IxQMgr.h>
++#include <IxOsServices.h>
++#include <IxOsCacheMMU.h>
++
++#include <cryptodev.h>
++#include <uio.h>
++
++#ifndef IX_MBUF_PRIV
++#define IX_MBUF_PRIV(x) ((x)->priv)
++#endif
++
++struct ixp_data;
++
++struct ixp_q {
++	struct list_head	 ixp_q_list;
++	struct ixp_data		*ixp_q_data;
++	struct cryptop		*ixp_q_crp;
++	struct cryptodesc	*ixp_q_ccrd;
++	struct cryptodesc	*ixp_q_acrd;
++	IX_MBUF				 ixp_q_mbuf;
++	UINT8				*ixp_hash_dest; /* Location for hash in client buffer */
++	UINT8				*ixp_hash_src; /* Location of hash in internal buffer */
++	unsigned char		 ixp_q_iv_data[IX_CRYPTO_ACC_MAX_CIPHER_IV_LENGTH];
++	unsigned char		*ixp_q_iv;
++};
++
++struct ixp_data {
++	int					 ixp_registered;	/* is the context registered */
++	int					 ixp_crd_flags;		/* detect direction changes */
++
++	int					 ixp_cipher_alg;
++	int					 ixp_auth_alg;
++
++	UINT32				 ixp_ctx_id;
++	UINT32				 ixp_hash_key_id;	/* used when hashing */
++	IxCryptoAccCtx		 ixp_ctx;
++	IX_MBUF				 ixp_pri_mbuf;
++	IX_MBUF				 ixp_sec_mbuf;
++
++	struct work_struct   ixp_pending_work;
++	struct work_struct   ixp_registration_work;
++	struct list_head	 ixp_q;				/* unprocessed requests */
++};
++
++#ifdef __ixp46X
++
++#define	MAX_IOP_SIZE	64	/* words */
++#define	MAX_OOP_SIZE	128
++
++#define	MAX_PARAMS		3
++
++struct ixp_pkq {
++	struct list_head			 pkq_list;
++	struct cryptkop				*pkq_krp;
++
++	IxCryptoAccPkeEauInOperands	 pkq_op;
++	IxCryptoAccPkeEauOpResult	 pkq_result;
++
++	UINT32						 pkq_ibuf0[MAX_IOP_SIZE];
++	UINT32						 pkq_ibuf1[MAX_IOP_SIZE];
++	UINT32						 pkq_ibuf2[MAX_IOP_SIZE];
++	UINT32						 pkq_obuf[MAX_OOP_SIZE];
++};
++
++static LIST_HEAD(ixp_pkq); /* current PK wait list */
++static struct ixp_pkq *ixp_pk_cur;
++static spinlock_t ixp_pkq_lock;
++
++#endif /* __ixp46X */
++
++static int ixp_blocked = 0;
++
++static int32_t			 ixp_id = -1;
++static struct ixp_data **ixp_sessions = NULL;
++static u_int32_t		 ixp_sesnum = 0;
++
++static int ixp_process(device_t, struct cryptop *, int);
++static int ixp_newsession(device_t, u_int32_t *, struct cryptoini *);
++static int ixp_freesession(device_t, u_int64_t);
++#ifdef __ixp46X
++static int ixp_kprocess(device_t, struct cryptkop *krp, int hint);
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
++static kmem_cache_t *qcache;
++#else
++static struct kmem_cache *qcache;
++#endif
++
++#define debug ixp_debug
++static int ixp_debug = 0;
++module_param(ixp_debug, int, 0644);
++MODULE_PARM_DESC(ixp_debug, "Enable debug");
++
++static int ixp_init_crypto = 1;
++module_param(ixp_init_crypto, int, 0444); /* RO after load/boot */
++MODULE_PARM_DESC(ixp_init_crypto, "Call ixCryptoAccInit (default is 1)");
++
++static void ixp_process_pending(void *arg);
++static void ixp_registration(void *arg);
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
++static void ixp_process_pending_wq(struct work_struct *work);
++static void ixp_registration_wq(struct work_struct *work);
++#endif
++
++/*
++ * dummy device structure
++ */
++
++static struct {
++	softc_device_decl	sc_dev;
++} ixpdev;
++
++static device_method_t ixp_methods = {
++	/* crypto device methods */
++	DEVMETHOD(cryptodev_newsession,	ixp_newsession),
++	DEVMETHOD(cryptodev_freesession,ixp_freesession),
++	DEVMETHOD(cryptodev_process,	ixp_process),
++#ifdef __ixp46X
++	DEVMETHOD(cryptodev_kprocess,	ixp_kprocess),
++#endif
++};
++
++/*
++ * Generate a new software session.
++ */
++static int
++ixp_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
++{
++	struct ixp_data *ixp;
++	u_int32_t i;
++#define AUTH_LEN(cri, def) \
++	(cri->cri_mlen ? cri->cri_mlen : (def))
++
++	dprintk("%s():alg %d\n", __FUNCTION__,cri->cri_alg);
++	if (sid == NULL || cri == NULL) {
++		dprintk("%s,%d - EINVAL\n", __FILE__, __LINE__);
++		return EINVAL;
++	}
++
++	if (ixp_sessions) {
++		for (i = 1; i < ixp_sesnum; i++)
++			if (ixp_sessions[i] == NULL)
++				break;
++	} else
++		i = 1;		/* NB: to silence compiler warning */
++
++	if (ixp_sessions == NULL || i == ixp_sesnum) {
++		struct ixp_data **ixpd;
++
++		if (ixp_sessions == NULL) {
++			i = 1; /* We leave ixp_sessions[0] empty */
++			ixp_sesnum = CRYPTO_SW_SESSIONS;
++		} else
++			ixp_sesnum *= 2;
++
++		ixpd = kmalloc(ixp_sesnum * sizeof(struct ixp_data *), SLAB_ATOMIC);
++		if (ixpd == NULL) {
++			/* Reset session number */
++			if (ixp_sesnum == CRYPTO_SW_SESSIONS)
++				ixp_sesnum = 0;
++			else
++				ixp_sesnum /= 2;
++			dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
++			return ENOBUFS;
++		}
++		memset(ixpd, 0, ixp_sesnum * sizeof(struct ixp_data *));
++
++		/* Copy existing sessions */
++		if (ixp_sessions) {
++			memcpy(ixpd, ixp_sessions,
++			    (ixp_sesnum / 2) * sizeof(struct ixp_data *));
++			kfree(ixp_sessions);
++		}
++
++		ixp_sessions = ixpd;
++	}
++
++	ixp_sessions[i] = (struct ixp_data *) kmalloc(sizeof(struct ixp_data),
++			SLAB_ATOMIC);
++	if (ixp_sessions[i] == NULL) {
++		ixp_freesession(NULL, i);
++		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
++		return ENOBUFS;
++	}
++
++	*sid = i;
++
++	ixp = ixp_sessions[i];
++	memset(ixp, 0, sizeof(*ixp));
++
++	ixp->ixp_cipher_alg = -1;
++	ixp->ixp_auth_alg = -1;
++	ixp->ixp_ctx_id = -1;
++	INIT_LIST_HEAD(&ixp->ixp_q);
++
++	ixp->ixp_ctx.useDifferentSrcAndDestMbufs = 0;
++
++	while (cri) {
++		switch (cri->cri_alg) {
++		case CRYPTO_DES_CBC:
++			ixp->ixp_cipher_alg = cri->cri_alg;
++			ixp->ixp_ctx.cipherCtx.cipherAlgo = IX_CRYPTO_ACC_CIPHER_DES;
++			ixp->ixp_ctx.cipherCtx.cipherMode = IX_CRYPTO_ACC_MODE_CBC;
++			ixp->ixp_ctx.cipherCtx.cipherKeyLen = (cri->cri_klen + 7) / 8;
++			ixp->ixp_ctx.cipherCtx.cipherBlockLen = IX_CRYPTO_ACC_DES_BLOCK_64;
++			ixp->ixp_ctx.cipherCtx.cipherInitialVectorLen =
++						IX_CRYPTO_ACC_DES_IV_64;
++			memcpy(ixp->ixp_ctx.cipherCtx.key.cipherKey,
++					cri->cri_key, (cri->cri_klen + 7) / 8);
++			break;
++
++		case CRYPTO_3DES_CBC:
++			ixp->ixp_cipher_alg = cri->cri_alg;
++			ixp->ixp_ctx.cipherCtx.cipherAlgo = IX_CRYPTO_ACC_CIPHER_3DES;
++			ixp->ixp_ctx.cipherCtx.cipherMode = IX_CRYPTO_ACC_MODE_CBC;
++			ixp->ixp_ctx.cipherCtx.cipherKeyLen = (cri->cri_klen + 7) / 8;
++			ixp->ixp_ctx.cipherCtx.cipherBlockLen = IX_CRYPTO_ACC_DES_BLOCK_64;
++			ixp->ixp_ctx.cipherCtx.cipherInitialVectorLen =
++						IX_CRYPTO_ACC_DES_IV_64;
++			memcpy(ixp->ixp_ctx.cipherCtx.key.cipherKey,
++					cri->cri_key, (cri->cri_klen + 7) / 8);
++			break;
++
++		case CRYPTO_RIJNDAEL128_CBC:
++			ixp->ixp_cipher_alg = cri->cri_alg;
++			ixp->ixp_ctx.cipherCtx.cipherAlgo = IX_CRYPTO_ACC_CIPHER_AES;
++			ixp->ixp_ctx.cipherCtx.cipherMode = IX_CRYPTO_ACC_MODE_CBC;
++			ixp->ixp_ctx.cipherCtx.cipherKeyLen = (cri->cri_klen + 7) / 8;
++			ixp->ixp_ctx.cipherCtx.cipherBlockLen = 16;
++			ixp->ixp_ctx.cipherCtx.cipherInitialVectorLen = 16;
++			memcpy(ixp->ixp_ctx.cipherCtx.key.cipherKey,
++					cri->cri_key, (cri->cri_klen + 7) / 8);
++			break;
++
++		case CRYPTO_MD5:
++		case CRYPTO_MD5_HMAC:
++			ixp->ixp_auth_alg = cri->cri_alg;
++			ixp->ixp_ctx.authCtx.authAlgo = IX_CRYPTO_ACC_AUTH_MD5;
++			ixp->ixp_ctx.authCtx.authDigestLen = AUTH_LEN(cri, MD5_HASH_LEN);
++			ixp->ixp_ctx.authCtx.aadLen = 0;
++			/* Only MD5_HMAC needs a key */
++			if (cri->cri_alg == CRYPTO_MD5_HMAC) {
++				ixp->ixp_ctx.authCtx.authKeyLen = (cri->cri_klen + 7) / 8;
++				if (ixp->ixp_ctx.authCtx.authKeyLen >
++						sizeof(ixp->ixp_ctx.authCtx.key.authKey)) {
++					printk(
++						"ixp4xx: Invalid key length for MD5_HMAC - %d bits\n",
++							cri->cri_klen);
++					ixp_freesession(NULL, i);
++					return EINVAL;
++				}
++				memcpy(ixp->ixp_ctx.authCtx.key.authKey,
++						cri->cri_key, (cri->cri_klen + 7) / 8);
++			}
++			break;
++
++		case CRYPTO_SHA1:
++		case CRYPTO_SHA1_HMAC:
++			ixp->ixp_auth_alg = cri->cri_alg;
++			ixp->ixp_ctx.authCtx.authAlgo = IX_CRYPTO_ACC_AUTH_SHA1;
++			ixp->ixp_ctx.authCtx.authDigestLen = AUTH_LEN(cri, SHA1_HASH_LEN);
++			ixp->ixp_ctx.authCtx.aadLen = 0;
++			/* Only SHA1_HMAC needs a key */
++			if (cri->cri_alg == CRYPTO_SHA1_HMAC) {
++				ixp->ixp_ctx.authCtx.authKeyLen = (cri->cri_klen + 7) / 8;
++				if (ixp->ixp_ctx.authCtx.authKeyLen >
++						sizeof(ixp->ixp_ctx.authCtx.key.authKey)) {
++					printk(
++						"ixp4xx: Invalid key length for SHA1_HMAC - %d bits\n",
++							cri->cri_klen);
++					ixp_freesession(NULL, i);
++					return EINVAL;
++				}
++				memcpy(ixp->ixp_ctx.authCtx.key.authKey,
++						cri->cri_key, (cri->cri_klen + 7) / 8);
++			}
++			break;
++
++		default:
++			printk("ixp: unknown algo 0x%x\n", cri->cri_alg);
++			ixp_freesession(NULL, i);
++			return EINVAL;
++		}
++		cri = cri->cri_next;
++	}
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
++	INIT_WORK(&ixp->ixp_pending_work, ixp_process_pending_wq);
++	INIT_WORK(&ixp->ixp_registration_work, ixp_registration_wq);
++#else
++	INIT_WORK(&ixp->ixp_pending_work, ixp_process_pending, ixp);
++	INIT_WORK(&ixp->ixp_registration_work, ixp_registration, ixp);
++#endif
++
++	return 0;
++}
++
++
++/*
++ * Free a session.
++ */
++static int
++ixp_freesession(device_t dev, u_int64_t tid)
++{
++	u_int32_t sid = CRYPTO_SESID2LID(tid);
++
++	dprintk("%s()\n", __FUNCTION__);
++	if (sid > ixp_sesnum || ixp_sessions == NULL ||
++			ixp_sessions[sid] == NULL) {
++		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
++		return EINVAL;
++	}
++
++	/* Silently accept and return */
++	if (sid == 0)
++		return 0;
++
++	if (ixp_sessions[sid]) {
++		if (ixp_sessions[sid]->ixp_ctx_id != -1) {
++			ixCryptoAccCtxUnregister(ixp_sessions[sid]->ixp_ctx_id);
++			ixp_sessions[sid]->ixp_ctx_id = -1;
++		}
++
++		flush_scheduled_work();
++
++		kfree(ixp_sessions[sid]);
++	}
++	ixp_sessions[sid] = NULL;
++	if (ixp_blocked) {
++		ixp_blocked = 0;
++		crypto_unblock(ixp_id, CRYPTO_SYMQ);
++	}
++	return 0;
++}
++
++
++/*
++ * callback for when hash processing is complete
++ */
++
++static void
++ixp_hash_perform_cb(
++	UINT32 hash_key_id,
++	IX_MBUF *bufp,
++	IxCryptoAccStatus status)
++{
++	struct ixp_q *q;
++
++	dprintk("%s(%u, %p, 0x%x)\n", __FUNCTION__, hash_key_id, bufp, status);
++
++	if (bufp == NULL) {
++		printk("ixp: NULL buf in %s\n", __FUNCTION__);
++		return;
++	}
++
++	q = IX_MBUF_PRIV(bufp);
++	if (q == NULL) {
++		printk("ixp: NULL priv in %s\n", __FUNCTION__);
++		return;
++	}
++
++	if (status == IX_CRYPTO_ACC_STATUS_SUCCESS) {
++		/* On success, need to copy hash back into original client buffer */
++		memcpy(q->ixp_hash_dest, q->ixp_hash_src,
++				(q->ixp_q_data->ixp_auth_alg == CRYPTO_SHA1) ?
++					SHA1_HASH_LEN : MD5_HASH_LEN);
++	}
++	else {
++		printk("ixp: hash perform failed status=%d\n", status);
++		q->ixp_q_crp->crp_etype = EINVAL;
++	}
++
++	/* Free internal buffer used for hashing */
++	kfree(IX_MBUF_MDATA(&q->ixp_q_mbuf));
++
++	crypto_done(q->ixp_q_crp);
++	kmem_cache_free(qcache, q);
++}
++
++/*
++ * setup a request and perform it
++ */
++static void
++ixp_q_process(struct ixp_q *q)
++{
++	IxCryptoAccStatus status;
++	struct ixp_data *ixp = q->ixp_q_data;
++	int auth_off = 0;
++	int auth_len = 0;
++	int crypt_off = 0;
++	int crypt_len = 0;
++	int icv_off = 0;
++	char *crypt_func;
++
++	dprintk("%s(%p)\n", __FUNCTION__, q);
++
++	if (q->ixp_q_ccrd) {
++		if (q->ixp_q_ccrd->crd_flags & CRD_F_IV_EXPLICIT) {
++			q->ixp_q_iv = q->ixp_q_ccrd->crd_iv;
++		} else {
++			q->ixp_q_iv = q->ixp_q_iv_data;
++			crypto_copydata(q->ixp_q_crp->crp_flags, q->ixp_q_crp->crp_buf,
++					q->ixp_q_ccrd->crd_inject,
++					ixp->ixp_ctx.cipherCtx.cipherInitialVectorLen,
++					(caddr_t) q->ixp_q_iv);
++		}
++
++		if (q->ixp_q_acrd) {
++			auth_off = q->ixp_q_acrd->crd_skip;
++			auth_len = q->ixp_q_acrd->crd_len;
++			icv_off  = q->ixp_q_acrd->crd_inject;
++		}
++
++		crypt_off = q->ixp_q_ccrd->crd_skip;
++		crypt_len = q->ixp_q_ccrd->crd_len;
++	} else { /* if (q->ixp_q_acrd) */
++		auth_off = q->ixp_q_acrd->crd_skip;
++		auth_len = q->ixp_q_acrd->crd_len;
++		icv_off  = q->ixp_q_acrd->crd_inject;
++	}
++
++	if (q->ixp_q_crp->crp_flags & CRYPTO_F_SKBUF) {
++		struct sk_buff *skb = (struct sk_buff *) q->ixp_q_crp->crp_buf;
++		if (skb_shinfo(skb)->nr_frags) {
++			/*
++			 * DAVIDM fix this limitation one day by using
++			 * a buffer pool and chaining,  it is not currently
++			 * needed for current user/kernel space acceleration
++			 */
++			printk("ixp: Cannot handle fragmented skb's yet !\n");
++			q->ixp_q_crp->crp_etype = ENOENT;
++			goto done;
++		}
++		IX_MBUF_MLEN(&q->ixp_q_mbuf) =
++				IX_MBUF_PKT_LEN(&q->ixp_q_mbuf) =  skb->len;
++		IX_MBUF_MDATA(&q->ixp_q_mbuf) = skb->data;
++	} else if (q->ixp_q_crp->crp_flags & CRYPTO_F_IOV) {
++		struct uio *uiop = (struct uio *) q->ixp_q_crp->crp_buf;
++		if (uiop->uio_iovcnt != 1) {
++			/*
++			 * DAVIDM fix this limitation one day by using
++			 * a buffer pool and chaining,  it is not currently
++			 * needed for current user/kernel space acceleration
++			 */
++			printk("ixp: Cannot handle more than 1 iovec yet !\n");
++			q->ixp_q_crp->crp_etype = ENOENT;
++			goto done;
++		}
++		IX_MBUF_MLEN(&q->ixp_q_mbuf) =
++				IX_MBUF_PKT_LEN(&q->ixp_q_mbuf) = uiop->uio_iov[0].iov_len;
++		IX_MBUF_MDATA(&q->ixp_q_mbuf) = uiop->uio_iov[0].iov_base;
++	} else /* contig buffer */ {
++		IX_MBUF_MLEN(&q->ixp_q_mbuf)  =
++				IX_MBUF_PKT_LEN(&q->ixp_q_mbuf) = q->ixp_q_crp->crp_ilen;
++		IX_MBUF_MDATA(&q->ixp_q_mbuf) = q->ixp_q_crp->crp_buf;
++	}
++
++	IX_MBUF_PRIV(&q->ixp_q_mbuf) = q;
++
++	if (ixp->ixp_auth_alg == CRYPTO_SHA1 || ixp->ixp_auth_alg == CRYPTO_MD5) {
++		/*
++		 * For SHA1 and MD5 hash, need to create an internal buffer that is big
++		 * enough to hold the original data + the appropriate padding for the
++		 * hash algorithm.
++		 */
++		UINT8 *tbuf = NULL;
++
++		IX_MBUF_MLEN(&q->ixp_q_mbuf) = IX_MBUF_PKT_LEN(&q->ixp_q_mbuf) =
++			((IX_MBUF_MLEN(&q->ixp_q_mbuf) * 8) + 72 + 511) / 8;
++		tbuf = kmalloc(IX_MBUF_MLEN(&q->ixp_q_mbuf), SLAB_ATOMIC);
++		
++		if (tbuf == NULL) {
++			printk("ixp: kmalloc(%u, SLAB_ATOMIC) failed\n",
++					IX_MBUF_MLEN(&q->ixp_q_mbuf));
++			q->ixp_q_crp->crp_etype = ENOMEM;
++			goto done;
++		}
++		memcpy(tbuf, &(IX_MBUF_MDATA(&q->ixp_q_mbuf))[auth_off], auth_len);
++
++		/* Set location in client buffer to copy hash into */
++		q->ixp_hash_dest =
++			&(IX_MBUF_MDATA(&q->ixp_q_mbuf))[auth_off + auth_len];
++
++		IX_MBUF_MDATA(&q->ixp_q_mbuf) = tbuf;
++
++		/* Set location in internal buffer for where hash starts */
++		q->ixp_hash_src = &(IX_MBUF_MDATA(&q->ixp_q_mbuf))[auth_len];
++
++		crypt_func = "ixCryptoAccHashPerform";
++		status = ixCryptoAccHashPerform(ixp->ixp_ctx.authCtx.authAlgo,
++				&q->ixp_q_mbuf, ixp_hash_perform_cb, 0, auth_len, auth_len,
++				&ixp->ixp_hash_key_id);
++	}
++	else {
++		crypt_func = "ixCryptoAccAuthCryptPerform";
++		status = ixCryptoAccAuthCryptPerform(ixp->ixp_ctx_id, &q->ixp_q_mbuf,
++			NULL, auth_off, auth_len, crypt_off, crypt_len, icv_off,
++			q->ixp_q_iv);
++	}
++
++	if (IX_CRYPTO_ACC_STATUS_SUCCESS == status)
++		return;
++
++	if (IX_CRYPTO_ACC_STATUS_QUEUE_FULL == status) {
++		q->ixp_q_crp->crp_etype = ENOMEM;
++		goto done;
++	}
++
++	printk("ixp: %s failed %u\n", crypt_func, status);
++	q->ixp_q_crp->crp_etype = EINVAL;
++
++done:
++	crypto_done(q->ixp_q_crp);
++	kmem_cache_free(qcache, q);
++}
++
++
++/*
++ * because we cannot process the Q from the Register callback
++ * we do it here on a task Q.
++ */
++
++static void
++ixp_process_pending(void *arg)
++{
++	struct ixp_data *ixp = arg;
++	struct ixp_q *q = NULL;
++
++	dprintk("%s(%p)\n", __FUNCTION__, arg);
++
++	if (!ixp)
++		return;
++
++	while (!list_empty(&ixp->ixp_q)) {
++		q = list_entry(ixp->ixp_q.next, struct ixp_q, ixp_q_list);
++		list_del(&q->ixp_q_list);
++		ixp_q_process(q);
++	}
++}
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
++static void
++ixp_process_pending_wq(struct work_struct *work)
++{
++	struct ixp_data *ixp = container_of(work, struct ixp_data,
++								ixp_pending_work);
++	ixp_process_pending(ixp);
++}
++#endif
++
++/*
++ * callback for when context registration is complete
++ */
++
++static void
++ixp_register_cb(UINT32 ctx_id, IX_MBUF *bufp, IxCryptoAccStatus status)
++{
++	int i;
++	struct ixp_data *ixp;
++	struct ixp_q *q;
++
++	dprintk("%s(%d, %p, %d)\n", __FUNCTION__, ctx_id, bufp, status);
++
++	/*
++	 * free any buffer passed in to this routine
++	 */
++	if (bufp) {
++		IX_MBUF_MLEN(bufp) = IX_MBUF_PKT_LEN(bufp) = 0;
++		kfree(IX_MBUF_MDATA(bufp));
++		IX_MBUF_MDATA(bufp) = NULL;
++	}
++
++	for (i = 0; i < ixp_sesnum; i++) {
++		ixp = ixp_sessions[i];
++		if (ixp && ixp->ixp_ctx_id == ctx_id)
++			break;
++	}
++	if (i >= ixp_sesnum) {
++		printk("ixp: invalid context id %d\n", ctx_id);
++		return;
++	}
++
++	if (IX_CRYPTO_ACC_STATUS_WAIT == status) {
++		/* this is normal to free the first of two buffers */
++		dprintk("ixp: register not finished yet.\n");
++		return;
++	}
++
++	if (IX_CRYPTO_ACC_STATUS_SUCCESS != status) {
++		printk("ixp: register failed 0x%x\n", status);
++		while (!list_empty(&ixp->ixp_q)) {
++			q = list_entry(ixp->ixp_q.next, struct ixp_q, ixp_q_list);
++			list_del(&q->ixp_q_list);
++			q->ixp_q_crp->crp_etype = EINVAL;
++			crypto_done(q->ixp_q_crp);
++			kmem_cache_free(qcache, q);
++		}
++		return;
++	}
++
++	/*
++	 * we are now registered,  we cannot start processing the Q here
++	 * or we get strange errors with AES (DES/3DES seem to be ok).
++	 */
++	ixp->ixp_registered = 1;
++	schedule_work(&ixp->ixp_pending_work);
++}
++
++
++/*
++ * callback for when data processing is complete
++ */
++
++static void
++ixp_perform_cb(
++	UINT32 ctx_id,
++	IX_MBUF *sbufp,
++	IX_MBUF *dbufp,
++	IxCryptoAccStatus status)
++{
++	struct ixp_q *q;
++
++	dprintk("%s(%d, %p, %p, 0x%x)\n", __FUNCTION__, ctx_id, sbufp,
++			dbufp, status);
++
++	if (sbufp == NULL) {
++		printk("ixp: NULL sbuf in ixp_perform_cb\n");
++		return;
++	}
++
++	q = IX_MBUF_PRIV(sbufp);
++	if (q == NULL) {
++		printk("ixp: NULL priv in ixp_perform_cb\n");
++		return;
++	}
++
++	if (status != IX_CRYPTO_ACC_STATUS_SUCCESS) {
++		printk("ixp: perform failed status=%d\n", status);
++		q->ixp_q_crp->crp_etype = EINVAL;
++	}
++
++	crypto_done(q->ixp_q_crp);
++	kmem_cache_free(qcache, q);
++}
++
++
++/*
++ * registration is not callable at IRQ time,  so we defer
++ * to a task queue,  this routines completes the registration for us
++ * when the task queue runs
++ *
++ * Unfortunately this means we cannot tell OCF that the driver is blocked,
++ * we do that on the next request.
++ */
++
++static void
++ixp_registration(void *arg)
++{
++	struct ixp_data *ixp = arg;
++	struct ixp_q *q = NULL;
++	IX_MBUF *pri = NULL, *sec = NULL;
++	int status = IX_CRYPTO_ACC_STATUS_SUCCESS;
++
++	if (!ixp) {
++		printk("ixp: ixp_registration with no arg\n");
++		return;
++	}
++
++	if (ixp->ixp_ctx_id != -1) {
++		ixCryptoAccCtxUnregister(ixp->ixp_ctx_id);
++		ixp->ixp_ctx_id = -1;
++	}
++
++	if (list_empty(&ixp->ixp_q)) {
++		printk("ixp: ixp_registration with no Q\n");
++		return;
++	}
++
++	/*
++	 * setup the primary and secondary buffers
++	 */
++	q = list_entry(ixp->ixp_q.next, struct ixp_q, ixp_q_list);
++	if (q->ixp_q_acrd) {
++		pri = &ixp->ixp_pri_mbuf;
++		sec = &ixp->ixp_sec_mbuf;
++		IX_MBUF_MLEN(pri)  = IX_MBUF_PKT_LEN(pri) = 128;
++		IX_MBUF_MDATA(pri) = (unsigned char *) kmalloc(128, SLAB_ATOMIC);
++		IX_MBUF_MLEN(sec)  = IX_MBUF_PKT_LEN(sec) = 128;
++		IX_MBUF_MDATA(sec) = (unsigned char *) kmalloc(128, SLAB_ATOMIC);
++	}
++
++	/* Only need to register if a crypt op or HMAC op */
++	if (!(ixp->ixp_auth_alg == CRYPTO_SHA1 ||
++				ixp->ixp_auth_alg == CRYPTO_MD5)) {
++		status = ixCryptoAccCtxRegister(
++					&ixp->ixp_ctx,
++					pri, sec,
++					ixp_register_cb,
++					ixp_perform_cb,
++					&ixp->ixp_ctx_id);
++	}
++	else {
++		/* Otherwise we start processing pending q */
++		schedule_work(&ixp->ixp_pending_work);
++	}
++
++	if (IX_CRYPTO_ACC_STATUS_SUCCESS == status)
++		return;
++
++	if (IX_CRYPTO_ACC_STATUS_EXCEED_MAX_TUNNELS == status) {
++		printk("ixp: ixCryptoAccCtxRegister failed (out of tunnels)\n");
++		ixp_blocked = 1;
++		/* perhaps we should return EGAIN on queued ops ? */
++		return;
++	}
++
++	printk("ixp: ixCryptoAccCtxRegister failed %d\n", status);
++	ixp->ixp_ctx_id = -1;
++
++	/*
++	 * everything waiting is toasted
++	 */
++	while (!list_empty(&ixp->ixp_q)) {
++		q = list_entry(ixp->ixp_q.next, struct ixp_q, ixp_q_list);
++		list_del(&q->ixp_q_list);
++		q->ixp_q_crp->crp_etype = ENOENT;
++		crypto_done(q->ixp_q_crp);
++		kmem_cache_free(qcache, q);
++	}
++}
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
++static void
++ixp_registration_wq(struct work_struct *work)
++{
++	struct ixp_data *ixp = container_of(work, struct ixp_data,
++								ixp_registration_work);
++	ixp_registration(ixp);
++}
++#endif
++
++/*
++ * Process a request.
++ */
++static int
++ixp_process(device_t dev, struct cryptop *crp, int hint)
++{
++	struct ixp_data *ixp;
++	unsigned int lid;
++	struct ixp_q *q = NULL;
++	int status;
++
++	dprintk("%s()\n", __FUNCTION__);
++
++	/* Sanity check */
++	if (crp == NULL) {
++		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
++		return EINVAL;
++	}
++
++	crp->crp_etype = 0;
++
++	if (ixp_blocked)
++		return ERESTART;
++
++	if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
++		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
++		crp->crp_etype = EINVAL;
++		goto done;
++	}
++
++	/*
++	 * find the session we are using
++	 */
++
++	lid = crp->crp_sid & 0xffffffff;
++	if (lid >= ixp_sesnum || lid == 0 || ixp_sessions == NULL ||
++			ixp_sessions[lid] == NULL) {
++		crp->crp_etype = ENOENT;
++		dprintk("%s,%d: ENOENT\n", __FILE__, __LINE__);
++		goto done;
++	}
++	ixp = ixp_sessions[lid];
++
++	/*
++	 * setup a new request ready for queuing
++	 */
++	q = kmem_cache_alloc(qcache, SLAB_ATOMIC);
++	if (q == NULL) {
++		dprintk("%s,%d: ENOMEM\n", __FILE__, __LINE__);
++		crp->crp_etype = ENOMEM;
++		goto done;
++	}
++	/*
++	 * save some cycles by only zeroing the important bits
++	 */
++	memset(&q->ixp_q_mbuf, 0, sizeof(q->ixp_q_mbuf));
++	q->ixp_q_ccrd = NULL;
++	q->ixp_q_acrd = NULL;
++	q->ixp_q_crp = crp;
++	q->ixp_q_data = ixp;
++
++	/*
++	 * point the cipher and auth descriptors appropriately
++	 * check that we have something to do
++	 */
++	if (crp->crp_desc->crd_alg == ixp->ixp_cipher_alg)
++		q->ixp_q_ccrd = crp->crp_desc;
++	else if (crp->crp_desc->crd_alg == ixp->ixp_auth_alg)
++		q->ixp_q_acrd = crp->crp_desc;
++	else {
++		crp->crp_etype = ENOENT;
++		dprintk("%s,%d: bad desc match: ENOENT\n", __FILE__, __LINE__);
++		goto done;
++	}
++	if (crp->crp_desc->crd_next) {
++		if (crp->crp_desc->crd_next->crd_alg == ixp->ixp_cipher_alg)
++			q->ixp_q_ccrd = crp->crp_desc->crd_next;
++		else if (crp->crp_desc->crd_next->crd_alg == ixp->ixp_auth_alg)
++			q->ixp_q_acrd = crp->crp_desc->crd_next;
++		else {
++			crp->crp_etype = ENOENT;
++			dprintk("%s,%d: bad desc match: ENOENT\n", __FILE__, __LINE__);
++			goto done;
++		}
++	}
++
++	/*
++	 * If there is a direction change for this context then we mark it as
++	 * unregistered and re-register is for the new direction.  This is not
++	 * a very expensive operation and currently only tends to happen when
++	 * user-space application are doing benchmarks
++	 *
++	 * DM - we should be checking for pending requests before unregistering.
++	 */
++	if (q->ixp_q_ccrd && ixp->ixp_registered &&
++			ixp->ixp_crd_flags != (q->ixp_q_ccrd->crd_flags & CRD_F_ENCRYPT)) {
++		dprintk("%s - detected direction change on session\n", __FUNCTION__);
++		ixp->ixp_registered = 0;
++	}
++
++	/*
++	 * if we are registered,  call straight into the perform code
++	 */
++	if (ixp->ixp_registered) {
++		ixp_q_process(q);
++		return 0;
++	}
++
++	/*
++	 * the only part of the context not set in newsession is the direction
++	 * dependent parts
++	 */
++	if (q->ixp_q_ccrd) {
++		ixp->ixp_crd_flags = (q->ixp_q_ccrd->crd_flags & CRD_F_ENCRYPT);
++		if (q->ixp_q_ccrd->crd_flags & CRD_F_ENCRYPT) {
++			ixp->ixp_ctx.operation = q->ixp_q_acrd ?
++					IX_CRYPTO_ACC_OP_ENCRYPT_AUTH : IX_CRYPTO_ACC_OP_ENCRYPT;
++		} else {
++			ixp->ixp_ctx.operation = q->ixp_q_acrd ?
++					IX_CRYPTO_ACC_OP_AUTH_DECRYPT : IX_CRYPTO_ACC_OP_DECRYPT;
++		}
++	} else {
++		/* q->ixp_q_acrd must be set if we are here */
++		ixp->ixp_ctx.operation = IX_CRYPTO_ACC_OP_AUTH_CALC;
++	}
++
++	status = list_empty(&ixp->ixp_q);
++	list_add_tail(&q->ixp_q_list, &ixp->ixp_q);
++	if (status)
++		schedule_work(&ixp->ixp_registration_work);
++	return 0;
++
++done:
++	if (q)
++		kmem_cache_free(qcache, q);
++	crypto_done(crp);
++	return 0;
++}
++
++
++#ifdef __ixp46X
++/*
++ * key processing support for the ixp465
++ */
++
++
++/*
++ * copy a BN (LE) into a buffer (BE) an fill out the op appropriately
++ * assume zeroed and only copy bits that are significant
++ */
++
++static int
++ixp_copy_ibuf(struct crparam *p, IxCryptoAccPkeEauOperand *op, UINT32 *buf)
++{
++	unsigned char *src = (unsigned char *) p->crp_p;
++	unsigned char *dst;
++	int len, bits = p->crp_nbits;
++
++	dprintk("%s()\n", __FUNCTION__);
++
++	if (bits > MAX_IOP_SIZE * sizeof(UINT32) * 8) {
++		dprintk("%s - ibuf too big (%d > %d)\n", __FUNCTION__,
++				bits, MAX_IOP_SIZE * sizeof(UINT32) * 8);
++		return -1;
++	}
++
++	len = (bits + 31) / 32; /* the number UINT32's needed */
++
++	dst = (unsigned char *) &buf[len];
++	dst--;
++
++	while (bits > 0) {
++		*dst-- = *src++;
++		bits -= 8;
++	}
++
++#if 0 /* no need to zero remaining bits as it is done during request alloc */
++	while (dst > (unsigned char *) buf)
++		*dst-- = '\0';
++#endif
++
++	op->pData = buf;
++	op->dataLen = len;
++	return 0;
++}
++
++/*
++ * copy out the result,  be as forgiving as we can about small output buffers
++ */
++
++static int
++ixp_copy_obuf(struct crparam *p, IxCryptoAccPkeEauOpResult *op, UINT32 *buf)
++{
++	unsigned char *dst = (unsigned char *) p->crp_p;
++	unsigned char *src = (unsigned char *) buf;
++	int len, z, bits = p->crp_nbits;
++
++	dprintk("%s()\n", __FUNCTION__);
++
++	len = op->dataLen * sizeof(UINT32);
++
++	/* skip leading zeroes to be small buffer friendly */
++	z = 0;
++	while (z < len && src[z] == '\0')
++		z++;
++
++	src += len;
++	src--;
++	len -= z;
++
++	while (len > 0 && bits > 0) {
++		*dst++ = *src--;
++		len--;
++		bits -= 8;
++	}
++
++	while (bits > 0) {
++		*dst++ = '\0';
++		bits -= 8;
++	}
++
++	if (len > 0) {
++		dprintk("%s - obuf is %d (z=%d, ob=%d) bytes too small\n",
++				__FUNCTION__, len, z, p->crp_nbits / 8);
++		return -1;
++	}
++
++	return 0;
++}
++
++
++/*
++ * the parameter offsets for exp_mod
++ */
++
++#define IXP_PARAM_BASE 0
++#define IXP_PARAM_EXP  1
++#define IXP_PARAM_MOD  2
++#define IXP_PARAM_RES  3
++
++/*
++ * key processing complete callback,  is also used to start processing
++ * by passing a NULL for pResult
++ */
++
++static void
++ixp_kperform_cb(
++	IxCryptoAccPkeEauOperation operation,
++	IxCryptoAccPkeEauOpResult *pResult,
++	BOOL carryOrBorrow,
++	IxCryptoAccStatus status)
++{
++	struct ixp_pkq *q, *tmp;
++	unsigned long flags;
++
++	dprintk("%s(0x%x, %p, %d, 0x%x)\n", __FUNCTION__, operation, pResult,
++			carryOrBorrow, status);
++
++	/* handle a completed request */
++	if (pResult) {
++		if (ixp_pk_cur && &ixp_pk_cur->pkq_result == pResult) {
++			q = ixp_pk_cur;
++			if (status != IX_CRYPTO_ACC_STATUS_SUCCESS) {
++				dprintk("%s() - op failed 0x%x\n", __FUNCTION__, status);
++				q->pkq_krp->krp_status = ERANGE; /* could do better */
++			} else {
++				/* copy out the result */
++				if (ixp_copy_obuf(&q->pkq_krp->krp_param[IXP_PARAM_RES],
++						&q->pkq_result, q->pkq_obuf))
++					q->pkq_krp->krp_status = ERANGE;
++			}
++			crypto_kdone(q->pkq_krp);
++			kfree(q);
++			ixp_pk_cur = NULL;
++		} else
++			printk("%s - callback with invalid result pointer\n", __FUNCTION__);
++	}
++
++	spin_lock_irqsave(&ixp_pkq_lock, flags);
++	if (ixp_pk_cur || list_empty(&ixp_pkq)) {
++		spin_unlock_irqrestore(&ixp_pkq_lock, flags);
++		return;
++	}
++
++	list_for_each_entry_safe(q, tmp, &ixp_pkq, pkq_list) {
++
++		list_del(&q->pkq_list);
++		ixp_pk_cur = q;
++
++		spin_unlock_irqrestore(&ixp_pkq_lock, flags);
++
++		status = ixCryptoAccPkeEauPerform(
++				IX_CRYPTO_ACC_OP_EAU_MOD_EXP,
++				&q->pkq_op,
++				ixp_kperform_cb,
++				&q->pkq_result);
++
++		if (status == IX_CRYPTO_ACC_STATUS_SUCCESS) {
++			dprintk("%s() - ixCryptoAccPkeEauPerform SUCCESS\n", __FUNCTION__);
++			return; /* done for now; the completion callback re-enters here */
++		} else if (status == IX_CRYPTO_ACC_STATUS_RETRY) {
++			printk("%s() - ixCryptoAccPkeEauPerform RETRY\n", __FUNCTION__);
++		} else {
++			printk("%s() - ixCryptoAccPkeEauPerform failed %d\n",
++					__FUNCTION__, status);
++		}
++		q->pkq_krp->krp_status = ERANGE; /* could do better */
++		crypto_kdone(q->pkq_krp);
++		kfree(q);
++		spin_lock_irqsave(&ixp_pkq_lock, flags);
++	}
++	spin_unlock_irqrestore(&ixp_pkq_lock, flags);
++}
++
++
++static int
++ixp_kprocess(device_t dev, struct cryptkop *krp, int hint)
++{
++	struct ixp_pkq *q;
++	int rc = 0;
++	unsigned long flags;
++
++	dprintk("%s l1=%d l2=%d l3=%d l4=%d\n", __FUNCTION__,
++			krp->krp_param[IXP_PARAM_BASE].crp_nbits,
++			krp->krp_param[IXP_PARAM_EXP].crp_nbits,
++			krp->krp_param[IXP_PARAM_MOD].crp_nbits,
++			krp->krp_param[IXP_PARAM_RES].crp_nbits);
++
++
++	if (krp->krp_op != CRK_MOD_EXP) {
++		krp->krp_status = EOPNOTSUPP;
++		goto err;
++	}
++
++	q = (struct ixp_pkq *) kmalloc(sizeof(*q), GFP_KERNEL);
++	if (q == NULL) {
++		krp->krp_status = ENOMEM;
++		goto err;
++	}
++
++	/*
++	 * The PKE engine does not appear to zero the output buffer
++	 * appropriately, so we need to do it all here.
++	 */
++	memset(q, 0, sizeof(*q));
++
++	q->pkq_krp = krp;
++	INIT_LIST_HEAD(&q->pkq_list);
++
++	if (ixp_copy_ibuf(&krp->krp_param[IXP_PARAM_BASE], &q->pkq_op.modExpOpr.M,
++			q->pkq_ibuf0))
++		rc = 1;
++	if (!rc && ixp_copy_ibuf(&krp->krp_param[IXP_PARAM_EXP],
++				&q->pkq_op.modExpOpr.e, q->pkq_ibuf1))
++		rc = 2;
++	if (!rc && ixp_copy_ibuf(&krp->krp_param[IXP_PARAM_MOD],
++				&q->pkq_op.modExpOpr.N, q->pkq_ibuf2))
++		rc = 3;
++
++	if (rc) {
++		kfree(q);
++		krp->krp_status = ERANGE;
++		goto err;
++	}
++
++	q->pkq_result.pData           = q->pkq_obuf;
++	q->pkq_result.dataLen         =
++			(krp->krp_param[IXP_PARAM_RES].crp_nbits + 31) / 32;
++
++	spin_lock_irqsave(&ixp_pkq_lock, flags);
++	list_add_tail(&q->pkq_list, &ixp_pkq);
++	spin_unlock_irqrestore(&ixp_pkq_lock, flags);
++
++	if (!ixp_pk_cur)
++		ixp_kperform_cb(0, NULL, 0, 0);
++	return (0);
++
++err:
++	crypto_kdone(krp);
++	return (0);
++}
++
++
++
++#ifdef CONFIG_OCF_RANDOMHARVEST
++/*
++ * We run the random number generator output through SHA so that it
++ * is FIPS compliant.
++ */
++
++static volatile int sha_done = 0;
++static unsigned char sha_digest[20];
++
++static void
++ixp_hash_cb(UINT8 *digest, IxCryptoAccStatus status)
++{
++	dprintk("%s(%p, %d)\n", __FUNCTION__, digest, status);
++	if (sha_digest != digest)
++		printk("digest error\n");
++	if (IX_CRYPTO_ACC_STATUS_SUCCESS == status)
++		sha_done = 1;
++	else
++		sha_done = -status;
++}
++
++static int
++ixp_read_random(void *arg, u_int32_t *buf, int maxwords)
++{
++	IxCryptoAccStatus status;
++	int i, n, rc;
++
++	dprintk("%s(%p, %d)\n", __FUNCTION__, buf, maxwords);
++	memset(buf, 0, maxwords * sizeof(*buf));
++	status = ixCryptoAccPkePseudoRandomNumberGet(maxwords, buf);
++	if (status != IX_CRYPTO_ACC_STATUS_SUCCESS) {
++		dprintk("%s: ixCryptoAccPkePseudoRandomNumberGet failed %d\n",
++				__FUNCTION__, status);
++		return 0;
++	}
++
++	/*
++	 * run the random data through SHA to make it look more random
++	 */
++
++	n = sizeof(sha_digest); /* process digest bytes at a time */
++
++	rc = 0;
++	for (i = 0; i < maxwords; i += n / sizeof(*buf)) {
++		if ((maxwords - i) * sizeof(*buf) < n)
++			n = (maxwords - i) * sizeof(*buf);
++		sha_done = 0;
++		status = ixCryptoAccPkeHashPerform(IX_CRYPTO_ACC_AUTH_SHA1,
++				(UINT8 *) &buf[i], n, ixp_hash_cb, sha_digest);
++		if (status != IX_CRYPTO_ACC_STATUS_SUCCESS) {
++			dprintk("ixCryptoAccPkeHashPerform failed %d\n", status);
++			return -EIO;
++		}
++		while (!sha_done)
++			schedule();
++		if (sha_done < 0) {
++			dprintk("ixCryptoAccPkeHashPerform failed CB %d\n", -sha_done);
++			return 0;
++		}
++		memcpy(&buf[i], sha_digest, n);
++		rc += n / sizeof(*buf);
++	}
++
++	return rc;
++}
++#endif /* CONFIG_OCF_RANDOMHARVEST */
++
++#endif /* __ixp46X */
++
++
++
++/*
++ * our driver startup and shutdown routines
++ */
++
++static int
++ixp_init(void)
++{
++	dprintk("%s(%p)\n", __FUNCTION__, ixp_init);
++
++	if (ixp_init_crypto && ixCryptoAccInit() != IX_CRYPTO_ACC_STATUS_SUCCESS)
++		printk("ixCryptoAccInit failed, assuming already initialised!\n");
++
++	qcache = kmem_cache_create("ixp4xx_q", sizeof(struct ixp_q), 0,
++				SLAB_HWCACHE_ALIGN, NULL
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
++				, NULL
++#endif
++				  );
++	if (!qcache) {
++		printk("failed to create Qcache\n");
++		return -ENOENT;
++	}
++
++	memset(&ixpdev, 0, sizeof(ixpdev));
++	softc_device_init(&ixpdev, "ixp4xx", 0, ixp_methods);
++
++	ixp_id = crypto_get_driverid(softc_get_device(&ixpdev),
++				CRYPTOCAP_F_HARDWARE);
++	if (ixp_id < 0)
++		panic("IXP/OCF crypto device cannot initialize!");
++
++#define	REGISTER(alg) \
++	crypto_register(ixp_id,alg,0,0)
++
++	REGISTER(CRYPTO_DES_CBC);
++	REGISTER(CRYPTO_3DES_CBC);
++	REGISTER(CRYPTO_RIJNDAEL128_CBC);
++#ifdef CONFIG_OCF_IXP4XX_SHA1_MD5
++	REGISTER(CRYPTO_MD5);
++	REGISTER(CRYPTO_SHA1);
++#endif
++	REGISTER(CRYPTO_MD5_HMAC);
++	REGISTER(CRYPTO_SHA1_HMAC);
++#undef REGISTER
++
++#ifdef __ixp46X
++	spin_lock_init(&ixp_pkq_lock);
++	/*
++	 * we do not enable the go fast options here as they can potentially
++	 * allow timing based attacks
++	 *
++	 * http://www.openssl.org/news/secadv_20030219.txt
++	 */
++	ixCryptoAccPkeEauExpConfig(0, 0);
++	crypto_kregister(ixp_id, CRK_MOD_EXP, 0);
++#ifdef CONFIG_OCF_RANDOMHARVEST
++	crypto_rregister(ixp_id, ixp_read_random, NULL);
++#endif
++#endif
++
++	return 0;
++}
++
++static void
++ixp_exit(void)
++{
++	dprintk("%s()\n", __FUNCTION__);
++	crypto_unregister_all(ixp_id);
++	ixp_id = -1;
++	kmem_cache_destroy(qcache);
++	qcache = NULL;
++}
++
++module_init(ixp_init);
++module_exit(ixp_exit);
++
++MODULE_LICENSE("Dual BSD/GPL");
++MODULE_AUTHOR("David McCullough <dmccullough@cyberguard.com>");
++MODULE_DESCRIPTION("ixp (OCF module for IXP4xx crypto)");
+diff -Nur linux-2.6.30.orig/crypto/ocf/ixp4xx/Makefile linux-2.6.30/crypto/ocf/ixp4xx/Makefile
+--- linux-2.6.30.orig/crypto/ocf/ixp4xx/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.30/crypto/ocf/ixp4xx/Makefile	2009-06-11 10:55:27.000000000 +0200
+@@ -0,0 +1,104 @@
++# for SGlinux builds
++-include $(ROOTDIR)/modules/.config
++
++#
++# You will need to point this at your Intel ixp425 includes,  this portion
++# of the Makefile only really works under SGLinux with the appropriate libs
++# installed.  They can be downloaded from http://www.snapgear.org/
++#
++ifeq ($(CONFIG_CPU_IXP46X),y)
++IXPLATFORM = ixp46X
++else
++ifeq ($(CONFIG_CPU_IXP43X),y)
++IXPLATFORM = ixp43X
++else
++IXPLATFORM = ixp42X
++endif
++endif
++
++ifdef CONFIG_IXP400_LIB_2_4
++IX_XSCALE_SW = $(ROOTDIR)/modules/ixp425/ixp400-2.4/ixp400_xscale_sw
++OSAL_DIR     = $(ROOTDIR)/modules/ixp425/ixp400-2.4/ixp_osal
++endif
++ifdef CONFIG_IXP400_LIB_2_1
++IX_XSCALE_SW = $(ROOTDIR)/modules/ixp425/ixp400-2.1/ixp400_xscale_sw
++OSAL_DIR     = $(ROOTDIR)/modules/ixp425/ixp400-2.1/ixp_osal
++endif
++ifdef CONFIG_IXP400_LIB_2_0
++IX_XSCALE_SW = $(ROOTDIR)/modules/ixp425/ixp400-2.0/ixp400_xscale_sw
++OSAL_DIR     = $(ROOTDIR)/modules/ixp425/ixp400-2.0/ixp_osal
++endif
++ifdef IX_XSCALE_SW
++ifdef CONFIG_IXP400_LIB_2_4
++IXP_CFLAGS = \
++	-I$(ROOTDIR)/. \
++	-I$(IX_XSCALE_SW)/src/include \
++	-I$(OSAL_DIR)/common/include/ \
++	-I$(OSAL_DIR)/common/include/modules/ \
++	-I$(OSAL_DIR)/common/include/modules/ddk/ \
++	-I$(OSAL_DIR)/common/include/modules/bufferMgt/ \
++	-I$(OSAL_DIR)/common/include/modules/ioMem/ \
++	-I$(OSAL_DIR)/common/os/linux/include/ \
++	-I$(OSAL_DIR)/common/os/linux/include/core/  \
++	-I$(OSAL_DIR)/common/os/linux/include/modules/ \
++	-I$(OSAL_DIR)/common/os/linux/include/modules/ddk/ \
++	-I$(OSAL_DIR)/common/os/linux/include/modules/bufferMgt/ \
++	-I$(OSAL_DIR)/common/os/linux/include/modules/ioMem/ \
++	-I$(OSAL_DIR)/platforms/$(IXPLATFORM)/include/ \
++	-I$(OSAL_DIR)/platforms/$(IXPLATFORM)/os/linux/include/ \
++	-DENABLE_IOMEM -DENABLE_BUFFERMGT -DENABLE_DDK \
++	-DUSE_IXP4XX_CRYPTO
++else
++IXP_CFLAGS = \
++	-I$(ROOTDIR)/. \
++	-I$(IX_XSCALE_SW)/src/include \
++	-I$(OSAL_DIR)/ \
++	-I$(OSAL_DIR)/os/linux/include/ \
++	-I$(OSAL_DIR)/os/linux/include/modules/ \
++	-I$(OSAL_DIR)/os/linux/include/modules/ioMem/ \
++	-I$(OSAL_DIR)/os/linux/include/modules/bufferMgt/ \
++	-I$(OSAL_DIR)/os/linux/include/core/  \
++	-I$(OSAL_DIR)/os/linux/include/platforms/ \
++	-I$(OSAL_DIR)/os/linux/include/platforms/ixp400/ \
++	-I$(OSAL_DIR)/os/linux/include/platforms/ixp400/ixp425 \
++	-I$(OSAL_DIR)/os/linux/include/platforms/ixp400/ixp465 \
++	-I$(OSAL_DIR)/os/linux/include/core/ \
++	-I$(OSAL_DIR)/include/ \
++	-I$(OSAL_DIR)/include/modules/ \
++	-I$(OSAL_DIR)/include/modules/bufferMgt/ \
++	-I$(OSAL_DIR)/include/modules/ioMem/ \
++	-I$(OSAL_DIR)/include/platforms/ \
++	-I$(OSAL_DIR)/include/platforms/ixp400/ \
++	-DUSE_IXP4XX_CRYPTO
++endif
++endif
++ifdef CONFIG_IXP400_LIB_1_4
++IXP_CFLAGS   = \
++	-I$(ROOTDIR)/. \
++	-I$(ROOTDIR)/modules/ixp425/ixp400-1.4/ixp400_xscale_sw/src/include \
++	-I$(ROOTDIR)/modules/ixp425/ixp400-1.4/ixp400_xscale_sw/src/linux \
++	-DUSE_IXP4XX_CRYPTO
++endif
++ifndef IXPDIR
++IXPDIR = ixp-version-is-not-supported
++endif
++
++ifeq ($(CONFIG_CPU_IXP46X),y)
++IXP_CFLAGS += -D__ixp46X
++else
++ifeq ($(CONFIG_CPU_IXP43X),y)
++IXP_CFLAGS += -D__ixp43X
++else
++IXP_CFLAGS += -D__ixp42X
++endif
++endif
++
++obj-$(CONFIG_OCF_IXP4XX) += ixp4xx.o
++
++obj ?= .
++EXTRA_CFLAGS += $(IXP_CFLAGS) -I$(obj)/.. -I$(obj)/.
++
++ifdef TOPDIR
++-include $(TOPDIR)/Rules.make
++endif
++
+diff -Nur linux-2.6.30.orig/crypto/ocf/Kconfig linux-2.6.30/crypto/ocf/Kconfig
+--- linux-2.6.30.orig/crypto/ocf/Kconfig	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.30/crypto/ocf/Kconfig	2009-06-11 10:55:27.000000000 +0200
+@@ -0,0 +1,101 @@
++menu "OCF Configuration"
++
++config OCF_OCF
++	tristate "OCF (Open Cryptographic Framework)"
++	help
++	  A linux port of the OpenBSD/FreeBSD crypto framework.
++
++config OCF_RANDOMHARVEST
++	bool "crypto random --- harvest entropy for /dev/random"
++	depends on OCF_OCF
++	help
++	  Includes code to harvest random numbers from devices that support it.
++
++config OCF_FIPS
++	bool "enable fips RNG checks"
++	depends on OCF_OCF && OCF_RANDOMHARVEST
++	help
++	  Run all RNG provided data through a fips check before
++	  adding it to /dev/random's entropy pool.
++
++config OCF_CRYPTODEV
++	tristate "cryptodev (user space support)"
++	depends on OCF_OCF
++	help
++	  The user space API to access crypto hardware.
++
++config OCF_CRYPTOSOFT
++	tristate "cryptosoft (software crypto engine)"
++	depends on OCF_OCF
++	help
++	  A software driver for the OCF framework that uses
++	  the kernel CryptoAPI.
++
++config OCF_SAFE
++	tristate "safenet (HW crypto engine)"
++	depends on OCF_OCF
++	help
++	  A driver for a number of the safenet Excel crypto accelerators.
++	  Currently tested and working on the 1141 and 1741.
++
++config OCF_IXP4XX
++	tristate "IXP4xx (HW crypto engine)"
++	depends on OCF_OCF
++	help
++	  XScale IXP4xx crypto accelerator driver.  Requires the
++	  Intel Access library.
++
++config OCF_IXP4XX_SHA1_MD5
++	bool "IXP4xx SHA1 and MD5 Hashing"
++	depends on OCF_IXP4XX
++	help
++	  Allows the IXP4xx crypto accelerator to perform SHA1 and MD5 hashing.
++	  Note: this is MUCH slower than using cryptosoft (software crypto engine).
++
++config OCF_HIFN
++	tristate "hifn (HW crypto engine)"
++	depends on OCF_OCF
++	help
++	  OCF driver for various HIFN based crypto accelerators.
++	  (7951, 7955, 7956, 7751, 7811)
++
++config OCF_HIFNHIPP
++	tristate "Hifn HIPP (HW packet crypto engine)"
++	depends on OCF_OCF
++	help
++	  OCF driver for various HIFN (HIPP) based crypto accelerators
++	  (7855)
++
++config OCF_TALITOS
++	tristate "talitos (HW crypto engine)"
++	depends on OCF_OCF
++	help
++	  OCF driver for Freescale's security engine (SEC/talitos).
++
++config OCF_PASEMI
++	tristate "pasemi (HW crypto engine)"
++	depends on OCF_OCF && PPC_PASEMI
++	help
++	  OCF driver for the PA Semi PWRficient DMA Engine
++
++config OCF_EP80579
++	tristate "ep80579 (HW crypto engine)"
++	depends on OCF_OCF
++	help
++	  OCF driver for the Intel EP80579 Integrated Processor Product Line.
++
++config OCF_OCFNULL
++	tristate "ocfnull (fake crypto engine)"
++	depends on OCF_OCF
++	help
++	  OCF driver for measuring ipsec overheads (does no crypto)
++
++config OCF_BENCH
++	tristate "ocf-bench (HW crypto in-kernel benchmark)"
++	depends on OCF_OCF
++	help
++	  A very simple encryption test for the in-kernel interface
++	  of OCF.  Also includes code to benchmark the IXP Access library
++	  for comparison.
++
++endmenu
+diff -Nur linux-2.6.30.orig/crypto/ocf/Makefile linux-2.6.30/crypto/ocf/Makefile
+--- linux-2.6.30.orig/crypto/ocf/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.30/crypto/ocf/Makefile	2009-06-11 10:55:27.000000000 +0200
+@@ -0,0 +1,121 @@
++# for SGlinux builds
++-include $(ROOTDIR)/modules/.config
++
++OCF_OBJS = crypto.o criov.o
++
++ifdef CONFIG_OCF_RANDOMHARVEST
++	OCF_OBJS += random.o
++endif
++
++ifdef CONFIG_OCF_FIPS
++	OCF_OBJS += rndtest.o
++endif
++
++# Add in autoconf.h to get #defines for CONFIG_xxx
++AUTOCONF_H=$(ROOTDIR)/modules/autoconf.h
++ifeq ($(AUTOCONF_H), $(wildcard $(AUTOCONF_H)))
++	EXTRA_CFLAGS += -include $(AUTOCONF_H)
++	export EXTRA_CFLAGS
++endif
++
++ifndef obj
++	obj ?= .
++	_obj = subdir
++	mod-subdirs := safe hifn ixp4xx talitos ocfnull
++	export-objs += crypto.o criov.o random.o
++	list-multi += ocf.o
++	_slash :=
++else
++	_obj = obj
++	_slash := /
++endif
++
++EXTRA_CFLAGS += -I$(obj)/.
++
++obj-$(CONFIG_OCF_OCF)         += ocf.o
++obj-$(CONFIG_OCF_CRYPTODEV)   += cryptodev.o
++obj-$(CONFIG_OCF_CRYPTOSOFT)  += cryptosoft.o
++obj-$(CONFIG_OCF_BENCH)       += ocf-bench.o
++
++$(_obj)-$(CONFIG_OCF_SAFE)    += safe$(_slash)
++$(_obj)-$(CONFIG_OCF_HIFN)    += hifn$(_slash)
++$(_obj)-$(CONFIG_OCF_IXP4XX)  += ixp4xx$(_slash)
++$(_obj)-$(CONFIG_OCF_TALITOS) += talitos$(_slash)
++$(_obj)-$(CONFIG_OCF_PASEMI)  += pasemi$(_slash)
++$(_obj)-$(CONFIG_OCF_EP80579) += ep80579$(_slash)
++$(_obj)-$(CONFIG_OCF_OCFNULL) += ocfnull$(_slash)
++
++ocf-objs := $(OCF_OBJS)
++
++$(list-multi) dummy1: $(ocf-objs)
++	$(LD) -r -o $@ $(ocf-objs)
++
++.PHONY: clean
++clean:
++	rm -f *.o *.ko .*.o.flags .*.ko.cmd .*.o.cmd .*.mod.o.cmd *.mod.c
++	rm -f */*.o */*.ko */.*.o.cmd */.*.ko.cmd */.*.mod.o.cmd */*.mod.c */.*.o.flags
++
++ifdef TOPDIR
++-include $(TOPDIR)/Rules.make
++endif
++
++#
++# release gen targets
++#
++
++.PHONY: patch
++patch:
++	REL=`date +%Y%m%d`; \
++		patch=ocf-linux-$$REL.patch; \
++		patch24=ocf-linux-24-$$REL.patch; \
++		patch26=ocf-linux-26-$$REL.patch; \
++		( \
++			find . -name Makefile; \
++			find . -name Config.in; \
++			find . -name Kconfig; \
++			find . -name README; \
++			find . -name '*.[ch]' | grep -v '.mod.c'; \
++		) | while read t; do \
++			diff -Nau /dev/null $$t | sed 's?^+++ \./?+++ linux/crypto/ocf/?'; \
++		done > $$patch; \
++		cat patches/linux-2.4.35-ocf.patch $$patch > $$patch24; \
++		cat patches/linux-2.6.26-ocf.patch $$patch > $$patch26
++
++.PHONY: tarball
++tarball:
++	REL=`date +%Y%m%d`; RELDIR=/tmp/ocf-linux-$$REL; \
++		CURDIR=`pwd`; \
++		rm -rf /tmp/ocf-linux-$$REL*; \
++		mkdir -p $$RELDIR/tools; \
++		cp README* $$RELDIR; \
++		cp patches/openss*.patch $$RELDIR; \
++		cp patches/crypto-tools.patch $$RELDIR; \
++		cp tools/[!C]* $$RELDIR/tools; \
++		cd ..; \
++		tar cvf $$RELDIR/ocf-linux.tar \
++					--exclude=CVS \
++					--exclude=.* \
++					--exclude=*.o \
++					--exclude=*.ko \
++					--exclude=*.mod.* \
++					--exclude=README* \
++					--exclude=ocf-*.patch \
++					--exclude=ocf/patches/openss*.patch \
++					--exclude=ocf/patches/crypto-tools.patch \
++					--exclude=ocf/tools \
++					ocf; \
++		gzip -9 $$RELDIR/ocf-linux.tar; \
++		cd /tmp; \
++		tar cvf ocf-linux-$$REL.tar ocf-linux-$$REL; \
++		gzip -9 ocf-linux-$$REL.tar; \
++		cd $$CURDIR/../../user; \
++		rm -rf /tmp/crypto-tools-$$REL*; \
++		tar cvf /tmp/crypto-tools-$$REL.tar \
++					--exclude=CVS \
++					--exclude=.* \
++					--exclude=*.o \
++					--exclude=cryptotest \
++					--exclude=cryptokeytest \
++					crypto-tools; \
++		gzip -9 /tmp/crypto-tools-$$REL.tar
++
+diff -Nur linux-2.6.30.orig/crypto/ocf/ocf-bench.c linux-2.6.30/crypto/ocf/ocf-bench.c
+--- linux-2.6.30.orig/crypto/ocf/ocf-bench.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.30/crypto/ocf/ocf-bench.c	2009-06-11 10:55:27.000000000 +0200
+@@ -0,0 +1,436 @@
++/*
++ * A loadable module that benchmarks the OCF crypto speed from kernel space.
++ *
++ * Copyright (C) 2004-2007 David McCullough <david_mccullough@securecomputing.com>
++ *
++ * LICENSE TERMS
++ *
++ * The free distribution and use of this software in both source and binary
++ * form is allowed (with or without changes) provided that:
++ *
++ *   1. distributions of this source code include the above copyright
++ *      notice, this list of conditions and the following disclaimer;
++ *
++ *   2. distributions in binary form include the above copyright
++ *      notice, this list of conditions and the following disclaimer
++ *      in the documentation and/or other associated materials;
++ *
++ *   3. the copyright holder's name is not used to endorse products
++ *      built using this software without specific written permission.
++ *
++ * ALTERNATIVELY, provided that this notice is retained in full, this product
++ * may be distributed under the terms of the GNU General Public License (GPL),
++ * in which case the provisions of the GPL apply INSTEAD OF those given above.
++ *
++ * DISCLAIMER
++ *
++ * This software is provided 'as is' with no explicit or implied warranties
++ * in respect of its properties, including, but not limited to, correctness
++ * and/or fitness for purpose.
++ */
++
++
++#ifndef AUTOCONF_INCLUDED
++#include <linux/config.h>
++#endif
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/list.h>
++#include <linux/slab.h>
++#include <linux/wait.h>
++#include <linux/sched.h>
++#include <linux/spinlock.h>
++#include <linux/version.h>
++#include <linux/interrupt.h>
++#include <cryptodev.h>
++
++#ifdef I_HAVE_AN_XSCALE_WITH_INTEL_SDK
++#define BENCH_IXP_ACCESS_LIB 1
++#endif
++#ifdef BENCH_IXP_ACCESS_LIB
++#include <IxTypes.h>
++#include <IxOsBuffMgt.h>
++#include <IxNpeDl.h>
++#include <IxCryptoAcc.h>
++#include <IxQMgr.h>
++#include <IxOsServices.h>
++#include <IxOsCacheMMU.h>
++#endif
++
++/*
++ * support for access lib version 1.4
++ */
++#ifndef IX_MBUF_PRIV
++#define IX_MBUF_PRIV(x) ((x)->priv)
++#endif
++
++/*
++ * the number of simultaneously active requests
++ */
++static int request_q_len = 20;
++module_param(request_q_len, int, 0);
++MODULE_PARM_DESC(request_q_len, "Number of outstanding requests");
++/*
++ * how many requests we want to have processed
++ */
++static int request_num = 1024;
++module_param(request_num, int, 0);
++MODULE_PARM_DESC(request_num, "run for at least this many requests");
++/*
++ * the size of each request
++ */
++static int request_size = 1500;
++module_param(request_size, int, 0);
++MODULE_PARM_DESC(request_size, "size of each request");
++
++/*
++ * a structure for each request
++ */
++typedef struct  {
++	struct work_struct work;
++#ifdef BENCH_IXP_ACCESS_LIB
++	IX_MBUF mbuf;
++#endif
++	unsigned char *buffer;
++} request_t;
++
++static request_t *requests;
++
++static int outstanding;
++static int total;
++
++/*************************************************************************/
++/*
++ * OCF benchmark routines
++ */
++
++static uint64_t ocf_cryptoid;
++static int ocf_init(void);
++static int ocf_cb(struct cryptop *crp);
++static void ocf_request(void *arg);
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
++static void ocf_request_wq(struct work_struct *work);
++#endif
++
++static int
++ocf_init(void)
++{
++	int error;
++	struct cryptoini crie, cria;
++	struct cryptodesc crda, crde;
++
++	memset(&crie, 0, sizeof(crie));
++	memset(&cria, 0, sizeof(cria));
++	memset(&crde, 0, sizeof(crde));
++	memset(&crda, 0, sizeof(crda));
++
++	cria.cri_alg  = CRYPTO_SHA1_HMAC;
++	cria.cri_klen = 20 * 8;
++	cria.cri_key  = "0123456789abcdefghij";
++
++	crie.cri_alg  = CRYPTO_3DES_CBC;
++	crie.cri_klen = 24 * 8;
++	crie.cri_key  = "0123456789abcdefghijklmn";
++
++	crie.cri_next = &cria;
++
++	error = crypto_newsession(&ocf_cryptoid, &crie, 0);
++	if (error) {
++		printk("crypto_newsession failed %d\n", error);
++		return -1;
++	}
++	return 0;
++}
++
++static int
++ocf_cb(struct cryptop *crp)
++{
++	request_t *r = (request_t *) crp->crp_opaque;
++
++	if (crp->crp_etype)
++		printk("Error in OCF processing: %d\n", crp->crp_etype);
++	total++;
++	crypto_freereq(crp);
++	crp = NULL;
++
++	if (total > request_num) {
++		outstanding--;
++		return 0;
++	}
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
++	INIT_WORK(&r->work, ocf_request_wq);
++#else
++	INIT_WORK(&r->work, ocf_request, r);
++#endif
++	schedule_work(&r->work);
++	return 0;
++}
++
++
++static void
++ocf_request(void *arg)
++{
++	request_t *r = arg;
++	struct cryptop *crp = crypto_getreq(2);
++	struct cryptodesc *crde, *crda;
++
++	if (!crp) {
++		outstanding--;
++		return;
++	}
++
++	crde = crp->crp_desc;
++	crda = crde->crd_next;
++
++	crda->crd_skip = 0;
++	crda->crd_flags = 0;
++	crda->crd_len = request_size;
++	crda->crd_inject = request_size;
++	crda->crd_alg = CRYPTO_SHA1_HMAC;
++	crda->crd_key = "0123456789abcdefghij";
++	crda->crd_klen = 20 * 8;
++
++	crde->crd_skip = 0;
++	crde->crd_flags = CRD_F_IV_EXPLICIT | CRD_F_ENCRYPT;
++	crde->crd_len = request_size;
++	crde->crd_inject = request_size;
++	crde->crd_alg = CRYPTO_3DES_CBC;
++	crde->crd_key = "0123456789abcdefghijklmn";
++	crde->crd_klen = 24 * 8;
++
++	crp->crp_ilen = request_size + 64;
++	crp->crp_flags = CRYPTO_F_CBIMM;
++	crp->crp_buf = (caddr_t) r->buffer;
++	crp->crp_callback = ocf_cb;
++	crp->crp_sid = ocf_cryptoid;
++	crp->crp_opaque = (caddr_t) r;
++	crypto_dispatch(crp);
++}
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
++static void
++ocf_request_wq(struct work_struct *work)
++{
++	request_t *r = container_of(work, request_t, work);
++	ocf_request(r);
++}
++#endif
++
++/*************************************************************************/
++#ifdef BENCH_IXP_ACCESS_LIB
++/*************************************************************************/
++/*
++ * CryptoAcc benchmark routines
++ */
++
++static IxCryptoAccCtx ixp_ctx;
++static UINT32 ixp_ctx_id;
++static IX_MBUF ixp_pri;
++static IX_MBUF ixp_sec;
++static int ixp_registered = 0;
++
++static void ixp_register_cb(UINT32 ctx_id, IX_MBUF *bufp,
++					IxCryptoAccStatus status);
++static void ixp_perform_cb(UINT32 ctx_id, IX_MBUF *sbufp, IX_MBUF *dbufp,
++					IxCryptoAccStatus status);
++static void ixp_request(void *arg);
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
++static void ixp_request_wq(struct work_struct *work);
++#endif
++
++static int
++ixp_init(void)
++{
++	IxCryptoAccStatus status;
++
++	ixp_ctx.cipherCtx.cipherAlgo = IX_CRYPTO_ACC_CIPHER_3DES;
++	ixp_ctx.cipherCtx.cipherMode = IX_CRYPTO_ACC_MODE_CBC;
++	ixp_ctx.cipherCtx.cipherKeyLen = 24;
++	ixp_ctx.cipherCtx.cipherBlockLen = IX_CRYPTO_ACC_DES_BLOCK_64;
++	ixp_ctx.cipherCtx.cipherInitialVectorLen = IX_CRYPTO_ACC_DES_IV_64;
++	memcpy(ixp_ctx.cipherCtx.key.cipherKey, "0123456789abcdefghijklmn", 24);
++
++	ixp_ctx.authCtx.authAlgo = IX_CRYPTO_ACC_AUTH_SHA1;
++	ixp_ctx.authCtx.authDigestLen = 12;
++	ixp_ctx.authCtx.aadLen = 0;
++	ixp_ctx.authCtx.authKeyLen = 20;
++	memcpy(ixp_ctx.authCtx.key.authKey, "0123456789abcdefghij", 20);
++
++	ixp_ctx.useDifferentSrcAndDestMbufs = 0;
++	ixp_ctx.operation = IX_CRYPTO_ACC_OP_ENCRYPT_AUTH ;
++
++	IX_MBUF_MLEN(&ixp_pri)  = IX_MBUF_PKT_LEN(&ixp_pri) = 128;
++	IX_MBUF_MDATA(&ixp_pri) = (unsigned char *) kmalloc(128, SLAB_ATOMIC);
++	IX_MBUF_MLEN(&ixp_sec)  = IX_MBUF_PKT_LEN(&ixp_sec) = 128;
++	IX_MBUF_MDATA(&ixp_sec) = (unsigned char *) kmalloc(128, SLAB_ATOMIC);
++
++	status = ixCryptoAccCtxRegister(&ixp_ctx, &ixp_pri, &ixp_sec,
++			ixp_register_cb, ixp_perform_cb, &ixp_ctx_id);
++
++	if (IX_CRYPTO_ACC_STATUS_SUCCESS == status) {
++		while (!ixp_registered)
++			schedule();
++		return ixp_registered < 0 ? -1 : 0;
++	}
++
++	printk("ixp: ixCryptoAccCtxRegister failed %d\n", status);
++	return -1;
++}
++
++static void
++ixp_register_cb(UINT32 ctx_id, IX_MBUF *bufp, IxCryptoAccStatus status)
++{
++	if (bufp) {
++		IX_MBUF_MLEN(bufp) = IX_MBUF_PKT_LEN(bufp) = 0;
++		kfree(IX_MBUF_MDATA(bufp));
++		IX_MBUF_MDATA(bufp) = NULL;
++	}
++
++	if (IX_CRYPTO_ACC_STATUS_WAIT == status)
++		return;
++	if (IX_CRYPTO_ACC_STATUS_SUCCESS == status)
++		ixp_registered = 1;
++	else
++		ixp_registered = -1;
++}
++
++static void
++ixp_perform_cb(
++	UINT32 ctx_id,
++	IX_MBUF *sbufp,
++	IX_MBUF *dbufp,
++	IxCryptoAccStatus status)
++{
++	request_t *r = NULL;
++
++	total++;
++	if (total > request_num) {
++		outstanding--;
++		return;
++	}
++
++	if (!sbufp || !(r = IX_MBUF_PRIV(sbufp))) {
++		printk("crappo %p %p\n", sbufp, r);
++		outstanding--;
++		return;
++	}
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
++	INIT_WORK(&r->work, ixp_request_wq);
++#else
++	INIT_WORK(&r->work, ixp_request, r);
++#endif
++	schedule_work(&r->work);
++}
++
++static void
++ixp_request(void *arg)
++{
++	request_t *r = arg;
++	IxCryptoAccStatus status;
++
++	memset(&r->mbuf, 0, sizeof(r->mbuf));
++	IX_MBUF_MLEN(&r->mbuf) = IX_MBUF_PKT_LEN(&r->mbuf) = request_size + 64;
++	IX_MBUF_MDATA(&r->mbuf) = r->buffer;
++	IX_MBUF_PRIV(&r->mbuf) = r;
++	status = ixCryptoAccAuthCryptPerform(ixp_ctx_id, &r->mbuf, NULL,
++			0, request_size, 0, request_size, request_size, r->buffer);
++	if (IX_CRYPTO_ACC_STATUS_SUCCESS != status) {
++		printk("status1 = %d\n", status);
++		outstanding--;
++		return;
++	}
++	return;
++}
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
++static void
++ixp_request_wq(struct work_struct *work)
++{
++	request_t *r = container_of(work, request_t, work);
++	ixp_request(r);
++}
++#endif
++
++/*************************************************************************/
++#endif /* BENCH_IXP_ACCESS_LIB */
++/*************************************************************************/
++
++int
++ocfbench_init(void)
++{
++	int i, jstart, jstop;
++
++	printk("Crypto Speed tests\n");
++
++	requests = kmalloc(sizeof(request_t) * request_q_len, GFP_KERNEL);
++	if (!requests) {
++		printk("malloc failed\n");
++		return -EINVAL;
++	}
++
++	for (i = 0; i < request_q_len; i++) {
++		/* +64 for return data */
++		requests[i].buffer = kmalloc(request_size + 128, GFP_DMA);
++		if (!requests[i].buffer) {
++			printk("malloc failed\n");
++			return -EINVAL;
++		}
++		memset(requests[i].buffer, '0' + i, request_size + 128);
++	}
++
++	/*
++	 * OCF benchmark
++	 */
++	printk("OCF: testing ...\n");
++	ocf_init();
++	total = outstanding = 0;
++	jstart = jiffies;
++	for (i = 0; i < request_q_len; i++) {
++		outstanding++;
++		ocf_request(&requests[i]);
++	}
++	while (outstanding > 0)
++		schedule();
++	jstop = jiffies;
++
++	printk("OCF: %d requests of %d bytes in %d jiffies\n", total, request_size,
++			jstop - jstart);
++
++#ifdef BENCH_IXP_ACCESS_LIB
++	/*
++	 * IXP benchmark
++	 */
++	printk("IXP: testing ...\n");
++	ixp_init();
++	total = outstanding = 0;
++	jstart = jiffies;
++	for (i = 0; i < request_q_len; i++) {
++		outstanding++;
++		ixp_request(&requests[i]);
++	}
++	while (outstanding > 0)
++		schedule();
++	jstop = jiffies;
++
++	printk("IXP: %d requests of %d bytes in %d jiffies\n", total, request_size,
++			jstop - jstart);
++#endif /* BENCH_IXP_ACCESS_LIB */
++
++	for (i = 0; i < request_q_len; i++)
++		kfree(requests[i].buffer);
++	kfree(requests);
++	return -EINVAL; /* always fail to load so it can be re-run quickly ;-) */
++}
++
++static void __exit ocfbench_exit(void)
++{
++}
++
++module_init(ocfbench_init);
++module_exit(ocfbench_exit);
++
++MODULE_LICENSE("BSD");
++MODULE_AUTHOR("David McCullough <david_mccullough@securecomputing.com>");
++MODULE_DESCRIPTION("Benchmark various in-kernel crypto speeds");
+diff -Nur linux-2.6.30.orig/crypto/ocf/ocf-compat.h linux-2.6.30/crypto/ocf/ocf-compat.h
+--- linux-2.6.30.orig/crypto/ocf/ocf-compat.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.30/crypto/ocf/ocf-compat.h	2009-06-11 10:55:27.000000000 +0200
+@@ -0,0 +1,270 @@
++#ifndef _BSD_COMPAT_H_
++#define _BSD_COMPAT_H_ 1
++/****************************************************************************/
++/*
++ * Provide compat routines for older linux kernels and BSD kernels
++ *
++ * Written by David McCullough <david_mccullough@securecomputing.com>
++ * Copyright (C) 2007 David McCullough <david_mccullough@securecomputing.com>
++ *
++ * LICENSE TERMS
++ *
++ * The free distribution and use of this software in both source and binary
++ * form is allowed (with or without changes) provided that:
++ *
++ *   1. distributions of this source code include the above copyright
++ *      notice, this list of conditions and the following disclaimer;
++ *
++ *   2. distributions in binary form include the above copyright
++ *      notice, this list of conditions and the following disclaimer
++ *      in the documentation and/or other associated materials;
++ *
++ *   3. the copyright holder's name is not used to endorse products
++ *      built using this software without specific written permission.
++ *
++ * ALTERNATIVELY, provided that this notice is retained in full, this file
++ * may be distributed under the terms of the GNU General Public License (GPL),
++ * in which case the provisions of the GPL apply INSTEAD OF those given above.
++ *
++ * DISCLAIMER
++ *
++ * This software is provided 'as is' with no explicit or implied warranties
++ * in respect of its properties, including, but not limited to, correctness
++ * and/or fitness for purpose.
++ */
++/****************************************************************************/
++#ifdef __KERNEL__
++/*
++ * fake some BSD driver interface stuff specifically for OCF use
++ */
++
++typedef struct ocf_device *device_t;
++
++typedef struct {
++	int (*cryptodev_newsession)(device_t dev, u_int32_t *sidp, struct cryptoini *cri);
++	int (*cryptodev_freesession)(device_t dev, u_int64_t tid);
++	int (*cryptodev_process)(device_t dev, struct cryptop *crp, int hint);
++	int (*cryptodev_kprocess)(device_t dev, struct cryptkop *krp, int hint);
++} device_method_t;
++#define DEVMETHOD(id, func)	id: func
++
++struct ocf_device {
++	char name[32];		/* the driver name */
++	char nameunit[32];	/* the driver name + HW instance */
++	int  unit;
++	device_method_t	methods;
++	void *softc;
++};
++
++#define CRYPTODEV_NEWSESSION(dev, sid, cri) \
++	((*(dev)->methods.cryptodev_newsession)(dev,sid,cri))
++#define CRYPTODEV_FREESESSION(dev, sid) \
++	((*(dev)->methods.cryptodev_freesession)(dev, sid))
++#define CRYPTODEV_PROCESS(dev, crp, hint) \
++	((*(dev)->methods.cryptodev_process)(dev, crp, hint))
++#define CRYPTODEV_KPROCESS(dev, krp, hint) \
++	((*(dev)->methods.cryptodev_kprocess)(dev, krp, hint))
++
++#define device_get_name(dev)	((dev)->name)
++#define device_get_nameunit(dev)	((dev)->nameunit)
++#define device_get_unit(dev)	((dev)->unit)
++#define device_get_softc(dev)	((dev)->softc)
++
++#define	softc_device_decl \
++		struct ocf_device _device; \
++		device_t
++
++#define	softc_device_init(_sc, _name, _unit, _methods) \
++	if (1) {\
++	strncpy((_sc)->_device.name, _name, sizeof((_sc)->_device.name) - 1); \
++	snprintf((_sc)->_device.nameunit, sizeof((_sc)->_device.name), "%s%d", _name, _unit); \
++	(_sc)->_device.unit = _unit; \
++	(_sc)->_device.methods = _methods; \
++	(_sc)->_device.softc = (void *) _sc; \
++	*(device_t *)((softc_get_device(_sc))+1) = &(_sc)->_device; \
++	} else
++
++#define	softc_get_device(_sc)	(&(_sc)->_device)
++
++/*
++ * iomem support for 2.4 and 2.6 kernels
++ */
++#include <linux/version.h>
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
++#define ocf_iomem_t	unsigned long
++
++/*
++ * implement simple workqueue like support for older kernels
++ */
++
++#include <linux/tqueue.h>
++
++#define work_struct tq_struct
++
++#define INIT_WORK(wp, fp, ap) \
++	do { \
++		(wp)->sync = 0; \
++		(wp)->routine = (fp); \
++		(wp)->data = (ap); \
++	} while (0)
++
++#define schedule_work(wp) \
++	do { \
++		queue_task((wp), &tq_immediate); \
++		mark_bh(IMMEDIATE_BH); \
++	} while (0)
++
++#define flush_scheduled_work()	run_task_queue(&tq_immediate)
++
++#else
++#define ocf_iomem_t	void __iomem *
++
++#include <linux/workqueue.h>
++
++#endif
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
++#include <linux/fdtable.h>
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
++#define files_fdtable(files)	(files)
++#endif
++
++#ifdef MODULE_PARM
++#undef module_param	/* just in case */
++#define	module_param(a,b,c)		MODULE_PARM(a,"i")
++#endif
++
++#define bzero(s,l)		memset(s,0,l)
++#define bcopy(s,d,l)	memcpy(d,s,l)
++#define bcmp(x, y, l)	memcmp(x,y,l)
++
++#define MIN(x,y)	((x) < (y) ? (x) : (y))
++
++#define device_printf(dev, a...) ({ \
++				printk("%s: ", device_get_nameunit(dev)); printk(a); \
++			})
++
++#undef printf
++#define printf(fmt...)	printk(fmt)
++
++#define KASSERT(c,p)	if (!(c)) { printk p ; } else
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
++#define ocf_daemonize(str) \
++	daemonize(); \
++	spin_lock_irq(&current->sigmask_lock); \
++	sigemptyset(&current->blocked); \
++	recalc_sigpending(current); \
++	spin_unlock_irq(&current->sigmask_lock); \
++	sprintf(current->comm, str);
++#else
++#define ocf_daemonize(str) daemonize(str);
++#endif
++
++#define	TAILQ_INSERT_TAIL(q,d,m) list_add_tail(&(d)->m, (q))
++#define	TAILQ_EMPTY(q)	list_empty(q)
++#define	TAILQ_FOREACH(v, q, m) list_for_each_entry(v, q, m)
++
++#define read_random(p,l) get_random_bytes(p,l)
++
++#define DELAY(x)	((x) > 2000 ? mdelay((x)/1000) : udelay(x))
++#define strtoul simple_strtoul
++
++#define pci_get_vendor(dev)	((dev)->vendor)
++#define pci_get_device(dev)	((dev)->device)
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
++#define pci_set_consistent_dma_mask(dev, mask) (0)
++#endif
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10)
++#define pci_dma_sync_single_for_cpu pci_dma_sync_single
++#endif
++
++#ifndef DMA_32BIT_MASK
++#define DMA_32BIT_MASK  0x00000000ffffffffULL
++#endif
++
++#define htole32(x)	cpu_to_le32(x)
++#define htobe32(x)	cpu_to_be32(x)
++#define htole16(x)	cpu_to_le16(x)
++#define htobe16(x)	cpu_to_be16(x)
++
++/* older kernels don't have these */
++
++#ifndef IRQ_NONE
++#define IRQ_NONE
++#define IRQ_HANDLED
++#define irqreturn_t void
++#endif
++#ifndef IRQF_SHARED
++#define IRQF_SHARED	SA_SHIRQ
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
++# define strlcpy(dest,src,len) \
++		({strncpy(dest,src,(len)-1); ((char *)dest)[(len)-1] = '\0'; })
++#endif
++
++#ifndef MAX_ERRNO
++#define MAX_ERRNO	4095
++#endif
++#ifndef IS_ERR_VALUE
++#define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)
++#endif
++
++/*
++ * common debug for all
++ */
++#if 1
++#define dprintk(a...)	do { if (debug) printk(a); } while(0)
++#else
++#define dprintk(a...)
++#endif
++
++#ifndef SLAB_ATOMIC
++/* Changed in 2.6.20, must use GFP_ATOMIC now */
++#define	SLAB_ATOMIC	GFP_ATOMIC
++#endif
++
++/*
++ * need some additional support for older kernels */
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,2)
++#define pci_register_driver_compat(driver, rc) \
++	do { \
++		if ((rc) > 0) { \
++			(rc) = 0; \
++		} else if (rc == 0) { \
++			(rc) = -ENODEV; \
++		} else { \
++			pci_unregister_driver(driver); \
++		} \
++	} while (0)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10)
++#define pci_register_driver_compat(driver,rc) ((rc) = (rc) < 0 ? (rc) : 0)
++#else
++#define pci_register_driver_compat(driver,rc)
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
++
++#include <asm/scatterlist.h>
++
++static inline void sg_set_page(struct scatterlist *sg,  struct page *page,
++			       unsigned int len, unsigned int offset)
++{
++	sg->page = page;
++	sg->offset = offset;
++	sg->length = len;
++}
++
++static inline void *sg_virt(struct scatterlist *sg)
++{
++	return page_address(sg->page) + sg->offset;
++}
++
++#endif
++
++#endif /* __KERNEL__ */
++
++/****************************************************************************/
++#endif /* _BSD_COMPAT_H_ */
+diff -Nur linux-2.6.30.orig/crypto/ocf/ocfnull/Makefile linux-2.6.30/crypto/ocf/ocfnull/Makefile
+--- linux-2.6.30.orig/crypto/ocf/ocfnull/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.30/crypto/ocf/ocfnull/Makefile	2009-06-11 10:55:27.000000000 +0200
+@@ -0,0 +1,12 @@
++# for SGlinux builds
++-include $(ROOTDIR)/modules/.config
++
++obj-$(CONFIG_OCF_OCFNULL) += ocfnull.o
++
++obj ?= .
++EXTRA_CFLAGS += -I$(obj)/..
++
++ifdef TOPDIR
++-include $(TOPDIR)/Rules.make
++endif
++
+diff -Nur linux-2.6.30.orig/crypto/ocf/ocfnull/ocfnull.c linux-2.6.30/crypto/ocf/ocfnull/ocfnull.c
+--- linux-2.6.30.orig/crypto/ocf/ocfnull/ocfnull.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.30/crypto/ocf/ocfnull/ocfnull.c	2009-06-11 10:55:27.000000000 +0200
+@@ -0,0 +1,203 @@
++/*
++ * An OCF module for determining the cost of crypto versus the cost of
++ * IPSec processing outside of OCF.  This modules gives us the effect of
++ * zero cost encryption,  of course you will need to run it at both ends
++ * since it does no crypto at all.
++ *
++ * Written by David McCullough <david_mccullough@securecomputing.com>
++ * Copyright (C) 2006-2007 David McCullough 
++ *
++ * LICENSE TERMS
++ *
++ * The free distribution and use of this software in both source and binary
++ * form is allowed (with or without changes) provided that:
++ *
++ *   1. distributions of this source code include the above copyright
++ *      notice, this list of conditions and the following disclaimer;
++ *
++ *   2. distributions in binary form include the above copyright
++ *      notice, this list of conditions and the following disclaimer
++ *      in the documentation and/or other associated materials;
++ *
++ *   3. the copyright holder's name is not used to endorse products
++ *      built using this software without specific written permission.
++ *
++ * ALTERNATIVELY, provided that this notice is retained in full, this product
++ * may be distributed under the terms of the GNU General Public License (GPL),
++ * in which case the provisions of the GPL apply INSTEAD OF those given above.
++ *
++ * DISCLAIMER
++ *
++ * This software is provided 'as is' with no explicit or implied warranties
++ * in respect of its properties, including, but not limited to, correctness
++ * and/or fitness for purpose.
++ */
++
++#ifndef AUTOCONF_INCLUDED
++#include <linux/config.h>
++#endif
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/list.h>
++#include <linux/slab.h>
++#include <linux/sched.h>
++#include <linux/wait.h>
++#include <linux/crypto.h>
++#include <linux/interrupt.h>
++
++#include <cryptodev.h>
++#include <uio.h>
++
++static int32_t			 null_id = -1;
++static u_int32_t		 null_sesnum = 0;
++
++static int null_process(device_t, struct cryptop *, int);
++static int null_newsession(device_t, u_int32_t *, struct cryptoini *);
++static int null_freesession(device_t, u_int64_t);
++
++#define debug ocfnull_debug
++int ocfnull_debug = 0;
++module_param(ocfnull_debug, int, 0644);
++MODULE_PARM_DESC(ocfnull_debug, "Enable debug");
++
++/*
++ * dummy device structure
++ */
++
++static struct {
++	softc_device_decl	sc_dev;
++} nulldev;
++
++static device_method_t null_methods = {
++	/* crypto device methods */
++	DEVMETHOD(cryptodev_newsession,	null_newsession),
++	DEVMETHOD(cryptodev_freesession,null_freesession),
++	DEVMETHOD(cryptodev_process,	null_process),
++};
++
++/*
++ * Generate a new software session.
++ */
++static int
++null_newsession(device_t arg, u_int32_t *sid, struct cryptoini *cri)
++{
++	dprintk("%s()\n", __FUNCTION__);
++	if (sid == NULL || cri == NULL) {
++		dprintk("%s,%d - EINVAL\n", __FILE__, __LINE__);
++		return EINVAL;
++	}
++
++	if (null_sesnum == 0)
++		null_sesnum++;
++	*sid = null_sesnum++;
++	return 0;
++}
++
++
++/*
++ * Free a session.
++ */
++static int
++null_freesession(device_t arg, u_int64_t tid)
++{
++	u_int32_t sid = CRYPTO_SESID2LID(tid);
++
++	dprintk("%s()\n", __FUNCTION__);
++	if (sid > null_sesnum) {
++		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
++		return EINVAL;
++	}
++
++	/* Silently accept and return */
++	if (sid == 0)
++		return 0;
++	return 0;
++}
++
++
++/*
++ * Process a request.
++ */
++static int
++null_process(device_t arg, struct cryptop *crp, int hint)
++{
++	unsigned int lid;
++
++	dprintk("%s()\n", __FUNCTION__);
++
++	/* Sanity check */
++	if (crp == NULL) {
++		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
++		return EINVAL;
++	}
++
++	crp->crp_etype = 0;
++
++	if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
++		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
++		crp->crp_etype = EINVAL;
++		goto done;
++	}
++
++	/*
++	 * find the session we are using
++	 */
++
++	lid = crp->crp_sid & 0xffffffff;
++	if (lid >= null_sesnum || lid == 0) {
++		crp->crp_etype = ENOENT;
++		dprintk("%s,%d: ENOENT\n", __FILE__, __LINE__);
++		goto done;
++	}
++
++done:
++	crypto_done(crp);
++	return 0;
++}
++
++
++/*
++ * our driver startup and shutdown routines
++ */
++
++static int
++null_init(void)
++{
++	dprintk("%s(%p)\n", __FUNCTION__, null_init);
++
++	memset(&nulldev, 0, sizeof(nulldev));
++	softc_device_init(&nulldev, "ocfnull", 0, null_methods);
++
++	null_id = crypto_get_driverid(softc_get_device(&nulldev),
++				CRYPTOCAP_F_HARDWARE);
++	if (null_id < 0)
++		panic("ocfnull: crypto device cannot initialize!");
++
++#define	REGISTER(alg) \
++	crypto_register(null_id,alg,0,0)
++	REGISTER(CRYPTO_DES_CBC);
++	REGISTER(CRYPTO_3DES_CBC);
++	REGISTER(CRYPTO_RIJNDAEL128_CBC);
++	REGISTER(CRYPTO_MD5);
++	REGISTER(CRYPTO_SHA1);
++	REGISTER(CRYPTO_MD5_HMAC);
++	REGISTER(CRYPTO_SHA1_HMAC);
++#undef REGISTER
++
++	return 0;
++}
++
++static void
++null_exit(void)
++{
++	dprintk("%s()\n", __FUNCTION__);
++	crypto_unregister_all(null_id);
++	null_id = -1;
++}
++
++module_init(null_init);
++module_exit(null_exit);
++
++MODULE_LICENSE("Dual BSD/GPL");
++MODULE_AUTHOR("David McCullough <david_mccullough@securecomputing.com>");
++MODULE_DESCRIPTION("ocfnull - claims a lot but does nothing");
+diff -Nur linux-2.6.30.orig/crypto/ocf/pasemi/Makefile linux-2.6.30/crypto/ocf/pasemi/Makefile
+--- linux-2.6.30.orig/crypto/ocf/pasemi/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.30/crypto/ocf/pasemi/Makefile	2009-06-11 10:55:27.000000000 +0200
+@@ -0,0 +1,12 @@
++# for SGlinux builds
++-include $(ROOTDIR)/modules/.config
++
++obj-$(CONFIG_OCF_PASEMI) += pasemi.o
++
++obj ?= .
++EXTRA_CFLAGS += -I$(obj)/.. -I$(obj)/
++
++ifdef TOPDIR
++-include $(TOPDIR)/Rules.make
++endif
++
+diff -Nur linux-2.6.30.orig/crypto/ocf/pasemi/pasemi.c linux-2.6.30/crypto/ocf/pasemi/pasemi.c
+--- linux-2.6.30.orig/crypto/ocf/pasemi/pasemi.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.30/crypto/ocf/pasemi/pasemi.c	2009-06-11 10:55:27.000000000 +0200
+@@ -0,0 +1,1009 @@
++/*
++ * Copyright (C) 2007 PA Semi, Inc
++ *
++ * Driver for the PA Semi PWRficient DMA Crypto Engine
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
++ */
++
++#ifndef AUTOCONF_INCLUDED
++#include <linux/config.h>
++#endif
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/interrupt.h>
++#include <linux/timer.h>
++#include <linux/random.h>
++#include <linux/skbuff.h>
++#include <asm/scatterlist.h>
++#include <linux/moduleparam.h>
++#include <linux/pci.h>
++#include <cryptodev.h>
++#include <uio.h>
++#include "pasemi_fnu.h"
++
++#define DRV_NAME "pasemi"
++
++#define TIMER_INTERVAL 1000
++
++static void __devexit pasemi_dma_remove(struct pci_dev *pdev);
++static struct pasdma_status volatile * dma_status;
++
++static int debug;
++module_param(debug, int, 0644);
++MODULE_PARM_DESC(debug, "Enable debug");
++
++static void pasemi_desc_start(struct pasemi_desc *desc, u64 hdr)
++{
++	desc->postop = 0;
++	desc->quad[0] = hdr;
++	desc->quad_cnt = 1;
++	desc->size = 1;
++}
++
++static void pasemi_desc_build(struct pasemi_desc *desc, u64 val)
++{
++	desc->quad[desc->quad_cnt++] = val;
++	desc->size = (desc->quad_cnt + 1) / 2;
++}
++
++static void pasemi_desc_hdr(struct pasemi_desc *desc, u64 hdr)
++{
++	desc->quad[0] |= hdr;
++}
++
++static int pasemi_desc_size(struct pasemi_desc *desc)
++{
++	return desc->size;
++}
++
++static void pasemi_ring_add_desc(
++				 struct pasemi_fnu_txring *ring,
++				 struct pasemi_desc *desc,
++				 struct cryptop *crp) {
++	int i;
++	int ring_index = 2 * (ring->next_to_fill & (TX_RING_SIZE-1));
++
++	TX_DESC_INFO(ring, ring->next_to_fill).desc_size = desc->size;
++	TX_DESC_INFO(ring, ring->next_to_fill).desc_postop = desc->postop;
++	TX_DESC_INFO(ring, ring->next_to_fill).cf_crp = crp;
++
++	for (i = 0; i < desc->quad_cnt; i += 2) {
++		ring_index = 2 * (ring->next_to_fill & (TX_RING_SIZE-1));
++		ring->desc[ring_index] = desc->quad[i];
++		ring->desc[ring_index + 1] = desc->quad[i + 1];
++		ring->next_to_fill++;
++	}
++
++	if (desc->quad_cnt & 1)
++		ring->desc[ring_index + 1] = 0;
++}
++
++static void pasemi_ring_incr(struct pasemi_softc *sc, int chan_index, int incr)
++{
++	out_le32(sc->dma_regs + PAS_DMA_TXCHAN_INCR(sc->base_chan + chan_index),
++		 incr);
++}
++
++/*
++ * Generate a new software session.
++ */
++static int
++pasemi_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
++{
++	struct cryptoini *c, *encini = NULL, *macini = NULL;
++	struct pasemi_softc *sc = device_get_softc(dev);
++	struct pasemi_session *ses = NULL, **sespp;
++	int sesn, blksz = 0;
++	u64 ccmd = 0;
++	unsigned long flags;
++	struct pasemi_desc init_desc;
++	struct pasemi_fnu_txring *txring;
++
++	DPRINTF("%s()\n", __FUNCTION__);
++	if (sidp == NULL || cri == NULL || sc == NULL) {
++		DPRINTF("%s,%d - EINVAL\n", __FILE__, __LINE__);
++		return -EINVAL;
++	}
++	for (c = cri; c != NULL; c = c->cri_next) {
++		if (ALG_IS_SIG(c->cri_alg)) {
++			if (macini)
++				return -EINVAL;
++			macini = c;
++		} else if (ALG_IS_CIPHER(c->cri_alg)) {
++			if (encini)
++				return -EINVAL;
++			encini = c;
++		} else {
++			DPRINTF("UNKNOWN c->cri_alg %d\n", c->cri_alg);
++			return -EINVAL;
++		}
++	}
++	if (encini == NULL && macini == NULL)
++		return -EINVAL;
++	if (encini) {
++		/* validate key length */
++		switch (encini->cri_alg) {
++		case CRYPTO_DES_CBC:
++			if (encini->cri_klen != 64)
++				return -EINVAL;
++			ccmd = DMA_CALGO_DES;
++			break;
++		case CRYPTO_3DES_CBC:
++			if (encini->cri_klen != 192)
++				return -EINVAL;
++			ccmd = DMA_CALGO_3DES;
++			break;
++		case CRYPTO_AES_CBC:
++			if (encini->cri_klen != 128 &&
++			    encini->cri_klen != 192 &&
++			    encini->cri_klen != 256)
++				return -EINVAL;
++			ccmd = DMA_CALGO_AES;
++			break;
++		case CRYPTO_ARC4:
++			if (encini->cri_klen != 128)
++				return -EINVAL;
++			ccmd = DMA_CALGO_ARC;
++			break;
++		default:
++			DPRINTF("UNKNOWN encini->cri_alg %d\n",
++				encini->cri_alg);
++			return -EINVAL;
++		}
++	}
++
++	if (macini) {
++		switch (macini->cri_alg) {
++		case CRYPTO_MD5:
++		case CRYPTO_MD5_HMAC:
++			blksz = 16;
++			break;
++		case CRYPTO_SHA1:
++		case CRYPTO_SHA1_HMAC:
++			blksz = 20;
++			break;
++		default:
++			DPRINTF("UNKNOWN macini->cri_alg %d\n",
++				macini->cri_alg);
++			return -EINVAL;
++		}
++		if (((macini->cri_klen + 7) / 8) > blksz) {
++			DPRINTF("key length %d bigger than blksize %d not supported\n",
++				((macini->cri_klen + 7) / 8), blksz);
++			return -EINVAL;
++		}
++	}
++
++	for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
++		if (sc->sc_sessions[sesn] == NULL) {
++			sc->sc_sessions[sesn] = (struct pasemi_session *)
++				kzalloc(sizeof(struct pasemi_session), GFP_ATOMIC);
++			ses = sc->sc_sessions[sesn];
++			break;
++		} else if (sc->sc_sessions[sesn]->used == 0) {
++			ses = sc->sc_sessions[sesn];
++			break;
++		}
++	}
++
++	if (ses == NULL) {
++		sespp = (struct pasemi_session **)
++			kzalloc(sc->sc_nsessions * 2 *
++				sizeof(struct pasemi_session *), GFP_ATOMIC);
++		if (sespp == NULL)
++			return -ENOMEM;
++		memcpy(sespp, sc->sc_sessions,
++		       sc->sc_nsessions * sizeof(struct pasemi_session *));
++		kfree(sc->sc_sessions);
++		sc->sc_sessions = sespp;
++		sesn = sc->sc_nsessions;
++		ses = sc->sc_sessions[sesn] = (struct pasemi_session *)
++			kzalloc(sizeof(struct pasemi_session), GFP_ATOMIC);
++		if (ses == NULL)
++			return -ENOMEM;
++		sc->sc_nsessions *= 2;
++	}
++
++	ses->used = 1;
++
++	ses->dma_addr = pci_map_single(sc->dma_pdev, (void *) ses->civ,
++				       sizeof(struct pasemi_session), DMA_TO_DEVICE);
++
++	/* enter the channel scheduler */
++	spin_lock_irqsave(&sc->sc_chnlock, flags);
++
++	/* ARC4 has to be processed by the even channel */
++	if (encini && (encini->cri_alg == CRYPTO_ARC4))
++		ses->chan = sc->sc_lastchn & ~1;
++	else
++		ses->chan = sc->sc_lastchn;
++	sc->sc_lastchn = (sc->sc_lastchn + 1) % sc->sc_num_channels;
++
++	spin_unlock_irqrestore(&sc->sc_chnlock, flags);
++
++	txring = &sc->tx[ses->chan];
++
++	if (encini) {
++		ses->ccmd = ccmd;
++
++		/* get an IV */
++		/* XXX may read fewer than requested */
++		get_random_bytes(ses->civ, sizeof(ses->civ));
++
++		ses->keysz = (encini->cri_klen - 63) / 64;
++		memcpy(ses->key, encini->cri_key, (ses->keysz + 1) * 8);
++
++		pasemi_desc_start(&init_desc,
++				  XCT_CTRL_HDR(ses->chan, (encini && macini) ? 0x68 : 0x40, DMA_FN_CIV0));
++		pasemi_desc_build(&init_desc,
++				  XCT_FUN_SRC_PTR((encini && macini) ? 0x68 : 0x40, ses->dma_addr));
++	}
++	if (macini) {
++		if (macini->cri_alg == CRYPTO_MD5_HMAC ||
++		    macini->cri_alg == CRYPTO_SHA1_HMAC)
++			memcpy(ses->hkey, macini->cri_key, blksz);
++		else {
++			/* Load initialization constants(RFC 1321, 3174) */
++			ses->hiv[0] = 0x67452301efcdab89ULL;
++			ses->hiv[1] = 0x98badcfe10325476ULL;
++			ses->hiv[2] = 0xc3d2e1f000000000ULL;
++		}
++		ses->hseq = 0ULL;
++	}
++
++	spin_lock_irqsave(&txring->fill_lock, flags);
++
++	if (((txring->next_to_fill + pasemi_desc_size(&init_desc)) -
++	     txring->next_to_clean) > TX_RING_SIZE) {
++		spin_unlock_irqrestore(&txring->fill_lock, flags);
++		return ERESTART;
++	}
++
++	if (encini) {
++		pasemi_ring_add_desc(txring, &init_desc, NULL);
++		pasemi_ring_incr(sc, ses->chan,
++				 pasemi_desc_size(&init_desc));
++	}
++
++	txring->sesn = sesn;
++	spin_unlock_irqrestore(&txring->fill_lock, flags);
++
++	*sidp = PASEMI_SID(sesn);
++	return 0;
++}
++
++/*
++ * Deallocate a session.
++ */
++static int
++pasemi_freesession(device_t dev, u_int64_t tid)
++{
++	struct pasemi_softc *sc = device_get_softc(dev);
++	int session;
++	u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;
++
++	DPRINTF("%s()\n", __FUNCTION__);
++
++	if (sc == NULL)
++		return -EINVAL;
++	session = PASEMI_SESSION(sid);
++	if (session >= sc->sc_nsessions || !sc->sc_sessions[session])
++		return -EINVAL;
++
++	pci_unmap_single(sc->dma_pdev,
++			 sc->sc_sessions[session]->dma_addr,
++			 sizeof(struct pasemi_session), DMA_TO_DEVICE);
++	memset(sc->sc_sessions[session], 0,
++	       sizeof(struct pasemi_session));
++
++	return 0;
++}
++
++static int
++pasemi_process(device_t dev, struct cryptop *crp, int hint)
++{
++
++	int err = 0, ivsize, srclen = 0, reinit = 0, reinit_size = 0, chsel;
++	struct pasemi_softc *sc = device_get_softc(dev);
++	struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
++	caddr_t ivp;
++	struct pasemi_desc init_desc, work_desc;
++	struct pasemi_session *ses;
++	struct sk_buff *skb;
++	struct uio *uiop;
++	unsigned long flags;
++	struct pasemi_fnu_txring *txring;
++
++	DPRINTF("%s()\n", __FUNCTION__);
++
++	if (crp == NULL || crp->crp_callback == NULL || sc == NULL)
++		return -EINVAL;
++
++	crp->crp_etype = 0;
++	if (PASEMI_SESSION(crp->crp_sid) >= sc->sc_nsessions)
++		return -EINVAL;
++
++	ses = sc->sc_sessions[PASEMI_SESSION(crp->crp_sid)];
++
++	crd1 = crp->crp_desc;
++	if (crd1 == NULL) {
++		err = -EINVAL;
++		goto errout;
++	}
++	crd2 = crd1->crd_next;
++
++	if (ALG_IS_SIG(crd1->crd_alg)) {
++		maccrd = crd1;
++		if (crd2 == NULL)
++			enccrd = NULL;
++		else if (ALG_IS_CIPHER(crd2->crd_alg) &&
++			 (crd2->crd_flags & CRD_F_ENCRYPT) == 0)
++			enccrd = crd2;
++		else
++			goto erralg;
++	} else if (ALG_IS_CIPHER(crd1->crd_alg)) {
++		enccrd = crd1;
++		if (crd2 == NULL)
++			maccrd = NULL;
++		else if (ALG_IS_SIG(crd2->crd_alg) &&
++			 (crd1->crd_flags & CRD_F_ENCRYPT))
++			maccrd = crd2;
++		else
++			goto erralg;
++	} else
++		goto erralg;
++
++	chsel = ses->chan;
++
++	txring = &sc->tx[chsel];
++
++	if (enccrd && !maccrd) {
++		if (enccrd->crd_alg == CRYPTO_ARC4)
++			reinit = 1;
++		reinit_size = 0x40;
++		srclen = crp->crp_ilen;
++
++		pasemi_desc_start(&work_desc, XCT_FUN_O | XCT_FUN_I
++				  | XCT_FUN_FUN(chsel));
++		if (enccrd->crd_flags & CRD_F_ENCRYPT)
++			pasemi_desc_hdr(&work_desc, XCT_FUN_CRM_ENC);
++		else
++			pasemi_desc_hdr(&work_desc, XCT_FUN_CRM_DEC);
++	} else if (enccrd && maccrd) {
++		if (enccrd->crd_alg == CRYPTO_ARC4)
++			reinit = 1;
++		reinit_size = 0x68;
++
++		if (enccrd->crd_flags & CRD_F_ENCRYPT) {
++			/* Encrypt -> Authenticate */
++			pasemi_desc_start(&work_desc, XCT_FUN_O | XCT_FUN_I | XCT_FUN_CRM_ENC_SIG
++					  | XCT_FUN_A | XCT_FUN_FUN(chsel));
++			srclen = maccrd->crd_skip + maccrd->crd_len;
++		} else {
++			/* Authenticate -> Decrypt */
++			pasemi_desc_start(&work_desc, XCT_FUN_O | XCT_FUN_I | XCT_FUN_CRM_SIG_DEC
++					  | XCT_FUN_24BRES | XCT_FUN_FUN(chsel));
++			pasemi_desc_build(&work_desc, 0);
++			pasemi_desc_build(&work_desc, 0);
++			pasemi_desc_build(&work_desc, 0);
++			work_desc.postop = PASEMI_CHECK_SIG;
++			srclen = crp->crp_ilen;
++		}
++
++		pasemi_desc_hdr(&work_desc, XCT_FUN_SHL(maccrd->crd_skip / 4));
++		pasemi_desc_hdr(&work_desc, XCT_FUN_CHL(enccrd->crd_skip - maccrd->crd_skip));
++	} else if (!enccrd && maccrd) {
++		srclen = maccrd->crd_len;
++
++		pasemi_desc_start(&init_desc,
++				  XCT_CTRL_HDR(chsel, 0x58, DMA_FN_HKEY0));
++		pasemi_desc_build(&init_desc,
++				  XCT_FUN_SRC_PTR(0x58, ((struct pasemi_session *)ses->dma_addr)->hkey));
++
++		pasemi_desc_start(&work_desc, XCT_FUN_O | XCT_FUN_I | XCT_FUN_CRM_SIG
++				  | XCT_FUN_A | XCT_FUN_FUN(chsel));
++	}
++
++	if (enccrd) {
++		switch (enccrd->crd_alg) {
++		case CRYPTO_3DES_CBC:
++			pasemi_desc_hdr(&work_desc, XCT_FUN_ALG_3DES |
++					XCT_FUN_BCM_CBC);
++			ivsize = sizeof(u64);
++			break;
++		case CRYPTO_DES_CBC:
++			pasemi_desc_hdr(&work_desc, XCT_FUN_ALG_DES |
++					XCT_FUN_BCM_CBC);
++			ivsize = sizeof(u64);
++			break;
++		case CRYPTO_AES_CBC:
++			pasemi_desc_hdr(&work_desc, XCT_FUN_ALG_AES |
++					XCT_FUN_BCM_CBC);
++			ivsize = 2 * sizeof(u64);
++			break;
++		case CRYPTO_ARC4:
++			pasemi_desc_hdr(&work_desc, XCT_FUN_ALG_ARC);
++			ivsize = 0;
++			break;
++		default:
++			printk(DRV_NAME ": unimplemented enccrd->crd_alg %d\n",
++			       enccrd->crd_alg);
++			err = -EINVAL;
++			goto errout;
++		}
++
++		ivp = (ivsize == sizeof(u64)) ? (caddr_t) &ses->civ[1] : (caddr_t) &ses->civ[0];
++		if (enccrd->crd_flags & CRD_F_ENCRYPT) {
++			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
++				memcpy(ivp, enccrd->crd_iv, ivsize);
++			/* If IV is not present in the buffer already, it has to be copied there */
++			if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0)
++				crypto_copyback(crp->crp_flags, crp->crp_buf,
++						enccrd->crd_inject, ivsize, ivp);
++		} else {
++			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
++				/* IV is provided expicitly in descriptor */
++				memcpy(ivp, enccrd->crd_iv, ivsize);
++			else
++				/* IV is provided in the packet */
++				crypto_copydata(crp->crp_flags, crp->crp_buf,
++						enccrd->crd_inject, ivsize,
++						ivp);
++		}
++	}
++
++	if (maccrd) {
++		switch (maccrd->crd_alg) {
++		case CRYPTO_MD5:
++			pasemi_desc_hdr(&work_desc, XCT_FUN_SIG_MD5 |
++					XCT_FUN_HSZ((crp->crp_ilen - maccrd->crd_inject) / 4));
++			break;
++		case CRYPTO_SHA1:
++			pasemi_desc_hdr(&work_desc, XCT_FUN_SIG_SHA1 |
++					XCT_FUN_HSZ((crp->crp_ilen - maccrd->crd_inject) / 4));
++			break;
++		case CRYPTO_MD5_HMAC:
++			pasemi_desc_hdr(&work_desc, XCT_FUN_SIG_HMAC_MD5 |
++					XCT_FUN_HSZ((crp->crp_ilen - maccrd->crd_inject) / 4));
++			break;
++		case CRYPTO_SHA1_HMAC:
++			pasemi_desc_hdr(&work_desc, XCT_FUN_SIG_HMAC_SHA1 |
++					XCT_FUN_HSZ((crp->crp_ilen - maccrd->crd_inject) / 4));
++			break;
++		default:
++			printk(DRV_NAME ": unimplemented maccrd->crd_alg %d\n",
++			       maccrd->crd_alg);
++			err = -EINVAL;
++			goto errout;
++		}
++	}
++
++	if (crp->crp_flags & CRYPTO_F_SKBUF) {
++		/* using SKB buffers */
++		skb = (struct sk_buff *)crp->crp_buf;
++		if (skb_shinfo(skb)->nr_frags) {
++			printk(DRV_NAME ": skb frags unimplemented\n");
++			err = -EINVAL;
++			goto errout;
++		}
++		pasemi_desc_build(
++			&work_desc,
++			XCT_FUN_DST_PTR(skb->len, pci_map_single(
++						sc->dma_pdev, skb->data,
++						skb->len, DMA_TO_DEVICE)));
++		pasemi_desc_build(
++			&work_desc,
++			XCT_FUN_SRC_PTR(
++				srclen, pci_map_single(
++					sc->dma_pdev, skb->data,
++					srclen, DMA_TO_DEVICE)));
++		pasemi_desc_hdr(&work_desc, XCT_FUN_LLEN(srclen));
++	} else if (crp->crp_flags & CRYPTO_F_IOV) {
++		/* using IOV buffers */
++		uiop = (struct uio *)crp->crp_buf;
++		if (uiop->uio_iovcnt > 1) {
++			printk(DRV_NAME ": iov frags unimplemented\n");
++			err = -EINVAL;
++			goto errout;
++		}
++
++		/* crp_olen is never set; always use crp_ilen */
++		pasemi_desc_build(
++			&work_desc,
++			XCT_FUN_DST_PTR(crp->crp_ilen, pci_map_single(
++						sc->dma_pdev,
++						uiop->uio_iov->iov_base,
++						crp->crp_ilen, DMA_TO_DEVICE)));
++		pasemi_desc_hdr(&work_desc, XCT_FUN_LLEN(srclen));
++
++		pasemi_desc_build(
++			&work_desc,
++			XCT_FUN_SRC_PTR(srclen, pci_map_single(
++						sc->dma_pdev,
++						uiop->uio_iov->iov_base,
++						srclen, DMA_TO_DEVICE)));
++	} else {
++		/* using contig buffers */
++		pasemi_desc_build(
++			&work_desc,
++			XCT_FUN_DST_PTR(crp->crp_ilen, pci_map_single(
++						sc->dma_pdev,
++						crp->crp_buf,
++						crp->crp_ilen, DMA_TO_DEVICE)));
++		pasemi_desc_build(
++			&work_desc,
++			XCT_FUN_SRC_PTR(srclen, pci_map_single(
++						sc->dma_pdev,
++						crp->crp_buf, srclen,
++						DMA_TO_DEVICE)));
++		pasemi_desc_hdr(&work_desc, XCT_FUN_LLEN(srclen));
++	}
++
++	spin_lock_irqsave(&txring->fill_lock, flags);
++
++	if (txring->sesn != PASEMI_SESSION(crp->crp_sid)) {
++		txring->sesn = PASEMI_SESSION(crp->crp_sid);
++		reinit = 1;
++	}
++
++	if (enccrd) {
++		pasemi_desc_start(&init_desc,
++				  XCT_CTRL_HDR(chsel, reinit ? reinit_size : 0x10, DMA_FN_CIV0));
++		pasemi_desc_build(&init_desc,
++				  XCT_FUN_SRC_PTR(reinit ? reinit_size : 0x10, ses->dma_addr));
++	}
++
++	if (((txring->next_to_fill + pasemi_desc_size(&init_desc) +
++	      pasemi_desc_size(&work_desc)) -
++	     txring->next_to_clean) > TX_RING_SIZE) {
++		spin_unlock_irqrestore(&txring->fill_lock, flags);
++		err = ERESTART;
++		goto errout;
++	}
++
++	pasemi_ring_add_desc(txring, &init_desc, NULL);
++	pasemi_ring_add_desc(txring, &work_desc, crp);
++
++	pasemi_ring_incr(sc, chsel,
++			 pasemi_desc_size(&init_desc) +
++			 pasemi_desc_size(&work_desc));
++
++	spin_unlock_irqrestore(&txring->fill_lock, flags);
++
++	mod_timer(&txring->crypto_timer, jiffies + TIMER_INTERVAL);
++
++	return 0;
++
++erralg:
++	printk(DRV_NAME ": unsupported algorithm or algorithm order alg1 %d alg2 %d\n",
++	       crd1->crd_alg, crd2->crd_alg);
++	err = -EINVAL;
++
++errout:
++	if (err != ERESTART) {
++		crp->crp_etype = err;
++		crypto_done(crp);
++	}
++	return err;
++}
++
++static int pasemi_clean_tx(struct pasemi_softc *sc, int chan)
++{
++	int i, j, ring_idx;
++	struct pasemi_fnu_txring *ring = &sc->tx[chan];
++	u16 delta_cnt;
++	int flags, loops = 10;
++	int desc_size;
++	struct cryptop *crp;
++
++	spin_lock_irqsave(&ring->clean_lock, flags);
++
++	while ((delta_cnt = (dma_status->tx_sta[sc->base_chan + chan]
++			     & PAS_STATUS_PCNT_M) - ring->total_pktcnt)
++	       && loops--) {
++
++		for (i = 0; i < delta_cnt; i++) {
++			desc_size = TX_DESC_INFO(ring, ring->next_to_clean).desc_size;
++			crp = TX_DESC_INFO(ring, ring->next_to_clean).cf_crp;
++			if (crp) {
++				ring_idx = 2 * (ring->next_to_clean & (TX_RING_SIZE-1));
++				if (TX_DESC_INFO(ring, ring->next_to_clean).desc_postop & PASEMI_CHECK_SIG) {
++					/* Need to make sure signature matched,
++					 * if not - return error */
++					if (!(ring->desc[ring_idx + 1] & (1ULL << 63)))
++						crp->crp_etype = -EINVAL;
++				}
++				crypto_done(TX_DESC_INFO(ring,
++							 ring->next_to_clean).cf_crp);
++				TX_DESC_INFO(ring, ring->next_to_clean).cf_crp = NULL;
++				pci_unmap_single(
++					sc->dma_pdev,
++					XCT_PTR_ADDR_LEN(ring->desc[ring_idx + 1]),
++					PCI_DMA_TODEVICE);
++
++				ring->desc[ring_idx] = ring->desc[ring_idx + 1] = 0;
++
++				ring->next_to_clean++;
++				for (j = 1; j < desc_size; j++) {
++					ring_idx = 2 *
++						(ring->next_to_clean &
++						 (TX_RING_SIZE-1));
++					pci_unmap_single(
++						sc->dma_pdev,
++						XCT_PTR_ADDR_LEN(ring->desc[ring_idx]),
++						PCI_DMA_TODEVICE);
++					if (ring->desc[ring_idx + 1])
++						pci_unmap_single(
++							sc->dma_pdev,
++							XCT_PTR_ADDR_LEN(
++								ring->desc[
++									ring_idx + 1]),
++							PCI_DMA_TODEVICE);
++					ring->desc[ring_idx] =
++						ring->desc[ring_idx + 1] = 0;
++					ring->next_to_clean++;
++				}
++			} else {
++				for (j = 0; j < desc_size; j++) {
++					ring_idx = 2 * (ring->next_to_clean & (TX_RING_SIZE-1));
++					ring->desc[ring_idx] =
++						ring->desc[ring_idx + 1] = 0;
++					ring->next_to_clean++;
++				}
++			}
++		}
++
++		ring->total_pktcnt += delta_cnt;
++	}
++	spin_unlock_irqrestore(&ring->clean_lock, flags);
++
++	return 0;
++}
++
++static void sweepup_tx(struct pasemi_softc *sc)
++{
++	int i;
++
++	for (i = 0; i < sc->sc_num_channels; i++)
++		pasemi_clean_tx(sc, i);
++}
++
++static irqreturn_t pasemi_intr(int irq, void *arg, struct pt_regs *regs)
++{
++	struct pasemi_softc *sc = arg;
++	unsigned int reg;
++	int chan = irq - sc->base_irq;
++	int chan_index = sc->base_chan + chan;
++	u64 stat = dma_status->tx_sta[chan_index];
++
++	DPRINTF("%s()\n", __FUNCTION__);
++
++	if (!(stat & PAS_STATUS_CAUSE_M))
++		return IRQ_NONE;
++
++	pasemi_clean_tx(sc, chan);
++
++	stat = dma_status->tx_sta[chan_index];
++
++	reg = PAS_IOB_DMA_TXCH_RESET_PINTC |
++		PAS_IOB_DMA_TXCH_RESET_PCNT(sc->tx[chan].total_pktcnt);
++
++	if (stat & PAS_STATUS_SOFT)
++		reg |= PAS_IOB_DMA_RXCH_RESET_SINTC;
++
++	out_le32(sc->iob_regs + PAS_IOB_DMA_TXCH_RESET(chan_index), reg);
++
++
++	return IRQ_HANDLED;
++}
++
++static int pasemi_dma_setup_tx_resources(struct pasemi_softc *sc, int chan)
++{
++	u32 val;
++	int chan_index = chan + sc->base_chan;
++	int ret;
++	struct pasemi_fnu_txring *ring;
++
++	ring = &sc->tx[chan];
++
++	spin_lock_init(&ring->fill_lock);
++	spin_lock_init(&ring->clean_lock);
++
++	ring->desc_info = kzalloc(sizeof(struct pasemi_desc_info) *
++				  TX_RING_SIZE, GFP_KERNEL);
++	if (!ring->desc_info)
++		return -ENOMEM;
++
++	/* Allocate descriptors */
++	ring->desc = dma_alloc_coherent(&sc->dma_pdev->dev,
++					TX_RING_SIZE *
++					2 * sizeof(u64),
++					&ring->dma, GFP_KERNEL);
++	if (!ring->desc)
++		return -ENOMEM;
++
++	memset((void *) ring->desc, 0, TX_RING_SIZE * 2 * sizeof(u64));
++
++	out_le32(sc->iob_regs + PAS_IOB_DMA_TXCH_RESET(chan_index), 0x30);
++
++	ring->total_pktcnt = 0;
++
++	out_le32(sc->dma_regs + PAS_DMA_TXCHAN_BASEL(chan_index),
++		 PAS_DMA_TXCHAN_BASEL_BRBL(ring->dma));
++
++	val = PAS_DMA_TXCHAN_BASEU_BRBH(ring->dma >> 32);
++	val |= PAS_DMA_TXCHAN_BASEU_SIZ(TX_RING_SIZE >> 2);
++
++	out_le32(sc->dma_regs + PAS_DMA_TXCHAN_BASEU(chan_index), val);
++
++	out_le32(sc->dma_regs + PAS_DMA_TXCHAN_CFG(chan_index),
++		 PAS_DMA_TXCHAN_CFG_TY_FUNC |
++		 PAS_DMA_TXCHAN_CFG_TATTR(chan) |
++		 PAS_DMA_TXCHAN_CFG_WT(2));
++
++	/* enable tx channel */
++	out_le32(sc->dma_regs +
++		 PAS_DMA_TXCHAN_TCMDSTA(chan_index),
++		 PAS_DMA_TXCHAN_TCMDSTA_EN);
++
++	out_le32(sc->iob_regs + PAS_IOB_DMA_TXCH_CFG(chan_index),
++		 PAS_IOB_DMA_TXCH_CFG_CNTTH(1000));
++
++	ring->next_to_fill = 0;
++	ring->next_to_clean = 0;
++
++	snprintf(ring->irq_name, sizeof(ring->irq_name),
++		 "%s%d", "crypto", chan);
++
++	ring->irq = irq_create_mapping(NULL, sc->base_irq + chan);
++	ret = request_irq(ring->irq, (irq_handler_t)
++			  pasemi_intr, IRQF_DISABLED, ring->irq_name, sc);
++	if (ret) {
++		printk(KERN_ERR DRV_NAME ": failed to hook irq %d ret %d\n",
++		       ring->irq, ret);
++		ring->irq = -1;
++		return ret;
++	}
++
++	setup_timer(&ring->crypto_timer, (void *) sweepup_tx, (unsigned long) sc);
++
++	return 0;
++}
++
++static device_method_t pasemi_methods = {
++	/* crypto device methods */
++	DEVMETHOD(cryptodev_newsession,		pasemi_newsession),
++	DEVMETHOD(cryptodev_freesession,	pasemi_freesession),
++	DEVMETHOD(cryptodev_process,		pasemi_process),
++};
++
++/* Set up the crypto device structure, private data,
++ * and anything else we need before we start */
++
++static int __devinit
++pasemi_dma_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
++{
++	struct pasemi_softc *sc;
++	int ret, i;
++
++	DPRINTF(KERN_ERR "%s()\n", __FUNCTION__);
++
++	sc = kzalloc(sizeof(*sc), GFP_KERNEL);
++	if (!sc)
++		return -ENOMEM;
++
++	softc_device_init(sc, DRV_NAME, 1, pasemi_methods);
++
++	pci_set_drvdata(pdev, sc);
++
++	spin_lock_init(&sc->sc_chnlock);
++
++	sc->sc_sessions = (struct pasemi_session **)
++		kzalloc(PASEMI_INITIAL_SESSIONS *
++			sizeof(struct pasemi_session *), GFP_ATOMIC);
++	if (sc->sc_sessions == NULL) {
++		ret = -ENOMEM;
++		goto out;
++	}
++
++	sc->sc_nsessions = PASEMI_INITIAL_SESSIONS;
++	sc->sc_lastchn = 0;
++	sc->base_irq = pdev->irq + 6;
++	sc->base_chan = 6;
++	sc->sc_cid = -1;
++	sc->dma_pdev = pdev;
++
++	sc->iob_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL);
++	if (!sc->iob_pdev) {
++		dev_err(&pdev->dev, "Can't find I/O Bridge\n");
++		ret = -ENODEV;
++		goto out;
++	}
++
++	/* This is hardcoded and ugly, but we have some firmware versions
++	 * who don't provide the register space in the device tree. Luckily
++	 * they are at well-known locations so we can just do the math here.
++	 */
++	sc->dma_regs =
++		ioremap(0xe0000000 + (sc->dma_pdev->devfn << 12), 0x2000);
++	sc->iob_regs =
++		ioremap(0xe0000000 + (sc->iob_pdev->devfn << 12), 0x2000);
++	if (!sc->dma_regs || !sc->iob_regs) {
++		dev_err(&pdev->dev, "Can't map registers\n");
++		ret = -ENODEV;
++		goto out;
++	}
++
++	dma_status = __ioremap(0xfd800000, 0x1000, 0);
++	if (!dma_status) {
++		ret = -ENODEV;
++		dev_err(&pdev->dev, "Can't map dmastatus space\n");
++		goto out;
++	}
++
++	sc->tx = (struct pasemi_fnu_txring *)
++		kzalloc(sizeof(struct pasemi_fnu_txring)
++			* 8, GFP_KERNEL);
++	if (!sc->tx) {
++		ret = -ENOMEM;
++		goto out;
++	}
++
++	/* Initialize the h/w */
++	out_le32(sc->dma_regs + PAS_DMA_COM_CFG,
++		 (in_le32(sc->dma_regs + PAS_DMA_COM_CFG) |
++		  PAS_DMA_COM_CFG_FWF));
++	out_le32(sc->dma_regs + PAS_DMA_COM_TXCMD, PAS_DMA_COM_TXCMD_EN);
++
++	for (i = 0; i < PASEMI_FNU_CHANNELS; i++) {
++		sc->sc_num_channels++;
++		ret = pasemi_dma_setup_tx_resources(sc, i);
++		if (ret)
++			goto out;
++	}
++
++	sc->sc_cid = crypto_get_driverid(softc_get_device(sc),
++					 CRYPTOCAP_F_HARDWARE);
++	if (sc->sc_cid < 0) {
++		printk(KERN_ERR DRV_NAME ": could not get crypto driver id\n");
++		ret = -ENXIO;
++		goto out;
++	}
++
++	/* register algorithms with the framework */
++	printk(DRV_NAME ":");
++
++	crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
++	crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
++	crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
++	crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0);
++	crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0);
++	crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0);
++	crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
++	crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
++
++	return 0;
++
++out:
++	pasemi_dma_remove(pdev);
++	return ret;
++}
++
++#define MAX_RETRIES 5000
++
++static void pasemi_free_tx_resources(struct pasemi_softc *sc, int chan)
++{
++	struct pasemi_fnu_txring *ring = &sc->tx[chan];
++	int chan_index = chan + sc->base_chan;
++	int retries;
++	u32 stat;
++
++	/* Stop the channel */
++	out_le32(sc->dma_regs +
++		 PAS_DMA_TXCHAN_TCMDSTA(chan_index),
++		 PAS_DMA_TXCHAN_TCMDSTA_ST);
++
++	for (retries = 0; retries < MAX_RETRIES; retries++) {
++		stat = in_le32(sc->dma_regs +
++			       PAS_DMA_TXCHAN_TCMDSTA(chan_index));
++		if (!(stat & PAS_DMA_TXCHAN_TCMDSTA_ACT))
++			break;
++		cond_resched();
++	}
++
++	if (stat & PAS_DMA_TXCHAN_TCMDSTA_ACT)
++		dev_err(&sc->dma_pdev->dev, "Failed to stop tx channel %d\n",
++			chan_index);
++
++	/* Disable the channel */
++	out_le32(sc->dma_regs +
++		 PAS_DMA_TXCHAN_TCMDSTA(chan_index),
++		 0);
++
++	if (ring->desc_info)
++		kfree((void *) ring->desc_info);
++	if (ring->desc)
++		dma_free_coherent(&sc->dma_pdev->dev,
++				  TX_RING_SIZE *
++				  2 * sizeof(u64),
++				  (void *) ring->desc, ring->dma);
++	if (ring->irq != -1)
++		free_irq(ring->irq, sc);
++
++	del_timer(&ring->crypto_timer);
++}
++
++static void __devexit pasemi_dma_remove(struct pci_dev *pdev)
++{
++	struct pasemi_softc *sc = pci_get_drvdata(pdev);
++	int i;
++
++	DPRINTF("%s()\n", __FUNCTION__);
++
++	if (sc->sc_cid >= 0) {
++		crypto_unregister_all(sc->sc_cid);
++	}
++
++	if (sc->tx) {
++		for (i = 0; i < sc->sc_num_channels; i++)
++			pasemi_free_tx_resources(sc, i);
++
++		kfree(sc->tx);
++	}
++	if (sc->sc_sessions) {
++		for (i = 0; i < sc->sc_nsessions; i++)
++			kfree(sc->sc_sessions[i]);
++		kfree(sc->sc_sessions);
++	}
++	if (sc->iob_pdev)
++		pci_dev_put(sc->iob_pdev);
++	if (sc->dma_regs)
++		iounmap(sc->dma_regs);
++	if (sc->iob_regs)
++		iounmap(sc->iob_regs);
++	kfree(sc);
++}
++
++static struct pci_device_id pasemi_dma_pci_tbl[] = {
++	{ PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa007) },
++};
++
++MODULE_DEVICE_TABLE(pci, pasemi_dma_pci_tbl);
++
++static struct pci_driver pasemi_dma_driver = {
++	.name		= "pasemi_dma",
++	.id_table	= pasemi_dma_pci_tbl,
++	.probe		= pasemi_dma_probe,
++	.remove		= __devexit_p(pasemi_dma_remove),
++};
++
++static void __exit pasemi_dma_cleanup_module(void)
++{
++	pci_unregister_driver(&pasemi_dma_driver);
++	__iounmap(dma_status);
++	dma_status = NULL;
++}
++
++int pasemi_dma_init_module(void)
++{
++	return pci_register_driver(&pasemi_dma_driver);
++}
++
++module_init(pasemi_dma_init_module);
++module_exit(pasemi_dma_cleanup_module);
++
++MODULE_LICENSE("Dual BSD/GPL");
++MODULE_AUTHOR("Egor Martovetsky egor@pasemi.com");
++MODULE_DESCRIPTION("OCF driver for PA Semi PWRficient DMA Crypto Engine");
+diff -Nur linux-2.6.30.orig/crypto/ocf/pasemi/pasemi_fnu.h linux-2.6.30/crypto/ocf/pasemi/pasemi_fnu.h
+--- linux-2.6.30.orig/crypto/ocf/pasemi/pasemi_fnu.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.30/crypto/ocf/pasemi/pasemi_fnu.h	2009-06-11 10:55:27.000000000 +0200
+@@ -0,0 +1,410 @@
++/*
++ * Copyright (C) 2007 PA Semi, Inc
++ *
++ * Driver for the PA Semi PWRficient DMA Crypto Engine, soft state and
++ * hardware register layouts.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
++ */
++
++#ifndef PASEMI_FNU_H
++#define PASEMI_FNU_H
++
++#include <linux/spinlock.h>
++
++#define	PASEMI_SESSION(sid)	((sid) & 0xffffffff)
++#define	PASEMI_SID(sesn)	((sesn) & 0xffffffff)
++#define	DPRINTF(a...)	if (debug) { printk(DRV_NAME ": " a); }
++
++/* Must be a power of two */
++#define RX_RING_SIZE 512
++#define TX_RING_SIZE 512
++#define TX_DESC(ring, num)	((ring)->desc[2 * (num & (TX_RING_SIZE-1))])
++#define TX_DESC_INFO(ring, num)	((ring)->desc_info[(num) & (TX_RING_SIZE-1)])
++#define MAX_DESC_SIZE 8
++#define PASEMI_INITIAL_SESSIONS 10
++#define PASEMI_FNU_CHANNELS 8
++
++/* DMA descriptor */
++struct pasemi_desc {
++	u64 quad[2*MAX_DESC_SIZE];
++	int quad_cnt;
++	int size;
++	int postop;
++};
++
++/*
++ * Holds per descriptor data
++ */
++struct pasemi_desc_info {
++	int			desc_size;
++	int			desc_postop;
++#define PASEMI_CHECK_SIG 0x1
++
++	struct cryptop          *cf_crp;
++};
++
++/*
++ * Holds per channel data
++ */
++struct pasemi_fnu_txring {
++	volatile u64		*desc;
++	volatile struct
++	pasemi_desc_info	*desc_info;
++	dma_addr_t		dma;
++	struct timer_list       crypto_timer;
++	spinlock_t		fill_lock;
++	spinlock_t		clean_lock;
++	unsigned int		next_to_fill;
++	unsigned int		next_to_clean;
++	u16			total_pktcnt;
++	int			irq;
++	int			sesn;
++	char			irq_name[10];
++};
++
++/*
++ * Holds data specific to a single pasemi device.
++ */
++struct pasemi_softc {
++	softc_device_decl	sc_cdev;
++	struct pci_dev		*dma_pdev;	/* device backpointer */
++	struct pci_dev		*iob_pdev;	/* device backpointer */
++	void __iomem		*dma_regs;
++	void __iomem		*iob_regs;
++	int			base_irq;
++	int			base_chan;
++	int32_t			sc_cid;		/* crypto tag */
++	int			sc_nsessions;
++	struct pasemi_session	**sc_sessions;
++	int			sc_num_channels;/* number of crypto channels */
++
++	/* pointer to the array of txring data structures, one txring per channel */
++	struct pasemi_fnu_txring *tx;
++
++	/*
++	 * mutual exclusion for the channel scheduler
++	 */
++	spinlock_t		sc_chnlock;
++	/* last channel used, for now use round-robin to allocate channels */
++	int			sc_lastchn;
++};
++
++struct pasemi_session {
++	u64 civ[2];
++	u64 keysz;
++	u64 key[4];
++	u64 ccmd;
++	u64 hkey[4];
++	u64 hseq;
++	u64 giv[2];
++	u64 hiv[4];
++
++	int used;
++	dma_addr_t	dma_addr;
++	int chan;
++};
++
++/* status register layout in IOB region, at 0xfd800000 */
++struct pasdma_status {
++	u64 rx_sta[64];
++	u64 tx_sta[20];
++};
++
++#define ALG_IS_CIPHER(alg) ((alg == CRYPTO_DES_CBC)		|| \
++				(alg == CRYPTO_3DES_CBC)	|| \
++				(alg == CRYPTO_AES_CBC)		|| \
++				(alg == CRYPTO_ARC4)		|| \
++				(alg == CRYPTO_NULL_CBC))
++
++#define ALG_IS_SIG(alg) ((alg == CRYPTO_MD5)			|| \
++				(alg == CRYPTO_MD5_HMAC)	|| \
++				(alg == CRYPTO_SHA1)		|| \
++				(alg == CRYPTO_SHA1_HMAC)	|| \
++				(alg == CRYPTO_NULL_HMAC))
++
++enum {
++	PAS_DMA_COM_TXCMD = 0x100,	/* Transmit Command Register  */
++	PAS_DMA_COM_TXSTA = 0x104,	/* Transmit Status Register   */
++	PAS_DMA_COM_RXCMD = 0x108,	/* Receive Command Register   */
++	PAS_DMA_COM_RXSTA = 0x10c,	/* Receive Status Register    */
++	PAS_DMA_COM_CFG   = 0x114,	/* DMA Configuration Register */
++};
++
++/* All these registers live in the PCI configuration space for the DMA PCI
++ * device. Use the normal PCI config access functions for them.
++ */
++
++#define PAS_DMA_COM_CFG_FWF	0x18000000
++
++#define PAS_DMA_COM_TXCMD_EN	0x00000001 /* enable */
++#define PAS_DMA_COM_TXSTA_ACT	0x00000001 /* active */
++#define PAS_DMA_COM_RXCMD_EN	0x00000001 /* enable */
++#define PAS_DMA_COM_RXSTA_ACT	0x00000001 /* active */
++
++#define _PAS_DMA_TXCHAN_STRIDE	0x20    /* Size per channel		*/
++#define _PAS_DMA_TXCHAN_TCMDSTA	0x300	/* Command / Status		*/
++#define _PAS_DMA_TXCHAN_CFG	0x304	/* Configuration		*/
++#define _PAS_DMA_TXCHAN_DSCRBU	0x308	/* Descriptor BU Allocation	*/
++#define _PAS_DMA_TXCHAN_INCR	0x310	/* Descriptor increment		*/
++#define _PAS_DMA_TXCHAN_CNT	0x314	/* Descriptor count/offset	*/
++#define _PAS_DMA_TXCHAN_BASEL	0x318	/* Descriptor ring base (low)	*/
++#define _PAS_DMA_TXCHAN_BASEU	0x31c	/*			(high)	*/
++#define PAS_DMA_TXCHAN_TCMDSTA(c) (0x300+(c)*_PAS_DMA_TXCHAN_STRIDE)
++#define    PAS_DMA_TXCHAN_TCMDSTA_EN	0x00000001	/* Enabled */
++#define    PAS_DMA_TXCHAN_TCMDSTA_ST	0x00000002	/* Stop interface */
++#define    PAS_DMA_TXCHAN_TCMDSTA_ACT	0x00010000	/* Active */
++#define PAS_DMA_TXCHAN_CFG(c)     (0x304+(c)*_PAS_DMA_TXCHAN_STRIDE)
++#define    PAS_DMA_TXCHAN_CFG_TY_FUNC	0x00000002	/* Type = interface */
++#define    PAS_DMA_TXCHAN_CFG_TY_IFACE	0x00000000	/* Type = interface */
++#define    PAS_DMA_TXCHAN_CFG_TATTR_M	0x0000003c
++#define    PAS_DMA_TXCHAN_CFG_TATTR_S	2
++#define    PAS_DMA_TXCHAN_CFG_TATTR(x)	(((x) << PAS_DMA_TXCHAN_CFG_TATTR_S) & \
++					 PAS_DMA_TXCHAN_CFG_TATTR_M)
++#define    PAS_DMA_TXCHAN_CFG_WT_M	0x000001c0
++#define    PAS_DMA_TXCHAN_CFG_WT_S	6
++#define    PAS_DMA_TXCHAN_CFG_WT(x)	(((x) << PAS_DMA_TXCHAN_CFG_WT_S) & \
++					 PAS_DMA_TXCHAN_CFG_WT_M)
++#define    PAS_DMA_TXCHAN_CFG_LPSQ_FAST	0x00000400
++#define    PAS_DMA_TXCHAN_CFG_LPDQ_FAST	0x00000800
++#define    PAS_DMA_TXCHAN_CFG_CF	0x00001000	/* Clean first line */
++#define    PAS_DMA_TXCHAN_CFG_CL	0x00002000	/* Clean last line */
++#define    PAS_DMA_TXCHAN_CFG_UP	0x00004000	/* update tx descr when sent */
++#define PAS_DMA_TXCHAN_INCR(c)    (0x310+(c)*_PAS_DMA_TXCHAN_STRIDE)
++#define PAS_DMA_TXCHAN_BASEL(c)   (0x318+(c)*_PAS_DMA_TXCHAN_STRIDE)
++#define    PAS_DMA_TXCHAN_BASEL_BRBL_M	0xffffffc0
++#define    PAS_DMA_TXCHAN_BASEL_BRBL_S	0
++#define    PAS_DMA_TXCHAN_BASEL_BRBL(x)	(((x) << PAS_DMA_TXCHAN_BASEL_BRBL_S) & \
++					 PAS_DMA_TXCHAN_BASEL_BRBL_M)
++#define PAS_DMA_TXCHAN_BASEU(c)   (0x31c+(c)*_PAS_DMA_TXCHAN_STRIDE)
++#define    PAS_DMA_TXCHAN_BASEU_BRBH_M	0x00000fff
++#define    PAS_DMA_TXCHAN_BASEU_BRBH_S	0
++#define    PAS_DMA_TXCHAN_BASEU_BRBH(x)	(((x) << PAS_DMA_TXCHAN_BASEU_BRBH_S) & \
++					 PAS_DMA_TXCHAN_BASEU_BRBH_M)
++/* # of cache lines worth of buffer ring */
++#define    PAS_DMA_TXCHAN_BASEU_SIZ_M	0x3fff0000
++#define    PAS_DMA_TXCHAN_BASEU_SIZ_S	16		/* 0 = 16K */
++#define    PAS_DMA_TXCHAN_BASEU_SIZ(x)	(((x) << PAS_DMA_TXCHAN_BASEU_SIZ_S) & \
++					 PAS_DMA_TXCHAN_BASEU_SIZ_M)
++
++#define    PAS_STATUS_PCNT_M		0x000000000000ffffull
++#define    PAS_STATUS_PCNT_S		0
++#define    PAS_STATUS_DCNT_M		0x00000000ffff0000ull
++#define    PAS_STATUS_DCNT_S		16
++#define    PAS_STATUS_BPCNT_M		0x0000ffff00000000ull
++#define    PAS_STATUS_BPCNT_S		32
++#define    PAS_STATUS_CAUSE_M		0xf000000000000000ull
++#define    PAS_STATUS_TIMER		0x1000000000000000ull
++#define    PAS_STATUS_ERROR		0x2000000000000000ull
++#define    PAS_STATUS_SOFT		0x4000000000000000ull
++#define    PAS_STATUS_INT		0x8000000000000000ull
++
++#define PAS_IOB_DMA_RXCH_CFG(i)		(0x1100 + (i)*4)
++#define    PAS_IOB_DMA_RXCH_CFG_CNTTH_M		0x00000fff
++#define    PAS_IOB_DMA_RXCH_CFG_CNTTH_S		0
++#define    PAS_IOB_DMA_RXCH_CFG_CNTTH(x)	(((x) << PAS_IOB_DMA_RXCH_CFG_CNTTH_S) & \
++						 PAS_IOB_DMA_RXCH_CFG_CNTTH_M)
++#define PAS_IOB_DMA_TXCH_CFG(i)		(0x1200 + (i)*4)
++#define    PAS_IOB_DMA_TXCH_CFG_CNTTH_M		0x00000fff
++#define    PAS_IOB_DMA_TXCH_CFG_CNTTH_S		0
++#define    PAS_IOB_DMA_TXCH_CFG_CNTTH(x)	(((x) << PAS_IOB_DMA_TXCH_CFG_CNTTH_S) & \
++						 PAS_IOB_DMA_TXCH_CFG_CNTTH_M)
++#define PAS_IOB_DMA_RXCH_STAT(i)	(0x1300 + (i)*4)
++#define    PAS_IOB_DMA_RXCH_STAT_INTGEN	0x00001000
++#define    PAS_IOB_DMA_RXCH_STAT_CNTDEL_M	0x00000fff
++#define    PAS_IOB_DMA_RXCH_STAT_CNTDEL_S	0
++#define    PAS_IOB_DMA_RXCH_STAT_CNTDEL(x)	(((x) << PAS_IOB_DMA_RXCH_STAT_CNTDEL_S) &\
++						 PAS_IOB_DMA_RXCH_STAT_CNTDEL_M)
++#define PAS_IOB_DMA_TXCH_STAT(i)	(0x1400 + (i)*4)
++#define    PAS_IOB_DMA_TXCH_STAT_INTGEN	0x00001000
++#define    PAS_IOB_DMA_TXCH_STAT_CNTDEL_M	0x00000fff
++#define    PAS_IOB_DMA_TXCH_STAT_CNTDEL_S	0
++#define    PAS_IOB_DMA_TXCH_STAT_CNTDEL(x)	(((x) << PAS_IOB_DMA_TXCH_STAT_CNTDEL_S) &\
++						 PAS_IOB_DMA_TXCH_STAT_CNTDEL_M)
++#define PAS_IOB_DMA_RXCH_RESET(i)	(0x1500 + (i)*4)
++#define    PAS_IOB_DMA_RXCH_RESET_PCNT_M	0xffff0000
++#define    PAS_IOB_DMA_RXCH_RESET_PCNT_S	16
++#define    PAS_IOB_DMA_RXCH_RESET_PCNT(x)	(((x) << PAS_IOB_DMA_RXCH_RESET_PCNT_S) & \
++						 PAS_IOB_DMA_RXCH_RESET_PCNT_M)
++#define    PAS_IOB_DMA_RXCH_RESET_PCNTRST	0x00000020
++#define    PAS_IOB_DMA_RXCH_RESET_DCNTRST	0x00000010
++#define    PAS_IOB_DMA_RXCH_RESET_TINTC		0x00000008
++#define    PAS_IOB_DMA_RXCH_RESET_DINTC		0x00000004
++#define    PAS_IOB_DMA_RXCH_RESET_SINTC		0x00000002
++#define    PAS_IOB_DMA_RXCH_RESET_PINTC		0x00000001
++#define PAS_IOB_DMA_TXCH_RESET(i)	(0x1600 + (i)*4)
++#define    PAS_IOB_DMA_TXCH_RESET_PCNT_M	0xffff0000
++#define    PAS_IOB_DMA_TXCH_RESET_PCNT_S	16
++#define    PAS_IOB_DMA_TXCH_RESET_PCNT(x)	(((x) << PAS_IOB_DMA_TXCH_RESET_PCNT_S) & \
++						 PAS_IOB_DMA_TXCH_RESET_PCNT_M)
++#define    PAS_IOB_DMA_TXCH_RESET_PCNTRST	0x00000020
++#define    PAS_IOB_DMA_TXCH_RESET_DCNTRST	0x00000010
++#define    PAS_IOB_DMA_TXCH_RESET_TINTC		0x00000008
++#define    PAS_IOB_DMA_TXCH_RESET_DINTC		0x00000004
++#define    PAS_IOB_DMA_TXCH_RESET_SINTC		0x00000002
++#define    PAS_IOB_DMA_TXCH_RESET_PINTC		0x00000001
++
++#define PAS_IOB_DMA_COM_TIMEOUTCFG		0x1700
++#define    PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_M	0x00ffffff
++#define    PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_S	0
++#define    PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(x)	(((x) << PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_S) & \
++						 PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_M)
++
++/* Transmit descriptor fields */
++#define	XCT_MACTX_T		0x8000000000000000ull
++#define	XCT_MACTX_ST		0x4000000000000000ull
++#define XCT_MACTX_NORES		0x0000000000000000ull
++#define XCT_MACTX_8BRES		0x1000000000000000ull
++#define XCT_MACTX_24BRES	0x2000000000000000ull
++#define XCT_MACTX_40BRES	0x3000000000000000ull
++#define XCT_MACTX_I		0x0800000000000000ull
++#define XCT_MACTX_O		0x0400000000000000ull
++#define XCT_MACTX_E		0x0200000000000000ull
++#define XCT_MACTX_VLAN_M	0x0180000000000000ull
++#define XCT_MACTX_VLAN_NOP	0x0000000000000000ull
++#define XCT_MACTX_VLAN_REMOVE	0x0080000000000000ull
++#define XCT_MACTX_VLAN_INSERT   0x0100000000000000ull
++#define XCT_MACTX_VLAN_REPLACE  0x0180000000000000ull
++#define XCT_MACTX_CRC_M		0x0060000000000000ull
++#define XCT_MACTX_CRC_NOP	0x0000000000000000ull
++#define XCT_MACTX_CRC_INSERT	0x0020000000000000ull
++#define XCT_MACTX_CRC_PAD	0x0040000000000000ull
++#define XCT_MACTX_CRC_REPLACE	0x0060000000000000ull
++#define XCT_MACTX_SS		0x0010000000000000ull
++#define XCT_MACTX_LLEN_M	0x00007fff00000000ull
++#define XCT_MACTX_LLEN_S	32ull
++#define XCT_MACTX_LLEN(x)	((((long)(x)) << XCT_MACTX_LLEN_S) & \
++				 XCT_MACTX_LLEN_M)
++#define XCT_MACTX_IPH_M		0x00000000f8000000ull
++#define XCT_MACTX_IPH_S		27ull
++#define XCT_MACTX_IPH(x)	((((long)(x)) << XCT_MACTX_IPH_S) & \
++				 XCT_MACTX_IPH_M)
++#define XCT_MACTX_IPO_M		0x0000000007c00000ull
++#define XCT_MACTX_IPO_S		22ull
++#define XCT_MACTX_IPO(x)	((((long)(x)) << XCT_MACTX_IPO_S) & \
++				 XCT_MACTX_IPO_M)
++#define XCT_MACTX_CSUM_M	0x0000000000000060ull
++#define XCT_MACTX_CSUM_NOP	0x0000000000000000ull
++#define XCT_MACTX_CSUM_TCP	0x0000000000000040ull
++#define XCT_MACTX_CSUM_UDP	0x0000000000000060ull
++#define XCT_MACTX_V6		0x0000000000000010ull
++#define XCT_MACTX_C		0x0000000000000004ull
++#define XCT_MACTX_AL2		0x0000000000000002ull
++
++#define XCT_PTR_T		0x8000000000000000ull
++#define XCT_PTR_LEN_M		0x7ffff00000000000ull
++#define XCT_PTR_LEN_S		44
++#define XCT_PTR_LEN(x)		((((long)(x)) << XCT_PTR_LEN_S) & \
++				 XCT_PTR_LEN_M)
++#define XCT_PTR_ADDR_M		0x00000fffffffffffull
++#define XCT_PTR_ADDR_S		0
++#define XCT_PTR_ADDR(x)		((((long)(x)) << XCT_PTR_ADDR_S) & \
++				 XCT_PTR_ADDR_M)
++
++/* Function descriptor fields */
++#define	XCT_FUN_T		0x8000000000000000ull
++#define	XCT_FUN_ST		0x4000000000000000ull
++#define XCT_FUN_NORES		0x0000000000000000ull
++#define XCT_FUN_8BRES		0x1000000000000000ull
++#define XCT_FUN_24BRES		0x2000000000000000ull
++#define XCT_FUN_40BRES		0x3000000000000000ull
++#define XCT_FUN_I		0x0800000000000000ull
++#define XCT_FUN_O		0x0400000000000000ull
++#define XCT_FUN_E		0x0200000000000000ull
++#define XCT_FUN_FUN_S		54
++#define XCT_FUN_FUN_M		0x01c0000000000000ull
++#define XCT_FUN_FUN(num)	((((long)(num)) << XCT_FUN_FUN_S) & \
++				XCT_FUN_FUN_M)
++#define XCT_FUN_CRM_NOP		0x0000000000000000ull
++#define XCT_FUN_CRM_SIG		0x0008000000000000ull
++#define XCT_FUN_CRM_ENC		0x0010000000000000ull
++#define XCT_FUN_CRM_DEC		0x0018000000000000ull
++#define XCT_FUN_CRM_SIG_ENC	0x0020000000000000ull
++#define XCT_FUN_CRM_ENC_SIG	0x0028000000000000ull
++#define XCT_FUN_CRM_SIG_DEC	0x0030000000000000ull
++#define XCT_FUN_CRM_DEC_SIG	0x0038000000000000ull
++#define XCT_FUN_LLEN_M		0x0007ffff00000000ull
++#define XCT_FUN_LLEN_S		32ULL
++#define XCT_FUN_LLEN(x)		((((long)(x)) << XCT_FUN_LLEN_S) & \
++				 XCT_FUN_LLEN_M)
++#define XCT_FUN_SHL_M		0x00000000f8000000ull
++#define XCT_FUN_SHL_S		27ull
++#define XCT_FUN_SHL(x)		((((long)(x)) << XCT_FUN_SHL_S) & \
++				 XCT_FUN_SHL_M)
++#define XCT_FUN_CHL_M		0x0000000007c00000ull
++#define XCT_FUN_CHL_S		22ull
++#define XCT_FUN_CHL(x)		((((long)(x)) << XCT_FUN_CHL_S) & \
++				 XCT_FUN_CHL_M)
++#define XCT_FUN_HSZ_M		0x00000000003c0000ull
++#define XCT_FUN_HSZ_S		18ull
++#define XCT_FUN_HSZ(x)		((((long)(x)) << XCT_FUN_HSZ_S) & \
++				 XCT_FUN_HSZ_M)
++#define XCT_FUN_ALG_DES		0x0000000000000000ull
++#define XCT_FUN_ALG_3DES	0x0000000000008000ull
++#define XCT_FUN_ALG_AES		0x0000000000010000ull
++#define XCT_FUN_ALG_ARC		0x0000000000018000ull
++#define XCT_FUN_ALG_KASUMI	0x0000000000020000ull
++#define XCT_FUN_BCM_ECB		0x0000000000000000ull
++#define XCT_FUN_BCM_CBC		0x0000000000001000ull
++#define XCT_FUN_BCM_CFB		0x0000000000002000ull
++#define XCT_FUN_BCM_OFB		0x0000000000003000ull
++#define XCT_FUN_BCM_CNT		0x0000000000003800ull
++#define XCT_FUN_BCM_KAS_F8	0x0000000000002800ull
++#define XCT_FUN_BCM_KAS_F9	0x0000000000001800ull
++#define XCT_FUN_BCP_NO_PAD	0x0000000000000000ull
++#define XCT_FUN_BCP_ZRO		0x0000000000000200ull
++#define XCT_FUN_BCP_PL		0x0000000000000400ull
++#define XCT_FUN_BCP_INCR	0x0000000000000600ull
++#define XCT_FUN_SIG_MD5		(0ull << 4)
++#define XCT_FUN_SIG_SHA1	(2ull << 4)
++#define XCT_FUN_SIG_HMAC_MD5	(8ull << 4)
++#define XCT_FUN_SIG_HMAC_SHA1	(10ull << 4)
++#define XCT_FUN_A		0x0000000000000008ull
++#define XCT_FUN_C		0x0000000000000004ull
++#define XCT_FUN_AL2		0x0000000000000002ull
++#define XCT_FUN_SE		0x0000000000000001ull
++
++#define XCT_FUN_SRC_PTR(len, addr)	(XCT_PTR_LEN(len) | XCT_PTR_ADDR(addr))
++#define XCT_FUN_DST_PTR(len, addr)	(XCT_FUN_SRC_PTR(len, addr) | \
++					0x8000000000000000ull)
++
++#define XCT_CTRL_HDR_FUN_NUM_M		0x01c0000000000000ull
++#define XCT_CTRL_HDR_FUN_NUM_S		54
++#define XCT_CTRL_HDR_LEN_M		0x0007ffff00000000ull
++#define XCT_CTRL_HDR_LEN_S		32
++#define XCT_CTRL_HDR_REG_M		0x00000000000000ffull
++#define XCT_CTRL_HDR_REG_S		0
++
++#define XCT_CTRL_HDR(funcN,len,reg)	(0x9400000000000000ull | \
++			((((long)(funcN)) << XCT_CTRL_HDR_FUN_NUM_S) \
++			& XCT_CTRL_HDR_FUN_NUM_M) | \
++			((((long)(len)) << \
++			XCT_CTRL_HDR_LEN_S) & XCT_CTRL_HDR_LEN_M) | \
++			((((long)(reg)) << \
++			XCT_CTRL_HDR_REG_S) & XCT_CTRL_HDR_REG_M))
++
++/* Function config command options */
++#define	DMA_CALGO_DES			0x00
++#define	DMA_CALGO_3DES			0x01
++#define	DMA_CALGO_AES			0x02
++#define	DMA_CALGO_ARC			0x03
++
++#define DMA_FN_CIV0			0x02
++#define DMA_FN_CIV1			0x03
++#define DMA_FN_HKEY0			0x0a
++
++#define XCT_PTR_ADDR_LEN(ptr)		((ptr) & XCT_PTR_ADDR_M), \
++			(((ptr) & XCT_PTR_LEN_M) >> XCT_PTR_LEN_S)
++
++#endif /* PASEMI_FNU_H */
+diff -Nur linux-2.6.30.orig/crypto/ocf/random.c linux-2.6.30/crypto/ocf/random.c
+--- linux-2.6.30.orig/crypto/ocf/random.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.30/crypto/ocf/random.c	2009-06-11 10:55:27.000000000 +0200
+@@ -0,0 +1,317 @@
++/*
++ * A system independent way of adding entropy to the kernel's pool
++ * this way the drivers can focus on the real work and we can take
++ * care of pushing it to the appropriate place in the kernel.
++ *
++ * This should be fast and callable from timers/interrupts
++ *
++ * Written by David McCullough <david_mccullough@securecomputing.com>
++ * Copyright (C) 2006-2007 David McCullough
++ * Copyright (C) 2004-2005 Intel Corporation.
++ *
++ * LICENSE TERMS
++ *
++ * The free distribution and use of this software in both source and binary
++ * form is allowed (with or without changes) provided that:
++ *
++ *   1. distributions of this source code include the above copyright
++ *      notice, this list of conditions and the following disclaimer;
++ *
++ *   2. distributions in binary form include the above copyright
++ *      notice, this list of conditions and the following disclaimer
++ *      in the documentation and/or other associated materials;
++ *
++ *   3. the copyright holder's name is not used to endorse products
++ *      built using this software without specific written permission.
++ *
++ * ALTERNATIVELY, provided that this notice is retained in full, this product
++ * may be distributed under the terms of the GNU General Public License (GPL),
++ * in which case the provisions of the GPL apply INSTEAD OF those given above.
++ *
++ * DISCLAIMER
++ *
++ * This software is provided 'as is' with no explicit or implied warranties
++ * in respect of its properties, including, but not limited to, correctness
++ * and/or fitness for purpose.
++ */
++
++#ifndef AUTOCONF_INCLUDED
++#include <linux/config.h>
++#endif
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/list.h>
++#include <linux/slab.h>
++#include <linux/wait.h>
++#include <linux/sched.h>
++#include <linux/spinlock.h>
++#include <linux/version.h>
++#include <linux/unistd.h>
++#include <linux/poll.h>
++#include <linux/random.h>
++#include <cryptodev.h>
++
++#ifdef CONFIG_OCF_FIPS
++#include "rndtest.h"
++#endif
++
++#ifndef HAS_RANDOM_INPUT_WAIT
++#error "Please do not enable OCF_RANDOMHARVEST unless you have applied patches"
++#endif
++
++/*
++ * a hack to access the debug levels from the crypto driver
++ */
++extern int crypto_debug;
++#define debug crypto_debug
++
++/*
++ * a list of all registered random providers
++ */
++static LIST_HEAD(random_ops);
++static int started = 0;
++static int initted = 0;
++
++struct random_op {
++	struct list_head random_list;
++	u_int32_t driverid;
++	int (*read_random)(void *arg, u_int32_t *buf, int len);
++	void *arg;
++};
++
++static int random_proc(void *arg);
++
++static pid_t		randomproc = (pid_t) -1;
++static spinlock_t	random_lock;
++
++/*
++ * just init the spin locks
++ */
++static int
++crypto_random_init(void)
++{
++	spin_lock_init(&random_lock);
++	initted = 1;
++	return(0);
++}
++
++/*
++ * Add the given random reader to our list (if not present)
++ * and start the thread (if not already started)
++ *
++ * we have to assume that driver id is ok for now
++ */
++int
++crypto_rregister(
++	u_int32_t driverid,
++	int (*read_random)(void *arg, u_int32_t *buf, int len),
++	void *arg)
++{
++	unsigned long flags;
++	int ret = 0;
++	struct random_op	*rops, *tmp;
++
++	dprintk("%s,%d: %s(0x%x, %p, %p)\n", __FILE__, __LINE__,
++			__FUNCTION__, driverid, read_random, arg);
++
++	if (!initted)
++		crypto_random_init();
++
++#if 0
++	struct cryptocap	*cap;
++
++	cap = crypto_checkdriver(driverid);
++	if (!cap)
++		return EINVAL;
++#endif
++
++	list_for_each_entry_safe(rops, tmp, &random_ops, random_list) {
++		if (rops->driverid == driverid && rops->read_random == read_random)
++			return EEXIST;
++	}
++
++	rops = (struct random_op *) kmalloc(sizeof(*rops), GFP_KERNEL);
++	if (!rops)
++		return ENOMEM;
++
++	rops->driverid    = driverid;
++	rops->read_random = read_random;
++	rops->arg = arg;
++
++	spin_lock_irqsave(&random_lock, flags);
++	list_add_tail(&rops->random_list, &random_ops);
++	if (!started) {
++		randomproc = kernel_thread(random_proc, NULL, CLONE_FS|CLONE_FILES);
++		if (randomproc < 0) {
++			ret = randomproc;
++			printk("crypto: crypto_rregister cannot start random thread; "
++					"error %d", ret);
++		} else
++			started = 1;
++	}
++	spin_unlock_irqrestore(&random_lock, flags);
++
++	return ret;
++}
++EXPORT_SYMBOL(crypto_rregister);
++
++int
++crypto_runregister_all(u_int32_t driverid)
++{
++	struct random_op *rops, *tmp;
++	unsigned long flags;
++
++	dprintk("%s,%d: %s(0x%x)\n", __FILE__, __LINE__, __FUNCTION__, driverid);
++
++	list_for_each_entry_safe(rops, tmp, &random_ops, random_list) {
++		if (rops->driverid == driverid) {
++			list_del(&rops->random_list);
++			kfree(rops);
++		}
++	}
++
++	spin_lock_irqsave(&random_lock, flags);
++	if (list_empty(&random_ops) && started)
++		kill_pid(randomproc, SIGKILL, 1);
++	spin_unlock_irqrestore(&random_lock, flags);
++	return(0);
++}
++EXPORT_SYMBOL(crypto_runregister_all);
++
++/*
++ * while we can add entropy to random.c continue to read random data from
++ * the drivers and push it to random.
++ */
++static int
++random_proc(void *arg)
++{
++	int n;
++	int wantcnt;
++	int bufcnt = 0;
++	int retval = 0;
++	int *buf = NULL;
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
++	daemonize();
++	spin_lock_irq(&current->sigmask_lock);
++	sigemptyset(&current->blocked);
++	recalc_sigpending(current);
++	spin_unlock_irq(&current->sigmask_lock);
++	sprintf(current->comm, "ocf-random");
++#else
++	daemonize("ocf-random");
++	allow_signal(SIGKILL);
++#endif
++
++	(void) get_fs();
++	set_fs(get_ds());
++
++#ifdef CONFIG_OCF_FIPS
++#define NUM_INT (RNDTEST_NBYTES/sizeof(int))
++#else
++#define NUM_INT 32
++#endif
++
++	/*
++	 * some devices can transferr their RNG data direct into memory,
++	 * so make sure it is device friendly
++	 */
++	buf = kmalloc(NUM_INT * sizeof(int), GFP_DMA);
++	if (NULL == buf) {
++		printk("crypto: RNG could not allocate memory\n");
++		retval = -ENOMEM;
++		goto bad_alloc;
++	}
++
++	wantcnt = NUM_INT;   /* start by adding some entropy */
++
++	/*
++	 * its possible due to errors or driver removal that we no longer
++	 * have anything to do,  if so exit or we will consume all the CPU
++	 * doing nothing
++	 */
++	while (!list_empty(&random_ops)) {
++		struct random_op	*rops, *tmp;
++
++#ifdef CONFIG_OCF_FIPS
++		if (wantcnt)
++			wantcnt = NUM_INT; /* FIPs mode can do 20000 bits or none */
++#endif
++
++		/* see if we can get enough entropy to make the world
++		 * a better place.
++		 */
++		while (bufcnt < wantcnt && bufcnt < NUM_INT) {
++			list_for_each_entry_safe(rops, tmp, &random_ops, random_list) {
++
++				n = (*rops->read_random)(rops->arg, &buf[bufcnt],
++							 NUM_INT - bufcnt);
++
++				/* on failure remove the random number generator */
++				if (n == -1) {
++					list_del(&rops->random_list);
++					printk("crypto: RNG (driverid=0x%x) failed, disabling\n",
++							rops->driverid);
++					kfree(rops);
++				} else if (n > 0)
++					bufcnt += n;
++			}
++			/* give up CPU for a bit, just in case as this is a loop */
++			schedule();
++		}
++
++
++#ifdef CONFIG_OCF_FIPS
++		if (bufcnt > 0 && rndtest_buf((unsigned char *) &buf[0])) {
++			dprintk("crypto: buffer had fips errors, discarding\n");
++			bufcnt = 0;
++		}
++#endif
++
++		/*
++		 * if we have a certified buffer,  we can send some data
++		 * to /dev/random and move along
++		 */
++		if (bufcnt > 0) {
++			/* add what we have */
++			random_input_words(buf, bufcnt, bufcnt*sizeof(int)*8);
++			bufcnt = 0;
++		}
++
++		/* give up CPU for a bit so we don't hog while filling */
++		schedule();
++
++		/* wait for needing more */
++		wantcnt = random_input_wait();
++
++		if (wantcnt <= 0)
++			wantcnt = 0; /* try to get some info again */
++		else
++		 	/* round up to one word or we can loop forever */
++			wantcnt = (wantcnt + (sizeof(int)*8)) / (sizeof(int)*8);
++		if (wantcnt > NUM_INT) {
++			wantcnt = NUM_INT;
++		}
++
++		if (signal_pending(current)) {
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
++			spin_lock_irq(&current->sigmask_lock);
++#endif
++			flush_signals(current);
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
++			spin_unlock_irq(&current->sigmask_lock);
++#endif
++		}
++	}
++	
++	kfree(buf);
++
++bad_alloc:
++	spin_lock_irq(&random_lock);
++	randomproc = (pid_t) -1;
++	started = 0;
++	spin_unlock_irq(&random_lock);
++
++	return retval;
++}
++
+diff -Nur linux-2.6.30.orig/crypto/ocf/README linux-2.6.30/crypto/ocf/README
+--- linux-2.6.30.orig/crypto/ocf/README	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.30/crypto/ocf/README	2009-06-11 10:55:27.000000000 +0200
+@@ -0,0 +1,167 @@
++README - ocf-linux-20071215
++---------------------------
++
++This README provides instructions for getting ocf-linux compiled and
++operating in a generic linux environment.  For other information you
++might like to visit the home page for this project:
++
++    http://ocf-linux.sourceforge.net/
++
++Adding OCF to linux
++-------------------
++
++    Not much in this file for now,  just some notes.  I usually build
++    the ocf support as modules but it can be built into the kernel as
++    well.  To use it:
++
++    * mknod /dev/crypto c 10 70
++
++    * to add OCF to your kernel source,  you have two options.  Apply
++      the kernel specific patch:
++
++          cd linux-2.4*; gunzip < ocf-linux-24-XXXXXXXX.patch.gz | patch -p1
++          cd linux-2.6*; gunzip < ocf-linux-26-XXXXXXXX.patch.gz | patch -p1
++    
++      if you do one of the above,  then you can proceed to the next step,
++      or you can do the above process by hand with using the patches against
++      linux-2.4.35 and 2.6.23 to include the ocf code under crypto/ocf.
++      Here's how to add it:
++
++      for 2.4.35 (and later)
++
++          cd linux-2.4.35/crypto
++          tar xvzf ocf-linux.tar.gz
++          cd ..
++          patch -p1 < crypto/ocf/patches/linux-2.4.35-ocf.patch
++
++      for 2.6.23 (and later),  find the kernel patch specific (or nearest)
++      to your kernel versions and then:
++
++          cd linux-2.6.NN/crypto
++          tar xvzf ocf-linux.tar.gz
++          cd ..
++          patch -p1 < crypto/ocf/patches/linux-2.6.NN-ocf.patch
++
++      It should be easy to take this patch and apply it to other more
++      recent versions of the kernels.  The same patches should also work
++      relatively easily on kernels as old as 2.6.11 and 2.4.18.
++      
++    * under 2.4 if you are on a non-x86 platform,  you may need to:
++
++        cp linux-2.X.x/include/asm-i386/kmap_types.h linux-2.X.x/include/asm-YYY
++
++      so that you can build the kernel crypto support needed for the cryptosoft
++      driver.
++
++    * For simplicity you should enable all the crypto support in your kernel
++      except for the test driver.  Likewise for the OCF options.  Do not
++      enable OCF crypto drivers for HW that you do not have (for example
++      ixp4xx will not compile on non-Xscale systems).
++
++    * make sure that cryptodev.h (from ocf-linux.tar.gz) is installed as
++      crypto/cryptodev.h in an include directory that is used for building
++      applications for your platform.  For example on a host system that
++      might be:
++
++              /usr/include/crypto/cryptodev.h
++
++    * patch your openssl-0.9.8i code with the openssl-0.9.8i.patch.
++      (NOTE: there is no longer a need to patch ssh). The patch is against:
++      openssl-0_9_8e
++
++      If you need a patch for an older version of openssl,  you should look
++      to older OCF releases.  This patch is unlikely to work on older
++      openssl versions.
++
++      openssl-0.9.8i.patch
++                - enables --with-cryptodev for non BSD systems
++                - adds -cpu option to openssl speed for calculating CPU load
++                  under linux
++                - fixes null pointer in openssl speed multi thread output.
++                - fixes test keys to work with linux crypto's more stringent
++                  key checking.
++                - adds MD5/SHA acceleration (Ronen Shitrit), only enabled
++                  with the --with-cryptodev-digests option
++                - fixes bug in engine code caching.
++
++    * build crypto-tools-XXXXXXXX.tar.gz if you want to try some of the BSD
++      tools for testing OCF (ie., cryptotest).
++
++How to load the OCF drivers
++---------------------------
++
++    First insert the base modules:
++
++        insmod ocf
++        insmod cryptodev
++
++    You can then install the software OCF driver with:
++
++        insmod cryptosoft
++
++    and one or more of the OCF HW drivers with:
++
++        insmod safe
++        insmod hifn7751
++        insmod ixp4xx
++        ...
++
++    all the drivers take a debug option to enable verbose debug so that
++    you can see what is going on.  For debug you load them as:
++
++        insmod ocf crypto_debug=1
++        insmod cryptodev cryptodev_debug=1
++        insmod cryptosoft swcr_debug=1
++
++    You may load more than one OCF crypto driver but then there is no guarantee
++    as to which will be used.
++
++    You can also enable debug at run time on 2.6 systems with the following:
++
++        echo 1 > /sys/module/ocf/parameters/crypto_debug
++        echo 1 > /sys/module/cryptodev/parameters/cryptodev_debug
++        echo 1 > /sys/module/cryptosoft/parameters/swcr_debug
++        echo 1 > /sys/module/hifn7751/parameters/hifn_debug
++        echo 1 > /sys/module/safe/parameters/safe_debug
++        echo 1 > /sys/module/ixp4xx/parameters/ixp_debug
++        ...
++
++Testing the OCF support
++-----------------------
++
++    run "cryptotest",  it should do a short test for a couple of
++    des packets.  If it does everything is working.
++
++    If this works,  then ssh will use the driver when invoked as:
++
++        ssh -c 3des username@host
++
++    to see for sure that it is operating, enable debug as defined above.
++
++    To get a better idea of performance run:
++
++        cryptotest 100 4096
++
++    There are more options to cryptotest,  see the help.
++
++    It is also possible to use openssl to test the speed of the crypto
++    drivers.
++
++        openssl speed -evp des -engine cryptodev -elapsed
++        openssl speed -evp des3 -engine cryptodev -elapsed
++        openssl speed -evp aes128 -engine cryptodev -elapsed
++
++    and multiple threads (10) with:
++
++        openssl speed -evp des -engine cryptodev -elapsed -multi 10
++        openssl speed -evp des3 -engine cryptodev -elapsed -multi 10
++        openssl speed -evp aes128 -engine cryptodev -elapsed -multi 10
++
++    for public key testing you can try:
++
++        cryptokeytest
++        openssl speed -engine cryptodev rsa -elapsed
++        openssl speed -engine cryptodev dsa -elapsed
++
++David McCullough
++david_mccullough@securecomputing.com
+diff -Nur linux-2.6.30.orig/crypto/ocf/rndtest.c linux-2.6.30/crypto/ocf/rndtest.c
+--- linux-2.6.30.orig/crypto/ocf/rndtest.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.30/crypto/ocf/rndtest.c	2009-06-11 10:55:27.000000000 +0200
+@@ -0,0 +1,300 @@
++/*	$OpenBSD$	*/
++
++/*
++ * OCF/Linux port done by David McCullough <david_mccullough@securecomputing.com>
++ * Copyright (C) 2006-2007 David McCullough
++ * Copyright (C) 2004-2005 Intel Corporation.
++ * The license and original author are listed below.
++ *
++ * Copyright (c) 2002 Jason L. Wright (jason@thought.net)
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ * 1. Redistributions of source code must retain the above copyright
++ *    notice, this list of conditions and the following disclaimer.
++ * 2. Redistributions in binary form must reproduce the above copyright
++ *    notice, this list of conditions and the following disclaimer in the
++ *    documentation and/or other materials provided with the distribution.
++ * 3. All advertising materials mentioning features or use of this software
++ *    must display the following acknowledgement:
++ *	This product includes software developed by Jason L. Wright
++ * 4. The name of the author may not be used to endorse or promote products
++ *    derived from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
++ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
++ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
++ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef AUTOCONF_INCLUDED
++#include <linux/config.h>
++#endif
++#include <linux/module.h>
++#include <linux/list.h>
++#include <linux/wait.h>
++#include <linux/time.h>
++#include <linux/version.h>
++#include <linux/unistd.h>
++#include <linux/kernel.h>
++#include <linux/string.h>
++#include <linux/time.h>
++#include <cryptodev.h>
++#include "rndtest.h"
++
++static struct rndtest_stats rndstats;
++
++static	void rndtest_test(struct rndtest_state *);
++
++/* The tests themselves */
++static	int rndtest_monobit(struct rndtest_state *);
++static	int rndtest_runs(struct rndtest_state *);
++static	int rndtest_longruns(struct rndtest_state *);
++static	int rndtest_chi_4(struct rndtest_state *);
++
++static	int rndtest_runs_check(struct rndtest_state *, int, int *);
++static	void rndtest_runs_record(struct rndtest_state *, int, int *);
++
++static const struct rndtest_testfunc {
++	int (*test)(struct rndtest_state *);
++} rndtest_funcs[] = {
++	{ rndtest_monobit },
++	{ rndtest_runs },
++	{ rndtest_chi_4 },
++	{ rndtest_longruns },
++};
++
++#define	RNDTEST_NTESTS	(sizeof(rndtest_funcs)/sizeof(rndtest_funcs[0]))
++
++static void
++rndtest_test(struct rndtest_state *rsp)
++{
++	int i, rv = 0;
++
++	rndstats.rst_tests++;
++	for (i = 0; i < RNDTEST_NTESTS; i++)
++		rv |= (*rndtest_funcs[i].test)(rsp);
++	rsp->rs_discard = (rv != 0);
++}
++
++
++extern int crypto_debug;
++#define rndtest_verbose 2
++#define rndtest_report(rsp, failure, fmt, a...) \
++	{ if (failure || crypto_debug) { printk("rng_test: " fmt "\n", a); } else; }
++
++#define	RNDTEST_MONOBIT_MINONES	9725
++#define	RNDTEST_MONOBIT_MAXONES	10275
++
++static int
++rndtest_monobit(struct rndtest_state *rsp)
++{
++	int i, ones = 0, j;
++	u_int8_t r;
++
++	for (i = 0; i < RNDTEST_NBYTES; i++) {
++		r = rsp->rs_buf[i];
++		for (j = 0; j < 8; j++, r <<= 1)
++			if (r & 0x80)
++				ones++;
++	}
++	if (ones > RNDTEST_MONOBIT_MINONES &&
++	    ones < RNDTEST_MONOBIT_MAXONES) {
++		if (rndtest_verbose > 1)
++			rndtest_report(rsp, 0, "monobit pass (%d < %d < %d)",
++			    RNDTEST_MONOBIT_MINONES, ones,
++			    RNDTEST_MONOBIT_MAXONES);
++		return (0);
++	} else {
++		if (rndtest_verbose)
++			rndtest_report(rsp, 1,
++			    "monobit failed (%d ones)", ones);
++		rndstats.rst_monobit++;
++		return (-1);
++	}
++}
++
++#define	RNDTEST_RUNS_NINTERVAL	6
++
++static const struct rndtest_runs_tabs {
++	u_int16_t min, max;
++} rndtest_runs_tab[] = {
++	{ 2343, 2657 },
++	{ 1135, 1365 },
++	{ 542, 708 },
++	{ 251, 373 },
++	{ 111, 201 },
++	{ 111, 201 },
++};
++
++static int
++rndtest_runs(struct rndtest_state *rsp)
++{
++	int i, j, ones, zeros, rv = 0;
++	int onei[RNDTEST_RUNS_NINTERVAL], zeroi[RNDTEST_RUNS_NINTERVAL];
++	u_int8_t c;
++
++	bzero(onei, sizeof(onei));
++	bzero(zeroi, sizeof(zeroi));
++	ones = zeros = 0;
++	for (i = 0; i < RNDTEST_NBYTES; i++) {
++		c = rsp->rs_buf[i];
++		for (j = 0; j < 8; j++, c <<= 1) {
++			if (c & 0x80) {
++				ones++;
++				rndtest_runs_record(rsp, zeros, zeroi);
++				zeros = 0;
++			} else {
++				zeros++;
++				rndtest_runs_record(rsp, ones, onei);
++				ones = 0;
++			}
++		}
++	}
++	rndtest_runs_record(rsp, ones, onei);
++	rndtest_runs_record(rsp, zeros, zeroi);
++
++	rv |= rndtest_runs_check(rsp, 0, zeroi);
++	rv |= rndtest_runs_check(rsp, 1, onei);
++
++	if (rv)
++		rndstats.rst_runs++;
++
++	return (rv);
++}
++
++static void
++rndtest_runs_record(struct rndtest_state *rsp, int len, int *intrv)
++{
++	if (len == 0)
++		return;
++	if (len > RNDTEST_RUNS_NINTERVAL)
++		len = RNDTEST_RUNS_NINTERVAL;
++	len -= 1;
++	intrv[len]++;
++}
++
++static int
++rndtest_runs_check(struct rndtest_state *rsp, int val, int *src)
++{
++	int i, rv = 0;
++
++	for (i = 0; i < RNDTEST_RUNS_NINTERVAL; i++) {
++		if (src[i] < rndtest_runs_tab[i].min ||
++		    src[i] > rndtest_runs_tab[i].max) {
++			rndtest_report(rsp, 1,
++			    "%s interval %d failed (%d, %d-%d)",
++			    val ? "ones" : "zeros",
++			    i + 1, src[i], rndtest_runs_tab[i].min,
++			    rndtest_runs_tab[i].max);
++			rv = -1;
++		} else {
++			rndtest_report(rsp, 0,
++			    "runs pass %s interval %d (%d < %d < %d)",
++			    val ? "ones" : "zeros",
++			    i + 1, rndtest_runs_tab[i].min, src[i],
++			    rndtest_runs_tab[i].max);
++		}
++	}
++	return (rv);
++}
++
++static int
++rndtest_longruns(struct rndtest_state *rsp)
++{
++	int i, j, ones = 0, zeros = 0, maxones = 0, maxzeros = 0;
++	u_int8_t c;
++
++	for (i = 0; i < RNDTEST_NBYTES; i++) {
++		c = rsp->rs_buf[i];
++		for (j = 0; j < 8; j++, c <<= 1) {
++			if (c & 0x80) {
++				zeros = 0;
++				ones++;
++				if (ones > maxones)
++					maxones = ones;
++			} else {
++				ones = 0;
++				zeros++;
++				if (zeros > maxzeros)
++					maxzeros = zeros;
++			}
++		}
++	}
++
++	if (maxones < 26 && maxzeros < 26) {
++		rndtest_report(rsp, 0, "longruns pass (%d ones, %d zeros)",
++			maxones, maxzeros);
++		return (0);
++	} else {
++		rndtest_report(rsp, 1, "longruns fail (%d ones, %d zeros)",
++			maxones, maxzeros);
++		rndstats.rst_longruns++;
++		return (-1);
++	}
++}
++
++/*
++ * chi^2 test over 4 bits: (this is called the poker test in FIPS 140-2,
++ * but it is really the chi^2 test over 4 bits (the poker test as described
++ * by Knuth vol 2 is something different, and I take him as authoritative
++ * on nomenclature over NIST).
++ */
++#define	RNDTEST_CHI4_K	16
++#define	RNDTEST_CHI4_K_MASK	(RNDTEST_CHI4_K - 1)
++
++/*
++ * The unnormalized values are used so that we don't have to worry about
++ * fractional precision.  The "real" value is found by:
++ *	(V - 1562500) * (16 / 5000) = Vn   (where V is the unnormalized value)
++ */
++#define	RNDTEST_CHI4_VMIN	1563181		/* 2.1792 */
++#define	RNDTEST_CHI4_VMAX	1576929		/* 46.1728 */
++
++static int
++rndtest_chi_4(struct rndtest_state *rsp)
++{
++	unsigned int freq[RNDTEST_CHI4_K], i, sum;
++
++	for (i = 0; i < RNDTEST_CHI4_K; i++)
++		freq[i] = 0;
++
++	/* Get number of occurances of each 4 bit pattern */
++	for (i = 0; i < RNDTEST_NBYTES; i++) {
++		freq[(rsp->rs_buf[i] >> 4) & RNDTEST_CHI4_K_MASK]++;
++		freq[(rsp->rs_buf[i] >> 0) & RNDTEST_CHI4_K_MASK]++;
++	}
++
++	for (i = 0, sum = 0; i < RNDTEST_CHI4_K; i++)
++		sum += freq[i] * freq[i];
++
++	if (sum >= 1563181 && sum <= 1576929) {
++		rndtest_report(rsp, 0, "chi^2(4): pass (sum %u)", sum);
++		return (0);
++	} else {
++		rndtest_report(rsp, 1, "chi^2(4): failed (sum %u)", sum);
++		rndstats.rst_chi++;
++		return (-1);
++	}
++}
++
++int
++rndtest_buf(unsigned char *buf)
++{
++	struct rndtest_state rsp;
++
++	memset(&rsp, 0, sizeof(rsp));
++	rsp.rs_buf = buf;
++	rndtest_test(&rsp);
++	return(rsp.rs_discard);
++}
++
+diff -Nur linux-2.6.30.orig/crypto/ocf/rndtest.h linux-2.6.30/crypto/ocf/rndtest.h
+--- linux-2.6.30.orig/crypto/ocf/rndtest.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.30/crypto/ocf/rndtest.h	2009-06-11 10:55:27.000000000 +0200
+@@ -0,0 +1,54 @@
++/*	$FreeBSD: src/sys/dev/rndtest/rndtest.h,v 1.1 2003/03/11 22:54:44 sam Exp $	*/
++/*	$OpenBSD$	*/
++
++/*
++ * Copyright (c) 2002 Jason L. Wright (jason@thought.net)
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ * 1. Redistributions of source code must retain the above copyright
++ *    notice, this list of conditions and the following disclaimer.
++ * 2. Redistributions in binary form must reproduce the above copyright
++ *    notice, this list of conditions and the following disclaimer in the
++ *    documentation and/or other materials provided with the distribution.
++ * 3. All advertising materials mentioning features or use of this software
++ *    must display the following acknowledgement:
++ *	This product includes software developed by Jason L. Wright
++ * 4. The name of the author may not be used to endorse or promote products
++ *    derived from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
++ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
++ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
++ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++
++
++/* Some of the tests depend on these values */
++#define	RNDTEST_NBYTES	2500
++#define	RNDTEST_NBITS	(8 * RNDTEST_NBYTES)
++
++struct rndtest_state {
++	int		rs_discard;	/* discard/accept random data */
++	u_int8_t	*rs_buf;
++};
++
++struct rndtest_stats {
++	u_int32_t	rst_discard;	/* number of bytes discarded */
++	u_int32_t	rst_tests;	/* number of test runs */
++	u_int32_t	rst_monobit;	/* monobit test failures */
++	u_int32_t	rst_runs;	/* 0/1 runs failures */
++	u_int32_t	rst_longruns;	/* longruns failures */
++	u_int32_t	rst_chi;	/* chi^2 failures */
++};
++
++extern int rndtest_buf(unsigned char *buf);
+diff -Nur linux-2.6.30.orig/crypto/ocf/safe/Makefile linux-2.6.30/crypto/ocf/safe/Makefile
+--- linux-2.6.30.orig/crypto/ocf/safe/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.30/crypto/ocf/safe/Makefile	2009-06-11 10:55:27.000000000 +0200
+@@ -0,0 +1,12 @@
++# for SGlinux builds
++-include $(ROOTDIR)/modules/.config
++
++obj-$(CONFIG_OCF_SAFE) += safe.o
++
++obj ?= .
++EXTRA_CFLAGS += -I$(obj)/.. -I$(obj)/
++
++ifdef TOPDIR
++-include $(TOPDIR)/Rules.make
++endif
++
+diff -Nur linux-2.6.30.orig/crypto/ocf/safe/md5.c linux-2.6.30/crypto/ocf/safe/md5.c
+--- linux-2.6.30.orig/crypto/ocf/safe/md5.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.30/crypto/ocf/safe/md5.c	2009-06-11 10:55:27.000000000 +0200
+@@ -0,0 +1,308 @@
++/*	$KAME: md5.c,v 1.5 2000/11/08 06:13:08 itojun Exp $	*/
++/*
++ * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ * 1. Redistributions of source code must retain the above copyright
++ *    notice, this list of conditions and the following disclaimer.
++ * 2. Redistributions in binary form must reproduce the above copyright
++ *    notice, this list of conditions and the following disclaimer in the
++ *    documentation and/or other materials provided with the distribution.
++ * 3. Neither the name of the project nor the names of its contributors
++ *    may be used to endorse or promote products derived from this software
++ *    without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
++ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
++ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
++ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
++ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
++ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
++ * SUCH DAMAGE.
++ */
++
++#if 0
++#include <sys/cdefs.h>
++__FBSDID("$FreeBSD: src/sys/crypto/md5.c,v 1.9 2004/01/27 19:49:19 des Exp $");
++
++#include <sys/types.h>
++#include <sys/cdefs.h>
++#include <sys/time.h>
++#include <sys/systm.h>
++#include <crypto/md5.h>
++#endif
++
++#define SHIFT(X, s) (((X) << (s)) | ((X) >> (32 - (s))))
++
++#define F(X, Y, Z) (((X) & (Y)) | ((~X) & (Z)))
++#define G(X, Y, Z) (((X) & (Z)) | ((Y) & (~Z)))
++#define H(X, Y, Z) ((X) ^ (Y) ^ (Z))
++#define I(X, Y, Z) ((Y) ^ ((X) | (~Z)))
++
++#define ROUND1(a, b, c, d, k, s, i) { \
++	(a) = (a) + F((b), (c), (d)) + X[(k)] + T[(i)]; \
++	(a) = SHIFT((a), (s)); \
++	(a) = (b) + (a); \
++}
++
++#define ROUND2(a, b, c, d, k, s, i) { \
++	(a) = (a) + G((b), (c), (d)) + X[(k)] + T[(i)]; \
++	(a) = SHIFT((a), (s)); \
++	(a) = (b) + (a); \
++}
++
++#define ROUND3(a, b, c, d, k, s, i) { \
++	(a) = (a) + H((b), (c), (d)) + X[(k)] + T[(i)]; \
++	(a) = SHIFT((a), (s)); \
++	(a) = (b) + (a); \
++}
++
++#define ROUND4(a, b, c, d, k, s, i) { \
++	(a) = (a) + I((b), (c), (d)) + X[(k)] + T[(i)]; \
++	(a) = SHIFT((a), (s)); \
++	(a) = (b) + (a); \
++}
++
++#define Sa	 7
++#define Sb	12
++#define Sc	17
++#define Sd	22
++
++#define Se	 5
++#define Sf	 9
++#define Sg	14
++#define Sh	20
++
++#define Si	 4
++#define Sj	11
++#define Sk	16
++#define Sl	23
++
++#define Sm	 6
++#define Sn	10
++#define So	15
++#define Sp	21
++
++#define MD5_A0	0x67452301
++#define MD5_B0	0xefcdab89
++#define MD5_C0	0x98badcfe
++#define MD5_D0	0x10325476
++
++/* Integer part of 4294967296 times abs(sin(i)), where i is in radians. */
++static const u_int32_t T[65] = {
++	0,
++	0xd76aa478, 	0xe8c7b756,	0x242070db,	0xc1bdceee,
++	0xf57c0faf,	0x4787c62a, 	0xa8304613,	0xfd469501,
++	0x698098d8,	0x8b44f7af,	0xffff5bb1,	0x895cd7be,
++	0x6b901122, 	0xfd987193, 	0xa679438e,	0x49b40821,
++
++	0xf61e2562,	0xc040b340, 	0x265e5a51, 	0xe9b6c7aa,
++	0xd62f105d,	0x2441453,	0xd8a1e681,	0xe7d3fbc8,
++	0x21e1cde6,	0xc33707d6, 	0xf4d50d87, 	0x455a14ed,
++	0xa9e3e905,	0xfcefa3f8, 	0x676f02d9, 	0x8d2a4c8a,
++
++	0xfffa3942,	0x8771f681, 	0x6d9d6122, 	0xfde5380c,
++	0xa4beea44, 	0x4bdecfa9, 	0xf6bb4b60, 	0xbebfbc70,
++	0x289b7ec6, 	0xeaa127fa, 	0xd4ef3085,	0x4881d05,
++	0xd9d4d039, 	0xe6db99e5, 	0x1fa27cf8, 	0xc4ac5665,
++
++	0xf4292244, 	0x432aff97, 	0xab9423a7, 	0xfc93a039,
++	0x655b59c3, 	0x8f0ccc92, 	0xffeff47d, 	0x85845dd1,
++	0x6fa87e4f, 	0xfe2ce6e0, 	0xa3014314, 	0x4e0811a1,
++	0xf7537e82, 	0xbd3af235, 	0x2ad7d2bb, 	0xeb86d391,
++};
++
++static const u_int8_t md5_paddat[MD5_BUFLEN] = {
++	0x80,	0,	0,	0,	0,	0,	0,	0,
++	0,	0,	0,	0,	0,	0,	0,	0,
++	0,	0,	0,	0,	0,	0,	0,	0,
++	0,	0,	0,	0,	0,	0,	0,	0,
++	0,	0,	0,	0,	0,	0,	0,	0,
++	0,	0,	0,	0,	0,	0,	0,	0,
++	0,	0,	0,	0,	0,	0,	0,	0,
++	0,	0,	0,	0,	0,	0,	0,	0,	
++};
++
++static void md5_calc(u_int8_t *, md5_ctxt *);
++
++void md5_init(ctxt)
++	md5_ctxt *ctxt;
++{
++	ctxt->md5_n = 0;
++	ctxt->md5_i = 0;
++	ctxt->md5_sta = MD5_A0;
++	ctxt->md5_stb = MD5_B0;
++	ctxt->md5_stc = MD5_C0;
++	ctxt->md5_std = MD5_D0;
++	bzero(ctxt->md5_buf, sizeof(ctxt->md5_buf));
++}
++
++void md5_loop(ctxt, input, len)
++	md5_ctxt *ctxt;
++	u_int8_t *input;
++	u_int len; /* number of bytes */
++{
++	u_int gap, i;
++
++	ctxt->md5_n += len * 8; /* byte to bit */
++	gap = MD5_BUFLEN - ctxt->md5_i;
++
++	if (len >= gap) {
++		bcopy((void *)input, (void *)(ctxt->md5_buf + ctxt->md5_i),
++			gap);
++		md5_calc(ctxt->md5_buf, ctxt);
++
++		for (i = gap; i + MD5_BUFLEN <= len; i += MD5_BUFLEN) {
++			md5_calc((u_int8_t *)(input + i), ctxt);
++		}
++		
++		ctxt->md5_i = len - i;
++		bcopy((void *)(input + i), (void *)ctxt->md5_buf, ctxt->md5_i);
++	} else {
++		bcopy((void *)input, (void *)(ctxt->md5_buf + ctxt->md5_i),
++			len);
++		ctxt->md5_i += len;
++	}
++}
++
++void md5_pad(ctxt)
++	md5_ctxt *ctxt;
++{
++	u_int gap;
++
++	/* Don't count up padding. Keep md5_n. */	
++	gap = MD5_BUFLEN - ctxt->md5_i;
++	if (gap > 8) {
++		bcopy(md5_paddat,
++		      (void *)(ctxt->md5_buf + ctxt->md5_i),
++		      gap - sizeof(ctxt->md5_n));
++	} else {
++		/* including gap == 8 */
++		bcopy(md5_paddat, (void *)(ctxt->md5_buf + ctxt->md5_i),
++			gap);
++		md5_calc(ctxt->md5_buf, ctxt);
++		bcopy((md5_paddat + gap),
++		      (void *)ctxt->md5_buf,
++		      MD5_BUFLEN - sizeof(ctxt->md5_n));
++	}
++
++	/* 8 byte word */	
++#if BYTE_ORDER == LITTLE_ENDIAN
++	bcopy(&ctxt->md5_n8[0], &ctxt->md5_buf[56], 8);
++#endif
++#if BYTE_ORDER == BIG_ENDIAN
++	ctxt->md5_buf[56] = ctxt->md5_n8[7];
++	ctxt->md5_buf[57] = ctxt->md5_n8[6];
++	ctxt->md5_buf[58] = ctxt->md5_n8[5];
++	ctxt->md5_buf[59] = ctxt->md5_n8[4];
++	ctxt->md5_buf[60] = ctxt->md5_n8[3];
++	ctxt->md5_buf[61] = ctxt->md5_n8[2];
++	ctxt->md5_buf[62] = ctxt->md5_n8[1];
++	ctxt->md5_buf[63] = ctxt->md5_n8[0];
++#endif
++
++	md5_calc(ctxt->md5_buf, ctxt);
++}
++
++void md5_result(digest, ctxt)
++	u_int8_t *digest;
++	md5_ctxt *ctxt;
++{
++	/* 4 byte words */
++#if BYTE_ORDER == LITTLE_ENDIAN
++	bcopy(&ctxt->md5_st8[0], digest, 16);
++#endif
++#if BYTE_ORDER == BIG_ENDIAN
++	digest[ 0] = ctxt->md5_st8[ 3]; digest[ 1] = ctxt->md5_st8[ 2];
++	digest[ 2] = ctxt->md5_st8[ 1]; digest[ 3] = ctxt->md5_st8[ 0];
++	digest[ 4] = ctxt->md5_st8[ 7]; digest[ 5] = ctxt->md5_st8[ 6];
++	digest[ 6] = ctxt->md5_st8[ 5]; digest[ 7] = ctxt->md5_st8[ 4];
++	digest[ 8] = ctxt->md5_st8[11]; digest[ 9] = ctxt->md5_st8[10];
++	digest[10] = ctxt->md5_st8[ 9]; digest[11] = ctxt->md5_st8[ 8];
++	digest[12] = ctxt->md5_st8[15]; digest[13] = ctxt->md5_st8[14];
++	digest[14] = ctxt->md5_st8[13]; digest[15] = ctxt->md5_st8[12];
++#endif
++}
++
++static void md5_calc(b64, ctxt)
++	u_int8_t *b64;
++	md5_ctxt *ctxt;
++{
++	u_int32_t A = ctxt->md5_sta;
++	u_int32_t B = ctxt->md5_stb;
++	u_int32_t C = ctxt->md5_stc;
++	u_int32_t D = ctxt->md5_std;
++#if BYTE_ORDER == LITTLE_ENDIAN
++	u_int32_t *X = (u_int32_t *)b64;
++#endif	
++#if BYTE_ORDER == BIG_ENDIAN
++	/* 4 byte words */
++	/* what a brute force but fast! */
++	u_int32_t X[16];
++	u_int8_t *y = (u_int8_t *)X;
++	y[ 0] = b64[ 3]; y[ 1] = b64[ 2]; y[ 2] = b64[ 1]; y[ 3] = b64[ 0];
++	y[ 4] = b64[ 7]; y[ 5] = b64[ 6]; y[ 6] = b64[ 5]; y[ 7] = b64[ 4];
++	y[ 8] = b64[11]; y[ 9] = b64[10]; y[10] = b64[ 9]; y[11] = b64[ 8];
++	y[12] = b64[15]; y[13] = b64[14]; y[14] = b64[13]; y[15] = b64[12];
++	y[16] = b64[19]; y[17] = b64[18]; y[18] = b64[17]; y[19] = b64[16];
++	y[20] = b64[23]; y[21] = b64[22]; y[22] = b64[21]; y[23] = b64[20];
++	y[24] = b64[27]; y[25] = b64[26]; y[26] = b64[25]; y[27] = b64[24];
++	y[28] = b64[31]; y[29] = b64[30]; y[30] = b64[29]; y[31] = b64[28];
++	y[32] = b64[35]; y[33] = b64[34]; y[34] = b64[33]; y[35] = b64[32];
++	y[36] = b64[39]; y[37] = b64[38]; y[38] = b64[37]; y[39] = b64[36];
++	y[40] = b64[43]; y[41] = b64[42]; y[42] = b64[41]; y[43] = b64[40];
++	y[44] = b64[47]; y[45] = b64[46]; y[46] = b64[45]; y[47] = b64[44];
++	y[48] = b64[51]; y[49] = b64[50]; y[50] = b64[49]; y[51] = b64[48];
++	y[52] = b64[55]; y[53] = b64[54]; y[54] = b64[53]; y[55] = b64[52];
++	y[56] = b64[59]; y[57] = b64[58]; y[58] = b64[57]; y[59] = b64[56];
++	y[60] = b64[63]; y[61] = b64[62]; y[62] = b64[61]; y[63] = b64[60];
++#endif
++
++	ROUND1(A, B, C, D,  0, Sa,  1); ROUND1(D, A, B, C,  1, Sb,  2);
++	ROUND1(C, D, A, B,  2, Sc,  3); ROUND1(B, C, D, A,  3, Sd,  4);
++	ROUND1(A, B, C, D,  4, Sa,  5); ROUND1(D, A, B, C,  5, Sb,  6);
++	ROUND1(C, D, A, B,  6, Sc,  7); ROUND1(B, C, D, A,  7, Sd,  8);
++	ROUND1(A, B, C, D,  8, Sa,  9); ROUND1(D, A, B, C,  9, Sb, 10);
++	ROUND1(C, D, A, B, 10, Sc, 11); ROUND1(B, C, D, A, 11, Sd, 12);
++	ROUND1(A, B, C, D, 12, Sa, 13); ROUND1(D, A, B, C, 13, Sb, 14);
++	ROUND1(C, D, A, B, 14, Sc, 15); ROUND1(B, C, D, A, 15, Sd, 16);
++	
++	ROUND2(A, B, C, D,  1, Se, 17); ROUND2(D, A, B, C,  6, Sf, 18);
++	ROUND2(C, D, A, B, 11, Sg, 19); ROUND2(B, C, D, A,  0, Sh, 20);
++	ROUND2(A, B, C, D,  5, Se, 21); ROUND2(D, A, B, C, 10, Sf, 22);
++	ROUND2(C, D, A, B, 15, Sg, 23); ROUND2(B, C, D, A,  4, Sh, 24);
++	ROUND2(A, B, C, D,  9, Se, 25); ROUND2(D, A, B, C, 14, Sf, 26);
++	ROUND2(C, D, A, B,  3, Sg, 27); ROUND2(B, C, D, A,  8, Sh, 28);
++	ROUND2(A, B, C, D, 13, Se, 29); ROUND2(D, A, B, C,  2, Sf, 30);
++	ROUND2(C, D, A, B,  7, Sg, 31); ROUND2(B, C, D, A, 12, Sh, 32);
++
++	ROUND3(A, B, C, D,  5, Si, 33); ROUND3(D, A, B, C,  8, Sj, 34);
++	ROUND3(C, D, A, B, 11, Sk, 35); ROUND3(B, C, D, A, 14, Sl, 36);
++	ROUND3(A, B, C, D,  1, Si, 37); ROUND3(D, A, B, C,  4, Sj, 38);
++	ROUND3(C, D, A, B,  7, Sk, 39); ROUND3(B, C, D, A, 10, Sl, 40);
++	ROUND3(A, B, C, D, 13, Si, 41); ROUND3(D, A, B, C,  0, Sj, 42);
++	ROUND3(C, D, A, B,  3, Sk, 43); ROUND3(B, C, D, A,  6, Sl, 44);
++	ROUND3(A, B, C, D,  9, Si, 45); ROUND3(D, A, B, C, 12, Sj, 46);
++	ROUND3(C, D, A, B, 15, Sk, 47); ROUND3(B, C, D, A,  2, Sl, 48);
++	
++	ROUND4(A, B, C, D,  0, Sm, 49); ROUND4(D, A, B, C,  7, Sn, 50);	
++	ROUND4(C, D, A, B, 14, So, 51); ROUND4(B, C, D, A,  5, Sp, 52);	
++	ROUND4(A, B, C, D, 12, Sm, 53); ROUND4(D, A, B, C,  3, Sn, 54);	
++	ROUND4(C, D, A, B, 10, So, 55); ROUND4(B, C, D, A,  1, Sp, 56);	
++	ROUND4(A, B, C, D,  8, Sm, 57); ROUND4(D, A, B, C, 15, Sn, 58);	
++	ROUND4(C, D, A, B,  6, So, 59); ROUND4(B, C, D, A, 13, Sp, 60);	
++	ROUND4(A, B, C, D,  4, Sm, 61); ROUND4(D, A, B, C, 11, Sn, 62);	
++	ROUND4(C, D, A, B,  2, So, 63); ROUND4(B, C, D, A,  9, Sp, 64);
++
++	ctxt->md5_sta += A;
++	ctxt->md5_stb += B;
++	ctxt->md5_stc += C;
++	ctxt->md5_std += D;
++}
+diff -Nur linux-2.6.30.orig/crypto/ocf/safe/md5.h linux-2.6.30/crypto/ocf/safe/md5.h
+--- linux-2.6.30.orig/crypto/ocf/safe/md5.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.30/crypto/ocf/safe/md5.h	2009-06-11 10:55:27.000000000 +0200
+@@ -0,0 +1,76 @@
++/*	$FreeBSD: src/sys/crypto/md5.h,v 1.4 2002/03/20 05:13:50 alfred Exp $	*/
++/*	$KAME: md5.h,v 1.4 2000/03/27 04:36:22 sumikawa Exp $	*/
++
++/*
++ * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ * 1. Redistributions of source code must retain the above copyright
++ *    notice, this list of conditions and the following disclaimer.
++ * 2. Redistributions in binary form must reproduce the above copyright
++ *    notice, this list of conditions and the following disclaimer in the
++ *    documentation and/or other materials provided with the distribution.
++ * 3. Neither the name of the project nor the names of its contributors
++ *    may be used to endorse or promote products derived from this software
++ *    without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
++ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
++ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
++ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
++ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
++ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
++ * SUCH DAMAGE.
++ */
++
++#ifndef _NETINET6_MD5_H_
++#define _NETINET6_MD5_H_
++
++#define MD5_BUFLEN	64
++
++typedef struct {
++	union {
++		u_int32_t	md5_state32[4];
++		u_int8_t	md5_state8[16];
++	} md5_st;
++
++#define md5_sta		md5_st.md5_state32[0]
++#define md5_stb		md5_st.md5_state32[1]
++#define md5_stc		md5_st.md5_state32[2]
++#define md5_std		md5_st.md5_state32[3]
++#define md5_st8		md5_st.md5_state8
++
++	union {
++		u_int64_t	md5_count64;
++		u_int8_t	md5_count8[8];
++	} md5_count;
++#define md5_n	md5_count.md5_count64
++#define md5_n8	md5_count.md5_count8
++
++	u_int	md5_i;
++	u_int8_t	md5_buf[MD5_BUFLEN];
++} md5_ctxt;
++
++extern void md5_init(md5_ctxt *);
++extern void md5_loop(md5_ctxt *, u_int8_t *, u_int);
++extern void md5_pad(md5_ctxt *);
++extern void md5_result(u_int8_t *, md5_ctxt *);
++
++/* compatibility */
++#define MD5_CTX		md5_ctxt
++#define MD5Init(x)	md5_init((x))
++#define MD5Update(x, y, z)	md5_loop((x), (y), (z))
++#define MD5Final(x, y) \
++do {				\
++	md5_pad((y));		\
++	md5_result((x), (y));	\
++} while (0)
++
++#endif /* ! _NETINET6_MD5_H_*/
+diff -Nur linux-2.6.30.orig/crypto/ocf/safe/safe.c linux-2.6.30/crypto/ocf/safe/safe.c
+--- linux-2.6.30.orig/crypto/ocf/safe/safe.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.30/crypto/ocf/safe/safe.c	2009-06-11 10:55:27.000000000 +0200
+@@ -0,0 +1,2288 @@
++/*-
++ * Linux port done by David McCullough <david_mccullough@securecomputing.com>
++ * Copyright (C) 2004-2007 David McCullough
++ * The license and original author are listed below.
++ *
++ * Copyright (c) 2003 Sam Leffler, Errno Consulting
++ * Copyright (c) 2003 Global Technology Associates, Inc.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ * 1. Redistributions of source code must retain the above copyright
++ *    notice, this list of conditions and the following disclaimer.
++ * 2. Redistributions in binary form must reproduce the above copyright
++ *    notice, this list of conditions and the following disclaimer in the
++ *    documentation and/or other materials provided with the distribution.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
++ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
++ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
++ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
++ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
++ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
++ * SUCH DAMAGE.
++ *
++__FBSDID("$FreeBSD: src/sys/dev/safe/safe.c,v 1.18 2007/03/21 03:42:50 sam Exp $");
++ */
++
++#ifndef AUTOCONF_INCLUDED
++#include <linux/config.h>
++#endif
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/list.h>
++#include <linux/slab.h>
++#include <linux/wait.h>
++#include <linux/sched.h>
++#include <linux/pci.h>
++#include <linux/delay.h>
++#include <linux/interrupt.h>
++#include <linux/spinlock.h>
++#include <linux/random.h>
++#include <linux/version.h>
++#include <linux/skbuff.h>
++#include <asm/io.h>
++
++/*
++ * SafeNet SafeXcel-1141 hardware crypto accelerator
++ */
++
++#include <cryptodev.h>
++#include <uio.h>
++#include <safe/safereg.h>
++#include <safe/safevar.h>
++
++#if 1
++#define	DPRINTF(a)	do { \
++						if (debug) { \
++							printk("%s: ", sc ? \
++								device_get_nameunit(sc->sc_dev) : "safe"); \
++							printk a; \
++						} \
++					} while (0)
++#else
++#define	DPRINTF(a)
++#endif
++
++/*
++ * until we find a cleaner way, include the BSD md5/sha1 code
++ * here
++ */
++#define HMAC_HACK 1
++#ifdef HMAC_HACK
++#define LITTLE_ENDIAN 1234
++#define BIG_ENDIAN 4321
++#ifdef __LITTLE_ENDIAN
++#define BYTE_ORDER LITTLE_ENDIAN
++#endif
++#ifdef __BIG_ENDIAN
++#define BYTE_ORDER BIG_ENDIAN
++#endif
++#include <safe/md5.h>
++#include <safe/md5.c>
++#include <safe/sha1.h>
++#include <safe/sha1.c>
++
++u_int8_t hmac_ipad_buffer[64] = {
++    0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
++    0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
++    0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
++    0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
++    0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
++    0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
++    0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
++    0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36
++};
++
++u_int8_t hmac_opad_buffer[64] = {
++    0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
++    0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
++    0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
++    0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
++    0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
++    0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
++    0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
++    0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C
++};
++#endif /* HMAC_HACK */
++
++/* add proc entry for this */
++struct safe_stats safestats;
++
++#define debug safe_debug
++int safe_debug = 0;
++module_param(safe_debug, int, 0644);
++MODULE_PARM_DESC(safe_debug, "Enable debug");
++
++static	void safe_callback(struct safe_softc *, struct safe_ringentry *);
++static	void safe_feed(struct safe_softc *, struct safe_ringentry *);
++#if defined(CONFIG_OCF_RANDOMHARVEST) && !defined(SAFE_NO_RNG)
++static	void safe_rng_init(struct safe_softc *);
++int safe_rngbufsize = 8;		/* 32 bytes each read  */
++module_param(safe_rngbufsize, int, 0644);
++MODULE_PARM_DESC(safe_rngbufsize, "RNG polling buffer size (32-bit words)");
++int safe_rngmaxalarm = 8;		/* max alarms before reset */
++module_param(safe_rngmaxalarm, int, 0644);
++MODULE_PARM_DESC(safe_rngmaxalarm, "RNG max alarms before reset");
++#endif /* SAFE_NO_RNG */
++
++static void safe_totalreset(struct safe_softc *sc);
++static int safe_dmamap_aligned(struct safe_softc *sc, const struct safe_operand *op);
++static int safe_dmamap_uniform(struct safe_softc *sc, const struct safe_operand *op);
++static int safe_free_entry(struct safe_softc *sc, struct safe_ringentry *re);
++static int safe_kprocess(device_t dev, struct cryptkop *krp, int hint);
++static int safe_kstart(struct safe_softc *sc);
++static int safe_ksigbits(struct safe_softc *sc, struct crparam *cr);
++static void safe_kfeed(struct safe_softc *sc);
++static void safe_kpoll(unsigned long arg);
++static void safe_kload_reg(struct safe_softc *sc, u_int32_t off,
++								u_int32_t len, struct crparam *n);
++
++static	int safe_newsession(device_t, u_int32_t *, struct cryptoini *);
++static	int safe_freesession(device_t, u_int64_t);
++static	int safe_process(device_t, struct cryptop *, int);
++
++static device_method_t safe_methods = {
++	/* crypto device methods */
++	DEVMETHOD(cryptodev_newsession,	safe_newsession),
++	DEVMETHOD(cryptodev_freesession,safe_freesession),
++	DEVMETHOD(cryptodev_process,	safe_process),
++	DEVMETHOD(cryptodev_kprocess,	safe_kprocess),
++};
++
++#define	READ_REG(sc,r)			readl((sc)->sc_base_addr + (r))
++#define WRITE_REG(sc,r,val)		writel((val), (sc)->sc_base_addr + (r))
++
++#define SAFE_MAX_CHIPS 8
++static struct safe_softc *safe_chip_idx[SAFE_MAX_CHIPS];
++
++/*
++ * split our buffers up into safe DMAable byte fragments to avoid lockup
++ * bug in 1141 HW on rev 1.0.
++ */
++
++static int
++pci_map_linear(
++	struct safe_softc *sc,
++	struct safe_operand *buf,
++	void *addr,
++	int len)
++{
++	dma_addr_t tmp;
++	int chunk, tlen = len;
++
++	tmp = pci_map_single(sc->sc_pcidev, addr, len, PCI_DMA_BIDIRECTIONAL);
++
++	buf->mapsize += len;
++	while (len > 0) {
++		chunk = (len > sc->sc_max_dsize) ? sc->sc_max_dsize : len;
++		buf->segs[buf->nsegs].ds_addr = tmp;
++		buf->segs[buf->nsegs].ds_len  = chunk;
++		buf->segs[buf->nsegs].ds_tlen = tlen;
++		buf->nsegs++;
++		tmp  += chunk;
++		len  -= chunk;
++		tlen = 0;
++	}
++	return 0;
++}
++
++/*
++ * map in a given uio buffer (great on some arches :-)
++ */
++
++static int
++pci_map_uio(struct safe_softc *sc, struct safe_operand *buf, struct uio *uio)
++{
++	struct iovec *iov = uio->uio_iov;
++	int n;
++
++	DPRINTF(("%s()\n", __FUNCTION__));
++
++	buf->mapsize = 0;
++	buf->nsegs = 0;
++
++	for (n = 0; n < uio->uio_iovcnt; n++) {
++		pci_map_linear(sc, buf, iov->iov_base, iov->iov_len);
++		iov++;
++	}
++
++	/* identify this buffer by the first segment */
++	buf->map = (void *) buf->segs[0].ds_addr;
++	return(0);
++}
++
++/*
++ * map in a given sk_buff
++ */
++
++static int
++pci_map_skb(struct safe_softc *sc,struct safe_operand *buf,struct sk_buff *skb)
++{
++	int i;
++
++	DPRINTF(("%s()\n", __FUNCTION__));
++
++	buf->mapsize = 0;
++	buf->nsegs = 0;
++
++	pci_map_linear(sc, buf, skb->data, skb_headlen(skb));
++
++	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
++		pci_map_linear(sc, buf,
++				page_address(skb_shinfo(skb)->frags[i].page) +
++				                        skb_shinfo(skb)->frags[i].page_offset,
++				skb_shinfo(skb)->frags[i].size);
++	}
++
++	/* identify this buffer by the first segment */
++	buf->map = (void *) buf->segs[0].ds_addr;
++	return(0);
++}
++
++
++#if 0 /* not needed at this time */
++static void
++pci_sync_operand(struct safe_softc *sc, struct safe_operand *buf)
++{
++	int i;
++
++	DPRINTF(("%s()\n", __FUNCTION__));
++	for (i = 0; i < buf->nsegs; i++)
++		pci_dma_sync_single_for_cpu(sc->sc_pcidev, buf->segs[i].ds_addr,
++				buf->segs[i].ds_len, PCI_DMA_BIDIRECTIONAL);
++}
++#endif
++
++static void
++pci_unmap_operand(struct safe_softc *sc, struct safe_operand *buf)
++{
++	int i;
++	DPRINTF(("%s()\n", __FUNCTION__));
++	for (i = 0; i < buf->nsegs; i++) {
++		if (buf->segs[i].ds_tlen) {
++			DPRINTF(("%s - unmap %d 0x%x %d\n", __FUNCTION__, i, buf->segs[i].ds_addr, buf->segs[i].ds_tlen));
++			pci_unmap_single(sc->sc_pcidev, buf->segs[i].ds_addr,
++					buf->segs[i].ds_tlen, PCI_DMA_BIDIRECTIONAL);
++			DPRINTF(("%s - unmap %d 0x%x %d done\n", __FUNCTION__, i, buf->segs[i].ds_addr, buf->segs[i].ds_tlen));
++		}
++		buf->segs[i].ds_addr = 0;
++		buf->segs[i].ds_len = 0;
++		buf->segs[i].ds_tlen = 0;
++	}
++	buf->nsegs = 0;
++	buf->mapsize = 0;
++	buf->map = 0;
++}
++
++
++/*
++ * SafeXcel Interrupt routine
++ */
++static irqreturn_t
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
++safe_intr(int irq, void *arg)
++#else
++safe_intr(int irq, void *arg, struct pt_regs *regs)
++#endif
++{
++	struct safe_softc *sc = arg;
++	int stat;
++	unsigned long flags;
++
++	stat = READ_REG(sc, SAFE_HM_STAT);
++
++	DPRINTF(("%s(stat=0x%x)\n", __FUNCTION__, stat));
++
++	if (stat == 0)		/* shared irq, not for us */
++		return IRQ_NONE;
++
++	WRITE_REG(sc, SAFE_HI_CLR, stat);	/* IACK */
++
++	if ((stat & SAFE_INT_PE_DDONE)) {
++		/*
++		 * Descriptor(s) done; scan the ring and
++		 * process completed operations.
++		 */
++		spin_lock_irqsave(&sc->sc_ringmtx, flags);
++		while (sc->sc_back != sc->sc_front) {
++			struct safe_ringentry *re = sc->sc_back;
++
++#ifdef SAFE_DEBUG
++			if (debug) {
++				safe_dump_ringstate(sc, __func__);
++				safe_dump_request(sc, __func__, re);
++			}
++#endif
++			/*
++			 * safe_process marks ring entries that were allocated
++			 * but not used with a csr of zero.  This insures the
++			 * ring front pointer never needs to be set backwards
++			 * in the event that an entry is allocated but not used
++			 * because of a setup error.
++			 */
++			DPRINTF(("%s re->re_desc.d_csr=0x%x\n", __FUNCTION__, re->re_desc.d_csr));
++			if (re->re_desc.d_csr != 0) {
++				if (!SAFE_PE_CSR_IS_DONE(re->re_desc.d_csr)) {
++					DPRINTF(("%s !CSR_IS_DONE\n", __FUNCTION__));
++					break;
++				}
++				if (!SAFE_PE_LEN_IS_DONE(re->re_desc.d_len)) {
++					DPRINTF(("%s !LEN_IS_DONE\n", __FUNCTION__));
++					break;
++				}
++				sc->sc_nqchip--;
++				safe_callback(sc, re);
++			}
++			if (++(sc->sc_back) == sc->sc_ringtop)
++				sc->sc_back = sc->sc_ring;
++		}
++		spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
++	}
++
++	/*
++	 * Check to see if we got any DMA Error
++	 */
++	if (stat & SAFE_INT_PE_ERROR) {
++		printk("%s: dmaerr dmastat %08x\n", device_get_nameunit(sc->sc_dev),
++				(int)READ_REG(sc, SAFE_PE_DMASTAT));
++		safestats.st_dmaerr++;
++		safe_totalreset(sc);
++#if 0
++		safe_feed(sc);
++#endif
++	}
++
++	if (sc->sc_needwakeup) {		/* XXX check high watermark */
++		int wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ);
++		DPRINTF(("%s: wakeup crypto %x\n", __func__,
++			sc->sc_needwakeup));
++		sc->sc_needwakeup &= ~wakeup;
++		crypto_unblock(sc->sc_cid, wakeup);
++	}
++	
++	return IRQ_HANDLED;
++}
++
++/*
++ * safe_feed() - post a request to chip
++ */
++static void
++safe_feed(struct safe_softc *sc, struct safe_ringentry *re)
++{
++	DPRINTF(("%s()\n", __FUNCTION__));
++#ifdef SAFE_DEBUG
++	if (debug) {
++		safe_dump_ringstate(sc, __func__);
++		safe_dump_request(sc, __func__, re);
++	}
++#endif
++	sc->sc_nqchip++;
++	if (sc->sc_nqchip > safestats.st_maxqchip)
++		safestats.st_maxqchip = sc->sc_nqchip;
++	/* poke h/w to check descriptor ring, any value can be written */
++	WRITE_REG(sc, SAFE_HI_RD_DESCR, 0);
++}
++
++#define	N(a)	(sizeof(a) / sizeof (a[0]))
++static void
++safe_setup_enckey(struct safe_session *ses, caddr_t key)
++{
++	int i;
++
++	bcopy(key, ses->ses_key, ses->ses_klen / 8);
++
++	/* PE is little-endian, insure proper byte order */
++	for (i = 0; i < N(ses->ses_key); i++)
++		ses->ses_key[i] = htole32(ses->ses_key[i]);
++}
++
++static void
++safe_setup_mackey(struct safe_session *ses, int algo, caddr_t key, int klen)
++{
++#ifdef HMAC_HACK
++	MD5_CTX md5ctx;
++	SHA1_CTX sha1ctx;
++	int i;
++
++
++	for (i = 0; i < klen; i++)
++		key[i] ^= HMAC_IPAD_VAL;
++
++	if (algo == CRYPTO_MD5_HMAC) {
++		MD5Init(&md5ctx);
++		MD5Update(&md5ctx, key, klen);
++		MD5Update(&md5ctx, hmac_ipad_buffer, MD5_HMAC_BLOCK_LEN - klen);
++		bcopy(md5ctx.md5_st8, ses->ses_hminner, sizeof(md5ctx.md5_st8));
++	} else {
++		SHA1Init(&sha1ctx);
++		SHA1Update(&sha1ctx, key, klen);
++		SHA1Update(&sha1ctx, hmac_ipad_buffer,
++		    SHA1_HMAC_BLOCK_LEN - klen);
++		bcopy(sha1ctx.h.b32, ses->ses_hminner, sizeof(sha1ctx.h.b32));
++	}
++
++	for (i = 0; i < klen; i++)
++		key[i] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);
++
++	if (algo == CRYPTO_MD5_HMAC) {
++		MD5Init(&md5ctx);
++		MD5Update(&md5ctx, key, klen);
++		MD5Update(&md5ctx, hmac_opad_buffer, MD5_HMAC_BLOCK_LEN - klen);
++		bcopy(md5ctx.md5_st8, ses->ses_hmouter, sizeof(md5ctx.md5_st8));
++	} else {
++		SHA1Init(&sha1ctx);
++		SHA1Update(&sha1ctx, key, klen);
++		SHA1Update(&sha1ctx, hmac_opad_buffer,
++		    SHA1_HMAC_BLOCK_LEN - klen);
++		bcopy(sha1ctx.h.b32, ses->ses_hmouter, sizeof(sha1ctx.h.b32));
++	}
++
++	for (i = 0; i < klen; i++)
++		key[i] ^= HMAC_OPAD_VAL;
++
++#if 0
++	/*
++	 * this code prevents SHA working on a BE host,
++	 * so it is obviously wrong.  I think the byte
++	 * swap setup we do with the chip fixes this for us
++	 */
++
++	/* PE is little-endian, insure proper byte order */
++	for (i = 0; i < N(ses->ses_hminner); i++) {
++		ses->ses_hminner[i] = htole32(ses->ses_hminner[i]);
++		ses->ses_hmouter[i] = htole32(ses->ses_hmouter[i]);
++	}
++#endif
++#else /* HMAC_HACK */
++	printk("safe: md5/sha not implemented\n");
++#endif /* HMAC_HACK */
++}
++#undef N
++
++/*
++ * Allocate a new 'session' and return an encoded session id.  'sidp'
++ * contains our registration id, and should contain an encoded session
++ * id on successful allocation.
++ */
++static int
++safe_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
++{
++	struct safe_softc *sc = device_get_softc(dev);
++	struct cryptoini *c, *encini = NULL, *macini = NULL;
++	struct safe_session *ses = NULL;
++	int sesn;
++
++	DPRINTF(("%s()\n", __FUNCTION__));
++
++	if (sidp == NULL || cri == NULL || sc == NULL)
++		return (EINVAL);
++
++	for (c = cri; c != NULL; c = c->cri_next) {
++		if (c->cri_alg == CRYPTO_MD5_HMAC ||
++		    c->cri_alg == CRYPTO_SHA1_HMAC ||
++		    c->cri_alg == CRYPTO_NULL_HMAC) {
++			if (macini)
++				return (EINVAL);
++			macini = c;
++		} else if (c->cri_alg == CRYPTO_DES_CBC ||
++		    c->cri_alg == CRYPTO_3DES_CBC ||
++		    c->cri_alg == CRYPTO_AES_CBC ||
++		    c->cri_alg == CRYPTO_NULL_CBC) {
++			if (encini)
++				return (EINVAL);
++			encini = c;
++		} else
++			return (EINVAL);
++	}
++	if (encini == NULL && macini == NULL)
++		return (EINVAL);
++	if (encini) {			/* validate key length */
++		switch (encini->cri_alg) {
++		case CRYPTO_DES_CBC:
++			if (encini->cri_klen != 64)
++				return (EINVAL);
++			break;
++		case CRYPTO_3DES_CBC:
++			if (encini->cri_klen != 192)
++				return (EINVAL);
++			break;
++		case CRYPTO_AES_CBC:
++			if (encini->cri_klen != 128 &&
++			    encini->cri_klen != 192 &&
++			    encini->cri_klen != 256)
++				return (EINVAL);
++			break;
++		}
++	}
++
++	if (sc->sc_sessions == NULL) {
++		ses = sc->sc_sessions = (struct safe_session *)
++			kmalloc(sizeof(struct safe_session), SLAB_ATOMIC);
++		if (ses == NULL)
++			return (ENOMEM);
++		memset(ses, 0, sizeof(struct safe_session));
++		sesn = 0;
++		sc->sc_nsessions = 1;
++	} else {
++		for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
++			if (sc->sc_sessions[sesn].ses_used == 0) {
++				ses = &sc->sc_sessions[sesn];
++				break;
++			}
++		}
++
++		if (ses == NULL) {
++			sesn = sc->sc_nsessions;
++			ses = (struct safe_session *)
++				kmalloc((sesn + 1) * sizeof(struct safe_session), SLAB_ATOMIC);
++			if (ses == NULL)
++				return (ENOMEM);
++			memset(ses, 0, (sesn + 1) * sizeof(struct safe_session));
++			bcopy(sc->sc_sessions, ses, sesn *
++			    sizeof(struct safe_session));
++			bzero(sc->sc_sessions, sesn *
++			    sizeof(struct safe_session));
++			kfree(sc->sc_sessions);
++			sc->sc_sessions = ses;
++			ses = &sc->sc_sessions[sesn];
++			sc->sc_nsessions++;
++		}
++	}
++
++	bzero(ses, sizeof(struct safe_session));
++	ses->ses_used = 1;
++
++	if (encini) {
++		/* get an IV */
++		/* XXX may read fewer than requested */
++		read_random(ses->ses_iv, sizeof(ses->ses_iv));
++
++		ses->ses_klen = encini->cri_klen;
++		if (encini->cri_key != NULL)
++			safe_setup_enckey(ses, encini->cri_key);
++	}
++
++	if (macini) {
++		ses->ses_mlen = macini->cri_mlen;
++		if (ses->ses_mlen == 0) {
++			if (macini->cri_alg == CRYPTO_MD5_HMAC)
++				ses->ses_mlen = MD5_HASH_LEN;
++			else
++				ses->ses_mlen = SHA1_HASH_LEN;
++		}
++
++		if (macini->cri_key != NULL) {
++			safe_setup_mackey(ses, macini->cri_alg, macini->cri_key,
++			    macini->cri_klen / 8);
++		}
++	}
++
++	*sidp = SAFE_SID(device_get_unit(sc->sc_dev), sesn);
++	return (0);
++}
++
++/*
++ * Deallocate a session.
++ */
++static int
++safe_freesession(device_t dev, u_int64_t tid)
++{
++	struct safe_softc *sc = device_get_softc(dev);
++	int session, ret;
++	u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;
++
++	DPRINTF(("%s()\n", __FUNCTION__));
++
++	if (sc == NULL)
++		return (EINVAL);
++
++	session = SAFE_SESSION(sid);
++	if (session < sc->sc_nsessions) {
++		bzero(&sc->sc_sessions[session], sizeof(sc->sc_sessions[session]));
++		ret = 0;
++	} else
++		ret = EINVAL;
++	return (ret);
++}
++
++
++static int
++safe_process(device_t dev, struct cryptop *crp, int hint)
++{
++	struct safe_softc *sc = device_get_softc(dev);
++	int err = 0, i, nicealign, uniform;
++	struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
++	int bypass, oplen, ivsize;
++	caddr_t iv;
++	int16_t coffset;
++	struct safe_session *ses;
++	struct safe_ringentry *re;
++	struct safe_sarec *sa;
++	struct safe_pdesc *pd;
++	u_int32_t cmd0, cmd1, staterec;
++	unsigned long flags;
++
++	DPRINTF(("%s()\n", __FUNCTION__));
++
++	if (crp == NULL || crp->crp_callback == NULL || sc == NULL) {
++		safestats.st_invalid++;
++		return (EINVAL);
++	}
++	if (SAFE_SESSION(crp->crp_sid) >= sc->sc_nsessions) {
++		safestats.st_badsession++;
++		return (EINVAL);
++	}
++
++	spin_lock_irqsave(&sc->sc_ringmtx, flags);
++	if (sc->sc_front == sc->sc_back && sc->sc_nqchip != 0) {
++		safestats.st_ringfull++;
++		sc->sc_needwakeup |= CRYPTO_SYMQ;
++		spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
++		return (ERESTART);
++	}
++	re = sc->sc_front;
++
++	staterec = re->re_sa.sa_staterec;	/* save */
++	/* NB: zero everything but the PE descriptor */
++	bzero(&re->re_sa, sizeof(struct safe_ringentry) - sizeof(re->re_desc));
++	re->re_sa.sa_staterec = staterec;	/* restore */
++
++	re->re_crp = crp;
++	re->re_sesn = SAFE_SESSION(crp->crp_sid);
++
++	re->re_src.nsegs = 0;
++	re->re_dst.nsegs = 0;
++
++	if (crp->crp_flags & CRYPTO_F_SKBUF) {
++		re->re_src_skb = (struct sk_buff *)crp->crp_buf;
++		re->re_dst_skb = (struct sk_buff *)crp->crp_buf;
++	} else if (crp->crp_flags & CRYPTO_F_IOV) {
++		re->re_src_io = (struct uio *)crp->crp_buf;
++		re->re_dst_io = (struct uio *)crp->crp_buf;
++	} else {
++		safestats.st_badflags++;
++		err = EINVAL;
++		goto errout;	/* XXX we don't handle contiguous blocks! */
++	}
++
++	sa = &re->re_sa;
++	ses = &sc->sc_sessions[re->re_sesn];
++
++	crd1 = crp->crp_desc;
++	if (crd1 == NULL) {
++		safestats.st_nodesc++;
++		err = EINVAL;
++		goto errout;
++	}
++	crd2 = crd1->crd_next;
++
++	cmd0 = SAFE_SA_CMD0_BASIC;		/* basic group operation */
++	cmd1 = 0;
++	if (crd2 == NULL) {
++		if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
++		    crd1->crd_alg == CRYPTO_SHA1_HMAC ||
++		    crd1->crd_alg == CRYPTO_NULL_HMAC) {
++			maccrd = crd1;
++			enccrd = NULL;
++			cmd0 |= SAFE_SA_CMD0_OP_HASH;
++		} else if (crd1->crd_alg == CRYPTO_DES_CBC ||
++		    crd1->crd_alg == CRYPTO_3DES_CBC ||
++		    crd1->crd_alg == CRYPTO_AES_CBC ||
++		    crd1->crd_alg == CRYPTO_NULL_CBC) {
++			maccrd = NULL;
++			enccrd = crd1;
++			cmd0 |= SAFE_SA_CMD0_OP_CRYPT;
++		} else {
++			safestats.st_badalg++;
++			err = EINVAL;
++			goto errout;
++		}
++	} else {
++		if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
++		    crd1->crd_alg == CRYPTO_SHA1_HMAC ||
++		    crd1->crd_alg == CRYPTO_NULL_HMAC) &&
++		    (crd2->crd_alg == CRYPTO_DES_CBC ||
++			crd2->crd_alg == CRYPTO_3DES_CBC ||
++		        crd2->crd_alg == CRYPTO_AES_CBC ||
++		        crd2->crd_alg == CRYPTO_NULL_CBC) &&
++		    ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
++			maccrd = crd1;
++			enccrd = crd2;
++		} else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
++		    crd1->crd_alg == CRYPTO_3DES_CBC ||
++		    crd1->crd_alg == CRYPTO_AES_CBC ||
++		    crd1->crd_alg == CRYPTO_NULL_CBC) &&
++		    (crd2->crd_alg == CRYPTO_MD5_HMAC ||
++			crd2->crd_alg == CRYPTO_SHA1_HMAC ||
++			crd2->crd_alg == CRYPTO_NULL_HMAC) &&
++		    (crd1->crd_flags & CRD_F_ENCRYPT)) {
++			enccrd = crd1;
++			maccrd = crd2;
++		} else {
++			safestats.st_badalg++;
++			err = EINVAL;
++			goto errout;
++		}
++		cmd0 |= SAFE_SA_CMD0_OP_BOTH;
++	}
++
++	if (enccrd) {
++		if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT)
++			safe_setup_enckey(ses, enccrd->crd_key);
++
++		if (enccrd->crd_alg == CRYPTO_DES_CBC) {
++			cmd0 |= SAFE_SA_CMD0_DES;
++			cmd1 |= SAFE_SA_CMD1_CBC;
++			ivsize = 2*sizeof(u_int32_t);
++		} else if (enccrd->crd_alg == CRYPTO_3DES_CBC) {
++			cmd0 |= SAFE_SA_CMD0_3DES;
++			cmd1 |= SAFE_SA_CMD1_CBC;
++			ivsize = 2*sizeof(u_int32_t);
++		} else if (enccrd->crd_alg == CRYPTO_AES_CBC) {
++			cmd0 |= SAFE_SA_CMD0_AES;
++			cmd1 |= SAFE_SA_CMD1_CBC;
++			if (ses->ses_klen == 128)
++			     cmd1 |=  SAFE_SA_CMD1_AES128;
++			else if (ses->ses_klen == 192)
++			     cmd1 |=  SAFE_SA_CMD1_AES192;
++			else
++			     cmd1 |=  SAFE_SA_CMD1_AES256;
++			ivsize = 4*sizeof(u_int32_t);
++		} else {
++			cmd0 |= SAFE_SA_CMD0_CRYPT_NULL;
++			ivsize = 0;
++		}
++
++		/*
++		 * Setup encrypt/decrypt state.  When using basic ops
++		 * we can't use an inline IV because hash/crypt offset
++		 * must be from the end of the IV to the start of the
++		 * crypt data and this leaves out the preceding header
++		 * from the hash calculation.  Instead we place the IV
++		 * in the state record and set the hash/crypt offset to
++		 * copy both the header+IV.
++		 */
++		if (enccrd->crd_flags & CRD_F_ENCRYPT) {
++			cmd0 |= SAFE_SA_CMD0_OUTBOUND;
++
++			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
++				iv = enccrd->crd_iv;
++			else
++				iv = (caddr_t) ses->ses_iv;
++			if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) {
++				crypto_copyback(crp->crp_flags, crp->crp_buf,
++				    enccrd->crd_inject, ivsize, iv);
++			}
++			bcopy(iv, re->re_sastate.sa_saved_iv, ivsize);
++			/* make iv LE */
++			for (i = 0; i < ivsize/sizeof(re->re_sastate.sa_saved_iv[0]); i++)
++				re->re_sastate.sa_saved_iv[i] =
++					cpu_to_le32(re->re_sastate.sa_saved_iv[i]);
++			cmd0 |= SAFE_SA_CMD0_IVLD_STATE | SAFE_SA_CMD0_SAVEIV;
++			re->re_flags |= SAFE_QFLAGS_COPYOUTIV;
++		} else {
++			cmd0 |= SAFE_SA_CMD0_INBOUND;
++
++			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) {
++				bcopy(enccrd->crd_iv,
++					re->re_sastate.sa_saved_iv, ivsize);
++			} else {
++				crypto_copydata(crp->crp_flags, crp->crp_buf,
++				    enccrd->crd_inject, ivsize,
++				    (caddr_t)re->re_sastate.sa_saved_iv);
++			}
++			/* make iv LE */
++			for (i = 0; i < ivsize/sizeof(re->re_sastate.sa_saved_iv[0]); i++)
++				re->re_sastate.sa_saved_iv[i] =
++					cpu_to_le32(re->re_sastate.sa_saved_iv[i]);
++			cmd0 |= SAFE_SA_CMD0_IVLD_STATE;
++		}
++		/*
++		 * For basic encryption use the zero pad algorithm.
++		 * This pads results to an 8-byte boundary and
++		 * suppresses padding verification for inbound (i.e.
++		 * decrypt) operations.
++		 *
++		 * NB: Not sure if the 8-byte pad boundary is a problem.
++		 */
++		cmd0 |= SAFE_SA_CMD0_PAD_ZERO;
++
++		/* XXX assert key bufs have the same size */
++		bcopy(ses->ses_key, sa->sa_key, sizeof(sa->sa_key));
++	}
++
++	if (maccrd) {
++		if (maccrd->crd_flags & CRD_F_KEY_EXPLICIT) {
++			safe_setup_mackey(ses, maccrd->crd_alg,
++			    maccrd->crd_key, maccrd->crd_klen / 8);
++		}
++
++		if (maccrd->crd_alg == CRYPTO_MD5_HMAC) {
++			cmd0 |= SAFE_SA_CMD0_MD5;
++			cmd1 |= SAFE_SA_CMD1_HMAC;	/* NB: enable HMAC */
++		} else if (maccrd->crd_alg == CRYPTO_SHA1_HMAC) {
++			cmd0 |= SAFE_SA_CMD0_SHA1;
++			cmd1 |= SAFE_SA_CMD1_HMAC;	/* NB: enable HMAC */
++		} else {
++			cmd0 |= SAFE_SA_CMD0_HASH_NULL;
++		}
++		/*
++		 * Digest data is loaded from the SA and the hash
++		 * result is saved to the state block where we
++		 * retrieve it for return to the caller.
++		 */
++		/* XXX assert digest bufs have the same size */
++		bcopy(ses->ses_hminner, sa->sa_indigest,
++			sizeof(sa->sa_indigest));
++		bcopy(ses->ses_hmouter, sa->sa_outdigest,
++			sizeof(sa->sa_outdigest));
++
++		cmd0 |= SAFE_SA_CMD0_HSLD_SA | SAFE_SA_CMD0_SAVEHASH;
++		re->re_flags |= SAFE_QFLAGS_COPYOUTICV;
++	}
++
++	if (enccrd && maccrd) {
++		/*
++		 * The offset from hash data to the start of
++		 * crypt data is the difference in the skips.
++		 */
++		bypass = maccrd->crd_skip;
++		coffset = enccrd->crd_skip - maccrd->crd_skip;
++		if (coffset < 0) {
++			DPRINTF(("%s: hash does not precede crypt; "
++				"mac skip %u enc skip %u\n",
++				__func__, maccrd->crd_skip, enccrd->crd_skip));
++			safestats.st_skipmismatch++;
++			err = EINVAL;
++			goto errout;
++		}
++		oplen = enccrd->crd_skip + enccrd->crd_len;
++		if (maccrd->crd_skip + maccrd->crd_len != oplen) {
++			DPRINTF(("%s: hash amount %u != crypt amount %u\n",
++				__func__, maccrd->crd_skip + maccrd->crd_len,
++				oplen));
++			safestats.st_lenmismatch++;
++			err = EINVAL;
++			goto errout;
++		}
++#ifdef SAFE_DEBUG
++		if (debug) {
++			printf("mac: skip %d, len %d, inject %d\n",
++			    maccrd->crd_skip, maccrd->crd_len,
++			    maccrd->crd_inject);
++			printf("enc: skip %d, len %d, inject %d\n",
++			    enccrd->crd_skip, enccrd->crd_len,
++			    enccrd->crd_inject);
++			printf("bypass %d coffset %d oplen %d\n",
++				bypass, coffset, oplen);
++		}
++#endif
++		if (coffset & 3) {	/* offset must be 32-bit aligned */
++			DPRINTF(("%s: coffset %u misaligned\n",
++				__func__, coffset));
++			safestats.st_coffmisaligned++;
++			err = EINVAL;
++			goto errout;
++		}
++		coffset >>= 2;
++		if (coffset > 255) {	/* offset must be <256 dwords */
++			DPRINTF(("%s: coffset %u too big\n",
++				__func__, coffset));
++			safestats.st_cofftoobig++;
++			err = EINVAL;
++			goto errout;
++		}
++		/*
++		 * Tell the hardware to copy the header to the output.
++		 * The header is defined as the data from the end of
++		 * the bypass to the start of data to be encrypted. 
++		 * Typically this is the inline IV.  Note that you need
++		 * to do this even if src+dst are the same; it appears
++		 * that w/o this bit the crypted data is written
++		 * immediately after the bypass data.
++		 */
++		cmd1 |= SAFE_SA_CMD1_HDRCOPY;
++		/*
++		 * Disable IP header mutable bit handling.  This is
++		 * needed to get correct HMAC calculations.
++		 */
++		cmd1 |= SAFE_SA_CMD1_MUTABLE;
++	} else {
++		if (enccrd) {
++			bypass = enccrd->crd_skip;
++			oplen = bypass + enccrd->crd_len;
++		} else {
++			bypass = maccrd->crd_skip;
++			oplen = bypass + maccrd->crd_len;
++		}
++		coffset = 0;
++	}
++	/* XXX verify multiple of 4 when using s/g */
++	if (bypass > 96) {		/* bypass offset must be <= 96 bytes */
++		DPRINTF(("%s: bypass %u too big\n", __func__, bypass));
++		safestats.st_bypasstoobig++;
++		err = EINVAL;
++		goto errout;
++	}
++
++	if (crp->crp_flags & CRYPTO_F_SKBUF) {
++		if (pci_map_skb(sc, &re->re_src, re->re_src_skb)) {
++			safestats.st_noload++;
++			err = ENOMEM;
++			goto errout;
++		}
++	} else if (crp->crp_flags & CRYPTO_F_IOV) {
++		if (pci_map_uio(sc, &re->re_src, re->re_src_io)) {
++			safestats.st_noload++;
++			err = ENOMEM;
++			goto errout;
++		}
++	}
++	nicealign = safe_dmamap_aligned(sc, &re->re_src);
++	uniform = safe_dmamap_uniform(sc, &re->re_src);
++
++	DPRINTF(("src nicealign %u uniform %u nsegs %u\n",
++		nicealign, uniform, re->re_src.nsegs));
++	if (re->re_src.nsegs > 1) {
++		re->re_desc.d_src = sc->sc_spalloc.dma_paddr +
++			((caddr_t) sc->sc_spfree - (caddr_t) sc->sc_spring);
++		for (i = 0; i < re->re_src_nsegs; i++) {
++			/* NB: no need to check if there's space */
++			pd = sc->sc_spfree;
++			if (++(sc->sc_spfree) == sc->sc_springtop)
++				sc->sc_spfree = sc->sc_spring;
++
++			KASSERT((pd->pd_flags&3) == 0 ||
++				(pd->pd_flags&3) == SAFE_PD_DONE,
++				("bogus source particle descriptor; flags %x",
++				pd->pd_flags));
++			pd->pd_addr = re->re_src_segs[i].ds_addr;
++			pd->pd_size = re->re_src_segs[i].ds_len;
++			pd->pd_flags = SAFE_PD_READY;
++		}
++		cmd0 |= SAFE_SA_CMD0_IGATHER;
++	} else {
++		/*
++		 * No need for gather, reference the operand directly.
++		 */
++		re->re_desc.d_src = re->re_src_segs[0].ds_addr;
++	}
++
++	if (enccrd == NULL && maccrd != NULL) {
++		/*
++		 * Hash op; no destination needed.
++		 */
++	} else {
++		if (crp->crp_flags & (CRYPTO_F_IOV|CRYPTO_F_SKBUF)) {
++			if (!nicealign) {
++				safestats.st_iovmisaligned++;
++				err = EINVAL;
++				goto errout;
++			}
++			if (uniform != 1) {
++				device_printf(sc->sc_dev, "!uniform source\n");
++				if (!uniform) {
++					/*
++					 * There's no way to handle the DMA
++					 * requirements with this uio.  We
++					 * could create a separate DMA area for
++					 * the result and then copy it back,
++					 * but for now we just bail and return
++					 * an error.  Note that uio requests
++					 * > SAFE_MAX_DSIZE are handled because
++					 * the DMA map and segment list for the
++					 * destination wil result in a
++					 * destination particle list that does
++					 * the necessary scatter DMA.
++					 */ 
++					safestats.st_iovnotuniform++;
++					err = EINVAL;
++					goto errout;
++				}
++			} else
++				re->re_dst = re->re_src;
++		} else {
++			safestats.st_badflags++;
++			err = EINVAL;
++			goto errout;
++		}
++
++		if (re->re_dst.nsegs > 1) {
++			re->re_desc.d_dst = sc->sc_dpalloc.dma_paddr +
++			    ((caddr_t) sc->sc_dpfree - (caddr_t) sc->sc_dpring);
++			for (i = 0; i < re->re_dst_nsegs; i++) {
++				pd = sc->sc_dpfree;
++				KASSERT((pd->pd_flags&3) == 0 ||
++					(pd->pd_flags&3) == SAFE_PD_DONE,
++					("bogus dest particle descriptor; flags %x",
++						pd->pd_flags));
++				if (++(sc->sc_dpfree) == sc->sc_dpringtop)
++					sc->sc_dpfree = sc->sc_dpring;
++				pd->pd_addr = re->re_dst_segs[i].ds_addr;
++				pd->pd_flags = SAFE_PD_READY;
++			}
++			cmd0 |= SAFE_SA_CMD0_OSCATTER;
++		} else {
++			/*
++			 * No need for scatter, reference the operand directly.
++			 */
++			re->re_desc.d_dst = re->re_dst_segs[0].ds_addr;
++		}
++	}
++
++	/*
++	 * All done with setup; fillin the SA command words
++	 * and the packet engine descriptor.  The operation
++	 * is now ready for submission to the hardware.
++	 */
++	sa->sa_cmd0 = cmd0 | SAFE_SA_CMD0_IPCI | SAFE_SA_CMD0_OPCI;
++	sa->sa_cmd1 = cmd1
++		    | (coffset << SAFE_SA_CMD1_OFFSET_S)
++		    | SAFE_SA_CMD1_SAREV1	/* Rev 1 SA data structure */
++		    | SAFE_SA_CMD1_SRPCI
++		    ;
++	/*
++	 * NB: the order of writes is important here.  In case the
++	 * chip is scanning the ring because of an outstanding request
++	 * it might nab this one too.  In that case we need to make
++	 * sure the setup is complete before we write the length
++	 * field of the descriptor as it signals the descriptor is
++	 * ready for processing.
++	 */
++	re->re_desc.d_csr = SAFE_PE_CSR_READY | SAFE_PE_CSR_SAPCI;
++	if (maccrd)
++		re->re_desc.d_csr |= SAFE_PE_CSR_LOADSA | SAFE_PE_CSR_HASHFINAL;
++	wmb();
++	re->re_desc.d_len = oplen
++			  | SAFE_PE_LEN_READY
++			  | (bypass << SAFE_PE_LEN_BYPASS_S)
++			  ;
++
++	safestats.st_ipackets++;
++	safestats.st_ibytes += oplen;
++
++	if (++(sc->sc_front) == sc->sc_ringtop)
++		sc->sc_front = sc->sc_ring;
++
++	/* XXX honor batching */
++	safe_feed(sc, re);
++	spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
++	return (0);
++
++errout:
++	if (re->re_src.map != re->re_dst.map)
++		pci_unmap_operand(sc, &re->re_dst);
++	if (re->re_src.map)
++		pci_unmap_operand(sc, &re->re_src);
++	spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
++	if (err != ERESTART) {
++		crp->crp_etype = err;
++		crypto_done(crp);
++	} else {
++		sc->sc_needwakeup |= CRYPTO_SYMQ;
++	}
++	return (err);
++}
++
++static void
++safe_callback(struct safe_softc *sc, struct safe_ringentry *re)
++{
++	struct cryptop *crp = (struct cryptop *)re->re_crp;
++	struct cryptodesc *crd;
++
++	DPRINTF(("%s()\n", __FUNCTION__));
++
++	safestats.st_opackets++;
++	safestats.st_obytes += re->re_dst.mapsize;
++
++	if (re->re_desc.d_csr & SAFE_PE_CSR_STATUS) {
++		device_printf(sc->sc_dev, "csr 0x%x cmd0 0x%x cmd1 0x%x\n",
++			re->re_desc.d_csr,
++			re->re_sa.sa_cmd0, re->re_sa.sa_cmd1);
++		safestats.st_peoperr++;
++		crp->crp_etype = EIO;		/* something more meaningful? */
++	}
++
++	if (re->re_dst.map != NULL && re->re_dst.map != re->re_src.map)
++		pci_unmap_operand(sc, &re->re_dst);
++	pci_unmap_operand(sc, &re->re_src);
++
++	/* 
++	 * If result was written to a differet mbuf chain, swap
++	 * it in as the return value and reclaim the original.
++	 */
++	if ((crp->crp_flags & CRYPTO_F_SKBUF) && re->re_src_skb != re->re_dst_skb) {
++		device_printf(sc->sc_dev, "no CRYPTO_F_SKBUF swapping support\n");
++		/* kfree_skb(skb) */
++		/* crp->crp_buf = (caddr_t)re->re_dst_skb */
++		return;
++	}
++
++	if (re->re_flags & SAFE_QFLAGS_COPYOUTIV) {
++		/* copy out IV for future use */
++		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
++			int i;
++			int ivsize;
++
++			if (crd->crd_alg == CRYPTO_DES_CBC ||
++			    crd->crd_alg == CRYPTO_3DES_CBC) {
++				ivsize = 2*sizeof(u_int32_t);
++			} else if (crd->crd_alg == CRYPTO_AES_CBC) {
++				ivsize = 4*sizeof(u_int32_t);
++			} else
++				continue;
++			crypto_copydata(crp->crp_flags, crp->crp_buf,
++			    crd->crd_skip + crd->crd_len - ivsize, ivsize,
++			    (caddr_t)sc->sc_sessions[re->re_sesn].ses_iv);
++			for (i = 0;
++					i < ivsize/sizeof(sc->sc_sessions[re->re_sesn].ses_iv[0]);
++					i++)
++				sc->sc_sessions[re->re_sesn].ses_iv[i] =
++					cpu_to_le32(sc->sc_sessions[re->re_sesn].ses_iv[i]);
++			break;
++		}
++	}
++
++	if (re->re_flags & SAFE_QFLAGS_COPYOUTICV) {
++		/* copy out ICV result */
++		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
++			if (!(crd->crd_alg == CRYPTO_MD5_HMAC ||
++			    crd->crd_alg == CRYPTO_SHA1_HMAC ||
++			    crd->crd_alg == CRYPTO_NULL_HMAC))
++				continue;
++			if (crd->crd_alg == CRYPTO_SHA1_HMAC) {
++				/*
++				 * SHA-1 ICV's are byte-swapped; fix 'em up
++				 * before copy them to their destination.
++				 */
++				re->re_sastate.sa_saved_indigest[0] =
++					cpu_to_be32(re->re_sastate.sa_saved_indigest[0]);
++				re->re_sastate.sa_saved_indigest[1] = 
++					cpu_to_be32(re->re_sastate.sa_saved_indigest[1]);
++				re->re_sastate.sa_saved_indigest[2] =
++					cpu_to_be32(re->re_sastate.sa_saved_indigest[2]);
++			} else {
++				re->re_sastate.sa_saved_indigest[0] =
++					cpu_to_le32(re->re_sastate.sa_saved_indigest[0]);
++				re->re_sastate.sa_saved_indigest[1] = 
++					cpu_to_le32(re->re_sastate.sa_saved_indigest[1]);
++				re->re_sastate.sa_saved_indigest[2] =
++					cpu_to_le32(re->re_sastate.sa_saved_indigest[2]);
++			}
++			crypto_copyback(crp->crp_flags, crp->crp_buf,
++			    crd->crd_inject,
++			    sc->sc_sessions[re->re_sesn].ses_mlen,
++			    (caddr_t)re->re_sastate.sa_saved_indigest);
++			break;
++		}
++	}
++	crypto_done(crp);
++}
++
++
++#if defined(CONFIG_OCF_RANDOMHARVEST) && !defined(SAFE_NO_RNG)
++#define	SAFE_RNG_MAXWAIT	1000
++
++static void
++safe_rng_init(struct safe_softc *sc)
++{
++	u_int32_t w, v;
++	int i;
++
++	DPRINTF(("%s()\n", __FUNCTION__));
++
++	WRITE_REG(sc, SAFE_RNG_CTRL, 0);
++	/* use default value according to the manual */
++	WRITE_REG(sc, SAFE_RNG_CNFG, 0x834);	/* magic from SafeNet */
++	WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);
++
++	/*
++	 * There is a bug in rev 1.0 of the 1140 that when the RNG
++	 * is brought out of reset the ready status flag does not
++	 * work until the RNG has finished its internal initialization.
++	 *
++	 * So in order to determine the device is through its
++	 * initialization we must read the data register, using the
++	 * status reg in the read in case it is initialized.  Then read
++	 * the data register until it changes from the first read.
++	 * Once it changes read the data register until it changes
++	 * again.  At this time the RNG is considered initialized. 
++	 * This could take between 750ms - 1000ms in time.
++	 */
++	i = 0;
++	w = READ_REG(sc, SAFE_RNG_OUT);
++	do {
++		v = READ_REG(sc, SAFE_RNG_OUT);
++		if (v != w) {
++			w = v;
++			break;
++		}
++		DELAY(10);
++	} while (++i < SAFE_RNG_MAXWAIT);
++
++	/* Wait Until data changes again */
++	i = 0;
++	do {
++		v = READ_REG(sc, SAFE_RNG_OUT);
++		if (v != w)
++			break;
++		DELAY(10);
++	} while (++i < SAFE_RNG_MAXWAIT);
++}
++
++static __inline void
++safe_rng_disable_short_cycle(struct safe_softc *sc)
++{
++	DPRINTF(("%s()\n", __FUNCTION__));
++
++	WRITE_REG(sc, SAFE_RNG_CTRL,
++		READ_REG(sc, SAFE_RNG_CTRL) &~ SAFE_RNG_CTRL_SHORTEN);
++}
++
++static __inline void
++safe_rng_enable_short_cycle(struct safe_softc *sc)
++{
++	DPRINTF(("%s()\n", __FUNCTION__));
++
++	WRITE_REG(sc, SAFE_RNG_CTRL, 
++		READ_REG(sc, SAFE_RNG_CTRL) | SAFE_RNG_CTRL_SHORTEN);
++}
++
++static __inline u_int32_t
++safe_rng_read(struct safe_softc *sc)
++{
++	int i;
++
++	i = 0;
++	while (READ_REG(sc, SAFE_RNG_STAT) != 0 && ++i < SAFE_RNG_MAXWAIT)
++		;
++	return READ_REG(sc, SAFE_RNG_OUT);
++}
++
++static int
++safe_read_random(void *arg, u_int32_t *buf, int maxwords)
++{
++	struct safe_softc *sc = (struct safe_softc *) arg;
++	int i, rc;
++
++	DPRINTF(("%s()\n", __FUNCTION__));
++	
++	safestats.st_rng++;
++	/*
++	 * Fetch the next block of data.
++	 */
++	if (maxwords > safe_rngbufsize)
++		maxwords = safe_rngbufsize;
++	if (maxwords > SAFE_RNG_MAXBUFSIZ)
++		maxwords = SAFE_RNG_MAXBUFSIZ;
++retry:
++	/* read as much as we can */
++	for (rc = 0; rc < maxwords; rc++) {
++		if (READ_REG(sc, SAFE_RNG_STAT) != 0)
++			break;
++		buf[rc] = READ_REG(sc, SAFE_RNG_OUT);
++	}
++	if (rc == 0)
++		return 0;
++	/*
++	 * Check the comparator alarm count and reset the h/w if
++	 * it exceeds our threshold.  This guards against the
++	 * hardware oscillators resonating with external signals.
++	 */
++	if (READ_REG(sc, SAFE_RNG_ALM_CNT) > safe_rngmaxalarm) {
++		u_int32_t freq_inc, w;
++
++		DPRINTF(("%s: alarm count %u exceeds threshold %u\n", __func__,
++			(unsigned)READ_REG(sc, SAFE_RNG_ALM_CNT), safe_rngmaxalarm));
++		safestats.st_rngalarm++;
++		safe_rng_enable_short_cycle(sc);
++		freq_inc = 18;
++		for (i = 0; i < 64; i++) {
++			w = READ_REG(sc, SAFE_RNG_CNFG);
++			freq_inc = ((w + freq_inc) & 0x3fL);
++			w = ((w & ~0x3fL) | freq_inc);
++			WRITE_REG(sc, SAFE_RNG_CNFG, w);
++
++			WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);
++
++			(void) safe_rng_read(sc);
++			DELAY(25);
++
++			if (READ_REG(sc, SAFE_RNG_ALM_CNT) == 0) {
++				safe_rng_disable_short_cycle(sc);
++				goto retry;
++			}
++			freq_inc = 1;
++		}
++		safe_rng_disable_short_cycle(sc);
++	} else
++		WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);
++
++	return(rc);
++}
++#endif /* defined(CONFIG_OCF_RANDOMHARVEST) && !defined(SAFE_NO_RNG) */
++
++
++/*
++ * Resets the board.  Values in the regesters are left as is
++ * from the reset (i.e. initial values are assigned elsewhere).
++ */
++static void
++safe_reset_board(struct safe_softc *sc)
++{
++	u_int32_t v;
++	/*
++	 * Reset the device.  The manual says no delay
++	 * is needed between marking and clearing reset.
++	 */
++	DPRINTF(("%s()\n", __FUNCTION__));
++
++	v = READ_REG(sc, SAFE_PE_DMACFG) &~
++		(SAFE_PE_DMACFG_PERESET | SAFE_PE_DMACFG_PDRRESET |
++		 SAFE_PE_DMACFG_SGRESET);
++	WRITE_REG(sc, SAFE_PE_DMACFG, v
++				    | SAFE_PE_DMACFG_PERESET
++				    | SAFE_PE_DMACFG_PDRRESET
++				    | SAFE_PE_DMACFG_SGRESET);
++	WRITE_REG(sc, SAFE_PE_DMACFG, v);
++}
++
++/*
++ * Initialize registers we need to touch only once.
++ */
++static void
++safe_init_board(struct safe_softc *sc)
++{
++	u_int32_t v, dwords;
++
++	DPRINTF(("%s()\n", __FUNCTION__));
++
++	v = READ_REG(sc, SAFE_PE_DMACFG);
++	v &=~ (   SAFE_PE_DMACFG_PEMODE
++			| SAFE_PE_DMACFG_FSENA		/* failsafe enable */
++			| SAFE_PE_DMACFG_GPRPCI		/* gather ring on PCI */
++			| SAFE_PE_DMACFG_SPRPCI		/* scatter ring on PCI */
++			| SAFE_PE_DMACFG_ESDESC		/* endian-swap descriptors */
++			| SAFE_PE_DMACFG_ESPDESC	/* endian-swap part. desc's */
++			| SAFE_PE_DMACFG_ESSA		/* endian-swap SA's */
++			| SAFE_PE_DMACFG_ESPACKET	/* swap the packet data */
++		  );
++	v |= SAFE_PE_DMACFG_FSENA		/* failsafe enable */
++	  |  SAFE_PE_DMACFG_GPRPCI		/* gather ring on PCI */
++	  |  SAFE_PE_DMACFG_SPRPCI		/* scatter ring on PCI */
++	  |  SAFE_PE_DMACFG_ESDESC		/* endian-swap descriptors */
++	  |  SAFE_PE_DMACFG_ESPDESC		/* endian-swap part. desc's */
++	  |  SAFE_PE_DMACFG_ESSA		/* endian-swap SA's */
++#if 0
++	  |  SAFE_PE_DMACFG_ESPACKET    /* swap the packet data */
++#endif
++	  ;
++	WRITE_REG(sc, SAFE_PE_DMACFG, v);
++
++#ifdef __BIG_ENDIAN
++	/* tell the safenet that we are 4321 and not 1234 */
++	WRITE_REG(sc, SAFE_ENDIAN, 0xe4e41b1b);
++#endif
++
++	if (sc->sc_chiprev == SAFE_REV(1,0)) {
++		/*
++		 * Avoid large PCI DMA transfers.  Rev 1.0 has a bug where
++		 * "target mode transfers" done while the chip is DMA'ing
++		 * >1020 bytes cause the hardware to lockup.  To avoid this
++		 * we reduce the max PCI transfer size and use small source
++		 * particle descriptors (<= 256 bytes).
++		 */
++		WRITE_REG(sc, SAFE_DMA_CFG, 256);
++		device_printf(sc->sc_dev,
++			"Reduce max DMA size to %u words for rev %u.%u WAR\n",
++			(unsigned) ((READ_REG(sc, SAFE_DMA_CFG)>>2) & 0xff),
++			(unsigned) SAFE_REV_MAJ(sc->sc_chiprev),
++			(unsigned) SAFE_REV_MIN(sc->sc_chiprev));
++		sc->sc_max_dsize = 256;
++	} else {
++		sc->sc_max_dsize = SAFE_MAX_DSIZE;
++	}
++
++	/* NB: operands+results are overlaid */
++	WRITE_REG(sc, SAFE_PE_PDRBASE, sc->sc_ringalloc.dma_paddr);
++	WRITE_REG(sc, SAFE_PE_RDRBASE, sc->sc_ringalloc.dma_paddr);
++	/*
++	 * Configure ring entry size and number of items in the ring.
++	 */
++	KASSERT((sizeof(struct safe_ringentry) % sizeof(u_int32_t)) == 0,
++		("PE ring entry not 32-bit aligned!"));
++	dwords = sizeof(struct safe_ringentry) / sizeof(u_int32_t);
++	WRITE_REG(sc, SAFE_PE_RINGCFG,
++		(dwords << SAFE_PE_RINGCFG_OFFSET_S) | SAFE_MAX_NQUEUE);
++	WRITE_REG(sc, SAFE_PE_RINGPOLL, 0);	/* disable polling */
++
++	WRITE_REG(sc, SAFE_PE_GRNGBASE, sc->sc_spalloc.dma_paddr);
++	WRITE_REG(sc, SAFE_PE_SRNGBASE, sc->sc_dpalloc.dma_paddr);
++	WRITE_REG(sc, SAFE_PE_PARTSIZE,
++		(SAFE_TOTAL_DPART<<16) | SAFE_TOTAL_SPART);
++	/*
++	 * NB: destination particles are fixed size.  We use
++	 *     an mbuf cluster and require all results go to
++	 *     clusters or smaller.
++	 */
++	WRITE_REG(sc, SAFE_PE_PARTCFG, sc->sc_max_dsize);
++
++	/* it's now safe to enable PE mode, do it */
++	WRITE_REG(sc, SAFE_PE_DMACFG, v | SAFE_PE_DMACFG_PEMODE);
++
++	/*
++	 * Configure hardware to use level-triggered interrupts and
++	 * to interrupt after each descriptor is processed.
++	 */
++	WRITE_REG(sc, SAFE_HI_CFG, SAFE_HI_CFG_LEVEL);
++	WRITE_REG(sc, SAFE_HI_CLR, 0xffffffff);
++	WRITE_REG(sc, SAFE_HI_DESC_CNT, 1);
++	WRITE_REG(sc, SAFE_HI_MASK, SAFE_INT_PE_DDONE | SAFE_INT_PE_ERROR);
++}
++
++
++/*
++ * Clean up after a chip crash.
++ * It is assumed that the caller in splimp()
++ */
++static void
++safe_cleanchip(struct safe_softc *sc)
++{
++	DPRINTF(("%s()\n", __FUNCTION__));
++
++	if (sc->sc_nqchip != 0) {
++		struct safe_ringentry *re = sc->sc_back;
++
++		while (re != sc->sc_front) {
++			if (re->re_desc.d_csr != 0)
++				safe_free_entry(sc, re);
++			if (++re == sc->sc_ringtop)
++				re = sc->sc_ring;
++		}
++		sc->sc_back = re;
++		sc->sc_nqchip = 0;
++	}
++}
++
++/*
++ * free a safe_q
++ * It is assumed that the caller is within splimp().
++ */
++static int
++safe_free_entry(struct safe_softc *sc, struct safe_ringentry *re)
++{
++	struct cryptop *crp;
++
++	DPRINTF(("%s()\n", __FUNCTION__));
++
++	/*
++	 * Free header MCR
++	 */
++	if ((re->re_dst_skb != NULL) && (re->re_src_skb != re->re_dst_skb))
++#ifdef NOTYET
++		m_freem(re->re_dst_m);
++#else
++		printk("%s,%d: SKB not supported\n", __FILE__, __LINE__);
++#endif
++
++	crp = (struct cryptop *)re->re_crp;
++	
++	re->re_desc.d_csr = 0;
++	
++	crp->crp_etype = EFAULT;
++	crypto_done(crp);
++	return(0);
++}
++
++/*
++ * Routine to reset the chip and clean up.
++ * It is assumed that the caller is in splimp()
++ */
++static void
++safe_totalreset(struct safe_softc *sc)
++{
++	DPRINTF(("%s()\n", __FUNCTION__));
++
++	safe_reset_board(sc);
++	safe_init_board(sc);
++	safe_cleanchip(sc);
++}
++
++/*
++ * Is the operand suitable aligned for direct DMA.  Each
++ * segment must be aligned on a 32-bit boundary and all
++ * but the last segment must be a multiple of 4 bytes.
++ */
++static int
++safe_dmamap_aligned(struct safe_softc *sc, const struct safe_operand *op)
++{
++	int i;
++
++	DPRINTF(("%s()\n", __FUNCTION__));
++
++	for (i = 0; i < op->nsegs; i++) {
++		if (op->segs[i].ds_addr & 3)
++			return (0);
++		if (i != (op->nsegs - 1) && (op->segs[i].ds_len & 3))
++			return (0);
++	}
++	return (1);
++}
++
++/*
++ * Is the operand suitable for direct DMA as the destination
++ * of an operation.  The hardware requires that each ``particle''
++ * but the last in an operation result have the same size.  We
++ * fix that size at SAFE_MAX_DSIZE bytes.  This routine returns
++ * 0 if some segment is not a multiple of of this size, 1 if all
++ * segments are exactly this size, or 2 if segments are at worst
++ * a multple of this size.
++ */
++static int
++safe_dmamap_uniform(struct safe_softc *sc, const struct safe_operand *op)
++{
++	int result = 1;
++
++	DPRINTF(("%s()\n", __FUNCTION__));
++
++	if (op->nsegs > 0) {
++		int i;
++
++		for (i = 0; i < op->nsegs-1; i++) {
++			if (op->segs[i].ds_len % sc->sc_max_dsize)
++				return (0);
++			if (op->segs[i].ds_len != sc->sc_max_dsize)
++				result = 2;
++		}
++	}
++	return (result);
++}
++
++static int
++safe_kprocess(device_t dev, struct cryptkop *krp, int hint)
++{
++	struct safe_softc *sc = device_get_softc(dev);
++	struct safe_pkq *q;
++	unsigned long flags;
++
++	DPRINTF(("%s()\n", __FUNCTION__));
++
++	if (sc == NULL) {
++		krp->krp_status = EINVAL;
++		goto err;
++	}
++
++	if (krp->krp_op != CRK_MOD_EXP) {
++		krp->krp_status = EOPNOTSUPP;
++		goto err;
++	}
++
++	q = (struct safe_pkq *) kmalloc(sizeof(*q), GFP_KERNEL);
++	if (q == NULL) {
++		krp->krp_status = ENOMEM;
++		goto err;
++	}
++	memset(q, 0, sizeof(*q));
++	q->pkq_krp = krp;
++	INIT_LIST_HEAD(&q->pkq_list);
++
++	spin_lock_irqsave(&sc->sc_pkmtx, flags);
++	list_add_tail(&q->pkq_list, &sc->sc_pkq);
++	safe_kfeed(sc);
++	spin_unlock_irqrestore(&sc->sc_pkmtx, flags);
++	return (0);
++
++err:
++	crypto_kdone(krp);
++	return (0);
++}
++
++#define	SAFE_CRK_PARAM_BASE	0
++#define	SAFE_CRK_PARAM_EXP	1
++#define	SAFE_CRK_PARAM_MOD	2
++
++static int
++safe_kstart(struct safe_softc *sc)
++{
++	struct cryptkop *krp = sc->sc_pkq_cur->pkq_krp;
++	int exp_bits, mod_bits, base_bits;
++	u_int32_t op, a_off, b_off, c_off, d_off;
++
++	DPRINTF(("%s()\n", __FUNCTION__));
++
++	if (krp->krp_iparams < 3 || krp->krp_oparams != 1) {
++		krp->krp_status = EINVAL;
++		return (1);
++	}
++
++	base_bits = safe_ksigbits(sc, &krp->krp_param[SAFE_CRK_PARAM_BASE]);
++	if (base_bits > 2048)
++		goto too_big;
++	if (base_bits <= 0)		/* 5. base not zero */
++		goto too_small;
++
++	exp_bits = safe_ksigbits(sc, &krp->krp_param[SAFE_CRK_PARAM_EXP]);
++	if (exp_bits > 2048)
++		goto too_big;
++	if (exp_bits <= 0)		/* 1. exponent word length > 0 */
++		goto too_small;		/* 4. exponent not zero */
++
++	mod_bits = safe_ksigbits(sc, &krp->krp_param[SAFE_CRK_PARAM_MOD]);
++	if (mod_bits > 2048)
++		goto too_big;
++	if (mod_bits <= 32)		/* 2. modulus word length > 1 */
++		goto too_small;		/* 8. MSW of modulus != zero */
++	if (mod_bits < exp_bits)	/* 3 modulus len >= exponent len */
++		goto too_small;
++	if ((krp->krp_param[SAFE_CRK_PARAM_MOD].crp_p[0] & 1) == 0)
++		goto bad_domain;	/* 6. modulus is odd */
++	if (mod_bits > krp->krp_param[krp->krp_iparams].crp_nbits)
++		goto too_small;		/* make sure result will fit */
++
++	/* 7. modulus > base */
++	if (mod_bits < base_bits)
++		goto too_small;
++	if (mod_bits == base_bits) {
++		u_int8_t *basep, *modp;
++		int i;
++
++		basep = krp->krp_param[SAFE_CRK_PARAM_BASE].crp_p +
++		    ((base_bits + 7) / 8) - 1;
++		modp = krp->krp_param[SAFE_CRK_PARAM_MOD].crp_p +
++		    ((mod_bits + 7) / 8) - 1;
++		
++		for (i = 0; i < (mod_bits + 7) / 8; i++, basep--, modp--) {
++			if (*modp < *basep)
++				goto too_small;
++			if (*modp > *basep)
++				break;
++		}
++	}
++
++	/* And on the 9th step, he rested. */
++
++	WRITE_REG(sc, SAFE_PK_A_LEN, (exp_bits + 31) / 32);
++	WRITE_REG(sc, SAFE_PK_B_LEN, (mod_bits + 31) / 32);
++	if (mod_bits > 1024) {
++		op = SAFE_PK_FUNC_EXP4;
++		a_off = 0x000;
++		b_off = 0x100;
++		c_off = 0x200;
++		d_off = 0x300;
++	} else {
++		op = SAFE_PK_FUNC_EXP16;
++		a_off = 0x000;
++		b_off = 0x080;
++		c_off = 0x100;
++		d_off = 0x180;
++	}
++	sc->sc_pk_reslen = b_off - a_off;
++	sc->sc_pk_resoff = d_off;
++
++	/* A is exponent, B is modulus, C is base, D is result */
++	safe_kload_reg(sc, a_off, b_off - a_off,
++	    &krp->krp_param[SAFE_CRK_PARAM_EXP]);
++	WRITE_REG(sc, SAFE_PK_A_ADDR, a_off >> 2);
++	safe_kload_reg(sc, b_off, b_off - a_off,
++	    &krp->krp_param[SAFE_CRK_PARAM_MOD]);
++	WRITE_REG(sc, SAFE_PK_B_ADDR, b_off >> 2);
++	safe_kload_reg(sc, c_off, b_off - a_off,
++	    &krp->krp_param[SAFE_CRK_PARAM_BASE]);
++	WRITE_REG(sc, SAFE_PK_C_ADDR, c_off >> 2);
++	WRITE_REG(sc, SAFE_PK_D_ADDR, d_off >> 2);
++
++	WRITE_REG(sc, SAFE_PK_FUNC, op | SAFE_PK_FUNC_RUN);
++
++	return (0);
++
++too_big:
++	krp->krp_status = E2BIG;
++	return (1);
++too_small:
++	krp->krp_status = ERANGE;
++	return (1);
++bad_domain:
++	krp->krp_status = EDOM;
++	return (1);
++}
++
++static int
++safe_ksigbits(struct safe_softc *sc, struct crparam *cr)
++{
++	u_int plen = (cr->crp_nbits + 7) / 8;
++	int i, sig = plen * 8;
++	u_int8_t c, *p = cr->crp_p;
++
++	DPRINTF(("%s()\n", __FUNCTION__));
++
++	for (i = plen - 1; i >= 0; i--) {
++		c = p[i];
++		if (c != 0) {
++			while ((c & 0x80) == 0) {
++				sig--;
++				c <<= 1;
++			}
++			break;
++		}
++		sig -= 8;
++	}
++	return (sig);
++}
++
++static void
++safe_kfeed(struct safe_softc *sc)
++{
++	struct safe_pkq *q, *tmp;
++
++	DPRINTF(("%s()\n", __FUNCTION__));
++
++	if (list_empty(&sc->sc_pkq) && sc->sc_pkq_cur == NULL)
++		return;
++	if (sc->sc_pkq_cur != NULL)
++		return;
++	list_for_each_entry_safe(q, tmp, &sc->sc_pkq, pkq_list) {
++		sc->sc_pkq_cur = q;
++		list_del(&q->pkq_list);
++		if (safe_kstart(sc) != 0) {
++			crypto_kdone(q->pkq_krp);
++			kfree(q);
++			sc->sc_pkq_cur = NULL;
++		} else {
++			/* op started, start polling */
++			mod_timer(&sc->sc_pkto, jiffies + 1);
++			break;
++		}
++	}
++}
++
++static void
++safe_kpoll(unsigned long arg)
++{
++	struct safe_softc *sc = NULL;
++	struct safe_pkq *q;
++	struct crparam *res;
++	int i;
++	u_int32_t buf[64];
++	unsigned long flags;
++
++	DPRINTF(("%s()\n", __FUNCTION__));
++
++	if (arg >= SAFE_MAX_CHIPS)
++		return;
++	sc = safe_chip_idx[arg];
++	if (!sc) {
++		DPRINTF(("%s() - bad callback\n", __FUNCTION__));
++		return;
++	}
++
++	spin_lock_irqsave(&sc->sc_pkmtx, flags);
++	if (sc->sc_pkq_cur == NULL)
++		goto out;
++	if (READ_REG(sc, SAFE_PK_FUNC) & SAFE_PK_FUNC_RUN) {
++		/* still running, check back later */
++		mod_timer(&sc->sc_pkto, jiffies + 1);
++		goto out;
++	}
++
++	q = sc->sc_pkq_cur;
++	res = &q->pkq_krp->krp_param[q->pkq_krp->krp_iparams];
++	bzero(buf, sizeof(buf));
++	bzero(res->crp_p, (res->crp_nbits + 7) / 8);
++	for (i = 0; i < sc->sc_pk_reslen >> 2; i++)
++		buf[i] = le32_to_cpu(READ_REG(sc, SAFE_PK_RAM_START +
++		    sc->sc_pk_resoff + (i << 2)));
++	bcopy(buf, res->crp_p, (res->crp_nbits + 7) / 8);
++	/*
++	 * reduce the bits that need copying if possible
++	 */
++	res->crp_nbits = min(res->crp_nbits,sc->sc_pk_reslen * 8);
++	res->crp_nbits = safe_ksigbits(sc, res);
++
++	for (i = SAFE_PK_RAM_START; i < SAFE_PK_RAM_END; i += 4)
++		WRITE_REG(sc, i, 0);
++
++	crypto_kdone(q->pkq_krp);
++	kfree(q);
++	sc->sc_pkq_cur = NULL;
++
++	safe_kfeed(sc);
++out:
++	spin_unlock_irqrestore(&sc->sc_pkmtx, flags);
++}
++
++static void
++safe_kload_reg(struct safe_softc *sc, u_int32_t off, u_int32_t len,
++    struct crparam *n)
++{
++	u_int32_t buf[64], i;
++
++	DPRINTF(("%s()\n", __FUNCTION__));
++
++	bzero(buf, sizeof(buf));
++	bcopy(n->crp_p, buf, (n->crp_nbits + 7) / 8);
++
++	for (i = 0; i < len >> 2; i++)
++		WRITE_REG(sc, SAFE_PK_RAM_START + off + (i << 2),
++		    cpu_to_le32(buf[i]));
++}
++
++#ifdef SAFE_DEBUG
++static void
++safe_dump_dmastatus(struct safe_softc *sc, const char *tag)
++{
++	printf("%s: ENDIAN 0x%x SRC 0x%x DST 0x%x STAT 0x%x\n"
++		, tag
++		, READ_REG(sc, SAFE_DMA_ENDIAN)
++		, READ_REG(sc, SAFE_DMA_SRCADDR)
++		, READ_REG(sc, SAFE_DMA_DSTADDR)
++		, READ_REG(sc, SAFE_DMA_STAT)
++	);
++}
++
++static void
++safe_dump_intrstate(struct safe_softc *sc, const char *tag)
++{
++	printf("%s: HI_CFG 0x%x HI_MASK 0x%x HI_DESC_CNT 0x%x HU_STAT 0x%x HM_STAT 0x%x\n"
++		, tag
++		, READ_REG(sc, SAFE_HI_CFG)
++		, READ_REG(sc, SAFE_HI_MASK)
++		, READ_REG(sc, SAFE_HI_DESC_CNT)
++		, READ_REG(sc, SAFE_HU_STAT)
++		, READ_REG(sc, SAFE_HM_STAT)
++	);
++}
++
++static void
++safe_dump_ringstate(struct safe_softc *sc, const char *tag)
++{
++	u_int32_t estat = READ_REG(sc, SAFE_PE_ERNGSTAT);
++
++	/* NB: assume caller has lock on ring */
++	printf("%s: ERNGSTAT %x (next %u) back %lu front %lu\n",
++		tag,
++		estat, (estat >> SAFE_PE_ERNGSTAT_NEXT_S),
++		(unsigned long)(sc->sc_back - sc->sc_ring),
++		(unsigned long)(sc->sc_front - sc->sc_ring));
++}
++
++static void
++safe_dump_request(struct safe_softc *sc, const char* tag, struct safe_ringentry *re)
++{
++	int ix, nsegs;
++
++	ix = re - sc->sc_ring;
++	printf("%s: %p (%u): csr %x src %x dst %x sa %x len %x\n"
++		, tag
++		, re, ix
++		, re->re_desc.d_csr
++		, re->re_desc.d_src
++		, re->re_desc.d_dst
++		, re->re_desc.d_sa
++		, re->re_desc.d_len
++	);
++	if (re->re_src.nsegs > 1) {
++		ix = (re->re_desc.d_src - sc->sc_spalloc.dma_paddr) /
++			sizeof(struct safe_pdesc);
++		for (nsegs = re->re_src.nsegs; nsegs; nsegs--) {
++			printf(" spd[%u] %p: %p size %u flags %x"
++				, ix, &sc->sc_spring[ix]
++				, (caddr_t)(uintptr_t) sc->sc_spring[ix].pd_addr
++				, sc->sc_spring[ix].pd_size
++				, sc->sc_spring[ix].pd_flags
++			);
++			if (sc->sc_spring[ix].pd_size == 0)
++				printf(" (zero!)");
++			printf("\n");
++			if (++ix == SAFE_TOTAL_SPART)
++				ix = 0;
++		}
++	}
++	if (re->re_dst.nsegs > 1) {
++		ix = (re->re_desc.d_dst - sc->sc_dpalloc.dma_paddr) /
++			sizeof(struct safe_pdesc);
++		for (nsegs = re->re_dst.nsegs; nsegs; nsegs--) {
++			printf(" dpd[%u] %p: %p flags %x\n"
++				, ix, &sc->sc_dpring[ix]
++				, (caddr_t)(uintptr_t) sc->sc_dpring[ix].pd_addr
++				, sc->sc_dpring[ix].pd_flags
++			);
++			if (++ix == SAFE_TOTAL_DPART)
++				ix = 0;
++		}
++	}
++	printf("sa: cmd0 %08x cmd1 %08x staterec %x\n",
++		re->re_sa.sa_cmd0, re->re_sa.sa_cmd1, re->re_sa.sa_staterec);
++	printf("sa: key %x %x %x %x %x %x %x %x\n"
++		, re->re_sa.sa_key[0]
++		, re->re_sa.sa_key[1]
++		, re->re_sa.sa_key[2]
++		, re->re_sa.sa_key[3]
++		, re->re_sa.sa_key[4]
++		, re->re_sa.sa_key[5]
++		, re->re_sa.sa_key[6]
++		, re->re_sa.sa_key[7]
++	);
++	printf("sa: indigest %x %x %x %x %x\n"
++		, re->re_sa.sa_indigest[0]
++		, re->re_sa.sa_indigest[1]
++		, re->re_sa.sa_indigest[2]
++		, re->re_sa.sa_indigest[3]
++		, re->re_sa.sa_indigest[4]
++	);
++	printf("sa: outdigest %x %x %x %x %x\n"
++		, re->re_sa.sa_outdigest[0]
++		, re->re_sa.sa_outdigest[1]
++		, re->re_sa.sa_outdigest[2]
++		, re->re_sa.sa_outdigest[3]
++		, re->re_sa.sa_outdigest[4]
++	);
++	printf("sr: iv %x %x %x %x\n"
++		, re->re_sastate.sa_saved_iv[0]
++		, re->re_sastate.sa_saved_iv[1]
++		, re->re_sastate.sa_saved_iv[2]
++		, re->re_sastate.sa_saved_iv[3]
++	);
++	printf("sr: hashbc %u indigest %x %x %x %x %x\n"
++		, re->re_sastate.sa_saved_hashbc
++		, re->re_sastate.sa_saved_indigest[0]
++		, re->re_sastate.sa_saved_indigest[1]
++		, re->re_sastate.sa_saved_indigest[2]
++		, re->re_sastate.sa_saved_indigest[3]
++		, re->re_sastate.sa_saved_indigest[4]
++	);
++}
++
++static void
++safe_dump_ring(struct safe_softc *sc, const char *tag)
++{
++	unsigned long flags;
++
++	spin_lock_irqsave(&sc->sc_ringmtx, flags);
++	printf("\nSafeNet Ring State:\n");
++	safe_dump_intrstate(sc, tag);
++	safe_dump_dmastatus(sc, tag);
++	safe_dump_ringstate(sc, tag);
++	if (sc->sc_nqchip) {
++		struct safe_ringentry *re = sc->sc_back;
++		do {
++			safe_dump_request(sc, tag, re);
++			if (++re == sc->sc_ringtop)
++				re = sc->sc_ring;
++		} while (re != sc->sc_front);
++	}
++	spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
++}
++#endif /* SAFE_DEBUG */
++
++
++static int safe_probe(struct pci_dev *dev, const struct pci_device_id *ent)
++{
++	struct safe_softc *sc = NULL;
++	u32 mem_start, mem_len, cmd;
++	int i, rc, devinfo;
++	dma_addr_t raddr;
++	static int num_chips = 0;	/* number of chips attached so far (probe is serialized by PCI core) */
++
++	DPRINTF(("%s()\n", __FUNCTION__));
++
++	if (pci_enable_device(dev) < 0)
++		return(-ENODEV);
++
++	if (!dev->irq) {
++		printk("safe: found device with no IRQ assigned. check BIOS settings!");
++		pci_disable_device(dev);
++		return(-ENODEV);
++	}
++
++	if (pci_set_mwi(dev)) {
++		printk("safe: pci_set_mwi failed!");
++		return(-ENODEV);	/* NOTE(review): device left enabled (no pci_disable_device) on this path */
++	}
++
++	sc = (struct safe_softc *) kmalloc(sizeof(*sc), GFP_KERNEL);
++	if (!sc)
++		return(-ENOMEM);
++	memset(sc, 0, sizeof(*sc));
++
++	softc_device_init(sc, "safe", num_chips, safe_methods);
++
++	sc->sc_irq = -1;	/* -1 marks "not requested yet" for the error path below */
++	sc->sc_cid = -1;	/* likewise for the crypto driver id */
++	sc->sc_pcidev = dev;
++	if (num_chips < SAFE_MAX_CHIPS) {
++		safe_chip_idx[device_get_unit(sc->sc_dev)] = sc;
++		num_chips++;
++	}
++
++	INIT_LIST_HEAD(&sc->sc_pkq);
++	spin_lock_init(&sc->sc_pkmtx);
++
++	pci_set_drvdata(sc->sc_pcidev, sc);
++
++	/* we read its hardware registers as memory */
++	mem_start = pci_resource_start(sc->sc_pcidev, 0);
++	mem_len   = pci_resource_len(sc->sc_pcidev, 0);
++
++	sc->sc_base_addr = (ocf_iomem_t) ioremap(mem_start, mem_len);
++	if (!sc->sc_base_addr) {
++		device_printf(sc->sc_dev, "failed to ioremap 0x%x-0x%x\n",
++				mem_start, mem_start + mem_len - 1);
++		goto out;
++	}
++
++	/* fix up the bus size */
++	if (pci_set_dma_mask(sc->sc_pcidev, DMA_32BIT_MASK)) {
++		device_printf(sc->sc_dev, "No usable DMA configuration, aborting.\n");
++		goto out;
++	}
++	if (pci_set_consistent_dma_mask(sc->sc_pcidev, DMA_32BIT_MASK)) {
++		device_printf(sc->sc_dev, "No usable consistent DMA configuration, aborting.\n");
++		goto out;
++	}
++
++	pci_set_master(sc->sc_pcidev);
++
++	pci_read_config_dword(sc->sc_pcidev, PCI_COMMAND, &cmd);
++
++	if (!(cmd & PCI_COMMAND_MEMORY)) {
++		device_printf(sc->sc_dev, "failed to enable memory mapping\n");
++		goto out;
++	}
++
++	if (!(cmd & PCI_COMMAND_MASTER)) {
++		device_printf(sc->sc_dev, "failed to enable bus mastering\n");
++		goto out;
++	}
++
++	rc = request_irq(dev->irq, safe_intr, IRQF_SHARED, "safe", sc);
++	if (rc) {
++		device_printf(sc->sc_dev, "failed to hook irq %d\n", dev->irq);
++		goto out;
++	}
++	sc->sc_irq = dev->irq;
++
++	sc->sc_chiprev = READ_REG(sc, SAFE_DEVINFO) &
++			(SAFE_DEVINFO_REV_MAJ | SAFE_DEVINFO_REV_MIN);
++
++	/*
++	 * Allocate packet engine descriptors.
++	 */
++	sc->sc_ringalloc.dma_vaddr = pci_alloc_consistent(sc->sc_pcidev,
++			SAFE_MAX_NQUEUE * sizeof (struct safe_ringentry),
++			&sc->sc_ringalloc.dma_paddr);
++	if (!sc->sc_ringalloc.dma_vaddr) {
++		device_printf(sc->sc_dev, "cannot allocate PE descriptor ring\n");
++		goto out;
++	}
++
++	/*
++	 * Hookup the static portion of all our data structures.
++	 */
++	sc->sc_ring = (struct safe_ringentry *) sc->sc_ringalloc.dma_vaddr;
++	sc->sc_ringtop = sc->sc_ring + SAFE_MAX_NQUEUE;
++	sc->sc_front = sc->sc_ring;
++	sc->sc_back = sc->sc_ring;
++	raddr = sc->sc_ringalloc.dma_paddr;
++	bzero(sc->sc_ring, SAFE_MAX_NQUEUE * sizeof(struct safe_ringentry));
++	for (i = 0; i < SAFE_MAX_NQUEUE; i++) {
++		struct safe_ringentry *re = &sc->sc_ring[i];
++
++		re->re_desc.d_sa = raddr +
++			offsetof(struct safe_ringentry, re_sa);
++		re->re_sa.sa_staterec = raddr +
++			offsetof(struct safe_ringentry, re_sastate);
++
++		raddr += sizeof (struct safe_ringentry);
++	}
++	spin_lock_init(&sc->sc_ringmtx);
++
++	/*
++	 * Allocate scatter and gather particle descriptors.
++	 */
++	sc->sc_spalloc.dma_vaddr = pci_alloc_consistent(sc->sc_pcidev,
++			SAFE_TOTAL_SPART * sizeof (struct safe_pdesc),
++			&sc->sc_spalloc.dma_paddr);
++	if (!sc->sc_spalloc.dma_vaddr) {
++		device_printf(sc->sc_dev, "cannot allocate source particle descriptor ring\n");
++		goto out;
++	}
++	sc->sc_spring = (struct safe_pdesc *) sc->sc_spalloc.dma_vaddr;
++	sc->sc_springtop = sc->sc_spring + SAFE_TOTAL_SPART;
++	sc->sc_spfree = sc->sc_spring;
++	bzero(sc->sc_spring, SAFE_TOTAL_SPART * sizeof(struct safe_pdesc));
++
++	sc->sc_dpalloc.dma_vaddr = pci_alloc_consistent(sc->sc_pcidev,
++			SAFE_TOTAL_DPART * sizeof (struct safe_pdesc),
++			&sc->sc_dpalloc.dma_paddr);
++	if (!sc->sc_dpalloc.dma_vaddr) {
++		device_printf(sc->sc_dev, "cannot allocate destination particle descriptor ring\n");
++		goto out;
++	}
++	sc->sc_dpring = (struct safe_pdesc *) sc->sc_dpalloc.dma_vaddr;
++	sc->sc_dpringtop = sc->sc_dpring + SAFE_TOTAL_DPART;
++	sc->sc_dpfree = sc->sc_dpring;
++	bzero(sc->sc_dpring, SAFE_TOTAL_DPART * sizeof(struct safe_pdesc));
++
++	sc->sc_cid = crypto_get_driverid(softc_get_device(sc), CRYPTOCAP_F_HARDWARE);
++	if (sc->sc_cid < 0) {
++		device_printf(sc->sc_dev, "could not get crypto driver id\n");
++		goto out;
++	}
++
++	printf("%s:", device_get_nameunit(sc->sc_dev));
++
++	devinfo = READ_REG(sc, SAFE_DEVINFO);
++	if (devinfo & SAFE_DEVINFO_RNG) {
++		sc->sc_flags |= SAFE_FLAGS_RNG;
++		printf(" rng");
++	}
++	if (devinfo & SAFE_DEVINFO_PKEY) {
++		printf(" key");
++		sc->sc_flags |= SAFE_FLAGS_KEY;
++		crypto_kregister(sc->sc_cid, CRK_MOD_EXP, 0);
++#if 0
++		crypto_kregister(sc->sc_cid, CRK_MOD_EXP_CRT, 0);
++#endif
++		init_timer(&sc->sc_pkto);	/* NOTE(review): timer only initialized when PKEY present, but safe_remove() del_timer_sync()s unconditionally */
++		sc->sc_pkto.function = safe_kpoll;
++		sc->sc_pkto.data = (unsigned long) device_get_unit(sc->sc_dev);
++	}
++	if (devinfo & SAFE_DEVINFO_DES) {
++		printf(" des/3des");
++		crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
++		crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
++	}
++	if (devinfo & SAFE_DEVINFO_AES) {
++		printf(" aes");
++		crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
++	}
++	if (devinfo & SAFE_DEVINFO_MD5) {
++		printf(" md5");
++		crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
++	}
++	if (devinfo & SAFE_DEVINFO_SHA1) {
++		printf(" sha1");
++		crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
++	}
++	printf(" null");
++	crypto_register(sc->sc_cid, CRYPTO_NULL_CBC, 0, 0);
++	crypto_register(sc->sc_cid, CRYPTO_NULL_HMAC, 0, 0);
++	/* XXX other supported algorithms */
++	printf("\n");
++
++	safe_reset_board(sc);		/* reset h/w */
++	safe_init_board(sc);		/* init h/w */
++
++#if defined(CONFIG_OCF_RANDOMHARVEST) && !defined(SAFE_NO_RNG)
++	if (sc->sc_flags & SAFE_FLAGS_RNG) {
++		safe_rng_init(sc);
++		crypto_rregister(sc->sc_cid, safe_read_random, sc);
++	}
++#endif /* SAFE_NO_RNG */
++
++	return (0);
++
++out:	/* NOTE(review): sc_base_addr is never iounmap()ed and the PCI device stays enabled on this path */
++	if (sc->sc_cid >= 0)
++		crypto_unregister_all(sc->sc_cid);
++	if (sc->sc_irq != -1)
++		free_irq(sc->sc_irq, sc);
++	if (sc->sc_ringalloc.dma_vaddr)
++		pci_free_consistent(sc->sc_pcidev,
++				SAFE_MAX_NQUEUE * sizeof (struct safe_ringentry),
++				sc->sc_ringalloc.dma_vaddr, sc->sc_ringalloc.dma_paddr);
++	if (sc->sc_spalloc.dma_vaddr)
++		pci_free_consistent(sc->sc_pcidev,
++				SAFE_TOTAL_SPART * sizeof (struct safe_pdesc),
++				sc->sc_spalloc.dma_vaddr, sc->sc_spalloc.dma_paddr);
++	if (sc->sc_dpalloc.dma_vaddr)
++		pci_free_consistent(sc->sc_pcidev,
++				SAFE_TOTAL_DPART * sizeof (struct safe_pdesc),
++				sc->sc_dpalloc.dma_vaddr, sc->sc_dpalloc.dma_paddr);
++	kfree(sc);
++	return(-ENODEV);
++}
++
++static void safe_remove(struct pci_dev *dev)
++{
++	struct safe_softc *sc = pci_get_drvdata(dev);
++
++	DPRINTF(("%s()\n", __FUNCTION__));
++
++	/* XXX wait/abort active ops */
++
++	WRITE_REG(sc, SAFE_HI_MASK, 0);		/* disable interrupts */
++
++	del_timer_sync(&sc->sc_pkto);	/* NOTE(review): timer is only init_timer()ed in probe when PKEY is present — confirm this is safe otherwise */
++
++	crypto_unregister_all(sc->sc_cid);
++
++	safe_cleanchip(sc);
++
++	if (sc->sc_irq != -1)
++		free_irq(sc->sc_irq, sc);
++	if (sc->sc_ringalloc.dma_vaddr)
++		pci_free_consistent(sc->sc_pcidev,
++				SAFE_MAX_NQUEUE * sizeof (struct safe_ringentry),
++				sc->sc_ringalloc.dma_vaddr, sc->sc_ringalloc.dma_paddr);
++	if (sc->sc_spalloc.dma_vaddr)
++		pci_free_consistent(sc->sc_pcidev,
++				SAFE_TOTAL_SPART * sizeof (struct safe_pdesc),
++				sc->sc_spalloc.dma_vaddr, sc->sc_spalloc.dma_paddr);
++	if (sc->sc_dpalloc.dma_vaddr)
++		pci_free_consistent(sc->sc_pcidev,
++				SAFE_TOTAL_DPART * sizeof (struct safe_pdesc),
++				sc->sc_dpalloc.dma_vaddr, sc->sc_dpalloc.dma_paddr);
++	sc->sc_irq = -1;
++	sc->sc_ringalloc.dma_vaddr = NULL;
++	sc->sc_spalloc.dma_vaddr = NULL;
++	sc->sc_dpalloc.dma_vaddr = NULL;	/* NOTE(review): sc itself is never kfree()d and sc_base_addr never iounmap()ed — leaks on unbind */
++}
++
++static struct pci_device_id safe_pci_tbl[] = {
++	{ PCI_VENDOR_SAFENET, PCI_PRODUCT_SAFEXCEL,
++	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, },	/* match any subvendor/subdevice */
++	{ },					/* terminating entry */
++};
++MODULE_DEVICE_TABLE(pci, safe_pci_tbl);
++
++static struct pci_driver safe_driver = {
++	.name         = "safe",		/* same name passed to request_irq() in safe_probe() */
++	.id_table     = safe_pci_tbl,
++	.probe        =	safe_probe,
++	.remove       = safe_remove,
++	/* add PM stuff here one day */
++};
++
++static int __init safe_init (void)
++{
++	struct safe_softc *sc = NULL;	/* NOTE(review): unused in this function */
++	int rc;
++
++	DPRINTF(("%s(%p)\n", __FUNCTION__, safe_init));
++
++	rc = pci_register_driver(&safe_driver);
++	pci_register_driver_compat(&safe_driver, rc);	/* compat shim defined elsewhere — presumably adjusts rc semantics on old kernels */
++
++	return rc;
++}
++
++static void __exit safe_exit (void)
++{
++	pci_unregister_driver(&safe_driver);	/* PCI core will invoke safe_remove() for each bound device */
++}
++
++module_init(safe_init);
++module_exit(safe_exit);
++
++MODULE_LICENSE("BSD");	/* NOTE(review): "BSD" is not a kernel-recognized license string — module will taint; consider "Dual BSD/GPL" */
++MODULE_AUTHOR("David McCullough <david_mccullough@securecomputing.com>");
++MODULE_DESCRIPTION("OCF driver for safenet PCI crypto devices");
+diff -Nur linux-2.6.30.orig/crypto/ocf/safe/safereg.h linux-2.6.30/crypto/ocf/safe/safereg.h
+--- linux-2.6.30.orig/crypto/ocf/safe/safereg.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.30/crypto/ocf/safe/safereg.h	2009-06-11 10:55:27.000000000 +0200
+@@ -0,0 +1,421 @@
++/*-
++ * Copyright (c) 2003 Sam Leffler, Errno Consulting
++ * Copyright (c) 2003 Global Technology Associates, Inc.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ * 1. Redistributions of source code must retain the above copyright
++ *    notice, this list of conditions and the following disclaimer.
++ * 2. Redistributions in binary form must reproduce the above copyright
++ *    notice, this list of conditions and the following disclaimer in the
++ *    documentation and/or other materials provided with the distribution.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
++ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
++ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
++ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
++ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
++ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
++ * SUCH DAMAGE.
++ *
++ * $FreeBSD: src/sys/dev/safe/safereg.h,v 1.1 2003/07/21 21:46:07 sam Exp $
++ */
++#ifndef _SAFE_SAFEREG_H_
++#define	_SAFE_SAFEREG_H_
++
++/*
++ * Register definitions for SafeNet SafeXcel-1141 crypto device.
++ * Definitions from revision 1.3 (Nov 6 2002) of the User's Manual.
++ */
++
++#define BS_BAR			0x10	/* DMA base address register */
++#define	BS_TRDY_TIMEOUT		0x40	/* TRDY timeout */
++#define	BS_RETRY_TIMEOUT	0x41	/* DMA retry timeout */
++
++#define	PCI_VENDOR_SAFENET	0x16ae		/* SafeNet, Inc. */
++
++/* SafeNet */
++#define	PCI_PRODUCT_SAFEXCEL	0x1141		/* 1141 */
++
++#define	SAFE_PE_CSR		0x0000	/* Packet Engine Ctrl/Status */
++#define	SAFE_PE_SRC		0x0004	/* Packet Engine Source */
++#define	SAFE_PE_DST		0x0008	/* Packet Engine Destination */
++#define	SAFE_PE_SA		0x000c	/* Packet Engine SA */
++#define	SAFE_PE_LEN		0x0010	/* Packet Engine Length */
++#define	SAFE_PE_DMACFG		0x0040	/* Packet Engine DMA Configuration */
++#define	SAFE_PE_DMASTAT		0x0044	/* Packet Engine DMA Status */
++#define	SAFE_PE_PDRBASE		0x0048	/* Packet Engine Descriptor Ring Base */
++#define	SAFE_PE_RDRBASE		0x004c	/* Packet Engine Result Ring Base */
++#define	SAFE_PE_RINGCFG		0x0050	/* Packet Engine Ring Configuration */
++#define	SAFE_PE_RINGPOLL	0x0054	/* Packet Engine Ring Poll */
++#define	SAFE_PE_IRNGSTAT	0x0058	/* Packet Engine Internal Ring Status */
++#define	SAFE_PE_ERNGSTAT	0x005c	/* Packet Engine External Ring Status */
++#define	SAFE_PE_IOTHRESH	0x0060	/* Packet Engine I/O Threshold */
++#define	SAFE_PE_GRNGBASE	0x0064	/* Packet Engine Gather Ring Base */
++#define	SAFE_PE_SRNGBASE	0x0068	/* Packet Engine Scatter Ring Base */
++#define	SAFE_PE_PARTSIZE	0x006c	/* Packet Engine Particle Ring Size */
++#define	SAFE_PE_PARTCFG		0x0070	/* Packet Engine Particle Ring Config */
++#define	SAFE_CRYPTO_CTRL	0x0080	/* Crypto Control */
++#define	SAFE_DEVID		0x0084	/* Device ID */
++#define	SAFE_DEVINFO		0x0088	/* Device Info */
++#define	SAFE_HU_STAT		0x00a0	/* Host Unmasked Status */
++#define	SAFE_HM_STAT		0x00a4	/* Host Masked Status (read-only) */
++#define	SAFE_HI_CLR		0x00a4	/* Host Clear Interrupt (write-only) */
++#define	SAFE_HI_MASK		0x00a8	/* Host Mask Control */
++#define	SAFE_HI_CFG		0x00ac	/* Interrupt Configuration */
++#define	SAFE_HI_RD_DESCR	0x00b4	/* Force Descriptor Read */
++#define	SAFE_HI_DESC_CNT	0x00b8	/* Host Descriptor Done Count */
++#define	SAFE_DMA_ENDIAN		0x00c0	/* Master Endian Status */
++#define	SAFE_DMA_SRCADDR	0x00c4	/* DMA Source Address Status */
++#define	SAFE_DMA_DSTADDR	0x00c8	/* DMA Destination Address Status */
++#define	SAFE_DMA_STAT		0x00cc	/* DMA Current Status */
++#define	SAFE_DMA_CFG		0x00d4	/* DMA Configuration/Status */
++#define	SAFE_ENDIAN		0x00e0	/* Endian Configuration */
++#define	SAFE_PK_A_ADDR		0x0800	/* Public Key A Address */
++#define	SAFE_PK_B_ADDR		0x0804	/* Public Key B Address */
++#define	SAFE_PK_C_ADDR		0x0808	/* Public Key C Address */
++#define	SAFE_PK_D_ADDR		0x080c	/* Public Key D Address */
++#define	SAFE_PK_A_LEN		0x0810	/* Public Key A Length */
++#define	SAFE_PK_B_LEN		0x0814	/* Public Key B Length */
++#define	SAFE_PK_SHIFT		0x0818	/* Public Key Shift */
++#define	SAFE_PK_FUNC		0x081c	/* Public Key Function */
++#define SAFE_PK_RAM_START	0x1000	/* Public Key RAM start address */
++#define SAFE_PK_RAM_END		0x1fff	/* Public Key RAM end address */
++
++#define	SAFE_RNG_OUT		0x0100	/* RNG Output */
++#define	SAFE_RNG_STAT		0x0104	/* RNG Status */
++#define	SAFE_RNG_CTRL		0x0108	/* RNG Control */
++#define	SAFE_RNG_A		0x010c	/* RNG A */
++#define	SAFE_RNG_B		0x0110	/* RNG B */
++#define	SAFE_RNG_X_LO		0x0114	/* RNG X [31:0] */
++#define	SAFE_RNG_X_MID		0x0118	/* RNG X [63:32] */
++#define	SAFE_RNG_X_HI		0x011c	/* RNG X [80:64] */
++#define	SAFE_RNG_X_CNTR		0x0120	/* RNG Counter */
++#define	SAFE_RNG_ALM_CNT	0x0124	/* RNG Alarm Count */
++#define	SAFE_RNG_CNFG		0x0128	/* RNG Configuration */
++#define	SAFE_RNG_LFSR1_LO	0x012c	/* RNG LFSR1 [31:0] */
++#define	SAFE_RNG_LFSR1_HI	0x0130	/* RNG LFSR1 [47:32] */
++#define	SAFE_RNG_LFSR2_LO	0x0134	/* RNG LFSR2 [31:0] */
++#define	SAFE_RNG_LFSR2_HI	0x0138	/* RNG LFSR2 [47:32] */
++
++#define	SAFE_PE_CSR_READY	0x00000001	/* ready for processing */
++#define	SAFE_PE_CSR_DONE	0x00000002	/* h/w completed processing */
++#define	SAFE_PE_CSR_LOADSA	0x00000004	/* load SA digests */
++#define	SAFE_PE_CSR_HASHFINAL	0x00000010	/* do hash pad & write result */
++#define	SAFE_PE_CSR_SABUSID	0x000000c0	/* bus id for SA */
++#define	SAFE_PE_CSR_SAPCI	0x00000040	/* PCI bus id for SA */
++#define	SAFE_PE_CSR_NXTHDR	0x0000ff00	/* next hdr value for IPsec */
++#define	SAFE_PE_CSR_FPAD	0x0000ff00	/* fixed pad for basic ops */
++#define	SAFE_PE_CSR_STATUS	0x00ff0000	/* operation result status */
++#define	SAFE_PE_CSR_AUTH_FAIL	0x00010000	/* ICV mismatch (inbound) */
++#define	SAFE_PE_CSR_PAD_FAIL	0x00020000	/* pad verify fail (inbound) */
++#define	SAFE_PE_CSR_SEQ_FAIL	0x00040000	/* sequence number (inbound) */
++#define	SAFE_PE_CSR_XERROR	0x00080000	/* extended error follows */
++#define	SAFE_PE_CSR_XECODE	0x00f00000	/* extended error code */
++#define	SAFE_PE_CSR_XECODE_S	20
++#define	SAFE_PE_CSR_XECODE_BADCMD	0	/* invalid command */
++#define	SAFE_PE_CSR_XECODE_BADALG	1	/* invalid algorithm */
++#define	SAFE_PE_CSR_XECODE_ALGDIS	2	/* algorithm disabled */
++#define	SAFE_PE_CSR_XECODE_ZEROLEN	3	/* zero packet length */
++#define	SAFE_PE_CSR_XECODE_DMAERR	4	/* bus DMA error */
++#define	SAFE_PE_CSR_XECODE_PIPEABORT	5	/* secondary bus DMA error */
++#define	SAFE_PE_CSR_XECODE_BADSPI	6	/* IPsec SPI mismatch */
++#define	SAFE_PE_CSR_XECODE_TIMEOUT	10	/* failsafe timeout */
++#define	SAFE_PE_CSR_PAD		0xff000000	/* ESP padding control/status */
++#define	SAFE_PE_CSR_PAD_MIN	0x00000000	/* minimum IPsec padding */
++#define	SAFE_PE_CSR_PAD_16	0x08000000	/* pad to 16-byte boundary */
++#define	SAFE_PE_CSR_PAD_32	0x10000000	/* pad to 32-byte boundary */
++#define	SAFE_PE_CSR_PAD_64	0x20000000	/* pad to 64-byte boundary */
++#define	SAFE_PE_CSR_PAD_128	0x40000000	/* pad to 128-byte boundary */
++#define	SAFE_PE_CSR_PAD_256	0x80000000	/* pad to 256-byte boundary */
++
++/*
++ * Check the CSR to see if the PE has returned ownership to
++ * the host.  Note that before processing a descriptor this
++ * must be done followed by a check of the SAFE_PE_LEN register
++ * status bits to avoid premature processing of a descriptor
++ * on its way back to the host.
++ */
++#define	SAFE_PE_CSR_IS_DONE(_csr) \
++    (((_csr) & (SAFE_PE_CSR_READY | SAFE_PE_CSR_DONE)) == SAFE_PE_CSR_DONE)
++
++#define	SAFE_PE_LEN_LENGTH	0x000fffff	/* total length (bytes) */
++#define	SAFE_PE_LEN_READY	0x00400000	/* ready for processing */
++#define	SAFE_PE_LEN_DONE	0x00800000	/* h/w completed processing */
++#define	SAFE_PE_LEN_BYPASS	0xff000000	/* bypass offset (bytes) */
++#define	SAFE_PE_LEN_BYPASS_S	24
++
++#define	SAFE_PE_LEN_IS_DONE(_len) \
++    (((_len) & (SAFE_PE_LEN_READY | SAFE_PE_LEN_DONE)) == SAFE_PE_LEN_DONE)
++
++/* NB: these apply to HU_STAT, HM_STAT, HI_CLR, and HI_MASK */
++#define	SAFE_INT_PE_CDONE	0x00000002	/* PE context done */
++#define	SAFE_INT_PE_DDONE	0x00000008	/* PE descriptor done */
++#define	SAFE_INT_PE_ERROR	0x00000010	/* PE error */
++#define	SAFE_INT_PE_ODONE	0x00000020	/* PE operation done */
++
++#define	SAFE_HI_CFG_PULSE	0x00000001	/* use pulse interrupt */
++#define	SAFE_HI_CFG_LEVEL	0x00000000	/* use level interrupt */
++#define	SAFE_HI_CFG_AUTOCLR	0x00000002	/* auto-clear pulse interrupt */
++
++#define	SAFE_ENDIAN_PASS	0x000000e4	/* straight pass-thru */
++#define	SAFE_ENDIAN_SWAB	0x0000001b	/* swap bytes in 32-bit word */
++
++#define	SAFE_PE_DMACFG_PERESET	0x00000001	/* reset packet engine */
++#define	SAFE_PE_DMACFG_PDRRESET	0x00000002	/* reset PDR counters/ptrs */
++#define	SAFE_PE_DMACFG_SGRESET	0x00000004	/* reset scatter/gather cache */
++#define	SAFE_PE_DMACFG_FSENA	0x00000008	/* enable failsafe reset */
++#define	SAFE_PE_DMACFG_PEMODE	0x00000100	/* packet engine mode */
++#define	SAFE_PE_DMACFG_SAPREC	0x00000200	/* SA precedes packet */
++#define	SAFE_PE_DMACFG_PKFOLL	0x00000400	/* packet follows descriptor */
++#define	SAFE_PE_DMACFG_GPRBID	0x00003000	/* gather particle ring busid */
++#define	SAFE_PE_DMACFG_GPRPCI	0x00001000	/* PCI gather particle ring */
++#define	SAFE_PE_DMACFG_SPRBID	0x0000c000	/* scatter part. ring busid */
++#define	SAFE_PE_DMACFG_SPRPCI	0x00004000	/* PCI scatter part. ring */
++#define	SAFE_PE_DMACFG_ESDESC	0x00010000	/* endian swap descriptors */
++#define	SAFE_PE_DMACFG_ESSA	0x00020000	/* endian swap SA data */
++#define	SAFE_PE_DMACFG_ESPACKET	0x00040000	/* endian swap packet data */
++#define	SAFE_PE_DMACFG_ESPDESC	0x00080000	/* endian swap particle desc. */
++#define	SAFE_PE_DMACFG_NOPDRUP	0x00100000	/* supp. PDR ownership update */
++#define	SAFE_PD_EDMACFG_PCIMODE	0x01000000	/* PCI target mode */
++
++#define	SAFE_PE_DMASTAT_PEIDONE	0x00000001	/* PE core input done */
++#define	SAFE_PE_DMASTAT_PEODONE	0x00000002	/* PE core output done */
++#define	SAFE_PE_DMASTAT_ENCDONE	0x00000004	/* encryption done */
++#define	SAFE_PE_DMASTAT_IHDONE	0x00000008	/* inner hash done */
++#define	SAFE_PE_DMASTAT_OHDONE	0x00000010	/* outer hash (HMAC) done */
++#define	SAFE_PE_DMASTAT_PADFLT	0x00000020	/* crypto pad fault */
++#define	SAFE_PE_DMASTAT_ICVFLT	0x00000040	/* ICV fault */
++#define	SAFE_PE_DMASTAT_SPIMIS	0x00000080	/* SPI mismatch */
++#define	SAFE_PE_DMASTAT_CRYPTO	0x00000100	/* crypto engine timeout */
++#define	SAFE_PE_DMASTAT_CQACT	0x00000200	/* command queue active */
++#define	SAFE_PE_DMASTAT_IRACT	0x00000400	/* input request active */
++#define	SAFE_PE_DMASTAT_ORACT	0x00000800	/* output request active */
++#define	SAFE_PE_DMASTAT_PEISIZE	0x003ff000	/* PE input size:32-bit words */
++#define	SAFE_PE_DMASTAT_PEOSIZE	0xffc00000	/* PE out. size:32-bit words */
++
++#define	SAFE_PE_RINGCFG_SIZE	0x000003ff	/* ring size (descriptors) */
++#define	SAFE_PE_RINGCFG_OFFSET	0xffff0000	/* offset btw desc's (dwords) */
++#define	SAFE_PE_RINGCFG_OFFSET_S	16
++
++#define	SAFE_PE_RINGPOLL_POLL	0x00000fff	/* polling frequency/divisor */
++#define	SAFE_PE_RINGPOLL_RETRY	0x03ff0000	/* polling frequency/divisor */
++#define	SAFE_PE_RINGPOLL_CONT	0x80000000	/* continuously poll */
++
++#define	SAFE_PE_IRNGSTAT_CQAVAIL 0x00000001	/* command queue available */
++
++#define	SAFE_PE_ERNGSTAT_NEXT	0x03ff0000	/* index of next packet desc. */
++#define	SAFE_PE_ERNGSTAT_NEXT_S	16
++
++#define	SAFE_PE_IOTHRESH_INPUT	0x000003ff	/* input threshold (dwords) */
++#define	SAFE_PE_IOTHRESH_OUTPUT	0x03ff0000	/* output threshold (dwords) */
++
++#define	SAFE_PE_PARTCFG_SIZE	0x0000ffff	/* scatter particle size */
++#define	SAFE_PE_PARTCFG_GBURST	0x00030000	/* gather particle burst */
++#define	SAFE_PE_PARTCFG_GBURST_2	0x00000000
++#define	SAFE_PE_PARTCFG_GBURST_4	0x00010000
++#define	SAFE_PE_PARTCFG_GBURST_8	0x00020000
++#define	SAFE_PE_PARTCFG_GBURST_16	0x00030000
++#define	SAFE_PE_PARTCFG_SBURST	0x000c0000	/* scatter particle burst */
++#define	SAFE_PE_PARTCFG_SBURST_2	0x00000000
++#define	SAFE_PE_PARTCFG_SBURST_4	0x00040000
++#define	SAFE_PE_PARTCFG_SBURST_8	0x00080000
++#define	SAFE_PE_PARTCFG_SBURST_16	0x000c0000
++
++#define	SAFE_PE_PARTSIZE_SCAT	0xffff0000	/* scatter particle ring size */
++#define	SAFE_PE_PARTSIZE_GATH	0x0000ffff	/* gather particle ring size */
++
++#define	SAFE_CRYPTO_CTRL_3DES	0x00000001	/* enable 3DES support */
++#define	SAFE_CRYPTO_CTRL_PKEY	0x00010000	/* enable public key support */
++#define	SAFE_CRYPTO_CTRL_RNG	0x00020000	/* enable RNG support */
++
++#define	SAFE_DEVINFO_REV_MIN	0x0000000f	/* minor rev for chip */
++#define	SAFE_DEVINFO_REV_MAJ	0x000000f0	/* major rev for chip */
++#define	SAFE_DEVINFO_REV_MAJ_S	4
++#define	SAFE_DEVINFO_DES	0x00000100	/* DES/3DES support present */
++#define	SAFE_DEVINFO_ARC4	0x00000200	/* ARC4 support present */
++#define	SAFE_DEVINFO_AES	0x00000400	/* AES support present */
++#define	SAFE_DEVINFO_MD5	0x00001000	/* MD5 support present */
++#define	SAFE_DEVINFO_SHA1	0x00002000	/* SHA-1 support present */
++#define	SAFE_DEVINFO_RIPEMD	0x00004000	/* RIPEMD support present */
++#define	SAFE_DEVINFO_DEFLATE	0x00010000	/* Deflate support present */
++#define	SAFE_DEVINFO_SARAM	0x00100000	/* on-chip SA RAM present */
++#define	SAFE_DEVINFO_EMIBUS	0x00200000	/* EMI bus present */
++#define	SAFE_DEVINFO_PKEY	0x00400000	/* public key support present */
++#define	SAFE_DEVINFO_RNG	0x00800000	/* RNG present */
++
++#define	SAFE_REV(_maj, _min)	(((_maj) << SAFE_DEVINFO_REV_MAJ_S) | (_min))
++#define	SAFE_REV_MAJ(_chiprev) \
++	(((_chiprev) & SAFE_DEVINFO_REV_MAJ) >> SAFE_DEVINFO_REV_MAJ_S)
++#define	SAFE_REV_MIN(_chiprev)	((_chiprev) & SAFE_DEVINFO_REV_MIN)
++
++#define	SAFE_PK_FUNC_MULT	0x00000001	/* Multiply function */
++#define	SAFE_PK_FUNC_SQUARE	0x00000004	/* Square function */
++#define	SAFE_PK_FUNC_ADD	0x00000010	/* Add function */
++#define	SAFE_PK_FUNC_SUB	0x00000020	/* Subtract function */
++#define	SAFE_PK_FUNC_LSHIFT	0x00000040	/* Left-shift function */
++#define	SAFE_PK_FUNC_RSHIFT	0x00000080	/* Right-shift function */
++#define	SAFE_PK_FUNC_DIV	0x00000100	/* Divide function */
++#define	SAFE_PK_FUNC_CMP	0x00000400	/* Compare function */
++#define	SAFE_PK_FUNC_COPY	0x00000800	/* Copy function */
++#define	SAFE_PK_FUNC_EXP16	0x00002000	/* Exponentiate (4-bit ACT) */
++#define	SAFE_PK_FUNC_EXP4	0x00004000	/* Exponentiate (2-bit ACT) */
++#define	SAFE_PK_FUNC_RUN	0x00008000	/* start/status */
++
++#define	SAFE_RNG_STAT_BUSY	0x00000001	/* busy, data not valid */
++
++#define	SAFE_RNG_CTRL_PRE_LFSR	0x00000001	/* enable output pre-LFSR */
++#define	SAFE_RNG_CTRL_TST_MODE	0x00000002	/* enable test mode */
++#define	SAFE_RNG_CTRL_TST_RUN	0x00000004	/* start test state machine */
++#define	SAFE_RNG_CTRL_ENA_RING1	0x00000008	/* test entropy oscillator #1 */
++#define	SAFE_RNG_CTRL_ENA_RING2	0x00000010	/* test entropy oscillator #2 */
++#define	SAFE_RNG_CTRL_DIS_ALARM	0x00000020	/* disable RNG alarm reports */
++#define	SAFE_RNG_CTRL_TST_CLOCK	0x00000040	/* enable test clock */
++#define	SAFE_RNG_CTRL_SHORTEN	0x00000080	/* shorten state timers */
++#define	SAFE_RNG_CTRL_TST_ALARM	0x00000100	/* simulate alarm state */
++#define	SAFE_RNG_CTRL_RST_LFSR	0x00000200	/* reset LFSR */
++
++/*
++ * Packet engine descriptor.  Note that d_csr is a copy of the
++ * SAFE_PE_CSR register and all definitions apply, and d_len
++ * is a copy of the SAFE_PE_LEN register and all definitions apply.
++ * d_src and d_dst may point directly to contiguous data or to a
++ * list of ``particle descriptors'' when using scatter/gather i/o.
++ */
++struct safe_desc {
++	u_int32_t	d_csr;			/* per-packet control/status */
++	u_int32_t	d_src;			/* source address */
++	u_int32_t	d_dst;			/* destination address */
++	u_int32_t	d_sa;			/* SA address */
++	u_int32_t	d_len;			/* length, bypass, status */
++};
++
++/*
++ * Scatter/Gather particle descriptor.
++ *
++ * NB: scatter descriptors do not specify a size; this is fixed
++ *     by the setting of the SAFE_PE_PARTCFG register.
++ */
++struct safe_pdesc {
++	u_int32_t	pd_addr;		/* particle address */
++#ifdef __BIG_ENDIAN	/* NOTE(review): both branches are identical — verify field order against the FreeBSD original */
++	u_int16_t	pd_flags;		/* control word */
++	u_int16_t	pd_size;		/* particle size (bytes) */
++#else
++	u_int16_t	pd_flags;		/* control word */
++	u_int16_t	pd_size;		/* particle size (bytes) */
++#endif
++};
++
++#define	SAFE_PD_READY	0x0001			/* ready for processing */
++#define	SAFE_PD_DONE	0x0002			/* h/w completed processing */
++
++/*
++ * Security Association (SA) Record (Rev 1).  One of these is
++ * required for each operation processed by the packet engine.
++ */
++struct safe_sarec {
++	u_int32_t	sa_cmd0;
++	u_int32_t	sa_cmd1;
++	u_int32_t	sa_resv0;
++	u_int32_t	sa_resv1;
++	u_int32_t	sa_key[8];		/* DES/3DES/AES key */
++	u_int32_t	sa_indigest[5];		/* inner digest */
++	u_int32_t	sa_outdigest[5];	/* outer digest */
++	u_int32_t	sa_spi;			/* SPI */
++	u_int32_t	sa_seqnum;		/* sequence number */
++	u_int32_t	sa_seqmask[2];		/* sequence number mask */
++	u_int32_t	sa_resv2;
++	u_int32_t	sa_staterec;		/* address of state record */
++	u_int32_t	sa_resv3[2];
++	u_int32_t	sa_samgmt0;		/* SA management field 0 */
++	u_int32_t	sa_samgmt1;		/* SA management field 1 */
++};
++
++#define	SAFE_SA_CMD0_OP		0x00000007	/* operation code */
++#define	SAFE_SA_CMD0_OP_CRYPT	0x00000000	/* encrypt/decrypt (basic) */
++#define	SAFE_SA_CMD0_OP_BOTH	0x00000001	/* encrypt-hash/hash-decrypto */
++#define	SAFE_SA_CMD0_OP_HASH	0x00000003	/* hash (outbound-only) */
++#define	SAFE_SA_CMD0_OP_ESP	0x00000000	/* ESP in/out (proto) */
++#define	SAFE_SA_CMD0_OP_AH	0x00000001	/* AH in/out (proto) */
++#define	SAFE_SA_CMD0_INBOUND	0x00000008	/* inbound operation */
++#define	SAFE_SA_CMD0_OUTBOUND	0x00000000	/* outbound operation */
++#define	SAFE_SA_CMD0_GROUP	0x00000030	/* operation group */
++#define	SAFE_SA_CMD0_BASIC	0x00000000	/* basic operation */
++#define	SAFE_SA_CMD0_PROTO	0x00000010	/* protocol/packet operation */
++#define	SAFE_SA_CMD0_BUNDLE	0x00000020	/* bundled operation (resvd) */
++#define	SAFE_SA_CMD0_PAD	0x000000c0	/* crypto pad method */
++#define	SAFE_SA_CMD0_PAD_IPSEC	0x00000000	/* IPsec padding */
++#define	SAFE_SA_CMD0_PAD_PKCS7	0x00000040	/* PKCS#7 padding */
++#define	SAFE_SA_CMD0_PAD_CONS	0x00000080	/* constant padding */
++#define	SAFE_SA_CMD0_PAD_ZERO	0x000000c0	/* zero padding */
++#define	SAFE_SA_CMD0_CRYPT_ALG	0x00000f00	/* symmetric crypto algorithm */
++#define	SAFE_SA_CMD0_DES	0x00000000	/* DES crypto algorithm */
++#define	SAFE_SA_CMD0_3DES	0x00000100	/* 3DES crypto algorithm */
++#define	SAFE_SA_CMD0_AES	0x00000300	/* AES crypto algorithm */
++#define	SAFE_SA_CMD0_CRYPT_NULL	0x00000f00	/* null crypto algorithm */
++#define	SAFE_SA_CMD0_HASH_ALG	0x0000f000	/* hash algorithm */
++#define	SAFE_SA_CMD0_MD5	0x00000000	/* MD5 hash algorithm */
++#define	SAFE_SA_CMD0_SHA1	0x00001000	/* SHA-1 hash algorithm */
++#define	SAFE_SA_CMD0_HASH_NULL	0x0000f000	/* null hash algorithm */
++#define	SAFE_SA_CMD0_HDR_PROC	0x00080000	/* header processing */
++#define	SAFE_SA_CMD0_IBUSID	0x00300000	/* input bus id */
++#define	SAFE_SA_CMD0_IPCI	0x00100000	/* PCI input bus id */
++#define	SAFE_SA_CMD0_OBUSID	0x00c00000	/* output bus id */
++#define	SAFE_SA_CMD0_OPCI	0x00400000	/* PCI output bus id */
++#define	SAFE_SA_CMD0_IVLD	0x03000000	/* IV loading */
++#define	SAFE_SA_CMD0_IVLD_NONE	0x00000000	/* IV no load (reuse) */
++#define	SAFE_SA_CMD0_IVLD_IBUF	0x01000000	/* IV load from input buffer */
++#define	SAFE_SA_CMD0_IVLD_STATE	0x02000000	/* IV load from state */
++#define	SAFE_SA_CMD0_HSLD	0x0c000000	/* hash state loading */
++#define	SAFE_SA_CMD0_HSLD_SA	0x00000000	/* hash state load from SA */
++#define	SAFE_SA_CMD0_HSLD_STATE	0x08000000	/* hash state load from state */
++#define	SAFE_SA_CMD0_HSLD_NONE	0x0c000000	/* hash state no load */
++#define	SAFE_SA_CMD0_SAVEIV	0x10000000	/* save IV */
++#define	SAFE_SA_CMD0_SAVEHASH	0x20000000	/* save hash state */
++#define	SAFE_SA_CMD0_IGATHER	0x40000000	/* input gather */
++#define	SAFE_SA_CMD0_OSCATTER	0x80000000	/* output scatter */
++
++#define	SAFE_SA_CMD1_HDRCOPY	0x00000002	/* copy header to output */
++#define	SAFE_SA_CMD1_PAYCOPY	0x00000004	/* copy payload to output */
++#define	SAFE_SA_CMD1_PADCOPY	0x00000008	/* copy pad to output */
++#define	SAFE_SA_CMD1_IPV4	0x00000000	/* IPv4 protocol */
++#define	SAFE_SA_CMD1_IPV6	0x00000010	/* IPv6 protocol */
++#define	SAFE_SA_CMD1_MUTABLE	0x00000020	/* mutable bit processing */
++#define	SAFE_SA_CMD1_SRBUSID	0x000000c0	/* state record bus id */
++#define	SAFE_SA_CMD1_SRPCI	0x00000040	/* state record from PCI */
++#define	SAFE_SA_CMD1_CRMODE	0x00000300	/* crypto mode */
++#define	SAFE_SA_CMD1_ECB	0x00000000	/* ECB crypto mode */
++#define	SAFE_SA_CMD1_CBC	0x00000100	/* CBC crypto mode */
++#define	SAFE_SA_CMD1_OFB	0x00000200	/* OFB crypto mode */
++#define	SAFE_SA_CMD1_CFB	0x00000300	/* CFB crypto mode */
++#define	SAFE_SA_CMD1_CRFEEDBACK	0x00000c00	/* crypto feedback mode */
++#define	SAFE_SA_CMD1_64BIT	0x00000000	/* 64-bit crypto feedback */
++#define	SAFE_SA_CMD1_8BIT	0x00000400	/* 8-bit crypto feedback */
++#define	SAFE_SA_CMD1_1BIT	0x00000800	/* 1-bit crypto feedback */
++#define	SAFE_SA_CMD1_128BIT	0x00000c00	/* 128-bit crypto feedback */
++#define	SAFE_SA_CMD1_OPTIONS	0x00001000	/* HMAC/options mutable bit */
++#define	SAFE_SA_CMD1_HMAC	SAFE_SA_CMD1_OPTIONS
++#define	SAFE_SA_CMD1_SAREV1	0x00008000	/* SA Revision 1 */
++#define	SAFE_SA_CMD1_OFFSET	0x00ff0000	/* hash/crypto offset(dwords) */
++#define	SAFE_SA_CMD1_OFFSET_S	16
++#define	SAFE_SA_CMD1_AESKEYLEN	0x0f000000	/* AES key length */
++#define	SAFE_SA_CMD1_AES128	0x02000000	/* 128-bit AES key */
++#define	SAFE_SA_CMD1_AES192	0x03000000	/* 192-bit AES key */
++#define	SAFE_SA_CMD1_AES256	0x04000000	/* 256-bit AES key */
++
++/* 
++ * Security Associate State Record (Rev 1).
++ */
++struct safe_sastate {
++	u_int32_t	sa_saved_iv[4];		/* saved IV (DES/3DES/AES) */
++	u_int32_t	sa_saved_hashbc;	/* saved hash byte count */
++	u_int32_t	sa_saved_indigest[5];	/* saved inner digest */
++};
++#endif /* _SAFE_SAFEREG_H_ */
+diff -Nur linux-2.6.30.orig/crypto/ocf/safe/safevar.h linux-2.6.30/crypto/ocf/safe/safevar.h
+--- linux-2.6.30.orig/crypto/ocf/safe/safevar.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.30/crypto/ocf/safe/safevar.h	2009-06-11 10:55:27.000000000 +0200
+@@ -0,0 +1,230 @@
++/*-
++ * The linux port of this code done by David McCullough
++ * Copyright (C) 2004-2007 David McCullough <david_mccullough@securecomputing.com>
++ * The license and original author are listed below.
++ *
++ * Copyright (c) 2003 Sam Leffler, Errno Consulting
++ * Copyright (c) 2003 Global Technology Associates, Inc.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ * 1. Redistributions of source code must retain the above copyright
++ *    notice, this list of conditions and the following disclaimer.
++ * 2. Redistributions in binary form must reproduce the above copyright
++ *    notice, this list of conditions and the following disclaimer in the
++ *    documentation and/or other materials provided with the distribution.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
++ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
++ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
++ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
++ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
++ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
++ * SUCH DAMAGE.
++ *
++ * $FreeBSD: src/sys/dev/safe/safevar.h,v 1.2 2006/05/17 18:34:26 pjd Exp $
++ */
++#ifndef _SAFE_SAFEVAR_H_
++#define	_SAFE_SAFEVAR_H_
++
++/* Maximum queue length */
++#ifndef SAFE_MAX_NQUEUE
++#define SAFE_MAX_NQUEUE	60
++#endif
++
++#define	SAFE_MAX_PART		64	/* Maximum scatter/gather depth */
++#define	SAFE_DMA_BOUNDARY	0	/* No boundary for source DMA ops */
++#define	SAFE_MAX_DSIZE		2048 /* MCLBYTES Fixed scatter particle size */
++#define	SAFE_MAX_SSIZE		0x0ffff	/* Maximum gather particle size */
++#define	SAFE_MAX_DMA		0xfffff	/* Maximum PE operand size (20 bits) */
++/* total src+dst particle descriptors */
++#define	SAFE_TOTAL_DPART	(SAFE_MAX_NQUEUE * SAFE_MAX_PART)
++#define	SAFE_TOTAL_SPART	(SAFE_MAX_NQUEUE * SAFE_MAX_PART)
++
++#define	SAFE_RNG_MAXBUFSIZ	128	/* 32-bit words */
++
++#define	SAFE_CARD(sid)		(((sid) & 0xf0000000) >> 28)
++#define	SAFE_SESSION(sid)	( (sid) & 0x0fffffff)
++#define	SAFE_SID(crd, sesn)	(((crd) << 28) | ((sesn) & 0x0fffffff))
++
++#define SAFE_DEF_RTY		0xff	/* PCI Retry Timeout */
++#define SAFE_DEF_TOUT		0xff	/* PCI TRDY Timeout */
++#define SAFE_DEF_CACHELINE	0x01	/* Cache Line setting */
++
++#ifdef __KERNEL__
++/*
++ * State associated with the allocation of each chunk
++ * of memory setup for DMA.
++ */
++struct safe_dma_alloc {
++	dma_addr_t		dma_paddr;
++	void			*dma_vaddr;
++};
++
++/*
++ * Cryptographic operand state.  One of these exists for each
++ * source and destination operand passed in from the crypto
++ * subsystem.  When possible source and destination operands
++ * refer to the same memory.  More often they are distinct.
++ * We track the virtual address of each operand as well as
++ * where each is mapped for DMA.
++ */
++struct safe_operand {
++	union {
++		struct sk_buff *skb;
++		struct uio *io;
++	} u;
++	void			*map;
++	int				mapsize;	/* total number of bytes in segs */
++	struct {
++		dma_addr_t	ds_addr;
++		int			ds_len;
++		int			ds_tlen;
++	} segs[SAFE_MAX_PART];
++	int				nsegs;
++};
++
++/*
++ * Packet engine ring entry and cryptographic operation state.
++ * The packet engine requires a ring of descriptors that contain
++ * pointers to various cryptographic state.  However the ring
++ * configuration register allows you to specify an arbitrary size
++ * for ring entries.  We use this feature to collect most of the
++ * state for each cryptographic request into one spot.  Other than
++ * ring entries only the ``particle descriptors'' (scatter/gather
++ * lists) and the actual operand data are kept separate.  The
++ * particle descriptors must also be organized in rings.  The
++ * operand data can be located aribtrarily (modulo alignment constraints).
++ *
++ * Note that the descriptor ring is mapped onto the PCI bus so
++ * the hardware can DMA data.  This means the entire ring must be
++ * contiguous.
++ */
++struct safe_ringentry {
++	struct safe_desc	re_desc;	/* command descriptor */
++	struct safe_sarec	re_sa;		/* SA record */
++	struct safe_sastate	re_sastate;	/* SA state record */
++
++	struct cryptop		*re_crp;	/* crypto operation */
++
++	struct safe_operand	re_src;		/* source operand */
++	struct safe_operand	re_dst;		/* destination operand */
++
++	int			re_sesn;	/* crypto session ID */
++	int			re_flags;
++#define	SAFE_QFLAGS_COPYOUTIV	0x1		/* copy back on completion */
++#define	SAFE_QFLAGS_COPYOUTICV	0x2		/* copy back on completion */
++};
++
++#define	re_src_skb	re_src.u.skb
++#define	re_src_io	re_src.u.io
++#define	re_src_map	re_src.map
++#define	re_src_nsegs	re_src.nsegs
++#define	re_src_segs	re_src.segs
++#define	re_src_mapsize	re_src.mapsize
++
++#define	re_dst_skb	re_dst.u.skb
++#define	re_dst_io	re_dst.u.io
++#define	re_dst_map	re_dst.map
++#define	re_dst_nsegs	re_dst.nsegs
++#define	re_dst_segs	re_dst.segs
++#define	re_dst_mapsize	re_dst.mapsize
++
++struct rndstate_test;
++
++struct safe_session {
++	u_int32_t	ses_used;
++	u_int32_t	ses_klen;		/* key length in bits */
++	u_int32_t	ses_key[8];		/* DES/3DES/AES key */
++	u_int32_t	ses_mlen;		/* hmac length in bytes */
++	u_int32_t	ses_hminner[5];		/* hmac inner state */
++	u_int32_t	ses_hmouter[5];		/* hmac outer state */
++	u_int32_t	ses_iv[4];		/* DES/3DES/AES iv */
++};
++
++struct safe_pkq {
++	struct list_head	pkq_list;
++	struct cryptkop		*pkq_krp;
++};
++
++struct safe_softc {
++	softc_device_decl	sc_dev;
++	u32			sc_irq;
++
++	struct pci_dev		*sc_pcidev;
++	ocf_iomem_t		sc_base_addr;
++
++	u_int			sc_chiprev;	/* major/minor chip revision */
++	int			sc_flags;	/* device specific flags */
++#define	SAFE_FLAGS_KEY		0x01		/* has key accelerator */
++#define	SAFE_FLAGS_RNG		0x02		/* hardware rng */
++	int			sc_suspended;
++	int			sc_needwakeup;	/* notify crypto layer */
++	int32_t			sc_cid;		/* crypto tag */
++
++	struct safe_dma_alloc	sc_ringalloc;	/* PE ring allocation state */
++	struct safe_ringentry	*sc_ring;	/* PE ring */
++	struct safe_ringentry	*sc_ringtop;	/* PE ring top */
++	struct safe_ringentry	*sc_front;	/* next free entry */
++	struct safe_ringentry	*sc_back;	/* next pending entry */
++	int			sc_nqchip;	/* # passed to chip */
++	spinlock_t		sc_ringmtx;	/* PE ring lock */
++	struct safe_pdesc	*sc_spring;	/* src particle ring */
++	struct safe_pdesc	*sc_springtop;	/* src particle ring top */
++	struct safe_pdesc	*sc_spfree;	/* next free src particle */
++	struct safe_dma_alloc	sc_spalloc;	/* src particle ring state */
++	struct safe_pdesc	*sc_dpring;	/* dest particle ring */
++	struct safe_pdesc	*sc_dpringtop;	/* dest particle ring top */
++	struct safe_pdesc	*sc_dpfree;	/* next free dest particle */
++	struct safe_dma_alloc	sc_dpalloc;	/* dst particle ring state */
++	int			sc_nsessions;	/* # of sessions */
++	struct safe_session	*sc_sessions;	/* sessions */
++
++	struct timer_list	sc_pkto;	/* PK polling */
++	spinlock_t		sc_pkmtx;	/* PK lock */
++	struct list_head	sc_pkq;		/* queue of PK requests */
++	struct safe_pkq		*sc_pkq_cur;	/* current processing request */
++	u_int32_t		sc_pk_reslen, sc_pk_resoff;
++
++	int			sc_max_dsize;	/* maximum safe DMA size */
++};
++#endif /* __KERNEL__ */
++
++struct safe_stats {
++	u_int64_t st_ibytes;
++	u_int64_t st_obytes;
++	u_int32_t st_ipackets;
++	u_int32_t st_opackets;
++	u_int32_t st_invalid;		/* invalid argument */
++	u_int32_t st_badsession;	/* invalid session id */
++	u_int32_t st_badflags;		/* flags indicate !(mbuf | uio) */
++	u_int32_t st_nodesc;		/* op submitted w/o descriptors */
++	u_int32_t st_badalg;		/* unsupported algorithm */
++	u_int32_t st_ringfull;		/* PE descriptor ring full */
++	u_int32_t st_peoperr;		/* PE marked error */
++	u_int32_t st_dmaerr;		/* PE DMA error */
++	u_int32_t st_bypasstoobig;	/* bypass > 96 bytes */
++	u_int32_t st_skipmismatch;	/* enc part begins before auth part */
++	u_int32_t st_lenmismatch;	/* enc length different auth length */
++	u_int32_t st_coffmisaligned;	/* crypto offset not 32-bit aligned */
++	u_int32_t st_cofftoobig;	/* crypto offset > 255 words */
++	u_int32_t st_iovmisaligned;	/* iov op not aligned */
++	u_int32_t st_iovnotuniform;	/* iov op not suitable */
++	u_int32_t st_unaligned;		/* unaligned src caused copy */
++	u_int32_t st_notuniform;	/* non-uniform src caused copy */
++	u_int32_t st_nomap;		/* bus_dmamap_create failed */
++	u_int32_t st_noload;		/* bus_dmamap_load_* failed */
++	u_int32_t st_nombuf;		/* MGET* failed */
++	u_int32_t st_nomcl;		/* MCLGET* failed */
++	u_int32_t st_maxqchip;		/* max mcr1 ops out for processing */
++	u_int32_t st_rng;		/* RNG requests */
++	u_int32_t st_rngalarm;		/* RNG alarm requests */
++	u_int32_t st_noicvcopy;		/* ICV data copies suppressed */
++};
++#endif /* _SAFE_SAFEVAR_H_ */
+diff -Nur linux-2.6.30.orig/crypto/ocf/safe/sha1.c linux-2.6.30/crypto/ocf/safe/sha1.c
+--- linux-2.6.30.orig/crypto/ocf/safe/sha1.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.30/crypto/ocf/safe/sha1.c	2009-06-11 10:55:27.000000000 +0200
+@@ -0,0 +1,279 @@
++/*	$KAME: sha1.c,v 1.5 2000/11/08 06:13:08 itojun Exp $	*/
++/*
++ * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ * 1. Redistributions of source code must retain the above copyright
++ *    notice, this list of conditions and the following disclaimer.
++ * 2. Redistributions in binary form must reproduce the above copyright
++ *    notice, this list of conditions and the following disclaimer in the
++ *    documentation and/or other materials provided with the distribution.
++ * 3. Neither the name of the project nor the names of its contributors
++ *    may be used to endorse or promote products derived from this software
++ *    without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
++ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
++ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
++ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
++ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
++ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
++ * SUCH DAMAGE.
++ */
++
++/*
++ * FIPS pub 180-1: Secure Hash Algorithm (SHA-1)
++ * based on: http://csrc.nist.gov/fips/fip180-1.txt
++ * implemented by Jun-ichiro itojun Itoh <itojun@itojun.org>
++ */
++
++#if 0
++#include <sys/cdefs.h>
++__FBSDID("$FreeBSD: src/sys/crypto/sha1.c,v 1.9 2003/06/10 21:36:57 obrien Exp $");
++
++#include <sys/types.h>
++#include <sys/cdefs.h>
++#include <sys/time.h>
++#include <sys/systm.h>
++
++#include <crypto/sha1.h>
++#endif
++
++/* sanity check */
++#if BYTE_ORDER != BIG_ENDIAN
++# if BYTE_ORDER != LITTLE_ENDIAN
++#  define unsupported 1
++# endif
++#endif
++
++#ifndef unsupported
++
++/* constant table */
++static u_int32_t _K[] = { 0x5a827999, 0x6ed9eba1, 0x8f1bbcdc, 0xca62c1d6 };
++#define	K(t)	_K[(t) / 20]
++
++#define	F0(b, c, d)	(((b) & (c)) | ((~(b)) & (d)))
++#define	F1(b, c, d)	(((b) ^ (c)) ^ (d))
++#define	F2(b, c, d)	(((b) & (c)) | ((b) & (d)) | ((c) & (d)))
++#define	F3(b, c, d)	(((b) ^ (c)) ^ (d))
++
++#define	S(n, x)		(((x) << (n)) | ((x) >> (32 - n)))
++
++#undef H
++#define	H(n)	(ctxt->h.b32[(n)])
++#define	COUNT	(ctxt->count)
++#define	BCOUNT	(ctxt->c.b64[0] / 8)
++#define	W(n)	(ctxt->m.b32[(n)])
++
++#define	PUTBYTE(x)	{ \
++	ctxt->m.b8[(COUNT % 64)] = (x);		\
++	COUNT++;				\
++	COUNT %= 64;				\
++	ctxt->c.b64[0] += 8;			\
++	if (COUNT % 64 == 0)			\
++		sha1_step(ctxt);		\
++     }
++
++#define	PUTPAD(x)	{ \
++	ctxt->m.b8[(COUNT % 64)] = (x);		\
++	COUNT++;				\
++	COUNT %= 64;				\
++	if (COUNT % 64 == 0)			\
++		sha1_step(ctxt);		\
++     }
++
++static void sha1_step(struct sha1_ctxt *);
++
++static void
++sha1_step(ctxt)
++	struct sha1_ctxt *ctxt;
++{
++	u_int32_t	a, b, c, d, e;
++	size_t t, s;
++	u_int32_t	tmp;
++
++#if BYTE_ORDER == LITTLE_ENDIAN
++	struct sha1_ctxt tctxt;
++	bcopy(&ctxt->m.b8[0], &tctxt.m.b8[0], 64);
++	ctxt->m.b8[0] = tctxt.m.b8[3]; ctxt->m.b8[1] = tctxt.m.b8[2];
++	ctxt->m.b8[2] = tctxt.m.b8[1]; ctxt->m.b8[3] = tctxt.m.b8[0];
++	ctxt->m.b8[4] = tctxt.m.b8[7]; ctxt->m.b8[5] = tctxt.m.b8[6];
++	ctxt->m.b8[6] = tctxt.m.b8[5]; ctxt->m.b8[7] = tctxt.m.b8[4];
++	ctxt->m.b8[8] = tctxt.m.b8[11]; ctxt->m.b8[9] = tctxt.m.b8[10];
++	ctxt->m.b8[10] = tctxt.m.b8[9]; ctxt->m.b8[11] = tctxt.m.b8[8];
++	ctxt->m.b8[12] = tctxt.m.b8[15]; ctxt->m.b8[13] = tctxt.m.b8[14];
++	ctxt->m.b8[14] = tctxt.m.b8[13]; ctxt->m.b8[15] = tctxt.m.b8[12];
++	ctxt->m.b8[16] = tctxt.m.b8[19]; ctxt->m.b8[17] = tctxt.m.b8[18];
++	ctxt->m.b8[18] = tctxt.m.b8[17]; ctxt->m.b8[19] = tctxt.m.b8[16];
++	ctxt->m.b8[20] = tctxt.m.b8[23]; ctxt->m.b8[21] = tctxt.m.b8[22];
++	ctxt->m.b8[22] = tctxt.m.b8[21]; ctxt->m.b8[23] = tctxt.m.b8[20];
++	ctxt->m.b8[24] = tctxt.m.b8[27]; ctxt->m.b8[25] = tctxt.m.b8[26];
++	ctxt->m.b8[26] = tctxt.m.b8[25]; ctxt->m.b8[27] = tctxt.m.b8[24];
++	ctxt->m.b8[28] = tctxt.m.b8[31]; ctxt->m.b8[29] = tctxt.m.b8[30];
++	ctxt->m.b8[30] = tctxt.m.b8[29]; ctxt->m.b8[31] = tctxt.m.b8[28];
++	ctxt->m.b8[32] = tctxt.m.b8[35]; ctxt->m.b8[33] = tctxt.m.b8[34];
++	ctxt->m.b8[34] = tctxt.m.b8[33]; ctxt->m.b8[35] = tctxt.m.b8[32];
++	ctxt->m.b8[36] = tctxt.m.b8[39]; ctxt->m.b8[37] = tctxt.m.b8[38];
++	ctxt->m.b8[38] = tctxt.m.b8[37]; ctxt->m.b8[39] = tctxt.m.b8[36];
++	ctxt->m.b8[40] = tctxt.m.b8[43]; ctxt->m.b8[41] = tctxt.m.b8[42];
++	ctxt->m.b8[42] = tctxt.m.b8[41]; ctxt->m.b8[43] = tctxt.m.b8[40];
++	ctxt->m.b8[44] = tctxt.m.b8[47]; ctxt->m.b8[45] = tctxt.m.b8[46];
++	ctxt->m.b8[46] = tctxt.m.b8[45]; ctxt->m.b8[47] = tctxt.m.b8[44];
++	ctxt->m.b8[48] = tctxt.m.b8[51]; ctxt->m.b8[49] = tctxt.m.b8[50];
++	ctxt->m.b8[50] = tctxt.m.b8[49]; ctxt->m.b8[51] = tctxt.m.b8[48];
++	ctxt->m.b8[52] = tctxt.m.b8[55]; ctxt->m.b8[53] = tctxt.m.b8[54];
++	ctxt->m.b8[54] = tctxt.m.b8[53]; ctxt->m.b8[55] = tctxt.m.b8[52];
++	ctxt->m.b8[56] = tctxt.m.b8[59]; ctxt->m.b8[57] = tctxt.m.b8[58];
++	ctxt->m.b8[58] = tctxt.m.b8[57]; ctxt->m.b8[59] = tctxt.m.b8[56];
++	ctxt->m.b8[60] = tctxt.m.b8[63]; ctxt->m.b8[61] = tctxt.m.b8[62];
++	ctxt->m.b8[62] = tctxt.m.b8[61]; ctxt->m.b8[63] = tctxt.m.b8[60];
++#endif
++
++	a = H(0); b = H(1); c = H(2); d = H(3); e = H(4);
++
++	for (t = 0; t < 20; t++) {
++		s = t & 0x0f;
++		if (t >= 16) {
++			W(s) = S(1, W((s+13) & 0x0f) ^ W((s+8) & 0x0f) ^ W((s+2) & 0x0f) ^ W(s));
++		}
++		tmp = S(5, a) + F0(b, c, d) + e + W(s) + K(t);
++		e = d; d = c; c = S(30, b); b = a; a = tmp;
++	}
++	for (t = 20; t < 40; t++) {
++		s = t & 0x0f;
++		W(s) = S(1, W((s+13) & 0x0f) ^ W((s+8) & 0x0f) ^ W((s+2) & 0x0f) ^ W(s));
++		tmp = S(5, a) + F1(b, c, d) + e + W(s) + K(t);
++		e = d; d = c; c = S(30, b); b = a; a = tmp;
++	}
++	for (t = 40; t < 60; t++) {
++		s = t & 0x0f;
++		W(s) = S(1, W((s+13) & 0x0f) ^ W((s+8) & 0x0f) ^ W((s+2) & 0x0f) ^ W(s));
++		tmp = S(5, a) + F2(b, c, d) + e + W(s) + K(t);
++		e = d; d = c; c = S(30, b); b = a; a = tmp;
++	}
++	for (t = 60; t < 80; t++) {
++		s = t & 0x0f;
++		W(s) = S(1, W((s+13) & 0x0f) ^ W((s+8) & 0x0f) ^ W((s+2) & 0x0f) ^ W(s));
++		tmp = S(5, a) + F3(b, c, d) + e + W(s) + K(t);
++		e = d; d = c; c = S(30, b); b = a; a = tmp;
++	}
++
++	H(0) = H(0) + a;
++	H(1) = H(1) + b;
++	H(2) = H(2) + c;
++	H(3) = H(3) + d;
++	H(4) = H(4) + e;
++
++	bzero(&ctxt->m.b8[0], 64);
++}
++
++/*------------------------------------------------------------*/
++
++void
++sha1_init(ctxt)
++	struct sha1_ctxt *ctxt;
++{
++	bzero(ctxt, sizeof(struct sha1_ctxt));
++	H(0) = 0x67452301;
++	H(1) = 0xefcdab89;
++	H(2) = 0x98badcfe;
++	H(3) = 0x10325476;
++	H(4) = 0xc3d2e1f0;
++}
++
++void
++sha1_pad(ctxt)
++	struct sha1_ctxt *ctxt;
++{
++	size_t padlen;		/*pad length in bytes*/
++	size_t padstart;
++
++	PUTPAD(0x80);
++
++	padstart = COUNT % 64;
++	padlen = 64 - padstart;
++	if (padlen < 8) {
++		bzero(&ctxt->m.b8[padstart], padlen);
++		COUNT += padlen;
++		COUNT %= 64;
++		sha1_step(ctxt);
++		padstart = COUNT % 64;	/* should be 0 */
++		padlen = 64 - padstart;	/* should be 64 */
++	}
++	bzero(&ctxt->m.b8[padstart], padlen - 8);
++	COUNT += (padlen - 8);
++	COUNT %= 64;
++#if BYTE_ORDER == BIG_ENDIAN
++	PUTPAD(ctxt->c.b8[0]); PUTPAD(ctxt->c.b8[1]);
++	PUTPAD(ctxt->c.b8[2]); PUTPAD(ctxt->c.b8[3]);
++	PUTPAD(ctxt->c.b8[4]); PUTPAD(ctxt->c.b8[5]);
++	PUTPAD(ctxt->c.b8[6]); PUTPAD(ctxt->c.b8[7]);
++#else
++	PUTPAD(ctxt->c.b8[7]); PUTPAD(ctxt->c.b8[6]);
++	PUTPAD(ctxt->c.b8[5]); PUTPAD(ctxt->c.b8[4]);
++	PUTPAD(ctxt->c.b8[3]); PUTPAD(ctxt->c.b8[2]);
++	PUTPAD(ctxt->c.b8[1]); PUTPAD(ctxt->c.b8[0]);
++#endif
++}
++
++void
++sha1_loop(ctxt, input, len)
++	struct sha1_ctxt *ctxt;
++	const u_int8_t *input;
++	size_t len;
++{
++	size_t gaplen;
++	size_t gapstart;
++	size_t off;
++	size_t copysiz;
++
++	off = 0;
++
++	while (off < len) {
++		gapstart = COUNT % 64;
++		gaplen = 64 - gapstart;
++
++		copysiz = (gaplen < len - off) ? gaplen : len - off;
++		bcopy(&input[off], &ctxt->m.b8[gapstart], copysiz);
++		COUNT += copysiz;
++		COUNT %= 64;
++		ctxt->c.b64[0] += copysiz * 8;
++		if (COUNT % 64 == 0)
++			sha1_step(ctxt);
++		off += copysiz;
++	}
++}
++
++void
++sha1_result(ctxt, digest0)
++	struct sha1_ctxt *ctxt;
++	caddr_t digest0;
++{
++	u_int8_t *digest;
++
++	digest = (u_int8_t *)digest0;
++	sha1_pad(ctxt);
++#if BYTE_ORDER == BIG_ENDIAN
++	bcopy(&ctxt->h.b8[0], digest, 20);
++#else
++	digest[0] = ctxt->h.b8[3]; digest[1] = ctxt->h.b8[2];
++	digest[2] = ctxt->h.b8[1]; digest[3] = ctxt->h.b8[0];
++	digest[4] = ctxt->h.b8[7]; digest[5] = ctxt->h.b8[6];
++	digest[6] = ctxt->h.b8[5]; digest[7] = ctxt->h.b8[4];
++	digest[8] = ctxt->h.b8[11]; digest[9] = ctxt->h.b8[10];
++	digest[10] = ctxt->h.b8[9]; digest[11] = ctxt->h.b8[8];
++	digest[12] = ctxt->h.b8[15]; digest[13] = ctxt->h.b8[14];
++	digest[14] = ctxt->h.b8[13]; digest[15] = ctxt->h.b8[12];
++	digest[16] = ctxt->h.b8[19]; digest[17] = ctxt->h.b8[18];
++	digest[18] = ctxt->h.b8[17]; digest[19] = ctxt->h.b8[16];
++#endif
++}
++
++#endif /*unsupported*/
+diff -Nur linux-2.6.30.orig/crypto/ocf/safe/sha1.h linux-2.6.30/crypto/ocf/safe/sha1.h
+--- linux-2.6.30.orig/crypto/ocf/safe/sha1.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.30/crypto/ocf/safe/sha1.h	2009-06-11 10:55:27.000000000 +0200
+@@ -0,0 +1,72 @@
++/*	$FreeBSD: src/sys/crypto/sha1.h,v 1.8 2002/03/20 05:13:50 alfred Exp $	*/
++/*	$KAME: sha1.h,v 1.5 2000/03/27 04:36:23 sumikawa Exp $	*/
++
++/*
++ * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ * 1. Redistributions of source code must retain the above copyright
++ *    notice, this list of conditions and the following disclaimer.
++ * 2. Redistributions in binary form must reproduce the above copyright
++ *    notice, this list of conditions and the following disclaimer in the
++ *    documentation and/or other materials provided with the distribution.
++ * 3. Neither the name of the project nor the names of its contributors
++ *    may be used to endorse or promote products derived from this software
++ *    without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
++ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
++ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
++ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
++ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
++ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
++ * SUCH DAMAGE.
++ */
++/*
++ * FIPS pub 180-1: Secure Hash Algorithm (SHA-1)
++ * based on: http://csrc.nist.gov/fips/fip180-1.txt
++ * implemented by Jun-ichiro itojun Itoh <itojun@itojun.org>
++ */
++
++#ifndef _NETINET6_SHA1_H_
++#define _NETINET6_SHA1_H_
++
++struct sha1_ctxt {
++	union {
++		u_int8_t	b8[20];
++		u_int32_t	b32[5];
++	} h;
++	union {
++		u_int8_t	b8[8];
++		u_int64_t	b64[1];
++	} c;
++	union {
++		u_int8_t	b8[64];
++		u_int32_t	b32[16];
++	} m;
++	u_int8_t	count;
++};
++
++#ifdef __KERNEL__
++extern void sha1_init(struct sha1_ctxt *);
++extern void sha1_pad(struct sha1_ctxt *);
++extern void sha1_loop(struct sha1_ctxt *, const u_int8_t *, size_t);
++extern void sha1_result(struct sha1_ctxt *, caddr_t);
++
++/* compatibilty with other SHA1 source codes */
++typedef struct sha1_ctxt SHA1_CTX;
++#define SHA1Init(x)		sha1_init((x))
++#define SHA1Update(x, y, z)	sha1_loop((x), (y), (z))
++#define SHA1Final(x, y)		sha1_result((y), (x))
++#endif /* __KERNEL__ */
++
++#define	SHA1_RESULTLEN	(160/8)
++
++#endif /*_NETINET6_SHA1_H_*/
+diff -Nur linux-2.6.30.orig/crypto/ocf/talitos/Makefile linux-2.6.30/crypto/ocf/talitos/Makefile
+--- linux-2.6.30.orig/crypto/ocf/talitos/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.30/crypto/ocf/talitos/Makefile	2009-06-11 10:55:27.000000000 +0200
+@@ -0,0 +1,12 @@
++# for SGlinux builds
++-include $(ROOTDIR)/modules/.config
++
++obj-$(CONFIG_OCF_TALITOS) += talitos.o
++
++obj ?= .
++EXTRA_CFLAGS += -I$(obj)/.. -I$(obj)/
++
++ifdef TOPDIR
++-include $(TOPDIR)/Rules.make
++endif
++
+diff -Nur linux-2.6.30.orig/crypto/ocf/talitos/talitos.c linux-2.6.30/crypto/ocf/talitos/talitos.c
+--- linux-2.6.30.orig/crypto/ocf/talitos/talitos.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.30/crypto/ocf/talitos/talitos.c	2009-06-11 10:55:27.000000000 +0200
+@@ -0,0 +1,1359 @@
++/*
++ * crypto/ocf/talitos/talitos.c
++ *
++ * An OCF-Linux module that uses Freescale's SEC to do the crypto.
++ * Based on crypto/ocf/hifn and crypto/ocf/safe OCF drivers
++ *
++ * Copyright (c) 2006 Freescale Semiconductor, Inc.
++ *
++ * This code written by Kim A. B. Phillips <kim.phillips@freescale.com>
++ * some code copied from files with the following:
++ * Copyright (C) 2004-2007 David McCullough <david_mccullough@securecomputing.com
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ *
++ * 1. Redistributions of source code must retain the above copyright
++ *    notice, this list of conditions and the following disclaimer.
++ * 2. Redistributions in binary form must reproduce the above copyright
++ *    notice, this list of conditions and the following disclaimer in the
++ *    documentation and/or other materials provided with the distribution.
++ * 3. The name of the author may not be used to endorse or promote products
++ *    derived from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
++ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
++ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
++ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
++ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
++ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ *
++ * ---------------------------------------------------------------------------
++ *
++ * NOTES:
++ *
++ * The Freescale SEC (also known as 'talitos') resides on the
++ * internal bus, and runs asynchronous to the processor core.  It has
++ * a wide gamut of cryptographic acceleration features, including single-
++ * pass IPsec (also known as algorithm chaining).  To properly utilize 
++ * all of the SEC's performance enhancing features, further reworking 
++ * of higher level code (framework, applications) will be necessary.
++ *
++ * The following table shows which SEC version is present in which devices:
++ * 
++ * Devices       SEC version
++ *
++ * 8272, 8248    SEC 1.0
++ * 885, 875      SEC 1.2
++ * 8555E, 8541E  SEC 2.0
++ * 8349E         SEC 2.01
++ * 8548E         SEC 2.1
++ *
++ * The following table shows the features offered by each SEC version:
++ *
++ * 	                       Max.   chan-
++ * version  Bus I/F       Clock  nels  DEU AESU AFEU MDEU PKEU RNG KEU
++ *
++ * SEC 1.0  internal 64b  100MHz   4     1    1    1    1    1   1   0
++ * SEC 1.2  internal 32b   66MHz   1     1    1    0    1    0   0   0
++ * SEC 2.0  internal 64b  166MHz   4     1    1    1    1    1   1   0
++ * SEC 2.01 internal 64b  166MHz   4     1    1    1    1    1   1   0
++ * SEC 2.1  internal 64b  333MHz   4     1    1    1    1    1   1   1
++ *
++ * Each execution unit in the SEC has two modes of execution; channel and
++ * slave/debug.  This driver employs the channel infrastructure in the
++ * device for convenience.  Only the RNG is directly accessed due to the
++ * convenience of its random fifo pool.  The relationship between the
++ * channels and execution units is depicted in the following diagram:
++ *
++ *    -------   ------------
++ * ---| ch0 |---|          |
++ *    -------   |          |
++ *              |          |------+-------+-------+-------+------------
++ *    -------   |          |      |       |       |       |           |
++ * ---| ch1 |---|          |      |       |       |       |           |
++ *    -------   |          |   ------  ------  ------  ------      ------
++ *              |controller|   |DEU |  |AESU|  |MDEU|  |PKEU| ...  |RNG |
++ *    -------   |          |   ------  ------  ------  ------      ------
++ * ---| ch2 |---|          |      |       |       |       |           |
++ *    -------   |          |      |       |       |       |           |
++ *              |          |------+-------+-------+-------+------------
++ *    -------   |          |
++ * ---| ch3 |---|          |
++ *    -------   ------------
++ *
++ * Channel ch0 may drive an aes operation to the aes unit (AESU),
++ * and, at the same time, ch1 may drive a message digest operation
++ * to the mdeu. Each channel has an input descriptor FIFO, and the 
++ * FIFO can contain, e.g. on the 8541E, up to 24 entries, before a
++ * a buffer overrun error is triggered. The controller is responsible
++ * for fetching the data from descriptor pointers, and passing the 
++ * data to the appropriate EUs. The controller also writes the 
++ * cryptographic operation's result to memory. The SEC notifies 
++ * completion by triggering an interrupt and/or setting the 1st byte 
++ * of the hdr field to 0xff.
++ *
++ * TODO:
++ * o support more algorithms
++ * o support more versions of the SEC
++ * o add support for linux 2.4
++ * o scatter-gather (sg) support
++ * o add support for public key ops (PKEU)
++ * o add statistics
++ */
++
++#ifndef AUTOCONF_INCLUDED
++#include <linux/config.h>
++#endif
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/interrupt.h>
++#include <linux/spinlock.h>
++#include <linux/random.h>
++#include <linux/skbuff.h>
++#include <asm/scatterlist.h>
++#include <linux/dma-mapping.h>  /* dma_map_single() */
++#include <linux/moduleparam.h>
++
++#include <linux/version.h>
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)
++#include <linux/platform_device.h>
++#endif
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
++#include <linux/of_platform.h>
++#endif
++
++#include <cryptodev.h>
++#include <uio.h>
++
++#define DRV_NAME "talitos" 
++
++#include "talitos_dev.h"
++#include "talitos_soft.h"
++
++#define read_random(p,l) get_random_bytes(p,l)
++
++const char talitos_driver_name[] = "Talitos OCF";
++const char talitos_driver_version[] = "0.2";
++
++static int talitos_newsession(device_t dev, u_int32_t *sidp,
++								struct cryptoini *cri);
++static int talitos_freesession(device_t dev, u_int64_t tid);
++static int talitos_process(device_t dev, struct cryptop *crp, int hint);
++static void dump_talitos_status(struct talitos_softc *sc);
++static int talitos_submit(struct talitos_softc *sc, struct talitos_desc *td, 
++								int chsel);
++static void talitos_doneprocessing(struct talitos_softc *sc);
++static void talitos_init_device(struct talitos_softc *sc);
++static void talitos_reset_device_master(struct talitos_softc *sc);
++static void talitos_reset_device(struct talitos_softc *sc);
++static void talitos_errorprocessing(struct talitos_softc *sc);
++#ifdef CONFIG_PPC_MERGE
++static int talitos_probe(struct of_device *ofdev, const struct of_device_id *match);
++static int talitos_remove(struct of_device *ofdev);
++#else
++static int talitos_probe(struct platform_device *pdev);
++static int talitos_remove(struct platform_device *pdev);
++#endif
++#ifdef CONFIG_OCF_RANDOMHARVEST
++static int talitos_read_random(void *arg, u_int32_t *buf, int maxwords);
++static void talitos_rng_init(struct talitos_softc *sc);
++#endif
++
++static device_method_t talitos_methods = {
++	/* crypto device methods */
++	DEVMETHOD(cryptodev_newsession,	talitos_newsession),
++	DEVMETHOD(cryptodev_freesession,talitos_freesession),
++	DEVMETHOD(cryptodev_process,	talitos_process),
++};
++
++#define debug talitos_debug
++int talitos_debug = 0;
++module_param(talitos_debug, int, 0644);
++MODULE_PARM_DESC(talitos_debug, "Enable debug");
++
++static inline void talitos_write(volatile unsigned *addr, u32 val)
++{
++        out_be32(addr, val);
++}
++
++static inline u32 talitos_read(volatile unsigned *addr)
++{
++        u32 val;
++        val = in_be32(addr);
++        return val;
++}
++
++static void dump_talitos_status(struct talitos_softc *sc)
++{
++	unsigned int v, v_hi, i, *ptr;
++	v = talitos_read(sc->sc_base_addr + TALITOS_MCR);
++	v_hi = talitos_read(sc->sc_base_addr + TALITOS_MCR_HI);
++	printk(KERN_INFO "%s: MCR          0x%08x_%08x\n",
++			device_get_nameunit(sc->sc_cdev), v, v_hi);
++	v = talitos_read(sc->sc_base_addr + TALITOS_IMR);
++	v_hi = talitos_read(sc->sc_base_addr + TALITOS_IMR_HI);
++	printk(KERN_INFO "%s: IMR          0x%08x_%08x\n",
++			device_get_nameunit(sc->sc_cdev), v, v_hi);
++	v = talitos_read(sc->sc_base_addr + TALITOS_ISR);
++	v_hi = talitos_read(sc->sc_base_addr + TALITOS_ISR_HI);
++	printk(KERN_INFO "%s: ISR          0x%08x_%08x\n",
++			device_get_nameunit(sc->sc_cdev), v, v_hi);
++	for (i = 0; i < sc->sc_num_channels; i++) { 
++		v = talitos_read(sc->sc_base_addr + i*TALITOS_CH_OFFSET + 
++			TALITOS_CH_CDPR);
++		v_hi = talitos_read(sc->sc_base_addr + i*TALITOS_CH_OFFSET + 
++			TALITOS_CH_CDPR_HI);
++		printk(KERN_INFO "%s: CDPR     ch%d 0x%08x_%08x\n", 
++				device_get_nameunit(sc->sc_cdev), i, v, v_hi);
++	}
++	for (i = 0; i < sc->sc_num_channels; i++) { 
++		v = talitos_read(sc->sc_base_addr + i*TALITOS_CH_OFFSET + 
++			TALITOS_CH_CCPSR);
++		v_hi = talitos_read(sc->sc_base_addr + i*TALITOS_CH_OFFSET + 
++			TALITOS_CH_CCPSR_HI);
++		printk(KERN_INFO "%s: CCPSR    ch%d 0x%08x_%08x\n", 
++				device_get_nameunit(sc->sc_cdev), i, v, v_hi);
++	}
++	ptr = sc->sc_base_addr + TALITOS_CH_DESCBUF;
++	for (i = 0; i < 16; i++) { 
++		v = talitos_read(ptr++); v_hi = talitos_read(ptr++);
++		printk(KERN_INFO "%s: DESCBUF  ch0 0x%08x_%08x (tdp%02d)\n", 
++				device_get_nameunit(sc->sc_cdev), v, v_hi, i);
++	}
++	return;
++}
++
++
++#ifdef CONFIG_OCF_RANDOMHARVEST
++/* 
++ * pull random numbers off the RNG FIFO, not exceeding amount available
++ */
++static int
++talitos_read_random(void *arg, u_int32_t *buf, int maxwords)
++{
++	struct talitos_softc *sc = (struct talitos_softc *) arg;
++	int rc;
++	u_int32_t v;
++
++	DPRINTF("%s()\n", __FUNCTION__);
++
++	/* check for things like FIFO underflow */
++	v = talitos_read(sc->sc_base_addr + TALITOS_RNGISR_HI);
++	if (unlikely(v)) {
++		printk(KERN_ERR "%s: RNGISR_HI error %08x\n",
++				device_get_nameunit(sc->sc_cdev), v);
++		return 0;
++	}
++	/*
++	 * OFL is number of available 64-bit words, 
++	 * shift and convert to a 32-bit word count
++	 */
++	v = talitos_read(sc->sc_base_addr + TALITOS_RNGSR_HI);
++	v = (v & TALITOS_RNGSR_HI_OFL) >> (16 - 1);
++	if (maxwords > v)
++		maxwords = v;
++	for (rc = 0; rc < maxwords; rc++) {
++		buf[rc] = talitos_read(sc->sc_base_addr + 
++			TALITOS_RNG_FIFO + rc*sizeof(u_int32_t));
++	}
++	if (maxwords & 1) {
++		/* 
++		 * RNG will complain with an AE in the RNGISR
++		 * if we don't complete the pairs of 32-bit reads
++		 * to its 64-bit register based FIFO
++		 */
++		v = talitos_read(sc->sc_base_addr + 
++			TALITOS_RNG_FIFO + rc*sizeof(u_int32_t));
++	}
++
++	return rc;
++}
++
++static void
++talitos_rng_init(struct talitos_softc *sc)
++{
++	u_int32_t v;
++
++	DPRINTF("%s()\n", __FUNCTION__);
++	/* reset RNG EU */
++	v = talitos_read(sc->sc_base_addr + TALITOS_RNGRCR_HI);
++	v |= TALITOS_RNGRCR_HI_SR;
++	talitos_write(sc->sc_base_addr + TALITOS_RNGRCR_HI, v);
++	while ((talitos_read(sc->sc_base_addr + TALITOS_RNGSR_HI) 
++		& TALITOS_RNGSR_HI_RD) == 0)
++			cpu_relax();
++	/*
++	 * we tell the RNG to start filling the RNG FIFO
++	 * by writing the RNGDSR 
++	 */
++	v = talitos_read(sc->sc_base_addr + TALITOS_RNGDSR_HI);
++	talitos_write(sc->sc_base_addr + TALITOS_RNGDSR_HI, v);
++	/*
++	 * 64 bits of data will be pushed onto the FIFO every 
++	 * 256 SEC cycles until the FIFO is full.  The RNG then 
++	 * attempts to keep the FIFO full.
++	 */
++	v = talitos_read(sc->sc_base_addr + TALITOS_RNGISR_HI);
++	if (v) {
++		printk(KERN_ERR "%s: RNGISR_HI error %08x\n",
++			device_get_nameunit(sc->sc_cdev), v);
++		return;
++	}
++	/*
++	 * n.b. we need to add a FIPS test here - if the RNG is going 
++	 * to fail, it's going to fail at reset time
++	 */
++	return;
++}
++#endif /* CONFIG_OCF_RANDOMHARVEST */
++
++/*
++ * Generate a new software session.
++ */
++static int
++talitos_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
++{
++	struct cryptoini *c, *encini = NULL, *macini = NULL;
++	struct talitos_softc *sc = device_get_softc(dev);
++	struct talitos_session *ses = NULL;
++	int sesn;
++
++	DPRINTF("%s()\n", __FUNCTION__);
++	if (sidp == NULL || cri == NULL || sc == NULL) {
++		DPRINTF("%s,%d - EINVAL\n", __FILE__, __LINE__);
++		return EINVAL;
++	}
++	for (c = cri; c != NULL; c = c->cri_next) {
++		if (c->cri_alg == CRYPTO_MD5 ||
++		    c->cri_alg == CRYPTO_MD5_HMAC ||
++		    c->cri_alg == CRYPTO_SHA1 ||
++		    c->cri_alg == CRYPTO_SHA1_HMAC ||
++		    c->cri_alg == CRYPTO_NULL_HMAC) {
++			if (macini)
++				return EINVAL;
++			macini = c;
++		} else if (c->cri_alg == CRYPTO_DES_CBC ||
++		    c->cri_alg == CRYPTO_3DES_CBC ||
++		    c->cri_alg == CRYPTO_AES_CBC ||
++		    c->cri_alg == CRYPTO_NULL_CBC) {
++			if (encini)
++				return EINVAL;
++			encini = c;
++		} else {
++			DPRINTF("UNKNOWN c->cri_alg %d\n", encini->cri_alg);
++			return EINVAL;
++		}
++	}
++	if (encini == NULL && macini == NULL)
++		return EINVAL;
++	if (encini) {	
++		/* validate key length */
++		switch (encini->cri_alg) {
++		case CRYPTO_DES_CBC:
++			if (encini->cri_klen != 64)
++				return EINVAL;
++			break;
++		case CRYPTO_3DES_CBC:
++			if (encini->cri_klen != 192) {
++				return EINVAL;
++			}
++			break;
++		case CRYPTO_AES_CBC:
++			if (encini->cri_klen != 128 &&
++			    encini->cri_klen != 192 &&
++			    encini->cri_klen != 256)
++				return EINVAL;
++			break;
++		default:
++			DPRINTF("UNKNOWN encini->cri_alg %d\n", 
++				encini->cri_alg);
++			return EINVAL;
++		}
++	}
++
++	if (sc->sc_sessions == NULL) {
++		ses = sc->sc_sessions = (struct talitos_session *)
++			kmalloc(sizeof(struct talitos_session), SLAB_ATOMIC);
++		if (ses == NULL)
++			return ENOMEM;
++		memset(ses, 0, sizeof(struct talitos_session));
++		sesn = 0;
++		sc->sc_nsessions = 1;
++	} else {
++		for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
++			if (sc->sc_sessions[sesn].ses_used == 0) {
++				ses = &sc->sc_sessions[sesn];
++				break;
++			}
++		}
++
++		if (ses == NULL) {
++			/* allocating session */
++			sesn = sc->sc_nsessions;
++			ses = (struct talitos_session *) kmalloc(
++				(sesn + 1) * sizeof(struct talitos_session), 
++				SLAB_ATOMIC);
++			if (ses == NULL)
++				return ENOMEM;
++			memset(ses, 0,
++				(sesn + 1) * sizeof(struct talitos_session));
++			memcpy(ses, sc->sc_sessions, 
++				sesn * sizeof(struct talitos_session));
++			memset(sc->sc_sessions, 0,
++				sesn * sizeof(struct talitos_session));
++			kfree(sc->sc_sessions);
++			sc->sc_sessions = ses;
++			ses = &sc->sc_sessions[sesn];
++			sc->sc_nsessions++;
++		}
++	}
++
++	ses->ses_used = 1;
++
++	if (encini) {
++		/* get an IV */
++		/* XXX may read fewer than requested */
++		read_random(ses->ses_iv, sizeof(ses->ses_iv));
++
++		ses->ses_klen = (encini->cri_klen + 7) / 8;
++		memcpy(ses->ses_key, encini->cri_key, ses->ses_klen);
++		if (macini) {
++			/* doing hash on top of cipher */
++			ses->ses_hmac_len = (macini->cri_klen + 7) / 8;
++			memcpy(ses->ses_hmac, macini->cri_key,
++				ses->ses_hmac_len);
++		}
++	} else if (macini) {
++		/* doing hash */
++		ses->ses_klen = (macini->cri_klen + 7) / 8;
++		memcpy(ses->ses_key, macini->cri_key, ses->ses_klen);
++	}
++
++	/* back compat way of determining MSC result len */
++	if (macini) {
++		ses->ses_mlen = macini->cri_mlen;
++		if (ses->ses_mlen == 0) {
++			if (macini->cri_alg == CRYPTO_MD5_HMAC)
++				ses->ses_mlen = MD5_HASH_LEN;
++			else
++				ses->ses_mlen = SHA1_HASH_LEN;
++		}
++	}
++
++	/* really should make up a template td here, 
++	 * and only fill things like i/o and direction in process() */
++
++	/* assign session ID */
++	*sidp = TALITOS_SID(sc->sc_num, sesn);
++	return 0;
++}
++
++/*
++ * Deallocate a session.
++ */
++static int
++talitos_freesession(device_t dev, u_int64_t tid)
++{
++	struct talitos_softc *sc = device_get_softc(dev);
++	int session, ret;
++	u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;
++
++	if (sc == NULL)
++		return EINVAL;
++	session = TALITOS_SESSION(sid);
++	if (session < sc->sc_nsessions) {
++		memset(&sc->sc_sessions[session], 0,
++			sizeof(sc->sc_sessions[session]));
++		ret = 0;
++	} else
++		ret = EINVAL;
++	return ret;
++}
++
++/*
++ * launch device processing - it will come back with done notification 
++ * in the form of an interrupt and/or HDR_DONE_BITS in header 
++ */
++static int 
++talitos_submit(
++	struct talitos_softc *sc,
++	struct talitos_desc *td,
++	int chsel)
++{
++	u_int32_t v;
++
++	v = dma_map_single(NULL, td, sizeof(*td), DMA_TO_DEVICE);
++	talitos_write(sc->sc_base_addr + 
++		chsel*TALITOS_CH_OFFSET + TALITOS_CH_FF, 0);
++	talitos_write(sc->sc_base_addr + 
++		chsel*TALITOS_CH_OFFSET + TALITOS_CH_FF_HI, v);
++	return 0;
++}
++
++static int
++talitos_process(device_t dev, struct cryptop *crp, int hint)
++{
++	int i, err = 0, ivsize;
++	struct talitos_softc *sc = device_get_softc(dev);
++	struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
++	caddr_t iv;
++	struct talitos_session *ses;
++	struct talitos_desc *td;
++	unsigned long flags;
++	/* descriptor mappings */
++	int hmac_key, hmac_data, cipher_iv, cipher_key, 
++		in_fifo, out_fifo, cipher_iv_out;
++	static int chsel = -1;
++
++	DPRINTF("%s()\n", __FUNCTION__);
++
++	if (crp == NULL || crp->crp_callback == NULL || sc == NULL) {
++		return EINVAL;
++	}
++	crp->crp_etype = 0;
++	if (TALITOS_SESSION(crp->crp_sid) >= sc->sc_nsessions) {
++		return EINVAL;
++	}
++
++	ses = &sc->sc_sessions[TALITOS_SESSION(crp->crp_sid)];
++
++        /* enter the channel scheduler */ 
++	spin_lock_irqsave(&sc->sc_chnfifolock[sc->sc_num_channels], flags);
++
++	/* reuse channel that already had/has requests for the required EU */
++	for (i = 0; i < sc->sc_num_channels; i++) {
++		if (sc->sc_chnlastalg[i] == crp->crp_desc->crd_alg)
++			break;
++	}
++	if (i == sc->sc_num_channels) {
++		/*
++		 * haven't seen this algo the last sc_num_channels or more
++		 * use round robin in this case
++	 	 * nb: sc->sc_num_channels must be power of 2 
++		 */
++		chsel = (chsel + 1) & (sc->sc_num_channels - 1);
++	} else {
++		/*
++		 * matches channel with same target execution unit; 
++		 * use same channel in this case
++		 */
++		chsel = i;
++	}
++	sc->sc_chnlastalg[chsel] = crp->crp_desc->crd_alg;
++
++        /* release the channel scheduler lock */ 
++	spin_unlock_irqrestore(&sc->sc_chnfifolock[sc->sc_num_channels], flags);
++
++	/* acquire the selected channel fifo lock */
++	spin_lock_irqsave(&sc->sc_chnfifolock[chsel], flags);
++
++	/* find and reserve next available descriptor-cryptop pair */
++	for (i = 0; i < sc->sc_chfifo_len; i++) {
++		if (sc->sc_chnfifo[chsel][i].cf_desc.hdr == 0) {
++			/* 
++			 * ensure correct descriptor formation by
++			 * avoiding inadvertently setting "optional" entries
++			 * e.g. not using "optional" dptr2 for MD/HMAC descs
++			 */
++			memset(&sc->sc_chnfifo[chsel][i].cf_desc,
++				0, sizeof(*td));
++			/* reserve it with done notification request bit */
++			sc->sc_chnfifo[chsel][i].cf_desc.hdr |= 
++				TALITOS_DONE_NOTIFY;
++			break;
++		}
++	}
++	spin_unlock_irqrestore(&sc->sc_chnfifolock[chsel], flags);
++
++	if (i == sc->sc_chfifo_len) {
++		/* fifo full */
++		err = ERESTART;
++		goto errout;
++	}
++	
++	td = &sc->sc_chnfifo[chsel][i].cf_desc;
++	sc->sc_chnfifo[chsel][i].cf_crp = crp;
++
++	crd1 = crp->crp_desc;
++	if (crd1 == NULL) {
++		err = EINVAL;
++		goto errout;
++	}
++	crd2 = crd1->crd_next;
++	/* prevent compiler warning */
++	hmac_key = 0;
++	hmac_data = 0;
++	if (crd2 == NULL) {
++		td->hdr |= TD_TYPE_COMMON_NONSNOOP_NO_AFEU;
++		/* assign descriptor dword ptr mappings for this desc. type */
++		cipher_iv = 1;
++		cipher_key = 2;
++		in_fifo = 3;
++		cipher_iv_out = 5;
++		if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
++		    crd1->crd_alg == CRYPTO_SHA1_HMAC ||
++		    crd1->crd_alg == CRYPTO_SHA1 ||
++		    crd1->crd_alg == CRYPTO_MD5) {
++			out_fifo = 5;
++			maccrd = crd1;
++			enccrd = NULL;
++		} else if (crd1->crd_alg == CRYPTO_DES_CBC ||
++		    crd1->crd_alg == CRYPTO_3DES_CBC ||
++		    crd1->crd_alg == CRYPTO_AES_CBC ||
++		    crd1->crd_alg == CRYPTO_ARC4) {
++			out_fifo = 4;
++			maccrd = NULL;
++			enccrd = crd1;
++		} else {
++			DPRINTF("UNKNOWN crd1->crd_alg %d\n", crd1->crd_alg);
++			err = EINVAL;
++			goto errout;
++		}
++	} else {
++		if (sc->sc_desc_types & TALITOS_HAS_DT_IPSEC_ESP) {
++			td->hdr |= TD_TYPE_IPSEC_ESP;
++		} else {
++			DPRINTF("unimplemented: multiple descriptor ipsec\n");
++			err = EINVAL;
++			goto errout;
++		}
++		/* assign descriptor dword ptr mappings for this desc. type */
++		hmac_key = 0;
++		hmac_data = 1;
++		cipher_iv = 2;
++		cipher_key = 3;
++		in_fifo = 4;
++		out_fifo = 5;
++		cipher_iv_out = 6;
++		if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
++                     crd1->crd_alg == CRYPTO_SHA1_HMAC ||
++                     crd1->crd_alg == CRYPTO_MD5 ||
++                     crd1->crd_alg == CRYPTO_SHA1) &&
++		    (crd2->crd_alg == CRYPTO_DES_CBC ||
++		     crd2->crd_alg == CRYPTO_3DES_CBC ||
++		     crd2->crd_alg == CRYPTO_AES_CBC ||
++		     crd2->crd_alg == CRYPTO_ARC4) &&
++		    ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
++			maccrd = crd1;
++			enccrd = crd2;
++		} else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
++		     crd1->crd_alg == CRYPTO_ARC4 ||
++		     crd1->crd_alg == CRYPTO_3DES_CBC ||
++		     crd1->crd_alg == CRYPTO_AES_CBC) &&
++		    (crd2->crd_alg == CRYPTO_MD5_HMAC ||
++                     crd2->crd_alg == CRYPTO_SHA1_HMAC ||
++                     crd2->crd_alg == CRYPTO_MD5 ||
++                     crd2->crd_alg == CRYPTO_SHA1) &&
++		    (crd1->crd_flags & CRD_F_ENCRYPT)) {
++			enccrd = crd1;
++			maccrd = crd2;
++		} else {
++			/* We cannot order the SEC as requested */
++			printk("%s: cannot do the order\n",
++					device_get_nameunit(sc->sc_cdev));
++			err = EINVAL;
++			goto errout;
++		}
++	}
++	/* assign in_fifo and out_fifo based on input/output struct type */
++	if (crp->crp_flags & CRYPTO_F_SKBUF) {
++		/* using SKB buffers */
++		struct sk_buff *skb = (struct sk_buff *)crp->crp_buf;
++		if (skb_shinfo(skb)->nr_frags) {
++			printk("%s: skb frags unimplemented\n",
++					device_get_nameunit(sc->sc_cdev));
++			err = EINVAL;
++			goto errout;
++		}
++		td->ptr[in_fifo].ptr = dma_map_single(NULL, skb->data, 
++			skb->len, DMA_TO_DEVICE);
++		td->ptr[in_fifo].len = skb->len;
++		td->ptr[out_fifo].ptr = dma_map_single(NULL, skb->data, 
++			skb->len, DMA_TO_DEVICE);
++		td->ptr[out_fifo].len = skb->len;
++		td->ptr[hmac_data].ptr = dma_map_single(NULL, skb->data,
++			skb->len, DMA_TO_DEVICE);
++	} else if (crp->crp_flags & CRYPTO_F_IOV) {
++		/* using IOV buffers */
++		struct uio *uiop = (struct uio *)crp->crp_buf;
++		if (uiop->uio_iovcnt > 1) {
++			printk("%s: iov frags unimplemented\n",
++					device_get_nameunit(sc->sc_cdev));
++			err = EINVAL;
++			goto errout;
++		}
++		td->ptr[in_fifo].ptr = dma_map_single(NULL,
++			uiop->uio_iov->iov_base, crp->crp_ilen, DMA_TO_DEVICE);
++		td->ptr[in_fifo].len = crp->crp_ilen;
++		/* crp_olen is never set; always use crp_ilen */
++		td->ptr[out_fifo].ptr = dma_map_single(NULL,
++			uiop->uio_iov->iov_base,
++			crp->crp_ilen, DMA_TO_DEVICE);
++		td->ptr[out_fifo].len = crp->crp_ilen;
++	} else {
++		/* using contig buffers */
++		td->ptr[in_fifo].ptr = dma_map_single(NULL,
++			crp->crp_buf, crp->crp_ilen, DMA_TO_DEVICE);
++		td->ptr[in_fifo].len = crp->crp_ilen;
++		td->ptr[out_fifo].ptr = dma_map_single(NULL,
++			crp->crp_buf, crp->crp_ilen, DMA_TO_DEVICE);
++		td->ptr[out_fifo].len = crp->crp_ilen;
++	}
++	if (enccrd) {
++		switch (enccrd->crd_alg) {
++		case CRYPTO_3DES_CBC:
++			td->hdr |= TALITOS_MODE0_DEU_3DES;
++			/* FALLTHROUGH */
++		case CRYPTO_DES_CBC:
++			td->hdr |= TALITOS_SEL0_DEU
++				|  TALITOS_MODE0_DEU_CBC;
++			if (enccrd->crd_flags & CRD_F_ENCRYPT)
++				td->hdr |= TALITOS_MODE0_DEU_ENC;
++			ivsize = 2*sizeof(u_int32_t);
++			DPRINTF("%cDES ses %d ch %d len %d\n",
++				(td->hdr & TALITOS_MODE0_DEU_3DES)?'3':'1',
++				(u32)TALITOS_SESSION(crp->crp_sid),
++				chsel, td->ptr[in_fifo].len);
++			break;
++		case CRYPTO_AES_CBC:
++			td->hdr |= TALITOS_SEL0_AESU
++				|  TALITOS_MODE0_AESU_CBC;
++			if (enccrd->crd_flags & CRD_F_ENCRYPT)
++				td->hdr |= TALITOS_MODE0_AESU_ENC;
++			ivsize = 4*sizeof(u_int32_t);
++			DPRINTF("AES  ses %d ch %d len %d\n",
++				(u32)TALITOS_SESSION(crp->crp_sid),
++				chsel, td->ptr[in_fifo].len);
++			break;
++		default:
++			printk("%s: unimplemented enccrd->crd_alg %d\n",
++					device_get_nameunit(sc->sc_cdev), enccrd->crd_alg);
++			err = EINVAL;
++			goto errout;
++		}
++		/*
++		 * Setup encrypt/decrypt state.  When using basic ops
++		 * we can't use an inline IV because hash/crypt offset
++		 * must be from the end of the IV to the start of the
++		 * crypt data and this leaves out the preceding header
++		 * from the hash calculation.  Instead we place the IV
++		 * in the state record and set the hash/crypt offset to
++		 * copy both the header+IV.
++		 */
++		if (enccrd->crd_flags & CRD_F_ENCRYPT) {
++			td->hdr |= TALITOS_DIR_OUTBOUND; 
++			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
++				iv = enccrd->crd_iv;
++			else
++				iv = (caddr_t) ses->ses_iv;
++			if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) {
++				crypto_copyback(crp->crp_flags, crp->crp_buf,
++				    enccrd->crd_inject, ivsize, iv);
++			}
++		} else {
++			td->hdr |= TALITOS_DIR_INBOUND; 
++			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) {
++				iv = enccrd->crd_iv;
++				bcopy(enccrd->crd_iv, iv, ivsize);
++			} else {
++				iv = (caddr_t) ses->ses_iv;
++				crypto_copydata(crp->crp_flags, crp->crp_buf,
++				    enccrd->crd_inject, ivsize, iv);
++			}
++		}
++		td->ptr[cipher_iv].ptr = dma_map_single(NULL, iv, ivsize, 
++			DMA_TO_DEVICE);
++		td->ptr[cipher_iv].len = ivsize;
++		/*
++		 * we don't need the cipher iv out length/pointer
++		 * field to do ESP IPsec. Therefore we set the len field as 0,
++		 * which tells the SEC not to do anything with this len/ptr
++		 * field. Previously, when length/pointer as pointing to iv,
++		 * it gave us corruption of packets.
++		 */
++		td->ptr[cipher_iv_out].len = 0;
++	}
++	if (enccrd && maccrd) {
++		/* this is ipsec only for now */
++		td->hdr |= TALITOS_SEL1_MDEU
++			|  TALITOS_MODE1_MDEU_INIT
++			|  TALITOS_MODE1_MDEU_PAD;
++		switch (maccrd->crd_alg) {
++			case	CRYPTO_MD5:	
++				td->hdr |= TALITOS_MODE1_MDEU_MD5;
++				break;
++			case	CRYPTO_MD5_HMAC:	
++				td->hdr |= TALITOS_MODE1_MDEU_MD5_HMAC;
++				break;
++			case	CRYPTO_SHA1:	
++				td->hdr |= TALITOS_MODE1_MDEU_SHA1;
++				break;
++			case	CRYPTO_SHA1_HMAC:	
++				td->hdr |= TALITOS_MODE1_MDEU_SHA1_HMAC;
++				break;
++			default:
++				/* We cannot order the SEC as requested */
++				printk("%s: cannot do the order\n",
++						device_get_nameunit(sc->sc_cdev));
++				err = EINVAL;
++				goto errout;
++		}
++		if ((maccrd->crd_alg == CRYPTO_MD5_HMAC) ||
++		   (maccrd->crd_alg == CRYPTO_SHA1_HMAC)) {
++			/*
++			 * The offset from hash data to the start of
++			 * crypt data is the difference in the skips.
++			 */
++			/* ipsec only for now */
++			td->ptr[hmac_key].ptr = dma_map_single(NULL, 
++				ses->ses_hmac, ses->ses_hmac_len, DMA_TO_DEVICE);
++			td->ptr[hmac_key].len = ses->ses_hmac_len;
++			td->ptr[in_fifo].ptr  += enccrd->crd_skip;
++			td->ptr[in_fifo].len  =  enccrd->crd_len;
++			td->ptr[out_fifo].ptr += enccrd->crd_skip;
++			td->ptr[out_fifo].len =  enccrd->crd_len;
++			/* bytes of HMAC to postpend to ciphertext */
++			td->ptr[out_fifo].extent =  ses->ses_mlen;
++			td->ptr[hmac_data].ptr += maccrd->crd_skip; 
++			td->ptr[hmac_data].len = enccrd->crd_skip - maccrd->crd_skip;
++		}
++		if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT) {
++			printk("%s: CRD_F_KEY_EXPLICIT unimplemented\n",
++					device_get_nameunit(sc->sc_cdev));
++		}
++	}
++	if (!enccrd && maccrd) {
++		/* single MD5 or SHA */
++		td->hdr |= TALITOS_SEL0_MDEU
++				|  TALITOS_MODE0_MDEU_INIT
++				|  TALITOS_MODE0_MDEU_PAD;
++		switch (maccrd->crd_alg) {
++			case	CRYPTO_MD5:	
++				td->hdr |= TALITOS_MODE0_MDEU_MD5;
++				DPRINTF("MD5  ses %d ch %d len %d\n",
++					(u32)TALITOS_SESSION(crp->crp_sid), 
++					chsel, td->ptr[in_fifo].len);
++				break;
++			case	CRYPTO_MD5_HMAC:	
++				td->hdr |= TALITOS_MODE0_MDEU_MD5_HMAC;
++				break;
++			case	CRYPTO_SHA1:	
++				td->hdr |= TALITOS_MODE0_MDEU_SHA1;
++				DPRINTF("SHA1 ses %d ch %d len %d\n",
++					(u32)TALITOS_SESSION(crp->crp_sid), 
++					chsel, td->ptr[in_fifo].len);
++				break;
++			case	CRYPTO_SHA1_HMAC:	
++				td->hdr |= TALITOS_MODE0_MDEU_SHA1_HMAC;
++				break;
++			default:
++				/* We cannot order the SEC as requested */
++				DPRINTF("cannot do the order\n");
++				err = EINVAL;
++				goto errout;
++		}
++
++		if (crp->crp_flags & CRYPTO_F_IOV)
++			td->ptr[out_fifo].ptr += maccrd->crd_inject;
++
++		if ((maccrd->crd_alg == CRYPTO_MD5_HMAC) ||
++		   (maccrd->crd_alg == CRYPTO_SHA1_HMAC)) {
++			td->ptr[hmac_key].ptr = dma_map_single(NULL, 
++				ses->ses_hmac, ses->ses_hmac_len, 
++				DMA_TO_DEVICE);
++			td->ptr[hmac_key].len = ses->ses_hmac_len;
++		}
++	} 
++	else {
++		/* using process key (session data has duplicate) */
++		td->ptr[cipher_key].ptr = dma_map_single(NULL, 
++			enccrd->crd_key, (enccrd->crd_klen + 7) / 8, 
++			DMA_TO_DEVICE);
++		td->ptr[cipher_key].len = (enccrd->crd_klen + 7) / 8;
++	}
++	/* descriptor complete - GO! */
++	return talitos_submit(sc, td, chsel);
++
++errout:
++	if (err != ERESTART) {
++		crp->crp_etype = err;
++		crypto_done(crp);
++	}
++	return err;
++}
++
++/* go through all channels descriptors, notifying OCF what has 
++ * _and_hasn't_ successfully completed and reset the device 
++ * (otherwise it's up to decoding desc hdrs!)
++ */
++static void talitos_errorprocessing(struct talitos_softc *sc)
++{
++	unsigned long flags;
++	int i, j;
++
++	/* disable further scheduling until under control */
++	spin_lock_irqsave(&sc->sc_chnfifolock[sc->sc_num_channels], flags);
++
++	if (debug) dump_talitos_status(sc);
++	/* go through descriptors, try and salvage those successfully done, 
++	 * and EIO those that weren't
++	 */
++	for (i = 0; i < sc->sc_num_channels; i++) {
++		spin_lock_irqsave(&sc->sc_chnfifolock[i], flags);
++		for (j = 0; j < sc->sc_chfifo_len; j++) {
++			if (sc->sc_chnfifo[i][j].cf_desc.hdr) {
++				if ((sc->sc_chnfifo[i][j].cf_desc.hdr 
++					& TALITOS_HDR_DONE_BITS) 
++					!= TALITOS_HDR_DONE_BITS) {
++					/* this one didn't finish */
++					/* signify in crp->etype */
++					sc->sc_chnfifo[i][j].cf_crp->crp_etype 
++						= EIO;
++				}
++			} else
++				continue; /* free entry */
++			/* either way, notify ocf */
++			crypto_done(sc->sc_chnfifo[i][j].cf_crp);
++			/* and tag it available again
++			 *
++			 * memset to ensure correct descriptor formation by
++			 * avoiding inadvertently setting "optional" entries
++			 * e.g. not using "optional" dptr2 MD/HMAC processing
++			 */
++			memset(&sc->sc_chnfifo[i][j].cf_desc,
++				0, sizeof(struct talitos_desc));
++		}
++		spin_unlock_irqrestore(&sc->sc_chnfifolock[i], flags);
++	}
++	/* reset and initialize the SEC h/w device */
++	talitos_reset_device(sc);
++	talitos_init_device(sc);
++#ifdef CONFIG_OCF_RANDOMHARVEST
++	if (sc->sc_exec_units & TALITOS_HAS_EU_RNG)
++		talitos_rng_init(sc);
++#endif
++
++	/* Okay. Stand by. */
++	spin_unlock_irqrestore(&sc->sc_chnfifolock[sc->sc_num_channels], flags);
++
++	return;
++}
++
++/* go through all channels descriptors, notifying OCF what's been done */
++static void talitos_doneprocessing(struct talitos_softc *sc)
++{
++	unsigned long flags;
++	int i, j;
++
++	/* go through descriptors looking for done bits */
++	for (i = 0; i < sc->sc_num_channels; i++) {
++		spin_lock_irqsave(&sc->sc_chnfifolock[i], flags);
++		for (j = 0; j < sc->sc_chfifo_len; j++) {
++			/* descriptor has done bits set? */
++			if ((sc->sc_chnfifo[i][j].cf_desc.hdr 
++				& TALITOS_HDR_DONE_BITS) 
++				== TALITOS_HDR_DONE_BITS) {
++				/* notify ocf */
++				crypto_done(sc->sc_chnfifo[i][j].cf_crp);
++				/* and tag it available again
++				 *
++				 * memset to ensure correct descriptor formation by
++				 * avoiding inadvertently setting "optional" entries
++				 * e.g. not using "optional" dptr2 MD/HMAC processing
++				 */
++				memset(&sc->sc_chnfifo[i][j].cf_desc,
++					0, sizeof(struct talitos_desc));
++			}
++		}
++		spin_unlock_irqrestore(&sc->sc_chnfifolock[i], flags);
++	}
++	return;
++}
++
++static irqreturn_t
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
++talitos_intr(int irq, void *arg)
++#else
++talitos_intr(int irq, void *arg, struct pt_regs *regs)
++#endif
++{
++	struct talitos_softc *sc = arg;
++	u_int32_t v, v_hi;
++	
++	/* ack */
++	v = talitos_read(sc->sc_base_addr + TALITOS_ISR);
++	v_hi = talitos_read(sc->sc_base_addr + TALITOS_ISR_HI);
++	talitos_write(sc->sc_base_addr + TALITOS_ICR, v);
++	talitos_write(sc->sc_base_addr + TALITOS_ICR_HI, v_hi);
++
++	if (unlikely(v & TALITOS_ISR_ERROR)) {
++		/* Okay, Houston, we've had a problem here. */
++		printk(KERN_DEBUG "%s: got error interrupt - ISR 0x%08x_%08x\n",
++				device_get_nameunit(sc->sc_cdev), v, v_hi);
++		talitos_errorprocessing(sc);
++	} else
++	if (likely(v & TALITOS_ISR_DONE)) {
++		talitos_doneprocessing(sc);
++	}
++	return IRQ_HANDLED;
++}
++
++/*
++ * Initialize registers we need to touch only once.
++ */
++static void
++talitos_init_device(struct talitos_softc *sc)
++{
++	u_int32_t v;
++	int i;
++
++	DPRINTF("%s()\n", __FUNCTION__);
++
++	/* init all channels */
++	for (i = 0; i < sc->sc_num_channels; i++) {
++		v = talitos_read(sc->sc_base_addr + 
++			i*TALITOS_CH_OFFSET + TALITOS_CH_CCCR_HI);
++		v |= TALITOS_CH_CCCR_HI_CDWE
++		  |  TALITOS_CH_CCCR_HI_CDIE;  /* invoke interrupt if done */
++		talitos_write(sc->sc_base_addr + 
++			i*TALITOS_CH_OFFSET + TALITOS_CH_CCCR_HI, v);
++	}
++	/* enable all interrupts */
++	v = talitos_read(sc->sc_base_addr + TALITOS_IMR);
++	v |= TALITOS_IMR_ALL;
++	talitos_write(sc->sc_base_addr + TALITOS_IMR, v);
++	v = talitos_read(sc->sc_base_addr + TALITOS_IMR_HI);
++	v |= TALITOS_IMR_HI_ERRONLY;
++	talitos_write(sc->sc_base_addr + TALITOS_IMR_HI, v);
++	return;
++}
++
++/*
++ * set the master reset bit on the device.
++ */
++static void
++talitos_reset_device_master(struct talitos_softc *sc)
++{
++	u_int32_t v;
++
++	/* Reset the device by writing 1 to MCR:SWR and waiting 'til cleared */
++	v = talitos_read(sc->sc_base_addr + TALITOS_MCR);
++	talitos_write(sc->sc_base_addr + TALITOS_MCR, v | TALITOS_MCR_SWR);
++
++	while (talitos_read(sc->sc_base_addr + TALITOS_MCR) & TALITOS_MCR_SWR)
++		cpu_relax();
++
++	return;
++}
++
++/*
++ * Resets the device.  Values in the registers are left as is
++ * from the reset (i.e. initial values are assigned elsewhere).
++ */
++static void
++talitos_reset_device(struct talitos_softc *sc)
++{
++	u_int32_t v;
++	int i;
++
++	DPRINTF("%s()\n", __FUNCTION__);
++
++	/*
++	 * Master reset
++	 * errata documentation: warning: certain SEC interrupts 
++	 * are not fully cleared by writing the MCR:SWR bit, 
++	 * set bit twice to completely reset 
++	 */
++	talitos_reset_device_master(sc);	/* once */
++	talitos_reset_device_master(sc);	/* and once again */
++	
++	/* reset all channels */
++	for (i = 0; i < sc->sc_num_channels; i++) {
++		v = talitos_read(sc->sc_base_addr + i*TALITOS_CH_OFFSET +
++			TALITOS_CH_CCCR);
++		talitos_write(sc->sc_base_addr + i*TALITOS_CH_OFFSET +
++			TALITOS_CH_CCCR, v | TALITOS_CH_CCCR_RESET);
++	}
++}
++
++/* Set up the crypto device structure, private data,
++ * and anything else we need before we start */
++#ifdef CONFIG_PPC_MERGE
++static int talitos_probe(struct of_device *ofdev, const struct of_device_id *match)
++#else
++static int talitos_probe(struct platform_device *pdev)
++#endif
++{
++	struct talitos_softc *sc = NULL;
++	struct resource *r;
++#ifdef CONFIG_PPC_MERGE
++	struct device *device = &ofdev->dev;
++	struct device_node *np = ofdev->node;
++	const unsigned int *prop;
++	int err;
++	struct resource res;
++#endif
++	static int num_chips = 0;
++	int rc;
++	int i;
++
++	DPRINTF("%s()\n", __FUNCTION__);
++
++	sc = (struct talitos_softc *) kmalloc(sizeof(*sc), GFP_KERNEL);
++	if (!sc)
++		return -ENOMEM;
++	memset(sc, 0, sizeof(*sc));
++
++	softc_device_init(sc, DRV_NAME, num_chips, talitos_methods);
++
++	sc->sc_irq = -1;
++	sc->sc_cid = -1;
++#ifndef CONFIG_PPC_MERGE
++	sc->sc_dev = pdev;
++#endif
++	sc->sc_num = num_chips++;
++
++#ifdef CONFIG_PPC_MERGE
++	dev_set_drvdata(device, sc);
++#else
++	platform_set_drvdata(sc->sc_dev, sc);
++#endif
++
++	/* get the irq line */
++#ifdef CONFIG_PPC_MERGE
++	err = of_address_to_resource(np, 0, &res);
++	if (err)
++		return -EINVAL;
++	r = &res;
++
++	sc->sc_irq = irq_of_parse_and_map(np, 0);
++#else
++	/* get a pointer to the register memory */
++	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++
++	sc->sc_irq = platform_get_irq(pdev, 0);
++#endif
++	rc = request_irq(sc->sc_irq, talitos_intr, 0,
++			device_get_nameunit(sc->sc_cdev), sc);
++	if (rc) {
++		printk(KERN_ERR "%s: failed to hook irq %d\n", 
++				device_get_nameunit(sc->sc_cdev), sc->sc_irq);
++		sc->sc_irq = -1;
++		goto out;
++	}
++
++	sc->sc_base_addr = (ocf_iomem_t) ioremap(r->start, (r->end - r->start));
++	if (!sc->sc_base_addr) {
++		printk(KERN_ERR "%s: failed to ioremap\n",
++				device_get_nameunit(sc->sc_cdev));
++		goto out;
++	}
++
++	/* figure out our SEC's properties and capabilities */
++	sc->sc_chiprev = (u64)talitos_read(sc->sc_base_addr + TALITOS_ID) << 32
++		 | talitos_read(sc->sc_base_addr + TALITOS_ID_HI);
++	DPRINTF("sec id 0x%llx\n", sc->sc_chiprev);
++
++#ifdef CONFIG_PPC_MERGE
++	/* get SEC properties from device tree, defaulting to SEC 2.0 */
++
++	prop = of_get_property(np, "num-channels", NULL);
++	sc->sc_num_channels = prop ? *prop : TALITOS_NCHANNELS_SEC_2_0;
++
++	prop = of_get_property(np, "channel-fifo-len", NULL);
++	sc->sc_chfifo_len = prop ? *prop : TALITOS_CHFIFOLEN_SEC_2_0;
++
++	prop = of_get_property(np, "exec-units-mask", NULL);
++	sc->sc_exec_units = prop ? *prop : TALITOS_HAS_EUS_SEC_2_0;
++
++	prop = of_get_property(np, "descriptor-types-mask", NULL);
++	sc->sc_desc_types = prop ? *prop : TALITOS_HAS_DESCTYPES_SEC_2_0;
++#else
++	/* bulk should go away with openfirmware flat device tree support */
++	if (sc->sc_chiprev & TALITOS_ID_SEC_2_0) {
++		sc->sc_num_channels = TALITOS_NCHANNELS_SEC_2_0;
++		sc->sc_chfifo_len = TALITOS_CHFIFOLEN_SEC_2_0;
++		sc->sc_exec_units = TALITOS_HAS_EUS_SEC_2_0;
++		sc->sc_desc_types = TALITOS_HAS_DESCTYPES_SEC_2_0;
++	} else {
++		printk(KERN_ERR "%s: failed to id device\n",
++				device_get_nameunit(sc->sc_cdev));
++		goto out;
++	}
++#endif
++
++	/* + 1 is for the meta-channel lock used by the channel scheduler */
++	sc->sc_chnfifolock = (spinlock_t *) kmalloc(
++		(sc->sc_num_channels + 1) * sizeof(spinlock_t), GFP_KERNEL);
++	if (!sc->sc_chnfifolock)
++		goto out;
++	for (i = 0; i < sc->sc_num_channels + 1; i++) {
++		spin_lock_init(&sc->sc_chnfifolock[i]);
++	}
++
++	sc->sc_chnlastalg = (int *) kmalloc(
++		sc->sc_num_channels * sizeof(int), GFP_KERNEL);
++	if (!sc->sc_chnlastalg)
++		goto out;
++	memset(sc->sc_chnlastalg, 0, sc->sc_num_channels * sizeof(int));
++
++	sc->sc_chnfifo = (struct desc_cryptop_pair **) kmalloc(
++		sc->sc_num_channels * sizeof(struct desc_cryptop_pair *), 
++		GFP_KERNEL);
++	if (!sc->sc_chnfifo)
++		goto out;
++	for (i = 0; i < sc->sc_num_channels; i++) {
++		sc->sc_chnfifo[i] = (struct desc_cryptop_pair *) kmalloc(
++			sc->sc_chfifo_len * sizeof(struct desc_cryptop_pair), 
++			GFP_KERNEL);
++		if (!sc->sc_chnfifo[i])
++			goto out;
++		memset(sc->sc_chnfifo[i], 0, 
++			sc->sc_chfifo_len * sizeof(struct desc_cryptop_pair));
++	}
++
++	/* reset and initialize the SEC h/w device */
++	talitos_reset_device(sc);
++	talitos_init_device(sc);
++
++	sc->sc_cid = crypto_get_driverid(softc_get_device(sc),CRYPTOCAP_F_HARDWARE);
++	if (sc->sc_cid < 0) {
++		printk(KERN_ERR "%s: could not get crypto driver id\n",
++				device_get_nameunit(sc->sc_cdev));
++		goto out;
++	}
++
++	/* register algorithms with the framework */
++	printk("%s:", device_get_nameunit(sc->sc_cdev));
++
++	if (sc->sc_exec_units & TALITOS_HAS_EU_RNG)  {
++		printk(" rng");
++#ifdef CONFIG_OCF_RANDOMHARVEST
++		talitos_rng_init(sc);
++		crypto_rregister(sc->sc_cid, talitos_read_random, sc);
++#endif
++	}
++	if (sc->sc_exec_units & TALITOS_HAS_EU_DEU) {
++		printk(" des/3des");
++		crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
++		crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
++	}
++	if (sc->sc_exec_units & TALITOS_HAS_EU_AESU) {
++		printk(" aes");
++		crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
++	}
++	if (sc->sc_exec_units & TALITOS_HAS_EU_MDEU) {
++		printk(" md5");
++		crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0);
++		/* HMAC support only with IPsec for now */
++		crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
++		printk(" sha1");
++		crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0);
++		/* HMAC support only with IPsec for now */
++		crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
++	}
++	printk("\n");
++	return 0;
++
++out:
++#ifndef CONFIG_PPC_MERGE
++	talitos_remove(pdev);
++#endif
++	return -ENOMEM;
++}
++
++#ifdef CONFIG_PPC_MERGE
++static int talitos_remove(struct of_device *ofdev)
++#else
++static int talitos_remove(struct platform_device *pdev)
++#endif
++{
++#ifdef CONFIG_PPC_MERGE
++	struct talitos_softc *sc = dev_get_drvdata(&ofdev->dev);
++#else
++	struct talitos_softc *sc = platform_get_drvdata(pdev);
++#endif
++	int i;
++
++	DPRINTF("%s()\n", __FUNCTION__);
++	if (sc->sc_cid >= 0)
++		crypto_unregister_all(sc->sc_cid);
++	if (sc->sc_chnfifo) {
++		for (i = 0; i < sc->sc_num_channels; i++)
++			if (sc->sc_chnfifo[i])
++				kfree(sc->sc_chnfifo[i]);
++		kfree(sc->sc_chnfifo);
++	}
++	if (sc->sc_chnlastalg)
++		kfree(sc->sc_chnlastalg);
++	if (sc->sc_chnfifolock)
++		kfree(sc->sc_chnfifolock);
++	if (sc->sc_irq != -1)
++		free_irq(sc->sc_irq, sc);
++	if (sc->sc_base_addr)
++		iounmap((void *) sc->sc_base_addr);
++	kfree(sc);
++	return 0;
++}
++
++#ifdef CONFIG_PPC_MERGE
++static struct of_device_id talitos_match[] = {
++	{
++		.type = "crypto",
++		.compatible = "talitos",
++	},
++	{},
++};
++
++MODULE_DEVICE_TABLE(of, talitos_match);
++
++static struct of_platform_driver talitos_driver = {
++	.name		= DRV_NAME,
++	.match_table	= talitos_match,
++	.probe		= talitos_probe,
++	.remove		= talitos_remove,
++};
++
++static int __init talitos_init(void)
++{
++	return of_register_platform_driver(&talitos_driver);
++}
++
++static void __exit talitos_exit(void)
++{
++	of_unregister_platform_driver(&talitos_driver);
++}
++#else
++/* Structure for a platform device driver */
++static struct platform_driver talitos_driver = {
++	.probe = talitos_probe,
++	.remove = talitos_remove,
++	.driver = {
++		.name = "fsl-sec2",
++	}
++};
++
++static int __init talitos_init(void)
++{
++	return platform_driver_register(&talitos_driver);
++}
++
++static void __exit talitos_exit(void)
++{
++	platform_driver_unregister(&talitos_driver);
++}
++#endif
++
++module_init(talitos_init);
++module_exit(talitos_exit);
++
++MODULE_LICENSE("Dual BSD/GPL");
++MODULE_AUTHOR("kim.phillips@freescale.com");
++MODULE_DESCRIPTION("OCF driver for Freescale SEC (talitos)");
+diff -Nur linux-2.6.30.orig/crypto/ocf/talitos/talitos_dev.h linux-2.6.30/crypto/ocf/talitos/talitos_dev.h
+--- linux-2.6.30.orig/crypto/ocf/talitos/talitos_dev.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.30/crypto/ocf/talitos/talitos_dev.h	2009-06-11 10:55:27.000000000 +0200
+@@ -0,0 +1,277 @@
++/*
++ * Freescale SEC (talitos) device dependent data structures
++ *
++ * Copyright (c) 2006 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ *
++ * 1. Redistributions of source code must retain the above copyright
++ *    notice, this list of conditions and the following disclaimer.
++ * 2. Redistributions in binary form must reproduce the above copyright
++ *    notice, this list of conditions and the following disclaimer in the
++ *    documentation and/or other materials provided with the distribution.
++ * 3. The name of the author may not be used to endorse or promote products
++ *    derived from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
++ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
++ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
++ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
++ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
++ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ *
++ */
++
++/* device ID register values */
++#define TALITOS_ID_SEC_2_0	0x40
++#define TALITOS_ID_SEC_2_1	0x40 /* cross ref with IP block revision reg */
++
++/*
++ * following num_channels, channel-fifo-depth, exec-unit-mask, and 
++ * descriptor-types-mask are for forward-compatibility with openfirmware
++ * flat device trees
++ */
++
++/*
++ *  num_channels : the number of channels available in each SEC version.
++ */
++
++/* n.b. this driver requires these values be a power of 2 */
++#define TALITOS_NCHANNELS_SEC_1_0	4
++#define TALITOS_NCHANNELS_SEC_1_2	1
++#define TALITOS_NCHANNELS_SEC_2_0	4
++#define TALITOS_NCHANNELS_SEC_2_01	4
++#define TALITOS_NCHANNELS_SEC_2_1	4
++#define TALITOS_NCHANNELS_SEC_2_4	4
++
++/*
++ *  channel-fifo-depth : The number of descriptor
++ *  pointers a channel fetch fifo can hold.
++ */
++#define TALITOS_CHFIFOLEN_SEC_1_0	1
++#define TALITOS_CHFIFOLEN_SEC_1_2	1
++#define TALITOS_CHFIFOLEN_SEC_2_0	24
++#define TALITOS_CHFIFOLEN_SEC_2_01	24
++#define TALITOS_CHFIFOLEN_SEC_2_1	24
++#define TALITOS_CHFIFOLEN_SEC_2_4	24
++
++/* 
++ *  exec-unit-mask : The bitmask representing what Execution Units (EUs)
++ *  are available. EU information should be encoded following the SEC's 
++ *  EU_SEL0 bitfield documentation, i.e. as follows:
++ * 
++ *    bit 31 = set if SEC permits no-EU selection (should be always set)
++ *    bit 30 = set if SEC has the ARC4 EU (AFEU)
++ *    bit 29 = set if SEC has the des/3des EU (DEU)
++ *    bit 28 = set if SEC has the message digest EU (MDEU)
++ *    bit 27 = set if SEC has the random number generator EU (RNG)
++ *    bit 26 = set if SEC has the public key EU (PKEU)
++ *    bit 25 = set if SEC has the aes EU (AESU)
++ *    bit 24 = set if SEC has the Kasumi EU (KEU)
++ * 
++ */
++#define TALITOS_HAS_EU_NONE		(1<<0)
++#define TALITOS_HAS_EU_AFEU		(1<<1)
++#define TALITOS_HAS_EU_DEU		(1<<2)
++#define TALITOS_HAS_EU_MDEU		(1<<3)
++#define TALITOS_HAS_EU_RNG		(1<<4)
++#define TALITOS_HAS_EU_PKEU		(1<<5)
++#define TALITOS_HAS_EU_AESU		(1<<6)
++#define TALITOS_HAS_EU_KEU		(1<<7)
++
++/* the corresponding masks for each SEC version */
++#define TALITOS_HAS_EUS_SEC_1_0		0x7f
++#define TALITOS_HAS_EUS_SEC_1_2		0x4d
++#define TALITOS_HAS_EUS_SEC_2_0		0x7f
++#define TALITOS_HAS_EUS_SEC_2_01	0x7f
++#define TALITOS_HAS_EUS_SEC_2_1		0xff
++#define TALITOS_HAS_EUS_SEC_2_4		0x7f
++
++/*
++ *  descriptor-types-mask : The bitmask representing what descriptors
++ *  are available. Descriptor type information should be encoded 
++ *  following the SEC's Descriptor Header Dword DESC_TYPE field 
++ *  documentation, i.e. as follows:
++ *
++ *    bit 0  = set if SEC supports the aesu_ctr_nonsnoop desc. type
++ *    bit 1  = set if SEC supports the ipsec_esp descriptor type
++ *    bit 2  = set if SEC supports the common_nonsnoop desc. type
++ *    bit 3  = set if SEC supports the 802.11i AES ccmp desc. type
++ *    bit 4  = set if SEC supports the hmac_snoop_no_afeu desc. type
++ *    bit 5  = set if SEC supports the srtp descriptor type
++ *    bit 6  = set if SEC supports the non_hmac_snoop_no_afeu desc.type
++ *    bit 7  = set if SEC supports the pkeu_assemble descriptor type
++ *    bit 8  = set if SEC supports the aesu_key_expand_output desc.type
++ *    bit 9  = set if SEC supports the pkeu_ptmul descriptor type
++ *    bit 10 = set if SEC supports the common_nonsnoop_afeu desc. type
++ *    bit 11 = set if SEC supports the pkeu_ptadd_dbl descriptor type
++ *
++ *  ..and so on and so forth.
++ */
++#define TALITOS_HAS_DT_AESU_CTR_NONSNOOP	(1<<0)
++#define TALITOS_HAS_DT_IPSEC_ESP		(1<<1)
++#define TALITOS_HAS_DT_COMMON_NONSNOOP		(1<<2)
++
++/* the corresponding masks for each SEC version */
++#define TALITOS_HAS_DESCTYPES_SEC_2_0	0x01010ebf
++#define TALITOS_HAS_DESCTYPES_SEC_2_1	0x012b0ebf
++
++/* 
++ * a TALITOS_xxx_HI address points to the low data bits (32-63) of the register
++ */
++
++/* global register offset addresses */
++#define TALITOS_ID		0x1020
++#define TALITOS_ID_HI		0x1024
++#define TALITOS_MCR		0x1030		/* master control register */
++#define TALITOS_MCR_HI		0x1038		/* master control register */
++#define TALITOS_MCR_SWR		0x1
++#define TALITOS_IMR		0x1008		/* interrupt mask register */
++#define TALITOS_IMR_ALL		0x00010fff	/* enable all interrupts mask */
++#define TALITOS_IMR_ERRONLY	0x00010aaa	/* enable error interrupts */
++#define TALITOS_IMR_HI		0x100C		/* interrupt mask register */
++#define TALITOS_IMR_HI_ALL	0x00323333	/* enable all interrupts mask */
++#define TALITOS_IMR_HI_ERRONLY	0x00222222	/* enable error interrupts */
++#define TALITOS_ISR		0x1010		/* interrupt status register */
++#define TALITOS_ISR_ERROR	0x00010faa	/* errors mask */
++#define TALITOS_ISR_DONE	0x00000055	/* channel(s) done mask */
++#define TALITOS_ISR_HI		0x1014		/* interrupt status register */
++#define TALITOS_ICR		0x1018		/* interrupt clear register */
++#define TALITOS_ICR_HI		0x101C		/* interrupt clear register */
++
++/* channel register address stride */
++#define TALITOS_CH_OFFSET	0x100
++
++/* channel register offset addresses and bits */
++#define TALITOS_CH_CCCR		0x1108	/* Crypto-Channel Config Register */
++#define TALITOS_CH_CCCR_RESET	0x1	/* Channel Reset bit */
++#define TALITOS_CH_CCCR_HI	0x110c	/* Crypto-Channel Config Register */
++#define TALITOS_CH_CCCR_HI_CDWE	0x10	/* Channel done writeback enable bit */
++#define TALITOS_CH_CCCR_HI_NT	0x4	/* Notification type bit */
++#define TALITOS_CH_CCCR_HI_CDIE	0x2	/* Channel Done Interrupt Enable bit */
++#define TALITOS_CH_CCPSR	0x1110	/* Crypto-Channel Pointer Status Reg */
++#define TALITOS_CH_CCPSR_HI	0x1114	/* Crypto-Channel Pointer Status Reg */
++#define TALITOS_CH_FF		0x1148	/* Fetch FIFO */
++#define TALITOS_CH_FF_HI	0x114c	/* Fetch FIFO's FETCH_ADRS */
++#define TALITOS_CH_CDPR		0x1140	/* Crypto-Channel Pointer Status Reg */
++#define TALITOS_CH_CDPR_HI	0x1144	/* Crypto-Channel Pointer Status Reg */
++#define TALITOS_CH_DESCBUF	0x1180	/* (thru 11bf) Crypto-Channel 
++					 * Descriptor Buffer (debug) */
++
++/* execution unit register offset addresses and bits */
++#define TALITOS_DEUSR		0x2028	/* DEU status register */
++#define TALITOS_DEUSR_HI	0x202c	/* DEU status register */
++#define TALITOS_DEUISR		0x2030	/* DEU interrupt status register */
++#define TALITOS_DEUISR_HI	0x2034	/* DEU interrupt status register */
++#define TALITOS_DEUICR		0x2038	/* DEU interrupt control register */
++#define TALITOS_DEUICR_HI	0x203c	/* DEU interrupt control register */
++#define TALITOS_AESUISR		0x4030	/* AESU interrupt status register */
++#define TALITOS_AESUISR_HI	0x4034	/* AESU interrupt status register */
++#define TALITOS_AESUICR		0x4038	/* AESU interrupt control register */
++#define TALITOS_AESUICR_HI	0x403c	/* AESU interrupt control register */
++#define TALITOS_MDEUISR		0x6030	/* MDEU interrupt status register */
++#define TALITOS_MDEUISR_HI	0x6034	/* MDEU interrupt status register */
++#define TALITOS_RNGSR		0xa028	/* RNG status register */
++#define TALITOS_RNGSR_HI	0xa02c	/* RNG status register */
++#define TALITOS_RNGSR_HI_RD	0x1	/* RNG Reset done */
++#define TALITOS_RNGSR_HI_OFL	0xff0000/* number of dwords in RNG output FIFO*/
++#define TALITOS_RNGDSR		0xa010	/* RNG data size register */
++#define TALITOS_RNGDSR_HI	0xa014	/* RNG data size register */
++#define TALITOS_RNG_FIFO	0xa800	/* RNG FIFO - pool of random numbers */
++#define TALITOS_RNGISR		0xa030	/* RNG Interrupt status register */
++#define TALITOS_RNGISR_HI	0xa034	/* RNG Interrupt status register */
++#define TALITOS_RNGRCR		0xa018	/* RNG Reset control register */
++#define TALITOS_RNGRCR_HI	0xa01c	/* RNG Reset control register */
++#define TALITOS_RNGRCR_HI_SR	0x1	/* RNG RNGRCR:Software Reset */
++
++/* descriptor pointer entry */
++struct talitos_desc_ptr {
++	u16	len;		/* length */
++	u8	extent;		/* jump (to s/g link table) and extent */
++	u8	res;		/* reserved */
++	u32	ptr;		/* pointer */
++};
++
++/* descriptor */
++struct talitos_desc {
++	u32	hdr;				/* header */
++	u32	res;				/* reserved */
++	struct talitos_desc_ptr		ptr[7];	/* ptr/len pair array */
++};
++
++/* talitos descriptor header (hdr) bits */
++
++/* primary execution unit select */
++#define	TALITOS_SEL0_AFEU	0x10000000
++#define	TALITOS_SEL0_DEU	0x20000000
++#define	TALITOS_SEL0_MDEU	0x30000000
++#define	TALITOS_SEL0_RNG	0x40000000
++#define	TALITOS_SEL0_PKEU	0x50000000
++#define	TALITOS_SEL0_AESU	0x60000000
++
++/* primary execution unit mode (MODE0) and derivatives */
++#define	TALITOS_MODE0_AESU_CBC		0x00200000
++#define	TALITOS_MODE0_AESU_ENC		0x00100000
++#define	TALITOS_MODE0_DEU_CBC		0x00400000
++#define	TALITOS_MODE0_DEU_3DES		0x00200000
++#define	TALITOS_MODE0_DEU_ENC		0x00100000
++#define	TALITOS_MODE0_MDEU_INIT		0x01000000	/* init starting regs */
++#define	TALITOS_MODE0_MDEU_HMAC		0x00800000
++#define	TALITOS_MODE0_MDEU_PAD		0x00400000	/* PD */
++#define	TALITOS_MODE0_MDEU_MD5		0x00200000
++#define	TALITOS_MODE0_MDEU_SHA256	0x00100000
++#define	TALITOS_MODE0_MDEU_SHA1		0x00000000	/* SHA-160 */
++#define	TALITOS_MODE0_MDEU_MD5_HMAC	\
++		(TALITOS_MODE0_MDEU_MD5 | TALITOS_MODE0_MDEU_HMAC)
++#define	TALITOS_MODE0_MDEU_SHA256_HMAC	\
++		(TALITOS_MODE0_MDEU_SHA256 | TALITOS_MODE0_MDEU_HMAC)
++#define	TALITOS_MODE0_MDEU_SHA1_HMAC	\
++		(TALITOS_MODE0_MDEU_SHA1 | TALITOS_MODE0_MDEU_HMAC)
++
++/* secondary execution unit select (SEL1) */
++/* it's MDEU or nothing */
++#define	TALITOS_SEL1_MDEU	0x00030000
++
++/* secondary execution unit mode (MODE1) and derivatives */
++#define	TALITOS_MODE1_MDEU_INIT		0x00001000	/* init starting regs */
++#define	TALITOS_MODE1_MDEU_HMAC		0x00000800
++#define	TALITOS_MODE1_MDEU_PAD		0x00000400	/* PD */
++#define	TALITOS_MODE1_MDEU_MD5		0x00000200
++#define	TALITOS_MODE1_MDEU_SHA256	0x00000100
++#define	TALITOS_MODE1_MDEU_SHA1		0x00000000	/* SHA-160 */
++#define	TALITOS_MODE1_MDEU_MD5_HMAC	\
++	(TALITOS_MODE1_MDEU_MD5 | TALITOS_MODE1_MDEU_HMAC)
++#define	TALITOS_MODE1_MDEU_SHA256_HMAC	\
++	(TALITOS_MODE1_MDEU_SHA256 | TALITOS_MODE1_MDEU_HMAC)
++#define	TALITOS_MODE1_MDEU_SHA1_HMAC	\
++	(TALITOS_MODE1_MDEU_SHA1 | TALITOS_MODE1_MDEU_HMAC)
++
++/* direction of overall data flow (DIR) */
++#define	TALITOS_DIR_OUTBOUND	0x00000000
++#define	TALITOS_DIR_INBOUND	0x00000002
++
++/* done notification (DN) */
++#define	TALITOS_DONE_NOTIFY	0x00000001
++
++/* descriptor types */
++/* odd numbers here are valid on SEC2 and greater only (e.g. ipsec_esp) */
++#define TD_TYPE_AESU_CTR_NONSNOOP	(0 << 3)
++#define TD_TYPE_IPSEC_ESP		(1 << 3)
++#define TD_TYPE_COMMON_NONSNOOP_NO_AFEU	(2 << 3)
++#define TD_TYPE_HMAC_SNOOP_NO_AFEU	(4 << 3)
++
++#define TALITOS_HDR_DONE_BITS	0xff000000
++
++#define	DPRINTF(a...)	do { \
++						if (debug) { \
++							printk("%s: ", sc ? \
++								device_get_nameunit(sc->sc_cdev) : "talitos"); \
++							printk(a); \
++						} \
++					} while (0)
+diff -Nur linux-2.6.30.orig/crypto/ocf/talitos/talitos_soft.h linux-2.6.30/crypto/ocf/talitos/talitos_soft.h
+--- linux-2.6.30.orig/crypto/ocf/talitos/talitos_soft.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.30/crypto/ocf/talitos/talitos_soft.h	2009-06-11 10:55:27.000000000 +0200
+@@ -0,0 +1,77 @@
++/*
++ * Freescale SEC data structures for integration with ocf-linux
++ *
++ * Copyright (c) 2006 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ *
++ * 1. Redistributions of source code must retain the above copyright
++ *    notice, this list of conditions and the following disclaimer.
++ * 2. Redistributions in binary form must reproduce the above copyright
++ *    notice, this list of conditions and the following disclaimer in the
++ *    documentation and/or other materials provided with the distribution.
++ * 3. The name of the author may not be used to endorse or promote products
++ *    derived from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
++ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
++ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
++ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
++ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
++ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++/*
++ * paired descriptor and associated crypto operation
++ */
++struct desc_cryptop_pair {
++	struct talitos_desc	cf_desc;	/* descriptor ptr */
++	struct cryptop		*cf_crp;	/* cryptop ptr */
++};
++
++/*
++ * Holds data specific to a single talitos device.
++ */
++struct talitos_softc {
++	softc_device_decl	sc_cdev;
++	struct platform_device	*sc_dev;	/* device backpointer */
++	ocf_iomem_t		sc_base_addr;
++	int			sc_irq;
++	int			sc_num;		/* if we have multiple chips */
++	int32_t			sc_cid;		/* crypto tag */
++	u64			sc_chiprev;	/* major/minor chip revision */
++	int			sc_nsessions;
++	struct talitos_session	*sc_sessions;
++	int			sc_num_channels;/* number of crypto channels */
++	int			sc_chfifo_len;	/* channel fetch fifo len */
++	int			sc_exec_units;	/* execution units mask */
++	int			sc_desc_types;	/* descriptor types mask */
++	/*
++	 * mutual exclusion for intra-channel resources, e.g. fetch fifos
++	 * the last entry is a meta-channel lock used by the channel scheduler
++	 */
++	spinlock_t		*sc_chnfifolock;
++	/* sc_chnlastalgo contains last algorithm for that channel */
++	int			*sc_chnlastalg;
++	/* sc_chnfifo holds pending descriptor--crypto operation pairs */
++	struct desc_cryptop_pair	**sc_chnfifo;
++};
++
++struct talitos_session {
++	u_int32_t	ses_used;
++	u_int32_t	ses_klen;		/* key length in bits */
++	u_int32_t	ses_key[8];		/* DES/3DES/AES key */
++	u_int32_t	ses_hmac[5];		/* hmac inner state */
++	u_int32_t	ses_hmac_len;		/* hmac length */
++	u_int32_t	ses_iv[4];		/* DES/3DES/AES iv */
++	u_int32_t	ses_mlen;		/* desired hash result len (12=ipsec or 16) */
++};
++
++#define	TALITOS_SESSION(sid)	((sid) & 0x0fffffff)
++#define	TALITOS_SID(crd, sesn)	(((crd) << 28) | ((sesn) & 0x0fffffff))
+diff -Nur linux-2.6.30.orig/crypto/ocf/uio.h linux-2.6.30/crypto/ocf/uio.h
+--- linux-2.6.30.orig/crypto/ocf/uio.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.30/crypto/ocf/uio.h	2009-06-11 10:55:27.000000000 +0200
+@@ -0,0 +1,54 @@
++#ifndef _OCF_UIO_H_
++#define _OCF_UIO_H_
++
++#include <linux/uio.h>
++
++/*
++ * The linux uio.h doesn't have all we need.  To be fully api compatible
++ * with the BSD cryptodev,  we need to keep this around.  Perhaps this can
++ * be moved back into the linux/uio.h
++ *
++ * Linux port done by David McCullough <david_mccullough@securecomputing.com>
++ * Copyright (C) 2006-2007 David McCullough
++ * Copyright (C) 2004-2005 Intel Corporation.
++ *
++ * LICENSE TERMS
++ *
++ * The free distribution and use of this software in both source and binary
++ * form is allowed (with or without changes) provided that:
++ *
++ *   1. distributions of this source code include the above copyright
++ *      notice, this list of conditions and the following disclaimer;
++ *
++ *   2. distributions in binary form include the above copyright
++ *      notice, this list of conditions and the following disclaimer
++ *      in the documentation and/or other associated materials;
++ *
++ *   3. the copyright holder's name is not used to endorse products
++ *      built using this software without specific written permission.
++ *
++ * ALTERNATIVELY, provided that this notice is retained in full, this product
++ * may be distributed under the terms of the GNU General Public License (GPL),
++ * in which case the provisions of the GPL apply INSTEAD OF those given above.
++ *
++ * DISCLAIMER
++ *
++ * This software is provided 'as is' with no explicit or implied warranties
++ * in respect of its properties, including, but not limited to, correctness
++ * and/or fitness for purpose.
++ * ---------------------------------------------------------------------------
++ */
++
++struct uio {
++	struct	iovec *uio_iov;
++	int		uio_iovcnt;
++	off_t	uio_offset;
++	int		uio_resid;
++#if 0
++	enum	uio_seg uio_segflg;
++	enum	uio_rw uio_rw;
++	struct  thread *uio_td;
++#endif
++};
++
++#endif
+diff -Nur linux-2.6.30.orig/drivers/char/random.c linux-2.6.30/drivers/char/random.c
+--- linux-2.6.30.orig/drivers/char/random.c	2009-06-10 05:05:27.000000000 +0200
++++ linux-2.6.30/drivers/char/random.c	2009-06-11 10:55:27.000000000 +0200
+@@ -129,6 +129,9 @@
+  *                                unsigned int value);
+  * 	void add_interrupt_randomness(int irq);
+  *
++ *      void random_input_words(__u32 *buf, size_t wordcount, int ent_count)
++ *      int random_input_wait(void);
++ *
+  * add_input_randomness() uses the input layer interrupt timing, as well as
+  * the event type information from the hardware.
+  *
+@@ -140,6 +143,13 @@
+  * a better measure, since the timing of the disk interrupts are more
+  * unpredictable.
+  *
++ * random_input_words() just provides a raw block of entropy to the input
++ * pool, such as from a hardware entropy generator.
++ *
++ * random_input_wait() suspends the caller until such time as the
++ * entropy pool falls below the write threshold, and returns a count of how
++ * much entropy (in bits) is needed to sustain the pool.
++ *
+  * All of these routines try to estimate how many bits of randomness a
+  * particular randomness source.  They do this by keeping track of the
+  * first and second order deltas of the event timings.
+@@ -712,6 +722,61 @@
+ }
+ #endif
+ 
++/*
++ * random_input_words - add bulk entropy to pool
++ *
++ * @buf: buffer to add
++ * @wordcount: number of __u32 words to add
++ * @ent_count: total amount of entropy (in bits) to credit
++ *
++ * this provides bulk input of entropy to the input pool
++ *
++ */
++void random_input_words(__u32 *buf, size_t wordcount, int ent_count)
++{
++	mix_pool_bytes(&input_pool, buf, wordcount*4);
++
++	credit_entropy_bits(&input_pool, ent_count);
++
++	DEBUG_ENT("crediting %d bits => %d\n",
++		  ent_count, input_pool.entropy_count);
++	/*
++	 * Wake up waiting processes if we have enough
++	 * entropy.
++	 */
++	if (input_pool.entropy_count >= random_read_wakeup_thresh)
++		wake_up_interruptible(&random_read_wait);
++}
++EXPORT_SYMBOL(random_input_words);
++
++/*
++ * random_input_wait - wait until random needs entropy
++ *
++ * this function sleeps until the /dev/random subsystem actually
++ * needs more entropy, and then return the amount of entropy
++ * that it would be nice to have added to the system.
++ */
++int random_input_wait(void)
++{
++	int count;
++
++	wait_event_interruptible(random_write_wait, 
++			 input_pool.entropy_count < random_write_wakeup_thresh);
++
++	count = random_write_wakeup_thresh - input_pool.entropy_count;
++
++        /* likely we got woken up due to a signal */
++	if (count <= 0) count = random_read_wakeup_thresh; 
++
++	DEBUG_ENT("requesting %d bits from input_wait()er %d<%d\n",
++		  count,
++		  input_pool.entropy_count, random_write_wakeup_thresh);
++
++	return count;
++}
++EXPORT_SYMBOL(random_input_wait);
++
++
+ #define EXTRACT_SIZE 10
+ 
+ /*********************************************************************
+diff -Nur linux-2.6.30.orig/fs/fcntl.c linux-2.6.30/fs/fcntl.c
+--- linux-2.6.30.orig/fs/fcntl.c	2009-06-10 05:05:27.000000000 +0200
++++ linux-2.6.30/fs/fcntl.c	2009-06-11 10:55:27.000000000 +0200
+@@ -142,6 +142,7 @@
+ 	}
+ 	return ret;
+ }
++EXPORT_SYMBOL(sys_dup);
+ 
+ #define SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | O_DIRECT | O_NOATIME)
+ 
+diff -Nur linux-2.6.30.orig/include/linux/miscdevice.h linux-2.6.30/include/linux/miscdevice.h
+--- linux-2.6.30.orig/include/linux/miscdevice.h	2009-06-10 05:05:27.000000000 +0200
++++ linux-2.6.30/include/linux/miscdevice.h	2009-06-11 10:55:27.000000000 +0200
+@@ -12,6 +12,7 @@
+ #define APOLLO_MOUSE_MINOR	7
+ #define PC110PAD_MINOR		9
+ /*#define ADB_MOUSE_MINOR	10	FIXME OBSOLETE */
++#define CRYPTODEV_MINOR		70	/* /dev/crypto */
+ #define WATCHDOG_MINOR		130	/* Watchdog timer     */
+ #define TEMP_MINOR		131	/* Temperature Sensor */
+ #define RTC_MINOR		135
+diff -Nur linux-2.6.30.orig/include/linux/random.h linux-2.6.30/include/linux/random.h
+--- linux-2.6.30.orig/include/linux/random.h	2009-06-10 05:05:27.000000000 +0200
++++ linux-2.6.30/include/linux/random.h	2009-06-11 10:55:27.000000000 +0200
+@@ -34,6 +34,30 @@
+ /* Clear the entropy pool and associated counters.  (Superuser only.) */
+ #define RNDCLEARPOOL	_IO( 'R', 0x06 )
+ 
++#ifdef CONFIG_FIPS_RNG
++
++/* Size of seed value - equal to AES blocksize */
++#define AES_BLOCK_SIZE_BYTES	16
++#define SEED_SIZE_BYTES			AES_BLOCK_SIZE_BYTES
++/* Size of AES key */
++#define KEY_SIZE_BYTES		16
++
++/* ioctl() structure used by FIPS 140-2 Tests */
++struct rand_fips_test {
++	unsigned char key[KEY_SIZE_BYTES];			/* Input */
++	unsigned char datetime[SEED_SIZE_BYTES];	/* Input */
++	unsigned char seed[SEED_SIZE_BYTES];		/* Input */
++	unsigned char result[SEED_SIZE_BYTES];		/* Output */
++};
++
++/* FIPS 140-2 RNG Variable Seed Test. (Superuser only.) */
++#define RNDFIPSVST	_IOWR('R', 0x10, struct rand_fips_test)
++
++/* FIPS 140-2 RNG Monte Carlo Test. (Superuser only.) */
++#define RNDFIPSMCT	_IOWR('R', 0x11, struct rand_fips_test)
++
++#endif /* #ifdef CONFIG_FIPS_RNG */
++
+ struct rand_pool_info {
+ 	int	entropy_count;
+ 	int	buf_size;
+@@ -50,6 +74,10 @@
+ 				 unsigned int value);
+ extern void add_interrupt_randomness(int irq);
+ 
++extern void random_input_words(__u32 *buf, size_t wordcount, int ent_count);
++extern int random_input_wait(void);
++#define HAS_RANDOM_INPUT_WAIT 1
++
+ extern void get_random_bytes(void *buf, int nbytes);
+ void generate_random_uuid(unsigned char uuid_out[16]);
+ 

+ 20 - 0
target/linux/patches/2.6.33/startup.patch

@@ -0,0 +1,20 @@
+diff -Nur linux-2.6.32.orig/init/main.c linux-2.6.32/init/main.c
+--- linux-2.6.32.orig/init/main.c	2009-12-03 04:51:21.000000000 +0100
++++ linux-2.6.32/init/main.c	2010-01-31 11:01:41.154334301 +0100
+@@ -814,6 +814,8 @@
+ 	if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
+ 		printk(KERN_WARNING "Warning: unable to open an initial console.\n");
+ 
++	printk(KERN_WARNING "Starting Linux (build with OpenADK).\n");
++
+ 	(void) sys_dup(0);
+ 	(void) sys_dup(0);
+ 
+@@ -836,6 +838,7 @@
+ 		printk(KERN_WARNING "Failed to execute %s.  Attempting "
+ 					"defaults...\n", execute_command);
+ 	}
++	run_init_process("/init");
+ 	run_init_process("/sbin/init");
+ 	run_init_process("/etc/init");
+ 	run_init_process("/bin/init");

+ 1075 - 0
target/linux/patches/2.6.33/swconfig.patch

@@ -0,0 +1,1075 @@
+diff -Nur linux-2.6.30.orig/drivers/net/phy/Kconfig linux-2.6.30/drivers/net/phy/Kconfig
+--- linux-2.6.30.orig/drivers/net/phy/Kconfig	2009-06-10 05:05:27.000000000 +0200
++++ linux-2.6.30/drivers/net/phy/Kconfig	2009-06-11 09:22:50.000000000 +0200
+@@ -13,6 +13,12 @@
+ 
+ if PHYLIB
+ 
++config SWCONFIG
++	tristate "Switch configuration API"
++	---help---
++	  Switch configuration API using netlink. This allows
++	  you to configure the VLAN features of certain switches.
++
+ comment "MII PHY device drivers"
+ 
+ config MARVELL_PHY
+diff -Nur linux-2.6.30.orig/drivers/net/phy/Makefile linux-2.6.30/drivers/net/phy/Makefile
+--- linux-2.6.30.orig/drivers/net/phy/Makefile	2009-06-10 05:05:27.000000000 +0200
++++ linux-2.6.30/drivers/net/phy/Makefile	2009-06-11 09:22:50.000000000 +0200
+@@ -3,6 +3,7 @@
+ libphy-objs			:= phy.o phy_device.o mdio_bus.o
+ 
+ obj-$(CONFIG_PHYLIB)		+= libphy.o
++obj-$(CONFIG_SWCONFIG)		+= swconfig.o
+ obj-$(CONFIG_MARVELL_PHY)	+= marvell.o
+ obj-$(CONFIG_DAVICOM_PHY)	+= davicom.o
+ obj-$(CONFIG_CICADA_PHY)	+= cicada.o
+diff -Nur linux-2.6.30.orig/drivers/net/phy/swconfig.c linux-2.6.30/drivers/net/phy/swconfig.c
+--- linux-2.6.30.orig/drivers/net/phy/swconfig.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.30/drivers/net/phy/swconfig.c	2009-06-11 09:22:50.000000000 +0200
+@@ -0,0 +1,872 @@
++/*
++ * swconfig.c: Switch configuration API
++ *
++ * Copyright (C) 2008 Felix Fietkau <nbd@openwrt.org>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version 2
++ * of the License, or (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ */
++
++#include <linux/types.h>
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/list.h>
++#include <linux/if.h>
++#include <linux/if_ether.h>
++#include <linux/capability.h>
++#include <linux/skbuff.h>
++#include <linux/switch.h>
++
++//#define DEBUG 1
++#ifdef DEBUG
++#define DPRINTF(format, ...) printk("%s: " format, __func__, ##__VA_ARGS__)
++#else
++#define DPRINTF(...) do {} while(0)
++#endif
++
++MODULE_AUTHOR("Felix Fietkau <nbd@openwrt.org>");
++MODULE_LICENSE("GPL");
++
++static int swdev_id = 0;
++static struct list_head swdevs;
++static spinlock_t swdevs_lock = SPIN_LOCK_UNLOCKED;
++struct swconfig_callback;
++
++struct swconfig_callback
++{
++	struct sk_buff *msg;
++	struct genlmsghdr *hdr;
++	struct genl_info *info;
++	int cmd;
++
++	/* callback for filling in the message data */
++	int (*fill)(struct swconfig_callback *cb, void *arg);
++
++	/* callback for closing the message before sending it */
++	int (*close)(struct swconfig_callback *cb, void *arg);
++
++	struct nlattr *nest[4];
++	int args[4];
++};
++
++/* defaults */
++
++static int
++swconfig_get_vlan_ports(struct switch_dev *dev, struct switch_attr *attr, struct switch_val *val)
++{
++	int ret;
++	if (val->port_vlan >= dev->vlans)
++		return -EINVAL;
++
++	if (!dev->get_vlan_ports)
++		return -EOPNOTSUPP;
++
++	ret = dev->get_vlan_ports(dev, val);
++	printk("SET PORTS %d\n", val->len);
++	return ret;
++}
++
++static int
++swconfig_set_vlan_ports(struct switch_dev *dev, struct switch_attr *attr, struct switch_val *val)
++{
++	int i;
++
++	if (val->port_vlan >= dev->vlans)
++		return -EINVAL;
++
++	/* validate ports */
++	if (val->len > dev->ports)
++		return -EINVAL;
++
++	for (i = 0; i < val->len; i++) {
++		if (val->value.ports[i].id >= dev->ports)
++			return -EINVAL;
++	}
++
++	if (!dev->set_vlan_ports)
++		return -EOPNOTSUPP;
++
++	printk("SET PORTS %d\n", val->len);
++	return dev->set_vlan_ports(dev, val);
++}
++
++static int
++swconfig_apply_config(struct switch_dev *dev, struct switch_attr *attr, struct switch_val *val)
++{
++	/* don't complain if not supported by the switch driver */
++	if (!dev->apply_config)
++		return 0;
++
++	return dev->apply_config(dev);
++}
++
++
++enum global_defaults {
++	GLOBAL_APPLY,
++};
++
++enum vlan_defaults {
++	VLAN_PORTS,
++};
++
++enum port_defaults {
++	PORT_LINK,
++};
++
++static struct switch_attr default_global[] = {
++	[GLOBAL_APPLY] = {
++		.type = SWITCH_TYPE_NOVAL,
++		.name = "apply",
++		.description = "Activate changes in the hardware",
++		.set = swconfig_apply_config,
++	}
++};
++
++static struct switch_attr default_port[] = {
++	[PORT_LINK] = {
++		.type = SWITCH_TYPE_INT,
++		.name = "link",
++		.description = "Current link speed",
++	}
++};
++
++static struct switch_attr default_vlan[] = {
++	[VLAN_PORTS] = {
++		.type = SWITCH_TYPE_PORTS,
++		.name = "ports",
++		.description = "VLAN port mapping",
++		.set = swconfig_set_vlan_ports,
++		.get = swconfig_get_vlan_ports,
++	},
++};
++
++
++static void swconfig_defaults_init(struct switch_dev *dev)
++{
++	dev->def_global = 0;
++	dev->def_vlan = 0;
++	dev->def_port = 0;
++
++	if (dev->get_vlan_ports || dev->set_vlan_ports)
++		set_bit(VLAN_PORTS, &dev->def_vlan);
++
++	/* always present, can be no-op */
++	set_bit(GLOBAL_APPLY, &dev->def_global);
++}
++
++
++static struct genl_family switch_fam = {
++	.id = GENL_ID_GENERATE,
++	.name = "switch",
++	.hdrsize = 0,
++	.version = 1,
++	.maxattr = SWITCH_ATTR_MAX,
++};
++
++static const struct nla_policy switch_policy[SWITCH_ATTR_MAX+1] = {
++	[SWITCH_ATTR_ID] = { .type = NLA_U32 },
++	[SWITCH_ATTR_OP_ID] = { .type = NLA_U32 },
++	[SWITCH_ATTR_OP_PORT] = { .type = NLA_U32 },
++	[SWITCH_ATTR_OP_VLAN] = { .type = NLA_U32 },
++	[SWITCH_ATTR_OP_VALUE_INT] = { .type = NLA_U32 },
++	[SWITCH_ATTR_OP_VALUE_STR] = { .type = NLA_NUL_STRING },
++	[SWITCH_ATTR_OP_VALUE_PORTS] = { .type = NLA_NESTED },
++	[SWITCH_ATTR_TYPE] = { .type = NLA_U32 },
++};
++
++static const struct nla_policy port_policy[SWITCH_PORT_ATTR_MAX+1] = {
++	[SWITCH_PORT_ID] = { .type = NLA_U32 },
++	[SWITCH_PORT_FLAG_TAGGED] = { .type = NLA_FLAG },
++};
++
++static inline void
++swconfig_lock(void)
++{
++	spin_lock(&swdevs_lock);
++}
++
++static inline void
++swconfig_unlock(void)
++{
++	spin_unlock(&swdevs_lock);
++}
++
++static struct switch_dev *
++swconfig_get_dev(struct genl_info *info)
++{
++	struct switch_dev *dev = NULL;
++	struct switch_dev *p;
++	int id;
++
++	if (!info->attrs[SWITCH_ATTR_ID])
++		goto done;
++
++	id = nla_get_u32(info->attrs[SWITCH_ATTR_ID]);
++	swconfig_lock();
++	list_for_each_entry(p, &swdevs, dev_list) {
++		if (id != p->id)
++			continue;
++
++		dev = p;
++		break;
++	}
++	if (dev)
++		spin_lock(&dev->lock);
++	else
++		DPRINTF("device %d not found\n", id);
++	swconfig_unlock();
++done:
++	return dev;
++}
++
++static inline void
++swconfig_put_dev(struct switch_dev *dev)
++{
++	spin_unlock(&dev->lock);
++}
++
++static int
++swconfig_dump_attr(struct swconfig_callback *cb, void *arg)
++{
++	struct switch_attr *op = arg;
++	struct genl_info *info = cb->info;
++	struct sk_buff *msg = cb->msg;
++	int id = cb->args[0];
++	void *hdr;
++
++	hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq, &switch_fam,
++			NLM_F_MULTI, SWITCH_CMD_NEW_ATTR);
++	if (IS_ERR(hdr))
++		return -1;
++
++	NLA_PUT_U32(msg, SWITCH_ATTR_OP_ID, id);
++	NLA_PUT_U32(msg, SWITCH_ATTR_OP_TYPE, op->type);
++	NLA_PUT_STRING(msg, SWITCH_ATTR_OP_NAME, op->name);
++	if (op->description)
++		NLA_PUT_STRING(msg, SWITCH_ATTR_OP_DESCRIPTION,
++			op->description);
++
++	return genlmsg_end(msg, hdr);
++nla_put_failure:
++	genlmsg_cancel(msg, hdr);
++	return -EMSGSIZE;
++}
++
++/* spread multipart messages across multiple message buffers */
++static int
++swconfig_send_multipart(struct swconfig_callback *cb, void *arg)
++{
++	struct genl_info *info = cb->info;
++	int restart = 0;
++	int err;
++
++	do {
++		if (!cb->msg) {
++			cb->msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
++			if (cb->msg == NULL)
++				goto error;
++		}
++
++		if (!(cb->fill(cb, arg) < 0))
++			break;
++
++		/* fill failed, check if this was already the second attempt */
++		if (restart)
++			goto error;
++
++		/* try again in a new message, send the current one */
++		restart = 1;
++		if (cb->close) {
++			if (cb->close(cb, arg) < 0)
++				goto error;
++		}
++		err = genlmsg_unicast(cb->msg, info->snd_pid);
++		cb->msg = NULL;
++		if (err < 0)
++			goto error;
++
++	} while (restart);
++
++	return 0;
++
++error:
++	if (cb->msg)
++		nlmsg_free(cb->msg);
++	return -1;
++}
++
++static int
++swconfig_list_attrs(struct sk_buff *skb, struct genl_info *info)
++{
++	struct genlmsghdr *hdr = nlmsg_data(info->nlhdr);
++	const struct switch_attrlist *alist;
++	struct switch_dev *dev;
++	struct swconfig_callback cb;
++	int err = -EINVAL;
++	int i;
++
++	/* defaults */
++	struct switch_attr *def_list;
++	unsigned long *def_active;
++	int n_def;
++
++	dev = swconfig_get_dev(info);
++	if (!dev)
++		return -EINVAL;
++
++	switch(hdr->cmd) {
++	case SWITCH_CMD_LIST_GLOBAL:
++		alist = &dev->attr_global;
++		def_list = default_global;
++		def_active = &dev->def_global;
++		n_def = ARRAY_SIZE(default_global);
++		break;
++	case SWITCH_CMD_LIST_VLAN:
++		alist = &dev->attr_vlan;
++		def_list = default_vlan;
++		def_active = &dev->def_vlan;
++		n_def = ARRAY_SIZE(default_vlan);
++		break;
++	case SWITCH_CMD_LIST_PORT:
++		alist = &dev->attr_port;
++		def_list = default_port;
++		def_active = &dev->def_port;
++		n_def = ARRAY_SIZE(default_port);
++		break;
++	default:
++		WARN_ON(1);
++		goto out;
++	}
++
++	memset(&cb, 0, sizeof(cb));
++	cb.info = info;
++	cb.fill = swconfig_dump_attr;
++	for (i = 0; i < alist->n_attr; i++) {
++		if (alist->attr[i].disabled)
++			continue;
++		cb.args[0] = i;
++		err = swconfig_send_multipart(&cb, &alist->attr[i]);
++		if (err < 0)
++			goto error;
++	}
++
++	/* defaults */
++	for (i = 0; i < n_def; i++) {
++		if (!test_bit(i, def_active))
++			continue;
++		cb.args[0] = SWITCH_ATTR_DEFAULTS_OFFSET + i;
++		err = swconfig_send_multipart(&cb, &def_list[i]);
++		if (err < 0)
++			goto error;
++	}
++	swconfig_put_dev(dev);
++
++	if (!cb.msg)
++		return 0;
++
++	return genlmsg_unicast(cb.msg, info->snd_pid);
++
++error:
++	if (cb.msg)
++		nlmsg_free(cb.msg);
++out:
++	swconfig_put_dev(dev);
++	return err;
++}
++
++static struct switch_attr *
++swconfig_lookup_attr(struct switch_dev *dev, struct genl_info *info,
++		struct switch_val *val)
++{
++	struct genlmsghdr *hdr = nlmsg_data(info->nlhdr);
++	const struct switch_attrlist *alist;
++	struct switch_attr *attr = NULL;
++	int attr_id;
++
++	/* defaults */
++	struct switch_attr *def_list;
++	unsigned long *def_active;
++	int n_def;
++
++	if (!info->attrs[SWITCH_ATTR_OP_ID])
++		goto done;
++
++	switch(hdr->cmd) {
++	case SWITCH_CMD_SET_GLOBAL:
++	case SWITCH_CMD_GET_GLOBAL:
++		alist = &dev->attr_global;
++		def_list = default_global;
++		def_active = &dev->def_global;
++		n_def = ARRAY_SIZE(default_global);
++		break;
++	case SWITCH_CMD_SET_VLAN:
++	case SWITCH_CMD_GET_VLAN:
++		alist = &dev->attr_vlan;
++		def_list = default_vlan;
++		def_active = &dev->def_vlan;
++		n_def = ARRAY_SIZE(default_vlan);
++		if (!info->attrs[SWITCH_ATTR_OP_VLAN])
++			goto done;
++		val->port_vlan = nla_get_u32(info->attrs[SWITCH_ATTR_OP_VLAN]);
++		break;
++	case SWITCH_CMD_SET_PORT:
++	case SWITCH_CMD_GET_PORT:
++		alist = &dev->attr_port;
++		def_list = default_port;
++		def_active = &dev->def_port;
++		n_def = ARRAY_SIZE(default_port);
++		if (!info->attrs[SWITCH_ATTR_OP_PORT])
++			goto done;
++		val->port_vlan = nla_get_u32(info->attrs[SWITCH_ATTR_OP_PORT]);
++		break;
++	default:
++		WARN_ON(1);
++		goto done;
++	}
++
++	if (!alist)
++		goto done;
++
++	attr_id = nla_get_u32(info->attrs[SWITCH_ATTR_OP_ID]);
++	if (attr_id >= SWITCH_ATTR_DEFAULTS_OFFSET) {
++		attr_id -= SWITCH_ATTR_DEFAULTS_OFFSET;
++		if (attr_id >= n_def)
++			goto done;
++		if (!test_bit(attr_id, def_active))
++			goto done;
++		attr = &def_list[attr_id];
++	} else {
++		if (attr_id >= alist->n_attr)
++			goto done;
++		attr = &alist->attr[attr_id];
++	}
++
++	if (attr->disabled)
++		attr = NULL;
++
++done:
++	if (!attr)
++		DPRINTF("attribute lookup failed\n");
++	val->attr = attr;
++	return attr;
++}
++
++static int
++swconfig_parse_ports(struct sk_buff *msg, struct nlattr *head,
++		struct switch_val *val, int max)
++{
++	struct nlattr *nla;
++	int rem;
++
++	val->len = 0;
++	nla_for_each_nested(nla, head, rem) {
++		struct nlattr *tb[SWITCH_PORT_ATTR_MAX+1];
++		struct switch_port *port = &val->value.ports[val->len];
++
++		if (val->len >= max)
++			return -EINVAL;
++
++		if (nla_parse_nested(tb, SWITCH_PORT_ATTR_MAX, nla,
++				port_policy))
++			return -EINVAL;
++
++		if (!tb[SWITCH_PORT_ID])
++			return -EINVAL;
++
++		port->id = nla_get_u32(tb[SWITCH_PORT_ID]);
++		if (tb[SWITCH_PORT_FLAG_TAGGED])
++			port->flags |= (1 << SWITCH_PORT_FLAG_TAGGED);
++		val->len++;
++	}
++
++	return 0;
++}
++
++static int
++swconfig_set_attr(struct sk_buff *skb, struct genl_info *info)
++{
++	struct switch_attr *attr;
++	struct switch_dev *dev;
++	struct switch_val val;
++	int err = -EINVAL;
++
++	dev = swconfig_get_dev(info);
++	if (!dev)
++		return -EINVAL;
++
++	memset(&val, 0, sizeof(val));
++	attr = swconfig_lookup_attr(dev, info, &val);
++	if (!attr || !attr->set)
++		goto error;
++
++	val.attr = attr;
++	switch(attr->type) {
++	case SWITCH_TYPE_NOVAL:
++		break;
++	case SWITCH_TYPE_INT:
++		if (!info->attrs[SWITCH_ATTR_OP_VALUE_INT])
++			goto error;
++		val.value.i =
++			nla_get_u32(info->attrs[SWITCH_ATTR_OP_VALUE_INT]);
++		break;
++	case SWITCH_TYPE_STRING:
++		if (!info->attrs[SWITCH_ATTR_OP_VALUE_STR])
++			goto error;
++		val.value.s =
++			nla_data(info->attrs[SWITCH_ATTR_OP_VALUE_STR]);
++		break;
++	case SWITCH_TYPE_PORTS:
++		val.value.ports = dev->portbuf;
++		memset(dev->portbuf, 0,
++			sizeof(struct switch_port) * dev->ports);
++
++		/* TODO: implement multipart? */
++		if (info->attrs[SWITCH_ATTR_OP_VALUE_PORTS]) {
++			err = swconfig_parse_ports(skb,
++				info->attrs[SWITCH_ATTR_OP_VALUE_PORTS], &val, dev->ports);
++			if (err < 0)
++				goto error;
++		} else {
++			val.len = 0;
++			err = 0;
++		}
++		break;
++	default:
++		goto error;
++	}
++
++	err = attr->set(dev, attr, &val);
++error:
++	swconfig_put_dev(dev);
++	return err;
++}
++
++static int
++swconfig_close_portlist(struct swconfig_callback *cb, void *arg)
++{
++	if (cb->nest[0])
++		nla_nest_end(cb->msg, cb->nest[0]);
++	return 0;
++}
++
++static int
++swconfig_send_port(struct swconfig_callback *cb, void *arg)
++{
++	const struct switch_port *port = arg;
++	struct nlattr *p = NULL;
++
++	if (!cb->nest[0]) {
++		cb->nest[0] = nla_nest_start(cb->msg, cb->cmd);
++		if (!cb->nest[0])
++			return -1;
++	}
++
++	p = nla_nest_start(cb->msg, SWITCH_ATTR_PORT);
++	if (!p)
++		goto error;
++
++	NLA_PUT_U32(cb->msg, SWITCH_PORT_ID, port->id);
++	if (port->flags & (1 << SWITCH_PORT_FLAG_TAGGED))
++		NLA_PUT_FLAG(cb->msg, SWITCH_PORT_FLAG_TAGGED);
++
++	nla_nest_end(cb->msg, p);
++	return 0;
++
++nla_put_failure:
++		nla_nest_cancel(cb->msg, p);
++error:
++	nla_nest_cancel(cb->msg, cb->nest[0]);
++	return -1;
++}
++
++static int
++swconfig_send_ports(struct sk_buff **msg, struct genl_info *info, int attr,
++		const struct switch_val *val)
++{
++	struct swconfig_callback cb;
++	int err = 0;
++	int i;
++
++	if (!val->value.ports)
++		return -EINVAL;
++
++	memset(&cb, 0, sizeof(cb));
++	cb.cmd = attr;
++	cb.msg = *msg;
++	cb.info = info;
++	cb.fill = swconfig_send_port;
++	cb.close = swconfig_close_portlist;
++
++	cb.nest[0] = nla_nest_start(cb.msg, cb.cmd);
++	for (i = 0; i < val->len; i++) {
++		err = swconfig_send_multipart(&cb, &val->value.ports[i]);
++		if (err)
++			goto done;
++	}
++	err = val->len;
++	swconfig_close_portlist(&cb, NULL);
++	*msg = cb.msg;
++
++done:
++	return err;
++}
++
++static int
++swconfig_get_attr(struct sk_buff *skb, struct genl_info *info)
++{
++	struct genlmsghdr *hdr = nlmsg_data(info->nlhdr);
++	struct switch_attr *attr;
++	struct switch_dev *dev;
++	struct sk_buff *msg = NULL;
++	struct switch_val val;
++	int err = -EINVAL;
++	int cmd = hdr->cmd;
++
++	dev = swconfig_get_dev(info);
++	if (!dev)
++		return -EINVAL;
++
++	memset(&val, 0, sizeof(val));
++	attr = swconfig_lookup_attr(dev, info, &val);
++	if (!attr || !attr->get)
++		goto error_dev;
++
++	if (attr->type == SWITCH_TYPE_PORTS) {
++		val.value.ports = dev->portbuf;
++		memset(dev->portbuf, 0,
++			sizeof(struct switch_port) * dev->ports);
++	}
++
++	err = attr->get(dev, attr, &val);
++	if (err)
++		goto error;
++
++	msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
++	if (!msg)
++		goto error;
++
++	hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq, &switch_fam,
++			0, cmd);
++	if (IS_ERR(hdr))
++		goto nla_put_failure;
++
++	switch(attr->type) {
++	case SWITCH_TYPE_INT:
++		NLA_PUT_U32(msg, SWITCH_ATTR_OP_VALUE_INT, val.value.i);
++		break;
++	case SWITCH_TYPE_STRING:
++		NLA_PUT_STRING(msg, SWITCH_ATTR_OP_VALUE_STR, val.value.s);
++		break;
++	case SWITCH_TYPE_PORTS:
++		err = swconfig_send_ports(&msg, info,
++				SWITCH_ATTR_OP_VALUE_PORTS, &val);
++		if (err < 0)
++			goto nla_put_failure;
++		break;
++	default:
++		DPRINTF("invalid type in attribute\n");
++		err = -EINVAL;
++		goto error;
++	}
++	err = genlmsg_end(msg, hdr);
++	if (err < 0)
++		goto nla_put_failure;
++
++	swconfig_put_dev(dev);
++	return genlmsg_unicast(msg, info->snd_pid);
++
++nla_put_failure:
++	if (msg)
++		nlmsg_free(msg);
++error_dev:
++	swconfig_put_dev(dev);
++error:
++	if (!err)
++		err = -ENOMEM;
++	return err;
++}
++
++static int
++swconfig_send_switch(struct sk_buff *msg, u32 pid, u32 seq, int flags,
++		const struct switch_dev *dev)
++{
++	void *hdr;
++
++	hdr = genlmsg_put(msg, pid, seq, &switch_fam, flags,
++			SWITCH_CMD_NEW_ATTR);
++	if (IS_ERR(hdr))
++		return -1;
++
++	NLA_PUT_U32(msg, SWITCH_ATTR_ID, dev->id);
++	NLA_PUT_STRING(msg, SWITCH_ATTR_NAME, dev->name);
++	NLA_PUT_STRING(msg, SWITCH_ATTR_DEV_NAME, dev->devname);
++	NLA_PUT_U32(msg, SWITCH_ATTR_VLANS, dev->vlans);
++	NLA_PUT_U32(msg, SWITCH_ATTR_PORTS, dev->ports);
++
++	return genlmsg_end(msg, hdr);
++nla_put_failure:
++	genlmsg_cancel(msg, hdr);
++	return -EMSGSIZE;
++}
++
++static int swconfig_dump_switches(struct sk_buff *skb,
++		struct netlink_callback *cb)
++{
++	struct switch_dev *dev;
++	int start = cb->args[0];
++	int idx = 0;
++
++	swconfig_lock();
++	list_for_each_entry(dev, &swdevs, dev_list) {
++		if (++idx <= start)
++			continue;
++		if (swconfig_send_switch(skb, NETLINK_CB(cb->skb).pid,
++				cb->nlh->nlmsg_seq, NLM_F_MULTI,
++				dev) < 0)
++			break;
++	}
++	swconfig_unlock();
++	cb->args[0] = idx;
++
++	return skb->len;
++}
++
++static int
++swconfig_done(struct netlink_callback *cb)
++{
++	return 0;
++}
++
++static struct genl_ops swconfig_ops[] = {
++	{
++		.cmd = SWITCH_CMD_LIST_GLOBAL,
++		.doit = swconfig_list_attrs,
++		.policy = switch_policy,
++	},
++	{
++		.cmd = SWITCH_CMD_LIST_VLAN,
++		.doit = swconfig_list_attrs,
++		.policy = switch_policy,
++	},
++	{
++		.cmd = SWITCH_CMD_LIST_PORT,
++		.doit = swconfig_list_attrs,
++		.policy = switch_policy,
++	},
++	{
++		.cmd = SWITCH_CMD_GET_GLOBAL,
++		.doit = swconfig_get_attr,
++		.policy = switch_policy,
++	},
++	{
++		.cmd = SWITCH_CMD_GET_VLAN,
++		.doit = swconfig_get_attr,
++		.policy = switch_policy,
++	},
++	{
++		.cmd = SWITCH_CMD_GET_PORT,
++		.doit = swconfig_get_attr,
++		.policy = switch_policy,
++	},
++	{
++		.cmd = SWITCH_CMD_SET_GLOBAL,
++		.doit = swconfig_set_attr,
++		.policy = switch_policy,
++	},
++	{
++		.cmd = SWITCH_CMD_SET_VLAN,
++		.doit = swconfig_set_attr,
++		.policy = switch_policy,
++	},
++	{
++		.cmd = SWITCH_CMD_SET_PORT,
++		.doit = swconfig_set_attr,
++		.policy = switch_policy,
++	},
++	{
++		.cmd = SWITCH_CMD_GET_SWITCH,
++		.dumpit = swconfig_dump_switches,
++		.policy = switch_policy,
++		.done = swconfig_done,
++	}
++};
++
++int
++register_switch(struct switch_dev *dev, struct net_device *netdev)
++{
++	INIT_LIST_HEAD(&dev->dev_list);
++	if (netdev) {
++		dev->netdev = netdev;
++		if (!dev->devname)
++			dev->devname = netdev->name;
++	}
++	BUG_ON(!dev->devname);
++
++	if (dev->ports > 0) {
++		dev->portbuf = kzalloc(sizeof(struct switch_port) * dev->ports,
++				GFP_KERNEL);
++		if (!dev->portbuf)
++			return -ENOMEM;
++	}
++	dev->id = ++swdev_id;
++	swconfig_defaults_init(dev);
++	spin_lock_init(&dev->lock);
++	swconfig_lock();
++	list_add(&dev->dev_list, &swdevs);
++	swconfig_unlock();
++
++	return 0;
++}
++EXPORT_SYMBOL_GPL(register_switch);
++
++void
++unregister_switch(struct switch_dev *dev)
++{
++	kfree(dev->portbuf);
++	spin_lock(&dev->lock);
++	swconfig_lock();
++	list_del(&dev->dev_list);
++	swconfig_unlock();
++}
++EXPORT_SYMBOL_GPL(unregister_switch);
++
++
++static int __init
++swconfig_init(void)
++{
++	int i, err;
++
++	INIT_LIST_HEAD(&swdevs);
++	err = genl_register_family(&switch_fam);
++	if (err)
++		return err;
++
++	for (i = 0; i < ARRAY_SIZE(swconfig_ops); i++) {
++		err = genl_register_ops(&switch_fam, &swconfig_ops[i]);
++		if (err)
++			goto unregister;
++	}
++
++	return 0;
++
++unregister:
++	genl_unregister_family(&switch_fam);
++	return err;
++}
++
++static void __exit
++swconfig_exit(void)
++{
++	genl_unregister_family(&switch_fam);
++}
++
++module_init(swconfig_init);
++module_exit(swconfig_exit);
++
+diff -Nur linux-2.6.30.orig/include/linux/switch.h linux-2.6.30/include/linux/switch.h
+--- linux-2.6.30.orig/include/linux/switch.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.30/include/linux/switch.h	2009-06-11 09:22:50.000000000 +0200
+@@ -0,0 +1,168 @@
++/*
++ * switch.h: Switch configuration API
++ *
++ * Copyright (C) 2008 Felix Fietkau <nbd@openwrt.org>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version 2
++ * of the License, or (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ */
++
++#ifndef __LINUX_SWITCH_H
++#define __LINUX_SWITCH_H
++
++#include <linux/types.h>
++#include <linux/netdevice.h>
++#include <linux/netlink.h>
++#include <linux/genetlink.h>
++#ifndef __KERNEL__
++#include <netlink/netlink.h>
++#include <netlink/genl/genl.h>
++#include <netlink/genl/ctrl.h>
++#else
++#include <net/genetlink.h>
++#endif
++
++/* main attributes */
++enum {
++	SWITCH_ATTR_UNSPEC,
++	/* global */
++	SWITCH_ATTR_TYPE,
++	/* device */
++	SWITCH_ATTR_ID,
++	SWITCH_ATTR_NAME,
++	SWITCH_ATTR_DEV_NAME,
++	SWITCH_ATTR_VLANS,
++	SWITCH_ATTR_PORTS,
++	/* attributes */
++	SWITCH_ATTR_OP_ID,
++	SWITCH_ATTR_OP_TYPE,
++	SWITCH_ATTR_OP_NAME,
++	SWITCH_ATTR_OP_PORT,
++	SWITCH_ATTR_OP_VLAN,
++	SWITCH_ATTR_OP_VALUE_INT,
++	SWITCH_ATTR_OP_VALUE_STR,
++	SWITCH_ATTR_OP_VALUE_PORTS,
++	SWITCH_ATTR_OP_DESCRIPTION,
++	/* port lists */
++	SWITCH_ATTR_PORT,
++	SWITCH_ATTR_MAX
++};
++
++/* commands */
++enum {
++	SWITCH_CMD_UNSPEC,
++	SWITCH_CMD_GET_SWITCH,
++	SWITCH_CMD_NEW_ATTR,
++	SWITCH_CMD_LIST_GLOBAL,
++	SWITCH_CMD_GET_GLOBAL,
++	SWITCH_CMD_SET_GLOBAL,
++	SWITCH_CMD_LIST_PORT,
++	SWITCH_CMD_GET_PORT,
++	SWITCH_CMD_SET_PORT,
++	SWITCH_CMD_LIST_VLAN,
++	SWITCH_CMD_GET_VLAN,
++	SWITCH_CMD_SET_VLAN
++};
++
++/* data types */
++enum switch_val_type {
++	SWITCH_TYPE_UNSPEC,
++	SWITCH_TYPE_INT,
++	SWITCH_TYPE_STRING,
++	SWITCH_TYPE_PORTS,
++	SWITCH_TYPE_NOVAL,
++};
++
++/* port nested attributes */
++enum {
++	SWITCH_PORT_UNSPEC,
++	SWITCH_PORT_ID,
++	SWITCH_PORT_FLAG_TAGGED,
++	SWITCH_PORT_ATTR_MAX
++};
++
++#define SWITCH_ATTR_DEFAULTS_OFFSET	0x1000
++
++#ifdef __KERNEL__
++
++struct switch_dev;
++struct switch_op;
++struct switch_val;
++struct switch_attr;
++struct switch_attrlist;
++
++int register_switch(struct switch_dev *dev, struct net_device *netdev);
++void unregister_switch(struct switch_dev *dev);
++
++struct switch_attrlist {
++	/* filled in by the driver */
++	int n_attr;
++	struct switch_attr *attr;
++};
++
++
++struct switch_dev {
++	int id;
++	void *priv;
++	const char *name;
++
++	/* NB: either devname or netdev must be set */
++	const char *devname;
++	struct net_device *netdev;
++
++	int ports;
++	int vlans;
++	int cpu_port;
++	struct switch_attrlist attr_global, attr_port, attr_vlan;
++
++	spinlock_t lock;
++	struct switch_port *portbuf;
++	struct list_head dev_list;
++	unsigned long def_global, def_port, def_vlan;
++
++	int (*get_vlan_ports)(struct switch_dev *dev, struct switch_val *val);
++	int (*set_vlan_ports)(struct switch_dev *dev, struct switch_val *val);
++	int (*apply_config)(struct switch_dev *dev);
++};
++
++struct switch_port {
++	u32 id;
++	u32 flags;
++};
++
++struct switch_val {
++	struct switch_attr *attr;
++	int port_vlan;
++	int len;
++	union {
++		const char *s;
++		u32 i;
++		struct switch_port *ports;
++	} value;
++};
++
++struct switch_attr {
++	int disabled;
++	int type;
++	const char *name;
++	const char *description;
++
++	int (*set)(struct switch_dev *dev, struct switch_attr *attr, struct switch_val *val);
++	int (*get)(struct switch_dev *dev, struct switch_attr *attr, struct switch_val *val);
++
++	/* for driver internal use */
++	int id;
++	int ofs;
++	int max;
++};
++
++#endif
++
++#endif

+ 15066 - 0
target/linux/patches/2.6.33/yaffs2.patch

@@ -0,0 +1,15066 @@
+diff -Nur linux-2.6.32.orig/fs/Kconfig linux-2.6.32/fs/Kconfig
+--- linux-2.6.32.orig/fs/Kconfig	2009-12-03 04:51:21.000000000 +0100
++++ linux-2.6.32/fs/Kconfig	2010-01-30 20:35:00.921899692 +0100
+@@ -174,6 +174,10 @@
+ source "fs/befs/Kconfig"
+ source "fs/bfs/Kconfig"
+ source "fs/efs/Kconfig"
++
++# Patched by YAFFS
++source "fs/yaffs2/Kconfig"
++
+ source "fs/jffs2/Kconfig"
+ # UBIFS File system configuration
+ source "fs/ubifs/Kconfig"
+diff -Nur linux-2.6.32.orig/fs/Makefile linux-2.6.32/fs/Makefile
+--- linux-2.6.32.orig/fs/Makefile	2009-12-03 04:51:21.000000000 +0100
++++ linux-2.6.32/fs/Makefile	2010-01-30 20:35:00.933084814 +0100
+@@ -124,3 +124,4 @@
+ obj-$(CONFIG_BTRFS_FS)		+= btrfs/
+ obj-$(CONFIG_GFS2_FS)           += gfs2/
+ obj-$(CONFIG_EXOFS_FS)          += exofs/
++obj-$(CONFIG_YAFFS_FS)		+= yaffs2/
+diff -Nur linux-2.6.32.orig/fs/Makefile.pre.yaffs linux-2.6.32/fs/Makefile.pre.yaffs
+--- linux-2.6.32.orig/fs/Makefile.pre.yaffs	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.32/fs/Makefile.pre.yaffs	2010-01-30 20:35:00.983076819 +0100
+@@ -0,0 +1,126 @@
++#
++# Makefile for the Linux filesystems.
++#
++# 14 Sep 2000, Christoph Hellwig <hch@infradead.org>
++# Rewritten to use lists instead of if-statements.
++# 
++
++obj-y :=	open.o read_write.o file_table.o super.o \
++		char_dev.o stat.o exec.o pipe.o namei.o fcntl.o \
++		ioctl.o readdir.o select.o fifo.o dcache.o inode.o \
++		attr.o bad_inode.o file.o filesystems.o namespace.o \
++		seq_file.o xattr.o libfs.o fs-writeback.o \
++		pnode.o drop_caches.o splice.o sync.o utimes.o \
++		stack.o
++
++ifeq ($(CONFIG_BLOCK),y)
++obj-y +=	buffer.o bio.o block_dev.o direct-io.o mpage.o ioprio.o
++else
++obj-y +=	no-block.o
++endif
++
++obj-$(CONFIG_BLK_DEV_INTEGRITY) += bio-integrity.o
++obj-y				+= notify/
++obj-$(CONFIG_EPOLL)		+= eventpoll.o
++obj-$(CONFIG_ANON_INODES)	+= anon_inodes.o
++obj-$(CONFIG_SIGNALFD)		+= signalfd.o
++obj-$(CONFIG_TIMERFD)		+= timerfd.o
++obj-$(CONFIG_EVENTFD)		+= eventfd.o
++obj-$(CONFIG_AIO)               += aio.o
++obj-$(CONFIG_FILE_LOCKING)      += locks.o
++obj-$(CONFIG_COMPAT)		+= compat.o compat_ioctl.o
++
++nfsd-$(CONFIG_NFSD)		:= nfsctl.o
++obj-y				+= $(nfsd-y) $(nfsd-m)
++
++obj-$(CONFIG_BINFMT_AOUT)	+= binfmt_aout.o
++obj-$(CONFIG_BINFMT_EM86)	+= binfmt_em86.o
++obj-$(CONFIG_BINFMT_MISC)	+= binfmt_misc.o
++
++# binfmt_script is always there
++obj-y				+= binfmt_script.o
++
++obj-$(CONFIG_BINFMT_ELF)	+= binfmt_elf.o
++obj-$(CONFIG_COMPAT_BINFMT_ELF)	+= compat_binfmt_elf.o
++obj-$(CONFIG_BINFMT_ELF_FDPIC)	+= binfmt_elf_fdpic.o
++obj-$(CONFIG_BINFMT_SOM)	+= binfmt_som.o
++obj-$(CONFIG_BINFMT_FLAT)	+= binfmt_flat.o
++
++obj-$(CONFIG_FS_MBCACHE)	+= mbcache.o
++obj-$(CONFIG_FS_POSIX_ACL)	+= posix_acl.o xattr_acl.o
++obj-$(CONFIG_NFS_COMMON)	+= nfs_common/
++obj-$(CONFIG_GENERIC_ACL)	+= generic_acl.o
++
++obj-$(CONFIG_QUOTA)		+= dquot.o
++obj-$(CONFIG_QFMT_V1)		+= quota_v1.o
++obj-$(CONFIG_QFMT_V2)		+= quota_v2.o
++obj-$(CONFIG_QUOTA_TREE)	+= quota_tree.o
++obj-$(CONFIG_QUOTACTL)		+= quota.o
++
++obj-$(CONFIG_PROC_FS)		+= proc/
++obj-y				+= partitions/
++obj-$(CONFIG_SYSFS)		+= sysfs/
++obj-$(CONFIG_CONFIGFS_FS)	+= configfs/
++obj-y				+= devpts/
++
++obj-$(CONFIG_PROFILING)		+= dcookies.o
++obj-$(CONFIG_DLM)		+= dlm/
++ 
++# Do not add any filesystems before this line
++obj-$(CONFIG_REISERFS_FS)	+= reiserfs/
++obj-$(CONFIG_EXT3_FS)		+= ext3/ # Before ext2 so root fs can be ext3
++obj-$(CONFIG_EXT2_FS)		+= ext2/
++# We place ext4 after ext2 so plain ext2 root fs's are mounted using ext2
++# unless explicitly requested by rootfstype
++obj-$(CONFIG_EXT4_FS)		+= ext4/
++obj-$(CONFIG_JBD)		+= jbd/
++obj-$(CONFIG_JBD2)		+= jbd2/
++obj-$(CONFIG_CRAMFS)		+= cramfs/
++obj-$(CONFIG_SQUASHFS)		+= squashfs/
++obj-y				+= ramfs/
++obj-$(CONFIG_HUGETLBFS)		+= hugetlbfs/
++obj-$(CONFIG_CODA_FS)		+= coda/
++obj-$(CONFIG_MINIX_FS)		+= minix/
++obj-$(CONFIG_FAT_FS)		+= fat/
++obj-$(CONFIG_BFS_FS)		+= bfs/
++obj-$(CONFIG_ISO9660_FS)	+= isofs/
++obj-$(CONFIG_HFSPLUS_FS)	+= hfsplus/ # Before hfs to find wrapped HFS+
++obj-$(CONFIG_HFS_FS)		+= hfs/
++obj-$(CONFIG_ECRYPT_FS)		+= ecryptfs/
++obj-$(CONFIG_VXFS_FS)		+= freevxfs/
++obj-$(CONFIG_NFS_FS)		+= nfs/
++obj-$(CONFIG_EXPORTFS)		+= exportfs/
++obj-$(CONFIG_NFSD)		+= nfsd/
++obj-$(CONFIG_LOCKD)		+= lockd/
++obj-$(CONFIG_NLS)		+= nls/
++obj-$(CONFIG_SYSV_FS)		+= sysv/
++obj-$(CONFIG_SMB_FS)		+= smbfs/
++obj-$(CONFIG_CIFS)		+= cifs/
++obj-$(CONFIG_NCP_FS)		+= ncpfs/
++obj-$(CONFIG_HPFS_FS)		+= hpfs/
++obj-$(CONFIG_NTFS_FS)		+= ntfs/
++obj-$(CONFIG_UFS_FS)		+= ufs/
++obj-$(CONFIG_EFS_FS)		+= efs/
++obj-$(CONFIG_JFFS2_FS)		+= jffs2/
++obj-$(CONFIG_UBIFS_FS)		+= ubifs/
++obj-$(CONFIG_AFFS_FS)		+= affs/
++obj-$(CONFIG_ROMFS_FS)		+= romfs/
++obj-$(CONFIG_QNX4FS_FS)		+= qnx4/
++obj-$(CONFIG_AUTOFS_FS)		+= autofs/
++obj-$(CONFIG_AUTOFS4_FS)	+= autofs4/
++obj-$(CONFIG_ADFS_FS)		+= adfs/
++obj-$(CONFIG_FUSE_FS)		+= fuse/
++obj-$(CONFIG_UDF_FS)		+= udf/
++obj-$(CONFIG_SUN_OPENPROMFS)	+= openpromfs/
++obj-$(CONFIG_OMFS_FS)		+= omfs/
++obj-$(CONFIG_JFS_FS)		+= jfs/
++obj-$(CONFIG_XFS_FS)		+= xfs/
++obj-$(CONFIG_9P_FS)		+= 9p/
++obj-$(CONFIG_AFS_FS)		+= afs/
++obj-$(CONFIG_BEFS_FS)		+= befs/
++obj-$(CONFIG_HOSTFS)		+= hostfs/
++obj-$(CONFIG_HPPFS)		+= hppfs/
++obj-$(CONFIG_DEBUG_FS)		+= debugfs/
++obj-$(CONFIG_OCFS2_FS)		+= ocfs2/
++obj-$(CONFIG_BTRFS_FS)		+= btrfs/
++obj-$(CONFIG_GFS2_FS)           += gfs2/
+diff -Nur linux-2.6.32.orig/fs/yaffs2/devextras.h linux-2.6.32/fs/yaffs2/devextras.h
+--- linux-2.6.32.orig/fs/yaffs2/devextras.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.32/fs/yaffs2/devextras.h	2010-01-30 20:35:01.021829008 +0100
+@@ -0,0 +1,196 @@
++/*
++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2007 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++/*
++ * This file is just holds extra declarations of macros that would normally
++ * be providesd in the Linux kernel. These macros have been written from
++ * scratch but are functionally equivalent to the Linux ones.
++ *
++ */
++
++#ifndef __EXTRAS_H__
++#define __EXTRAS_H__
++
++
++#if !(defined __KERNEL__)
++
++/* Definition of types */
++typedef unsigned char __u8;
++typedef unsigned short __u16;
++typedef unsigned __u32;
++
++#endif
++
++/*
++ * This is a simple doubly linked list implementation that matches the
++ * way the Linux kernel doubly linked list implementation works.
++ */
++
++struct ylist_head {
++	struct ylist_head *next; /* next in chain */
++	struct ylist_head *prev; /* previous in chain */
++};
++
++
++/* Initialise a static list */
++#define YLIST_HEAD(name) \
++struct ylist_head name = { &(name), &(name)}
++
++
++
++/* Initialise a list head to an empty list */
++#define YINIT_LIST_HEAD(p) \
++do { \
++	(p)->next = (p);\
++	(p)->prev = (p); \
++} while (0)
++
++
++/* Add an element to a list */
++static __inline__ void ylist_add(struct ylist_head *newEntry,
++				struct ylist_head *list)
++{
++	struct ylist_head *listNext = list->next;
++
++	list->next = newEntry;
++	newEntry->prev = list;
++	newEntry->next = listNext;
++	listNext->prev = newEntry;
++
++}
++
++static __inline__ void ylist_add_tail(struct ylist_head *newEntry,
++				 struct ylist_head *list)
++{
++	struct ylist_head *listPrev = list->prev;
++
++	list->prev = newEntry;
++	newEntry->next = list;
++	newEntry->prev = listPrev;
++	listPrev->next = newEntry;
++
++}
++
++
++/* Take an element out of its current list, with or without
++ * reinitialising the links.of the entry*/
++static __inline__ void ylist_del(struct ylist_head *entry)
++{
++	struct ylist_head *listNext = entry->next;
++	struct ylist_head *listPrev = entry->prev;
++
++	listNext->prev = listPrev;
++	listPrev->next = listNext;
++
++}
++
++static __inline__ void ylist_del_init(struct ylist_head *entry)
++{
++	ylist_del(entry);
++	entry->next = entry->prev = entry;
++}
++
++
++/* Test if the list is empty */
++static __inline__ int ylist_empty(struct ylist_head *entry)
++{
++	return (entry->next == entry);
++}
++
++
++/* ylist_entry takes a pointer to a list entry and offsets it to that
++ * we can find a pointer to the object it is embedded in.
++ */
++
++
++#define ylist_entry(entry, type, member) \
++	((type *)((char *)(entry)-(unsigned long)(&((type *)NULL)->member)))
++
++
++/* ylist_for_each and list_for_each_safe  iterate over lists.
++ * ylist_for_each_safe uses temporary storage to make the list delete safe
++ */
++
++#define ylist_for_each(itervar, list) \
++	for (itervar = (list)->next; itervar != (list); itervar = itervar->next)
++
++#define ylist_for_each_safe(itervar, saveVar, list) \
++	for (itervar = (list)->next, saveVar = (list)->next->next; \
++		itervar != (list); itervar = saveVar, saveVar = saveVar->next)
++
++
++#if !(defined __KERNEL__)
++
++
++#ifndef WIN32
++#include <sys/stat.h>
++#endif
++
++
++#ifdef CONFIG_YAFFS_PROVIDE_DEFS
++/* File types */
++
++
++#define DT_UNKNOWN	0
++#define DT_FIFO		1
++#define DT_CHR		2
++#define DT_DIR		4
++#define DT_BLK		6
++#define DT_REG		8
++#define DT_LNK		10
++#define DT_SOCK		12
++#define DT_WHT		14
++
++
++#ifndef WIN32
++#include <sys/stat.h>
++#endif
++
++/*
++ * Attribute flags.  These should be or-ed together to figure out what
++ * has been changed!
++ */
++#define ATTR_MODE	1
++#define ATTR_UID	2
++#define ATTR_GID	4
++#define ATTR_SIZE	8
++#define ATTR_ATIME	16
++#define ATTR_MTIME	32
++#define ATTR_CTIME	64
++
++struct iattr {
++	unsigned int ia_valid;
++	unsigned ia_mode;
++	unsigned ia_uid;
++	unsigned ia_gid;
++	unsigned ia_size;
++	unsigned ia_atime;
++	unsigned ia_mtime;
++	unsigned ia_ctime;
++	unsigned int ia_attr_flags;
++};
++
++#endif
++
++#else
++
++#include <linux/types.h>
++#include <linux/fs.h>
++#include <linux/stat.h>
++
++#endif
++
++
++#endif
+diff -Nur linux-2.6.32.orig/fs/yaffs2/Kconfig linux-2.6.32/fs/yaffs2/Kconfig
+--- linux-2.6.32.orig/fs/yaffs2/Kconfig	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.32/fs/yaffs2/Kconfig	2010-01-30 20:35:01.053081300 +0100
+@@ -0,0 +1,156 @@
++#
++# YAFFS file system configurations
++#
++
++config YAFFS_FS
++	tristate "YAFFS2 file system support"
++	default n
++	depends on MTD_BLOCK
++	select YAFFS_YAFFS1
++	select YAFFS_YAFFS2
++	help
++	  YAFFS2, or Yet Another Flash Filing System, is a filing system
++	  optimised for NAND Flash chips.
++
++	  To compile the YAFFS2 file system support as a module, choose M
++	  here: the module will be called yaffs2.
++
++	  If unsure, say N.
++
++	  Further information on YAFFS2 is available at
++	  <http://www.aleph1.co.uk/yaffs/>.
++
++config YAFFS_YAFFS1
++	bool "512 byte / page devices"
++	depends on YAFFS_FS
++	default y
++	help
++	  Enable YAFFS1 support -- yaffs for 512 byte / page devices
++
++	  Not needed for 2K-page devices.
++
++	  If unsure, say Y.
++
++config YAFFS_9BYTE_TAGS
++	bool "Use older-style on-NAND data format with pageStatus byte"
++	depends on YAFFS_YAFFS1
++	default n
++	help
++
++	  Older-style on-NAND data format has a "pageStatus" byte to record
++	  chunk/page state.  This byte is zero when the page is discarded.
++	  Choose this option if you have existing on-NAND data using this
++	  format that you need to continue to support.  New data written
++	  also uses the older-style format.  Note: Use of this option
++	  generally requires that MTD's oob layout be adjusted to use the
++	  older-style format.  See notes on tags formats and MTD versions
++	  in yaffs_mtdif1.c.
++
++	  If unsure, say N.
++
++config YAFFS_DOES_ECC
++	bool "Lets Yaffs do its own ECC"
++	depends on YAFFS_FS && YAFFS_YAFFS1 && !YAFFS_9BYTE_TAGS
++	default n
++	help
++	  This enables Yaffs to use its own ECC functions instead of using
++	  the ones from the generic MTD-NAND driver.
++
++	  If unsure, say N.
++
++config YAFFS_ECC_WRONG_ORDER
++	bool "Use the same ecc byte order as Steven Hill's nand_ecc.c"
++	depends on YAFFS_FS && YAFFS_DOES_ECC && !YAFFS_9BYTE_TAGS
++	default n
++	help
++	  This makes yaffs_ecc.c use the same ecc byte order as Steven
++	  Hill's nand_ecc.c. If not set, then you get the same ecc byte
++	  order as SmartMedia.
++
++	  If unsure, say N.
++
++config YAFFS_YAFFS2
++	bool "2048 byte (or larger) / page devices"
++	depends on YAFFS_FS
++	default y
++	help
++	  Enable YAFFS2 support -- yaffs for >= 2K bytes per page devices
++
++	  If unsure, say Y.
++
++config YAFFS_AUTO_YAFFS2
++	bool "Autoselect yaffs2 format"
++	depends on YAFFS_YAFFS2
++	default y
++	help
++	  Without this, you need to explicitely use yaffs2 as the file
++	  system type. With this, you can say "yaffs" and yaffs or yaffs2
++	  will be used depending on the device page size (yaffs on
++	  512-byte page devices, yaffs2 on 2K page devices).
++
++	  If unsure, say Y.
++
++config YAFFS_DISABLE_LAZY_LOAD
++	bool "Disable lazy loading"
++	depends on YAFFS_YAFFS2
++	default n
++	help
++	  "Lazy loading" defers loading file details until they are
++	  required. This saves mount time, but makes the first look-up
++	  a bit longer.
++
++	  Lazy loading will only happen if enabled by this option being 'n'
++	  and if the appropriate tags are available, else yaffs2 will
++	  automatically fall back to immediate loading and do the right
++	  thing.
++
++	  Lazy laoding will be required by checkpointing.
++
++	  Setting this to 'y' will disable lazy loading.
++
++	  If unsure, say N.
++
++
++config YAFFS_DISABLE_WIDE_TNODES
++	bool "Turn off wide tnodes"
++	depends on YAFFS_FS
++	default n
++	help
++	  Wide tnodes are only used for NAND arrays >=32MB for 512-byte
++	  page devices and >=128MB for 2k page devices. They use slightly
++	  more RAM but are faster since they eliminate chunk group
++	  searching.
++
++	  Setting this to 'y' will force tnode width to 16 bits and save
++	  memory but make large arrays slower.
++
++	  If unsure, say N.
++
++config YAFFS_ALWAYS_CHECK_CHUNK_ERASED
++	bool "Force chunk erase check"
++	depends on YAFFS_FS
++	default n
++	help
++          Normally YAFFS only checks chunks before writing until an erased
++	  chunk is found. This helps to detect any partially written
++	  chunks that might have happened due to power loss.
++
++	  Enabling this forces on the test that chunks are erased in flash
++	  before writing to them. This takes more time but is potentially
++	  a bit more secure.
++
++	  Suggest setting Y during development and ironing out driver
++	  issues etc. Suggest setting to N if you want faster writing.
++
++	  If unsure, say Y.
++
++config YAFFS_SHORT_NAMES_IN_RAM
++	bool "Cache short names in RAM"
++	depends on YAFFS_FS
++	default y
++	help
++	  If this config is set, then short names are stored with the
++	  yaffs_Object.  This costs an extra 16 bytes of RAM per object,
++	  but makes look-ups faster.
++
++	  If unsure, say Y.
+diff -Nur linux-2.6.32.orig/fs/yaffs2/Makefile linux-2.6.32/fs/yaffs2/Makefile
+--- linux-2.6.32.orig/fs/yaffs2/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.32/fs/yaffs2/Makefile	2010-01-30 20:35:01.093074881 +0100
+@@ -0,0 +1,10 @@
++#
++# Makefile for the linux YAFFS filesystem routines.
++#
++
++obj-$(CONFIG_YAFFS_FS) += yaffs.o
++
++yaffs-y := yaffs_ecc.o yaffs_fs.o yaffs_guts.o yaffs_checkptrw.o
++yaffs-y += yaffs_packedtags1.o yaffs_packedtags2.o yaffs_nand.o yaffs_qsort.o
++yaffs-y += yaffs_tagscompat.o yaffs_tagsvalidity.o
++yaffs-y += yaffs_mtdif.o yaffs_mtdif1.o yaffs_mtdif2.o
+diff -Nur linux-2.6.32.orig/fs/yaffs2/moduleconfig.h linux-2.6.32/fs/yaffs2/moduleconfig.h
+--- linux-2.6.32.orig/fs/yaffs2/moduleconfig.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.32/fs/yaffs2/moduleconfig.h	2010-01-30 20:35:01.131828051 +0100
+@@ -0,0 +1,65 @@
++/*
++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2007 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Martin Fouts <Martin.Fouts@palmsource.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YAFFS_CONFIG_H__
++#define __YAFFS_CONFIG_H__
++
++#ifdef YAFFS_OUT_OF_TREE
++
++/* DO NOT UNSET THESE THREE. YAFFS2 will not compile if you do. */
++#define CONFIG_YAFFS_FS
++#define CONFIG_YAFFS_YAFFS1
++#define CONFIG_YAFFS_YAFFS2
++
++/* These options are independent of each other.  Select those that matter. */
++
++/* Default: Not selected */
++/* Meaning: Yaffs does its own ECC, rather than using MTD ECC */
++/* #define CONFIG_YAFFS_DOES_ECC */
++
++/* Default: Not selected */
++/* Meaning: ECC byte order is 'wrong'.  Only meaningful if */
++/*          CONFIG_YAFFS_DOES_ECC is set */
++/* #define CONFIG_YAFFS_ECC_WRONG_ORDER */
++
++/* Default: Selected */
++/* Meaning: Disables testing whether chunks are erased before writing to them*/
++#define CONFIG_YAFFS_DISABLE_CHUNK_ERASED_CHECK
++
++/* Default: Selected */
++/* Meaning: Cache short names, taking more RAM, but faster look-ups */
++#define CONFIG_YAFFS_SHORT_NAMES_IN_RAM
++
++/* Default: 10 */
++/* Meaning: set the count of blocks to reserve for checkpointing */
++#define CONFIG_YAFFS_CHECKPOINT_RESERVED_BLOCKS 10
++
++/*
++Older-style on-NAND data format has a "pageStatus" byte to record
++chunk/page state.  This byte is zeroed when the page is discarded.
++Choose this option if you have existing on-NAND data in this format
++that you need to continue to support.  New data written also uses the
++older-style format.
++Note: Use of this option generally requires that MTD's oob layout be
++adjusted to use the older-style format.  See notes on tags formats and
++MTD versions in yaffs_mtdif1.c.
++*/
++/* Default: Not selected */
++/* Meaning: Use older-style on-NAND data format with pageStatus byte */
++/* #define CONFIG_YAFFS_9BYTE_TAGS */
++
++#endif /* YAFFS_OUT_OF_TREE */
++
++#endif /* __YAFFS_CONFIG_H__ */
+diff -Nur linux-2.6.32.orig/fs/yaffs2/yaffs_checkptrw.c linux-2.6.32/fs/yaffs2/yaffs_checkptrw.c
+--- linux-2.6.32.orig/fs/yaffs2/yaffs_checkptrw.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.32/fs/yaffs2/yaffs_checkptrw.c	2010-01-30 20:35:01.171829690 +0100
+@@ -0,0 +1,394 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2007 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++const char *yaffs_checkptrw_c_version =
++	"$Id: yaffs_checkptrw.c,v 1.18 2009-03-06 17:20:49 wookey Exp $";
++
++
++#include "yaffs_checkptrw.h"
++#include "yaffs_getblockinfo.h"
++
++static int yaffs_CheckpointSpaceOk(yaffs_Device *dev)
++{
++	int blocksAvailable = dev->nErasedBlocks - dev->nReservedBlocks;
++
++	T(YAFFS_TRACE_CHECKPOINT,
++		(TSTR("checkpt blocks available = %d" TENDSTR),
++		blocksAvailable));
++
++	return (blocksAvailable <= 0) ? 0 : 1;
++}
++
++
++static int yaffs_CheckpointErase(yaffs_Device *dev)
++{
++	int i;
++
++	if (!dev->eraseBlockInNAND)
++		return 0;
++	T(YAFFS_TRACE_CHECKPOINT, (TSTR("checking blocks %d to %d"TENDSTR),
++		dev->internalStartBlock, dev->internalEndBlock));
++
++	for (i = dev->internalStartBlock; i <= dev->internalEndBlock; i++) {
++		yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, i);
++		if (bi->blockState == YAFFS_BLOCK_STATE_CHECKPOINT) {
++			T(YAFFS_TRACE_CHECKPOINT, (TSTR("erasing checkpt block %d"TENDSTR), i));
++			if (dev->eraseBlockInNAND(dev, i - dev->blockOffset /* realign */)) {
++				bi->blockState = YAFFS_BLOCK_STATE_EMPTY;
++				dev->nErasedBlocks++;
++				dev->nFreeChunks += dev->nChunksPerBlock;
++			} else {
++				dev->markNANDBlockBad(dev, i);
++				bi->blockState = YAFFS_BLOCK_STATE_DEAD;
++			}
++		}
++	}
++
++	dev->blocksInCheckpoint = 0;
++
++	return 1;
++}
++
++
++static void yaffs_CheckpointFindNextErasedBlock(yaffs_Device *dev)
++{
++	int  i;
++	int blocksAvailable = dev->nErasedBlocks - dev->nReservedBlocks;
++	T(YAFFS_TRACE_CHECKPOINT,
++		(TSTR("allocating checkpt block: erased %d reserved %d avail %d next %d "TENDSTR),
++		dev->nErasedBlocks, dev->nReservedBlocks, blocksAvailable, dev->checkpointNextBlock));
++
++	if (dev->checkpointNextBlock >= 0 &&
++			dev->checkpointNextBlock <= dev->internalEndBlock &&
++			blocksAvailable > 0) {
++
++		for (i = dev->checkpointNextBlock; i <= dev->internalEndBlock; i++) {
++			yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, i);
++			if (bi->blockState == YAFFS_BLOCK_STATE_EMPTY) {
++				dev->checkpointNextBlock = i + 1;
++				dev->checkpointCurrentBlock = i;
++				T(YAFFS_TRACE_CHECKPOINT, (TSTR("allocating checkpt block %d"TENDSTR), i));
++				return;
++			}
++		}
++	}
++	T(YAFFS_TRACE_CHECKPOINT, (TSTR("out of checkpt blocks"TENDSTR)));
++
++	dev->checkpointNextBlock = -1;
++	dev->checkpointCurrentBlock = -1;
++}
++
++static void yaffs_CheckpointFindNextCheckpointBlock(yaffs_Device *dev)
++{
++	int  i;
++	yaffs_ExtendedTags tags;
++
++	T(YAFFS_TRACE_CHECKPOINT, (TSTR("find next checkpt block: start:  blocks %d next %d" TENDSTR),
++		dev->blocksInCheckpoint, dev->checkpointNextBlock));
++
++	if (dev->blocksInCheckpoint < dev->checkpointMaxBlocks)
++		for (i = dev->checkpointNextBlock; i <= dev->internalEndBlock; i++) {
++			int chunk = i * dev->nChunksPerBlock;
++			int realignedChunk = chunk - dev->chunkOffset;
++
++			dev->readChunkWithTagsFromNAND(dev, realignedChunk,
++					NULL, &tags);
++			T(YAFFS_TRACE_CHECKPOINT, (TSTR("find next checkpt block: search: block %d oid %d seq %d eccr %d" TENDSTR),
++				i, tags.objectId, tags.sequenceNumber, tags.eccResult));
++
++			if (tags.sequenceNumber == YAFFS_SEQUENCE_CHECKPOINT_DATA) {
++				/* Right kind of block */
++				dev->checkpointNextBlock = tags.objectId;
++				dev->checkpointCurrentBlock = i;
++				dev->checkpointBlockList[dev->blocksInCheckpoint] = i;
++				dev->blocksInCheckpoint++;
++				T(YAFFS_TRACE_CHECKPOINT, (TSTR("found checkpt block %d"TENDSTR), i));
++				return;
++			}
++		}
++
++	T(YAFFS_TRACE_CHECKPOINT, (TSTR("found no more checkpt blocks"TENDSTR)));
++
++	dev->checkpointNextBlock = -1;
++	dev->checkpointCurrentBlock = -1;
++}
++
++
++int yaffs_CheckpointOpen(yaffs_Device *dev, int forWriting)
++{
++
++	/* Got the functions we need? */
++	if (!dev->writeChunkWithTagsToNAND ||
++			!dev->readChunkWithTagsFromNAND ||
++			!dev->eraseBlockInNAND ||
++			!dev->markNANDBlockBad)
++		return 0;
++
++	if (forWriting && !yaffs_CheckpointSpaceOk(dev))
++		return 0;
++
++	if (!dev->checkpointBuffer)
++		dev->checkpointBuffer = YMALLOC_DMA(dev->totalBytesPerChunk);
++	if (!dev->checkpointBuffer)
++		return 0;
++
++
++	dev->checkpointPageSequence = 0;
++
++	dev->checkpointOpenForWrite = forWriting;
++
++	dev->checkpointByteCount = 0;
++	dev->checkpointSum = 0;
++	dev->checkpointXor = 0;
++	dev->checkpointCurrentBlock = -1;
++	dev->checkpointCurrentChunk = -1;
++	dev->checkpointNextBlock = dev->internalStartBlock;
++
++	/* Erase all the blocks in the checkpoint area */
++	if (forWriting) {
++		memset(dev->checkpointBuffer, 0, dev->nDataBytesPerChunk);
++		dev->checkpointByteOffset = 0;
++		return yaffs_CheckpointErase(dev);
++	} else {
++		int i;
++		/* Set to a value that will kick off a read */
++		dev->checkpointByteOffset = dev->nDataBytesPerChunk;
++		/* A checkpoint block list of 1 checkpoint block per 16 block is (hopefully)
++		 * going to be way more than we need */
++		dev->blocksInCheckpoint = 0;
++		dev->checkpointMaxBlocks = (dev->internalEndBlock - dev->internalStartBlock)/16 + 2;
++		dev->checkpointBlockList = YMALLOC(sizeof(int) * dev->checkpointMaxBlocks);
++		for (i = 0; i < dev->checkpointMaxBlocks; i++)
++			dev->checkpointBlockList[i] = -1;
++	}
++
++	return 1;
++}
++
++int yaffs_GetCheckpointSum(yaffs_Device *dev, __u32 *sum)
++{
++	__u32 compositeSum;
++	compositeSum =  (dev->checkpointSum << 8) | (dev->checkpointXor & 0xFF);
++	*sum = compositeSum;
++	return 1;
++}
++
++static int yaffs_CheckpointFlushBuffer(yaffs_Device *dev)
++{
++	int chunk;
++	int realignedChunk;
++
++	yaffs_ExtendedTags tags;
++
++	if (dev->checkpointCurrentBlock < 0) {
++		yaffs_CheckpointFindNextErasedBlock(dev);
++		dev->checkpointCurrentChunk = 0;
++	}
++
++	if (dev->checkpointCurrentBlock < 0)
++		return 0;
++
++	tags.chunkDeleted = 0;
++	tags.objectId = dev->checkpointNextBlock; /* Hint to next place to look */
++	tags.chunkId = dev->checkpointPageSequence + 1;
++	tags.sequenceNumber =  YAFFS_SEQUENCE_CHECKPOINT_DATA;
++	tags.byteCount = dev->nDataBytesPerChunk;
++	if (dev->checkpointCurrentChunk == 0) {
++		/* First chunk we write for the block? Set block state to
++		   checkpoint */
++		yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, dev->checkpointCurrentBlock);
++		bi->blockState = YAFFS_BLOCK_STATE_CHECKPOINT;
++		dev->blocksInCheckpoint++;
++	}
++
++	chunk = dev->checkpointCurrentBlock * dev->nChunksPerBlock + dev->checkpointCurrentChunk;
++
++
++	T(YAFFS_TRACE_CHECKPOINT, (TSTR("checkpoint wite buffer nand %d(%d:%d) objid %d chId %d" TENDSTR),
++		chunk, dev->checkpointCurrentBlock, dev->checkpointCurrentChunk, tags.objectId, tags.chunkId));
++
++	realignedChunk = chunk - dev->chunkOffset;
++
++	dev->writeChunkWithTagsToNAND(dev, realignedChunk,
++			dev->checkpointBuffer, &tags);
++	dev->checkpointByteOffset = 0;
++	dev->checkpointPageSequence++;
++	dev->checkpointCurrentChunk++;
++	if (dev->checkpointCurrentChunk >= dev->nChunksPerBlock) {
++		dev->checkpointCurrentChunk = 0;
++		dev->checkpointCurrentBlock = -1;
++	}
++	memset(dev->checkpointBuffer, 0, dev->nDataBytesPerChunk);
++
++	return 1;
++}
++
++
++int yaffs_CheckpointWrite(yaffs_Device *dev, const void *data, int nBytes)
++{
++	int i = 0;
++	int ok = 1;
++
++
++	__u8 * dataBytes = (__u8 *)data;
++
++
++
++	if (!dev->checkpointBuffer)
++		return 0;
++
++	if (!dev->checkpointOpenForWrite)
++		return -1;
++
++	while (i < nBytes && ok) {
++		dev->checkpointBuffer[dev->checkpointByteOffset] = *dataBytes;
++		dev->checkpointSum += *dataBytes;
++		dev->checkpointXor ^= *dataBytes;
++
++		dev->checkpointByteOffset++;
++		i++;
++		dataBytes++;
++		dev->checkpointByteCount++;
++
++
++		if (dev->checkpointByteOffset < 0 ||
++		   dev->checkpointByteOffset >= dev->nDataBytesPerChunk)
++			ok = yaffs_CheckpointFlushBuffer(dev);
++	}
++
++	return i;
++}
++
++int yaffs_CheckpointRead(yaffs_Device *dev, void *data, int nBytes)
++{
++	int i = 0;
++	int ok = 1;
++	yaffs_ExtendedTags tags;
++
++
++	int chunk;
++	int realignedChunk;
++
++	__u8 *dataBytes = (__u8 *)data;
++
++	if (!dev->checkpointBuffer)
++		return 0;
++
++	if (dev->checkpointOpenForWrite)
++		return -1;
++
++	while (i < nBytes && ok) {
++
++
++		if (dev->checkpointByteOffset < 0 ||
++			dev->checkpointByteOffset >= dev->nDataBytesPerChunk) {
++
++			if (dev->checkpointCurrentBlock < 0) {
++				yaffs_CheckpointFindNextCheckpointBlock(dev);
++				dev->checkpointCurrentChunk = 0;
++			}
++
++			if (dev->checkpointCurrentBlock < 0)
++				ok = 0;
++			else {
++				chunk = dev->checkpointCurrentBlock *
++					dev->nChunksPerBlock +
++					dev->checkpointCurrentChunk;
++
++				realignedChunk = chunk - dev->chunkOffset;
++
++				/* read in the next chunk */
++				/* printf("read checkpoint page %d\n",dev->checkpointPage); */
++				dev->readChunkWithTagsFromNAND(dev,
++						realignedChunk,
++						dev->checkpointBuffer,
++						&tags);
++
++				if (tags.chunkId != (dev->checkpointPageSequence + 1) ||
++					tags.eccResult > YAFFS_ECC_RESULT_FIXED ||
++					tags.sequenceNumber != YAFFS_SEQUENCE_CHECKPOINT_DATA)
++					ok = 0;
++
++				dev->checkpointByteOffset = 0;
++				dev->checkpointPageSequence++;
++				dev->checkpointCurrentChunk++;
++
++				if (dev->checkpointCurrentChunk >= dev->nChunksPerBlock)
++					dev->checkpointCurrentBlock = -1;
++			}
++		}
++
++		if (ok) {
++			*dataBytes = dev->checkpointBuffer[dev->checkpointByteOffset];
++			dev->checkpointSum += *dataBytes;
++			dev->checkpointXor ^= *dataBytes;
++			dev->checkpointByteOffset++;
++			i++;
++			dataBytes++;
++			dev->checkpointByteCount++;
++		}
++	}
++
++	return 	i;
++}
++
++int yaffs_CheckpointClose(yaffs_Device *dev)
++{
++
++	if (dev->checkpointOpenForWrite) {
++		if (dev->checkpointByteOffset != 0)
++			yaffs_CheckpointFlushBuffer(dev);
++	} else {
++		int i;
++		for (i = 0; i < dev->blocksInCheckpoint && dev->checkpointBlockList[i] >= 0; i++) {
++			yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, dev->checkpointBlockList[i]);
++			if (bi->blockState == YAFFS_BLOCK_STATE_EMPTY)
++				bi->blockState = YAFFS_BLOCK_STATE_CHECKPOINT;
++			else {
++				/* Todo this looks odd... */
++			}
++		}
++		YFREE(dev->checkpointBlockList);
++		dev->checkpointBlockList = NULL;
++	}
++
++	dev->nFreeChunks -= dev->blocksInCheckpoint * dev->nChunksPerBlock;
++	dev->nErasedBlocks -= dev->blocksInCheckpoint;
++
++
++	T(YAFFS_TRACE_CHECKPOINT, (TSTR("checkpoint byte count %d" TENDSTR),
++			dev->checkpointByteCount));
++
++	if (dev->checkpointBuffer) {
++		/* free the buffer */
++		YFREE(dev->checkpointBuffer);
++		dev->checkpointBuffer = NULL;
++		return 1;
++	} else
++		return 0;
++}
++
++int yaffs_CheckpointInvalidateStream(yaffs_Device *dev)
++{
++	/* Erase the first checksum block */
++
++	T(YAFFS_TRACE_CHECKPOINT, (TSTR("checkpoint invalidate"TENDSTR)));
++
++	if (!yaffs_CheckpointSpaceOk(dev))
++		return 0;
++
++	return yaffs_CheckpointErase(dev);
++}
++
++
++
+diff -Nur linux-2.6.32.orig/fs/yaffs2/yaffs_checkptrw.h linux-2.6.32/fs/yaffs2/yaffs_checkptrw.h
+--- linux-2.6.32.orig/fs/yaffs2/yaffs_checkptrw.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.32/fs/yaffs2/yaffs_checkptrw.h	2010-01-30 20:35:01.213084831 +0100
+@@ -0,0 +1,35 @@
++/*
++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2007 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YAFFS_CHECKPTRW_H__
++#define __YAFFS_CHECKPTRW_H__
++
++#include "yaffs_guts.h"
++
++int yaffs_CheckpointOpen(yaffs_Device *dev, int forWriting);
++
++int yaffs_CheckpointWrite(yaffs_Device *dev, const void *data, int nBytes);
++
++int yaffs_CheckpointRead(yaffs_Device *dev, void *data, int nBytes);
++
++int yaffs_GetCheckpointSum(yaffs_Device *dev, __u32 *sum);
++
++int yaffs_CheckpointClose(yaffs_Device *dev);
++
++int yaffs_CheckpointInvalidateStream(yaffs_Device *dev);
++
++
++#endif
++
+diff -Nur linux-2.6.32.orig/fs/yaffs2/yaffs_ecc.c linux-2.6.32/fs/yaffs2/yaffs_ecc.c
+--- linux-2.6.32.orig/fs/yaffs2/yaffs_ecc.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.32/fs/yaffs2/yaffs_ecc.c	2010-01-30 20:35:01.251829837 +0100
+@@ -0,0 +1,326 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2007 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/*
++ * This code implements the ECC algorithm used in SmartMedia.
++ *
++ * The ECC comprises 22 bits of parity information and is stuffed into 3 bytes.
++ * The two unused bit are set to 1.
++ * The ECC can correct single bit errors in a 256-byte page of data. Thus, two such ECC
++ * blocks are used on a 512-byte NAND page.
++ *
++ */
++
++/* Table generated by gen-ecc.c
++ * Using a table means we do not have to calculate p1..p4 and p1'..p4'
++ * for each byte of data. These are instead provided in a table in bits7..2.
++ * Bit 0 of each entry indicates whether the entry has an odd or even parity, and therefore
++ * this bytes influence on the line parity.
++ */
++
++const char *yaffs_ecc_c_version =
++	"$Id: yaffs_ecc.c,v 1.11 2009-03-06 17:20:50 wookey Exp $";
++
++#include "yportenv.h"
++
++#include "yaffs_ecc.h"
++
++static const unsigned char column_parity_table[] = {
++	0x00, 0x55, 0x59, 0x0c, 0x65, 0x30, 0x3c, 0x69,
++	0x69, 0x3c, 0x30, 0x65, 0x0c, 0x59, 0x55, 0x00,
++	0x95, 0xc0, 0xcc, 0x99, 0xf0, 0xa5, 0xa9, 0xfc,
++	0xfc, 0xa9, 0xa5, 0xf0, 0x99, 0xcc, 0xc0, 0x95,
++	0x99, 0xcc, 0xc0, 0x95, 0xfc, 0xa9, 0xa5, 0xf0,
++	0xf0, 0xa5, 0xa9, 0xfc, 0x95, 0xc0, 0xcc, 0x99,
++	0x0c, 0x59, 0x55, 0x00, 0x69, 0x3c, 0x30, 0x65,
++	0x65, 0x30, 0x3c, 0x69, 0x00, 0x55, 0x59, 0x0c,
++	0xa5, 0xf0, 0xfc, 0xa9, 0xc0, 0x95, 0x99, 0xcc,
++	0xcc, 0x99, 0x95, 0xc0, 0xa9, 0xfc, 0xf0, 0xa5,
++	0x30, 0x65, 0x69, 0x3c, 0x55, 0x00, 0x0c, 0x59,
++	0x59, 0x0c, 0x00, 0x55, 0x3c, 0x69, 0x65, 0x30,
++	0x3c, 0x69, 0x65, 0x30, 0x59, 0x0c, 0x00, 0x55,
++	0x55, 0x00, 0x0c, 0x59, 0x30, 0x65, 0x69, 0x3c,
++	0xa9, 0xfc, 0xf0, 0xa5, 0xcc, 0x99, 0x95, 0xc0,
++	0xc0, 0x95, 0x99, 0xcc, 0xa5, 0xf0, 0xfc, 0xa9,
++	0xa9, 0xfc, 0xf0, 0xa5, 0xcc, 0x99, 0x95, 0xc0,
++	0xc0, 0x95, 0x99, 0xcc, 0xa5, 0xf0, 0xfc, 0xa9,
++	0x3c, 0x69, 0x65, 0x30, 0x59, 0x0c, 0x00, 0x55,
++	0x55, 0x00, 0x0c, 0x59, 0x30, 0x65, 0x69, 0x3c,
++	0x30, 0x65, 0x69, 0x3c, 0x55, 0x00, 0x0c, 0x59,
++	0x59, 0x0c, 0x00, 0x55, 0x3c, 0x69, 0x65, 0x30,
++	0xa5, 0xf0, 0xfc, 0xa9, 0xc0, 0x95, 0x99, 0xcc,
++	0xcc, 0x99, 0x95, 0xc0, 0xa9, 0xfc, 0xf0, 0xa5,
++	0x0c, 0x59, 0x55, 0x00, 0x69, 0x3c, 0x30, 0x65,
++	0x65, 0x30, 0x3c, 0x69, 0x00, 0x55, 0x59, 0x0c,
++	0x99, 0xcc, 0xc0, 0x95, 0xfc, 0xa9, 0xa5, 0xf0,
++	0xf0, 0xa5, 0xa9, 0xfc, 0x95, 0xc0, 0xcc, 0x99,
++	0x95, 0xc0, 0xcc, 0x99, 0xf0, 0xa5, 0xa9, 0xfc,
++	0xfc, 0xa9, 0xa5, 0xf0, 0x99, 0xcc, 0xc0, 0x95,
++	0x00, 0x55, 0x59, 0x0c, 0x65, 0x30, 0x3c, 0x69,
++	0x69, 0x3c, 0x30, 0x65, 0x0c, 0x59, 0x55, 0x00,
++};
++
++/* Count the bits in an unsigned char or a U32 */
++
++static int yaffs_CountBits(unsigned char x)
++{
++	int r = 0;
++	while (x) {
++		if (x & 1)
++			r++;
++		x >>= 1;
++	}
++	return r;
++}
++
++static int yaffs_CountBits32(unsigned x)
++{
++	int r = 0;
++	while (x) {
++		if (x & 1)
++			r++;
++		x >>= 1;
++	}
++	return r;
++}
++
++/* Calculate the ECC for a 256-byte block of data */
++void yaffs_ECCCalculate(const unsigned char *data, unsigned char *ecc)
++{
++	unsigned int i;
++
++	unsigned char col_parity = 0;
++	unsigned char line_parity = 0;
++	unsigned char line_parity_prime = 0;
++	unsigned char t;
++	unsigned char b;
++
++	for (i = 0; i < 256; i++) {
++		b = column_parity_table[*data++];
++		col_parity ^= b;
++
++		if (b & 0x01) {		/* odd number of bits in the byte */
++			line_parity ^= i;
++			line_parity_prime ^= ~i;
++		}
++	}
++
++	ecc[2] = (~col_parity) | 0x03;
++
++	t = 0;
++	if (line_parity & 0x80)
++		t |= 0x80;
++	if (line_parity_prime & 0x80)
++		t |= 0x40;
++	if (line_parity & 0x40)
++		t |= 0x20;
++	if (line_parity_prime & 0x40)
++		t |= 0x10;
++	if (line_parity & 0x20)
++		t |= 0x08;
++	if (line_parity_prime & 0x20)
++		t |= 0x04;
++	if (line_parity & 0x10)
++		t |= 0x02;
++	if (line_parity_prime & 0x10)
++		t |= 0x01;
++	ecc[1] = ~t;
++
++	t = 0;
++	if (line_parity & 0x08)
++		t |= 0x80;
++	if (line_parity_prime & 0x08)
++		t |= 0x40;
++	if (line_parity & 0x04)
++		t |= 0x20;
++	if (line_parity_prime & 0x04)
++		t |= 0x10;
++	if (line_parity & 0x02)
++		t |= 0x08;
++	if (line_parity_prime & 0x02)
++		t |= 0x04;
++	if (line_parity & 0x01)
++		t |= 0x02;
++	if (line_parity_prime & 0x01)
++		t |= 0x01;
++	ecc[0] = ~t;
++
++#ifdef CONFIG_YAFFS_ECC_WRONG_ORDER
++	/* Swap the bytes into the wrong order */
++	t = ecc[0];
++	ecc[0] = ecc[1];
++	ecc[1] = t;
++#endif
++}
++
++
++/* Correct the ECC on a 256 byte block of data */
++
++int yaffs_ECCCorrect(unsigned char *data, unsigned char *read_ecc,
++		     const unsigned char *test_ecc)
++{
++	unsigned char d0, d1, d2;	/* deltas */
++
++	d0 = read_ecc[0] ^ test_ecc[0];
++	d1 = read_ecc[1] ^ test_ecc[1];
++	d2 = read_ecc[2] ^ test_ecc[2];
++
++	if ((d0 | d1 | d2) == 0)
++		return 0; /* no error */
++
++	if (((d0 ^ (d0 >> 1)) & 0x55) == 0x55 &&
++	    ((d1 ^ (d1 >> 1)) & 0x55) == 0x55 &&
++	    ((d2 ^ (d2 >> 1)) & 0x54) == 0x54) {
++		/* Single bit (recoverable) error in data */
++
++		unsigned byte;
++		unsigned bit;
++
++#ifdef CONFIG_YAFFS_ECC_WRONG_ORDER
++		/* swap the bytes to correct for the wrong order */
++		unsigned char t;
++
++		t = d0;
++		d0 = d1;
++		d1 = t;
++#endif
++
++		bit = byte = 0;
++
++		if (d1 & 0x80)
++			byte |= 0x80;
++		if (d1 & 0x20)
++			byte |= 0x40;
++		if (d1 & 0x08)
++			byte |= 0x20;
++		if (d1 & 0x02)
++			byte |= 0x10;
++		if (d0 & 0x80)
++			byte |= 0x08;
++		if (d0 & 0x20)
++			byte |= 0x04;
++		if (d0 & 0x08)
++			byte |= 0x02;
++		if (d0 & 0x02)
++			byte |= 0x01;
++
++		if (d2 & 0x80)
++			bit |= 0x04;
++		if (d2 & 0x20)
++			bit |= 0x02;
++		if (d2 & 0x08)
++			bit |= 0x01;
++
++		data[byte] ^= (1 << bit);
++
++		return 1; /* Corrected the error */
++	}
++
++	if ((yaffs_CountBits(d0) +
++	     yaffs_CountBits(d1) +
++	     yaffs_CountBits(d2)) ==  1) {
++		/* Reccoverable error in ecc */
++
++		read_ecc[0] = test_ecc[0];
++		read_ecc[1] = test_ecc[1];
++		read_ecc[2] = test_ecc[2];
++
++		return 1; /* Corrected the error */
++	}
++
++	/* Unrecoverable error */
++
++	return -1;
++
++}
++
++
++/*
++ * ECCxxxOther does ECC calcs on arbitrary n bytes of data
++ */
++void yaffs_ECCCalculateOther(const unsigned char *data, unsigned nBytes,
++				yaffs_ECCOther *eccOther)
++{
++	unsigned int i;
++
++	unsigned char col_parity = 0;
++	unsigned line_parity = 0;
++	unsigned line_parity_prime = 0;
++	unsigned char b;
++
++	for (i = 0; i < nBytes; i++) {
++		b = column_parity_table[*data++];
++		col_parity ^= b;
++
++		if (b & 0x01)	 {
++			/* odd number of bits in the byte */
++			line_parity ^= i;
++			line_parity_prime ^= ~i;
++		}
++
++	}
++
++	eccOther->colParity = (col_parity >> 2) & 0x3f;
++	eccOther->lineParity = line_parity;
++	eccOther->lineParityPrime = line_parity_prime;
++}
++
++int yaffs_ECCCorrectOther(unsigned char *data, unsigned nBytes,
++			yaffs_ECCOther *read_ecc,
++			const yaffs_ECCOther *test_ecc)
++{
++	unsigned char cDelta;	/* column parity delta */
++	unsigned lDelta;	/* line parity delta */
++	unsigned lDeltaPrime;	/* line parity delta */
++	unsigned bit;
++
++	cDelta = read_ecc->colParity ^ test_ecc->colParity;
++	lDelta = read_ecc->lineParity ^ test_ecc->lineParity;
++	lDeltaPrime = read_ecc->lineParityPrime ^ test_ecc->lineParityPrime;
++
++	if ((cDelta | lDelta | lDeltaPrime) == 0)
++		return 0; /* no error */
++
++	if (lDelta == ~lDeltaPrime &&
++	    (((cDelta ^ (cDelta >> 1)) & 0x15) == 0x15)) {
++		/* Single bit (recoverable) error in data */
++
++		bit = 0;
++
++		if (cDelta & 0x20)
++			bit |= 0x04;
++		if (cDelta & 0x08)
++			bit |= 0x02;
++		if (cDelta & 0x02)
++			bit |= 0x01;
++
++		if (lDelta >= nBytes)
++			return -1;
++
++		data[lDelta] ^= (1 << bit);
++
++		return 1; /* corrected */
++	}
++
++	if ((yaffs_CountBits32(lDelta) + yaffs_CountBits32(lDeltaPrime) +
++			yaffs_CountBits(cDelta)) == 1) {
++		/* Reccoverable error in ecc */
++
++		*read_ecc = *test_ecc;
++		return 1; /* corrected */
++	}
++
++	/* Unrecoverable error */
++
++	return -1;
++}
+diff -Nur linux-2.6.32.orig/fs/yaffs2/yaffs_ecc.h linux-2.6.32/fs/yaffs2/yaffs_ecc.h
+--- linux-2.6.32.orig/fs/yaffs2/yaffs_ecc.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.32/fs/yaffs2/yaffs_ecc.h	2010-01-30 20:35:01.292693842 +0100
+@@ -0,0 +1,44 @@
++/*
++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2007 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++/*
++ * This code implements the ECC algorithm used in SmartMedia.
++ *
++ * The ECC comprises 22 bits of parity information and is stuffed into 3 bytes.
++ * The two unused bit are set to 1.
++ * The ECC can correct single bit errors in a 256-byte page of data. Thus, two such ECC
++ * blocks are used on a 512-byte NAND page.
++ *
++ */
++
++#ifndef __YAFFS_ECC_H__
++#define __YAFFS_ECC_H__
++
++typedef struct {
++	unsigned char colParity;
++	unsigned lineParity;
++	unsigned lineParityPrime;
++} yaffs_ECCOther;
++
++void yaffs_ECCCalculate(const unsigned char *data, unsigned char *ecc);
++int yaffs_ECCCorrect(unsigned char *data, unsigned char *read_ecc,
++		const unsigned char *test_ecc);
++
++void yaffs_ECCCalculateOther(const unsigned char *data, unsigned nBytes,
++			yaffs_ECCOther *ecc);
++int yaffs_ECCCorrectOther(unsigned char *data, unsigned nBytes,
++			yaffs_ECCOther *read_ecc,
++			const yaffs_ECCOther *test_ecc);
++#endif
+diff -Nur linux-2.6.32.orig/fs/yaffs2/yaffs_fs.c linux-2.6.32/fs/yaffs2/yaffs_fs.c
+--- linux-2.6.32.orig/fs/yaffs2/yaffs_fs.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.32/fs/yaffs2/yaffs_fs.c	2010-01-30 20:35:01.331845579 +0100
+@@ -0,0 +1,2529 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2009 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ * Acknowledgements:
++ * Luc van OostenRyck for numerous patches.
++ * Nick Bane for numerous patches.
++ * Nick Bane for 2.5/2.6 integration.
++ * Andras Toth for mknod rdev issue.
++ * Michael Fischer for finding the problem with inode inconsistency.
++ * Some code bodily lifted from JFFS
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/*
++ *
++ * This is the file system front-end to YAFFS that hooks it up to
++ * the VFS.
++ *
++ * Special notes:
++ * >> 2.4: sb->u.generic_sbp points to the yaffs_Device associated with
++ *         this superblock
++ * >> 2.6: sb->s_fs_info  points to the yaffs_Device associated with this
++ *         superblock
++ * >> inode->u.generic_ip points to the associated yaffs_Object.
++ */
++
++const char *yaffs_fs_c_version =
++    "$Id: yaffs_fs.c,v 1.79 2009-03-17 01:12:00 wookey Exp $";
++extern const char *yaffs_guts_c_version;
++
++#include <linux/version.h>
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19))
++#include <linux/config.h>
++#endif
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/init.h>
++#include <linux/fs.h>
++#include <linux/proc_fs.h>
++#include <linux/smp_lock.h>
++#include <linux/pagemap.h>
++#include <linux/mtd/mtd.h>
++#include <linux/interrupt.h>
++#include <linux/string.h>
++#include <linux/ctype.h>
++
++#include "asm/div64.h"
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++
++#include <linux/statfs.h>	/* Added NCB 15-8-2003 */
++#include <linux/statfs.h>
++#define UnlockPage(p) unlock_page(p)
++#define Page_Uptodate(page)	test_bit(PG_uptodate, &(page)->flags)
++
++/* FIXME: use sb->s_id instead ? */
++#define yaffs_devname(sb, buf)	bdevname(sb->s_bdev, buf)
++
++#else
++
++#include <linux/locks.h>
++#define	BDEVNAME_SIZE		0
++#define	yaffs_devname(sb, buf)	kdevname(sb->s_dev)
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0))
++/* added NCB 26/5/2006 for 2.4.25-vrs2-tcl1 kernel */
++#define __user
++#endif
++
++#endif
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26))
++#define YPROC_ROOT  (&proc_root)
++#else
++#define YPROC_ROOT  NULL
++#endif
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++#define WRITE_SIZE_STR "writesize"
++#define WRITE_SIZE(mtd) ((mtd)->writesize)
++#else
++#define WRITE_SIZE_STR "oobblock"
++#define WRITE_SIZE(mtd) ((mtd)->oobblock)
++#endif
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 27))
++#define YAFFS_USE_WRITE_BEGIN_END 1
++#else
++#define YAFFS_USE_WRITE_BEGIN_END 0
++#endif
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 28))
++static uint32_t YCALCBLOCKS(uint64_t partition_size, uint32_t block_size)
++{
++	uint64_t result = partition_size;
++	do_div(result, block_size);
++	return (uint32_t)result;
++}
++#else
++#define YCALCBLOCKS(s, b) ((s)/(b))
++#endif
++
++#include <linux/uaccess.h>
++
++#include "yportenv.h"
++#include "yaffs_guts.h"
++
++#include <linux/mtd/mtd.h>
++#include "yaffs_mtdif.h"
++#include "yaffs_mtdif1.h"
++#include "yaffs_mtdif2.h"
++
++unsigned int yaffs_traceMask = YAFFS_TRACE_BAD_BLOCKS;
++unsigned int yaffs_wr_attempts = YAFFS_WR_ATTEMPTS;
++unsigned int yaffs_auto_checkpoint = 1;
++
++/* Module Parameters */
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++module_param(yaffs_traceMask, uint, 0644);
++module_param(yaffs_wr_attempts, uint, 0644);
++module_param(yaffs_auto_checkpoint, uint, 0644);
++#else
++MODULE_PARM(yaffs_traceMask, "i");
++MODULE_PARM(yaffs_wr_attempts, "i");
++MODULE_PARM(yaffs_auto_checkpoint, "i");
++#endif
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 25))
++/* use iget and read_inode */
++#define Y_IGET(sb, inum) iget((sb), (inum))
++static void yaffs_read_inode(struct inode *inode);
++
++#else
++/* Call local equivalent */
++#define YAFFS_USE_OWN_IGET
++#define Y_IGET(sb, inum) yaffs_iget((sb), (inum))
++
++static struct inode *yaffs_iget(struct super_block *sb, unsigned long ino);
++#endif
++
++/*#define T(x) printk x */
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 18))
++#define yaffs_InodeToObjectLV(iptr) ((iptr)->i_private)
++#else
++#define yaffs_InodeToObjectLV(iptr) ((iptr)->u.generic_ip)
++#endif
++
++#define yaffs_InodeToObject(iptr) ((yaffs_Object *)(yaffs_InodeToObjectLV(iptr)))
++#define yaffs_DentryToObject(dptr) yaffs_InodeToObject((dptr)->d_inode)
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++#define yaffs_SuperToDevice(sb)	((yaffs_Device *)sb->s_fs_info)
++#else
++#define yaffs_SuperToDevice(sb)	((yaffs_Device *)sb->u.generic_sbp)
++#endif
++
++static void yaffs_put_super(struct super_block *sb);
++
++static ssize_t yaffs_file_write(struct file *f, const char *buf, size_t n,
++				loff_t *pos);
++static ssize_t yaffs_hold_space(struct file *f);
++static void yaffs_release_space(struct file *f);
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++static int yaffs_file_flush(struct file *file, fl_owner_t id);
++#else
++static int yaffs_file_flush(struct file *file);
++#endif
++
++static int yaffs_sync_object(struct file *file, struct dentry *dentry,
++				int datasync);
++
++static int yaffs_readdir(struct file *f, void *dirent, filldir_t filldir);
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode,
++			struct nameidata *n);
++static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry,
++					struct nameidata *n);
++#else
++static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode);
++static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry);
++#endif
++static int yaffs_link(struct dentry *old_dentry, struct inode *dir,
++			struct dentry *dentry);
++static int yaffs_unlink(struct inode *dir, struct dentry *dentry);
++static int yaffs_symlink(struct inode *dir, struct dentry *dentry,
++			const char *symname);
++static int yaffs_mkdir(struct inode *dir, struct dentry *dentry, int mode);
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++static int yaffs_mknod(struct inode *dir, struct dentry *dentry, int mode,
++			dev_t dev);
++#else
++static int yaffs_mknod(struct inode *dir, struct dentry *dentry, int mode,
++			int dev);
++#endif
++static int yaffs_rename(struct inode *old_dir, struct dentry *old_dentry,
++			struct inode *new_dir, struct dentry *new_dentry);
++static int yaffs_setattr(struct dentry *dentry, struct iattr *attr);
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++static int yaffs_sync_fs(struct super_block *sb, int wait);
++static void yaffs_write_super(struct super_block *sb);
++#else
++static int yaffs_sync_fs(struct super_block *sb);
++static int yaffs_write_super(struct super_block *sb);
++#endif
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++static int yaffs_statfs(struct dentry *dentry, struct kstatfs *buf);
++#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++static int yaffs_statfs(struct super_block *sb, struct kstatfs *buf);
++#else
++static int yaffs_statfs(struct super_block *sb, struct statfs *buf);
++#endif
++
++#ifdef YAFFS_HAS_PUT_INODE
++static void yaffs_put_inode(struct inode *inode);
++#endif
++
++static void yaffs_delete_inode(struct inode *);
++static void yaffs_clear_inode(struct inode *);
++
++static int yaffs_readpage(struct file *file, struct page *page);
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++static int yaffs_writepage(struct page *page, struct writeback_control *wbc);
++#else
++static int yaffs_writepage(struct page *page);
++#endif
++
++
++#if (YAFFS_USE_WRITE_BEGIN_END != 0)
++static int yaffs_write_begin(struct file *filp, struct address_space *mapping,
++				loff_t pos, unsigned len, unsigned flags,
++				struct page **pagep, void **fsdata);
++static int yaffs_write_end(struct file *filp, struct address_space *mapping,
++				loff_t pos, unsigned len, unsigned copied,
++				struct page *pg, void *fsdadata);
++#else
++static int yaffs_prepare_write(struct file *f, struct page *pg,
++				unsigned offset, unsigned to);
++static int yaffs_commit_write(struct file *f, struct page *pg, unsigned offset,
++				unsigned to);
++
++#endif
++
++static int yaffs_readlink(struct dentry *dentry, char __user *buffer,
++				int buflen);
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 13))
++static void *yaffs_follow_link(struct dentry *dentry, struct nameidata *nd);
++#else
++static int yaffs_follow_link(struct dentry *dentry, struct nameidata *nd);
++#endif
++
++static struct address_space_operations yaffs_file_address_operations = {
++	.readpage = yaffs_readpage,
++	.writepage = yaffs_writepage,
++#if (YAFFS_USE_WRITE_BEGIN_END > 0)
++	.write_begin = yaffs_write_begin,
++	.write_end = yaffs_write_end,
++#else
++	.prepare_write = yaffs_prepare_write,
++	.commit_write = yaffs_commit_write,
++#endif
++};
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 22))
++static const struct file_operations yaffs_file_operations = {
++	.read = do_sync_read,
++	.write = do_sync_write,
++	.aio_read = generic_file_aio_read,
++	.aio_write = generic_file_aio_write,
++	.mmap = generic_file_mmap,
++	.flush = yaffs_file_flush,
++	.fsync = yaffs_sync_object,
++	.splice_read = generic_file_splice_read,
++	.splice_write = generic_file_splice_write,
++	.llseek = generic_file_llseek,
++};
++
++#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 18))
++
++static const struct file_operations yaffs_file_operations = {
++	.read = do_sync_read,
++	.write = do_sync_write,
++	.aio_read = generic_file_aio_read,
++	.aio_write = generic_file_aio_write,
++	.mmap = generic_file_mmap,
++	.flush = yaffs_file_flush,
++	.fsync = yaffs_sync_object,
++	.sendfile = generic_file_sendfile,
++};
++
++#else
++
++static const struct file_operations yaffs_file_operations = {
++	.read = generic_file_read,
++	.write = generic_file_write,
++	.mmap = generic_file_mmap,
++	.flush = yaffs_file_flush,
++	.fsync = yaffs_sync_object,
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++	.sendfile = generic_file_sendfile,
++#endif
++};
++#endif
++
++static const struct inode_operations yaffs_file_inode_operations = {
++	.setattr = yaffs_setattr,
++};
++
++static const struct inode_operations yaffs_symlink_inode_operations = {
++	.readlink = yaffs_readlink,
++	.follow_link = yaffs_follow_link,
++	.setattr = yaffs_setattr,
++};
++
++static const struct inode_operations yaffs_dir_inode_operations = {
++	.create = yaffs_create,
++	.lookup = yaffs_lookup,
++	.link = yaffs_link,
++	.unlink = yaffs_unlink,
++	.symlink = yaffs_symlink,
++	.mkdir = yaffs_mkdir,
++	.rmdir = yaffs_unlink,
++	.mknod = yaffs_mknod,
++	.rename = yaffs_rename,
++	.setattr = yaffs_setattr,
++};
++
++static const struct file_operations yaffs_dir_operations = {
++	.read = generic_read_dir,
++	.readdir = yaffs_readdir,
++	.fsync = yaffs_sync_object,
++};
++
++static const struct super_operations yaffs_super_ops = {
++	.statfs = yaffs_statfs,
++
++#ifndef YAFFS_USE_OWN_IGET
++	.read_inode = yaffs_read_inode,
++#endif
++#ifdef YAFFS_HAS_PUT_INODE
++	.put_inode = yaffs_put_inode,
++#endif
++	.put_super = yaffs_put_super,
++	.delete_inode = yaffs_delete_inode,
++	.clear_inode = yaffs_clear_inode,
++	.sync_fs = yaffs_sync_fs,
++	.write_super = yaffs_write_super,
++};
++
++static void yaffs_GrossLock(yaffs_Device *dev)
++{
++	T(YAFFS_TRACE_OS, ("yaffs locking %p\n", current));
++	down(&dev->grossLock);
++	T(YAFFS_TRACE_OS, ("yaffs locked %p\n", current));
++}
++
++static void yaffs_GrossUnlock(yaffs_Device *dev)
++{
++	T(YAFFS_TRACE_OS, ("yaffs unlocking %p\n", current));
++	up(&dev->grossLock);
++}
++
++static int yaffs_readlink(struct dentry *dentry, char __user *buffer,
++			int buflen)
++{
++	unsigned char *alias;
++	int ret;
++
++	yaffs_Device *dev = yaffs_DentryToObject(dentry)->myDev;
++
++	yaffs_GrossLock(dev);
++
++	alias = yaffs_GetSymlinkAlias(yaffs_DentryToObject(dentry));
++
++	yaffs_GrossUnlock(dev);
++
++	if (!alias)
++		return -ENOMEM;
++
++	ret = vfs_readlink(dentry, buffer, buflen, alias);
++	kfree(alias);
++	return ret;
++}
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 13))
++static void *yaffs_follow_link(struct dentry *dentry, struct nameidata *nd)
++#else
++static int yaffs_follow_link(struct dentry *dentry, struct nameidata *nd)
++#endif
++{
++	unsigned char *alias;
++	int ret;
++	yaffs_Device *dev = yaffs_DentryToObject(dentry)->myDev;
++
++	yaffs_GrossLock(dev);
++
++	alias = yaffs_GetSymlinkAlias(yaffs_DentryToObject(dentry));
++
++	yaffs_GrossUnlock(dev);
++
++	if (!alias) {
++		ret = -ENOMEM;
++		goto out;
++	}
++
++	ret = vfs_follow_link(nd, alias);
++	kfree(alias);
++out:
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 13))
++	return ERR_PTR(ret);
++#else
++	return ret;
++#endif
++}
++
++struct inode *yaffs_get_inode(struct super_block *sb, int mode, int dev,
++				yaffs_Object *obj);
++
++/*
++ * Lookup is used to find objects in the fs
++ */
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++
++static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry,
++				struct nameidata *n)
++#else
++static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry)
++#endif
++{
++	yaffs_Object *obj;
++	struct inode *inode = NULL;	/* NCB 2.5/2.6 needs NULL here */
++
++	yaffs_Device *dev = yaffs_InodeToObject(dir)->myDev;
++
++	yaffs_GrossLock(dev);
++
++	T(YAFFS_TRACE_OS,
++		("yaffs_lookup for %d:%s\n",
++		yaffs_InodeToObject(dir)->objectId, dentry->d_name.name));
++
++	obj = yaffs_FindObjectByName(yaffs_InodeToObject(dir),
++					dentry->d_name.name);
++
++	obj = yaffs_GetEquivalentObject(obj);	/* in case it was a hardlink */
++
++	/* Can't hold gross lock when calling yaffs_get_inode() */
++	yaffs_GrossUnlock(dev);
++
++	if (obj) {
++		T(YAFFS_TRACE_OS,
++			("yaffs_lookup found %d\n", obj->objectId));
++
++		inode = yaffs_get_inode(dir->i_sb, obj->yst_mode, 0, obj);
++
++		if (inode) {
++			T(YAFFS_TRACE_OS,
++				("yaffs_loookup dentry \n"));
++/* #if 0 asserted by NCB for 2.5/6 compatability - falls through to
++ * d_add even if NULL inode */
++#if 0
++			/*dget(dentry); // try to solve directory bug */
++			d_add(dentry, inode);
++
++			/* return dentry; */
++			return NULL;
++#endif
++		}
++
++	} else {
++		T(YAFFS_TRACE_OS, ("yaffs_lookup not found\n"));
++
++	}
++
++/* added NCB for 2.5/6 compatability - forces add even if inode is
++ * NULL which creates dentry hash */
++	d_add(dentry, inode);
++
++	return NULL;
++}
++
++
++#ifdef YAFFS_HAS_PUT_INODE
++
++/* For now put inode is just for debugging
++ * Put inode is called when the inode **structure** is put.
++ */
++static void yaffs_put_inode(struct inode *inode)
++{
++	T(YAFFS_TRACE_OS,
++		("yaffs_put_inode: ino %d, count %d\n", (int)inode->i_ino,
++		atomic_read(&inode->i_count)));
++
++}
++#endif
++
++/* clear is called to tell the fs to release any per-inode data it holds */
++static void yaffs_clear_inode(struct inode *inode)
++{
++	yaffs_Object *obj;
++	yaffs_Device *dev;
++
++	obj = yaffs_InodeToObject(inode);
++
++	T(YAFFS_TRACE_OS,
++		("yaffs_clear_inode: ino %d, count %d %s\n", (int)inode->i_ino,
++		atomic_read(&inode->i_count),
++		obj ? "object exists" : "null object"));
++
++	if (obj) {
++		dev = obj->myDev;
++		yaffs_GrossLock(dev);
++
++		/* Clear the association between the inode and
++		 * the yaffs_Object.
++		 */
++		obj->myInode = NULL;
++		yaffs_InodeToObjectLV(inode) = NULL;
++
++		/* If the object freeing was deferred, then the real
++		 * free happens now.
++		 * This should fix the inode inconsistency problem.
++		 */
++
++		yaffs_HandleDeferedFree(obj);
++
++		yaffs_GrossUnlock(dev);
++	}
++
++}
++
++/* delete is called when the link count is zero and the inode
++ * is put (ie. nobody wants to know about it anymore, time to
++ * delete the file).
++ * NB Must call clear_inode()
++ */
++static void yaffs_delete_inode(struct inode *inode)
++{
++	yaffs_Object *obj = yaffs_InodeToObject(inode);
++	yaffs_Device *dev;
++
++	T(YAFFS_TRACE_OS,
++		("yaffs_delete_inode: ino %d, count %d %s\n", (int)inode->i_ino,
++		atomic_read(&inode->i_count),
++		obj ? "object exists" : "null object"));
++
++	if (obj) {
++		dev = obj->myDev;
++		yaffs_GrossLock(dev);
++		yaffs_DeleteObject(obj);
++		yaffs_GrossUnlock(dev);
++	}
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 13))
++	truncate_inode_pages(&inode->i_data, 0);
++#endif
++	clear_inode(inode);
++}
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++static int yaffs_file_flush(struct file *file, fl_owner_t id)
++#else
++static int yaffs_file_flush(struct file *file)
++#endif
++{
++	yaffs_Object *obj = yaffs_DentryToObject(file->f_dentry);
++
++	yaffs_Device *dev = obj->myDev;
++
++	T(YAFFS_TRACE_OS,
++		("yaffs_file_flush object %d (%s)\n", obj->objectId,
++		obj->dirty ? "dirty" : "clean"));
++
++	yaffs_GrossLock(dev);
++
++	yaffs_FlushFile(obj, 1);
++
++	yaffs_GrossUnlock(dev);
++
++	return 0;
++}
++
++static int yaffs_readpage_nolock(struct file *f, struct page *pg)
++{
++	/* Lifted from jffs2 */
++
++	yaffs_Object *obj;
++	unsigned char *pg_buf;
++	int ret;
++
++	yaffs_Device *dev;
++
++	T(YAFFS_TRACE_OS, ("yaffs_readpage at %08x, size %08x\n",
++			(unsigned)(pg->index << PAGE_CACHE_SHIFT),
++			(unsigned)PAGE_CACHE_SIZE));
++
++	obj = yaffs_DentryToObject(f->f_dentry);
++
++	dev = obj->myDev;
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++	BUG_ON(!PageLocked(pg));
++#else
++	if (!PageLocked(pg))
++		PAGE_BUG(pg);
++#endif
++
++	pg_buf = kmap(pg);
++	/* FIXME: Can kmap fail? */
++
++	yaffs_GrossLock(dev);
++
++	ret = yaffs_ReadDataFromFile(obj, pg_buf,
++				pg->index << PAGE_CACHE_SHIFT,
++				PAGE_CACHE_SIZE);
++
++	yaffs_GrossUnlock(dev);
++
++	if (ret >= 0)
++		ret = 0;
++
++	if (ret) {
++		ClearPageUptodate(pg);
++		SetPageError(pg);
++	} else {
++		SetPageUptodate(pg);
++		ClearPageError(pg);
++	}
++
++	flush_dcache_page(pg);
++	kunmap(pg);
++
++	T(YAFFS_TRACE_OS, ("yaffs_readpage done\n"));
++	return ret;
++}
++
++static int yaffs_readpage_unlock(struct file *f, struct page *pg)
++{
++	int ret = yaffs_readpage_nolock(f, pg);
++	UnlockPage(pg);
++	return ret;
++}
++
++static int yaffs_readpage(struct file *f, struct page *pg)
++{
++	return yaffs_readpage_unlock(f, pg);
++}
++
++/* writepage inspired by/stolen from smbfs */
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++static int yaffs_writepage(struct page *page, struct writeback_control *wbc)
++#else
++static int yaffs_writepage(struct page *page)
++#endif
++{
++	struct address_space *mapping = page->mapping;
++	loff_t offset = (loff_t) page->index << PAGE_CACHE_SHIFT;
++	struct inode *inode;
++	unsigned long end_index;
++	char *buffer;
++	yaffs_Object *obj;
++	int nWritten = 0;
++	unsigned nBytes;
++
++	if (!mapping)
++		BUG();
++	inode = mapping->host;
++	if (!inode)
++		BUG();
++
++	if (offset > inode->i_size) {
++		T(YAFFS_TRACE_OS,
++			("yaffs_writepage at %08x, inode size = %08x!!!\n",
++			(unsigned)(page->index << PAGE_CACHE_SHIFT),
++			(unsigned)inode->i_size));
++		T(YAFFS_TRACE_OS,
++			("                -> don't care!!\n"));
++		unlock_page(page);
++		return 0;
++	}
++
++	end_index = inode->i_size >> PAGE_CACHE_SHIFT;
++
++	/* easy case */
++	if (page->index < end_index)
++		nBytes = PAGE_CACHE_SIZE;
++	else
++		nBytes = inode->i_size & (PAGE_CACHE_SIZE - 1);
++
++	get_page(page);
++
++	buffer = kmap(page);
++
++	obj = yaffs_InodeToObject(inode);
++	yaffs_GrossLock(obj->myDev);
++
++	T(YAFFS_TRACE_OS,
++		("yaffs_writepage at %08x, size %08x\n",
++		(unsigned)(page->index << PAGE_CACHE_SHIFT), nBytes));
++	T(YAFFS_TRACE_OS,
++		("writepag0: obj = %05x, ino = %05x\n",
++		(int)obj->variant.fileVariant.fileSize, (int)inode->i_size));
++
++	nWritten = yaffs_WriteDataToFile(obj, buffer,
++			page->index << PAGE_CACHE_SHIFT, nBytes, 0);
++
++	T(YAFFS_TRACE_OS,
++		("writepag1: obj = %05x, ino = %05x\n",
++		(int)obj->variant.fileVariant.fileSize, (int)inode->i_size));
++
++	yaffs_GrossUnlock(obj->myDev);
++
++	kunmap(page);
++	SetPageUptodate(page);
++	UnlockPage(page);
++	put_page(page);
++
++	return (nWritten == nBytes) ? 0 : -ENOSPC;
++}
++
++
++#if (YAFFS_USE_WRITE_BEGIN_END > 0)
++static int yaffs_write_begin(struct file *filp, struct address_space *mapping,
++				loff_t pos, unsigned len, unsigned flags,
++				struct page **pagep, void **fsdata)
++{
++	struct page *pg = NULL;
++	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
++	uint32_t offset = pos & (PAGE_CACHE_SIZE - 1);
++	uint32_t to = offset + len;
++
++	int ret = 0;
++	int space_held = 0;
++
++	T(YAFFS_TRACE_OS, ("start yaffs_write_begin\n"));
++	/* Get a page */
++#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 28)
++	pg = grab_cache_page_write_begin(mapping, index, flags);
++#else
++	pg = __grab_cache_page(mapping, index);
++#endif
++
++	*pagep = pg;
++	if (!pg) {
++		ret =  -ENOMEM;
++		goto out;
++	}
++	/* Get fs space */
++	space_held = yaffs_hold_space(filp);
++
++	if (!space_held) {
++		ret = -ENOSPC;
++		goto out;
++	}
++
++	/* Update page if required */
++
++	if (!Page_Uptodate(pg) && (offset || to < PAGE_CACHE_SIZE))
++		ret = yaffs_readpage_nolock(filp, pg);
++
++	if (ret)
++		goto out;
++
++	/* Happy path return */
++	T(YAFFS_TRACE_OS, ("end yaffs_write_begin - ok\n"));
++
++	return 0;
++
++out:
++	T(YAFFS_TRACE_OS, ("end yaffs_write_begin fail returning %d\n", ret));
++	if (space_held)
++		yaffs_release_space(filp);
++	if (pg) {
++		unlock_page(pg);
++		page_cache_release(pg);
++	}
++	return ret;
++}
++
++#else
++
++static int yaffs_prepare_write(struct file *f, struct page *pg,
++				unsigned offset, unsigned to)
++{
++	T(YAFFS_TRACE_OS, ("yaffs_prepair_write\n"));
++
++	if (!Page_Uptodate(pg) && (offset || to < PAGE_CACHE_SIZE))
++		return yaffs_readpage_nolock(f, pg);
++	return 0;
++}
++#endif
++
++#if (YAFFS_USE_WRITE_BEGIN_END > 0)
++static int yaffs_write_end(struct file *filp, struct address_space *mapping,
++				loff_t pos, unsigned len, unsigned copied,
++				struct page *pg, void *fsdadata)
++{
++	int ret = 0;
++	void *addr, *kva;
++	uint32_t offset_into_page = pos & (PAGE_CACHE_SIZE - 1);
++
++	kva = kmap(pg);
++	addr = kva + offset_into_page;
++
++	T(YAFFS_TRACE_OS,
++		("yaffs_write_end addr %x pos %x nBytes %d\n",
++		(unsigned) addr,
++		(int)pos, copied));
++
++	ret = yaffs_file_write(filp, addr, copied, &pos);
++
++	if (ret != copied) {
++		T(YAFFS_TRACE_OS,
++			("yaffs_write_end not same size ret %d  copied %d\n",
++			ret, copied));
++		SetPageError(pg);
++		ClearPageUptodate(pg);
++	} else {
++		SetPageUptodate(pg);
++	}
++
++	kunmap(pg);
++
++	yaffs_release_space(filp);
++	unlock_page(pg);
++	page_cache_release(pg);
++	return ret;
++}
++#else
++
++static int yaffs_commit_write(struct file *f, struct page *pg, unsigned offset,
++				unsigned to)
++{
++	void *addr, *kva;
++
++	loff_t pos = (((loff_t) pg->index) << PAGE_CACHE_SHIFT) + offset;
++	int nBytes = to - offset;
++	int nWritten;
++
++	unsigned spos = pos;
++	unsigned saddr;
++
++	kva = kmap(pg);
++	addr = kva + offset;
++
++	saddr = (unsigned) addr;
++
++	T(YAFFS_TRACE_OS,
++		("yaffs_commit_write addr %x pos %x nBytes %d\n",
++		saddr, spos, nBytes));
++
++	nWritten = yaffs_file_write(f, addr, nBytes, &pos);
++
++	if (nWritten != nBytes) {
++		T(YAFFS_TRACE_OS,
++			("yaffs_commit_write not same size nWritten %d  nBytes %d\n",
++			nWritten, nBytes));
++		SetPageError(pg);
++		ClearPageUptodate(pg);
++	} else {
++		SetPageUptodate(pg);
++	}
++
++	kunmap(pg);
++
++	T(YAFFS_TRACE_OS,
++		("yaffs_commit_write returning %d\n",
++		nWritten == nBytes ? 0 : nWritten));
++
++	return nWritten == nBytes ? 0 : nWritten;
++}
++#endif
++
++
++static void yaffs_FillInodeFromObject(struct inode *inode, yaffs_Object *obj)
++{
++	if (inode && obj) {
++
++
++		/* Check mode against the variant type and attempt to repair if broken. */
++		__u32 mode = obj->yst_mode;
++		switch (obj->variantType) {
++		case YAFFS_OBJECT_TYPE_FILE:
++			if (!S_ISREG(mode)) {
++				obj->yst_mode &= ~S_IFMT;
++				obj->yst_mode |= S_IFREG;
++			}
++
++			break;
++		case YAFFS_OBJECT_TYPE_SYMLINK:
++			if (!S_ISLNK(mode)) {
++				obj->yst_mode &= ~S_IFMT;
++				obj->yst_mode |= S_IFLNK;
++			}
++
++			break;
++		case YAFFS_OBJECT_TYPE_DIRECTORY:
++			if (!S_ISDIR(mode)) {
++				obj->yst_mode &= ~S_IFMT;
++				obj->yst_mode |= S_IFDIR;
++			}
++
++			break;
++		case YAFFS_OBJECT_TYPE_UNKNOWN:
++		case YAFFS_OBJECT_TYPE_HARDLINK:
++		case YAFFS_OBJECT_TYPE_SPECIAL:
++		default:
++			/* TODO? */
++			break;
++		}
++
++		inode->i_flags |= S_NOATIME;
++
++		inode->i_ino = obj->objectId;
++		inode->i_mode = obj->yst_mode;
++		inode->i_uid = obj->yst_uid;
++		inode->i_gid = obj->yst_gid;
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19))
++		inode->i_blksize = inode->i_sb->s_blocksize;
++#endif
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++
++		inode->i_rdev = old_decode_dev(obj->yst_rdev);
++		inode->i_atime.tv_sec = (time_t) (obj->yst_atime);
++		inode->i_atime.tv_nsec = 0;
++		inode->i_mtime.tv_sec = (time_t) obj->yst_mtime;
++		inode->i_mtime.tv_nsec = 0;
++		inode->i_ctime.tv_sec = (time_t) obj->yst_ctime;
++		inode->i_ctime.tv_nsec = 0;
++#else
++		inode->i_rdev = obj->yst_rdev;
++		inode->i_atime = obj->yst_atime;
++		inode->i_mtime = obj->yst_mtime;
++		inode->i_ctime = obj->yst_ctime;
++#endif
++		inode->i_size = yaffs_GetObjectFileLength(obj);
++		inode->i_blocks = (inode->i_size + 511) >> 9;
++
++		inode->i_nlink = yaffs_GetObjectLinkCount(obj);
++
++		T(YAFFS_TRACE_OS,
++			("yaffs_FillInode mode %x uid %d gid %d size %d count %d\n",
++			inode->i_mode, inode->i_uid, inode->i_gid,
++			(int)inode->i_size, atomic_read(&inode->i_count)));
++
++		switch (obj->yst_mode & S_IFMT) {
++		default:	/* fifo, device or socket */
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++			init_special_inode(inode, obj->yst_mode,
++					old_decode_dev(obj->yst_rdev));
++#else
++			init_special_inode(inode, obj->yst_mode,
++					(dev_t) (obj->yst_rdev));
++#endif
++			break;
++		case S_IFREG:	/* file */
++			inode->i_op = &yaffs_file_inode_operations;
++			inode->i_fop = &yaffs_file_operations;
++			inode->i_mapping->a_ops =
++				&yaffs_file_address_operations;
++			break;
++		case S_IFDIR:	/* directory */
++			inode->i_op = &yaffs_dir_inode_operations;
++			inode->i_fop = &yaffs_dir_operations;
++			break;
++		case S_IFLNK:	/* symlink */
++			inode->i_op = &yaffs_symlink_inode_operations;
++			break;
++		}
++
++		yaffs_InodeToObjectLV(inode) = obj;
++
++		obj->myInode = inode;
++
++	} else {
++		T(YAFFS_TRACE_OS,
++			("yaffs_FileInode invalid parameters\n"));
++	}
++
++}
++
++struct inode *yaffs_get_inode(struct super_block *sb, int mode, int dev,
++				yaffs_Object *obj)
++{
++	struct inode *inode;
++
++	if (!sb) {
++		T(YAFFS_TRACE_OS,
++			("yaffs_get_inode for NULL super_block!!\n"));
++		return NULL;
++
++	}
++
++	if (!obj) {
++		T(YAFFS_TRACE_OS,
++			("yaffs_get_inode for NULL object!!\n"));
++		return NULL;
++
++	}
++
++	T(YAFFS_TRACE_OS,
++		("yaffs_get_inode for object %d\n", obj->objectId));
++
++	inode = Y_IGET(sb, obj->objectId);
++	if (IS_ERR(inode))
++		return NULL;
++
++	/* NB Side effect: iget calls back to yaffs_read_inode(). */
++	/* iget also increments the inode's i_count */
++	/* NB You can't be holding grossLock or deadlock will happen! */
++
++	return inode;
++}
++
++static ssize_t yaffs_file_write(struct file *f, const char *buf, size_t n,
++				loff_t *pos)
++{
++	yaffs_Object *obj;
++	int nWritten, ipos;
++	struct inode *inode;
++	yaffs_Device *dev;
++
++	obj = yaffs_DentryToObject(f->f_dentry);
++
++	dev = obj->myDev;
++
++	yaffs_GrossLock(dev);
++
++	inode = f->f_dentry->d_inode;
++
++	if (!S_ISBLK(inode->i_mode) && f->f_flags & O_APPEND)
++		ipos = inode->i_size;
++	else
++		ipos = *pos;
++
++	if (!obj)
++		T(YAFFS_TRACE_OS,
++			("yaffs_file_write: hey obj is null!\n"));
++	else
++		T(YAFFS_TRACE_OS,
++			("yaffs_file_write about to write writing %zu bytes"
++			"to object %d at %d\n",
++			n, obj->objectId, ipos));
++
++	nWritten = yaffs_WriteDataToFile(obj, buf, ipos, n, 0);
++
++	T(YAFFS_TRACE_OS,
++		("yaffs_file_write writing %zu bytes, %d written at %d\n",
++		n, nWritten, ipos));
++
++	if (nWritten > 0) {
++		ipos += nWritten;
++		*pos = ipos;
++		if (ipos > inode->i_size) {
++			inode->i_size = ipos;
++			inode->i_blocks = (ipos + 511) >> 9;
++
++			T(YAFFS_TRACE_OS,
++				("yaffs_file_write size updated to %d bytes, "
++				"%d blocks\n",
++				ipos, (int)(inode->i_blocks)));
++		}
++
++	}
++	yaffs_GrossUnlock(dev);
++	return nWritten == 0 ? -ENOSPC : nWritten;
++}
++
++/* Space holding and freeing is done to ensure we have space available for write_begin/end */
++/* For now we just assume few parallel writes and check against a small number. */
++/* Todo: need to do this with a counter to handle parallel reads better */
++
++static ssize_t yaffs_hold_space(struct file *f)
++{
++	yaffs_Object *obj;
++	yaffs_Device *dev;
++
++	int nFreeChunks;
++
++
++	obj = yaffs_DentryToObject(f->f_dentry);
++
++	dev = obj->myDev;
++
++	yaffs_GrossLock(dev);
++
++	nFreeChunks = yaffs_GetNumberOfFreeChunks(dev);
++
++	yaffs_GrossUnlock(dev);
++
++	return (nFreeChunks > 20) ? 1 : 0;
++}
++
++static void yaffs_release_space(struct file *f)
++{
++	yaffs_Object *obj;
++	yaffs_Device *dev;
++
++
++	obj = yaffs_DentryToObject(f->f_dentry);
++
++	dev = obj->myDev;
++
++	yaffs_GrossLock(dev);
++
++
++	yaffs_GrossUnlock(dev);
++}
++
++static int yaffs_readdir(struct file *f, void *dirent, filldir_t filldir)
++{
++	yaffs_Object *obj;
++	yaffs_Device *dev;
++	struct inode *inode = f->f_dentry->d_inode;
++	unsigned long offset, curoffs;
++	struct ylist_head *i;
++	yaffs_Object *l;
++
++	char name[YAFFS_MAX_NAME_LENGTH + 1];
++
++	obj = yaffs_DentryToObject(f->f_dentry);
++	dev = obj->myDev;
++
++	yaffs_GrossLock(dev);
++
++	offset = f->f_pos;
++
++	T(YAFFS_TRACE_OS, ("yaffs_readdir: starting at %d\n", (int)offset));
++
++	if (offset == 0) {
++		T(YAFFS_TRACE_OS,
++			("yaffs_readdir: entry . ino %d \n",
++			(int)inode->i_ino));
++		if (filldir(dirent, ".", 1, offset, inode->i_ino, DT_DIR) < 0)
++			goto out;
++		offset++;
++		f->f_pos++;
++	}
++	if (offset == 1) {
++		T(YAFFS_TRACE_OS,
++			("yaffs_readdir: entry .. ino %d \n",
++			(int)f->f_dentry->d_parent->d_inode->i_ino));
++		if (filldir(dirent, "..", 2, offset,
++			f->f_dentry->d_parent->d_inode->i_ino, DT_DIR) < 0)
++			goto out;
++		offset++;
++		f->f_pos++;
++	}
++
++	curoffs = 1;
++
++	/* If the directory has changed since the open or last call to
++	   readdir, rewind to after the 2 canned entries. */
++
++	if (f->f_version != inode->i_version) {
++		offset = 2;
++		f->f_pos = offset;
++		f->f_version = inode->i_version;
++	}
++
++	ylist_for_each(i, &obj->variant.directoryVariant.children) {
++		curoffs++;
++		if (curoffs >= offset) {
++			l = ylist_entry(i, yaffs_Object, siblings);
++
++			yaffs_GetObjectName(l, name,
++					    YAFFS_MAX_NAME_LENGTH + 1);
++			T(YAFFS_TRACE_OS,
++			  ("yaffs_readdir: %s inode %d\n", name,
++			   yaffs_GetObjectInode(l)));
++
++			if (filldir(dirent,
++					name,
++					strlen(name),
++					offset,
++					yaffs_GetObjectInode(l),
++					yaffs_GetObjectType(l)) < 0)
++				goto up_and_out;
++
++			offset++;
++			f->f_pos++;
++		}
++	}
++
++up_and_out:
++out:
++	yaffs_GrossUnlock(dev);
++
++	return 0;
++}
++
++/*
++ * File creation. Allocate an inode, and we're done..
++ */
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
++#define YCRED(x) x
++#else
++#define YCRED(x) (x->cred)
++#endif
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++static int yaffs_mknod(struct inode *dir, struct dentry *dentry, int mode,
++			dev_t rdev)
++#else
++static int yaffs_mknod(struct inode *dir, struct dentry *dentry, int mode,
++			int rdev)
++#endif
++{
++	struct inode *inode;
++
++	yaffs_Object *obj = NULL;
++	yaffs_Device *dev;
++
++	yaffs_Object *parent = yaffs_InodeToObject(dir);
++
++	int error = -ENOSPC;
++	uid_t uid = YCRED(current)->fsuid;
++	gid_t gid = (dir->i_mode & S_ISGID) ? dir->i_gid : YCRED(current)->fsgid;
++
++	if ((dir->i_mode & S_ISGID) && S_ISDIR(mode))
++		mode |= S_ISGID;
++
++	if (parent) {
++		T(YAFFS_TRACE_OS,
++			("yaffs_mknod: parent object %d type %d\n",
++			parent->objectId, parent->variantType));
++	} else {
++		T(YAFFS_TRACE_OS,
++			("yaffs_mknod: could not get parent object\n"));
++		return -EPERM;
++	}
++
++	T(YAFFS_TRACE_OS, ("yaffs_mknod: making oject for %s, "
++			"mode %x dev %x\n",
++			dentry->d_name.name, mode, rdev));
++
++	dev = parent->myDev;
++
++	yaffs_GrossLock(dev);
++
++	switch (mode & S_IFMT) {
++	default:
++		/* Special (socket, fifo, device...) */
++		T(YAFFS_TRACE_OS, ("yaffs_mknod: making special\n"));
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++		obj = yaffs_MknodSpecial(parent, dentry->d_name.name, mode, uid,
++				gid, old_encode_dev(rdev));
++#else
++		obj = yaffs_MknodSpecial(parent, dentry->d_name.name, mode, uid,
++				gid, rdev);
++#endif
++		break;
++	case S_IFREG:		/* file          */
++		T(YAFFS_TRACE_OS, ("yaffs_mknod: making file\n"));
++		obj = yaffs_MknodFile(parent, dentry->d_name.name, mode, uid,
++				gid);
++		break;
++	case S_IFDIR:		/* directory */
++		T(YAFFS_TRACE_OS,
++			("yaffs_mknod: making directory\n"));
++		obj = yaffs_MknodDirectory(parent, dentry->d_name.name, mode,
++					uid, gid);
++		break;
++	case S_IFLNK:		/* symlink */
++		T(YAFFS_TRACE_OS, ("yaffs_mknod: making symlink\n"));
++		obj = NULL;	/* Do we ever get here? */
++		break;
++	}
++
++	/* Can not call yaffs_get_inode() with gross lock held */
++	yaffs_GrossUnlock(dev);
++
++	if (obj) {
++		inode = yaffs_get_inode(dir->i_sb, mode, rdev, obj);
++		d_instantiate(dentry, inode);
++		T(YAFFS_TRACE_OS,
++			("yaffs_mknod created object %d count = %d\n",
++			obj->objectId, atomic_read(&inode->i_count)));
++		error = 0;
++	} else {
++		T(YAFFS_TRACE_OS,
++			("yaffs_mknod failed making object\n"));
++		error = -ENOMEM;
++	}
++
++	return error;
++}
++
++static int yaffs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
++{
++	int retVal;
++	T(YAFFS_TRACE_OS, ("yaffs_mkdir\n"));
++	retVal = yaffs_mknod(dir, dentry, mode | S_IFDIR, 0);
++	return retVal;
++}
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode,
++			struct nameidata *n)
++#else
++static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode)
++#endif
++{
++	T(YAFFS_TRACE_OS, ("yaffs_create\n"));
++	return yaffs_mknod(dir, dentry, mode | S_IFREG, 0);
++}
++
++static int yaffs_unlink(struct inode *dir, struct dentry *dentry)
++{
++	int retVal;
++
++	yaffs_Device *dev;
++
++	T(YAFFS_TRACE_OS,
++		("yaffs_unlink %d:%s\n", (int)(dir->i_ino),
++		dentry->d_name.name));
++
++	dev = yaffs_InodeToObject(dir)->myDev;
++
++	yaffs_GrossLock(dev);
++
++	retVal = yaffs_Unlink(yaffs_InodeToObject(dir), dentry->d_name.name);
++
++	if (retVal == YAFFS_OK) {
++		dentry->d_inode->i_nlink--;
++		dir->i_version++;
++		yaffs_GrossUnlock(dev);
++		mark_inode_dirty(dentry->d_inode);
++		return 0;
++	}
++	yaffs_GrossUnlock(dev);
++	return -ENOTEMPTY;
++}
++
++/*
++ * Create a link...
++ */
++static int yaffs_link(struct dentry *old_dentry, struct inode *dir,
++			struct dentry *dentry)
++{
++	struct inode *inode = old_dentry->d_inode;
++	yaffs_Object *obj = NULL;
++	yaffs_Object *link = NULL;
++	yaffs_Device *dev;
++
++	T(YAFFS_TRACE_OS, ("yaffs_link\n"));
++
++	obj = yaffs_InodeToObject(inode);
++	dev = obj->myDev;
++
++	yaffs_GrossLock(dev);
++
++	if (!S_ISDIR(inode->i_mode))		/* Don't link directories */
++		link = yaffs_Link(yaffs_InodeToObject(dir), dentry->d_name.name,
++			obj);
++
++	if (link) {
++		old_dentry->d_inode->i_nlink = yaffs_GetObjectLinkCount(obj);
++		d_instantiate(dentry, old_dentry->d_inode);
++		atomic_inc(&old_dentry->d_inode->i_count);
++		T(YAFFS_TRACE_OS,
++			("yaffs_link link count %d i_count %d\n",
++			old_dentry->d_inode->i_nlink,
++			atomic_read(&old_dentry->d_inode->i_count)));
++	}
++
++	yaffs_GrossUnlock(dev);
++
++	if (link)
++		return 0;
++
++	return -EPERM;
++}
++
++static int yaffs_symlink(struct inode *dir, struct dentry *dentry,
++				const char *symname)
++{
++	yaffs_Object *obj;
++	yaffs_Device *dev;
++	uid_t uid = YCRED(current)->fsuid;
++	gid_t gid = (dir->i_mode & S_ISGID) ? dir->i_gid : YCRED(current)->fsgid;
++
++	T(YAFFS_TRACE_OS, ("yaffs_symlink\n"));
++
++	dev = yaffs_InodeToObject(dir)->myDev;
++	yaffs_GrossLock(dev);
++	obj = yaffs_MknodSymLink(yaffs_InodeToObject(dir), dentry->d_name.name,
++				S_IFLNK | S_IRWXUGO, uid, gid, symname);
++	yaffs_GrossUnlock(dev);
++
++	if (obj) {
++		struct inode *inode;
++
++		inode = yaffs_get_inode(dir->i_sb, obj->yst_mode, 0, obj);
++		d_instantiate(dentry, inode);
++		T(YAFFS_TRACE_OS, ("symlink created OK\n"));
++		return 0;
++	} else {
++		T(YAFFS_TRACE_OS, ("symlink not created\n"));
++	}
++
++	return -ENOMEM;
++}
++
++static int yaffs_sync_object(struct file *file, struct dentry *dentry,
++				int datasync)
++{
++
++	yaffs_Object *obj;
++	yaffs_Device *dev;
++
++	obj = yaffs_DentryToObject(dentry);
++
++	dev = obj->myDev;
++
++	T(YAFFS_TRACE_OS, ("yaffs_sync_object\n"));
++	yaffs_GrossLock(dev);
++	yaffs_FlushFile(obj, 1);
++	yaffs_GrossUnlock(dev);
++	return 0;
++}
++
++/*
++ * The VFS layer already does all the dentry stuff for rename.
++ *
++ * NB: POSIX says you can rename an object over an old object of the same name
++ */
++static int yaffs_rename(struct inode *old_dir, struct dentry *old_dentry,
++			struct inode *new_dir, struct dentry *new_dentry)
++{
++	yaffs_Device *dev;
++	int retVal = YAFFS_FAIL;
++	yaffs_Object *target;
++
++	T(YAFFS_TRACE_OS, ("yaffs_rename\n"));
++	dev = yaffs_InodeToObject(old_dir)->myDev;
++
++	yaffs_GrossLock(dev);
++
++	/* Check if the target is an existing directory that is not empty. */
++	target = yaffs_FindObjectByName(yaffs_InodeToObject(new_dir),
++				new_dentry->d_name.name);
++
++
++
++	if (target && target->variantType == YAFFS_OBJECT_TYPE_DIRECTORY &&
++		!ylist_empty(&target->variant.directoryVariant.children)) {
++
++		T(YAFFS_TRACE_OS, ("target is non-empty dir\n"));
++
++		retVal = YAFFS_FAIL;
++	} else {
++		/* Now does unlinking internally using shadowing mechanism */
++		T(YAFFS_TRACE_OS, ("calling yaffs_RenameObject\n"));
++
++		retVal = yaffs_RenameObject(yaffs_InodeToObject(old_dir),
++				old_dentry->d_name.name,
++				yaffs_InodeToObject(new_dir),
++				new_dentry->d_name.name);
++	}
++	yaffs_GrossUnlock(dev);
++
++	if (retVal == YAFFS_OK) {
++		if (target) {
++			new_dentry->d_inode->i_nlink--;
++			mark_inode_dirty(new_dentry->d_inode);
++		}
++
++		return 0;
++	} else {
++		return -ENOTEMPTY;
++	}
++}
++
++static int yaffs_setattr(struct dentry *dentry, struct iattr *attr)
++{
++	struct inode *inode = dentry->d_inode;
++	int error;
++	yaffs_Device *dev;
++
++	T(YAFFS_TRACE_OS,
++		("yaffs_setattr of object %d\n",
++		yaffs_InodeToObject(inode)->objectId));
++
++	error = inode_change_ok(inode, attr);
++	if (error == 0) {
++		dev = yaffs_InodeToObject(inode)->myDev;
++		yaffs_GrossLock(dev);
++		if (yaffs_SetAttributes(yaffs_InodeToObject(inode), attr) ==
++				YAFFS_OK) {
++			error = 0;
++		} else {
++			error = -EPERM;
++		}
++		yaffs_GrossUnlock(dev);
++		if (!error)
++			error = inode_setattr(inode, attr);
++	}
++	return error;
++}
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++static int yaffs_statfs(struct dentry *dentry, struct kstatfs *buf)
++{
++	yaffs_Device *dev = yaffs_DentryToObject(dentry)->myDev;
++	struct super_block *sb = dentry->d_sb;
++#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++static int yaffs_statfs(struct super_block *sb, struct kstatfs *buf)
++{
++	yaffs_Device *dev = yaffs_SuperToDevice(sb);
++#else
++static int yaffs_statfs(struct super_block *sb, struct statfs *buf)
++{
++	yaffs_Device *dev = yaffs_SuperToDevice(sb);
++#endif
++
++	T(YAFFS_TRACE_OS, ("yaffs_statfs\n"));
++
++	yaffs_GrossLock(dev);
++
++	buf->f_type = YAFFS_MAGIC;
++	buf->f_bsize = sb->s_blocksize;
++	buf->f_namelen = 255;
++
++	if (dev->nDataBytesPerChunk & (dev->nDataBytesPerChunk - 1)) {
++		/* Do this if chunk size is not a power of 2 */
++
++		uint64_t bytesInDev;
++		uint64_t bytesFree;
++
++		bytesInDev = ((uint64_t)((dev->endBlock - dev->startBlock + 1))) *
++			((uint64_t)(dev->nChunksPerBlock * dev->nDataBytesPerChunk));
++
++		do_div(bytesInDev, sb->s_blocksize); /* bytesInDev becomes the number of blocks */
++		buf->f_blocks = bytesInDev;
++
++		bytesFree  = ((uint64_t)(yaffs_GetNumberOfFreeChunks(dev))) *
++			((uint64_t)(dev->nDataBytesPerChunk));
++
++		do_div(bytesFree, sb->s_blocksize);
++
++		buf->f_bfree = bytesFree;
++
++	} else if (sb->s_blocksize > dev->nDataBytesPerChunk) {
++
++		buf->f_blocks =
++			(dev->endBlock - dev->startBlock + 1) *
++			dev->nChunksPerBlock /
++			(sb->s_blocksize / dev->nDataBytesPerChunk);
++		buf->f_bfree =
++			yaffs_GetNumberOfFreeChunks(dev) /
++			(sb->s_blocksize / dev->nDataBytesPerChunk);
++	} else {
++		buf->f_blocks =
++			(dev->endBlock - dev->startBlock + 1) *
++			dev->nChunksPerBlock *
++			(dev->nDataBytesPerChunk / sb->s_blocksize);
++
++		buf->f_bfree =
++			yaffs_GetNumberOfFreeChunks(dev) *
++			(dev->nDataBytesPerChunk / sb->s_blocksize);
++	}
++
++	buf->f_files = 0;
++	buf->f_ffree = 0;
++	buf->f_bavail = buf->f_bfree;
++
++	yaffs_GrossUnlock(dev);
++	return 0;
++}
++
++
++static int yaffs_do_sync_fs(struct super_block *sb)
++{
++
++	yaffs_Device *dev = yaffs_SuperToDevice(sb);
++	T(YAFFS_TRACE_OS, ("yaffs_do_sync_fs\n"));
++
++	if (sb->s_dirt) {
++		yaffs_GrossLock(dev);
++
++		if (dev) {
++			yaffs_FlushEntireDeviceCache(dev);
++			yaffs_CheckpointSave(dev);
++		}
++
++		yaffs_GrossUnlock(dev);
++
++		sb->s_dirt = 0;
++	}
++	return 0;
++}
++
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++static void yaffs_write_super(struct super_block *sb)
++#else
++static int yaffs_write_super(struct super_block *sb)
++#endif
++{
++
++	T(YAFFS_TRACE_OS, ("yaffs_write_super\n"));
++	if (yaffs_auto_checkpoint >= 2)
++		yaffs_do_sync_fs(sb);
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18))
++	return 0;
++#endif
++}
++
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++static int yaffs_sync_fs(struct super_block *sb, int wait)
++#else
++static int yaffs_sync_fs(struct super_block *sb)
++#endif
++{
++	T(YAFFS_TRACE_OS, ("yaffs_sync_fs\n"));
++
++	if (yaffs_auto_checkpoint >= 1)
++		yaffs_do_sync_fs(sb);
++
++	return 0;
++}
++
++#ifdef YAFFS_USE_OWN_IGET
++
++static struct inode *yaffs_iget(struct super_block *sb, unsigned long ino)
++{
++	struct inode *inode;
++	yaffs_Object *obj;
++	yaffs_Device *dev = yaffs_SuperToDevice(sb);
++
++	T(YAFFS_TRACE_OS,
++		("yaffs_iget for %lu\n", ino));
++
++	inode = iget_locked(sb, ino);
++	if (!inode)
++		return ERR_PTR(-ENOMEM);
++	if (!(inode->i_state & I_NEW))
++		return inode;
++
++	/* NB This is called as a side effect of other functions, but
++	 * we had to release the lock to prevent deadlocks, so
++	 * need to lock again.
++	 */
++
++	yaffs_GrossLock(dev);
++
++	obj = yaffs_FindObjectByNumber(dev, inode->i_ino);
++
++	yaffs_FillInodeFromObject(inode, obj);
++
++	yaffs_GrossUnlock(dev);
++
++	unlock_new_inode(inode);
++	return inode;
++}
++
++#else
++
++static void yaffs_read_inode(struct inode *inode)
++{
++	/* NB This is called as a side effect of other functions, but
++	 * we had to release the lock to prevent deadlocks, so
++	 * need to lock again.
++	 */
++
++	yaffs_Object *obj;
++	yaffs_Device *dev = yaffs_SuperToDevice(inode->i_sb);
++
++	T(YAFFS_TRACE_OS,
++		("yaffs_read_inode for %d\n", (int)inode->i_ino));
++
++	yaffs_GrossLock(dev);
++
++	obj = yaffs_FindObjectByNumber(dev, inode->i_ino);
++
++	yaffs_FillInodeFromObject(inode, obj);
++
++	yaffs_GrossUnlock(dev);
++}
++
++#endif
++
++static YLIST_HEAD(yaffs_dev_list);
++
++#if 0 /* not used */
++static int yaffs_remount_fs(struct super_block *sb, int *flags, char *data)
++{
++	yaffs_Device    *dev = yaffs_SuperToDevice(sb);
++
++	if (*flags & MS_RDONLY) {
++		struct mtd_info *mtd = yaffs_SuperToDevice(sb)->genericDevice;
++
++		T(YAFFS_TRACE_OS,
++			("yaffs_remount_fs: %s: RO\n", dev->name));
++
++		yaffs_GrossLock(dev);
++
++		yaffs_FlushEntireDeviceCache(dev);
++
++		yaffs_CheckpointSave(dev);
++
++		if (mtd->sync)
++			mtd->sync(mtd);
++
++		yaffs_GrossUnlock(dev);
++	} else {
++		T(YAFFS_TRACE_OS,
++			("yaffs_remount_fs: %s: RW\n", dev->name));
++	}
++
++	return 0;
++}
++#endif
++
++static void yaffs_put_super(struct super_block *sb)
++{
++	yaffs_Device *dev = yaffs_SuperToDevice(sb);
++
++	T(YAFFS_TRACE_OS, ("yaffs_put_super\n"));
++
++	yaffs_GrossLock(dev);
++
++	yaffs_FlushEntireDeviceCache(dev);
++
++	yaffs_CheckpointSave(dev);
++
++	if (dev->putSuperFunc)
++		dev->putSuperFunc(sb);
++
++	yaffs_Deinitialise(dev);
++
++	yaffs_GrossUnlock(dev);
++
++	/* we assume this is protected by lock_kernel() in mount/umount */
++	ylist_del(&dev->devList);
++
++	if (dev->spareBuffer) {
++		YFREE(dev->spareBuffer);
++		dev->spareBuffer = NULL;
++	}
++
++	kfree(dev);
++}
++
++
++static void yaffs_MTDPutSuper(struct super_block *sb)
++{
++	struct mtd_info *mtd = yaffs_SuperToDevice(sb)->genericDevice;
++
++	if (mtd->sync)
++		mtd->sync(mtd);
++
++	put_mtd_device(mtd);
++}
++
++
++static void yaffs_MarkSuperBlockDirty(void *vsb)
++{
++	struct super_block *sb = (struct super_block *)vsb;
++
++	T(YAFFS_TRACE_OS, ("yaffs_MarkSuperBlockDirty() sb = %p\n", sb));
++	if (sb)
++		sb->s_dirt = 1;
++}
++
++typedef struct {
++	int inband_tags;
++	int skip_checkpoint_read;
++	int skip_checkpoint_write;
++	int no_cache;
++} yaffs_options;
++
++#define MAX_OPT_LEN 20
++static int yaffs_parse_options(yaffs_options *options, const char *options_str)
++{
++	char cur_opt[MAX_OPT_LEN + 1];
++	int p;
++	int error = 0;
++
++	/* Parse through the options which is a comma seperated list */
++
++	while (options_str && *options_str && !error) {
++		memset(cur_opt, 0, MAX_OPT_LEN + 1);
++		p = 0;
++
++		while (*options_str && *options_str != ',') {
++			if (p < MAX_OPT_LEN) {
++				cur_opt[p] = *options_str;
++				p++;
++			}
++			options_str++;
++		}
++
++		if (!strcmp(cur_opt, "inband-tags"))
++			options->inband_tags = 1;
++		else if (!strcmp(cur_opt, "no-cache"))
++			options->no_cache = 1;
++		else if (!strcmp(cur_opt, "no-checkpoint-read"))
++			options->skip_checkpoint_read = 1;
++		else if (!strcmp(cur_opt, "no-checkpoint-write"))
++			options->skip_checkpoint_write = 1;
++		else if (!strcmp(cur_opt, "no-checkpoint")) {
++			options->skip_checkpoint_read = 1;
++			options->skip_checkpoint_write = 1;
++		} else {
++			printk(KERN_INFO "yaffs: Bad mount option \"%s\"\n",
++					cur_opt);
++			error = 1;
++		}
++	}
++
++	return error;
++}
++
++static struct super_block *yaffs_internal_read_super(int yaffsVersion,
++						struct super_block *sb,
++						void *data, int silent)
++{
++	int nBlocks;
++	struct inode *inode = NULL;
++	struct dentry *root;
++	yaffs_Device *dev = 0;
++	char devname_buf[BDEVNAME_SIZE + 1];
++	struct mtd_info *mtd;
++	int err;
++	char *data_str = (char *)data;
++
++	yaffs_options options;
++
++	sb->s_magic = YAFFS_MAGIC;
++	sb->s_op = &yaffs_super_ops;
++	sb->s_flags |= MS_NOATIME;
++
++	if (!sb)
++		printk(KERN_INFO "yaffs: sb is NULL\n");
++	else if (!sb->s_dev)
++		printk(KERN_INFO "yaffs: sb->s_dev is NULL\n");
++	else if (!yaffs_devname(sb, devname_buf))
++		printk(KERN_INFO "yaffs: devname is NULL\n");
++	else
++		printk(KERN_INFO "yaffs: dev is %d name is \"%s\"\n",
++		       sb->s_dev,
++		       yaffs_devname(sb, devname_buf));
++
++	if (!data_str)
++		data_str = "";
++
++	printk(KERN_INFO "yaffs: passed flags \"%s\"\n", data_str);
++
++	memset(&options, 0, sizeof(options));
++
++	if (yaffs_parse_options(&options, data_str)) {
++		/* Option parsing failed */
++		return NULL;
++	}
++
++
++	sb->s_blocksize = PAGE_CACHE_SIZE;
++	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
++	T(YAFFS_TRACE_OS, ("yaffs_read_super: Using yaffs%d\n", yaffsVersion));
++	T(YAFFS_TRACE_OS,
++	  ("yaffs_read_super: block size %d\n", (int)(sb->s_blocksize)));
++
++#ifdef CONFIG_YAFFS_DISABLE_WRITE_VERIFY
++	T(YAFFS_TRACE_OS,
++	  ("yaffs: Write verification disabled. All guarantees "
++	   "null and void\n"));
++#endif
++
++	T(YAFFS_TRACE_ALWAYS, ("yaffs: Attempting MTD mount on %u.%u, "
++			       "\"%s\"\n",
++			       MAJOR(sb->s_dev), MINOR(sb->s_dev),
++			       yaffs_devname(sb, devname_buf)));
++
++	/* Check it's an mtd device..... */
++	if (MAJOR(sb->s_dev) != MTD_BLOCK_MAJOR)
++		return NULL;	/* This isn't an mtd device */
++
++	/* Get the device */
++	mtd = get_mtd_device(NULL, MINOR(sb->s_dev));
++	if (!mtd) {
++		T(YAFFS_TRACE_ALWAYS,
++		  ("yaffs: MTD device #%u doesn't appear to exist\n",
++		   MINOR(sb->s_dev)));
++		return NULL;
++	}
++	/* Check it's NAND */
++	if (mtd->type != MTD_NANDFLASH) {
++		T(YAFFS_TRACE_ALWAYS,
++		  ("yaffs: MTD device is not NAND it's type %d\n", mtd->type));
++		return NULL;
++	}
++
++	T(YAFFS_TRACE_OS, (" erase %p\n", mtd->erase));
++	T(YAFFS_TRACE_OS, (" read %p\n", mtd->read));
++	T(YAFFS_TRACE_OS, (" write %p\n", mtd->write));
++	T(YAFFS_TRACE_OS, (" readoob %p\n", mtd->read_oob));
++	T(YAFFS_TRACE_OS, (" writeoob %p\n", mtd->write_oob));
++	T(YAFFS_TRACE_OS, (" block_isbad %p\n", mtd->block_isbad));
++	T(YAFFS_TRACE_OS, (" block_markbad %p\n", mtd->block_markbad));
++	T(YAFFS_TRACE_OS, (" %s %d\n", WRITE_SIZE_STR, WRITE_SIZE(mtd)));
++	T(YAFFS_TRACE_OS, (" oobsize %d\n", mtd->oobsize));
++	T(YAFFS_TRACE_OS, (" erasesize %d\n", mtd->erasesize));
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
++	T(YAFFS_TRACE_OS, (" size %u\n", mtd->size));
++#else
++	T(YAFFS_TRACE_OS, (" size %lld\n", mtd->size));
++#endif
++
++#ifdef CONFIG_YAFFS_AUTO_YAFFS2
++
++	if (yaffsVersion == 1 && WRITE_SIZE(mtd) >= 2048) {
++		T(YAFFS_TRACE_ALWAYS, ("yaffs: auto selecting yaffs2\n"));
++		yaffsVersion = 2;
++	}
++
++	/* Added NCB 26/5/2006 for completeness */
++	if (yaffsVersion == 2 && !options.inband_tags && WRITE_SIZE(mtd) == 512) {
++		T(YAFFS_TRACE_ALWAYS, ("yaffs: auto selecting yaffs1\n"));
++		yaffsVersion = 1;
++	}
++
++#endif
++
++	if (yaffsVersion == 2) {
++		/* Check for version 2 style functions */
++		if (!mtd->erase ||
++		    !mtd->block_isbad ||
++		    !mtd->block_markbad ||
++		    !mtd->read ||
++		    !mtd->write ||
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++		    !mtd->read_oob || !mtd->write_oob) {
++#else
++		    !mtd->write_ecc ||
++		    !mtd->read_ecc || !mtd->read_oob || !mtd->write_oob) {
++#endif
++			T(YAFFS_TRACE_ALWAYS,
++			  ("yaffs: MTD device does not support required "
++			   "functions\n"));;
++			return NULL;
++		}
++
++		if ((WRITE_SIZE(mtd) < YAFFS_MIN_YAFFS2_CHUNK_SIZE ||
++		    mtd->oobsize < YAFFS_MIN_YAFFS2_SPARE_SIZE) &&
++		    !options.inband_tags) {
++			T(YAFFS_TRACE_ALWAYS,
++			  ("yaffs: MTD device does not have the "
++			   "right page sizes\n"));
++			return NULL;
++		}
++	} else {
++		/* Check for V1 style functions */
++		if (!mtd->erase ||
++		    !mtd->read ||
++		    !mtd->write ||
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++		    !mtd->read_oob || !mtd->write_oob) {
++#else
++		    !mtd->write_ecc ||
++		    !mtd->read_ecc || !mtd->read_oob || !mtd->write_oob) {
++#endif
++			T(YAFFS_TRACE_ALWAYS,
++			  ("yaffs: MTD device does not support required "
++			   "functions\n"));;
++			return NULL;
++		}
++
++		if (WRITE_SIZE(mtd) < YAFFS_BYTES_PER_CHUNK ||
++		    mtd->oobsize != YAFFS_BYTES_PER_SPARE) {
++			T(YAFFS_TRACE_ALWAYS,
++			  ("yaffs: MTD device does not support have the "
++			   "right page sizes\n"));
++			return NULL;
++		}
++	}
++
++	/* OK, so if we got here, we have an MTD that's NAND and looks
++	 * like it has the right capabilities
++	 * Set the yaffs_Device up for mtd
++	 */
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++	sb->s_fs_info = dev = kmalloc(sizeof(yaffs_Device), GFP_KERNEL);
++#else
++	sb->u.generic_sbp = dev = kmalloc(sizeof(yaffs_Device), GFP_KERNEL);
++#endif
++	if (!dev) {
++		/* Deep shit could not allocate device structure */
++		T(YAFFS_TRACE_ALWAYS,
++		  ("yaffs_read_super: Failed trying to allocate "
++		   "yaffs_Device. \n"));
++		return NULL;
++	}
++
++	memset(dev, 0, sizeof(yaffs_Device));
++	dev->genericDevice = mtd;
++	dev->name = mtd->name;
++
++	/* Set up the memory size parameters.... */
++
++	nBlocks = YCALCBLOCKS(mtd->size, (YAFFS_CHUNKS_PER_BLOCK * YAFFS_BYTES_PER_CHUNK));
++
++	dev->startBlock = 0;
++	dev->endBlock = nBlocks - 1;
++	dev->nChunksPerBlock = YAFFS_CHUNKS_PER_BLOCK;
++	dev->totalBytesPerChunk = YAFFS_BYTES_PER_CHUNK;
++	dev->nReservedBlocks = 5;
++	dev->nShortOpCaches = (options.no_cache) ? 0 : 10;
++	dev->inbandTags = options.inband_tags;
++
++	/* ... and the functions. */
++	if (yaffsVersion == 2) {
++		dev->writeChunkWithTagsToNAND =
++		    nandmtd2_WriteChunkWithTagsToNAND;
++		dev->readChunkWithTagsFromNAND =
++		    nandmtd2_ReadChunkWithTagsFromNAND;
++		dev->markNANDBlockBad = nandmtd2_MarkNANDBlockBad;
++		dev->queryNANDBlock = nandmtd2_QueryNANDBlock;
++		dev->spareBuffer = YMALLOC(mtd->oobsize);
++		dev->isYaffs2 = 1;
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++		dev->totalBytesPerChunk = mtd->writesize;
++		dev->nChunksPerBlock = mtd->erasesize / mtd->writesize;
++#else
++		dev->totalBytesPerChunk = mtd->oobblock;
++		dev->nChunksPerBlock = mtd->erasesize / mtd->oobblock;
++#endif
++		nBlocks = YCALCBLOCKS(mtd->size, mtd->erasesize);
++
++		dev->startBlock = 0;
++		dev->endBlock = nBlocks - 1;
++	} else {
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++		/* use the MTD interface in yaffs_mtdif1.c */
++		dev->writeChunkWithTagsToNAND =
++			nandmtd1_WriteChunkWithTagsToNAND;
++		dev->readChunkWithTagsFromNAND =
++			nandmtd1_ReadChunkWithTagsFromNAND;
++		dev->markNANDBlockBad = nandmtd1_MarkNANDBlockBad;
++		dev->queryNANDBlock = nandmtd1_QueryNANDBlock;
++#else
++		dev->writeChunkToNAND = nandmtd_WriteChunkToNAND;
++		dev->readChunkFromNAND = nandmtd_ReadChunkFromNAND;
++#endif
++		dev->isYaffs2 = 0;
++	}
++	/* ... and common functions */
++	dev->eraseBlockInNAND = nandmtd_EraseBlockInNAND;
++	dev->initialiseNAND = nandmtd_InitialiseNAND;
++
++	dev->putSuperFunc = yaffs_MTDPutSuper;
++
++	dev->superBlock = (void *)sb;
++	dev->markSuperBlockDirty = yaffs_MarkSuperBlockDirty;
++
++
++#ifndef CONFIG_YAFFS_DOES_ECC
++	dev->useNANDECC = 1;
++#endif
++
++#ifdef CONFIG_YAFFS_DISABLE_WIDE_TNODES
++	dev->wideTnodesDisabled = 1;
++#endif
++
++	dev->skipCheckpointRead = options.skip_checkpoint_read;
++	dev->skipCheckpointWrite = options.skip_checkpoint_write;
++
++	/* we assume this is protected by lock_kernel() in mount/umount */
++	ylist_add_tail(&dev->devList, &yaffs_dev_list);
++
++	init_MUTEX(&dev->grossLock);
++
++	yaffs_GrossLock(dev);
++
++	err = yaffs_GutsInitialise(dev);
++
++	T(YAFFS_TRACE_OS,
++	  ("yaffs_read_super: guts initialised %s\n",
++	   (err == YAFFS_OK) ? "OK" : "FAILED"));
++
++	/* Release lock before yaffs_get_inode() */
++	yaffs_GrossUnlock(dev);
++
++	/* Create root inode */
++	if (err == YAFFS_OK)
++		inode = yaffs_get_inode(sb, S_IFDIR | 0755, 0,
++					yaffs_Root(dev));
++
++	if (!inode)
++		return NULL;
++
++	inode->i_op = &yaffs_dir_inode_operations;
++	inode->i_fop = &yaffs_dir_operations;
++
++	T(YAFFS_TRACE_OS, ("yaffs_read_super: got root inode\n"));
++
++	root = d_alloc_root(inode);
++
++	T(YAFFS_TRACE_OS, ("yaffs_read_super: d_alloc_root done\n"));
++
++	if (!root) {
++		iput(inode);
++		return NULL;
++	}
++	sb->s_root = root;
++	sb->s_dirt = !dev->isCheckpointed;
++	T(YAFFS_TRACE_ALWAYS,
++	  ("yaffs_read_super: isCheckpointed %d\n", dev->isCheckpointed));
++
++	T(YAFFS_TRACE_OS, ("yaffs_read_super: done\n"));
++	return sb;
++}
++
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++static int yaffs_internal_read_super_mtd(struct super_block *sb, void *data,
++					 int silent)
++{
++	return yaffs_internal_read_super(1, sb, data, silent) ? 0 : -EINVAL;
++}
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++static int yaffs_read_super(struct file_system_type *fs,
++			    int flags, const char *dev_name,
++			    void *data, struct vfsmount *mnt)
++{
++
++	return get_sb_bdev(fs, flags, dev_name, data,
++			   yaffs_internal_read_super_mtd, mnt);
++}
++#else
++static struct super_block *yaffs_read_super(struct file_system_type *fs,
++					    int flags, const char *dev_name,
++					    void *data)
++{
++
++	return get_sb_bdev(fs, flags, dev_name, data,
++			   yaffs_internal_read_super_mtd);
++}
++#endif
++
++static struct file_system_type yaffs_fs_type = {
++	.owner = THIS_MODULE,
++	.name = "yaffs",
++	.get_sb = yaffs_read_super,
++	.kill_sb = kill_block_super,
++	.fs_flags = FS_REQUIRES_DEV,
++};
++#else
++static struct super_block *yaffs_read_super(struct super_block *sb, void *data,
++					    int silent)
++{
++	return yaffs_internal_read_super(1, sb, data, silent);
++}
++
++static DECLARE_FSTYPE(yaffs_fs_type, "yaffs", yaffs_read_super,
++		      FS_REQUIRES_DEV);
++#endif
++
++
++#ifdef CONFIG_YAFFS_YAFFS2
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++static int yaffs2_internal_read_super_mtd(struct super_block *sb, void *data,
++					  int silent)
++{
++	return yaffs_internal_read_super(2, sb, data, silent) ? 0 : -EINVAL;
++}
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++static int yaffs2_read_super(struct file_system_type *fs,
++			int flags, const char *dev_name, void *data,
++			struct vfsmount *mnt)
++{
++	return get_sb_bdev(fs, flags, dev_name, data,
++			yaffs2_internal_read_super_mtd, mnt);
++}
++#else
++static struct super_block *yaffs2_read_super(struct file_system_type *fs,
++					     int flags, const char *dev_name,
++					     void *data)
++{
++
++	return get_sb_bdev(fs, flags, dev_name, data,
++			   yaffs2_internal_read_super_mtd);
++}
++#endif
++
++static struct file_system_type yaffs2_fs_type = {
++	.owner = THIS_MODULE,
++	.name = "yaffs2",
++	.get_sb = yaffs2_read_super,
++	.kill_sb = kill_block_super,
++	.fs_flags = FS_REQUIRES_DEV,
++};
++#else
++static struct super_block *yaffs2_read_super(struct super_block *sb,
++					     void *data, int silent)
++{
++	return yaffs_internal_read_super(2, sb, data, silent);
++}
++
++static DECLARE_FSTYPE(yaffs2_fs_type, "yaffs2", yaffs2_read_super,
++		      FS_REQUIRES_DEV);
++#endif
++
++#endif				/* CONFIG_YAFFS_YAFFS2 */
++
++static struct proc_dir_entry *my_proc_entry;
++
++static char *yaffs_dump_dev(char *buf, yaffs_Device * dev)
++{
++	buf += sprintf(buf, "startBlock......... %d\n", dev->startBlock);
++	buf += sprintf(buf, "endBlock........... %d\n", dev->endBlock);
++	buf += sprintf(buf, "totalBytesPerChunk. %d\n", dev->totalBytesPerChunk);
++	buf += sprintf(buf, "nDataBytesPerChunk. %d\n", dev->nDataBytesPerChunk);
++	buf += sprintf(buf, "chunkGroupBits..... %d\n", dev->chunkGroupBits);
++	buf += sprintf(buf, "chunkGroupSize..... %d\n", dev->chunkGroupSize);
++	buf += sprintf(buf, "nErasedBlocks...... %d\n", dev->nErasedBlocks);
++	buf += sprintf(buf, "nReservedBlocks.... %d\n", dev->nReservedBlocks);
++	buf += sprintf(buf, "blocksInCheckpoint. %d\n", dev->blocksInCheckpoint);
++	buf += sprintf(buf, "nTnodesCreated..... %d\n", dev->nTnodesCreated);
++	buf += sprintf(buf, "nFreeTnodes........ %d\n", dev->nFreeTnodes);
++	buf += sprintf(buf, "nObjectsCreated.... %d\n", dev->nObjectsCreated);
++	buf += sprintf(buf, "nFreeObjects....... %d\n", dev->nFreeObjects);
++	buf += sprintf(buf, "nFreeChunks........ %d\n", dev->nFreeChunks);
++	buf += sprintf(buf, "nPageWrites........ %d\n", dev->nPageWrites);
++	buf += sprintf(buf, "nPageReads......... %d\n", dev->nPageReads);
++	buf += sprintf(buf, "nBlockErasures..... %d\n", dev->nBlockErasures);
++	buf += sprintf(buf, "nGCCopies.......... %d\n", dev->nGCCopies);
++	buf += sprintf(buf, "garbageCollections. %d\n", dev->garbageCollections);
++	buf += sprintf(buf, "passiveGCs......... %d\n",
++		    dev->passiveGarbageCollections);
++	buf += sprintf(buf, "nRetriedWrites..... %d\n", dev->nRetriedWrites);
++	buf += sprintf(buf, "nShortOpCaches..... %d\n", dev->nShortOpCaches);
++	buf += sprintf(buf, "nRetireBlocks...... %d\n", dev->nRetiredBlocks);
++	buf += sprintf(buf, "eccFixed........... %d\n", dev->eccFixed);
++	buf += sprintf(buf, "eccUnfixed......... %d\n", dev->eccUnfixed);
++	buf += sprintf(buf, "tagsEccFixed....... %d\n", dev->tagsEccFixed);
++	buf += sprintf(buf, "tagsEccUnfixed..... %d\n", dev->tagsEccUnfixed);
++	buf += sprintf(buf, "cacheHits.......... %d\n", dev->cacheHits);
++	buf += sprintf(buf, "nDeletedFiles...... %d\n", dev->nDeletedFiles);
++	buf += sprintf(buf, "nUnlinkedFiles..... %d\n", dev->nUnlinkedFiles);
++	buf +=
++	    sprintf(buf, "nBackgroudDeletions %d\n", dev->nBackgroundDeletions);
++	buf += sprintf(buf, "useNANDECC......... %d\n", dev->useNANDECC);
++	buf += sprintf(buf, "isYaffs2........... %d\n", dev->isYaffs2);
++	buf += sprintf(buf, "inbandTags......... %d\n", dev->inbandTags);
++
++	return buf;
++}
++
++static int yaffs_proc_read(char *page,
++			   char **start,
++			   off_t offset, int count, int *eof, void *data)
++{
++	struct ylist_head *item;
++	char *buf = page;
++	int step = offset;
++	int n = 0;
++
++	/* Get proc_file_read() to step 'offset' by one on each sucessive call.
++	 * We use 'offset' (*ppos) to indicate where we are in devList.
++	 * This also assumes the user has posted a read buffer large
++	 * enough to hold the complete output; but that's life in /proc.
++	 */
++
++	*(int *)start = 1;
++
++	/* Print header first */
++	if (step == 0) {
++		buf += sprintf(buf, "YAFFS built:" __DATE__ " " __TIME__
++			       "\n%s\n%s\n", yaffs_fs_c_version,
++			       yaffs_guts_c_version);
++	}
++
++	/* hold lock_kernel while traversing yaffs_dev_list */
++	lock_kernel();
++
++	/* Locate and print the Nth entry.  Order N-squared but N is small. */
++	ylist_for_each(item, &yaffs_dev_list) {
++		yaffs_Device *dev = ylist_entry(item, yaffs_Device, devList);
++		if (n < step) {
++			n++;
++			continue;
++		}
++		buf += sprintf(buf, "\nDevice %d \"%s\"\n", n, dev->name);
++		buf = yaffs_dump_dev(buf, dev);
++		break;
++	}
++	unlock_kernel();
++
++	return buf - page < count ? buf - page : count;
++}
++
++/**
++ * Set the verbosity of the warnings and error messages.
++ *
++ * Note that the names can only be a..z or _ with the current code.
++ */
++
++static struct {
++	char *mask_name;
++	unsigned mask_bitfield;
++} mask_flags[] = {
++	{"allocate", YAFFS_TRACE_ALLOCATE},
++	{"always", YAFFS_TRACE_ALWAYS},
++	{"bad_blocks", YAFFS_TRACE_BAD_BLOCKS},
++	{"buffers", YAFFS_TRACE_BUFFERS},
++	{"bug", YAFFS_TRACE_BUG},
++	{"checkpt", YAFFS_TRACE_CHECKPOINT},
++	{"deletion", YAFFS_TRACE_DELETION},
++	{"erase", YAFFS_TRACE_ERASE},
++	{"error", YAFFS_TRACE_ERROR},
++	{"gc_detail", YAFFS_TRACE_GC_DETAIL},
++	{"gc", YAFFS_TRACE_GC},
++	{"mtd", YAFFS_TRACE_MTD},
++	{"nandaccess", YAFFS_TRACE_NANDACCESS},
++	{"os", YAFFS_TRACE_OS},
++	{"scan_debug", YAFFS_TRACE_SCAN_DEBUG},
++	{"scan", YAFFS_TRACE_SCAN},
++	{"tracing", YAFFS_TRACE_TRACING},
++
++	{"verify", YAFFS_TRACE_VERIFY},
++	{"verify_nand", YAFFS_TRACE_VERIFY_NAND},
++	{"verify_full", YAFFS_TRACE_VERIFY_FULL},
++	{"verify_all", YAFFS_TRACE_VERIFY_ALL},
++
++	{"write", YAFFS_TRACE_WRITE},
++	{"all", 0xffffffff},
++	{"none", 0},
++	{NULL, 0},
++};
++
++#define MAX_MASK_NAME_LENGTH 40
++static int yaffs_proc_write(struct file *file, const char *buf,
++					 unsigned long count, void *data)
++{
++	unsigned rg = 0, mask_bitfield;
++	char *end;
++	char *mask_name;
++	const char *x;
++	char substring[MAX_MASK_NAME_LENGTH + 1];
++	int i;
++	int done = 0;
++	int add, len = 0;
++	int pos = 0;
++
++	rg = yaffs_traceMask;
++
++	while (!done && (pos < count)) {
++		done = 1;
++		while ((pos < count) && isspace(buf[pos]))
++			pos++;
++
++		switch (buf[pos]) {
++		case '+':
++		case '-':
++		case '=':
++			add = buf[pos];
++			pos++;
++			break;
++
++		default:
++			add = ' ';
++			break;
++		}
++		mask_name = NULL;
++
++		mask_bitfield = simple_strtoul(buf + pos, &end, 0);
++
++		if (end > buf + pos) {
++			mask_name = "numeral";
++			len = end - (buf + pos);
++			pos += len;
++			done = 0;
++		} else {
++			for (x = buf + pos, i = 0;
++			    (*x == '_' || (*x >= 'a' && *x <= 'z')) &&
++			    i < MAX_MASK_NAME_LENGTH; x++, i++, pos++)
++				substring[i] = *x;
++			substring[i] = '\0';
++
++			for (i = 0; mask_flags[i].mask_name != NULL; i++) {
++				if (strcmp(substring, mask_flags[i].mask_name) == 0) {
++					mask_name = mask_flags[i].mask_name;
++					mask_bitfield = mask_flags[i].mask_bitfield;
++					done = 0;
++					break;
++				}
++			}
++		}
++
++		if (mask_name != NULL) {
++			done = 0;
++			switch (add) {
++			case '-':
++				rg &= ~mask_bitfield;
++				break;
++			case '+':
++				rg |= mask_bitfield;
++				break;
++			case '=':
++				rg = mask_bitfield;
++				break;
++			default:
++				rg |= mask_bitfield;
++				break;
++			}
++		}
++	}
++
++	yaffs_traceMask = rg | YAFFS_TRACE_ALWAYS;
++
++	printk(KERN_DEBUG "new trace = 0x%08X\n", yaffs_traceMask);
++
++	if (rg & YAFFS_TRACE_ALWAYS) {
++		for (i = 0; mask_flags[i].mask_name != NULL; i++) {
++			char flag;
++			flag = ((rg & mask_flags[i].mask_bitfield) == mask_flags[i].mask_bitfield) ? '+' : '-';
++			printk(KERN_DEBUG "%c%s\n", flag, mask_flags[i].mask_name);
++		}
++	}
++
++	return count;
++}
++
++/* Stuff to handle installation of file systems */
++struct file_system_to_install {
++	struct file_system_type *fst;
++	int installed;
++};
++
++static struct file_system_to_install fs_to_install[] = {
++	{&yaffs_fs_type, 0},
++	{&yaffs2_fs_type, 0},
++	{NULL, 0}
++};
++
++static int __init init_yaffs_fs(void)
++{
++	int error = 0;
++	struct file_system_to_install *fsinst;
++
++	T(YAFFS_TRACE_ALWAYS,
++	  ("yaffs " __DATE__ " " __TIME__ " Installing. \n"));
++
++	/* Install the proc_fs entry */
++	my_proc_entry = create_proc_entry("yaffs",
++					       S_IRUGO | S_IFREG,
++					       YPROC_ROOT);
++
++	if (my_proc_entry) {
++		my_proc_entry->write_proc = yaffs_proc_write;
++		my_proc_entry->read_proc = yaffs_proc_read;
++		my_proc_entry->data = NULL;
++	} else
++		return -ENOMEM;
++
++	/* Now add the file system entries */
++
++	fsinst = fs_to_install;
++
++	while (fsinst->fst && !error) {
++		error = register_filesystem(fsinst->fst);
++		if (!error)
++			fsinst->installed = 1;
++		fsinst++;
++	}
++
++	/* Any errors? uninstall  */
++	if (error) {
++		fsinst = fs_to_install;
++
++		while (fsinst->fst) {
++			if (fsinst->installed) {
++				unregister_filesystem(fsinst->fst);
++				fsinst->installed = 0;
++			}
++			fsinst++;
++		}
++	}
++
++	return error;
++}
++
++static void __exit exit_yaffs_fs(void)
++{
++
++	struct file_system_to_install *fsinst;
++
++	T(YAFFS_TRACE_ALWAYS, ("yaffs " __DATE__ " " __TIME__
++			       " removing. \n"));
++
++	remove_proc_entry("yaffs", YPROC_ROOT);
++
++	fsinst = fs_to_install;
++
++	while (fsinst->fst) {
++		if (fsinst->installed) {
++			unregister_filesystem(fsinst->fst);
++			fsinst->installed = 0;
++		}
++		fsinst++;
++	}
++}
++
++module_init(init_yaffs_fs)
++module_exit(exit_yaffs_fs)
++
++MODULE_DESCRIPTION("YAFFS2 - a NAND specific flash file system");
++MODULE_AUTHOR("Charles Manning, Aleph One Ltd., 2002-2006");
++MODULE_LICENSE("GPL");
+diff -Nur linux-2.6.32.orig/fs/yaffs2/yaffs_getblockinfo.h linux-2.6.32/fs/yaffs2/yaffs_getblockinfo.h
+--- linux-2.6.32.orig/fs/yaffs2/yaffs_getblockinfo.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.32/fs/yaffs2/yaffs_getblockinfo.h	2010-01-30 20:35:01.373074589 +0100
+@@ -0,0 +1,34 @@
++/*
++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2007 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YAFFS_GETBLOCKINFO_H__
++#define __YAFFS_GETBLOCKINFO_H__
++
++#include "yaffs_guts.h"
++
++/* Function to manipulate block info */
++static Y_INLINE yaffs_BlockInfo *yaffs_GetBlockInfo(yaffs_Device * dev, int blk)
++{
++	if (blk < dev->internalStartBlock || blk > dev->internalEndBlock) {
++		T(YAFFS_TRACE_ERROR,
++		  (TSTR
++		   ("**>> yaffs: getBlockInfo block %d is not valid" TENDSTR),
++		   blk));
++		YBUG();
++	}
++	return &dev->blockInfo[blk - dev->internalStartBlock];
++}
++
++#endif
+diff -Nur linux-2.6.32.orig/fs/yaffs2/yaffs_guts.c linux-2.6.32/fs/yaffs2/yaffs_guts.c
+--- linux-2.6.32.orig/fs/yaffs2/yaffs_guts.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.32/fs/yaffs2/yaffs_guts.c	2010-01-30 20:35:01.393075518 +0100
+@@ -0,0 +1,7552 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2007 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++const char *yaffs_guts_c_version =
++    "$Id: yaffs_guts.c,v 1.82 2009-03-09 04:24:17 charles Exp $";
++
++#include "yportenv.h"
++
++#include "yaffsinterface.h"
++#include "yaffs_guts.h"
++#include "yaffs_tagsvalidity.h"
++#include "yaffs_getblockinfo.h"
++
++#include "yaffs_tagscompat.h"
++#ifndef CONFIG_YAFFS_USE_OWN_SORT
++#include "yaffs_qsort.h"
++#endif
++#include "yaffs_nand.h"
++
++#include "yaffs_checkptrw.h"
++
++#include "yaffs_nand.h"
++#include "yaffs_packedtags2.h"
++
++
++#define YAFFS_PASSIVE_GC_CHUNKS 2
++
++#include "yaffs_ecc.h"
++
++
++/* Robustification (if it ever comes about...) */
++static void yaffs_RetireBlock(yaffs_Device *dev, int blockInNAND);
++static void yaffs_HandleWriteChunkError(yaffs_Device *dev, int chunkInNAND,
++		int erasedOk);
++static void yaffs_HandleWriteChunkOk(yaffs_Device *dev, int chunkInNAND,
++				const __u8 *data,
++				const yaffs_ExtendedTags *tags);
++static void yaffs_HandleUpdateChunk(yaffs_Device *dev, int chunkInNAND,
++				const yaffs_ExtendedTags *tags);
++
++/* Other local prototypes */
++static int yaffs_UnlinkObject(yaffs_Object *obj);
++static int yaffs_ObjectHasCachedWriteData(yaffs_Object *obj);
++
++static void yaffs_HardlinkFixup(yaffs_Device *dev, yaffs_Object *hardList);
++
++static int yaffs_WriteNewChunkWithTagsToNAND(yaffs_Device *dev,
++					const __u8 *buffer,
++					yaffs_ExtendedTags *tags,
++					int useReserve);
++static int yaffs_PutChunkIntoFile(yaffs_Object *in, int chunkInInode,
++				int chunkInNAND, int inScan);
++
++static yaffs_Object *yaffs_CreateNewObject(yaffs_Device *dev, int number,
++					yaffs_ObjectType type);
++static void yaffs_AddObjectToDirectory(yaffs_Object *directory,
++				yaffs_Object *obj);
++static int yaffs_UpdateObjectHeader(yaffs_Object *in, const YCHAR *name,
++				int force, int isShrink, int shadows);
++static void yaffs_RemoveObjectFromDirectory(yaffs_Object *obj);
++static int yaffs_CheckStructures(void);
++static int yaffs_DeleteWorker(yaffs_Object *in, yaffs_Tnode *tn, __u32 level,
++			int chunkOffset, int *limit);
++static int yaffs_DoGenericObjectDeletion(yaffs_Object *in);
++
++static yaffs_BlockInfo *yaffs_GetBlockInfo(yaffs_Device *dev, int blockNo);
++
++
++static int yaffs_CheckChunkErased(struct yaffs_DeviceStruct *dev,
++				int chunkInNAND);
++
++static int yaffs_UnlinkWorker(yaffs_Object *obj);
++
++static int yaffs_TagsMatch(const yaffs_ExtendedTags *tags, int objectId,
++			int chunkInObject);
++
++static int yaffs_AllocateChunk(yaffs_Device *dev, int useReserve,
++				yaffs_BlockInfo **blockUsedPtr);
++
++static void yaffs_VerifyFreeChunks(yaffs_Device *dev);
++
++static void yaffs_CheckObjectDetailsLoaded(yaffs_Object *in);
++
++static void yaffs_VerifyDirectory(yaffs_Object *directory);
++#ifdef YAFFS_PARANOID
++static int yaffs_CheckFileSanity(yaffs_Object *in);
++#else
++#define yaffs_CheckFileSanity(in)
++#endif
++
++static void yaffs_InvalidateWholeChunkCache(yaffs_Object *in);
++static void yaffs_InvalidateChunkCache(yaffs_Object *object, int chunkId);
++
++static void yaffs_InvalidateCheckpoint(yaffs_Device *dev);
++
++static int yaffs_FindChunkInFile(yaffs_Object *in, int chunkInInode,
++				yaffs_ExtendedTags *tags);
++
++static __u32 yaffs_GetChunkGroupBase(yaffs_Device *dev, yaffs_Tnode *tn,
++		unsigned pos);
++static yaffs_Tnode *yaffs_FindLevel0Tnode(yaffs_Device *dev,
++					yaffs_FileStructure *fStruct,
++					__u32 chunkId);
++
++
++/* Function to calculate chunk and offset */
++
++static void yaffs_AddrToChunk(yaffs_Device *dev, loff_t addr, int *chunkOut,
++		__u32 *offsetOut)
++{
++	int chunk;
++	__u32 offset;
++
++	chunk  = (__u32)(addr >> dev->chunkShift);
++
++	if (dev->chunkDiv == 1) {
++		/* easy power of 2 case */
++		offset = (__u32)(addr & dev->chunkMask);
++	} else {
++		/* Non power-of-2 case */
++
++		loff_t chunkBase;
++
++		chunk /= dev->chunkDiv;
++
++		chunkBase = ((loff_t)chunk) * dev->nDataBytesPerChunk;
++		offset = (__u32)(addr - chunkBase);
++	}
++
++	*chunkOut = chunk;
++	*offsetOut = offset;
++}
++
++/* Function to return the number of shifts for a power of 2 greater than or
++ * equal to the given number
++ * Note we don't try to cater for all possible numbers and this does not have to
++ * be hellishly efficient.
++ */
++
++static __u32 ShiftsGE(__u32 x)
++{
++	int extraBits;
++	int nShifts;
++
++	nShifts = extraBits = 0;
++
++	while (x > 1) {
++		if (x & 1)
++			extraBits++;
++		x >>= 1;
++		nShifts++;
++	}
++
++	if (extraBits)
++		nShifts++;
++
++	return nShifts;
++}
++
++/* Function to return the number of shifts to get a 1 in bit 0
++ */
++
++static __u32 Shifts(__u32 x)
++{
++	int nShifts;
++
++	nShifts =  0;
++
++	if (!x)
++		return 0;
++
++	while (!(x&1)) {
++		x >>= 1;
++		nShifts++;
++	}
++
++	return nShifts;
++}
++
++
++
++/*
++ * Temporary buffer manipulations.
++ */
++
++static int yaffs_InitialiseTempBuffers(yaffs_Device *dev)
++{
++	int i;
++	__u8 *buf = (__u8 *)1;
++
++	memset(dev->tempBuffer, 0, sizeof(dev->tempBuffer));
++
++	for (i = 0; buf && i < YAFFS_N_TEMP_BUFFERS; i++) {
++		dev->tempBuffer[i].line = 0;	/* not in use */
++		dev->tempBuffer[i].buffer = buf =
++		    YMALLOC_DMA(dev->totalBytesPerChunk);
++	}
++
++	return buf ? YAFFS_OK : YAFFS_FAIL;
++}
++
++__u8 *yaffs_GetTempBuffer(yaffs_Device *dev, int lineNo)
++{
++	int i, j;
++
++	dev->tempInUse++;
++	if (dev->tempInUse > dev->maxTemp)
++		dev->maxTemp = dev->tempInUse;
++
++	for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
++		if (dev->tempBuffer[i].line == 0) {
++			dev->tempBuffer[i].line = lineNo;
++			if ((i + 1) > dev->maxTemp) {
++				dev->maxTemp = i + 1;
++				for (j = 0; j <= i; j++)
++					dev->tempBuffer[j].maxLine =
++					    dev->tempBuffer[j].line;
++			}
++
++			return dev->tempBuffer[i].buffer;
++		}
++	}
++
++	T(YAFFS_TRACE_BUFFERS,
++	  (TSTR("Out of temp buffers at line %d, other held by lines:"),
++	   lineNo));
++	for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++)
++		T(YAFFS_TRACE_BUFFERS, (TSTR(" %d "), dev->tempBuffer[i].line));
++
++	T(YAFFS_TRACE_BUFFERS, (TSTR(" " TENDSTR)));
++
++	/*
++	 * If we got here then we have to allocate an unmanaged one
++	 * This is not good.
++	 */
++
++	dev->unmanagedTempAllocations++;
++	return YMALLOC(dev->nDataBytesPerChunk);
++
++}
++
++void yaffs_ReleaseTempBuffer(yaffs_Device *dev, __u8 *buffer,
++				    int lineNo)
++{
++	int i;
++
++	dev->tempInUse--;
++
++	for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
++		if (dev->tempBuffer[i].buffer == buffer) {
++			dev->tempBuffer[i].line = 0;
++			return;
++		}
++	}
++
++	if (buffer) {
++		/* assume it is an unmanaged one. */
++		T(YAFFS_TRACE_BUFFERS,
++		  (TSTR("Releasing unmanaged temp buffer in line %d" TENDSTR),
++		   lineNo));
++		YFREE(buffer);
++		dev->unmanagedTempDeallocations++;
++	}
++
++}
++
++/*
++ * Determine if we have a managed buffer.
++ */
++int yaffs_IsManagedTempBuffer(yaffs_Device *dev, const __u8 *buffer)
++{
++	int i;
++
++	for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
++		if (dev->tempBuffer[i].buffer == buffer)
++			return 1;
++	}
++
++	for (i = 0; i < dev->nShortOpCaches; i++) {
++		if (dev->srCache[i].data == buffer)
++			return 1;
++	}
++
++	if (buffer == dev->checkpointBuffer)
++		return 1;
++
++	T(YAFFS_TRACE_ALWAYS,
++		(TSTR("yaffs: unmaged buffer detected.\n" TENDSTR)));
++	return 0;
++}
++
++
++
++/*
++ * Chunk bitmap manipulations
++ */
++
++static Y_INLINE __u8 *yaffs_BlockBits(yaffs_Device *dev, int blk)
++{
++	if (blk < dev->internalStartBlock || blk > dev->internalEndBlock) {
++		T(YAFFS_TRACE_ERROR,
++			(TSTR("**>> yaffs: BlockBits block %d is not valid" TENDSTR),
++			blk));
++		YBUG();
++	}
++	return dev->chunkBits +
++		(dev->chunkBitmapStride * (blk - dev->internalStartBlock));
++}
++
++static Y_INLINE void yaffs_VerifyChunkBitId(yaffs_Device *dev, int blk, int chunk)
++{
++	if (blk < dev->internalStartBlock || blk > dev->internalEndBlock ||
++			chunk < 0 || chunk >= dev->nChunksPerBlock) {
++		T(YAFFS_TRACE_ERROR,
++		(TSTR("**>> yaffs: Chunk Id (%d:%d) invalid"TENDSTR),
++			blk, chunk));
++		YBUG();
++	}
++}
++
++static Y_INLINE void yaffs_ClearChunkBits(yaffs_Device *dev, int blk)
++{
++	__u8 *blkBits = yaffs_BlockBits(dev, blk);
++
++	memset(blkBits, 0, dev->chunkBitmapStride);
++}
++
++static Y_INLINE void yaffs_ClearChunkBit(yaffs_Device *dev, int blk, int chunk)
++{
++	__u8 *blkBits = yaffs_BlockBits(dev, blk);
++
++	yaffs_VerifyChunkBitId(dev, blk, chunk);
++
++	blkBits[chunk / 8] &= ~(1 << (chunk & 7));
++}
++
++static Y_INLINE void yaffs_SetChunkBit(yaffs_Device *dev, int blk, int chunk)
++{
++	__u8 *blkBits = yaffs_BlockBits(dev, blk);
++
++	yaffs_VerifyChunkBitId(dev, blk, chunk);
++
++	blkBits[chunk / 8] |= (1 << (chunk & 7));
++}
++
++static Y_INLINE int yaffs_CheckChunkBit(yaffs_Device *dev, int blk, int chunk)
++{
++	__u8 *blkBits = yaffs_BlockBits(dev, blk);
++	yaffs_VerifyChunkBitId(dev, blk, chunk);
++
++	return (blkBits[chunk / 8] & (1 << (chunk & 7))) ? 1 : 0;
++}
++
++static Y_INLINE int yaffs_StillSomeChunkBits(yaffs_Device *dev, int blk)
++{
++	__u8 *blkBits = yaffs_BlockBits(dev, blk);
++	int i;
++	for (i = 0; i < dev->chunkBitmapStride; i++) {
++		if (*blkBits)
++			return 1;
++		blkBits++;
++	}
++	return 0;
++}
++
++static int yaffs_CountChunkBits(yaffs_Device *dev, int blk)
++{
++	__u8 *blkBits = yaffs_BlockBits(dev, blk);
++	int i;
++	int n = 0;
++	for (i = 0; i < dev->chunkBitmapStride; i++) {
++		__u8 x = *blkBits;
++		while (x) {
++			if (x & 1)
++				n++;
++			x >>= 1;
++		}
++
++		blkBits++;
++	}
++	return n;
++}
++
++/*
++ * Verification code
++ */
++
++static int yaffs_SkipVerification(yaffs_Device *dev)
++{
++	return !(yaffs_traceMask & (YAFFS_TRACE_VERIFY | YAFFS_TRACE_VERIFY_FULL));
++}
++
++static int yaffs_SkipFullVerification(yaffs_Device *dev)
++{
++	return !(yaffs_traceMask & (YAFFS_TRACE_VERIFY_FULL));
++}
++
++static int yaffs_SkipNANDVerification(yaffs_Device *dev)
++{
++	return !(yaffs_traceMask & (YAFFS_TRACE_VERIFY_NAND));
++}
++
++static const char *blockStateName[] = {
++"Unknown",
++"Needs scanning",
++"Scanning",
++"Empty",
++"Allocating",
++"Full",
++"Dirty",
++"Checkpoint",
++"Collecting",
++"Dead"
++};
++
++static void yaffs_VerifyBlock(yaffs_Device *dev, yaffs_BlockInfo *bi, int n)
++{
++	int actuallyUsed;
++	int inUse;
++
++	if (yaffs_SkipVerification(dev))
++		return;
++
++	/* Report illegal runtime states */
++	if (bi->blockState >= YAFFS_NUMBER_OF_BLOCK_STATES)
++		T(YAFFS_TRACE_VERIFY, (TSTR("Block %d has undefined state %d"TENDSTR), n, bi->blockState));
++
++	switch (bi->blockState) {
++	case YAFFS_BLOCK_STATE_UNKNOWN:
++	case YAFFS_BLOCK_STATE_SCANNING:
++	case YAFFS_BLOCK_STATE_NEEDS_SCANNING:
++		T(YAFFS_TRACE_VERIFY, (TSTR("Block %d has bad run-state %s"TENDSTR),
++		n, blockStateName[bi->blockState]));
++	}
++
++	/* Check pages in use and soft deletions are legal */
++
++	actuallyUsed = bi->pagesInUse - bi->softDeletions;
++
++	if (bi->pagesInUse < 0 || bi->pagesInUse > dev->nChunksPerBlock ||
++	   bi->softDeletions < 0 || bi->softDeletions > dev->nChunksPerBlock ||
++	   actuallyUsed < 0 || actuallyUsed > dev->nChunksPerBlock)
++		T(YAFFS_TRACE_VERIFY, (TSTR("Block %d has illegal values pagesInUsed %d softDeletions %d"TENDSTR),
++		n, bi->pagesInUse, bi->softDeletions));
++
++
++	/* Check chunk bitmap legal */
++	inUse = yaffs_CountChunkBits(dev, n);
++	if (inUse != bi->pagesInUse)
++		T(YAFFS_TRACE_VERIFY, (TSTR("Block %d has inconsistent values pagesInUse %d counted chunk bits %d"TENDSTR),
++			n, bi->pagesInUse, inUse));
++
++	/* Check that the sequence number is valid.
++	 * Ten million is legal, but is very unlikely
++	 */
++	if (dev->isYaffs2 &&
++	   (bi->blockState == YAFFS_BLOCK_STATE_ALLOCATING || bi->blockState == YAFFS_BLOCK_STATE_FULL) &&
++	   (bi->sequenceNumber < YAFFS_LOWEST_SEQUENCE_NUMBER || bi->sequenceNumber > 10000000))
++		T(YAFFS_TRACE_VERIFY, (TSTR("Block %d has suspect sequence number of %d"TENDSTR),
++		n, bi->sequenceNumber));
++}
++
++static void yaffs_VerifyCollectedBlock(yaffs_Device *dev, yaffs_BlockInfo *bi,
++		int n)
++{
++	yaffs_VerifyBlock(dev, bi, n);
++
++	/* After collection the block should be in the erased state */
++	/* This will need to change if we do partial gc */
++
++	if (bi->blockState != YAFFS_BLOCK_STATE_COLLECTING &&
++			bi->blockState != YAFFS_BLOCK_STATE_EMPTY) {
++		T(YAFFS_TRACE_ERROR, (TSTR("Block %d is in state %d after gc, should be erased"TENDSTR),
++			n, bi->blockState));
++	}
++}
++
++static void yaffs_VerifyBlocks(yaffs_Device *dev)
++{
++	int i;
++	int nBlocksPerState[YAFFS_NUMBER_OF_BLOCK_STATES];
++	int nIllegalBlockStates = 0;
++
++	if (yaffs_SkipVerification(dev))
++		return;
++
++	memset(nBlocksPerState, 0, sizeof(nBlocksPerState));
++
++	for (i = dev->internalStartBlock; i <= dev->internalEndBlock; i++) {
++		yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, i);
++		yaffs_VerifyBlock(dev, bi, i);
++
++		if (bi->blockState < YAFFS_NUMBER_OF_BLOCK_STATES)
++			nBlocksPerState[bi->blockState]++;
++		else
++			nIllegalBlockStates++;
++	}
++
++	T(YAFFS_TRACE_VERIFY, (TSTR(""TENDSTR)));
++	T(YAFFS_TRACE_VERIFY, (TSTR("Block summary"TENDSTR)));
++
++	T(YAFFS_TRACE_VERIFY, (TSTR("%d blocks have illegal states"TENDSTR), nIllegalBlockStates));
++	if (nBlocksPerState[YAFFS_BLOCK_STATE_ALLOCATING] > 1)
++		T(YAFFS_TRACE_VERIFY, (TSTR("Too many allocating blocks"TENDSTR)));
++
++	for (i = 0; i < YAFFS_NUMBER_OF_BLOCK_STATES; i++)
++		T(YAFFS_TRACE_VERIFY,
++		  (TSTR("%s %d blocks"TENDSTR),
++		  blockStateName[i], nBlocksPerState[i]));
++
++	if (dev->blocksInCheckpoint != nBlocksPerState[YAFFS_BLOCK_STATE_CHECKPOINT])
++		T(YAFFS_TRACE_VERIFY,
++		 (TSTR("Checkpoint block count wrong dev %d count %d"TENDSTR),
++		 dev->blocksInCheckpoint, nBlocksPerState[YAFFS_BLOCK_STATE_CHECKPOINT]));
++
++	if (dev->nErasedBlocks != nBlocksPerState[YAFFS_BLOCK_STATE_EMPTY])
++		T(YAFFS_TRACE_VERIFY,
++		 (TSTR("Erased block count wrong dev %d count %d"TENDSTR),
++		 dev->nErasedBlocks, nBlocksPerState[YAFFS_BLOCK_STATE_EMPTY]));
++
++	if (nBlocksPerState[YAFFS_BLOCK_STATE_COLLECTING] > 1)
++		T(YAFFS_TRACE_VERIFY,
++		 (TSTR("Too many collecting blocks %d (max is 1)"TENDSTR),
++		 nBlocksPerState[YAFFS_BLOCK_STATE_COLLECTING]));
++
++	T(YAFFS_TRACE_VERIFY, (TSTR(""TENDSTR)));
++
++}
++
++/*
++ * Verify the object header. oh must be valid, but obj and tags may be NULL in which
++ * case those tests will not be performed.
++ */
++static void yaffs_VerifyObjectHeader(yaffs_Object *obj, yaffs_ObjectHeader *oh, yaffs_ExtendedTags *tags, int parentCheck)
++{
++	if (obj && yaffs_SkipVerification(obj->myDev))
++		return;
++
++	if (!(tags && obj && oh)) {
++		T(YAFFS_TRACE_VERIFY,
++				(TSTR("Verifying object header tags %x obj %x oh %x"TENDSTR),
++				(__u32)tags, (__u32)obj, (__u32)oh));
++		return;
++	}
++
++	if (oh->type <= YAFFS_OBJECT_TYPE_UNKNOWN ||
++			oh->type > YAFFS_OBJECT_TYPE_MAX)
++		T(YAFFS_TRACE_VERIFY,
++			(TSTR("Obj %d header type is illegal value 0x%x"TENDSTR),
++			tags->objectId, oh->type));
++
++	if (tags->objectId != obj->objectId)
++		T(YAFFS_TRACE_VERIFY,
++			(TSTR("Obj %d header mismatch objectId %d"TENDSTR),
++			tags->objectId, obj->objectId));
++
++
++	/*
++	 * Check that the object's parent ids match if parentCheck requested.
++	 *
++	 * Tests do not apply to the root object.
++	 */
++
++	if (parentCheck && tags->objectId > 1 && !obj->parent)
++		T(YAFFS_TRACE_VERIFY,
++			(TSTR("Obj %d header mismatch parentId %d obj->parent is NULL"TENDSTR),
++			tags->objectId, oh->parentObjectId));
++
++	if (parentCheck && obj->parent &&
++			oh->parentObjectId != obj->parent->objectId &&
++			(oh->parentObjectId != YAFFS_OBJECTID_UNLINKED ||
++			obj->parent->objectId != YAFFS_OBJECTID_DELETED))
++		T(YAFFS_TRACE_VERIFY,
++			(TSTR("Obj %d header mismatch parentId %d parentObjectId %d"TENDSTR),
++			tags->objectId, oh->parentObjectId, obj->parent->objectId));
++
++	if (tags->objectId > 1 && oh->name[0] == 0) /* Null name */
++		T(YAFFS_TRACE_VERIFY,
++			(TSTR("Obj %d header name is NULL"TENDSTR),
++			obj->objectId));
++
++	if (tags->objectId > 1 && ((__u8)(oh->name[0])) == 0xff) /* Trashed name */
++		T(YAFFS_TRACE_VERIFY,
++			(TSTR("Obj %d header name is 0xFF"TENDSTR),
++			obj->objectId));
++}
++
++
++
++static int yaffs_VerifyTnodeWorker(yaffs_Object *obj, yaffs_Tnode *tn,
++					__u32 level, int chunkOffset)
++{
++	int i;
++	yaffs_Device *dev = obj->myDev;
++	int ok = 1;
++
++	if (tn) {
++		if (level > 0) {
++
++			for (i = 0; i < YAFFS_NTNODES_INTERNAL && ok; i++) {
++				if (tn->internal[i]) {
++					ok = yaffs_VerifyTnodeWorker(obj,
++							tn->internal[i],
++							level - 1,
++							(chunkOffset<<YAFFS_TNODES_INTERNAL_BITS) + i);
++				}
++			}
++		} else if (level == 0) {
++			yaffs_ExtendedTags tags;
++			__u32 objectId = obj->objectId;
++
++			chunkOffset <<=  YAFFS_TNODES_LEVEL0_BITS;
++
++			for (i = 0; i < YAFFS_NTNODES_LEVEL0; i++) {
++				__u32 theChunk = yaffs_GetChunkGroupBase(dev, tn, i);
++
++				if (theChunk > 0) {
++					/* T(~0,(TSTR("verifying (%d:%d) %d"TENDSTR),tags.objectId,tags.chunkId,theChunk)); */
++					yaffs_ReadChunkWithTagsFromNAND(dev, theChunk, NULL, &tags);
++					if (tags.objectId != objectId || tags.chunkId != chunkOffset) {
++						T(~0, (TSTR("Object %d chunkId %d NAND mismatch chunk %d tags (%d:%d)"TENDSTR),
++							objectId, chunkOffset, theChunk,
++							tags.objectId, tags.chunkId));
++					}
++				}
++				chunkOffset++;
++			}
++		}
++	}
++
++	return ok;
++
++}
++
++
++static void yaffs_VerifyFile(yaffs_Object *obj)
++{
++	int requiredTallness;
++	int actualTallness;
++	__u32 lastChunk;
++	__u32 x;
++	__u32 i;
++	yaffs_Device *dev;
++	yaffs_ExtendedTags tags;
++	yaffs_Tnode *tn;
++	__u32 objectId;
++
++	if (!obj)
++		return;
++
++	if (yaffs_SkipVerification(obj->myDev))
++		return;
++
++	dev = obj->myDev;
++	objectId = obj->objectId;
++
++	/* Check file size is consistent with tnode depth */
++	lastChunk =  obj->variant.fileVariant.fileSize / dev->nDataBytesPerChunk + 1;
++	x = lastChunk >> YAFFS_TNODES_LEVEL0_BITS;
++	requiredTallness = 0;
++	while (x > 0) {
++		x >>= YAFFS_TNODES_INTERNAL_BITS;
++		requiredTallness++;
++	}
++
++	actualTallness = obj->variant.fileVariant.topLevel;
++
++	if (requiredTallness > actualTallness)
++		T(YAFFS_TRACE_VERIFY,
++		(TSTR("Obj %d had tnode tallness %d, needs to be %d"TENDSTR),
++		 obj->objectId, actualTallness, requiredTallness));
++
++
++	/* Check that the chunks in the tnode tree are all correct.
++	 * We do this by scanning through the tnode tree and
++	 * checking the tags for every chunk match.
++	 */
++
++	if (yaffs_SkipNANDVerification(dev))
++		return;
++
++	for (i = 1; i <= lastChunk; i++) {
++		tn = yaffs_FindLevel0Tnode(dev, &obj->variant.fileVariant, i);
++
++		if (tn) {
++			__u32 theChunk = yaffs_GetChunkGroupBase(dev, tn, i);
++			if (theChunk > 0) {
++				/* T(~0,(TSTR("verifying (%d:%d) %d"TENDSTR),objectId,i,theChunk)); */
++				yaffs_ReadChunkWithTagsFromNAND(dev, theChunk, NULL, &tags);
++				if (tags.objectId != objectId || tags.chunkId != i) {
++					T(~0, (TSTR("Object %d chunkId %d NAND mismatch chunk %d tags (%d:%d)"TENDSTR),
++						objectId, i, theChunk,
++						tags.objectId, tags.chunkId));
++				}
++			}
++		}
++	}
++}
++
++
++static void yaffs_VerifyHardLink(yaffs_Object *obj)
++{
++	if (obj && yaffs_SkipVerification(obj->myDev))
++		return;
++
++	/* Verify sane equivalent object */
++}
++
++static void yaffs_VerifySymlink(yaffs_Object *obj)
++{
++	if (obj && yaffs_SkipVerification(obj->myDev))
++		return;
++
++	/* Verify symlink string */
++}
++
++static void yaffs_VerifySpecial(yaffs_Object *obj)
++{
++	if (obj && yaffs_SkipVerification(obj->myDev))
++		return;
++}
++
++static void yaffs_VerifyObject(yaffs_Object *obj)
++{
++	yaffs_Device *dev;
++
++	__u32 chunkMin;
++	__u32 chunkMax;
++
++	__u32 chunkIdOk;
++	__u32 chunkInRange;
++	__u32 chunkShouldNotBeDeleted;
++	__u32 chunkValid;
++
++	if (!obj)
++		return;
++
++	if (obj->beingCreated)
++		return;
++
++	dev = obj->myDev;
++
++	if (yaffs_SkipVerification(dev))
++		return;
++
++	/* Check sane object header chunk */
++
++	chunkMin = dev->internalStartBlock * dev->nChunksPerBlock;
++	chunkMax = (dev->internalEndBlock+1) * dev->nChunksPerBlock - 1;
++
++	chunkInRange = (((unsigned)(obj->hdrChunk)) >= chunkMin && ((unsigned)(obj->hdrChunk)) <= chunkMax);
++	chunkIdOk = chunkInRange || obj->hdrChunk == 0;
++	chunkValid = chunkInRange &&
++			yaffs_CheckChunkBit(dev,
++					obj->hdrChunk / dev->nChunksPerBlock,
++					obj->hdrChunk % dev->nChunksPerBlock);
++	chunkShouldNotBeDeleted = chunkInRange && !chunkValid;
++
++	if (!obj->fake &&
++			(!chunkIdOk || chunkShouldNotBeDeleted)) {
++		T(YAFFS_TRACE_VERIFY,
++			(TSTR("Obj %d has chunkId %d %s %s"TENDSTR),
++			obj->objectId, obj->hdrChunk,
++			chunkIdOk ? "" : ",out of range",
++			chunkShouldNotBeDeleted ? ",marked as deleted" : ""));
++	}
++
++	if (chunkValid && !yaffs_SkipNANDVerification(dev)) {
++		yaffs_ExtendedTags tags;
++		yaffs_ObjectHeader *oh;
++		__u8 *buffer = yaffs_GetTempBuffer(dev, __LINE__);
++
++		oh = (yaffs_ObjectHeader *)buffer;
++
++		yaffs_ReadChunkWithTagsFromNAND(dev, obj->hdrChunk, buffer,
++				&tags);
++
++		yaffs_VerifyObjectHeader(obj, oh, &tags, 1);
++
++		yaffs_ReleaseTempBuffer(dev, buffer, __LINE__);
++	}
++
++	/* Verify it has a parent */
++	if (obj && !obj->fake &&
++			(!obj->parent || obj->parent->myDev != dev)) {
++		T(YAFFS_TRACE_VERIFY,
++			(TSTR("Obj %d has parent pointer %p which does not look like an object"TENDSTR),
++			obj->objectId, obj->parent));
++	}
++
++	/* Verify parent is a directory */
++	if (obj->parent && obj->parent->variantType != YAFFS_OBJECT_TYPE_DIRECTORY) {
++		T(YAFFS_TRACE_VERIFY,
++			(TSTR("Obj %d's parent is not a directory (type %d)"TENDSTR),
++			obj->objectId, obj->parent->variantType));
++	}
++
++	switch (obj->variantType) {
++	case YAFFS_OBJECT_TYPE_FILE:
++		yaffs_VerifyFile(obj);
++		break;
++	case YAFFS_OBJECT_TYPE_SYMLINK:
++		yaffs_VerifySymlink(obj);
++		break;
++	case YAFFS_OBJECT_TYPE_DIRECTORY:
++		yaffs_VerifyDirectory(obj);
++		break;
++	case YAFFS_OBJECT_TYPE_HARDLINK:
++		yaffs_VerifyHardLink(obj);
++		break;
++	case YAFFS_OBJECT_TYPE_SPECIAL:
++		yaffs_VerifySpecial(obj);
++		break;
++	case YAFFS_OBJECT_TYPE_UNKNOWN:
++	default:
++		T(YAFFS_TRACE_VERIFY,
++		(TSTR("Obj %d has illegaltype %d"TENDSTR),
++		obj->objectId, obj->variantType));
++		break;
++	}
++}
++
++static void yaffs_VerifyObjects(yaffs_Device *dev)
++{
++	yaffs_Object *obj;
++	int i;
++	struct ylist_head *lh;
++
++	if (yaffs_SkipVerification(dev))
++		return;
++
++	/* Iterate through the objects in each hash entry */
++
++	for (i = 0; i <  YAFFS_NOBJECT_BUCKETS; i++) {
++		ylist_for_each(lh, &dev->objectBucket[i].list) {
++			if (lh) {
++				obj = ylist_entry(lh, yaffs_Object, hashLink);
++				yaffs_VerifyObject(obj);
++			}
++		}
++	}
++}
++
++
++/*
++ *  Simple hash function. Needs to have a reasonable spread
++ */
++
++static Y_INLINE int yaffs_HashFunction(int n)
++{
++	n = abs(n);
++	return n % YAFFS_NOBJECT_BUCKETS;
++}
++
++/*
++ * Access functions to useful fake objects.
++ * Note that root might have a presence in NAND if permissions are set.
++ */
++
++yaffs_Object *yaffs_Root(yaffs_Device *dev)
++{
++	return dev->rootDir;
++}
++
++yaffs_Object *yaffs_LostNFound(yaffs_Device *dev)
++{
++	return dev->lostNFoundDir;
++}
++
++
++/*
++ *  Erased NAND checking functions
++ */
++
++int yaffs_CheckFF(__u8 *buffer, int nBytes)
++{
++	/* Horrible, slow implementation */
++	while (nBytes--) {
++		if (*buffer != 0xFF)
++			return 0;
++		buffer++;
++	}
++	return 1;
++}
++
++static int yaffs_CheckChunkErased(struct yaffs_DeviceStruct *dev,
++				int chunkInNAND)
++{
++	int retval = YAFFS_OK;
++	__u8 *data = yaffs_GetTempBuffer(dev, __LINE__);
++	yaffs_ExtendedTags tags;
++	int result;
++
++	result = yaffs_ReadChunkWithTagsFromNAND(dev, chunkInNAND, data, &tags);
++
++	if (tags.eccResult > YAFFS_ECC_RESULT_NO_ERROR)
++		retval = YAFFS_FAIL;
++
++	if (!yaffs_CheckFF(data, dev->nDataBytesPerChunk) || tags.chunkUsed) {
++		T(YAFFS_TRACE_NANDACCESS,
++		  (TSTR("Chunk %d not erased" TENDSTR), chunkInNAND));
++		retval = YAFFS_FAIL;
++	}
++
++	yaffs_ReleaseTempBuffer(dev, data, __LINE__);
++
++	return retval;
++
++}
++
++static int yaffs_WriteNewChunkWithTagsToNAND(struct yaffs_DeviceStruct *dev,
++					const __u8 *data,
++					yaffs_ExtendedTags *tags,
++					int useReserve)
++{
++	int attempts = 0;
++	int writeOk = 0;
++	int chunk;
++
++	yaffs_InvalidateCheckpoint(dev);
++
++	do {
++		yaffs_BlockInfo *bi = 0;
++		int erasedOk = 0;
++
++		chunk = yaffs_AllocateChunk(dev, useReserve, &bi);
++		if (chunk < 0) {
++			/* no space */
++			break;
++		}
++
++		/* First check this chunk is erased, if it needs
++		 * checking.  The checking policy (unless forced
++		 * always on) is as follows:
++		 *
++		 * Check the first page we try to write in a block.
++		 * If the check passes then we don't need to check any
++		 * more.	If the check fails, we check again...
++		 * If the block has been erased, we don't need to check.
++		 *
++		 * However, if the block has been prioritised for gc,
++		 * then we think there might be something odd about
++		 * this block and stop using it.
++		 *
++		 * Rationale: We should only ever see chunks that have
++		 * not been erased if there was a partially written
++		 * chunk due to power loss.  This checking policy should
++		 * catch that case with very few checks and thus save a
++		 * lot of checks that are most likely not needed.
++		 */
++		if (bi->gcPrioritise) {
++			yaffs_DeleteChunk(dev, chunk, 1, __LINE__);
++			/* try another chunk */
++			continue;
++		}
++
++		/* let's give it a try */
++		attempts++;
++
++#ifdef CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED
++		bi->skipErasedCheck = 0;
++#endif
++		if (!bi->skipErasedCheck) {
++			erasedOk = yaffs_CheckChunkErased(dev, chunk);
++			if (erasedOk != YAFFS_OK) {
++				T(YAFFS_TRACE_ERROR,
++				(TSTR("**>> yaffs chunk %d was not erased"
++				TENDSTR), chunk));
++
++				/* try another chunk */
++				continue;
++			}
++			bi->skipErasedCheck = 1;
++		}
++
++		writeOk = yaffs_WriteChunkWithTagsToNAND(dev, chunk,
++				data, tags);
++		if (writeOk != YAFFS_OK) {
++			yaffs_HandleWriteChunkError(dev, chunk, erasedOk);
++			/* try another chunk */
++			continue;
++		}
++
++		/* Copy the data into the robustification buffer */
++		yaffs_HandleWriteChunkOk(dev, chunk, data, tags);
++
++	} while (writeOk != YAFFS_OK &&
++		(yaffs_wr_attempts <= 0 || attempts <= yaffs_wr_attempts));
++
++	if (!writeOk)
++		chunk = -1;
++
++	if (attempts > 1) {
++		T(YAFFS_TRACE_ERROR,
++			(TSTR("**>> yaffs write required %d attempts" TENDSTR),
++			attempts));
++
++		dev->nRetriedWrites += (attempts - 1);
++	}
++
++	return chunk;
++}
++
++/*
++ * Block retiring for handling a broken block.
++ */
++
++static void yaffs_RetireBlock(yaffs_Device *dev, int blockInNAND)
++{
++	yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, blockInNAND);
++
++	yaffs_InvalidateCheckpoint(dev);
++
++	if (yaffs_MarkBlockBad(dev, blockInNAND) != YAFFS_OK) {
++		if (yaffs_EraseBlockInNAND(dev, blockInNAND) != YAFFS_OK) {
++			T(YAFFS_TRACE_ALWAYS, (TSTR(
++				"yaffs: Failed to mark bad and erase block %d"
++				TENDSTR), blockInNAND));
++		} else {
++			yaffs_ExtendedTags tags;
++			int chunkId = blockInNAND * dev->nChunksPerBlock;
++
++			__u8 *buffer = yaffs_GetTempBuffer(dev, __LINE__);
++
++			memset(buffer, 0xff, dev->nDataBytesPerChunk);
++			yaffs_InitialiseTags(&tags);
++			tags.sequenceNumber = YAFFS_SEQUENCE_BAD_BLOCK;
++			if (dev->writeChunkWithTagsToNAND(dev, chunkId -
++				dev->chunkOffset, buffer, &tags) != YAFFS_OK)
++				T(YAFFS_TRACE_ALWAYS, (TSTR("yaffs: Failed to "
++					TCONT("write bad block marker to block %d")
++					TENDSTR), blockInNAND));
++
++			yaffs_ReleaseTempBuffer(dev, buffer, __LINE__);
++		}
++	}
++
++	bi->blockState = YAFFS_BLOCK_STATE_DEAD;
++	bi->gcPrioritise = 0;
++	bi->needsRetiring = 0;
++
++	dev->nRetiredBlocks++;
++}
++
++/*
++ * Functions for robustisizing TODO
++ *
++ */
++
++static void yaffs_HandleWriteChunkOk(yaffs_Device *dev, int chunkInNAND,
++				const __u8 *data,
++				const yaffs_ExtendedTags *tags)
++{
++}
++
++static void yaffs_HandleUpdateChunk(yaffs_Device *dev, int chunkInNAND,
++				const yaffs_ExtendedTags *tags)
++{
++}
++
++void yaffs_HandleChunkError(yaffs_Device *dev, yaffs_BlockInfo *bi)
++{
++	if (!bi->gcPrioritise) {
++		bi->gcPrioritise = 1;
++		dev->hasPendingPrioritisedGCs = 1;
++		bi->chunkErrorStrikes++;
++
++		if (bi->chunkErrorStrikes > 3) {
++			bi->needsRetiring = 1; /* Too many stikes, so retire this */
++			T(YAFFS_TRACE_ALWAYS, (TSTR("yaffs: Block struck out" TENDSTR)));
++
++		}
++	}
++}
++
++static void yaffs_HandleWriteChunkError(yaffs_Device *dev, int chunkInNAND,
++		int erasedOk)
++{
++	int blockInNAND = chunkInNAND / dev->nChunksPerBlock;
++	yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, blockInNAND);
++
++	yaffs_HandleChunkError(dev, bi);
++
++	if (erasedOk) {
++		/* Was an actual write failure, so mark the block for retirement  */
++		bi->needsRetiring = 1;
++		T(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
++		  (TSTR("**>> Block %d needs retiring" TENDSTR), blockInNAND));
++	}
++
++	/* Delete the chunk */
++	yaffs_DeleteChunk(dev, chunkInNAND, 1, __LINE__);
++}
++
++
++/*---------------- Name handling functions ------------*/
++
++static __u16 yaffs_CalcNameSum(const YCHAR *name)
++{
++	__u16 sum = 0;
++	__u16 i = 1;
++
++	const YUCHAR *bname = (const YUCHAR *) name;
++	if (bname) {
++		while ((*bname) && (i < (YAFFS_MAX_NAME_LENGTH/2))) {
++
++#ifdef CONFIG_YAFFS_CASE_INSENSITIVE
++			sum += yaffs_toupper(*bname) * i;
++#else
++			sum += (*bname) * i;
++#endif
++			i++;
++			bname++;
++		}
++	}
++	return sum;
++}
++
++static void yaffs_SetObjectName(yaffs_Object *obj, const YCHAR *name)
++{
++#ifdef CONFIG_YAFFS_SHORT_NAMES_IN_RAM
++	memset(obj->shortName, 0, sizeof(YCHAR) * (YAFFS_SHORT_NAME_LENGTH+1));
++	if (name && yaffs_strlen(name) <= YAFFS_SHORT_NAME_LENGTH)
++		yaffs_strcpy(obj->shortName, name);
++	else
++		obj->shortName[0] = _Y('\0');
++#endif
++	obj->sum = yaffs_CalcNameSum(name);
++}
++
++/*-------------------- TNODES -------------------
++
++ * List of spare tnodes
++ * The list is hooked together using the first pointer
++ * in the tnode.
++ */
++
++/* yaffs_CreateTnodes creates a bunch more tnodes and
++ * adds them to the tnode free list.
++ * Don't use this function directly
++ */
++
++static int yaffs_CreateTnodes(yaffs_Device *dev, int nTnodes)
++{
++	int i;
++	int tnodeSize;
++	yaffs_Tnode *newTnodes;
++	__u8 *mem;
++	yaffs_Tnode *curr;
++	yaffs_Tnode *next;
++	yaffs_TnodeList *tnl;
++
++	if (nTnodes < 1)
++		return YAFFS_OK;
++
++	/* Calculate the tnode size in bytes for variable width tnode support.
++	 * Must be a multiple of 32-bits  */
++	tnodeSize = (dev->tnodeWidth * YAFFS_NTNODES_LEVEL0)/8;
++
++	if (tnodeSize < sizeof(yaffs_Tnode))
++		tnodeSize = sizeof(yaffs_Tnode);
++
++	/* make these things */
++
++	newTnodes = YMALLOC(nTnodes * tnodeSize);
++	mem = (__u8 *)newTnodes;
++
++	if (!newTnodes) {
++		T(YAFFS_TRACE_ERROR,
++			(TSTR("yaffs: Could not allocate Tnodes" TENDSTR)));
++		return YAFFS_FAIL;
++	}
++
++	/* Hook them into the free list */
++#if 0
++	for (i = 0; i < nTnodes - 1; i++) {
++		newTnodes[i].internal[0] = &newTnodes[i + 1];
++#ifdef CONFIG_YAFFS_TNODE_LIST_DEBUG
++		newTnodes[i].internal[YAFFS_NTNODES_INTERNAL] = (void *)1;
++#endif
++	}
++
++	newTnodes[nTnodes - 1].internal[0] = dev->freeTnodes;
++#ifdef CONFIG_YAFFS_TNODE_LIST_DEBUG
++	newTnodes[nTnodes - 1].internal[YAFFS_NTNODES_INTERNAL] = (void *)1;
++#endif
++	dev->freeTnodes = newTnodes;
++#else
++	/* New hookup for wide tnodes */
++	for (i = 0; i < nTnodes - 1; i++) {
++		curr = (yaffs_Tnode *) &mem[i * tnodeSize];
++		next = (yaffs_Tnode *) &mem[(i+1) * tnodeSize];
++		curr->internal[0] = next;
++	}
++
++	curr = (yaffs_Tnode *) &mem[(nTnodes - 1) * tnodeSize];
++	curr->internal[0] = dev->freeTnodes;
++	dev->freeTnodes = (yaffs_Tnode *)mem;
++
++#endif
++
++
++	dev->nFreeTnodes += nTnodes;
++	dev->nTnodesCreated += nTnodes;
++
++	/* Now add this bunch of tnodes to a list for freeing up.
++	 * NB If we can't add this to the management list it isn't fatal
++	 * but it just means we can't free this bunch of tnodes later.
++	 */
++
++	tnl = YMALLOC(sizeof(yaffs_TnodeList));
++	if (!tnl) {
++		T(YAFFS_TRACE_ERROR,
++		  (TSTR
++		   ("yaffs: Could not add tnodes to management list" TENDSTR)));
++		   return YAFFS_FAIL;
++	} else {
++		tnl->tnodes = newTnodes;
++		tnl->next = dev->allocatedTnodeList;
++		dev->allocatedTnodeList = tnl;
++	}
++
++	T(YAFFS_TRACE_ALLOCATE, (TSTR("yaffs: Tnodes added" TENDSTR)));
++
++	return YAFFS_OK;
++}
++
++/* GetTnode gets us a clean tnode. Tries to make allocate more if we run out */
++
++static yaffs_Tnode *yaffs_GetTnodeRaw(yaffs_Device *dev)
++{
++	yaffs_Tnode *tn = NULL;
++
++	/* If there are none left make more */
++	if (!dev->freeTnodes)
++		yaffs_CreateTnodes(dev, YAFFS_ALLOCATION_NTNODES);
++
++	if (dev->freeTnodes) {
++		tn = dev->freeTnodes;
++#ifdef CONFIG_YAFFS_TNODE_LIST_DEBUG
++		if (tn->internal[YAFFS_NTNODES_INTERNAL] != (void *)1) {
++			/* Hoosterman, this thing looks like it isn't in the list */
++			T(YAFFS_TRACE_ALWAYS,
++			  (TSTR("yaffs: Tnode list bug 1" TENDSTR)));
++		}
++#endif
++		dev->freeTnodes = dev->freeTnodes->internal[0];
++		dev->nFreeTnodes--;
++	}
++
++	dev->nCheckpointBlocksRequired = 0; /* force recalculation*/
++
++	return tn;
++}
++
++static yaffs_Tnode *yaffs_GetTnode(yaffs_Device *dev)
++{
++	yaffs_Tnode *tn = yaffs_GetTnodeRaw(dev);
++	int tnodeSize = (dev->tnodeWidth * YAFFS_NTNODES_LEVEL0)/8;
++
++	if (tnodeSize < sizeof(yaffs_Tnode))
++		tnodeSize = sizeof(yaffs_Tnode);
++
++	if (tn)
++		memset(tn, 0, tnodeSize);
++
++	return tn;
++}
++
++/* FreeTnode frees up a tnode and puts it back on the free list */
++static void yaffs_FreeTnode(yaffs_Device *dev, yaffs_Tnode *tn)
++{
++	if (tn) {
++#ifdef CONFIG_YAFFS_TNODE_LIST_DEBUG
++		if (tn->internal[YAFFS_NTNODES_INTERNAL] != 0) {
++			/* Hoosterman, this thing looks like it is already in the list */
++			T(YAFFS_TRACE_ALWAYS,
++			  (TSTR("yaffs: Tnode list bug 2" TENDSTR)));
++		}
++		tn->internal[YAFFS_NTNODES_INTERNAL] = (void *)1;
++#endif
++		tn->internal[0] = dev->freeTnodes;
++		dev->freeTnodes = tn;
++		dev->nFreeTnodes++;
++	}
++	dev->nCheckpointBlocksRequired = 0; /* force recalculation*/
++}
++
++static void yaffs_DeinitialiseTnodes(yaffs_Device *dev)
++{
++	/* Free the list of allocated tnodes */
++	yaffs_TnodeList *tmp;
++
++	while (dev->allocatedTnodeList) {
++		tmp = dev->allocatedTnodeList->next;
++
++		YFREE(dev->allocatedTnodeList->tnodes);
++		YFREE(dev->allocatedTnodeList);
++		dev->allocatedTnodeList = tmp;
++
++	}
++
++	dev->freeTnodes = NULL;
++	dev->nFreeTnodes = 0;
++}
++
++static void yaffs_InitialiseTnodes(yaffs_Device *dev)
++{
++	dev->allocatedTnodeList = NULL;
++	dev->freeTnodes = NULL;
++	dev->nFreeTnodes = 0;
++	dev->nTnodesCreated = 0;
++}
++
++
++void yaffs_PutLevel0Tnode(yaffs_Device *dev, yaffs_Tnode *tn, unsigned pos,
++		unsigned val)
++{
++	__u32 *map = (__u32 *)tn;
++	__u32 bitInMap;
++	__u32 bitInWord;
++	__u32 wordInMap;
++	__u32 mask;
++
++	pos &= YAFFS_TNODES_LEVEL0_MASK;
++	val >>= dev->chunkGroupBits;
++
++	bitInMap = pos * dev->tnodeWidth;
++	wordInMap = bitInMap / 32;
++	bitInWord = bitInMap & (32 - 1);
++
++	mask = dev->tnodeMask << bitInWord;
++
++	map[wordInMap] &= ~mask;
++	map[wordInMap] |= (mask & (val << bitInWord));
++
++	if (dev->tnodeWidth > (32 - bitInWord)) {
++		bitInWord = (32 - bitInWord);
++		wordInMap++;;
++		mask = dev->tnodeMask >> (/*dev->tnodeWidth -*/ bitInWord);
++		map[wordInMap] &= ~mask;
++		map[wordInMap] |= (mask & (val >> bitInWord));
++	}
++}
++
++static __u32 yaffs_GetChunkGroupBase(yaffs_Device *dev, yaffs_Tnode *tn,
++		unsigned pos)
++{
++	__u32 *map = (__u32 *)tn;
++	__u32 bitInMap;
++	__u32 bitInWord;
++	__u32 wordInMap;
++	__u32 val;
++
++	pos &= YAFFS_TNODES_LEVEL0_MASK;
++
++	bitInMap = pos * dev->tnodeWidth;
++	wordInMap = bitInMap / 32;
++	bitInWord = bitInMap & (32 - 1);
++
++	val = map[wordInMap] >> bitInWord;
++
++	if	(dev->tnodeWidth > (32 - bitInWord)) {
++		bitInWord = (32 - bitInWord);
++		wordInMap++;;
++		val |= (map[wordInMap] << bitInWord);
++	}
++
++	val &= dev->tnodeMask;
++	val <<= dev->chunkGroupBits;
++
++	return val;
++}
++
++/* ------------------- End of individual tnode manipulation -----------------*/
++
++/* ---------Functions to manipulate the look-up tree (made up of tnodes) ------
++ * The look up tree is represented by the top tnode and the number of topLevel
++ * in the tree. 0 means only the level 0 tnode is in the tree.
++ */
++
++/* FindLevel0Tnode finds the level 0 tnode, if one exists. */
++static yaffs_Tnode *yaffs_FindLevel0Tnode(yaffs_Device *dev,
++					yaffs_FileStructure *fStruct,
++					__u32 chunkId)
++{
++	yaffs_Tnode *tn = fStruct->top;
++	__u32 i;
++	int requiredTallness;
++	int level = fStruct->topLevel;
++
++	/* Check sane level and chunk Id */
++	if (level < 0 || level > YAFFS_TNODES_MAX_LEVEL)
++		return NULL;
++
++	if (chunkId > YAFFS_MAX_CHUNK_ID)
++		return NULL;
++
++	/* First check we're tall enough (ie enough topLevel) */
++
++	i = chunkId >> YAFFS_TNODES_LEVEL0_BITS;
++	requiredTallness = 0;
++	while (i) {
++		i >>= YAFFS_TNODES_INTERNAL_BITS;
++		requiredTallness++;
++	}
++
++	if (requiredTallness > fStruct->topLevel)
++		return NULL; /* Not tall enough, so we can't find it */
++
++	/* Traverse down to level 0 */
++	while (level > 0 && tn) {
++		tn = tn->internal[(chunkId >>
++			(YAFFS_TNODES_LEVEL0_BITS +
++				(level - 1) *
++				YAFFS_TNODES_INTERNAL_BITS)) &
++			YAFFS_TNODES_INTERNAL_MASK];
++		level--;
++	}
++
++	return tn;
++}
++
++/* AddOrFindLevel0Tnode finds the level 0 tnode if it exists, otherwise first expands the tree.
++ * This happens in two steps:
++ *  1. If the tree isn't tall enough, then make it taller.
++ *  2. Scan down the tree towards the level 0 tnode adding tnodes if required.
++ *
++ * Used when modifying the tree.
++ *
++ *  If the tn argument is NULL, then a fresh tnode will be added otherwise the specified tn will
++ *  be plugged into the ttree.
++ */
++
++static yaffs_Tnode *yaffs_AddOrFindLevel0Tnode(yaffs_Device *dev,
++					yaffs_FileStructure *fStruct,
++					__u32 chunkId,
++					yaffs_Tnode *passedTn)
++{
++	int requiredTallness;
++	int i;
++	int l;
++	yaffs_Tnode *tn;
++
++	__u32 x;
++
++
++	/* Check sane level and page Id */
++	if (fStruct->topLevel < 0 || fStruct->topLevel > YAFFS_TNODES_MAX_LEVEL)
++		return NULL;
++
++	if (chunkId > YAFFS_MAX_CHUNK_ID)
++		return NULL;
++
++	/* First check we're tall enough (ie enough topLevel) */
++
++	x = chunkId >> YAFFS_TNODES_LEVEL0_BITS;
++	requiredTallness = 0;
++	while (x) {
++		x >>= YAFFS_TNODES_INTERNAL_BITS;
++		requiredTallness++;
++	}
++
++
++	if (requiredTallness > fStruct->topLevel) {
++		/* Not tall enough, gotta make the tree taller */
++		for (i = fStruct->topLevel; i < requiredTallness; i++) {
++
++			tn = yaffs_GetTnode(dev);
++
++			if (tn) {
++				tn->internal[0] = fStruct->top;
++				fStruct->top = tn;
++			} else {
++				T(YAFFS_TRACE_ERROR,
++				  (TSTR("yaffs: no more tnodes" TENDSTR)));
++			}
++		}
++
++		fStruct->topLevel = requiredTallness;
++	}
++
++	/* Traverse down to level 0, adding anything we need */
++
++	l = fStruct->topLevel;
++	tn = fStruct->top;
++
++	if (l > 0) {
++		while (l > 0 && tn) {
++			x = (chunkId >>
++			     (YAFFS_TNODES_LEVEL0_BITS +
++			      (l - 1) * YAFFS_TNODES_INTERNAL_BITS)) &
++			    YAFFS_TNODES_INTERNAL_MASK;
++
++
++			if ((l > 1) && !tn->internal[x]) {
++				/* Add missing non-level-zero tnode */
++				tn->internal[x] = yaffs_GetTnode(dev);
++
++			} else if (l == 1) {
++				/* Looking from level 1 at level 0 */
++				if (passedTn) {
++					/* If we already have one, then release it.*/
++					if (tn->internal[x])
++						yaffs_FreeTnode(dev, tn->internal[x]);
++					tn->internal[x] = passedTn;
++
++				} else if (!tn->internal[x]) {
++					/* Don't have one, none passed in */
++					tn->internal[x] = yaffs_GetTnode(dev);
++				}
++			}
++
++			tn = tn->internal[x];
++			l--;
++		}
++	} else {
++		/* top is level 0 */
++		if (passedTn) {
++			memcpy(tn, passedTn, (dev->tnodeWidth * YAFFS_NTNODES_LEVEL0)/8);
++			yaffs_FreeTnode(dev, passedTn);
++		}
++	}
++
++	return tn;
++}
++
++static int yaffs_FindChunkInGroup(yaffs_Device *dev, int theChunk,
++				yaffs_ExtendedTags *tags, int objectId,
++				int chunkInInode)
++{
++	int j;
++
++	for (j = 0; theChunk && j < dev->chunkGroupSize; j++) {
++		if (yaffs_CheckChunkBit(dev, theChunk / dev->nChunksPerBlock,
++				theChunk % dev->nChunksPerBlock)) {
++			yaffs_ReadChunkWithTagsFromNAND(dev, theChunk, NULL,
++							tags);
++			if (yaffs_TagsMatch(tags, objectId, chunkInInode)) {
++				/* found it; */
++				return theChunk;
++			}
++		}
++		theChunk++;
++	}
++	return -1;
++}
++
++
++/* DeleteWorker scans backwards through the tnode tree and deletes all the
++ * chunks and tnodes in the file
++ * Returns 1 if the tree was deleted.
++ * Returns 0 if it stopped early due to hitting the limit and the delete is incomplete.
++ */
++
++static int yaffs_DeleteWorker(yaffs_Object *in, yaffs_Tnode *tn, __u32 level,
++			      int chunkOffset, int *limit)
++{
++	int i;
++	int chunkInInode;
++	int theChunk;
++	yaffs_ExtendedTags tags;
++	int foundChunk;
++	yaffs_Device *dev = in->myDev;
++
++	int allDone = 1;
++
++	if (tn) {
++		if (level > 0) {
++			for (i = YAFFS_NTNODES_INTERNAL - 1; allDone && i >= 0;
++			     i--) {
++				if (tn->internal[i]) {
++					if (limit && (*limit) < 0) {
++						allDone = 0;
++					} else {
++						allDone =
++							yaffs_DeleteWorker(in,
++								tn->
++								internal
++								[i],
++								level -
++								1,
++								(chunkOffset
++									<<
++									YAFFS_TNODES_INTERNAL_BITS)
++								+ i,
++								limit);
++					}
++					if (allDone) {
++						yaffs_FreeTnode(dev,
++								tn->
++								internal[i]);
++						tn->internal[i] = NULL;
++					}
++				}
++			}
++			return (allDone) ? 1 : 0;
++		} else if (level == 0) {
++			int hitLimit = 0;
++
++			for (i = YAFFS_NTNODES_LEVEL0 - 1; i >= 0 && !hitLimit;
++					i--) {
++				theChunk = yaffs_GetChunkGroupBase(dev, tn, i);
++				if (theChunk) {
++
++					chunkInInode = (chunkOffset <<
++						YAFFS_TNODES_LEVEL0_BITS) + i;
++
++					foundChunk =
++						yaffs_FindChunkInGroup(dev,
++								theChunk,
++								&tags,
++								in->objectId,
++								chunkInInode);
++
++					if (foundChunk > 0) {
++						yaffs_DeleteChunk(dev,
++								  foundChunk, 1,
++								  __LINE__);
++						in->nDataChunks--;
++						if (limit) {
++							*limit = *limit - 1;
++							if (*limit <= 0)
++								hitLimit = 1;
++						}
++
++					}
++
++					yaffs_PutLevel0Tnode(dev, tn, i, 0);
++				}
++
++			}
++			return (i < 0) ? 1 : 0;
++
++		}
++
++	}
++
++	return 1;
++
++}
++
++static void yaffs_SoftDeleteChunk(yaffs_Device *dev, int chunk)
++{
++	yaffs_BlockInfo *theBlock;
++
++	T(YAFFS_TRACE_DELETION, (TSTR("soft delete chunk %d" TENDSTR), chunk));
++
++	theBlock = yaffs_GetBlockInfo(dev, chunk / dev->nChunksPerBlock);
++	if (theBlock) {
++		theBlock->softDeletions++;
++		dev->nFreeChunks++;
++	}
++}
++
++/* SoftDeleteWorker scans backwards through the tnode tree and soft deletes all the chunks in the file.
++ * All soft deleting does is increment the block's softdelete count and pulls the chunk out
++ * of the tnode.
++ * Thus, essentially this is the same as DeleteWorker except that the chunks are soft deleted.
++ */
++
++static int yaffs_SoftDeleteWorker(yaffs_Object *in, yaffs_Tnode *tn,
++				  __u32 level, int chunkOffset)
++{
++	int i;
++	int theChunk;
++	int allDone = 1;
++	yaffs_Device *dev = in->myDev;
++
++	if (tn) {
++		if (level > 0) {
++
++			for (i = YAFFS_NTNODES_INTERNAL - 1; allDone && i >= 0;
++			     i--) {
++				if (tn->internal[i]) {
++					allDone =
++					    yaffs_SoftDeleteWorker(in,
++								   tn->
++								   internal[i],
++								   level - 1,
++								   (chunkOffset
++								    <<
++								    YAFFS_TNODES_INTERNAL_BITS)
++								   + i);
++					if (allDone) {
++						yaffs_FreeTnode(dev,
++								tn->
++								internal[i]);
++						tn->internal[i] = NULL;
++					} else {
++						/* Hoosterman... how could this happen? */
++					}
++				}
++			}
++			return (allDone) ? 1 : 0;
++		} else if (level == 0) {
++
++			for (i = YAFFS_NTNODES_LEVEL0 - 1; i >= 0; i--) {
++				theChunk = yaffs_GetChunkGroupBase(dev, tn, i);
++				if (theChunk) {
++					/* Note this does not find the real chunk, only the chunk group.
++					 * We make an assumption that a chunk group is not larger than
++					 * a block.
++					 */
++					yaffs_SoftDeleteChunk(dev, theChunk);
++					yaffs_PutLevel0Tnode(dev, tn, i, 0);
++				}
++
++			}
++			return 1;
++
++		}
++
++	}
++
++	return 1;
++
++}
++
++static void yaffs_SoftDeleteFile(yaffs_Object *obj)
++{
++	if (obj->deleted &&
++	    obj->variantType == YAFFS_OBJECT_TYPE_FILE && !obj->softDeleted) {
++		if (obj->nDataChunks <= 0) {
++			/* Empty file with no duplicate object headers, just delete it immediately */
++			yaffs_FreeTnode(obj->myDev,
++					obj->variant.fileVariant.top);
++			obj->variant.fileVariant.top = NULL;
++			T(YAFFS_TRACE_TRACING,
++			  (TSTR("yaffs: Deleting empty file %d" TENDSTR),
++			   obj->objectId));
++			yaffs_DoGenericObjectDeletion(obj);
++		} else {
++			yaffs_SoftDeleteWorker(obj,
++					       obj->variant.fileVariant.top,
++					       obj->variant.fileVariant.
++					       topLevel, 0);
++			obj->softDeleted = 1;
++		}
++	}
++}
++
++/* Pruning removes any part of the file structure tree that is beyond the
++ * bounds of the file (ie that does not point to chunks).
++ *
++ * A file should only get pruned when its size is reduced.
++ *
++ * Before pruning, the chunks must be pulled from the tree and the
++ * level 0 tnode entries must be zeroed out.
++ * Could also use this for file deletion, but that's probably better handled
++ * by a special case.
++ */
++
++static yaffs_Tnode *yaffs_PruneWorker(yaffs_Device *dev, yaffs_Tnode *tn,
++				__u32 level, int del0)
++{
++	int i;
++	int hasData;
++
++	if (tn) {
++		hasData = 0;
++
++		for (i = 0; i < YAFFS_NTNODES_INTERNAL; i++) {
++			if (tn->internal[i] && level > 0) {
++				tn->internal[i] =
++				    yaffs_PruneWorker(dev, tn->internal[i],
++						      level - 1,
++						      (i == 0) ? del0 : 1);
++			}
++
++			if (tn->internal[i])
++				hasData++;
++		}
++
++		if (hasData == 0 && del0) {
++			/* Free and return NULL */
++
++			yaffs_FreeTnode(dev, tn);
++			tn = NULL;
++		}
++
++	}
++
++	return tn;
++
++}
++
++static int yaffs_PruneFileStructure(yaffs_Device *dev,
++				yaffs_FileStructure *fStruct)
++{
++	int i;
++	int hasData;
++	int done = 0;
++	yaffs_Tnode *tn;
++
++	if (fStruct->topLevel > 0) {
++		fStruct->top =
++		    yaffs_PruneWorker(dev, fStruct->top, fStruct->topLevel, 0);
++
++		/* Now we have a tree with all the non-zero branches NULL but the height
++		 * is the same as it was.
++		 * Let's see if we can trim internal tnodes to shorten the tree.
++		 * We can do this if only the 0th element in the tnode is in use
++		 * (ie all the non-zero are NULL)
++		 */
++
++		while (fStruct->topLevel && !done) {
++			tn = fStruct->top;
++
++			hasData = 0;
++			for (i = 1; i < YAFFS_NTNODES_INTERNAL; i++) {
++				if (tn->internal[i])
++					hasData++;
++			}
++
++			if (!hasData) {
++				fStruct->top = tn->internal[0];
++				fStruct->topLevel--;
++				yaffs_FreeTnode(dev, tn);
++			} else {
++				done = 1;
++			}
++		}
++	}
++
++	return YAFFS_OK;
++}
++
++/*-------------------- End of File Structure functions.-------------------*/
++
++/* yaffs_CreateFreeObjects creates a bunch more objects and
++ * adds them to the object free list.
++ */
++static int yaffs_CreateFreeObjects(yaffs_Device *dev, int nObjects)
++{
++	int i;
++	yaffs_Object *newObjects;
++	yaffs_ObjectList *list;
++
++	if (nObjects < 1)
++		return YAFFS_OK;
++
++	/* make these things */
++	newObjects = YMALLOC(nObjects * sizeof(yaffs_Object));
++	list = YMALLOC(sizeof(yaffs_ObjectList));
++
++	if (!newObjects || !list) {
++		if (newObjects)
++			YFREE(newObjects);
++		if (list)
++			YFREE(list);
++		T(YAFFS_TRACE_ALLOCATE,
++		  (TSTR("yaffs: Could not allocate more objects" TENDSTR)));
++		return YAFFS_FAIL;
++	}
++
++	/* Hook them into the free list */
++	for (i = 0; i < nObjects - 1; i++) {
++		newObjects[i].siblings.next =
++				(struct ylist_head *)(&newObjects[i + 1]);
++	}
++
++	newObjects[nObjects - 1].siblings.next = (void *)dev->freeObjects;
++	dev->freeObjects = newObjects;
++	dev->nFreeObjects += nObjects;
++	dev->nObjectsCreated += nObjects;
++
++	/* Now add this bunch of Objects to a list for freeing up. */
++
++	list->objects = newObjects;
++	list->next = dev->allocatedObjectList;
++	dev->allocatedObjectList = list;
++
++	return YAFFS_OK;
++}
++
++
++/* AllocateEmptyObject gets us a clean Object. Tries to make allocate more if we run out */
++static yaffs_Object *yaffs_AllocateEmptyObject(yaffs_Device *dev)
++{
++	yaffs_Object *tn = NULL;
++
++#ifdef VALGRIND_TEST
++	tn = YMALLOC(sizeof(yaffs_Object));
++#else
++	/* If there are none left make more */
++	if (!dev->freeObjects)
++		yaffs_CreateFreeObjects(dev, YAFFS_ALLOCATION_NOBJECTS);
++
++	if (dev->freeObjects) {
++		tn = dev->freeObjects;
++		dev->freeObjects =
++			(yaffs_Object *) (dev->freeObjects->siblings.next);
++		dev->nFreeObjects--;
++	}
++#endif
++	if (tn) {
++		/* Now sweeten it up... */
++
++		memset(tn, 0, sizeof(yaffs_Object));
++		tn->beingCreated = 1;
++
++		tn->myDev = dev;
++		tn->hdrChunk = 0;
++		tn->variantType = YAFFS_OBJECT_TYPE_UNKNOWN;
++		YINIT_LIST_HEAD(&(tn->hardLinks));
++		YINIT_LIST_HEAD(&(tn->hashLink));
++		YINIT_LIST_HEAD(&tn->siblings);
++
++
++		/* Now make the directory sane */
++		if (dev->rootDir) {
++			tn->parent = dev->rootDir;
++			ylist_add(&(tn->siblings), &dev->rootDir->variant.directoryVariant.children);
++		}
++
++		/* Add it to the lost and found directory.
++		 * NB Can't put root or lostNFound in lostNFound so
++		 * check if lostNFound exists first
++		 */
++		if (dev->lostNFoundDir)
++			yaffs_AddObjectToDirectory(dev->lostNFoundDir, tn);
++
++		tn->beingCreated = 0;
++	}
++
++	dev->nCheckpointBlocksRequired = 0; /* force recalculation*/
++
++	return tn;
++}
++
++static yaffs_Object *yaffs_CreateFakeDirectory(yaffs_Device *dev, int number,
++					       __u32 mode)
++{
++
++	yaffs_Object *obj =
++	    yaffs_CreateNewObject(dev, number, YAFFS_OBJECT_TYPE_DIRECTORY);
++	if (obj) {
++		obj->fake = 1;		/* it is fake so it might have no NAND presence... */
++		obj->renameAllowed = 0;	/* ... and we're not allowed to rename it... */
++		obj->unlinkAllowed = 0;	/* ... or unlink it */
++		obj->deleted = 0;
++		obj->unlinked = 0;
++		obj->yst_mode = mode;
++		obj->myDev = dev;
++		obj->hdrChunk = 0;	/* Not a valid chunk. */
++	}
++
++	return obj;
++
++}
++
++static void yaffs_UnhashObject(yaffs_Object *tn)
++{
++	int bucket;
++	yaffs_Device *dev = tn->myDev;
++
++	/* If it is still linked into the bucket list, free from the list */
++	if (!ylist_empty(&tn->hashLink)) {
++		ylist_del_init(&tn->hashLink);
++		bucket = yaffs_HashFunction(tn->objectId);
++		dev->objectBucket[bucket].count--;
++	}
++}
++
++/*  FreeObject frees up a Object and puts it back on the free list */
++static void yaffs_FreeObject(yaffs_Object *tn)
++{
++	yaffs_Device *dev = tn->myDev;
++
++#ifdef __KERNEL__
++	T(YAFFS_TRACE_OS, (TSTR("FreeObject %p inode %p"TENDSTR), tn, tn->myInode));
++#endif
++
++	if (tn->parent)
++		YBUG();
++	if (!ylist_empty(&tn->siblings))
++		YBUG();
++
++
++#ifdef __KERNEL__
++	if (tn->myInode) {
++		/* We're still hooked up to a cached inode.
++		 * Don't delete now, but mark for later deletion
++		 */
++		tn->deferedFree = 1;
++		return;
++	}
++#endif
++
++	yaffs_UnhashObject(tn);
++
++#ifdef VALGRIND_TEST
++	YFREE(tn);
++#else
++	/* Link into the free list. */
++	tn->siblings.next = (struct ylist_head *)(dev->freeObjects);
++	dev->freeObjects = tn;
++	dev->nFreeObjects++;
++#endif
++	dev->nCheckpointBlocksRequired = 0; /* force recalculation*/
++}
++
++#ifdef __KERNEL__
++
++void yaffs_HandleDeferedFree(yaffs_Object *obj)
++{
++	if (obj->deferedFree)
++		yaffs_FreeObject(obj);
++}
++
++#endif
++
++static void yaffs_DeinitialiseObjects(yaffs_Device *dev)
++{
++	/* Free the list of allocated Objects */
++
++	yaffs_ObjectList *tmp;
++
++	while (dev->allocatedObjectList) {
++		tmp = dev->allocatedObjectList->next;
++		YFREE(dev->allocatedObjectList->objects);
++		YFREE(dev->allocatedObjectList);
++
++		dev->allocatedObjectList = tmp;
++	}
++
++	dev->freeObjects = NULL;
++	dev->nFreeObjects = 0;
++}
++
++static void yaffs_InitialiseObjects(yaffs_Device *dev)
++{
++	int i;
++
++	dev->allocatedObjectList = NULL;
++	dev->freeObjects = NULL;
++	dev->nFreeObjects = 0;
++
++	for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) {
++		YINIT_LIST_HEAD(&dev->objectBucket[i].list);
++		dev->objectBucket[i].count = 0;
++	}
++}
++
++static int yaffs_FindNiceObjectBucket(yaffs_Device *dev)
++{
++	static int x;
++	int i;
++	int l = 999;
++	int lowest = 999999;
++
++	/* First let's see if we can find one that's empty. */
++
++	for (i = 0; i < 10 && lowest > 0; i++) {
++		x++;
++		x %= YAFFS_NOBJECT_BUCKETS;
++		if (dev->objectBucket[x].count < lowest) {
++			lowest = dev->objectBucket[x].count;
++			l = x;
++		}
++
++	}
++
++	/* If we didn't find an empty list, then try
++	 * looking a bit further for a short one
++	 */
++
++	for (i = 0; i < 10 && lowest > 3; i++) {
++		x++;
++		x %= YAFFS_NOBJECT_BUCKETS;
++		if (dev->objectBucket[x].count < lowest) {
++			lowest = dev->objectBucket[x].count;
++			l = x;
++		}
++
++	}
++
++	return l;
++}
++
++static int yaffs_CreateNewObjectNumber(yaffs_Device *dev)
++{
++	int bucket = yaffs_FindNiceObjectBucket(dev);
++
++	/* Now find an object value that has not already been taken
++	 * by scanning the list.
++	 */
++
++	int found = 0;
++	struct ylist_head *i;
++
++	__u32 n = (__u32) bucket;
++
++	/* yaffs_CheckObjectHashSanity();  */
++
++	while (!found) {
++		found = 1;
++		n += YAFFS_NOBJECT_BUCKETS;
++		if (1 || dev->objectBucket[bucket].count > 0) {
++			ylist_for_each(i, &dev->objectBucket[bucket].list) {
++				/* If there is already one in the list */
++				if (i && ylist_entry(i, yaffs_Object,
++						hashLink)->objectId == n) {
++					found = 0;
++				}
++			}
++		}
++	}
++
++	return n;
++}
++
++static void yaffs_HashObject(yaffs_Object *in)
++{
++	int bucket = yaffs_HashFunction(in->objectId);
++	yaffs_Device *dev = in->myDev;
++
++	ylist_add(&in->hashLink, &dev->objectBucket[bucket].list);
++	dev->objectBucket[bucket].count++;
++}
++
++yaffs_Object *yaffs_FindObjectByNumber(yaffs_Device *dev, __u32 number)
++{
++	int bucket = yaffs_HashFunction(number);
++	struct ylist_head *i;
++	yaffs_Object *in;
++
++	ylist_for_each(i, &dev->objectBucket[bucket].list) {
++		/* Look if it is in the list */
++		if (i) {
++			in = ylist_entry(i, yaffs_Object, hashLink);
++			if (in->objectId == number) {
++#ifdef __KERNEL__
++				/* Don't tell the VFS about this one if it is defered free */
++				if (in->deferedFree)
++					return NULL;
++#endif
++
++				return in;
++			}
++		}
++	}
++
++	return NULL;
++}
++
++yaffs_Object *yaffs_CreateNewObject(yaffs_Device *dev, int number,
++				    yaffs_ObjectType type)
++{
++	yaffs_Object *theObject;
++	yaffs_Tnode *tn = NULL;
++
++	if (number < 0)
++		number = yaffs_CreateNewObjectNumber(dev);
++
++	theObject = yaffs_AllocateEmptyObject(dev);
++	if (!theObject)
++		return NULL;
++
++	if (type == YAFFS_OBJECT_TYPE_FILE) {
++		tn = yaffs_GetTnode(dev);
++		if (!tn) {
++			yaffs_FreeObject(theObject);
++			return NULL;
++		}
++	}
++
++	if (theObject) {
++		theObject->fake = 0;
++		theObject->renameAllowed = 1;
++		theObject->unlinkAllowed = 1;
++		theObject->objectId = number;
++		yaffs_HashObject(theObject);
++		theObject->variantType = type;
++#ifdef CONFIG_YAFFS_WINCE
++		yfsd_WinFileTimeNow(theObject->win_atime);
++		theObject->win_ctime[0] = theObject->win_mtime[0] =
++		    theObject->win_atime[0];
++		theObject->win_ctime[1] = theObject->win_mtime[1] =
++		    theObject->win_atime[1];
++
++#else
++
++		theObject->yst_atime = theObject->yst_mtime =
++		    theObject->yst_ctime = Y_CURRENT_TIME;
++#endif
++		switch (type) {
++		case YAFFS_OBJECT_TYPE_FILE:
++			theObject->variant.fileVariant.fileSize = 0;
++			theObject->variant.fileVariant.scannedFileSize = 0;
++			theObject->variant.fileVariant.shrinkSize = 0xFFFFFFFF;	/* max __u32 */
++			theObject->variant.fileVariant.topLevel = 0;
++			theObject->variant.fileVariant.top = tn;
++			break;
++		case YAFFS_OBJECT_TYPE_DIRECTORY:
++			YINIT_LIST_HEAD(&theObject->variant.directoryVariant.
++					children);
++			break;
++		case YAFFS_OBJECT_TYPE_SYMLINK:
++		case YAFFS_OBJECT_TYPE_HARDLINK:
++		case YAFFS_OBJECT_TYPE_SPECIAL:
++			/* No action required */
++			break;
++		case YAFFS_OBJECT_TYPE_UNKNOWN:
++			/* todo this should not happen */
++			break;
++		}
++	}
++
++	return theObject;
++}
++
++static yaffs_Object *yaffs_FindOrCreateObjectByNumber(yaffs_Device *dev,
++						      int number,
++						      yaffs_ObjectType type)
++{
++	yaffs_Object *theObject = NULL;
++
++	if (number > 0)
++		theObject = yaffs_FindObjectByNumber(dev, number);
++
++	if (!theObject)
++		theObject = yaffs_CreateNewObject(dev, number, type);
++
++	return theObject;
++
++}
++
++
++static YCHAR *yaffs_CloneString(const YCHAR *str)
++{
++	YCHAR *newStr = NULL;
++
++	if (str && *str) {
++		newStr = YMALLOC((yaffs_strlen(str) + 1) * sizeof(YCHAR));
++		if (newStr)
++			yaffs_strcpy(newStr, str);
++	}
++
++	return newStr;
++
++}
++
++/*
++ * Mknod (create) a new object.
++ * equivalentObject only has meaning for a hard link;
++ * aliasString only has meaning for a sumlink.
++ * rdev only has meaning for devices (a subset of special objects)
++ */
++
++static yaffs_Object *yaffs_MknodObject(yaffs_ObjectType type,
++				       yaffs_Object *parent,
++				       const YCHAR *name,
++				       __u32 mode,
++				       __u32 uid,
++				       __u32 gid,
++				       yaffs_Object *equivalentObject,
++				       const YCHAR *aliasString, __u32 rdev)
++{
++	yaffs_Object *in;
++	YCHAR *str = NULL;
++
++	yaffs_Device *dev = parent->myDev;
++
++	/* Check if the entry exists. If it does then fail the call since we don't want a dup.*/
++	if (yaffs_FindObjectByName(parent, name))
++		return NULL;
++
++	in = yaffs_CreateNewObject(dev, -1, type);
++
++	if (!in)
++		return YAFFS_FAIL;
++
++	if (type == YAFFS_OBJECT_TYPE_SYMLINK) {
++		str = yaffs_CloneString(aliasString);
++		if (!str) {
++			yaffs_FreeObject(in);
++			return NULL;
++		}
++	}
++
++
++
++	if (in) {
++		in->hdrChunk = 0;
++		in->valid = 1;
++		in->variantType = type;
++
++		in->yst_mode = mode;
++
++#ifdef CONFIG_YAFFS_WINCE
++		yfsd_WinFileTimeNow(in->win_atime);
++		in->win_ctime[0] = in->win_mtime[0] = in->win_atime[0];
++		in->win_ctime[1] = in->win_mtime[1] = in->win_atime[1];
++
++#else
++		in->yst_atime = in->yst_mtime = in->yst_ctime = Y_CURRENT_TIME;
++
++		in->yst_rdev = rdev;
++		in->yst_uid = uid;
++		in->yst_gid = gid;
++#endif
++		in->nDataChunks = 0;
++
++		yaffs_SetObjectName(in, name);
++		in->dirty = 1;
++
++		yaffs_AddObjectToDirectory(parent, in);
++
++		in->myDev = parent->myDev;
++
++		switch (type) {
++		case YAFFS_OBJECT_TYPE_SYMLINK:
++			in->variant.symLinkVariant.alias = str;
++			break;
++		case YAFFS_OBJECT_TYPE_HARDLINK:
++			in->variant.hardLinkVariant.equivalentObject =
++				equivalentObject;
++			in->variant.hardLinkVariant.equivalentObjectId =
++				equivalentObject->objectId;
++			ylist_add(&in->hardLinks, &equivalentObject->hardLinks);
++			break;
++		case YAFFS_OBJECT_TYPE_FILE:
++		case YAFFS_OBJECT_TYPE_DIRECTORY:
++		case YAFFS_OBJECT_TYPE_SPECIAL:
++		case YAFFS_OBJECT_TYPE_UNKNOWN:
++			/* do nothing */
++			break;
++		}
++
++		if (yaffs_UpdateObjectHeader(in, name, 0, 0, 0) < 0) {
++			/* Could not create the object header, fail the creation */
++			yaffs_DeleteObject(in);
++			in = NULL;
++		}
++
++	}
++
++	return in;
++}
++
++yaffs_Object *yaffs_MknodFile(yaffs_Object *parent, const YCHAR *name,
++			__u32 mode, __u32 uid, __u32 gid)
++{
++	return yaffs_MknodObject(YAFFS_OBJECT_TYPE_FILE, parent, name, mode,
++				uid, gid, NULL, NULL, 0);
++}
++
++yaffs_Object *yaffs_MknodDirectory(yaffs_Object *parent, const YCHAR *name,
++				__u32 mode, __u32 uid, __u32 gid)
++{
++	return yaffs_MknodObject(YAFFS_OBJECT_TYPE_DIRECTORY, parent, name,
++				 mode, uid, gid, NULL, NULL, 0);
++}
++
++yaffs_Object *yaffs_MknodSpecial(yaffs_Object *parent, const YCHAR *name,
++				__u32 mode, __u32 uid, __u32 gid, __u32 rdev)
++{
++	return yaffs_MknodObject(YAFFS_OBJECT_TYPE_SPECIAL, parent, name, mode,
++				 uid, gid, NULL, NULL, rdev);
++}
++
++yaffs_Object *yaffs_MknodSymLink(yaffs_Object *parent, const YCHAR *name,
++				__u32 mode, __u32 uid, __u32 gid,
++				const YCHAR *alias)
++{
++	return yaffs_MknodObject(YAFFS_OBJECT_TYPE_SYMLINK, parent, name, mode,
++				uid, gid, NULL, alias, 0);
++}
++
++/* yaffs_Link returns the object id of the equivalent object.*/
++yaffs_Object *yaffs_Link(yaffs_Object *parent, const YCHAR *name,
++			yaffs_Object *equivalentObject)
++{
++	/* Get the real object in case we were fed a hard link as an equivalent object */
++	equivalentObject = yaffs_GetEquivalentObject(equivalentObject);
++
++	if (yaffs_MknodObject
++	    (YAFFS_OBJECT_TYPE_HARDLINK, parent, name, 0, 0, 0,
++	     equivalentObject, NULL, 0)) {
++		return equivalentObject;
++	} else {
++		return NULL;
++	}
++
++}
++
++static int yaffs_ChangeObjectName(yaffs_Object *obj, yaffs_Object *newDir,
++				const YCHAR *newName, int force, int shadows)
++{
++	int unlinkOp;
++	int deleteOp;
++
++	yaffs_Object *existingTarget;
++
++	if (newDir == NULL)
++		newDir = obj->parent;	/* use the old directory */
++
++	if (newDir->variantType != YAFFS_OBJECT_TYPE_DIRECTORY) {
++		T(YAFFS_TRACE_ALWAYS,
++		  (TSTR
++		   ("tragedy: yaffs_ChangeObjectName: newDir is not a directory"
++		    TENDSTR)));
++		YBUG();
++	}
++
++	/* TODO: Do we need this different handling for YAFFS2 and YAFFS1?? */
++	if (obj->myDev->isYaffs2)
++		unlinkOp = (newDir == obj->myDev->unlinkedDir);
++	else
++		unlinkOp = (newDir == obj->myDev->unlinkedDir
++			    && obj->variantType == YAFFS_OBJECT_TYPE_FILE);
++
++	deleteOp = (newDir == obj->myDev->deletedDir);
++
++	existingTarget = yaffs_FindObjectByName(newDir, newName);
++
++	/* If the object is a file going into the unlinked directory,
++	 *   then it is OK to just stuff it in since duplicate names are allowed.
++	 *   else only proceed if the new name does not exist and if we're putting
++	 *   it into a directory.
++	 */
++	if ((unlinkOp ||
++	     deleteOp ||
++	     force ||
++	     (shadows > 0) ||
++	     !existingTarget) &&
++	    newDir->variantType == YAFFS_OBJECT_TYPE_DIRECTORY) {
++		yaffs_SetObjectName(obj, newName);
++		obj->dirty = 1;
++
++		yaffs_AddObjectToDirectory(newDir, obj);
++
++		if (unlinkOp)
++			obj->unlinked = 1;
++
++		/* If it is a deletion then we mark it as a shrink for gc purposes. */
++		if (yaffs_UpdateObjectHeader(obj, newName, 0, deleteOp, shadows) >= 0)
++			return YAFFS_OK;
++	}
++
++	return YAFFS_FAIL;
++}
++
++int yaffs_RenameObject(yaffs_Object *oldDir, const YCHAR *oldName,
++		yaffs_Object *newDir, const YCHAR *newName)
++{
++	yaffs_Object *obj = NULL;
++	yaffs_Object *existingTarget = NULL;
++	int force = 0;
++
++
++	if (!oldDir || oldDir->variantType != YAFFS_OBJECT_TYPE_DIRECTORY)
++		YBUG();
++	if (!newDir || newDir->variantType != YAFFS_OBJECT_TYPE_DIRECTORY)
++		YBUG();
++
++#ifdef CONFIG_YAFFS_CASE_INSENSITIVE
++	/* Special case for case insemsitive systems (eg. WinCE).
++	 * While look-up is case insensitive, the name isn't.
++	 * Therefore we might want to change x.txt to X.txt
++	*/
++	if (oldDir == newDir && yaffs_strcmp(oldName, newName) == 0)
++		force = 1;
++#endif
++
++	else if (yaffs_strlen(newName) > YAFFS_MAX_NAME_LENGTH)
++		/* ENAMETOOLONG */
++		return YAFFS_FAIL;
++
++	obj = yaffs_FindObjectByName(oldDir, oldName);
++
++	if (obj && obj->renameAllowed) {
++
++		/* Now do the handling for an existing target, if there is one */
++
++		existingTarget = yaffs_FindObjectByName(newDir, newName);
++		if (existingTarget &&
++			existingTarget->variantType == YAFFS_OBJECT_TYPE_DIRECTORY &&
++			!ylist_empty(&existingTarget->variant.directoryVariant.children)) {
++			/* There is a target that is a non-empty directory, so we fail */
++			return YAFFS_FAIL;	/* EEXIST or ENOTEMPTY */
++		} else if (existingTarget && existingTarget != obj) {
++			/* Nuke the target first, using shadowing,
++			 * but only if it isn't the same object
++			 */
++			yaffs_ChangeObjectName(obj, newDir, newName, force,
++						existingTarget->objectId);
++			yaffs_UnlinkObject(existingTarget);
++		}
++
++		return yaffs_ChangeObjectName(obj, newDir, newName, 1, 0);
++	}
++	return YAFFS_FAIL;
++}
++
++/*------------------------- Block Management and Page Allocation ----------------*/
++
++static int yaffs_InitialiseBlocks(yaffs_Device *dev)
++{
++	int nBlocks = dev->internalEndBlock - dev->internalStartBlock + 1;
++
++	dev->blockInfo = NULL;
++	dev->chunkBits = NULL;
++
++	dev->allocationBlock = -1;	/* force it to get a new one */
++
++	/* If the first allocation strategy fails, thry the alternate one */
++	dev->blockInfo = YMALLOC(nBlocks * sizeof(yaffs_BlockInfo));
++	if (!dev->blockInfo) {
++		dev->blockInfo = YMALLOC_ALT(nBlocks * sizeof(yaffs_BlockInfo));
++		dev->blockInfoAlt = 1;
++	} else
++		dev->blockInfoAlt = 0;
++
++	if (dev->blockInfo) {
++		/* Set up dynamic blockinfo stuff. */
++		dev->chunkBitmapStride = (dev->nChunksPerBlock + 7) / 8; /* round up bytes */
++		dev->chunkBits = YMALLOC(dev->chunkBitmapStride * nBlocks);
++		if (!dev->chunkBits) {
++			dev->chunkBits = YMALLOC_ALT(dev->chunkBitmapStride * nBlocks);
++			dev->chunkBitsAlt = 1;
++		} else
++			dev->chunkBitsAlt = 0;
++	}
++
++	if (dev->blockInfo && dev->chunkBits) {
++		memset(dev->blockInfo, 0, nBlocks * sizeof(yaffs_BlockInfo));
++		memset(dev->chunkBits, 0, dev->chunkBitmapStride * nBlocks);
++		return YAFFS_OK;
++	}
++
++	return YAFFS_FAIL;
++}
++
++static void yaffs_DeinitialiseBlocks(yaffs_Device *dev)
++{
++	if (dev->blockInfoAlt && dev->blockInfo)
++		YFREE_ALT(dev->blockInfo);
++	else if (dev->blockInfo)
++		YFREE(dev->blockInfo);
++
++	dev->blockInfoAlt = 0;
++
++	dev->blockInfo = NULL;
++
++	if (dev->chunkBitsAlt && dev->chunkBits)
++		YFREE_ALT(dev->chunkBits);
++	else if (dev->chunkBits)
++		YFREE(dev->chunkBits);
++	dev->chunkBitsAlt = 0;
++	dev->chunkBits = NULL;
++}
++
++static int yaffs_BlockNotDisqualifiedFromGC(yaffs_Device *dev,
++					yaffs_BlockInfo *bi)
++{
++	int i;
++	__u32 seq;
++	yaffs_BlockInfo *b;
++
++	if (!dev->isYaffs2)
++		return 1;	/* disqualification only applies to yaffs2. */
++
++	if (!bi->hasShrinkHeader)
++		return 1;	/* can gc */
++
++	/* Find the oldest dirty sequence number if we don't know it and save it
++	 * so we don't have to keep recomputing it.
++	 */
++	if (!dev->oldestDirtySequence) {
++		seq = dev->sequenceNumber;
++
++		for (i = dev->internalStartBlock; i <= dev->internalEndBlock;
++				i++) {
++			b = yaffs_GetBlockInfo(dev, i);
++			if (b->blockState == YAFFS_BLOCK_STATE_FULL &&
++			    (b->pagesInUse - b->softDeletions) <
++			    dev->nChunksPerBlock && b->sequenceNumber < seq) {
++				seq = b->sequenceNumber;
++			}
++		}
++		dev->oldestDirtySequence = seq;
++	}
++
++	/* Can't do gc of this block if there are any blocks older than this one that have
++	 * discarded pages.
++	 */
++	return (bi->sequenceNumber <= dev->oldestDirtySequence);
++}
++
++/* FindDiretiestBlock is used to select the dirtiest block (or close enough)
++ * for garbage collection.
++ */
++
++static int yaffs_FindBlockForGarbageCollection(yaffs_Device *dev,
++					int aggressive)
++{
++	int b = dev->currentDirtyChecker;
++
++	int i;
++	int iterations;
++	int dirtiest = -1;
++	int pagesInUse = 0;
++	int prioritised = 0;
++	yaffs_BlockInfo *bi;
++	int pendingPrioritisedExist = 0;
++
++	/* First let's see if we need to grab a prioritised block */
++	if (dev->hasPendingPrioritisedGCs) {
++		for (i = dev->internalStartBlock; i < dev->internalEndBlock && !prioritised; i++) {
++
++			bi = yaffs_GetBlockInfo(dev, i);
++			/* yaffs_VerifyBlock(dev,bi,i); */
++
++			if (bi->gcPrioritise) {
++				pendingPrioritisedExist = 1;
++				if (bi->blockState == YAFFS_BLOCK_STATE_FULL &&
++				   yaffs_BlockNotDisqualifiedFromGC(dev, bi)) {
++					pagesInUse = (bi->pagesInUse - bi->softDeletions);
++					dirtiest = i;
++					prioritised = 1;
++					aggressive = 1; /* Fool the non-aggressive skip logiv below */
++				}
++			}
++		}
++
++		if (!pendingPrioritisedExist) /* None found, so we can clear this */
++			dev->hasPendingPrioritisedGCs = 0;
++	}
++
++	/* If we're doing aggressive GC then we are happy to take a less-dirty block, and
++	 * search harder.
++	 * else (we're doing a leasurely gc), then we only bother to do this if the
++	 * block has only a few pages in use.
++	 */
++
++	dev->nonAggressiveSkip--;
++
++	if (!aggressive && (dev->nonAggressiveSkip > 0))
++		return -1;
++
++	if (!prioritised)
++		pagesInUse =
++			(aggressive) ? dev->nChunksPerBlock : YAFFS_PASSIVE_GC_CHUNKS + 1;
++
++	if (aggressive)
++		iterations =
++		    dev->internalEndBlock - dev->internalStartBlock + 1;
++	else {
++		iterations =
++		    dev->internalEndBlock - dev->internalStartBlock + 1;
++		iterations = iterations / 16;
++		if (iterations > 200)
++			iterations = 200;
++	}
++
++	for (i = 0; i <= iterations && pagesInUse > 0 && !prioritised; i++) {
++		b++;
++		if (b < dev->internalStartBlock || b > dev->internalEndBlock)
++			b = dev->internalStartBlock;
++
++		if (b < dev->internalStartBlock || b > dev->internalEndBlock) {
++			T(YAFFS_TRACE_ERROR,
++			  (TSTR("**>> Block %d is not valid" TENDSTR), b));
++			YBUG();
++		}
++
++		bi = yaffs_GetBlockInfo(dev, b);
++
++		if (bi->blockState == YAFFS_BLOCK_STATE_FULL &&
++			(bi->pagesInUse - bi->softDeletions) < pagesInUse &&
++				yaffs_BlockNotDisqualifiedFromGC(dev, bi)) {
++			dirtiest = b;
++			pagesInUse = (bi->pagesInUse - bi->softDeletions);
++		}
++	}
++
++	dev->currentDirtyChecker = b;
++
++	if (dirtiest > 0) {
++		T(YAFFS_TRACE_GC,
++		  (TSTR("GC Selected block %d with %d free, prioritised:%d" TENDSTR), dirtiest,
++		   dev->nChunksPerBlock - pagesInUse, prioritised));
++	}
++
++	dev->oldestDirtySequence = 0;
++
++	if (dirtiest > 0)
++		dev->nonAggressiveSkip = 4;
++
++	return dirtiest;
++}
++
++static void yaffs_BlockBecameDirty(yaffs_Device *dev, int blockNo)
++{
++	yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, blockNo);
++
++	int erasedOk = 0;
++
++	/* If the block is still healthy erase it and mark as clean.
++	 * If the block has had a data failure, then retire it.
++	 */
++
++	T(YAFFS_TRACE_GC | YAFFS_TRACE_ERASE,
++		(TSTR("yaffs_BlockBecameDirty block %d state %d %s"TENDSTR),
++		blockNo, bi->blockState, (bi->needsRetiring) ? "needs retiring" : ""));
++
++	bi->blockState = YAFFS_BLOCK_STATE_DIRTY;
++
++	if (!bi->needsRetiring) {
++		yaffs_InvalidateCheckpoint(dev);
++		erasedOk = yaffs_EraseBlockInNAND(dev, blockNo);
++		if (!erasedOk) {
++			dev->nErasureFailures++;
++			T(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
++			  (TSTR("**>> Erasure failed %d" TENDSTR), blockNo));
++		}
++	}
++
++	if (erasedOk &&
++	    ((yaffs_traceMask & YAFFS_TRACE_ERASE) || !yaffs_SkipVerification(dev))) {
++		int i;
++		for (i = 0; i < dev->nChunksPerBlock; i++) {
++			if (!yaffs_CheckChunkErased
++			    (dev, blockNo * dev->nChunksPerBlock + i)) {
++				T(YAFFS_TRACE_ERROR,
++				  (TSTR
++				   (">>Block %d erasure supposedly OK, but chunk %d not erased"
++				    TENDSTR), blockNo, i));
++			}
++		}
++	}
++
++	if (erasedOk) {
++		/* Clean it up... */
++		bi->blockState = YAFFS_BLOCK_STATE_EMPTY;
++		dev->nErasedBlocks++;
++		bi->pagesInUse = 0;
++		bi->softDeletions = 0;
++		bi->hasShrinkHeader = 0;
++		bi->skipErasedCheck = 1;  /* This is clean, so no need to check */
++		bi->gcPrioritise = 0;
++		yaffs_ClearChunkBits(dev, blockNo);
++
++		T(YAFFS_TRACE_ERASE,
++		  (TSTR("Erased block %d" TENDSTR), blockNo));
++	} else {
++		dev->nFreeChunks -= dev->nChunksPerBlock;	/* We lost a block of free space */
++
++		yaffs_RetireBlock(dev, blockNo);
++		T(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
++		  (TSTR("**>> Block %d retired" TENDSTR), blockNo));
++	}
++}
++
++static int yaffs_FindBlockForAllocation(yaffs_Device *dev)
++{
++	int i;
++
++	yaffs_BlockInfo *bi;
++
++	if (dev->nErasedBlocks < 1) {
++		/* Hoosterman we've got a problem.
++		 * Can't get space to gc
++		 */
++		T(YAFFS_TRACE_ERROR,
++		  (TSTR("yaffs tragedy: no more erased blocks" TENDSTR)));
++
++		return -1;
++	}
++
++	/* Find an empty block. */
++
++	for (i = dev->internalStartBlock; i <= dev->internalEndBlock; i++) {
++		dev->allocationBlockFinder++;
++		if (dev->allocationBlockFinder < dev->internalStartBlock
++		    || dev->allocationBlockFinder > dev->internalEndBlock) {
++			dev->allocationBlockFinder = dev->internalStartBlock;
++		}
++
++		bi = yaffs_GetBlockInfo(dev, dev->allocationBlockFinder);
++
++		if (bi->blockState == YAFFS_BLOCK_STATE_EMPTY) {
++			bi->blockState = YAFFS_BLOCK_STATE_ALLOCATING;
++			dev->sequenceNumber++;
++			bi->sequenceNumber = dev->sequenceNumber;
++			dev->nErasedBlocks--;
++			T(YAFFS_TRACE_ALLOCATE,
++			  (TSTR("Allocated block %d, seq  %d, %d left" TENDSTR),
++			   dev->allocationBlockFinder, dev->sequenceNumber,
++			   dev->nErasedBlocks));
++			return dev->allocationBlockFinder;
++		}
++	}
++
++	T(YAFFS_TRACE_ALWAYS,
++	  (TSTR
++	   ("yaffs tragedy: no more erased blocks, but there should have been %d"
++	    TENDSTR), dev->nErasedBlocks));
++
++	return -1;
++}
++
++
++
++static int yaffs_CalcCheckpointBlocksRequired(yaffs_Device *dev)
++{
++	if (!dev->nCheckpointBlocksRequired &&
++	   dev->isYaffs2) {
++		/* Not a valid value so recalculate */
++		int nBytes = 0;
++		int nBlocks;
++		int devBlocks = (dev->endBlock - dev->startBlock + 1);
++		int tnodeSize;
++
++		tnodeSize = (dev->tnodeWidth * YAFFS_NTNODES_LEVEL0)/8;
++
++		if (tnodeSize < sizeof(yaffs_Tnode))
++			tnodeSize = sizeof(yaffs_Tnode);
++
++		nBytes += sizeof(yaffs_CheckpointValidity);
++		nBytes += sizeof(yaffs_CheckpointDevice);
++		nBytes += devBlocks * sizeof(yaffs_BlockInfo);
++		nBytes += devBlocks * dev->chunkBitmapStride;
++		nBytes += (sizeof(yaffs_CheckpointObject) + sizeof(__u32)) * (dev->nObjectsCreated - dev->nFreeObjects);
++		nBytes += (tnodeSize + sizeof(__u32)) * (dev->nTnodesCreated - dev->nFreeTnodes);
++		nBytes += sizeof(yaffs_CheckpointValidity);
++		nBytes += sizeof(__u32); /* checksum*/
++
++		/* Round up and add 2 blocks to allow for some bad blocks, so add 3 */
++
++		nBlocks = (nBytes/(dev->nDataBytesPerChunk * dev->nChunksPerBlock)) + 3;
++
++		dev->nCheckpointBlocksRequired = nBlocks;
++	}
++
++	return dev->nCheckpointBlocksRequired;
++}
++
++/*
++ * Check if there's space to allocate...
++ * Thinks.... do we need top make this ths same as yaffs_GetFreeChunks()?
++ */
++static int yaffs_CheckSpaceForAllocation(yaffs_Device *dev)
++{
++	int reservedChunks;
++	int reservedBlocks = dev->nReservedBlocks;
++	int checkpointBlocks;
++
++	if (dev->isYaffs2) {
++		checkpointBlocks =  yaffs_CalcCheckpointBlocksRequired(dev) -
++				    dev->blocksInCheckpoint;
++		if (checkpointBlocks < 0)
++			checkpointBlocks = 0;
++	} else {
++		checkpointBlocks = 0;
++	}
++
++	reservedChunks = ((reservedBlocks + checkpointBlocks) * dev->nChunksPerBlock);
++
++	return (dev->nFreeChunks > reservedChunks);
++}
++
++static int yaffs_AllocateChunk(yaffs_Device *dev, int useReserve,
++		yaffs_BlockInfo **blockUsedPtr)
++{
++	int retVal;
++	yaffs_BlockInfo *bi;
++
++	if (dev->allocationBlock < 0) {
++		/* Get next block to allocate off */
++		dev->allocationBlock = yaffs_FindBlockForAllocation(dev);
++		dev->allocationPage = 0;
++	}
++
++	if (!useReserve && !yaffs_CheckSpaceForAllocation(dev)) {
++		/* Not enough space to allocate unless we're allowed to use the reserve. */
++		return -1;
++	}
++
++	if (dev->nErasedBlocks < dev->nReservedBlocks
++			&& dev->allocationPage == 0) {
++		T(YAFFS_TRACE_ALLOCATE, (TSTR("Allocating reserve" TENDSTR)));
++	}
++
++	/* Next page please.... */
++	if (dev->allocationBlock >= 0) {
++		bi = yaffs_GetBlockInfo(dev, dev->allocationBlock);
++
++		retVal = (dev->allocationBlock * dev->nChunksPerBlock) +
++			dev->allocationPage;
++		bi->pagesInUse++;
++		yaffs_SetChunkBit(dev, dev->allocationBlock,
++				dev->allocationPage);
++
++		dev->allocationPage++;
++
++		dev->nFreeChunks--;
++
++		/* If the block is full set the state to full */
++		if (dev->allocationPage >= dev->nChunksPerBlock) {
++			bi->blockState = YAFFS_BLOCK_STATE_FULL;
++			dev->allocationBlock = -1;
++		}
++
++		if (blockUsedPtr)
++			*blockUsedPtr = bi;
++
++		return retVal;
++	}
++
++	T(YAFFS_TRACE_ERROR,
++			(TSTR("!!!!!!!!! Allocator out !!!!!!!!!!!!!!!!!" TENDSTR)));
++
++	return -1;
++}
++
++static int yaffs_GetErasedChunks(yaffs_Device *dev)
++{
++	int n;
++
++	n = dev->nErasedBlocks * dev->nChunksPerBlock;
++
++	if (dev->allocationBlock > 0)
++		n += (dev->nChunksPerBlock - dev->allocationPage);
++
++	return n;
++
++}
++
++static int yaffs_GarbageCollectBlock(yaffs_Device *dev, int block,
++		int wholeBlock)
++{
++	int oldChunk;
++	int newChunk;
++	int markNAND;
++	int retVal = YAFFS_OK;
++	int cleanups = 0;
++	int i;
++	int isCheckpointBlock;
++	int matchingChunk;
++	int maxCopies;
++
++	int chunksBefore = yaffs_GetErasedChunks(dev);
++	int chunksAfter;
++
++	yaffs_ExtendedTags tags;
++
++	yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, block);
++
++	yaffs_Object *object;
++
++	isCheckpointBlock = (bi->blockState == YAFFS_BLOCK_STATE_CHECKPOINT);
++
++	bi->blockState = YAFFS_BLOCK_STATE_COLLECTING;
++
++	T(YAFFS_TRACE_TRACING,
++			(TSTR("Collecting block %d, in use %d, shrink %d, wholeBlock %d" TENDSTR),
++			 block,
++			 bi->pagesInUse,
++			 bi->hasShrinkHeader,
++			 wholeBlock));
++
++	/*yaffs_VerifyFreeChunks(dev); */
++
++	bi->hasShrinkHeader = 0;	/* clear the flag so that the block can erase */
++
++	/* Take off the number of soft deleted entries because
++	 * they're going to get really deleted during GC.
++	 */
++	dev->nFreeChunks -= bi->softDeletions;
++
++	dev->isDoingGC = 1;
++
++	if (isCheckpointBlock ||
++			!yaffs_StillSomeChunkBits(dev, block)) {
++		T(YAFFS_TRACE_TRACING,
++				(TSTR
++				 ("Collecting block %d that has no chunks in use" TENDSTR),
++				 block));
++		yaffs_BlockBecameDirty(dev, block);
++	} else {
++
++		__u8 *buffer = yaffs_GetTempBuffer(dev, __LINE__);
++
++		yaffs_VerifyBlock(dev, bi, block);
++
++		maxCopies = (wholeBlock) ? dev->nChunksPerBlock : 10;
++		oldChunk = block * dev->nChunksPerBlock + dev->gcChunk;
++
++		for (/* init already done */;
++		     retVal == YAFFS_OK &&
++		     dev->gcChunk < dev->nChunksPerBlock &&
++		     (bi->blockState == YAFFS_BLOCK_STATE_COLLECTING) &&
++		     maxCopies > 0;
++		     dev->gcChunk++, oldChunk++) {
++			if (yaffs_CheckChunkBit(dev, block, dev->gcChunk)) {
++
++				/* This page is in use and might need to be copied off */
++
++				maxCopies--;
++
++				markNAND = 1;
++
++				yaffs_InitialiseTags(&tags);
++
++				yaffs_ReadChunkWithTagsFromNAND(dev, oldChunk,
++								buffer, &tags);
++
++				object =
++				    yaffs_FindObjectByNumber(dev,
++							     tags.objectId);
++
++				T(YAFFS_TRACE_GC_DETAIL,
++				  (TSTR
++				   ("Collecting chunk in block %d, %d %d %d " TENDSTR),
++				   dev->gcChunk, tags.objectId, tags.chunkId,
++				   tags.byteCount));
++
++				if (object && !yaffs_SkipVerification(dev)) {
++					if (tags.chunkId == 0)
++						matchingChunk = object->hdrChunk;
++					else if (object->softDeleted)
++						matchingChunk = oldChunk; /* Defeat the test */
++					else
++						matchingChunk = yaffs_FindChunkInFile(object, tags.chunkId, NULL);
++
++					if (oldChunk != matchingChunk)
++						T(YAFFS_TRACE_ERROR,
++						  (TSTR("gc: page in gc mismatch: %d %d %d %d"TENDSTR),
++						  oldChunk, matchingChunk, tags.objectId, tags.chunkId));
++
++				}
++
++				if (!object) {
++					T(YAFFS_TRACE_ERROR,
++					  (TSTR
++					   ("page %d in gc has no object: %d %d %d "
++					    TENDSTR), oldChunk,
++					    tags.objectId, tags.chunkId, tags.byteCount));
++				}
++
++				if (object &&
++				    object->deleted &&
++				    object->softDeleted &&
++				    tags.chunkId != 0) {
++					/* Data chunk in a soft deleted file, throw it away
++					 * It's a soft deleted data chunk,
++					 * No need to copy this, just forget about it and
++					 * fix up the object.
++					 */
++
++					object->nDataChunks--;
++
++					if (object->nDataChunks <= 0) {
++						/* remember to clean up the object */
++						dev->gcCleanupList[cleanups] =
++						    tags.objectId;
++						cleanups++;
++					}
++					markNAND = 0;
++				} else if (0) {
++					/* Todo object && object->deleted && object->nDataChunks == 0 */
++					/* Deleted object header with no data chunks.
++					 * Can be discarded and the file deleted.
++					 */
++					object->hdrChunk = 0;
++					yaffs_FreeTnode(object->myDev,
++							object->variant.
++							fileVariant.top);
++					object->variant.fileVariant.top = NULL;
++					yaffs_DoGenericObjectDeletion(object);
++
++				} else if (object) {
++					/* It's either a data chunk in a live file or
++					 * an ObjectHeader, so we're interested in it.
++					 * NB Need to keep the ObjectHeaders of deleted files
++					 * until the whole file has been deleted off
++					 */
++					tags.serialNumber++;
++
++					dev->nGCCopies++;
++
++					if (tags.chunkId == 0) {
++						/* It is an object Id,
++						 * We need to nuke the shrinkheader flags first
++						 * We no longer want the shrinkHeader flag since its work is done
++						 * and if it is left in place it will mess up scanning.
++						 */
++
++						yaffs_ObjectHeader *oh;
++						oh = (yaffs_ObjectHeader *)buffer;
++						oh->isShrink = 0;
++						tags.extraIsShrinkHeader = 0;
++
++						yaffs_VerifyObjectHeader(object, oh, &tags, 1);
++					}
++
++					newChunk =
++					    yaffs_WriteNewChunkWithTagsToNAND(dev, buffer, &tags, 1);
++
++					if (newChunk < 0) {
++						retVal = YAFFS_FAIL;
++					} else {
++
++						/* Ok, now fix up the Tnodes etc. */
++
++						if (tags.chunkId == 0) {
++							/* It's a header */
++							object->hdrChunk =  newChunk;
++							object->serial =   tags.serialNumber;
++						} else {
++							/* It's a data chunk */
++							yaffs_PutChunkIntoFile
++							    (object,
++							     tags.chunkId,
++							     newChunk, 0);
++						}
++					}
++				}
++
++				if (retVal == YAFFS_OK)
++					yaffs_DeleteChunk(dev, oldChunk, markNAND, __LINE__);
++
++			}
++		}
++
++		yaffs_ReleaseTempBuffer(dev, buffer, __LINE__);
++
++
++		/* Do any required cleanups */
++		for (i = 0; i < cleanups; i++) {
++			/* Time to delete the file too */
++			object =
++			    yaffs_FindObjectByNumber(dev,
++						     dev->gcCleanupList[i]);
++			if (object) {
++				yaffs_FreeTnode(dev,
++						object->variant.fileVariant.
++						top);
++				object->variant.fileVariant.top = NULL;
++				T(YAFFS_TRACE_GC,
++				  (TSTR
++				   ("yaffs: About to finally delete object %d"
++				    TENDSTR), object->objectId));
++				yaffs_DoGenericObjectDeletion(object);
++				object->myDev->nDeletedFiles--;
++			}
++
++		}
++
++	}
++
++	yaffs_VerifyCollectedBlock(dev, bi, block);
++
++	chunksAfter = yaffs_GetErasedChunks(dev);
++	if (chunksBefore >= chunksAfter) {
++		T(YAFFS_TRACE_GC,
++		  (TSTR
++		   ("gc did not increase free chunks before %d after %d"
++		    TENDSTR), chunksBefore, chunksAfter));
++	}
++
++	/* If the gc completed then clear the current gcBlock so that we find another. */
++	if (bi->blockState != YAFFS_BLOCK_STATE_COLLECTING) {
++		dev->gcBlock = -1;
++		dev->gcChunk = 0;
++	}
++
++	dev->isDoingGC = 0;
++
++	return retVal;
++}
++
++/* New garbage collector
++ * If we're very low on erased blocks then we do aggressive garbage collection
++ * otherwise we do "leisurely" garbage collection.
++ * Aggressive gc looks further (whole array) and will accept less dirty blocks.
++ * Passive gc only inspects smaller areas and will only accept more dirty blocks.
++ *
++ * The idea is to help clear out space in a more spread-out manner.
++ * Dunno if it really does anything useful.
++ */
++static int yaffs_CheckGarbageCollection(yaffs_Device *dev)
++{
++	int block;
++	int aggressive;
++	int gcOk = YAFFS_OK;
++	int maxTries = 0;
++
++	int checkpointBlockAdjust;
++
++	if (dev->isDoingGC) {
++		/* Bail out so we don't get recursive gc */
++		return YAFFS_OK;
++	}
++
++	/* This loop should pass the first time.
++	 * We'll only see looping here if the erase of the collected block fails.
++	 */
++
++	do {
++		maxTries++;
++
++		checkpointBlockAdjust = yaffs_CalcCheckpointBlocksRequired(dev) - dev->blocksInCheckpoint;
++		if (checkpointBlockAdjust < 0)
++			checkpointBlockAdjust = 0;
++
++		if (dev->nErasedBlocks < (dev->nReservedBlocks + checkpointBlockAdjust + 2)) {
++			/* We need a block soon...*/
++			aggressive = 1;
++		} else {
++			/* We're in no hurry */
++			aggressive = 0;
++		}
++
++		if (dev->gcBlock <= 0) {
++			dev->gcBlock = yaffs_FindBlockForGarbageCollection(dev, aggressive);
++			dev->gcChunk = 0;
++		}
++
++		block = dev->gcBlock;
++
++		if (block > 0) {
++			dev->garbageCollections++;
++			if (!aggressive)
++				dev->passiveGarbageCollections++;
++
++			T(YAFFS_TRACE_GC,
++			  (TSTR
++			   ("yaffs: GC erasedBlocks %d aggressive %d" TENDSTR),
++			   dev->nErasedBlocks, aggressive));
++
++			gcOk = yaffs_GarbageCollectBlock(dev, block, aggressive);
++		}
++
++		if (dev->nErasedBlocks < (dev->nReservedBlocks) && block > 0) {
++			T(YAFFS_TRACE_GC,
++			  (TSTR
++			   ("yaffs: GC !!!no reclaim!!! erasedBlocks %d after try %d block %d"
++			    TENDSTR), dev->nErasedBlocks, maxTries, block));
++		}
++	} while ((dev->nErasedBlocks < dev->nReservedBlocks) &&
++		 (block > 0) &&
++		 (maxTries < 2));
++
++	return aggressive ? gcOk : YAFFS_OK;
++}
++
++/*-------------------------  TAGS --------------------------------*/
++
++static int yaffs_TagsMatch(const yaffs_ExtendedTags *tags, int objectId,
++			   int chunkInObject)
++{
++	return (tags->chunkId == chunkInObject &&
++		tags->objectId == objectId && !tags->chunkDeleted) ? 1 : 0;
++
++}
++
++
++/*-------------------- Data file manipulation -----------------*/
++
++static int yaffs_FindChunkInFile(yaffs_Object *in, int chunkInInode,
++				 yaffs_ExtendedTags *tags)
++{
++	/*Get the Tnode, then get the level 0 offset chunk offset */
++	yaffs_Tnode *tn;
++	int theChunk = -1;
++	yaffs_ExtendedTags localTags;
++	int retVal = -1;
++
++	yaffs_Device *dev = in->myDev;
++
++	if (!tags) {
++		/* Passed a NULL, so use our own tags space */
++		tags = &localTags;
++	}
++
++	tn = yaffs_FindLevel0Tnode(dev, &in->variant.fileVariant, chunkInInode);
++
++	if (tn) {
++		theChunk = yaffs_GetChunkGroupBase(dev, tn, chunkInInode);
++
++		retVal =
++		    yaffs_FindChunkInGroup(dev, theChunk, tags, in->objectId,
++					   chunkInInode);
++	}
++	return retVal;
++}
++
++static int yaffs_FindAndDeleteChunkInFile(yaffs_Object *in, int chunkInInode,
++					  yaffs_ExtendedTags *tags)
++{
++	/* Get the Tnode, then get the level 0 offset chunk offset */
++	yaffs_Tnode *tn;
++	int theChunk = -1;
++	yaffs_ExtendedTags localTags;
++
++	yaffs_Device *dev = in->myDev;
++	int retVal = -1;
++
++	if (!tags) {
++		/* Passed a NULL, so use our own tags space */
++		tags = &localTags;
++	}
++
++	tn = yaffs_FindLevel0Tnode(dev, &in->variant.fileVariant, chunkInInode);
++
++	if (tn) {
++
++		theChunk = yaffs_GetChunkGroupBase(dev, tn, chunkInInode);
++
++		retVal =
++		    yaffs_FindChunkInGroup(dev, theChunk, tags, in->objectId,
++					   chunkInInode);
++
++		/* Delete the entry in the filestructure (if found) */
++		if (retVal != -1)
++			yaffs_PutLevel0Tnode(dev, tn, chunkInInode, 0);
++	}
++
++	return retVal;
++}
++
++#ifdef YAFFS_PARANOID
++
++static int yaffs_CheckFileSanity(yaffs_Object *in)
++{
++	int chunk;
++	int nChunks;
++	int fSize;
++	int failed = 0;
++	int objId;
++	yaffs_Tnode *tn;
++	yaffs_Tags localTags;
++	yaffs_Tags *tags = &localTags;
++	int theChunk;
++	int chunkDeleted;
++
++	if (in->variantType != YAFFS_OBJECT_TYPE_FILE)
++		return YAFFS_FAIL;
++
++	objId = in->objectId;
++	fSize = in->variant.fileVariant.fileSize;
++	nChunks =
++	    (fSize + in->myDev->nDataBytesPerChunk - 1) / in->myDev->nDataBytesPerChunk;
++
++	for (chunk = 1; chunk <= nChunks; chunk++) {
++		tn = yaffs_FindLevel0Tnode(in->myDev, &in->variant.fileVariant,
++					   chunk);
++
++		if (tn) {
++
++			theChunk = yaffs_GetChunkGroupBase(dev, tn, chunk);
++
++			if (yaffs_CheckChunkBits
++			    (dev, theChunk / dev->nChunksPerBlock,
++			     theChunk % dev->nChunksPerBlock)) {
++
++				yaffs_ReadChunkTagsFromNAND(in->myDev, theChunk,
++							    tags,
++							    &chunkDeleted);
++				if (yaffs_TagsMatch
++				    (tags, in->objectId, chunk, chunkDeleted)) {
++					/* found it; */
++
++				}
++			} else {
++
++				failed = 1;
++			}
++
++		} else {
++			/* T(("No level 0 found for %d\n", chunk)); */
++		}
++	}
++
++	return failed ? YAFFS_FAIL : YAFFS_OK;
++}
++
++#endif
++
++static int yaffs_PutChunkIntoFile(yaffs_Object *in, int chunkInInode,
++				  int chunkInNAND, int inScan)
++{
++	/* NB inScan is zero unless scanning.
++	 * For forward scanning, inScan is > 0;
++	 * for backward scanning inScan is < 0
++	 */
++
++	yaffs_Tnode *tn;
++	yaffs_Device *dev = in->myDev;
++	int existingChunk;
++	yaffs_ExtendedTags existingTags;
++	yaffs_ExtendedTags newTags;
++	unsigned existingSerial, newSerial;
++
++	if (in->variantType != YAFFS_OBJECT_TYPE_FILE) {
++		/* Just ignore an attempt at putting a chunk into a non-file during scanning
++		 * If it is not during Scanning then something went wrong!
++		 */
++		if (!inScan) {
++			T(YAFFS_TRACE_ERROR,
++			  (TSTR
++			   ("yaffs tragedy:attempt to put data chunk into a non-file"
++			    TENDSTR)));
++			YBUG();
++		}
++
++		yaffs_DeleteChunk(dev, chunkInNAND, 1, __LINE__);
++		return YAFFS_OK;
++	}
++
++	tn = yaffs_AddOrFindLevel0Tnode(dev,
++					&in->variant.fileVariant,
++					chunkInInode,
++					NULL);
++	if (!tn)
++		return YAFFS_FAIL;
++
++	existingChunk = yaffs_GetChunkGroupBase(dev, tn, chunkInInode);
++
++	if (inScan != 0) {
++		/* If we're scanning then we need to test for duplicates
++		 * NB This does not need to be efficient since it should only ever
++		 * happen when the power fails during a write, then only one
++		 * chunk should ever be affected.
++		 *
++		 * Correction for YAFFS2: This could happen quite a lot and we need to think about efficiency! TODO
++		 * Update: For backward scanning we don't need to re-read tags so this is quite cheap.
++		 */
++
++		if (existingChunk > 0) {
++			/* NB Right now existing chunk will not be real chunkId if the device >= 32MB
++			 *    thus we have to do a FindChunkInFile to get the real chunk id.
++			 *
++			 * We have a duplicate now we need to decide which one to use:
++			 *
++			 * Backwards scanning YAFFS2: The old one is what we use, dump the new one.
++			 * Forward scanning YAFFS2: The new one is what we use, dump the old one.
++			 * YAFFS1: Get both sets of tags and compare serial numbers.
++			 */
++
++			if (inScan > 0) {
++				/* Only do this for forward scanning */
++				yaffs_ReadChunkWithTagsFromNAND(dev,
++								chunkInNAND,
++								NULL, &newTags);
++
++				/* Do a proper find */
++				existingChunk =
++				    yaffs_FindChunkInFile(in, chunkInInode,
++							  &existingTags);
++			}
++
++			if (existingChunk <= 0) {
++				/*Hoosterman - how did this happen? */
++
++				T(YAFFS_TRACE_ERROR,
++				  (TSTR
++				   ("yaffs tragedy: existing chunk < 0 in scan"
++				    TENDSTR)));
++
++			}
++
++			/* NB The deleted flags should be false, otherwise the chunks will
++			 * not be loaded during a scan
++			 */
++
++			if (inScan > 0) {
++				newSerial = newTags.serialNumber;
++				existingSerial = existingTags.serialNumber;
++			}
++
++			if ((inScan > 0) &&
++			    (in->myDev->isYaffs2 ||
++			     existingChunk <= 0 ||
++			     ((existingSerial + 1) & 3) == newSerial)) {
++				/* Forward scanning.
++				 * Use new
++				 * Delete the old one and drop through to update the tnode
++				 */
++				yaffs_DeleteChunk(dev, existingChunk, 1,
++						  __LINE__);
++			} else {
++				/* Backward scanning or we want to use the existing one
++				 * Use existing.
++				 * Delete the new one and return early so that the tnode isn't changed
++				 */
++				yaffs_DeleteChunk(dev, chunkInNAND, 1,
++						  __LINE__);
++				return YAFFS_OK;
++			}
++		}
++
++	}
++
++	if (existingChunk == 0)
++		in->nDataChunks++;
++
++	yaffs_PutLevel0Tnode(dev, tn, chunkInInode, chunkInNAND);
++
++	return YAFFS_OK;
++}
++
++static int yaffs_ReadChunkDataFromObject(yaffs_Object *in, int chunkInInode,
++					__u8 *buffer)
++{
++	int chunkInNAND = yaffs_FindChunkInFile(in, chunkInInode, NULL);
++
++	if (chunkInNAND >= 0)
++		return yaffs_ReadChunkWithTagsFromNAND(in->myDev, chunkInNAND,
++						buffer, NULL);
++	else {
++		T(YAFFS_TRACE_NANDACCESS,
++		  (TSTR("Chunk %d not found zero instead" TENDSTR),
++		   chunkInNAND));
++		/* get sane (zero) data if you read a hole */
++		memset(buffer, 0, in->myDev->nDataBytesPerChunk);
++		return 0;
++	}
++
++}
++
++void yaffs_DeleteChunk(yaffs_Device *dev, int chunkId, int markNAND, int lyn)
++{
++	int block;
++	int page;
++	yaffs_ExtendedTags tags;
++	yaffs_BlockInfo *bi;
++
++	if (chunkId <= 0)
++		return;
++
++	dev->nDeletions++;
++	block = chunkId / dev->nChunksPerBlock;
++	page = chunkId % dev->nChunksPerBlock;
++
++
++	if (!yaffs_CheckChunkBit(dev, block, page))
++		T(YAFFS_TRACE_VERIFY,
++			(TSTR("Deleting invalid chunk %d"TENDSTR),
++			 chunkId));
++
++	bi = yaffs_GetBlockInfo(dev, block);
++
++	T(YAFFS_TRACE_DELETION,
++	  (TSTR("line %d delete of chunk %d" TENDSTR), lyn, chunkId));
++
++	if (markNAND &&
++	    bi->blockState != YAFFS_BLOCK_STATE_COLLECTING && !dev->isYaffs2) {
++
++		yaffs_InitialiseTags(&tags);
++
++		tags.chunkDeleted = 1;
++
++		yaffs_WriteChunkWithTagsToNAND(dev, chunkId, NULL, &tags);
++		yaffs_HandleUpdateChunk(dev, chunkId, &tags);
++	} else {
++		dev->nUnmarkedDeletions++;
++	}
++
++	/* Pull out of the management area.
++	 * If the whole block became dirty, this will kick off an erasure.
++	 */
++	if (bi->blockState == YAFFS_BLOCK_STATE_ALLOCATING ||
++	    bi->blockState == YAFFS_BLOCK_STATE_FULL ||
++	    bi->blockState == YAFFS_BLOCK_STATE_NEEDS_SCANNING ||
++	    bi->blockState == YAFFS_BLOCK_STATE_COLLECTING) {
++		dev->nFreeChunks++;
++
++		yaffs_ClearChunkBit(dev, block, page);
++
++		bi->pagesInUse--;
++
++		if (bi->pagesInUse == 0 &&
++		    !bi->hasShrinkHeader &&
++		    bi->blockState != YAFFS_BLOCK_STATE_ALLOCATING &&
++		    bi->blockState != YAFFS_BLOCK_STATE_NEEDS_SCANNING) {
++			yaffs_BlockBecameDirty(dev, block);
++		}
++
++	}
++
++}
++
++static int yaffs_WriteChunkDataToObject(yaffs_Object *in, int chunkInInode,
++					const __u8 *buffer, int nBytes,
++					int useReserve)
++{
++	/* Find old chunk Need to do this to get serial number
++	 * Write new one and patch into tree.
++	 * Invalidate old tags.
++	 */
++
++	int prevChunkId;
++	yaffs_ExtendedTags prevTags;
++
++	int newChunkId;
++	yaffs_ExtendedTags newTags;
++
++	yaffs_Device *dev = in->myDev;
++
++	yaffs_CheckGarbageCollection(dev);
++
++	/* Get the previous chunk at this location in the file if it exists */
++	prevChunkId = yaffs_FindChunkInFile(in, chunkInInode, &prevTags);
++
++	/* Set up new tags */
++	yaffs_InitialiseTags(&newTags);
++
++	newTags.chunkId = chunkInInode;
++	newTags.objectId = in->objectId;
++	newTags.serialNumber =
++	    (prevChunkId >= 0) ? prevTags.serialNumber + 1 : 1;
++	newTags.byteCount = nBytes;
++
++	if (nBytes < 1 || nBytes > dev->totalBytesPerChunk) {
++		T(YAFFS_TRACE_ERROR,
++		(TSTR("Writing %d bytes to chunk!!!!!!!!!" TENDSTR), nBytes));
++		YBUG();
++	}
++
++	newChunkId =
++	    yaffs_WriteNewChunkWithTagsToNAND(dev, buffer, &newTags,
++					      useReserve);
++
++	if (newChunkId >= 0) {
++		yaffs_PutChunkIntoFile(in, chunkInInode, newChunkId, 0);
++
++		if (prevChunkId >= 0)
++			yaffs_DeleteChunk(dev, prevChunkId, 1, __LINE__);
++
++		yaffs_CheckFileSanity(in);
++	}
++	return newChunkId;
++
++}
++
++/* UpdateObjectHeader updates the header on NAND for an object.
++ * If name is not NULL, then that new name is used.
++ */
++int yaffs_UpdateObjectHeader(yaffs_Object *in, const YCHAR *name, int force,
++			     int isShrink, int shadows)
++{
++
++	yaffs_BlockInfo *bi;
++
++	yaffs_Device *dev = in->myDev;
++
++	int prevChunkId;
++	int retVal = 0;
++	int result = 0;
++
++	int newChunkId;
++	yaffs_ExtendedTags newTags;
++	yaffs_ExtendedTags oldTags;
++
++	__u8 *buffer = NULL;
++	YCHAR oldName[YAFFS_MAX_NAME_LENGTH + 1];
++
++	yaffs_ObjectHeader *oh = NULL;
++
++	yaffs_strcpy(oldName, _Y("silly old name"));
++
++
++	if (!in->fake ||
++		in == dev->rootDir || /* The rootDir should also be saved */
++		force) {
++
++		yaffs_CheckGarbageCollection(dev);
++		yaffs_CheckObjectDetailsLoaded(in);
++
++		buffer = yaffs_GetTempBuffer(in->myDev, __LINE__);
++		oh = (yaffs_ObjectHeader *) buffer;
++
++		prevChunkId = in->hdrChunk;
++
++		if (prevChunkId > 0) {
++			result = yaffs_ReadChunkWithTagsFromNAND(dev, prevChunkId,
++							buffer, &oldTags);
++
++			yaffs_VerifyObjectHeader(in, oh, &oldTags, 0);
++
++			memcpy(oldName, oh->name, sizeof(oh->name));
++		}
++
++		memset(buffer, 0xFF, dev->nDataBytesPerChunk);
++
++		oh->type = in->variantType;
++		oh->yst_mode = in->yst_mode;
++		oh->shadowsObject = oh->inbandShadowsObject = shadows;
++
++#ifdef CONFIG_YAFFS_WINCE
++		oh->win_atime[0] = in->win_atime[0];
++		oh->win_ctime[0] = in->win_ctime[0];
++		oh->win_mtime[0] = in->win_mtime[0];
++		oh->win_atime[1] = in->win_atime[1];
++		oh->win_ctime[1] = in->win_ctime[1];
++		oh->win_mtime[1] = in->win_mtime[1];
++#else
++		oh->yst_uid = in->yst_uid;
++		oh->yst_gid = in->yst_gid;
++		oh->yst_atime = in->yst_atime;
++		oh->yst_mtime = in->yst_mtime;
++		oh->yst_ctime = in->yst_ctime;
++		oh->yst_rdev = in->yst_rdev;
++#endif
++		if (in->parent)
++			oh->parentObjectId = in->parent->objectId;
++		else
++			oh->parentObjectId = 0;
++
++		if (name && *name) {
++			memset(oh->name, 0, sizeof(oh->name));
++			yaffs_strncpy(oh->name, name, YAFFS_MAX_NAME_LENGTH);
++		} else if (prevChunkId >= 0)
++			memcpy(oh->name, oldName, sizeof(oh->name));
++		else
++			memset(oh->name, 0, sizeof(oh->name));
++
++		oh->isShrink = isShrink;
++
++		switch (in->variantType) {
++		case YAFFS_OBJECT_TYPE_UNKNOWN:
++			/* Should not happen */
++			break;
++		case YAFFS_OBJECT_TYPE_FILE:
++			oh->fileSize =
++			    (oh->parentObjectId == YAFFS_OBJECTID_DELETED
++			     || oh->parentObjectId ==
++			     YAFFS_OBJECTID_UNLINKED) ? 0 : in->variant.
++			    fileVariant.fileSize;
++			break;
++		case YAFFS_OBJECT_TYPE_HARDLINK:
++			oh->equivalentObjectId =
++			    in->variant.hardLinkVariant.equivalentObjectId;
++			break;
++		case YAFFS_OBJECT_TYPE_SPECIAL:
++			/* Do nothing */
++			break;
++		case YAFFS_OBJECT_TYPE_DIRECTORY:
++			/* Do nothing */
++			break;
++		case YAFFS_OBJECT_TYPE_SYMLINK:
++			yaffs_strncpy(oh->alias,
++				      in->variant.symLinkVariant.alias,
++				      YAFFS_MAX_ALIAS_LENGTH);
++			oh->alias[YAFFS_MAX_ALIAS_LENGTH] = 0;
++			break;
++		}
++
++		/* Tags */
++		yaffs_InitialiseTags(&newTags);
++		in->serial++;
++		newTags.chunkId = 0;
++		newTags.objectId = in->objectId;
++		newTags.serialNumber = in->serial;
++
++		/* Add extra info for file header */
++
++		newTags.extraHeaderInfoAvailable = 1;
++		newTags.extraParentObjectId = oh->parentObjectId;
++		newTags.extraFileLength = oh->fileSize;
++		newTags.extraIsShrinkHeader = oh->isShrink;
++		newTags.extraEquivalentObjectId = oh->equivalentObjectId;
++		newTags.extraShadows = (oh->shadowsObject > 0) ? 1 : 0;
++		newTags.extraObjectType = in->variantType;
++
++		yaffs_VerifyObjectHeader(in, oh, &newTags, 1);
++
++		/* Create new chunk in NAND */
++		newChunkId =
++		    yaffs_WriteNewChunkWithTagsToNAND(dev, buffer, &newTags,
++						      (prevChunkId >= 0) ? 1 : 0);
++
++		if (newChunkId >= 0) {
++
++			in->hdrChunk = newChunkId;
++
++			if (prevChunkId >= 0) {
++				yaffs_DeleteChunk(dev, prevChunkId, 1,
++						  __LINE__);
++			}
++
++			if (!yaffs_ObjectHasCachedWriteData(in))
++				in->dirty = 0;
++
++			/* If this was a shrink, then mark the block that the chunk lives on */
++			if (isShrink) {
++				bi = yaffs_GetBlockInfo(in->myDev,
++					newChunkId / in->myDev->nChunksPerBlock);
++				bi->hasShrinkHeader = 1;
++			}
++
++		}
++
++		retVal = newChunkId;
++
++	}
++
++	if (buffer)
++		yaffs_ReleaseTempBuffer(dev, buffer, __LINE__);
++
++	return retVal;
++}
++
++/*------------------------ Short Operations Cache ----------------------------------------
++ *   In many situations where there is no high level buffering (eg WinCE) a lot of
++ *   reads might be short sequential reads, and a lot of writes may be short
++ *   sequential writes. eg. scanning/writing a jpeg file.
++ *   In these cases, a short read/write cache can provide a huge performance benefit
++ *   with dumb-as-a-rock code.
++ *   In Linux, the page cache provides read buffering and the short op cache provides write
++ *   buffering.
++ *
++ *   There are a limited number (~10) of cache chunks per device so that we don't
++ *   need a very intelligent search.
++ */
++
++static int yaffs_ObjectHasCachedWriteData(yaffs_Object *obj)
++{
++	yaffs_Device *dev = obj->myDev;
++	int i;
++	yaffs_ChunkCache *cache;
++	int nCaches = obj->myDev->nShortOpCaches;
++
++	for (i = 0; i < nCaches; i++) {
++		cache = &dev->srCache[i];
++		if (cache->object == obj &&
++		    cache->dirty)
++			return 1;
++	}
++
++	return 0;
++}
++
++
++static void yaffs_FlushFilesChunkCache(yaffs_Object *obj)
++{
++	yaffs_Device *dev = obj->myDev;
++	int lowest = -99;	/* Stop compiler whining. */
++	int i;
++	yaffs_ChunkCache *cache;
++	int chunkWritten = 0;
++	int nCaches = obj->myDev->nShortOpCaches;
++
++	if (nCaches > 0) {
++		do {
++			cache = NULL;
++
++			/* Find the dirty cache for this object with the lowest chunk id. */
++			for (i = 0; i < nCaches; i++) {
++				if (dev->srCache[i].object == obj &&
++				    dev->srCache[i].dirty) {
++					if (!cache
++					    || dev->srCache[i].chunkId <
++					    lowest) {
++						cache = &dev->srCache[i];
++						lowest = cache->chunkId;
++					}
++				}
++			}
++
++			if (cache && !cache->locked) {
++				/* Write it out and free it up */
++
++				chunkWritten =
++				    yaffs_WriteChunkDataToObject(cache->object,
++								 cache->chunkId,
++								 cache->data,
++								 cache->nBytes,
++								 1);
++				cache->dirty = 0;
++				cache->object = NULL;
++			}
++
++		} while (cache && chunkWritten > 0);
++
++		if (cache) {
++			/* Hoosterman, disk full while writing cache out. */
++			T(YAFFS_TRACE_ERROR,
++			  (TSTR("yaffs tragedy: no space during cache write" TENDSTR)));
++
++		}
++	}
++
++}
++
++/*yaffs_FlushEntireDeviceCache(dev)
++ *
++ *
++ */
++
++void yaffs_FlushEntireDeviceCache(yaffs_Device *dev)
++{
++	yaffs_Object *obj;
++	int nCaches = dev->nShortOpCaches;
++	int i;
++
++	/* Find a dirty object in the cache and flush it...
++	 * until there are no further dirty objects.
++	 */
++	do {
++		obj = NULL;
++		for (i = 0; i < nCaches && !obj; i++) {
++			if (dev->srCache[i].object &&
++			    dev->srCache[i].dirty)
++				obj = dev->srCache[i].object;
++
++		}
++		if (obj)
++			yaffs_FlushFilesChunkCache(obj);
++
++	} while (obj);
++
++}
++
++
++/* Grab us a cache chunk for use.
++ * First look for an empty one.
++ * Then look for the least recently used non-dirty one.
++ * Then look for the least recently used dirty one...., flush and look again.
++ */
++static yaffs_ChunkCache *yaffs_GrabChunkCacheWorker(yaffs_Device *dev)
++{
++	int i;
++
++	if (dev->nShortOpCaches > 0) {
++		for (i = 0; i < dev->nShortOpCaches; i++) {
++			if (!dev->srCache[i].object)
++				return &dev->srCache[i];
++		}
++	}
++
++	return NULL;
++}
++
++static yaffs_ChunkCache *yaffs_GrabChunkCache(yaffs_Device *dev)
++{
++	yaffs_ChunkCache *cache;
++	yaffs_Object *theObj;
++	int usage;
++	int i;
++	int pushout;
++
++	if (dev->nShortOpCaches > 0) {
++		/* Try find a non-dirty one... */
++
++		cache = yaffs_GrabChunkCacheWorker(dev);
++
++		if (!cache) {
++			/* They were all dirty, find the least recently used object and flush
++			 * its cache, then  find again.
++			 * NB what's here is not very accurate, we actually flush the object
++			 * the last recently used page.
++			 */
++
++			/* With locking we can't assume we can use entry zero */
++
++			theObj = NULL;
++			usage = -1;
++			cache = NULL;
++			pushout = -1;
++
++			for (i = 0; i < dev->nShortOpCaches; i++) {
++				if (dev->srCache[i].object &&
++				    !dev->srCache[i].locked &&
++				    (dev->srCache[i].lastUse < usage || !cache)) {
++					usage = dev->srCache[i].lastUse;
++					theObj = dev->srCache[i].object;
++					cache = &dev->srCache[i];
++					pushout = i;
++				}
++			}
++
++			if (!cache || cache->dirty) {
++				/* Flush and try again */
++				yaffs_FlushFilesChunkCache(theObj);
++				cache = yaffs_GrabChunkCacheWorker(dev);
++			}
++
++		}
++		return cache;
++	} else
++		return NULL;
++
++}
++
++/* Find a cached chunk */
++static yaffs_ChunkCache *yaffs_FindChunkCache(const yaffs_Object *obj,
++					      int chunkId)
++{
++	yaffs_Device *dev = obj->myDev;
++	int i;
++	if (dev->nShortOpCaches > 0) {
++		for (i = 0; i < dev->nShortOpCaches; i++) {
++			if (dev->srCache[i].object == obj &&
++			    dev->srCache[i].chunkId == chunkId) {
++				dev->cacheHits++;
++
++				return &dev->srCache[i];
++			}
++		}
++	}
++	return NULL;
++}
++
++/* Mark the chunk for the least recently used algorithm */
++static void yaffs_UseChunkCache(yaffs_Device *dev, yaffs_ChunkCache *cache,
++				int isAWrite)
++{
++
++	if (dev->nShortOpCaches > 0) {
++		if (dev->srLastUse < 0 || dev->srLastUse > 100000000) {
++			/* Reset the cache usages */
++			int i;
++			for (i = 1; i < dev->nShortOpCaches; i++)
++				dev->srCache[i].lastUse = 0;
++
++			dev->srLastUse = 0;
++		}
++
++		dev->srLastUse++;
++
++		cache->lastUse = dev->srLastUse;
++
++		if (isAWrite)
++			cache->dirty = 1;
++	}
++}
++
++/* Invalidate a single cache page.
++ * Do this when a whole page gets written,
++ * ie the short cache for this page is no longer valid.
++ */
++static void yaffs_InvalidateChunkCache(yaffs_Object *object, int chunkId)
++{
++	if (object->myDev->nShortOpCaches > 0) {
++		yaffs_ChunkCache *cache = yaffs_FindChunkCache(object, chunkId);
++
++		if (cache)
++			cache->object = NULL;
++	}
++}
++
++/* Invalidate all the cache pages associated with this object
++ * Do this whenever the file is deleted or resized.
++ */
++static void yaffs_InvalidateWholeChunkCache(yaffs_Object *in)
++{
++	int i;
++	yaffs_Device *dev = in->myDev;
++
++	if (dev->nShortOpCaches > 0) {
++		/* Invalidate it. */
++		for (i = 0; i < dev->nShortOpCaches; i++) {
++			if (dev->srCache[i].object == in)
++				dev->srCache[i].object = NULL;
++		}
++	}
++}
++
++/*--------------------- Checkpointing --------------------*/
++
++
++static int yaffs_WriteCheckpointValidityMarker(yaffs_Device *dev, int head)
++{
++	yaffs_CheckpointValidity cp;
++
++	memset(&cp, 0, sizeof(cp));
++
++	cp.structType = sizeof(cp);
++	cp.magic = YAFFS_MAGIC;
++	cp.version = YAFFS_CHECKPOINT_VERSION;
++	cp.head = (head) ? 1 : 0;
++
++	return (yaffs_CheckpointWrite(dev, &cp, sizeof(cp)) == sizeof(cp)) ?
++		1 : 0;
++}
++
++static int yaffs_ReadCheckpointValidityMarker(yaffs_Device *dev, int head)
++{
++	yaffs_CheckpointValidity cp;
++	int ok;
++
++	ok = (yaffs_CheckpointRead(dev, &cp, sizeof(cp)) == sizeof(cp));
++
++	if (ok)
++		ok = (cp.structType == sizeof(cp)) &&
++		     (cp.magic == YAFFS_MAGIC) &&
++		     (cp.version == YAFFS_CHECKPOINT_VERSION) &&
++		     (cp.head == ((head) ? 1 : 0));
++	return ok ? 1 : 0;
++}
++
++static void yaffs_DeviceToCheckpointDevice(yaffs_CheckpointDevice *cp,
++					   yaffs_Device *dev)
++{
++	cp->nErasedBlocks = dev->nErasedBlocks;
++	cp->allocationBlock = dev->allocationBlock;
++	cp->allocationPage = dev->allocationPage;
++	cp->nFreeChunks = dev->nFreeChunks;
++
++	cp->nDeletedFiles = dev->nDeletedFiles;
++	cp->nUnlinkedFiles = dev->nUnlinkedFiles;
++	cp->nBackgroundDeletions = dev->nBackgroundDeletions;
++	cp->sequenceNumber = dev->sequenceNumber;
++	cp->oldestDirtySequence = dev->oldestDirtySequence;
++
++}
++
++static void yaffs_CheckpointDeviceToDevice(yaffs_Device *dev,
++					   yaffs_CheckpointDevice *cp)
++{
++	dev->nErasedBlocks = cp->nErasedBlocks;
++	dev->allocationBlock = cp->allocationBlock;
++	dev->allocationPage = cp->allocationPage;
++	dev->nFreeChunks = cp->nFreeChunks;
++
++	dev->nDeletedFiles = cp->nDeletedFiles;
++	dev->nUnlinkedFiles = cp->nUnlinkedFiles;
++	dev->nBackgroundDeletions = cp->nBackgroundDeletions;
++	dev->sequenceNumber = cp->sequenceNumber;
++	dev->oldestDirtySequence = cp->oldestDirtySequence;
++}
++
++
++static int yaffs_WriteCheckpointDevice(yaffs_Device *dev)
++{
++	yaffs_CheckpointDevice cp;
++	__u32 nBytes;
++	__u32 nBlocks = (dev->internalEndBlock - dev->internalStartBlock + 1);
++
++	int ok;
++
++	/* Write device runtime values*/
++	yaffs_DeviceToCheckpointDevice(&cp, dev);
++	cp.structType = sizeof(cp);
++
++	ok = (yaffs_CheckpointWrite(dev, &cp, sizeof(cp)) == sizeof(cp));
++
++	/* Write block info */
++	if (ok) {
++		nBytes = nBlocks * sizeof(yaffs_BlockInfo);
++		ok = (yaffs_CheckpointWrite(dev, dev->blockInfo, nBytes) == nBytes);
++	}
++
++	/* Write chunk bits */
++	if (ok) {
++		nBytes = nBlocks * dev->chunkBitmapStride;
++		ok = (yaffs_CheckpointWrite(dev, dev->chunkBits, nBytes) == nBytes);
++	}
++	return	 ok ? 1 : 0;
++
++}
++
++static int yaffs_ReadCheckpointDevice(yaffs_Device *dev)
++{
++	yaffs_CheckpointDevice cp;
++	__u32 nBytes;
++	__u32 nBlocks = (dev->internalEndBlock - dev->internalStartBlock + 1);
++
++	int ok;
++
++	ok = (yaffs_CheckpointRead(dev, &cp, sizeof(cp)) == sizeof(cp));
++	if (!ok)
++		return 0;
++
++	if (cp.structType != sizeof(cp))
++		return 0;
++
++
++	yaffs_CheckpointDeviceToDevice(dev, &cp);
++
++	nBytes = nBlocks * sizeof(yaffs_BlockInfo);
++
++	ok = (yaffs_CheckpointRead(dev, dev->blockInfo, nBytes) == nBytes);
++
++	if (!ok)
++		return 0;
++	nBytes = nBlocks * dev->chunkBitmapStride;
++
++	ok = (yaffs_CheckpointRead(dev, dev->chunkBits, nBytes) == nBytes);
++
++	return ok ? 1 : 0;
++}
++
++static void yaffs_ObjectToCheckpointObject(yaffs_CheckpointObject *cp,
++					   yaffs_Object *obj)
++{
++
++	cp->objectId = obj->objectId;
++	cp->parentId = (obj->parent) ? obj->parent->objectId : 0;
++	cp->hdrChunk = obj->hdrChunk;
++	cp->variantType = obj->variantType;
++	cp->deleted = obj->deleted;
++	cp->softDeleted = obj->softDeleted;
++	cp->unlinked = obj->unlinked;
++	cp->fake = obj->fake;
++	cp->renameAllowed = obj->renameAllowed;
++	cp->unlinkAllowed = obj->unlinkAllowed;
++	cp->serial = obj->serial;
++	cp->nDataChunks = obj->nDataChunks;
++
++	if (obj->variantType == YAFFS_OBJECT_TYPE_FILE)
++		cp->fileSizeOrEquivalentObjectId = obj->variant.fileVariant.fileSize;
++	else if (obj->variantType == YAFFS_OBJECT_TYPE_HARDLINK)
++		cp->fileSizeOrEquivalentObjectId = obj->variant.hardLinkVariant.equivalentObjectId;
++}
++
++static int yaffs_CheckpointObjectToObject(yaffs_Object *obj, yaffs_CheckpointObject *cp)
++{
++
++	yaffs_Object *parent;
++
++	if (obj->variantType != cp->variantType) {
++		T(YAFFS_TRACE_ERROR, (TSTR("Checkpoint read object %d type %d "
++			TCONT("chunk %d does not match existing object type %d")
++			TENDSTR), cp->objectId, cp->variantType, cp->hdrChunk,
++			obj->variantType));
++		return 0;
++	}
++
++	obj->objectId = cp->objectId;
++
++	if (cp->parentId)
++		parent = yaffs_FindOrCreateObjectByNumber(
++					obj->myDev,
++					cp->parentId,
++					YAFFS_OBJECT_TYPE_DIRECTORY);
++	else
++		parent = NULL;
++
++	if (parent) {
++		if (parent->variantType != YAFFS_OBJECT_TYPE_DIRECTORY) {
++			T(YAFFS_TRACE_ALWAYS, (TSTR("Checkpoint read object %d parent %d type %d"
++				TCONT(" chunk %d Parent type, %d, not directory")
++				TENDSTR),
++				cp->objectId, cp->parentId, cp->variantType,
++				cp->hdrChunk, parent->variantType));
++			return 0;
++		}
++		yaffs_AddObjectToDirectory(parent, obj);
++	}
++
++	obj->hdrChunk = cp->hdrChunk;
++	obj->variantType = cp->variantType;
++	obj->deleted = cp->deleted;
++	obj->softDeleted = cp->softDeleted;
++	obj->unlinked = cp->unlinked;
++	obj->fake = cp->fake;
++	obj->renameAllowed = cp->renameAllowed;
++	obj->unlinkAllowed = cp->unlinkAllowed;
++	obj->serial = cp->serial;
++	obj->nDataChunks = cp->nDataChunks;
++
++	if (obj->variantType == YAFFS_OBJECT_TYPE_FILE)
++		obj->variant.fileVariant.fileSize = cp->fileSizeOrEquivalentObjectId;
++	else if (obj->variantType == YAFFS_OBJECT_TYPE_HARDLINK)
++		obj->variant.hardLinkVariant.equivalentObjectId = cp->fileSizeOrEquivalentObjectId;
++
++	if (obj->hdrChunk > 0)
++		obj->lazyLoaded = 1;
++	return 1;
++}
++
++
++
++static int yaffs_CheckpointTnodeWorker(yaffs_Object *in, yaffs_Tnode *tn,
++					__u32 level, int chunkOffset)
++{
++	int i;
++	yaffs_Device *dev = in->myDev;
++	int ok = 1;
++	int tnodeSize = (dev->tnodeWidth * YAFFS_NTNODES_LEVEL0)/8;
++
++	if (tnodeSize < sizeof(yaffs_Tnode))
++		tnodeSize = sizeof(yaffs_Tnode);
++
++
++	if (tn) {
++		if (level > 0) {
++
++			for (i = 0; i < YAFFS_NTNODES_INTERNAL && ok; i++) {
++				if (tn->internal[i]) {
++					ok = yaffs_CheckpointTnodeWorker(in,
++							tn->internal[i],
++							level - 1,
++							(chunkOffset<<YAFFS_TNODES_INTERNAL_BITS) + i);
++				}
++			}
++		} else if (level == 0) {
++			__u32 baseOffset = chunkOffset <<  YAFFS_TNODES_LEVEL0_BITS;
++			ok = (yaffs_CheckpointWrite(dev, &baseOffset, sizeof(baseOffset)) == sizeof(baseOffset));
++			if (ok)
++				ok = (yaffs_CheckpointWrite(dev, tn, tnodeSize) == tnodeSize);
++		}
++	}
++
++	return ok;
++
++}
++
++static int yaffs_WriteCheckpointTnodes(yaffs_Object *obj)
++{
++	__u32 endMarker = ~0;
++	int ok = 1;
++
++	if (obj->variantType == YAFFS_OBJECT_TYPE_FILE) {
++		ok = yaffs_CheckpointTnodeWorker(obj,
++					    obj->variant.fileVariant.top,
++					    obj->variant.fileVariant.topLevel,
++					    0);
++		if (ok)
++			ok = (yaffs_CheckpointWrite(obj->myDev, &endMarker, sizeof(endMarker)) ==
++				sizeof(endMarker));
++	}
++
++	return ok ? 1 : 0;
++}
++
++static int yaffs_ReadCheckpointTnodes(yaffs_Object *obj)
++{
++	__u32 baseChunk;
++	int ok = 1;
++	yaffs_Device *dev = obj->myDev;
++	yaffs_FileStructure *fileStructPtr = &obj->variant.fileVariant;
++	yaffs_Tnode *tn;
++	int nread = 0;
++	int tnodeSize = (dev->tnodeWidth * YAFFS_NTNODES_LEVEL0)/8;
++
++	if (tnodeSize < sizeof(yaffs_Tnode))
++		tnodeSize = sizeof(yaffs_Tnode);
++
++	ok = (yaffs_CheckpointRead(dev, &baseChunk, sizeof(baseChunk)) == sizeof(baseChunk));
++
++	while (ok && (~baseChunk)) {
++		nread++;
++		/* Read level 0 tnode */
++
++
++		tn = yaffs_GetTnodeRaw(dev);
++		if (tn)
++			ok = (yaffs_CheckpointRead(dev, tn, tnodeSize) == tnodeSize);
++		else
++			ok = 0;
++
++		if (tn && ok)
++			ok = yaffs_AddOrFindLevel0Tnode(dev,
++							fileStructPtr,
++							baseChunk,
++							tn) ? 1 : 0;
++
++		if (ok)
++			ok = (yaffs_CheckpointRead(dev, &baseChunk, sizeof(baseChunk)) == sizeof(baseChunk));
++
++	}
++
++	T(YAFFS_TRACE_CHECKPOINT, (
++		TSTR("Checkpoint read tnodes %d records, last %d. ok %d" TENDSTR),
++		nread, baseChunk, ok));
++
++	return ok ? 1 : 0;
++}
++
++
++static int yaffs_WriteCheckpointObjects(yaffs_Device *dev)
++{
++	yaffs_Object *obj;
++	yaffs_CheckpointObject cp;
++	int i;
++	int ok = 1;
++	struct ylist_head *lh;
++
++
++	/* Iterate through the objects in each hash entry,
++	 * dumping them to the checkpointing stream.
++	 */
++
++	for (i = 0; ok &&  i <  YAFFS_NOBJECT_BUCKETS; i++) {
++		ylist_for_each(lh, &dev->objectBucket[i].list) {
++			if (lh) {
++				obj = ylist_entry(lh, yaffs_Object, hashLink);
++				if (!obj->deferedFree) {
++					yaffs_ObjectToCheckpointObject(&cp, obj);
++					cp.structType = sizeof(cp);
++
++					T(YAFFS_TRACE_CHECKPOINT, (
++						TSTR("Checkpoint write object %d parent %d type %d chunk %d obj addr %x" TENDSTR),
++						cp.objectId, cp.parentId, cp.variantType, cp.hdrChunk, (unsigned) obj));
++
++					ok = (yaffs_CheckpointWrite(dev, &cp, sizeof(cp)) == sizeof(cp));
++
++					if (ok && obj->variantType == YAFFS_OBJECT_TYPE_FILE)
++						ok = yaffs_WriteCheckpointTnodes(obj);
++				}
++			}
++		}
++	}
++
++	/* Dump end of list */
++	memset(&cp, 0xFF, sizeof(yaffs_CheckpointObject));
++	cp.structType = sizeof(cp);
++
++	if (ok)
++		ok = (yaffs_CheckpointWrite(dev, &cp, sizeof(cp)) == sizeof(cp));
++
++	return ok ? 1 : 0;
++}
++
++static int yaffs_ReadCheckpointObjects(yaffs_Device *dev)
++{
++	yaffs_Object *obj;
++	yaffs_CheckpointObject cp;
++	int ok = 1;
++	int done = 0;
++	yaffs_Object *hardList = NULL;
++
++	while (ok && !done) {
++		ok = (yaffs_CheckpointRead(dev, &cp, sizeof(cp)) == sizeof(cp));
++		if (cp.structType != sizeof(cp)) {
++			T(YAFFS_TRACE_CHECKPOINT, (TSTR("struct size %d instead of %d ok %d"TENDSTR),
++				cp.structType, sizeof(cp), ok));
++			ok = 0;
++		}
++
++		T(YAFFS_TRACE_CHECKPOINT, (TSTR("Checkpoint read object %d parent %d type %d chunk %d " TENDSTR),
++			cp.objectId, cp.parentId, cp.variantType, cp.hdrChunk));
++
++		if (ok && cp.objectId == ~0)
++			done = 1;
++		else if (ok) {
++			obj = yaffs_FindOrCreateObjectByNumber(dev, cp.objectId, cp.variantType);
++			if (obj) {
++				ok = yaffs_CheckpointObjectToObject(obj, &cp);
++				if (!ok)
++					break;
++				if (obj->variantType == YAFFS_OBJECT_TYPE_FILE) {
++					ok = yaffs_ReadCheckpointTnodes(obj);
++				} else if (obj->variantType == YAFFS_OBJECT_TYPE_HARDLINK) {
++					obj->hardLinks.next =
++						(struct ylist_head *) hardList;
++					hardList = obj;
++				}
++			} else
++				ok = 0;
++		}
++	}
++
++	if (ok)
++		yaffs_HardlinkFixup(dev, hardList);
++
++	return ok ? 1 : 0;
++}
++
++static int yaffs_WriteCheckpointSum(yaffs_Device *dev)
++{
++	__u32 checkpointSum;
++	int ok;
++
++	yaffs_GetCheckpointSum(dev, &checkpointSum);
++
++	ok = (yaffs_CheckpointWrite(dev, &checkpointSum, sizeof(checkpointSum)) == sizeof(checkpointSum));
++
++	if (!ok)
++		return 0;
++
++	return 1;
++}
++
++static int yaffs_ReadCheckpointSum(yaffs_Device *dev)
++{
++	__u32 checkpointSum0;
++	__u32 checkpointSum1;
++	int ok;
++
++	yaffs_GetCheckpointSum(dev, &checkpointSum0);
++
++	ok = (yaffs_CheckpointRead(dev, &checkpointSum1, sizeof(checkpointSum1)) == sizeof(checkpointSum1));
++
++	if (!ok)
++		return 0;
++
++	if (checkpointSum0 != checkpointSum1)
++		return 0;
++
++	return 1;
++}
++
++
++static int yaffs_WriteCheckpointData(yaffs_Device *dev)
++{
++	int ok = 1;
++
++	if (dev->skipCheckpointWrite || !dev->isYaffs2) {
++		T(YAFFS_TRACE_CHECKPOINT, (TSTR("skipping checkpoint write" TENDSTR)));
++		ok = 0;
++	}
++
++	if (ok)
++		ok = yaffs_CheckpointOpen(dev, 1);
++
++	if (ok) {
++		T(YAFFS_TRACE_CHECKPOINT, (TSTR("write checkpoint validity" TENDSTR)));
++		ok = yaffs_WriteCheckpointValidityMarker(dev, 1);
++	}
++	if (ok) {
++		T(YAFFS_TRACE_CHECKPOINT, (TSTR("write checkpoint device" TENDSTR)));
++		ok = yaffs_WriteCheckpointDevice(dev);
++	}
++	if (ok) {
++		T(YAFFS_TRACE_CHECKPOINT, (TSTR("write checkpoint objects" TENDSTR)));
++		ok = yaffs_WriteCheckpointObjects(dev);
++	}
++	if (ok) {
++		T(YAFFS_TRACE_CHECKPOINT, (TSTR("write checkpoint validity" TENDSTR)));
++		ok = yaffs_WriteCheckpointValidityMarker(dev, 0);
++	}
++
++	if (ok)
++		ok = yaffs_WriteCheckpointSum(dev);
++
++	if (!yaffs_CheckpointClose(dev))
++		ok = 0;
++
++	if (ok)
++		dev->isCheckpointed = 1;
++	else
++		dev->isCheckpointed = 0;
++
++	return dev->isCheckpointed;
++}
++
++static int yaffs_ReadCheckpointData(yaffs_Device *dev)
++{
++	int ok = 1;
++
++	if (dev->skipCheckpointRead || !dev->isYaffs2) {
++		T(YAFFS_TRACE_CHECKPOINT, (TSTR("skipping checkpoint read" TENDSTR)));
++		ok = 0;
++	}
++
++	if (ok)
++		ok = yaffs_CheckpointOpen(dev, 0); /* open for read */
++
++	if (ok) {
++		T(YAFFS_TRACE_CHECKPOINT, (TSTR("read checkpoint validity" TENDSTR)));
++		ok = yaffs_ReadCheckpointValidityMarker(dev, 1);
++	}
++	if (ok) {
++		T(YAFFS_TRACE_CHECKPOINT, (TSTR("read checkpoint device" TENDSTR)));
++		ok = yaffs_ReadCheckpointDevice(dev);
++	}
++	if (ok) {
++		T(YAFFS_TRACE_CHECKPOINT, (TSTR("read checkpoint objects" TENDSTR)));
++		ok = yaffs_ReadCheckpointObjects(dev);
++	}
++	if (ok) {
++		T(YAFFS_TRACE_CHECKPOINT, (TSTR("read checkpoint validity" TENDSTR)));
++		ok = yaffs_ReadCheckpointValidityMarker(dev, 0);
++	}
++
++	if (ok) {
++		ok = yaffs_ReadCheckpointSum(dev);
++		T(YAFFS_TRACE_CHECKPOINT, (TSTR("read checkpoint checksum %d" TENDSTR), ok));
++	}
++
++	if (!yaffs_CheckpointClose(dev))
++		ok = 0;
++
++	if (ok)
++		dev->isCheckpointed = 1;
++	else
++		dev->isCheckpointed = 0;
++
++	return ok ? 1 : 0;
++
++}
++
++static void yaffs_InvalidateCheckpoint(yaffs_Device *dev)
++{
++	if (dev->isCheckpointed ||
++			dev->blocksInCheckpoint > 0) {
++		dev->isCheckpointed = 0;
++		yaffs_CheckpointInvalidateStream(dev);
++		if (dev->superBlock && dev->markSuperBlockDirty)
++			dev->markSuperBlockDirty(dev->superBlock);
++	}
++}
++
++
++int yaffs_CheckpointSave(yaffs_Device *dev)
++{
++
++	T(YAFFS_TRACE_CHECKPOINT, (TSTR("save entry: isCheckpointed %d"TENDSTR), dev->isCheckpointed));
++
++	yaffs_VerifyObjects(dev);
++	yaffs_VerifyBlocks(dev);
++	yaffs_VerifyFreeChunks(dev);
++
++	if (!dev->isCheckpointed) {
++		yaffs_InvalidateCheckpoint(dev);
++		yaffs_WriteCheckpointData(dev);
++	}
++
++	T(YAFFS_TRACE_ALWAYS, (TSTR("save exit: isCheckpointed %d"TENDSTR), dev->isCheckpointed));
++
++	return dev->isCheckpointed;
++}
++
++int yaffs_CheckpointRestore(yaffs_Device *dev)
++{
++	int retval;
++	T(YAFFS_TRACE_CHECKPOINT, (TSTR("restore entry: isCheckpointed %d"TENDSTR), dev->isCheckpointed));
++
++	retval = yaffs_ReadCheckpointData(dev);
++
++	if (dev->isCheckpointed) {
++		yaffs_VerifyObjects(dev);
++		yaffs_VerifyBlocks(dev);
++		yaffs_VerifyFreeChunks(dev);
++	}
++
++	T(YAFFS_TRACE_CHECKPOINT, (TSTR("restore exit: isCheckpointed %d"TENDSTR), dev->isCheckpointed));
++
++	return retval;
++}
++
++/*--------------------- File read/write ------------------------
++ * Read and write have very similar structures.
++ * In general the read/write has three parts to it
++ * An incomplete chunk to start with (if the read/write is not chunk-aligned)
++ * Some complete chunks
++ * An incomplete chunk to end off with
++ *
++ * Curve-balls: the first chunk might also be the last chunk.
++ */
++
++int yaffs_ReadDataFromFile(yaffs_Object *in, __u8 *buffer, loff_t offset,
++			int nBytes)
++{
++
++	int chunk;
++	__u32 start;
++	int nToCopy;
++	int n = nBytes;
++	int nDone = 0;
++	yaffs_ChunkCache *cache;
++
++	yaffs_Device *dev;
++
++	dev = in->myDev;
++
++	while (n > 0) {
++		/* chunk = offset / dev->nDataBytesPerChunk + 1; */
++		/* start = offset % dev->nDataBytesPerChunk; */
++		yaffs_AddrToChunk(dev, offset, &chunk, &start);
++		chunk++;
++
++		/* OK now check for the curveball where the start and end are in
++		 * the same chunk.
++		 */
++		if ((start + n) < dev->nDataBytesPerChunk)
++			nToCopy = n;
++		else
++			nToCopy = dev->nDataBytesPerChunk - start;
++
++		cache = yaffs_FindChunkCache(in, chunk);
++
++		/* If the chunk is already in the cache or it is less than a whole chunk
++		 * or we're using inband tags then use the cache (if there is caching)
++		 * else bypass the cache.
++		 */
++		if (cache || nToCopy != dev->nDataBytesPerChunk || dev->inbandTags) {
++			if (dev->nShortOpCaches > 0) {
++
++				/* If we can't find the data in the cache, then load it up. */
++
++				if (!cache) {
++					cache = yaffs_GrabChunkCache(in->myDev);
++					cache->object = in;
++					cache->chunkId = chunk;
++					cache->dirty = 0;
++					cache->locked = 0;
++					yaffs_ReadChunkDataFromObject(in, chunk,
++								      cache->
++								      data);
++					cache->nBytes = 0;
++				}
++
++				yaffs_UseChunkCache(dev, cache, 0);
++
++				cache->locked = 1;
++
++
++				memcpy(buffer, &cache->data[start], nToCopy);
++
++				cache->locked = 0;
++			} else {
++				/* Read into the local buffer then copy..*/
++
++				__u8 *localBuffer =
++				    yaffs_GetTempBuffer(dev, __LINE__);
++				yaffs_ReadChunkDataFromObject(in, chunk,
++							      localBuffer);
++
++				memcpy(buffer, &localBuffer[start], nToCopy);
++
++
++				yaffs_ReleaseTempBuffer(dev, localBuffer,
++							__LINE__);
++			}
++
++		} else {
++
++			/* A full chunk. Read directly into the supplied buffer. */
++			yaffs_ReadChunkDataFromObject(in, chunk, buffer);
++
++		}
++
++		n -= nToCopy;
++		offset += nToCopy;
++		buffer += nToCopy;
++		nDone += nToCopy;
++
++	}
++
++	return nDone;
++}
++
++int yaffs_WriteDataToFile(yaffs_Object *in, const __u8 *buffer, loff_t offset,
++			int nBytes, int writeThrough)
++{
++
++	int chunk;
++	__u32 start;
++	int nToCopy;
++	int n = nBytes;
++	int nDone = 0;
++	int nToWriteBack;
++	int startOfWrite = offset;
++	int chunkWritten = 0;
++	__u32 nBytesRead;
++	__u32 chunkStart;
++
++	yaffs_Device *dev;
++
++	dev = in->myDev;
++
++	while (n > 0 && chunkWritten >= 0) {
++		/* chunk = offset / dev->nDataBytesPerChunk + 1; */
++		/* start = offset % dev->nDataBytesPerChunk; */
++		yaffs_AddrToChunk(dev, offset, &chunk, &start);
++
++		if (chunk * dev->nDataBytesPerChunk + start != offset ||
++				start >= dev->nDataBytesPerChunk) {
++			T(YAFFS_TRACE_ERROR, (
++			   TSTR("AddrToChunk of offset %d gives chunk %d start %d"
++			   TENDSTR),
++			   (int)offset, chunk, start));
++		}
++		chunk++;
++
++		/* OK now check for the curveball where the start and end are in
++		 * the same chunk.
++		 */
++
++		if ((start + n) < dev->nDataBytesPerChunk) {
++			nToCopy = n;
++
++			/* Now folks, to calculate how many bytes to write back....
++			 * If we're overwriting and not writing to then end of file then
++			 * we need to write back as much as was there before.
++			 */
++
++			chunkStart = ((chunk - 1) * dev->nDataBytesPerChunk);
++
++			if (chunkStart > in->variant.fileVariant.fileSize)
++				nBytesRead = 0; /* Past end of file */
++			else
++				nBytesRead = in->variant.fileVariant.fileSize - chunkStart;
++
++			if (nBytesRead > dev->nDataBytesPerChunk)
++				nBytesRead = dev->nDataBytesPerChunk;
++
++			nToWriteBack =
++			    (nBytesRead >
++			     (start + n)) ? nBytesRead : (start + n);
++
++			if (nToWriteBack < 0 || nToWriteBack > dev->nDataBytesPerChunk)
++				YBUG();
++
++		} else {
++			nToCopy = dev->nDataBytesPerChunk - start;
++			nToWriteBack = dev->nDataBytesPerChunk;
++		}
++
++		if (nToCopy != dev->nDataBytesPerChunk || dev->inbandTags) {
++			/* An incomplete start or end chunk (or maybe both start and end chunk),
++			 * or we're using inband tags, so we want to use the cache buffers.
++			 */
++			if (dev->nShortOpCaches > 0) {
++				yaffs_ChunkCache *cache;
++				/* If we can't find the data in the cache, then load the cache */
++				cache = yaffs_FindChunkCache(in, chunk);
++
++				if (!cache
++				    && yaffs_CheckSpaceForAllocation(in->
++								     myDev)) {
++					cache = yaffs_GrabChunkCache(in->myDev);
++					cache->object = in;
++					cache->chunkId = chunk;
++					cache->dirty = 0;
++					cache->locked = 0;
++					yaffs_ReadChunkDataFromObject(in, chunk,
++								      cache->
++								      data);
++				} else if (cache &&
++					!cache->dirty &&
++					!yaffs_CheckSpaceForAllocation(in->myDev)) {
++					/* Drop the cache if it was a read cache item and
++					 * no space check has been made for it.
++					 */
++					 cache = NULL;
++				}
++
++				if (cache) {
++					yaffs_UseChunkCache(dev, cache, 1);
++					cache->locked = 1;
++
++
++					memcpy(&cache->data[start], buffer,
++					       nToCopy);
++
++
++					cache->locked = 0;
++					cache->nBytes = nToWriteBack;
++
++					if (writeThrough) {
++						chunkWritten =
++						    yaffs_WriteChunkDataToObject
++						    (cache->object,
++						     cache->chunkId,
++						     cache->data, cache->nBytes,
++						     1);
++						cache->dirty = 0;
++					}
++
++				} else {
++					chunkWritten = -1;	/* fail the write */
++				}
++			} else {
++				/* An incomplete start or end chunk (or maybe both start and end chunk)
++				 * Read into the local buffer then copy, then copy over and write back.
++				 */
++
++				__u8 *localBuffer =
++				    yaffs_GetTempBuffer(dev, __LINE__);
++
++				yaffs_ReadChunkDataFromObject(in, chunk,
++							      localBuffer);
++
++
++
++				memcpy(&localBuffer[start], buffer, nToCopy);
++
++				chunkWritten =
++				    yaffs_WriteChunkDataToObject(in, chunk,
++								 localBuffer,
++								 nToWriteBack,
++								 0);
++
++				yaffs_ReleaseTempBuffer(dev, localBuffer,
++							__LINE__);
++
++			}
++
++		} else {
++			/* A full chunk. Write directly from the supplied buffer. */
++
++
++
++			chunkWritten =
++			    yaffs_WriteChunkDataToObject(in, chunk, buffer,
++							 dev->nDataBytesPerChunk,
++							 0);
++
++			/* Since we've overwritten the cached data, we better invalidate it. */
++			yaffs_InvalidateChunkCache(in, chunk);
++		}
++
++		if (chunkWritten >= 0) {
++			n -= nToCopy;
++			offset += nToCopy;
++			buffer += nToCopy;
++			nDone += nToCopy;
++		}
++
++	}
++
++	/* Update file object */
++
++	if ((startOfWrite + nDone) > in->variant.fileVariant.fileSize)
++		in->variant.fileVariant.fileSize = (startOfWrite + nDone);
++
++	in->dirty = 1;
++
++	return nDone;
++}
++
++
++/* ---------------------- File resizing stuff ------------------ */
++
++static void yaffs_PruneResizedChunks(yaffs_Object *in, int newSize)
++{
++
++	yaffs_Device *dev = in->myDev;
++	int oldFileSize = in->variant.fileVariant.fileSize;
++
++	int lastDel = 1 + (oldFileSize - 1) / dev->nDataBytesPerChunk;
++
++	int startDel = 1 + (newSize + dev->nDataBytesPerChunk - 1) /
++	    dev->nDataBytesPerChunk;
++	int i;
++	int chunkId;
++
++	/* Delete backwards so that we don't end up with holes if
++	 * power is lost part-way through the operation.
++	 */
++	for (i = lastDel; i >= startDel; i--) {
++		/* NB this could be optimised somewhat,
++		 * eg. could retrieve the tags and write them without
++		 * using yaffs_DeleteChunk
++		 */
++
++		chunkId = yaffs_FindAndDeleteChunkInFile(in, i, NULL);
++		if (chunkId > 0) {
++			if (chunkId <
++			    (dev->internalStartBlock * dev->nChunksPerBlock)
++			    || chunkId >=
++			    ((dev->internalEndBlock +
++			      1) * dev->nChunksPerBlock)) {
++				T(YAFFS_TRACE_ALWAYS,
++				  (TSTR("Found daft chunkId %d for %d" TENDSTR),
++				   chunkId, i));
++			} else {
++				in->nDataChunks--;
++				yaffs_DeleteChunk(dev, chunkId, 1, __LINE__);
++			}
++		}
++	}
++
++}
++
++int yaffs_ResizeFile(yaffs_Object *in, loff_t newSize)
++{
++
++	int oldFileSize = in->variant.fileVariant.fileSize;
++	__u32 newSizeOfPartialChunk;
++	int newFullChunks;
++
++	yaffs_Device *dev = in->myDev;
++
++	yaffs_AddrToChunk(dev, newSize, &newFullChunks, &newSizeOfPartialChunk);
++
++	yaffs_FlushFilesChunkCache(in);
++	yaffs_InvalidateWholeChunkCache(in);
++
++	yaffs_CheckGarbageCollection(dev);
++
++	if (in->variantType != YAFFS_OBJECT_TYPE_FILE)
++		return YAFFS_FAIL;
++
++	if (newSize == oldFileSize)
++		return YAFFS_OK;
++
++	if (newSize < oldFileSize) {
++
++		yaffs_PruneResizedChunks(in, newSize);
++
++		if (newSizeOfPartialChunk != 0) {
++			int lastChunk = 1 + newFullChunks;
++
++			__u8 *localBuffer = yaffs_GetTempBuffer(dev, __LINE__);
++
++			/* Got to read and rewrite the last chunk with its new size and zero pad */
++			yaffs_ReadChunkDataFromObject(in, lastChunk,
++						      localBuffer);
++
++			memset(localBuffer + newSizeOfPartialChunk, 0,
++			       dev->nDataBytesPerChunk - newSizeOfPartialChunk);
++
++			yaffs_WriteChunkDataToObject(in, lastChunk, localBuffer,
++						     newSizeOfPartialChunk, 1);
++
++			yaffs_ReleaseTempBuffer(dev, localBuffer, __LINE__);
++		}
++
++		in->variant.fileVariant.fileSize = newSize;
++
++		yaffs_PruneFileStructure(dev, &in->variant.fileVariant);
++	} else {
++		/* newsSize > oldFileSize */
++		in->variant.fileVariant.fileSize = newSize;
++	}
++
++
++	/* Write a new object header.
++	 * show we've shrunk the file, if need be
++	 * Do this only if the file is not in the deleted directories.
++	 */
++	if (in->parent &&
++	    in->parent->objectId != YAFFS_OBJECTID_UNLINKED &&
++	    in->parent->objectId != YAFFS_OBJECTID_DELETED)
++		yaffs_UpdateObjectHeader(in, NULL, 0,
++					 (newSize < oldFileSize) ? 1 : 0, 0);
++
++	return YAFFS_OK;
++}
++
++loff_t yaffs_GetFileSize(yaffs_Object *obj)
++{
++	obj = yaffs_GetEquivalentObject(obj);
++
++	switch (obj->variantType) {
++	case YAFFS_OBJECT_TYPE_FILE:
++		return obj->variant.fileVariant.fileSize;
++	case YAFFS_OBJECT_TYPE_SYMLINK:
++		return yaffs_strlen(obj->variant.symLinkVariant.alias);
++	default:
++		return 0;
++	}
++}
++
++
++
++int yaffs_FlushFile(yaffs_Object *in, int updateTime)
++{
++	int retVal;
++	if (in->dirty) {
++		yaffs_FlushFilesChunkCache(in);
++		if (updateTime) {
++#ifdef CONFIG_YAFFS_WINCE
++			yfsd_WinFileTimeNow(in->win_mtime);
++#else
++
++			in->yst_mtime = Y_CURRENT_TIME;
++
++#endif
++		}
++
++		retVal = (yaffs_UpdateObjectHeader(in, NULL, 0, 0, 0) >=
++			0) ? YAFFS_OK : YAFFS_FAIL;
++	} else {
++		retVal = YAFFS_OK;
++	}
++
++	return retVal;
++
++}
++
++static int yaffs_DoGenericObjectDeletion(yaffs_Object *in)
++{
++
++	/* First off, invalidate the file's data in the cache, without flushing. */
++	yaffs_InvalidateWholeChunkCache(in);
++
++	if (in->myDev->isYaffs2 && (in->parent != in->myDev->deletedDir)) {
++		/* Move to the unlinked directory so we have a record that it was deleted. */
++		yaffs_ChangeObjectName(in, in->myDev->deletedDir, _Y("deleted"), 0, 0);
++
++	}
++
++	yaffs_RemoveObjectFromDirectory(in);
++	yaffs_DeleteChunk(in->myDev, in->hdrChunk, 1, __LINE__);
++	in->hdrChunk = 0;
++
++	yaffs_FreeObject(in);
++	return YAFFS_OK;
++
++}
++
++/* yaffs_DeleteFile deletes the whole file data
++ * and the inode associated with the file.
++ * It does not delete the links associated with the file.
++ */
++static int yaffs_UnlinkFileIfNeeded(yaffs_Object *in)
++{
++
++	int retVal;
++	int immediateDeletion = 0;
++
++#ifdef __KERNEL__
++	if (!in->myInode)
++		immediateDeletion = 1;
++#else
++	if (in->inUse <= 0)
++		immediateDeletion = 1;
++#endif
++
++	if (immediateDeletion) {
++		retVal =
++		    yaffs_ChangeObjectName(in, in->myDev->deletedDir,
++					   _Y("deleted"), 0, 0);
++		T(YAFFS_TRACE_TRACING,
++		  (TSTR("yaffs: immediate deletion of file %d" TENDSTR),
++		   in->objectId));
++		in->deleted = 1;
++		in->myDev->nDeletedFiles++;
++		if (1 || in->myDev->isYaffs2)
++			yaffs_ResizeFile(in, 0);
++		yaffs_SoftDeleteFile(in);
++	} else {
++		retVal =
++		    yaffs_ChangeObjectName(in, in->myDev->unlinkedDir,
++					   _Y("unlinked"), 0, 0);
++	}
++
++
++	return retVal;
++}
++
++int yaffs_DeleteFile(yaffs_Object *in)
++{
++	int retVal = YAFFS_OK;
++	int deleted = in->deleted;
++
++	yaffs_ResizeFile(in, 0);
++
++	if (in->nDataChunks > 0) {
++		/* Use soft deletion if there is data in the file.
++		 * That won't be the case if it has been resized to zero.
++		 */
++		if (!in->unlinked)
++			retVal = yaffs_UnlinkFileIfNeeded(in);
++
++		if (retVal == YAFFS_OK && in->unlinked && !in->deleted) {
++			in->deleted = 1;
++			deleted = 1;
++			in->myDev->nDeletedFiles++;
++			yaffs_SoftDeleteFile(in);
++		}
++		return deleted ? YAFFS_OK : YAFFS_FAIL;
++	} else {
++		/* The file has no data chunks so we toss it immediately */
++		yaffs_FreeTnode(in->myDev, in->variant.fileVariant.top);
++		in->variant.fileVariant.top = NULL;
++		yaffs_DoGenericObjectDeletion(in);
++
++		return YAFFS_OK;
++	}
++}
++
++static int yaffs_DeleteDirectory(yaffs_Object *in)
++{
++	/* First check that the directory is empty. */
++	if (ylist_empty(&in->variant.directoryVariant.children))
++		return yaffs_DoGenericObjectDeletion(in);
++
++	return YAFFS_FAIL;
++
++}
++
++static int yaffs_DeleteSymLink(yaffs_Object *in)
++{
++	YFREE(in->variant.symLinkVariant.alias);
++
++	return yaffs_DoGenericObjectDeletion(in);
++}
++
++static int yaffs_DeleteHardLink(yaffs_Object *in)
++{
++	/* remove this hardlink from the list assocaited with the equivalent
++	 * object
++	 */
++	ylist_del_init(&in->hardLinks);
++	return yaffs_DoGenericObjectDeletion(in);
++}
++
++int yaffs_DeleteObject(yaffs_Object *obj)
++{
++int retVal = -1;
++	switch (obj->variantType) {
++	case YAFFS_OBJECT_TYPE_FILE:
++		retVal = yaffs_DeleteFile(obj);
++		break;
++	case YAFFS_OBJECT_TYPE_DIRECTORY:
++		return yaffs_DeleteDirectory(obj);
++		break;
++	case YAFFS_OBJECT_TYPE_SYMLINK:
++		retVal = yaffs_DeleteSymLink(obj);
++		break;
++	case YAFFS_OBJECT_TYPE_HARDLINK:
++		retVal = yaffs_DeleteHardLink(obj);
++		break;
++	case YAFFS_OBJECT_TYPE_SPECIAL:
++		retVal = yaffs_DoGenericObjectDeletion(obj);
++		break;
++	case YAFFS_OBJECT_TYPE_UNKNOWN:
++		retVal = 0;
++		break;		/* should not happen. */
++	}
++
++	return retVal;
++}
++
++static int yaffs_UnlinkWorker(yaffs_Object *obj)
++{
++
++	int immediateDeletion = 0;
++
++#ifdef __KERNEL__
++	if (!obj->myInode)
++		immediateDeletion = 1;
++#else
++	if (obj->inUse <= 0)
++		immediateDeletion = 1;
++#endif
++
++	if (obj->variantType == YAFFS_OBJECT_TYPE_HARDLINK) {
++		return yaffs_DeleteHardLink(obj);
++	} else if (!ylist_empty(&obj->hardLinks)) {
++		/* Curve ball: We're unlinking an object that has a hardlink.
++		 *
++		 * This problem arises because we are not strictly following
++		 * The Linux link/inode model.
++		 *
++		 * We can't really delete the object.
++		 * Instead, we do the following:
++		 * - Select a hardlink.
++		 * - Unhook it from the hard links
++		 * - Unhook it from its parent directory (so that the rename can work)
++		 * - Rename the object to the hardlink's name.
++		 * - Delete the hardlink
++		 */
++
++		yaffs_Object *hl;
++		int retVal;
++		YCHAR name[YAFFS_MAX_NAME_LENGTH + 1];
++
++		hl = ylist_entry(obj->hardLinks.next, yaffs_Object, hardLinks);
++
++		ylist_del_init(&hl->hardLinks);
++		ylist_del_init(&hl->siblings);
++
++		yaffs_GetObjectName(hl, name, YAFFS_MAX_NAME_LENGTH + 1);
++
++		retVal = yaffs_ChangeObjectName(obj, hl->parent, name, 0, 0);
++
++		if (retVal == YAFFS_OK)
++			retVal = yaffs_DoGenericObjectDeletion(hl);
++
++		return retVal;
++
++	} else if (immediateDeletion) {
++		switch (obj->variantType) {
++		case YAFFS_OBJECT_TYPE_FILE:
++			return yaffs_DeleteFile(obj);
++			break;
++		case YAFFS_OBJECT_TYPE_DIRECTORY:
++			return yaffs_DeleteDirectory(obj);
++			break;
++		case YAFFS_OBJECT_TYPE_SYMLINK:
++			return yaffs_DeleteSymLink(obj);
++			break;
++		case YAFFS_OBJECT_TYPE_SPECIAL:
++			return yaffs_DoGenericObjectDeletion(obj);
++			break;
++		case YAFFS_OBJECT_TYPE_HARDLINK:
++		case YAFFS_OBJECT_TYPE_UNKNOWN:
++		default:
++			return YAFFS_FAIL;
++		}
++	} else
++		return yaffs_ChangeObjectName(obj, obj->myDev->unlinkedDir,
++					   _Y("unlinked"), 0, 0);
++}
++
++
++static int yaffs_UnlinkObject(yaffs_Object *obj)
++{
++
++	if (obj && obj->unlinkAllowed)
++		return yaffs_UnlinkWorker(obj);
++
++	return YAFFS_FAIL;
++
++}
++int yaffs_Unlink(yaffs_Object *dir, const YCHAR *name)
++{
++	yaffs_Object *obj;
++
++	obj = yaffs_FindObjectByName(dir, name);
++	return yaffs_UnlinkObject(obj);
++}
++
++/*----------------------- Initialisation Scanning ---------------------- */
++
++static void yaffs_HandleShadowedObject(yaffs_Device *dev, int objId,
++				int backwardScanning)
++{
++	yaffs_Object *obj;
++
++	if (!backwardScanning) {
++		/* Handle YAFFS1 forward scanning case
++		 * For YAFFS1 we always do the deletion
++		 */
++
++	} else {
++		/* Handle YAFFS2 case (backward scanning)
++		 * If the shadowed object exists then ignore.
++		 */
++		if (yaffs_FindObjectByNumber(dev, objId))
++			return;
++	}
++
++	/* Let's create it (if it does not exist) assuming it is a file so that it can do shrinking etc.
++	 * We put it in unlinked dir to be cleaned up after the scanning
++	 */
++	obj =
++	    yaffs_FindOrCreateObjectByNumber(dev, objId,
++					     YAFFS_OBJECT_TYPE_FILE);
++	if (!obj)
++		return;
++	yaffs_AddObjectToDirectory(dev->unlinkedDir, obj);
++	obj->variant.fileVariant.shrinkSize = 0;
++	obj->valid = 1;		/* So that we don't read any other info for this file */
++
++}
++
++typedef struct {
++	int seq;
++	int block;
++} yaffs_BlockIndex;
++
++
++static void yaffs_HardlinkFixup(yaffs_Device *dev, yaffs_Object *hardList)
++{
++	yaffs_Object *hl;
++	yaffs_Object *in;
++
++	while (hardList) {
++		hl = hardList;
++		hardList = (yaffs_Object *) (hardList->hardLinks.next);
++
++		in = yaffs_FindObjectByNumber(dev,
++					      hl->variant.hardLinkVariant.
++					      equivalentObjectId);
++
++		if (in) {
++			/* Add the hardlink pointers */
++			hl->variant.hardLinkVariant.equivalentObject = in;
++			ylist_add(&hl->hardLinks, &in->hardLinks);
++		} else {
++			/* Todo Need to report/handle this better.
++			 * Got a problem... hardlink to a non-existant object
++			 */
++			hl->variant.hardLinkVariant.equivalentObject = NULL;
++			YINIT_LIST_HEAD(&hl->hardLinks);
++
++		}
++	}
++}
++
++
++
++
++
++static int ybicmp(const void *a, const void *b)
++{
++	register int aseq = ((yaffs_BlockIndex *)a)->seq;
++	register int bseq = ((yaffs_BlockIndex *)b)->seq;
++	register int ablock = ((yaffs_BlockIndex *)a)->block;
++	register int bblock = ((yaffs_BlockIndex *)b)->block;
++	if (aseq == bseq)
++		return ablock - bblock;
++	else
++		return aseq - bseq;
++}
++
++
++struct yaffs_ShadowFixerStruct {
++	int objectId;
++	int shadowedId;
++	struct yaffs_ShadowFixerStruct *next;
++};
++
++
++static void yaffs_StripDeletedObjects(yaffs_Device *dev)
++{
++	/*
++	*  Sort out state of unlinked and deleted objects after scanning.
++	*/
++	struct ylist_head *i;
++	struct ylist_head *n;
++	yaffs_Object *l;
++
++	/* Soft delete all the unlinked files */
++	ylist_for_each_safe(i, n,
++		&dev->unlinkedDir->variant.directoryVariant.children) {
++		if (i) {
++			l = ylist_entry(i, yaffs_Object, siblings);
++			yaffs_DeleteObject(l);
++		}
++	}
++
++	ylist_for_each_safe(i, n,
++		&dev->deletedDir->variant.directoryVariant.children) {
++		if (i) {
++			l = ylist_entry(i, yaffs_Object, siblings);
++			yaffs_DeleteObject(l);
++		}
++	}
++
++}
++
++static int yaffs_Scan(yaffs_Device *dev)
++{
++	yaffs_ExtendedTags tags;
++	int blk;
++	int blockIterator;
++	int startIterator;
++	int endIterator;
++	int result;
++
++	int chunk;
++	int c;
++	int deleted;
++	yaffs_BlockState state;
++	yaffs_Object *hardList = NULL;
++	yaffs_BlockInfo *bi;
++	__u32 sequenceNumber;
++	yaffs_ObjectHeader *oh;
++	yaffs_Object *in;
++	yaffs_Object *parent;
++
++	int alloc_failed = 0;
++
++	struct yaffs_ShadowFixerStruct *shadowFixerList = NULL;
++
++
++	__u8 *chunkData;
++
++
++
++	T(YAFFS_TRACE_SCAN,
++	  (TSTR("yaffs_Scan starts  intstartblk %d intendblk %d..." TENDSTR),
++	   dev->internalStartBlock, dev->internalEndBlock));
++
++	chunkData = yaffs_GetTempBuffer(dev, __LINE__);
++
++	dev->sequenceNumber = YAFFS_LOWEST_SEQUENCE_NUMBER;
++
++	/* Scan all the blocks to determine their state */
++	for (blk = dev->internalStartBlock; blk <= dev->internalEndBlock; blk++) {
++		bi = yaffs_GetBlockInfo(dev, blk);
++		yaffs_ClearChunkBits(dev, blk);
++		bi->pagesInUse = 0;
++		bi->softDeletions = 0;
++
++		yaffs_QueryInitialBlockState(dev, blk, &state, &sequenceNumber);
++
++		bi->blockState = state;
++		bi->sequenceNumber = sequenceNumber;
++
++		if (bi->sequenceNumber == YAFFS_SEQUENCE_BAD_BLOCK)
++			bi->blockState = state = YAFFS_BLOCK_STATE_DEAD;
++
++		T(YAFFS_TRACE_SCAN_DEBUG,
++		  (TSTR("Block scanning block %d state %d seq %d" TENDSTR), blk,
++		   state, sequenceNumber));
++
++		if (state == YAFFS_BLOCK_STATE_DEAD) {
++			T(YAFFS_TRACE_BAD_BLOCKS,
++			  (TSTR("block %d is bad" TENDSTR), blk));
++		} else if (state == YAFFS_BLOCK_STATE_EMPTY) {
++			T(YAFFS_TRACE_SCAN_DEBUG,
++			  (TSTR("Block empty " TENDSTR)));
++			dev->nErasedBlocks++;
++			dev->nFreeChunks += dev->nChunksPerBlock;
++		}
++	}
++
++	startIterator = dev->internalStartBlock;
++	endIterator = dev->internalEndBlock;
++
++	/* For each block.... */
++	for (blockIterator = startIterator; !alloc_failed && blockIterator <= endIterator;
++	     blockIterator++) {
++
++		YYIELD();
++
++		YYIELD();
++
++		blk = blockIterator;
++
++		bi = yaffs_GetBlockInfo(dev, blk);
++		state = bi->blockState;
++
++		deleted = 0;
++
++		/* For each chunk in each block that needs scanning....*/
++		for (c = 0; !alloc_failed && c < dev->nChunksPerBlock &&
++		     state == YAFFS_BLOCK_STATE_NEEDS_SCANNING; c++) {
++			/* Read the tags and decide what to do */
++			chunk = blk * dev->nChunksPerBlock + c;
++
++			result = yaffs_ReadChunkWithTagsFromNAND(dev, chunk, NULL,
++							&tags);
++
++			/* Let's have a good look at this chunk... */
++
++			if (tags.eccResult == YAFFS_ECC_RESULT_UNFIXED || tags.chunkDeleted) {
++				/* YAFFS1 only...
++				 * A deleted chunk
++				 */
++				deleted++;
++				dev->nFreeChunks++;
++				/*T((" %d %d deleted\n",blk,c)); */
++			} else if (!tags.chunkUsed) {
++				/* An unassigned chunk in the block
++				 * This means that either the block is empty or
++				 * this is the one being allocated from
++				 */
++
++				if (c == 0) {
++					/* We're looking at the first chunk in the block so the block is unused */
++					state = YAFFS_BLOCK_STATE_EMPTY;
++					dev->nErasedBlocks++;
++				} else {
++					/* this is the block being allocated from */
++					T(YAFFS_TRACE_SCAN,
++					  (TSTR
++					   (" Allocating from %d %d" TENDSTR),
++					   blk, c));
++					state = YAFFS_BLOCK_STATE_ALLOCATING;
++					dev->allocationBlock = blk;
++					dev->allocationPage = c;
++					dev->allocationBlockFinder = blk;
++					/* Set it to here to encourage the allocator to go forth from here. */
++
++				}
++
++				dev->nFreeChunks += (dev->nChunksPerBlock - c);
++			} else if (tags.chunkId > 0) {
++				/* chunkId > 0 so it is a data chunk... */
++				unsigned int endpos;
++
++				yaffs_SetChunkBit(dev, blk, c);
++				bi->pagesInUse++;
++
++				in = yaffs_FindOrCreateObjectByNumber(dev,
++								      tags.
++								      objectId,
++								      YAFFS_OBJECT_TYPE_FILE);
++				/* PutChunkIntoFile checks for a clash (two data chunks with
++				 * the same chunkId).
++				 */
++
++				if (!in)
++					alloc_failed = 1;
++
++				if (in) {
++					if (!yaffs_PutChunkIntoFile(in, tags.chunkId, chunk, 1))
++						alloc_failed = 1;
++				}
++
++				endpos =
++				    (tags.chunkId - 1) * dev->nDataBytesPerChunk +
++				    tags.byteCount;
++				if (in &&
++				    in->variantType == YAFFS_OBJECT_TYPE_FILE
++				    && in->variant.fileVariant.scannedFileSize <
++				    endpos) {
++					in->variant.fileVariant.
++					    scannedFileSize = endpos;
++					if (!dev->useHeaderFileSize) {
++						in->variant.fileVariant.
++						    fileSize =
++						    in->variant.fileVariant.
++						    scannedFileSize;
++					}
++
++				}
++				/* T((" %d %d data %d %d\n",blk,c,tags.objectId,tags.chunkId));   */
++			} else {
++				/* chunkId == 0, so it is an ObjectHeader.
++				 * Thus, we read in the object header and make the object
++				 */
++				yaffs_SetChunkBit(dev, blk, c);
++				bi->pagesInUse++;
++
++				result = yaffs_ReadChunkWithTagsFromNAND(dev, chunk,
++								chunkData,
++								NULL);
++
++				oh = (yaffs_ObjectHeader *) chunkData;
++
++				in = yaffs_FindObjectByNumber(dev,
++							      tags.objectId);
++				if (in && in->variantType != oh->type) {
++					/* This should not happen, but somehow
++				 * We've ended up with an objectId that has been reused but not yet
++					 * deleted, and worse still it has changed type. Delete the old object.
++					 */
++
++					yaffs_DeleteObject(in);
++
++					in = 0;
++				}
++
++				in = yaffs_FindOrCreateObjectByNumber(dev,
++								      tags.
++								      objectId,
++								      oh->type);
++
++				if (!in)
++					alloc_failed = 1;
++
++				if (in && oh->shadowsObject > 0) {
++
++					struct yaffs_ShadowFixerStruct *fixer;
++					fixer = YMALLOC(sizeof(struct yaffs_ShadowFixerStruct));
++					if (fixer) {
++						fixer->next = shadowFixerList;
++						shadowFixerList = fixer;
++						fixer->objectId = tags.objectId;
++						fixer->shadowedId = oh->shadowsObject;
++					}
++
++				}
++
++				if (in && in->valid) {
++					/* We have already filled this one. We have a duplicate and need to resolve it. */
++
++					unsigned existingSerial = in->serial;
++					unsigned newSerial = tags.serialNumber;
++
++					if (((existingSerial + 1) & 3) == newSerial) {
++						/* Use new one - destroy the existing one */
++						yaffs_DeleteChunk(dev,
++								  in->hdrChunk,
++								  1, __LINE__);
++						in->valid = 0;
++					} else {
++						/* Use existing - destroy this one. */
++						yaffs_DeleteChunk(dev, chunk, 1,
++								  __LINE__);
++					}
++				}
++
++				if (in && !in->valid &&
++				    (tags.objectId == YAFFS_OBJECTID_ROOT ||
++				     tags.objectId == YAFFS_OBJECTID_LOSTNFOUND)) {
++					/* We only load some info, don't fiddle with directory structure */
++					in->valid = 1;
++					in->variantType = oh->type;
++
++					in->yst_mode = oh->yst_mode;
++#ifdef CONFIG_YAFFS_WINCE
++					in->win_atime[0] = oh->win_atime[0];
++					in->win_ctime[0] = oh->win_ctime[0];
++					in->win_mtime[0] = oh->win_mtime[0];
++					in->win_atime[1] = oh->win_atime[1];
++					in->win_ctime[1] = oh->win_ctime[1];
++					in->win_mtime[1] = oh->win_mtime[1];
++#else
++					in->yst_uid = oh->yst_uid;
++					in->yst_gid = oh->yst_gid;
++					in->yst_atime = oh->yst_atime;
++					in->yst_mtime = oh->yst_mtime;
++					in->yst_ctime = oh->yst_ctime;
++					in->yst_rdev = oh->yst_rdev;
++#endif
++					in->hdrChunk = chunk;
++					in->serial = tags.serialNumber;
++
++				} else if (in && !in->valid) {
++					/* we need to load this info */
++
++					in->valid = 1;
++					in->variantType = oh->type;
++
++					in->yst_mode = oh->yst_mode;
++#ifdef CONFIG_YAFFS_WINCE
++					in->win_atime[0] = oh->win_atime[0];
++					in->win_ctime[0] = oh->win_ctime[0];
++					in->win_mtime[0] = oh->win_mtime[0];
++					in->win_atime[1] = oh->win_atime[1];
++					in->win_ctime[1] = oh->win_ctime[1];
++					in->win_mtime[1] = oh->win_mtime[1];
++#else
++					in->yst_uid = oh->yst_uid;
++					in->yst_gid = oh->yst_gid;
++					in->yst_atime = oh->yst_atime;
++					in->yst_mtime = oh->yst_mtime;
++					in->yst_ctime = oh->yst_ctime;
++					in->yst_rdev = oh->yst_rdev;
++#endif
++					in->hdrChunk = chunk;
++					in->serial = tags.serialNumber;
++
++					yaffs_SetObjectName(in, oh->name);
++					in->dirty = 0;
++
++					/* directory stuff...
++					 * hook up to parent
++					 */
++
++					parent =
++					    yaffs_FindOrCreateObjectByNumber
++					    (dev, oh->parentObjectId,
++					     YAFFS_OBJECT_TYPE_DIRECTORY);
++					if (!parent)
++						alloc_failed = 1;
++					if (parent && parent->variantType ==
++					    YAFFS_OBJECT_TYPE_UNKNOWN) {
++						/* Set up as a directory */
++						parent->variantType =
++							YAFFS_OBJECT_TYPE_DIRECTORY;
++						YINIT_LIST_HEAD(&parent->variant.
++								directoryVariant.
++								children);
++					} else if (!parent || parent->variantType !=
++						   YAFFS_OBJECT_TYPE_DIRECTORY) {
++						/* Hoosterman, another problem....
++						 * We're trying to use a non-directory as a directory
++						 */
++
++						T(YAFFS_TRACE_ERROR,
++						  (TSTR
++						   ("yaffs tragedy: attempting to use non-directory as a directory in scan. Put in lost+found."
++						    TENDSTR)));
++						parent = dev->lostNFoundDir;
++					}
++
++					yaffs_AddObjectToDirectory(parent, in);
++
++					if (0 && (parent == dev->deletedDir ||
++						  parent == dev->unlinkedDir)) {
++						in->deleted = 1;	/* If it is unlinked at start up then it wants deleting */
++						dev->nDeletedFiles++;
++					}
++					/* Note re hardlinks.
++					 * Since we might scan a hardlink before its equivalent object is scanned
++					 * we put them all in a list.
++					 * After scanning is complete, we should have all the objects, so we run through this
++					 * list and fix up all the chains.
++					 */
++
++					switch (in->variantType) {
++					case YAFFS_OBJECT_TYPE_UNKNOWN:
++						/* Todo got a problem */
++						break;
++					case YAFFS_OBJECT_TYPE_FILE:
++						if (dev->useHeaderFileSize)
++
++							in->variant.fileVariant.
++							    fileSize =
++							    oh->fileSize;
++
++						break;
++					case YAFFS_OBJECT_TYPE_HARDLINK:
++						in->variant.hardLinkVariant.
++							equivalentObjectId =
++							oh->equivalentObjectId;
++						in->hardLinks.next =
++							(struct ylist_head *)
++							hardList;
++						hardList = in;
++						break;
++					case YAFFS_OBJECT_TYPE_DIRECTORY:
++						/* Do nothing */
++						break;
++					case YAFFS_OBJECT_TYPE_SPECIAL:
++						/* Do nothing */
++						break;
++					case YAFFS_OBJECT_TYPE_SYMLINK:
++						in->variant.symLinkVariant.alias =
++						    yaffs_CloneString(oh->alias);
++						if (!in->variant.symLinkVariant.alias)
++							alloc_failed = 1;
++						break;
++					}
++
++/*
++					if (parent == dev->deletedDir) {
++						yaffs_DestroyObject(in);
++						bi->hasShrinkHeader = 1;
++					}
++*/
++				}
++			}
++		}
++
++		if (state == YAFFS_BLOCK_STATE_NEEDS_SCANNING) {
++			/* If we got this far while scanning, then the block is fully allocated.*/
++			state = YAFFS_BLOCK_STATE_FULL;
++		}
++
++		bi->blockState = state;
++
++		/* Now let's see if it was dirty */
++		if (bi->pagesInUse == 0 &&
++		    !bi->hasShrinkHeader &&
++		    bi->blockState == YAFFS_BLOCK_STATE_FULL) {
++			yaffs_BlockBecameDirty(dev, blk);
++		}
++
++	}
++
++
++	/* Ok, we've done all the scanning.
++	 * Fix up the hard link chains.
++	 * We should now have scanned all the objects, now it's time to add these
++	 * hardlinks.
++	 */
++
++	yaffs_HardlinkFixup(dev, hardList);
++
++	/* Fix up any shadowed objects */
++	{
++		struct yaffs_ShadowFixerStruct *fixer;
++		yaffs_Object *obj;
++
++		while (shadowFixerList) {
++			fixer = shadowFixerList;
++			shadowFixerList = fixer->next;
++			/* Complete the rename transaction by deleting the shadowed object
++			 * then setting the object header to unshadowed.
++			 */
++			obj = yaffs_FindObjectByNumber(dev, fixer->shadowedId);
++			if (obj)
++				yaffs_DeleteObject(obj);
++
++			obj = yaffs_FindObjectByNumber(dev, fixer->objectId);
++
++			if (obj)
++				yaffs_UpdateObjectHeader(obj, NULL, 1, 0, 0);
++
++			YFREE(fixer);
++		}
++	}
++
++	yaffs_ReleaseTempBuffer(dev, chunkData, __LINE__);
++
++	if (alloc_failed)
++		return YAFFS_FAIL;
++
++	T(YAFFS_TRACE_SCAN, (TSTR("yaffs_Scan ends" TENDSTR)));
++
++
++	return YAFFS_OK;
++}
++
++static void yaffs_CheckObjectDetailsLoaded(yaffs_Object *in)
++{
++	__u8 *chunkData;
++	yaffs_ObjectHeader *oh;
++	yaffs_Device *dev;
++	yaffs_ExtendedTags tags;
++	int result;
++	int alloc_failed = 0;
++
++	if (!in)
++		return;
++
++	dev = in->myDev;
++
++#if 0
++	T(YAFFS_TRACE_SCAN, (TSTR("details for object %d %s loaded" TENDSTR),
++		in->objectId,
++		in->lazyLoaded ? "not yet" : "already"));
++#endif
++
++	if (in->lazyLoaded && in->hdrChunk > 0) {
++		in->lazyLoaded = 0;
++		chunkData = yaffs_GetTempBuffer(dev, __LINE__);
++
++		result = yaffs_ReadChunkWithTagsFromNAND(dev, in->hdrChunk, chunkData, &tags);
++		oh = (yaffs_ObjectHeader *) chunkData;
++
++		in->yst_mode = oh->yst_mode;
++#ifdef CONFIG_YAFFS_WINCE
++		in->win_atime[0] = oh->win_atime[0];
++		in->win_ctime[0] = oh->win_ctime[0];
++		in->win_mtime[0] = oh->win_mtime[0];
++		in->win_atime[1] = oh->win_atime[1];
++		in->win_ctime[1] = oh->win_ctime[1];
++		in->win_mtime[1] = oh->win_mtime[1];
++#else
++		in->yst_uid = oh->yst_uid;
++		in->yst_gid = oh->yst_gid;
++		in->yst_atime = oh->yst_atime;
++		in->yst_mtime = oh->yst_mtime;
++		in->yst_ctime = oh->yst_ctime;
++		in->yst_rdev = oh->yst_rdev;
++
++#endif
++		yaffs_SetObjectName(in, oh->name);
++
++		if (in->variantType == YAFFS_OBJECT_TYPE_SYMLINK) {
++			in->variant.symLinkVariant.alias =
++						    yaffs_CloneString(oh->alias);
++			if (!in->variant.symLinkVariant.alias)
++				alloc_failed = 1; /* Not returned to caller */
++		}
++
++		yaffs_ReleaseTempBuffer(dev, chunkData, __LINE__);
++	}
++}
++
++static int yaffs_ScanBackwards(yaffs_Device *dev)
++{
++	yaffs_ExtendedTags tags;
++	int blk;
++	int blockIterator;
++	int startIterator;
++	int endIterator;
++	int nBlocksToScan = 0;
++
++	int chunk;
++	int result;
++	int c;
++	int deleted;
++	yaffs_BlockState state;
++	yaffs_Object *hardList = NULL;
++	yaffs_BlockInfo *bi;
++	__u32 sequenceNumber;
++	yaffs_ObjectHeader *oh;
++	yaffs_Object *in;
++	yaffs_Object *parent;
++	int nBlocks = dev->internalEndBlock - dev->internalStartBlock + 1;
++	int itsUnlinked;
++	__u8 *chunkData;
++
++	int fileSize;
++	int isShrink;
++	int foundChunksInBlock;
++	int equivalentObjectId;
++	int alloc_failed = 0;
++
++
++	yaffs_BlockIndex *blockIndex = NULL;
++	int altBlockIndex = 0;
++
++	if (!dev->isYaffs2) {
++		T(YAFFS_TRACE_SCAN,
++		  (TSTR("yaffs_ScanBackwards is only for YAFFS2!" TENDSTR)));
++		return YAFFS_FAIL;
++	}
++
++	T(YAFFS_TRACE_SCAN,
++	  (TSTR
++	   ("yaffs_ScanBackwards starts  intstartblk %d intendblk %d..."
++	    TENDSTR), dev->internalStartBlock, dev->internalEndBlock));
++
++
++	dev->sequenceNumber = YAFFS_LOWEST_SEQUENCE_NUMBER;
++
++	blockIndex = YMALLOC(nBlocks * sizeof(yaffs_BlockIndex));
++
++	if (!blockIndex) {
++		blockIndex = YMALLOC_ALT(nBlocks * sizeof(yaffs_BlockIndex));
++		altBlockIndex = 1;
++	}
++
++	if (!blockIndex) {
++		T(YAFFS_TRACE_SCAN,
++		  (TSTR("yaffs_Scan() could not allocate block index!" TENDSTR)));
++		return YAFFS_FAIL;
++	}
++
++	dev->blocksInCheckpoint = 0;
++
++	chunkData = yaffs_GetTempBuffer(dev, __LINE__);
++
++	/* Scan all the blocks to determine their state */
++	for (blk = dev->internalStartBlock; blk <= dev->internalEndBlock; blk++) {
++		bi = yaffs_GetBlockInfo(dev, blk);
++		yaffs_ClearChunkBits(dev, blk);
++		bi->pagesInUse = 0;
++		bi->softDeletions = 0;
++
++		yaffs_QueryInitialBlockState(dev, blk, &state, &sequenceNumber);
++
++		bi->blockState = state;
++		bi->sequenceNumber = sequenceNumber;
++
++		if (bi->sequenceNumber == YAFFS_SEQUENCE_CHECKPOINT_DATA)
++			bi->blockState = state = YAFFS_BLOCK_STATE_CHECKPOINT;
++		if (bi->sequenceNumber == YAFFS_SEQUENCE_BAD_BLOCK)
++			bi->blockState = state = YAFFS_BLOCK_STATE_DEAD;
++
++		T(YAFFS_TRACE_SCAN_DEBUG,
++		  (TSTR("Block scanning block %d state %d seq %d" TENDSTR), blk,
++		   state, sequenceNumber));
++
++
++		if (state == YAFFS_BLOCK_STATE_CHECKPOINT) {
++			dev->blocksInCheckpoint++;
++
++		} else if (state == YAFFS_BLOCK_STATE_DEAD) {
++			T(YAFFS_TRACE_BAD_BLOCKS,
++			  (TSTR("block %d is bad" TENDSTR), blk));
++		} else if (state == YAFFS_BLOCK_STATE_EMPTY) {
++			T(YAFFS_TRACE_SCAN_DEBUG,
++			  (TSTR("Block empty " TENDSTR)));
++			dev->nErasedBlocks++;
++			dev->nFreeChunks += dev->nChunksPerBlock;
++		} else if (state == YAFFS_BLOCK_STATE_NEEDS_SCANNING) {
++
++			/* Determine the highest sequence number */
++			if (sequenceNumber >= YAFFS_LOWEST_SEQUENCE_NUMBER &&
++			    sequenceNumber < YAFFS_HIGHEST_SEQUENCE_NUMBER) {
++
++				blockIndex[nBlocksToScan].seq = sequenceNumber;
++				blockIndex[nBlocksToScan].block = blk;
++
++				nBlocksToScan++;
++
++				if (sequenceNumber >= dev->sequenceNumber)
++					dev->sequenceNumber = sequenceNumber;
++			} else {
++				/* TODO: Nasty sequence number! */
++				T(YAFFS_TRACE_SCAN,
++				  (TSTR
++				   ("Block scanning block %d has bad sequence number %d"
++				    TENDSTR), blk, sequenceNumber));
++
++			}
++		}
++	}
++
++	T(YAFFS_TRACE_SCAN,
++	(TSTR("%d blocks to be sorted..." TENDSTR), nBlocksToScan));
++
++
++
++	YYIELD();
++
++	/* Sort the blocks */
++#ifndef CONFIG_YAFFS_USE_OWN_SORT
++	{
++		/* Use qsort now. */
++		yaffs_qsort(blockIndex, nBlocksToScan, sizeof(yaffs_BlockIndex), ybicmp);
++	}
++#else
++	{
++		/* Dungy old bubble sort... */
++
++		yaffs_BlockIndex temp;
++		int i;
++		int j;
++
++		for (i = 0; i < nBlocksToScan; i++)
++			for (j = i + 1; j < nBlocksToScan; j++)
++				if (blockIndex[i].seq > blockIndex[j].seq) {
++					temp = blockIndex[j];
++					blockIndex[j] = blockIndex[i];
++					blockIndex[i] = temp;
++				}
++	}
++#endif
++
++	YYIELD();
++
++	T(YAFFS_TRACE_SCAN, (TSTR("...done" TENDSTR)));
++
++	/* Now scan the blocks looking at the data. */
++	startIterator = 0;
++	endIterator = nBlocksToScan - 1;
++	T(YAFFS_TRACE_SCAN_DEBUG,
++	  (TSTR("%d blocks to be scanned" TENDSTR), nBlocksToScan));
++
++	/* For each block.... backwards */
++	for (blockIterator = endIterator; !alloc_failed && blockIterator >= startIterator;
++			blockIterator--) {
++		/* Cooperative multitasking! This loop can run for so
++		   long that watchdog timers expire. */
++		YYIELD();
++
++		/* get the block to scan in the correct order */
++		blk = blockIndex[blockIterator].block;
++
++		bi = yaffs_GetBlockInfo(dev, blk);
++
++
++		state = bi->blockState;
++
++		deleted = 0;
++
++		/* For each chunk in each block that needs scanning.... */
++		foundChunksInBlock = 0;
++		for (c = dev->nChunksPerBlock - 1;
++		     !alloc_failed && c >= 0 &&
++		     (state == YAFFS_BLOCK_STATE_NEEDS_SCANNING ||
++		      state == YAFFS_BLOCK_STATE_ALLOCATING); c--) {
++			/* Scan backwards...
++			 * Read the tags and decide what to do
++			 */
++
++			chunk = blk * dev->nChunksPerBlock + c;
++
++			result = yaffs_ReadChunkWithTagsFromNAND(dev, chunk, NULL,
++							&tags);
++
++			/* Let's have a good look at this chunk... */
++
++			if (!tags.chunkUsed) {
++				/* An unassigned chunk in the block.
++				 * If there are used chunks after this one, then
++				 * it is a chunk that was skipped due to failing the erased
++				 * check. Just skip it so that it can be deleted.
++				 * But, more typically, We get here when this is an unallocated
++				 * chunk and this means that either the block is empty or
++				 * this is the one being allocated from
++				 */
++
++				if (foundChunksInBlock) {
++					/* This is a chunk that was skipped due to failing the erased check */
++				} else if (c == 0) {
++					/* We're looking at the first chunk in the block so the block is unused */
++					state = YAFFS_BLOCK_STATE_EMPTY;
++					dev->nErasedBlocks++;
++				} else {
++					if (state == YAFFS_BLOCK_STATE_NEEDS_SCANNING ||
++					    state == YAFFS_BLOCK_STATE_ALLOCATING) {
++						if (dev->sequenceNumber == bi->sequenceNumber) {
++							/* this is the block being allocated from */
++
++							T(YAFFS_TRACE_SCAN,
++							  (TSTR
++							   (" Allocating from %d %d"
++							    TENDSTR), blk, c));
++
++							state = YAFFS_BLOCK_STATE_ALLOCATING;
++							dev->allocationBlock = blk;
++							dev->allocationPage = c;
++							dev->allocationBlockFinder = blk;
++						} else {
++							/* This is a partially written block that is not
++							 * the current allocation block. This block must have
++							 * had a write failure, so set up for retirement.
++							 */
++
++							 /* bi->needsRetiring = 1; ??? TODO */
++							 bi->gcPrioritise = 1;
++
++							 T(YAFFS_TRACE_ALWAYS,
++							 (TSTR("Partially written block %d detected" TENDSTR),
++							 blk));
++						}
++					}
++				}
++
++				dev->nFreeChunks++;
++
++			} else if (tags.eccResult == YAFFS_ECC_RESULT_UNFIXED) {
++				T(YAFFS_TRACE_SCAN,
++				  (TSTR(" Unfixed ECC in chunk(%d:%d), chunk ignored"TENDSTR),
++				  blk, c));
++
++				  dev->nFreeChunks++;
++
++			} else if (tags.chunkId > 0) {
++				/* chunkId > 0 so it is a data chunk... */
++				unsigned int endpos;
++				__u32 chunkBase =
++				    (tags.chunkId - 1) * dev->nDataBytesPerChunk;
++
++				foundChunksInBlock = 1;
++
++
++				yaffs_SetChunkBit(dev, blk, c);
++				bi->pagesInUse++;
++
++				in = yaffs_FindOrCreateObjectByNumber(dev,
++								      tags.
++								      objectId,
++								      YAFFS_OBJECT_TYPE_FILE);
++				if (!in) {
++					/* Out of memory */
++					alloc_failed = 1;
++				}
++
++				if (in &&
++				    in->variantType == YAFFS_OBJECT_TYPE_FILE
++				    && chunkBase <
++				    in->variant.fileVariant.shrinkSize) {
++					/* This has not been invalidated by a resize */
++					if (!yaffs_PutChunkIntoFile(in, tags.chunkId,
++							       chunk, -1)) {
++						alloc_failed = 1;
++					}
++
++					/* File size is calculated by looking at the data chunks if we have not
++					 * seen an object header yet. Stop this practice once we find an object header.
++					 */
++					endpos =
++					    (tags.chunkId -
++					     1) * dev->nDataBytesPerChunk +
++					    tags.byteCount;
++
++					if (!in->valid &&	/* have not got an object header yet */
++					    in->variant.fileVariant.
++					    scannedFileSize < endpos) {
++						in->variant.fileVariant.
++						    scannedFileSize = endpos;
++						in->variant.fileVariant.
++						    fileSize =
++						    in->variant.fileVariant.
++						    scannedFileSize;
++					}
++
++				} else if (in) {
++					/* This chunk has been invalidated by a resize, so delete */
++					yaffs_DeleteChunk(dev, chunk, 1, __LINE__);
++
++				}
++			} else {
++				/* chunkId == 0, so it is an ObjectHeader.
++				 * Thus, we read in the object header and make the object
++				 */
++				foundChunksInBlock = 1;
++
++				yaffs_SetChunkBit(dev, blk, c);
++				bi->pagesInUse++;
++
++				oh = NULL;
++				in = NULL;
++
++				if (tags.extraHeaderInfoAvailable) {
++					in = yaffs_FindOrCreateObjectByNumber
++					    (dev, tags.objectId,
++					     tags.extraObjectType);
++					if (!in)
++						alloc_failed = 1;
++				}
++
++				if (!in ||
++#ifdef CONFIG_YAFFS_DISABLE_LAZY_LOAD
++				    !in->valid ||
++#endif
++				    tags.extraShadows ||
++				    (!in->valid &&
++				    (tags.objectId == YAFFS_OBJECTID_ROOT ||
++				     tags.objectId == YAFFS_OBJECTID_LOSTNFOUND))) {
++
++					/* If we don't have valid info then we need to read the chunk
++					 * TODO In future we can probably defer reading the chunk and
++					 * living with invalid data until needed.
++					 */
++
++					result = yaffs_ReadChunkWithTagsFromNAND(dev,
++									chunk,
++									chunkData,
++									NULL);
++
++					oh = (yaffs_ObjectHeader *) chunkData;
++
++					if (dev->inbandTags) {
++						/* Fix up the header if they got corrupted by inband tags */
++						oh->shadowsObject = oh->inbandShadowsObject;
++						oh->isShrink = oh->inbandIsShrink;
++					}
++
++					if (!in) {
++						in = yaffs_FindOrCreateObjectByNumber(dev, tags.objectId, oh->type);
++						if (!in)
++							alloc_failed = 1;
++					}
++
++				}
++
++				if (!in) {
++					/* TODO Hoosterman we have a problem! */
++					T(YAFFS_TRACE_ERROR,
++					  (TSTR
++					   ("yaffs tragedy: Could not make object for object  %d at chunk %d during scan"
++					    TENDSTR), tags.objectId, chunk));
++					continue;
++				}
++
++				if (in->valid) {
++					/* We have already filled this one.
++					 * We have a duplicate that will be discarded, but
++					 * we first have to suck out resize info if it is a file.
++					 */
++
++					if ((in->variantType == YAFFS_OBJECT_TYPE_FILE) &&
++					     ((oh &&
++					       oh->type == YAFFS_OBJECT_TYPE_FILE) ||
++					      (tags.extraHeaderInfoAvailable  &&
++					       tags.extraObjectType == YAFFS_OBJECT_TYPE_FILE))) {
++						__u32 thisSize =
++						    (oh) ? oh->fileSize : tags.
++						    extraFileLength;
++						__u32 parentObjectId =
++						    (oh) ? oh->
++						    parentObjectId : tags.
++						    extraParentObjectId;
++
++
++						isShrink =
++						    (oh) ? oh->isShrink : tags.
++						    extraIsShrinkHeader;
++
++						/* If it is deleted (unlinked at start also means deleted)
++						 * we treat the file size as being zeroed at this point.
++						 */
++						if (parentObjectId ==
++						    YAFFS_OBJECTID_DELETED
++						    || parentObjectId ==
++						    YAFFS_OBJECTID_UNLINKED) {
++							thisSize = 0;
++							isShrink = 1;
++						}
++
++						if (isShrink &&
++						    in->variant.fileVariant.
++						    shrinkSize > thisSize) {
++							in->variant.fileVariant.
++							    shrinkSize =
++							    thisSize;
++						}
++
++						if (isShrink)
++							bi->hasShrinkHeader = 1;
++
++					}
++					/* Use existing - destroy this one. */
++					yaffs_DeleteChunk(dev, chunk, 1, __LINE__);
++
++				}
++
++				if (!in->valid && in->variantType !=
++				    (oh ? oh->type : tags.extraObjectType))
++					T(YAFFS_TRACE_ERROR, (
++						TSTR("yaffs tragedy: Bad object type, "
++					    TCONT("%d != %d, for object %d at chunk ")
++					    TCONT("%d during scan")
++						TENDSTR), oh ?
++					    oh->type : tags.extraObjectType,
++					    in->variantType, tags.objectId,
++					    chunk));
++
++				if (!in->valid &&
++				    (tags.objectId == YAFFS_OBJECTID_ROOT ||
++				     tags.objectId ==
++				     YAFFS_OBJECTID_LOSTNFOUND)) {
++					/* We only load some info, don't fiddle with directory structure */
++					in->valid = 1;
++
++					if (oh) {
++						in->variantType = oh->type;
++
++						in->yst_mode = oh->yst_mode;
++#ifdef CONFIG_YAFFS_WINCE
++						in->win_atime[0] = oh->win_atime[0];
++						in->win_ctime[0] = oh->win_ctime[0];
++						in->win_mtime[0] = oh->win_mtime[0];
++						in->win_atime[1] = oh->win_atime[1];
++						in->win_ctime[1] = oh->win_ctime[1];
++						in->win_mtime[1] = oh->win_mtime[1];
++#else
++						in->yst_uid = oh->yst_uid;
++						in->yst_gid = oh->yst_gid;
++						in->yst_atime = oh->yst_atime;
++						in->yst_mtime = oh->yst_mtime;
++						in->yst_ctime = oh->yst_ctime;
++						in->yst_rdev = oh->yst_rdev;
++
++#endif
++					} else {
++						in->variantType = tags.extraObjectType;
++						in->lazyLoaded = 1;
++					}
++
++					in->hdrChunk = chunk;
++
++				} else if (!in->valid) {
++					/* we need to load this info */
++
++					in->valid = 1;
++					in->hdrChunk = chunk;
++
++					if (oh) {
++						in->variantType = oh->type;
++
++						in->yst_mode = oh->yst_mode;
++#ifdef CONFIG_YAFFS_WINCE
++						in->win_atime[0] = oh->win_atime[0];
++						in->win_ctime[0] = oh->win_ctime[0];
++						in->win_mtime[0] = oh->win_mtime[0];
++						in->win_atime[1] = oh->win_atime[1];
++						in->win_ctime[1] = oh->win_ctime[1];
++						in->win_mtime[1] = oh->win_mtime[1];
++#else
++						in->yst_uid = oh->yst_uid;
++						in->yst_gid = oh->yst_gid;
++						in->yst_atime = oh->yst_atime;
++						in->yst_mtime = oh->yst_mtime;
++						in->yst_ctime = oh->yst_ctime;
++						in->yst_rdev = oh->yst_rdev;
++#endif
++
++						if (oh->shadowsObject > 0)
++							yaffs_HandleShadowedObject(dev,
++									   oh->
++									   shadowsObject,
++									   1);
++
++
++						yaffs_SetObjectName(in, oh->name);
++						parent =
++						    yaffs_FindOrCreateObjectByNumber
++							(dev, oh->parentObjectId,
++							 YAFFS_OBJECT_TYPE_DIRECTORY);
++
++						 fileSize = oh->fileSize;
++						 isShrink = oh->isShrink;
++						 equivalentObjectId = oh->equivalentObjectId;
++
++					} else {
++						in->variantType = tags.extraObjectType;
++						parent =
++						    yaffs_FindOrCreateObjectByNumber
++							(dev, tags.extraParentObjectId,
++							 YAFFS_OBJECT_TYPE_DIRECTORY);
++						 fileSize = tags.extraFileLength;
++						 isShrink = tags.extraIsShrinkHeader;
++						 equivalentObjectId = tags.extraEquivalentObjectId;
++						in->lazyLoaded = 1;
++
++					}
++					in->dirty = 0;
++
++					if (!parent)
++						alloc_failed = 1;
++
++					/* directory stuff...
++					 * hook up to parent
++					 */
++
++					if (parent && parent->variantType ==
++					    YAFFS_OBJECT_TYPE_UNKNOWN) {
++						/* Set up as a directory */
++						parent->variantType =
++							YAFFS_OBJECT_TYPE_DIRECTORY;
++						YINIT_LIST_HEAD(&parent->variant.
++							directoryVariant.
++							children);
++					} else if (!parent || parent->variantType !=
++						   YAFFS_OBJECT_TYPE_DIRECTORY) {
++						/* Hoosterman, another problem....
++						 * We're trying to use a non-directory as a directory
++						 */
++
++						T(YAFFS_TRACE_ERROR,
++						  (TSTR
++						   ("yaffs tragedy: attempting to use non-directory as a directory in scan. Put in lost+found."
++						    TENDSTR)));
++						parent = dev->lostNFoundDir;
++					}
++
++					yaffs_AddObjectToDirectory(parent, in);
++
++					itsUnlinked = (parent == dev->deletedDir) ||
++						      (parent == dev->unlinkedDir);
++
++					if (isShrink) {
++						/* Mark the block as having a shrinkHeader */
++						bi->hasShrinkHeader = 1;
++					}
++
++					/* Note re hardlinks.
++					 * Since we might scan a hardlink before its equivalent object is scanned
++					 * we put them all in a list.
++					 * After scanning is complete, we should have all the objects, so we run
++					 * through this list and fix up all the chains.
++					 */
++
++					switch (in->variantType) {
++					case YAFFS_OBJECT_TYPE_UNKNOWN:
++						/* Todo got a problem */
++						break;
++					case YAFFS_OBJECT_TYPE_FILE:
++
++						if (in->variant.fileVariant.
++						    scannedFileSize < fileSize) {
++							/* This covers the case where the file size is greater
++							 * than where the data is
++							 * This will happen if the file is resized to be larger
++							 * than its current data extents.
++							 */
++							in->variant.fileVariant.fileSize = fileSize;
++							in->variant.fileVariant.scannedFileSize =
++							    in->variant.fileVariant.fileSize;
++						}
++
++						if (isShrink &&
++						    in->variant.fileVariant.shrinkSize > fileSize) {
++							in->variant.fileVariant.shrinkSize = fileSize;
++						}
++
++						break;
++					case YAFFS_OBJECT_TYPE_HARDLINK:
++						if (!itsUnlinked) {
++							in->variant.hardLinkVariant.equivalentObjectId =
++								equivalentObjectId;
++							in->hardLinks.next =
++								(struct ylist_head *) hardList;
++							hardList = in;
++						}
++						break;
++					case YAFFS_OBJECT_TYPE_DIRECTORY:
++						/* Do nothing */
++						break;
++					case YAFFS_OBJECT_TYPE_SPECIAL:
++						/* Do nothing */
++						break;
++					case YAFFS_OBJECT_TYPE_SYMLINK:
++						if (oh) {
++							in->variant.symLinkVariant.alias =
++								yaffs_CloneString(oh->alias);
++							if (!in->variant.symLinkVariant.alias)
++								alloc_failed = 1;
++						}
++						break;
++					}
++
++				}
++
++			}
++
++		} /* End of scanning for each chunk */
++
++		if (state == YAFFS_BLOCK_STATE_NEEDS_SCANNING) {
++			/* If we got this far while scanning, then the block is fully allocated. */
++			state = YAFFS_BLOCK_STATE_FULL;
++		}
++
++		bi->blockState = state;
++
++		/* Now let's see if it was dirty */
++		if (bi->pagesInUse == 0 &&
++		    !bi->hasShrinkHeader &&
++		    bi->blockState == YAFFS_BLOCK_STATE_FULL) {
++			yaffs_BlockBecameDirty(dev, blk);
++		}
++
++	}
++
++	if (altBlockIndex)
++		YFREE_ALT(blockIndex);
++	else
++		YFREE(blockIndex);
++
++	/* Ok, we've done all the scanning.
++	 * Fix up the hard link chains.
++	 * We should now have scanned all the objects, now it's time to add these
++	 * hardlinks.
++	 */
++	yaffs_HardlinkFixup(dev, hardList);
++
++
++	yaffs_ReleaseTempBuffer(dev, chunkData, __LINE__);
++
++	if (alloc_failed)
++		return YAFFS_FAIL;
++
++	T(YAFFS_TRACE_SCAN, (TSTR("yaffs_ScanBackwards ends" TENDSTR)));
++
++	return YAFFS_OK;
++}
++
++/*------------------------------  Directory Functions ----------------------------- */
++
++static void yaffs_VerifyObjectInDirectory(yaffs_Object *obj)
++{
++	struct ylist_head *lh;
++	yaffs_Object *listObj;
++
++	int count = 0;
++
++	if (!obj) {
++		T(YAFFS_TRACE_ALWAYS, (TSTR("No object to verify" TENDSTR)));
++		YBUG();
++		return;
++	}
++
++	if (yaffs_SkipVerification(obj->myDev))
++		return;
++
++	if (!obj->parent) {
++		T(YAFFS_TRACE_ALWAYS, (TSTR("Object does not have parent" TENDSTR)));
++		YBUG();
++		return;
++	}
++
++	if (obj->parent->variantType != YAFFS_OBJECT_TYPE_DIRECTORY) {
++		T(YAFFS_TRACE_ALWAYS, (TSTR("Parent is not directory" TENDSTR)));
++		YBUG();
++	}
++
++	/* Iterate through the objects in each hash entry */
++
++	ylist_for_each(lh, &obj->parent->variant.directoryVariant.children) {
++		if (lh) {
++			listObj = ylist_entry(lh, yaffs_Object, siblings);
++			yaffs_VerifyObject(listObj);
++			if (obj == listObj)
++				count++;
++		}
++	 }
++
++	if (count != 1) {
++		T(YAFFS_TRACE_ALWAYS, (TSTR("Object in directory %d times" TENDSTR), count));
++		YBUG();
++	}
++}
++
++static void yaffs_VerifyDirectory(yaffs_Object *directory)
++{
++	struct ylist_head *lh;
++	yaffs_Object *listObj;
++
++	if (!directory) {
++		YBUG();
++		return;
++	}
++
++	if (yaffs_SkipFullVerification(directory->myDev))
++		return;
++
++	if (directory->variantType != YAFFS_OBJECT_TYPE_DIRECTORY) {
++		T(YAFFS_TRACE_ALWAYS, (TSTR("Directory has wrong type: %d" TENDSTR), directory->variantType));
++		YBUG();
++	}
++
++	/* Iterate through the objects in each hash entry */
++
++	ylist_for_each(lh, &directory->variant.directoryVariant.children) {
++		if (lh) {
++			listObj = ylist_entry(lh, yaffs_Object, siblings);
++			if (listObj->parent != directory) {
++				T(YAFFS_TRACE_ALWAYS, (TSTR("Object in directory list has wrong parent %p" TENDSTR), listObj->parent));
++				YBUG();
++			}
++			yaffs_VerifyObjectInDirectory(listObj);
++		}
++	}
++}
++
++
++static void yaffs_RemoveObjectFromDirectory(yaffs_Object *obj)
++{
++	yaffs_Device *dev = obj->myDev;
++	yaffs_Object *parent;
++
++	yaffs_VerifyObjectInDirectory(obj);
++	parent = obj->parent;
++
++	yaffs_VerifyDirectory(parent);
++
++	if (dev && dev->removeObjectCallback)
++		dev->removeObjectCallback(obj);
++
++
++	ylist_del_init(&obj->siblings);
++	obj->parent = NULL;
++
++	yaffs_VerifyDirectory(parent);
++}
++
++
++static void yaffs_AddObjectToDirectory(yaffs_Object *directory,
++					yaffs_Object *obj)
++{
++	if (!directory) {
++		T(YAFFS_TRACE_ALWAYS,
++		  (TSTR
++		   ("tragedy: Trying to add an object to a null pointer directory"
++		    TENDSTR)));
++		YBUG();
++		return;
++	}
++	if (directory->variantType != YAFFS_OBJECT_TYPE_DIRECTORY) {
++		T(YAFFS_TRACE_ALWAYS,
++		  (TSTR
++		   ("tragedy: Trying to add an object to a non-directory"
++		    TENDSTR)));
++		YBUG();
++	}
++
++	if (obj->siblings.prev == NULL) {
++		/* Not initialised */
++		YBUG();
++	}
++
++
++	yaffs_VerifyDirectory(directory);
++
++	yaffs_RemoveObjectFromDirectory(obj);
++
++
++	/* Now add it */
++	ylist_add(&obj->siblings, &directory->variant.directoryVariant.children);
++	obj->parent = directory;
++
++	if (directory == obj->myDev->unlinkedDir
++			|| directory == obj->myDev->deletedDir) {
++		obj->unlinked = 1;
++		obj->myDev->nUnlinkedFiles++;
++		obj->renameAllowed = 0;
++	}
++
++	yaffs_VerifyDirectory(directory);
++	yaffs_VerifyObjectInDirectory(obj);
++}
++
++yaffs_Object *yaffs_FindObjectByName(yaffs_Object *directory,
++				     const YCHAR *name)
++{
++	int sum;
++
++	struct ylist_head *i;
++	YCHAR buffer[YAFFS_MAX_NAME_LENGTH + 1];
++
++	yaffs_Object *l;
++
++	if (!name)
++		return NULL;
++
++	if (!directory) {
++		T(YAFFS_TRACE_ALWAYS,
++		  (TSTR
++		   ("tragedy: yaffs_FindObjectByName: null pointer directory"
++		    TENDSTR)));
++		YBUG();
++		return NULL;
++	}
++	if (directory->variantType != YAFFS_OBJECT_TYPE_DIRECTORY) {
++		T(YAFFS_TRACE_ALWAYS,
++		  (TSTR
++		   ("tragedy: yaffs_FindObjectByName: non-directory" TENDSTR)));
++		YBUG();
++	}
++
++	sum = yaffs_CalcNameSum(name);
++
++	ylist_for_each(i, &directory->variant.directoryVariant.children) {
++		if (i) {
++			l = ylist_entry(i, yaffs_Object, siblings);
++
++			if (l->parent != directory)
++				YBUG();
++
++			yaffs_CheckObjectDetailsLoaded(l);
++
++			/* Special case for lost-n-found */
++			if (l->objectId == YAFFS_OBJECTID_LOSTNFOUND) {
++				if (yaffs_strcmp(name, YAFFS_LOSTNFOUND_NAME) == 0)
++					return l;
++			} else if (yaffs_SumCompare(l->sum, sum) || l->hdrChunk <= 0) {
++				/* LostnFound chunk called Objxxx
++				 * Do a real check
++				 */
++				yaffs_GetObjectName(l, buffer,
++						    YAFFS_MAX_NAME_LENGTH);
++				if (yaffs_strncmp(name, buffer, YAFFS_MAX_NAME_LENGTH) == 0)
++					return l;
++			}
++		}
++	}
++
++	return NULL;
++}
++
++
++#if 0
++int yaffs_ApplyToDirectoryChildren(yaffs_Object *theDir,
++					int (*fn) (yaffs_Object *))
++{
++	struct ylist_head *i;
++	yaffs_Object *l;
++
++	if (!theDir) {
++		T(YAFFS_TRACE_ALWAYS,
++		  (TSTR
++		   ("tragedy: yaffs_FindObjectByName: null pointer directory"
++		    TENDSTR)));
++		YBUG();
++		return YAFFS_FAIL;
++	}
++	if (theDir->variantType != YAFFS_OBJECT_TYPE_DIRECTORY) {
++		T(YAFFS_TRACE_ALWAYS,
++		  (TSTR
++		   ("tragedy: yaffs_FindObjectByName: non-directory" TENDSTR)));
++		YBUG();
++		return YAFFS_FAIL;
++	}
++
++	ylist_for_each(i, &theDir->variant.directoryVariant.children) {
++		if (i) {
++			l = ylist_entry(i, yaffs_Object, siblings);
++			if (l && !fn(l))
++				return YAFFS_FAIL;
++		}
++	}
++
++	return YAFFS_OK;
++
++}
++#endif
++
++/* GetEquivalentObject dereferences any hard links to get to the
++ * actual object.
++ */
++
++yaffs_Object *yaffs_GetEquivalentObject(yaffs_Object *obj)
++{
++	if (obj && obj->variantType == YAFFS_OBJECT_TYPE_HARDLINK) {
++		/* We want the object id of the equivalent object, not this one */
++		obj = obj->variant.hardLinkVariant.equivalentObject;
++		yaffs_CheckObjectDetailsLoaded(obj);
++	}
++	return obj;
++}
++
++int yaffs_GetObjectName(yaffs_Object *obj, YCHAR *name, int buffSize)
++{
++	memset(name, 0, buffSize * sizeof(YCHAR));
++
++	yaffs_CheckObjectDetailsLoaded(obj);
++
++	if (obj->objectId == YAFFS_OBJECTID_LOSTNFOUND) {
++		yaffs_strncpy(name, YAFFS_LOSTNFOUND_NAME, buffSize - 1);
++	} else if (obj->hdrChunk <= 0) {
++		YCHAR locName[20];
++		YCHAR numString[20];
++		YCHAR *x = &numString[19];
++		unsigned v = obj->objectId;
++		numString[19] = 0;
++		while (v > 0) {
++			x--;
++			*x = '0' + (v % 10);
++			v /= 10;
++		}
++		/* make up a name */
++		yaffs_strcpy(locName, YAFFS_LOSTNFOUND_PREFIX);
++		yaffs_strcat(locName, x);
++		yaffs_strncpy(name, locName, buffSize - 1);
++
++	}
++#ifdef CONFIG_YAFFS_SHORT_NAMES_IN_RAM
++	else if (obj->shortName[0])
++		yaffs_strcpy(name, obj->shortName);
++#endif
++	else {
++		int result;
++		__u8 *buffer = yaffs_GetTempBuffer(obj->myDev, __LINE__);
++
++		yaffs_ObjectHeader *oh = (yaffs_ObjectHeader *) buffer;
++
++		memset(buffer, 0, obj->myDev->nDataBytesPerChunk);
++
++		if (obj->hdrChunk > 0) {
++			result = yaffs_ReadChunkWithTagsFromNAND(obj->myDev,
++							obj->hdrChunk, buffer,
++							NULL);
++		}
++		yaffs_strncpy(name, oh->name, buffSize - 1);
++
++		yaffs_ReleaseTempBuffer(obj->myDev, buffer, __LINE__);
++	}
++
++	return yaffs_strlen(name);
++}
++
++int yaffs_GetObjectFileLength(yaffs_Object *obj)
++{
++	/* Dereference any hard linking */
++	obj = yaffs_GetEquivalentObject(obj);
++
++	if (obj->variantType == YAFFS_OBJECT_TYPE_FILE)
++		return obj->variant.fileVariant.fileSize;
++	if (obj->variantType == YAFFS_OBJECT_TYPE_SYMLINK)
++		return yaffs_strlen(obj->variant.symLinkVariant.alias);
++	else {
++		/* Only a directory should drop through to here */
++		return obj->myDev->nDataBytesPerChunk;
++	}
++}
++
++int yaffs_GetObjectLinkCount(yaffs_Object *obj)
++{
++	int count = 0;
++	struct ylist_head *i;
++
++	if (!obj->unlinked)
++		count++;		/* the object itself */
++
++	ylist_for_each(i, &obj->hardLinks)
++		count++;		/* add the hard links; */
++
++	return count;
++}
++
++int yaffs_GetObjectInode(yaffs_Object *obj)
++{
++	obj = yaffs_GetEquivalentObject(obj);
++
++	return obj->objectId;
++}
++
++unsigned yaffs_GetObjectType(yaffs_Object *obj)
++{
++	obj = yaffs_GetEquivalentObject(obj);
++
++	switch (obj->variantType) {
++	case YAFFS_OBJECT_TYPE_FILE:
++		return DT_REG;
++		break;
++	case YAFFS_OBJECT_TYPE_DIRECTORY:
++		return DT_DIR;
++		break;
++	case YAFFS_OBJECT_TYPE_SYMLINK:
++		return DT_LNK;
++		break;
++	case YAFFS_OBJECT_TYPE_HARDLINK:
++		return DT_REG;
++		break;
++	case YAFFS_OBJECT_TYPE_SPECIAL:
++		if (S_ISFIFO(obj->yst_mode))
++			return DT_FIFO;
++		if (S_ISCHR(obj->yst_mode))
++			return DT_CHR;
++		if (S_ISBLK(obj->yst_mode))
++			return DT_BLK;
++		if (S_ISSOCK(obj->yst_mode))
++			return DT_SOCK;
++	default:
++		return DT_REG;
++		break;
++	}
++}
++
++YCHAR *yaffs_GetSymlinkAlias(yaffs_Object *obj)
++{
++	obj = yaffs_GetEquivalentObject(obj);
++	if (obj->variantType == YAFFS_OBJECT_TYPE_SYMLINK)
++		return yaffs_CloneString(obj->variant.symLinkVariant.alias);
++	else
++		return yaffs_CloneString(_Y(""));
++}
++
++#ifndef CONFIG_YAFFS_WINCE
++
++int yaffs_SetAttributes(yaffs_Object *obj, struct iattr *attr)
++{
++	unsigned int valid = attr->ia_valid;
++
++	if (valid & ATTR_MODE)
++		obj->yst_mode = attr->ia_mode;
++	if (valid & ATTR_UID)
++		obj->yst_uid = attr->ia_uid;
++	if (valid & ATTR_GID)
++		obj->yst_gid = attr->ia_gid;
++
++	if (valid & ATTR_ATIME)
++		obj->yst_atime = Y_TIME_CONVERT(attr->ia_atime);
++	if (valid & ATTR_CTIME)
++		obj->yst_ctime = Y_TIME_CONVERT(attr->ia_ctime);
++	if (valid & ATTR_MTIME)
++		obj->yst_mtime = Y_TIME_CONVERT(attr->ia_mtime);
++
++	if (valid & ATTR_SIZE)
++		yaffs_ResizeFile(obj, attr->ia_size);
++
++	yaffs_UpdateObjectHeader(obj, NULL, 1, 0, 0);
++
++	return YAFFS_OK;
++
++}
++int yaffs_GetAttributes(yaffs_Object *obj, struct iattr *attr)
++{
++	unsigned int valid = 0;
++
++	attr->ia_mode = obj->yst_mode;
++	valid |= ATTR_MODE;
++	attr->ia_uid = obj->yst_uid;
++	valid |= ATTR_UID;
++	attr->ia_gid = obj->yst_gid;
++	valid |= ATTR_GID;
++
++	Y_TIME_CONVERT(attr->ia_atime) = obj->yst_atime;
++	valid |= ATTR_ATIME;
++	Y_TIME_CONVERT(attr->ia_ctime) = obj->yst_ctime;
++	valid |= ATTR_CTIME;
++	Y_TIME_CONVERT(attr->ia_mtime) = obj->yst_mtime;
++	valid |= ATTR_MTIME;
++
++	attr->ia_size = yaffs_GetFileSize(obj);
++	valid |= ATTR_SIZE;
++
++	attr->ia_valid = valid;
++
++	return YAFFS_OK;
++}
++
++#endif
++
++#if 0
++int yaffs_DumpObject(yaffs_Object *obj)
++{
++	YCHAR name[257];
++
++	yaffs_GetObjectName(obj, name, 256);
++
++	T(YAFFS_TRACE_ALWAYS,
++	  (TSTR
++	   ("Object %d, inode %d \"%s\"\n dirty %d valid %d serial %d sum %d"
++	    " chunk %d type %d size %d\n"
++	    TENDSTR), obj->objectId, yaffs_GetObjectInode(obj), name,
++	   obj->dirty, obj->valid, obj->serial, obj->sum, obj->hdrChunk,
++	   yaffs_GetObjectType(obj), yaffs_GetObjectFileLength(obj)));
++
++	return YAFFS_OK;
++}
++#endif
++
++/*---------------------------- Initialisation code -------------------------------------- */
++
++static int yaffs_CheckDevFunctions(const yaffs_Device *dev)
++{
++
++	/* Common functions, gotta have */
++	if (!dev->eraseBlockInNAND || !dev->initialiseNAND)
++		return 0;
++
++#ifdef CONFIG_YAFFS_YAFFS2
++
++	/* Can use the "with tags" style interface for yaffs1 or yaffs2 */
++	if (dev->writeChunkWithTagsToNAND &&
++	    dev->readChunkWithTagsFromNAND &&
++	    !dev->writeChunkToNAND &&
++	    !dev->readChunkFromNAND &&
++	    dev->markNANDBlockBad && dev->queryNANDBlock)
++		return 1;
++#endif
++
++	/* Can use the "spare" style interface for yaffs1 */
++	if (!dev->isYaffs2 &&
++	    !dev->writeChunkWithTagsToNAND &&
++	    !dev->readChunkWithTagsFromNAND &&
++	    dev->writeChunkToNAND &&
++	    dev->readChunkFromNAND &&
++	    !dev->markNANDBlockBad && !dev->queryNANDBlock)
++		return 1;
++
++	return 0;		/* bad */
++}
++
++
++static int yaffs_CreateInitialDirectories(yaffs_Device *dev)
++{
++	/* Initialise the unlinked, deleted, root and lost and found directories */
++
++	dev->lostNFoundDir = dev->rootDir =  NULL;
++	dev->unlinkedDir = dev->deletedDir = NULL;
++
++	dev->unlinkedDir =
++	    yaffs_CreateFakeDirectory(dev, YAFFS_OBJECTID_UNLINKED, S_IFDIR);
++
++	dev->deletedDir =
++	    yaffs_CreateFakeDirectory(dev, YAFFS_OBJECTID_DELETED, S_IFDIR);
++
++	dev->rootDir =
++	    yaffs_CreateFakeDirectory(dev, YAFFS_OBJECTID_ROOT,
++				      YAFFS_ROOT_MODE | S_IFDIR);
++	dev->lostNFoundDir =
++	    yaffs_CreateFakeDirectory(dev, YAFFS_OBJECTID_LOSTNFOUND,
++				      YAFFS_LOSTNFOUND_MODE | S_IFDIR);
++
++	if (dev->lostNFoundDir && dev->rootDir && dev->unlinkedDir && dev->deletedDir) {
++		yaffs_AddObjectToDirectory(dev->rootDir, dev->lostNFoundDir);
++		return YAFFS_OK;
++	}
++
++	return YAFFS_FAIL;
++}
++
++int yaffs_GutsInitialise(yaffs_Device *dev)
++{
++	int init_failed = 0;
++	unsigned x;
++	int bits;
++
++	T(YAFFS_TRACE_TRACING, (TSTR("yaffs: yaffs_GutsInitialise()" TENDSTR)));
++
++	/* Check stuff that must be set */
++
++	if (!dev) {
++		T(YAFFS_TRACE_ALWAYS, (TSTR("yaffs: Need a device" TENDSTR)));
++		return YAFFS_FAIL;
++	}
++
++	dev->internalStartBlock = dev->startBlock;
++	dev->internalEndBlock = dev->endBlock;
++	dev->blockOffset = 0;
++	dev->chunkOffset = 0;
++	dev->nFreeChunks = 0;
++
++	dev->gcBlock = -1;
++
++	if (dev->startBlock == 0) {
++		dev->internalStartBlock = dev->startBlock + 1;
++		dev->internalEndBlock = dev->endBlock + 1;
++		dev->blockOffset = 1;
++		dev->chunkOffset = dev->nChunksPerBlock;
++	}
++
++	/* Check geometry parameters. */
++
++	if ((!dev->inbandTags && dev->isYaffs2 && dev->totalBytesPerChunk < 1024) ||
++	    (!dev->isYaffs2 && dev->totalBytesPerChunk < 512) ||
++	    (dev->inbandTags && !dev->isYaffs2) ||
++	     dev->nChunksPerBlock < 2 ||
++	     dev->nReservedBlocks < 2 ||
++	     dev->internalStartBlock <= 0 ||
++	     dev->internalEndBlock <= 0 ||
++	     dev->internalEndBlock <= (dev->internalStartBlock + dev->nReservedBlocks + 2)) {	/* otherwise it is too small */
++		T(YAFFS_TRACE_ALWAYS,
++		  (TSTR
++		   ("yaffs: NAND geometry problems: chunk size %d, type is yaffs%s, inbandTags %d "
++		    TENDSTR), dev->totalBytesPerChunk, dev->isYaffs2 ? "2" : "", dev->inbandTags));
++		return YAFFS_FAIL;
++	}
++
++	if (yaffs_InitialiseNAND(dev) != YAFFS_OK) {
++		T(YAFFS_TRACE_ALWAYS,
++		  (TSTR("yaffs: InitialiseNAND failed" TENDSTR)));
++		return YAFFS_FAIL;
++	}
++
++	/* Sort out space for inband tags, if required */
++	if (dev->inbandTags)
++		dev->nDataBytesPerChunk = dev->totalBytesPerChunk - sizeof(yaffs_PackedTags2TagsPart);
++	else
++		dev->nDataBytesPerChunk = dev->totalBytesPerChunk;
++
++	/* Got the right mix of functions? */
++	if (!yaffs_CheckDevFunctions(dev)) {
++		/* Function missing */
++		T(YAFFS_TRACE_ALWAYS,
++		  (TSTR
++		   ("yaffs: device function(s) missing or wrong\n" TENDSTR)));
++
++		return YAFFS_FAIL;
++	}
++
++	/* This is really a compilation check. */
++	if (!yaffs_CheckStructures()) {
++		T(YAFFS_TRACE_ALWAYS,
++		  (TSTR("yaffs_CheckStructures failed\n" TENDSTR)));
++		return YAFFS_FAIL;
++	}
++
++	if (dev->isMounted) {
++		T(YAFFS_TRACE_ALWAYS,
++		  (TSTR("yaffs: device already mounted\n" TENDSTR)));
++		return YAFFS_FAIL;
++	}
++
++	/* Finished with most checks. One or two more checks happen later on too. */
++
++	dev->isMounted = 1;
++
++	/* OK now calculate a few things for the device */
++
++	/*
++	 *  Calculate all the chunk size manipulation numbers:
++	 */
++	x = dev->nDataBytesPerChunk;
++	/* We always use dev->chunkShift and dev->chunkDiv */
++	dev->chunkShift = Shifts(x);
++	x >>= dev->chunkShift;
++	dev->chunkDiv = x;
++	/* We only use chunk mask if chunkDiv is 1 */
++	dev->chunkMask = (1<<dev->chunkShift) - 1;
++
++	/*
++	 * Calculate chunkGroupBits.
++	 * We need to find the next power of 2 > than internalEndBlock
++	 */
++
++	x = dev->nChunksPerBlock * (dev->internalEndBlock + 1);
++
++	bits = ShiftsGE(x);
++
++	/* Set up tnode width if wide tnodes are enabled. */
++	if (!dev->wideTnodesDisabled) {
++		/* bits must be even so that we end up with 32-bit words */
++		if (bits & 1)
++			bits++;
++		if (bits < 16)
++			dev->tnodeWidth = 16;
++		else
++			dev->tnodeWidth = bits;
++	} else
++		dev->tnodeWidth = 16;
++
++	dev->tnodeMask = (1<<dev->tnodeWidth)-1;
++
++	/* Level0 Tnodes are 16 bits or wider (if wide tnodes are enabled),
++	 * so if the bitwidth of the
++	 * chunk range we're using is greater than 16 we need
++	 * to figure out chunk shift and chunkGroupSize
++	 */
++
++	if (bits <= dev->tnodeWidth)
++		dev->chunkGroupBits = 0;
++	else
++		dev->chunkGroupBits = bits - dev->tnodeWidth;
++
++
++	dev->chunkGroupSize = 1 << dev->chunkGroupBits;
++
++	if (dev->nChunksPerBlock < dev->chunkGroupSize) {
++		/* We have a problem because the soft delete won't work if
++		 * the chunk group size > chunks per block.
++		 * This can be remedied by using larger "virtual blocks".
++		 */
++		T(YAFFS_TRACE_ALWAYS,
++		  (TSTR("yaffs: chunk group too large\n" TENDSTR)));
++
++		return YAFFS_FAIL;
++	}
++
++	/* OK, we've finished verifying the device, lets continue with initialisation */
++
++	/* More device initialisation */
++	dev->garbageCollections = 0;
++	dev->passiveGarbageCollections = 0;
++	dev->currentDirtyChecker = 0;
++	dev->bufferedBlock = -1;
++	dev->doingBufferedBlockRewrite = 0;
++	dev->nDeletedFiles = 0;
++	dev->nBackgroundDeletions = 0;
++	dev->nUnlinkedFiles = 0;
++	dev->eccFixed = 0;
++	dev->eccUnfixed = 0;
++	dev->tagsEccFixed = 0;
++	dev->tagsEccUnfixed = 0;
++	dev->nErasureFailures = 0;
++	dev->nErasedBlocks = 0;
++	dev->isDoingGC = 0;
++	dev->hasPendingPrioritisedGCs = 1; /* Assume the worst for now, will get fixed on first GC */
++
++	/* Initialise temporary buffers and caches. */
++	if (!yaffs_InitialiseTempBuffers(dev))
++		init_failed = 1;
++
++	dev->srCache = NULL;
++	dev->gcCleanupList = NULL;
++
++
++	if (!init_failed &&
++	    dev->nShortOpCaches > 0) {
++		int i;
++		void *buf;
++		int srCacheBytes = dev->nShortOpCaches * sizeof(yaffs_ChunkCache);
++
++		if (dev->nShortOpCaches > YAFFS_MAX_SHORT_OP_CACHES)
++			dev->nShortOpCaches = YAFFS_MAX_SHORT_OP_CACHES;
++
++		dev->srCache =  YMALLOC(srCacheBytes);
++
++		buf = (__u8 *) dev->srCache;
++
++		if (dev->srCache)
++			memset(dev->srCache, 0, srCacheBytes);
++
++		for (i = 0; i < dev->nShortOpCaches && buf; i++) {
++			dev->srCache[i].object = NULL;
++			dev->srCache[i].lastUse = 0;
++			dev->srCache[i].dirty = 0;
++			dev->srCache[i].data = buf = YMALLOC_DMA(dev->totalBytesPerChunk);
++		}
++		if (!buf)
++			init_failed = 1;
++
++		dev->srLastUse = 0;
++	}
++
++	dev->cacheHits = 0;
++
++	if (!init_failed) {
++		dev->gcCleanupList = YMALLOC(dev->nChunksPerBlock * sizeof(__u32));
++		if (!dev->gcCleanupList)
++			init_failed = 1;
++	}
++
++	if (dev->isYaffs2)
++		dev->useHeaderFileSize = 1;
++
++	if (!init_failed && !yaffs_InitialiseBlocks(dev))
++		init_failed = 1;
++
++	yaffs_InitialiseTnodes(dev);
++	yaffs_InitialiseObjects(dev);
++
++	if (!init_failed && !yaffs_CreateInitialDirectories(dev))
++		init_failed = 1;
++
++
++	if (!init_failed) {
++		/* Now scan the flash. */
++		if (dev->isYaffs2) {
++			if (yaffs_CheckpointRestore(dev)) {
++				yaffs_CheckObjectDetailsLoaded(dev->rootDir);
++				T(YAFFS_TRACE_ALWAYS,
++				  (TSTR("yaffs: restored from checkpoint" TENDSTR)));
++			} else {
++
++				/* Clean up the mess caused by an aborted checkpoint load
++				 * and scan backwards.
++				 */
++				yaffs_DeinitialiseBlocks(dev);
++				yaffs_DeinitialiseTnodes(dev);
++				yaffs_DeinitialiseObjects(dev);
++
++
++				dev->nErasedBlocks = 0;
++				dev->nFreeChunks = 0;
++				dev->allocationBlock = -1;
++				dev->allocationPage = -1;
++				dev->nDeletedFiles = 0;
++				dev->nUnlinkedFiles = 0;
++				dev->nBackgroundDeletions = 0;
++				dev->oldestDirtySequence = 0;
++
++				if (!init_failed && !yaffs_InitialiseBlocks(dev))
++					init_failed = 1;
++
++				yaffs_InitialiseTnodes(dev);
++				yaffs_InitialiseObjects(dev);
++
++				if (!init_failed && !yaffs_CreateInitialDirectories(dev))
++					init_failed = 1;
++
++				if (!init_failed && !yaffs_ScanBackwards(dev))
++					init_failed = 1;
++			}
++		} else if (!yaffs_Scan(dev))
++				init_failed = 1;
++
++		yaffs_StripDeletedObjects(dev);
++	}
++
++	if (init_failed) {
++		/* Clean up the mess */
++		T(YAFFS_TRACE_TRACING,
++		  (TSTR("yaffs: yaffs_GutsInitialise() aborted.\n" TENDSTR)));
++
++		yaffs_Deinitialise(dev);
++		return YAFFS_FAIL;
++	}
++
++	/* Zero out stats */
++	dev->nPageReads = 0;
++	dev->nPageWrites = 0;
++	dev->nBlockErasures = 0;
++	dev->nGCCopies = 0;
++	dev->nRetriedWrites = 0;
++
++	dev->nRetiredBlocks = 0;
++
++	yaffs_VerifyFreeChunks(dev);
++	yaffs_VerifyBlocks(dev);
++
++
++	T(YAFFS_TRACE_TRACING,
++	  (TSTR("yaffs: yaffs_GutsInitialise() done.\n" TENDSTR)));
++	return YAFFS_OK;
++
++}
++
++void yaffs_Deinitialise(yaffs_Device *dev)
++{
++	if (dev->isMounted) {
++		int i;
++
++		yaffs_DeinitialiseBlocks(dev);
++		yaffs_DeinitialiseTnodes(dev);
++		yaffs_DeinitialiseObjects(dev);
++		if (dev->nShortOpCaches > 0 &&
++		    dev->srCache) {
++
++			for (i = 0; i < dev->nShortOpCaches; i++) {
++				if (dev->srCache[i].data)
++					YFREE(dev->srCache[i].data);
++				dev->srCache[i].data = NULL;
++			}
++
++			YFREE(dev->srCache);
++			dev->srCache = NULL;
++		}
++
++		YFREE(dev->gcCleanupList);
++
++		for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++)
++			YFREE(dev->tempBuffer[i].buffer);
++
++		dev->isMounted = 0;
++
++		if (dev->deinitialiseNAND)
++			dev->deinitialiseNAND(dev);
++	}
++}
++
++static int yaffs_CountFreeChunks(yaffs_Device *dev)
++{
++	int nFree;
++	int b;
++
++	yaffs_BlockInfo *blk;
++
++	for (nFree = 0, b = dev->internalStartBlock; b <= dev->internalEndBlock;
++			b++) {
++		blk = yaffs_GetBlockInfo(dev, b);
++
++		switch (blk->blockState) {
++		case YAFFS_BLOCK_STATE_EMPTY:
++		case YAFFS_BLOCK_STATE_ALLOCATING:
++		case YAFFS_BLOCK_STATE_COLLECTING:
++		case YAFFS_BLOCK_STATE_FULL:
++			nFree +=
++			    (dev->nChunksPerBlock - blk->pagesInUse +
++			     blk->softDeletions);
++			break;
++		default:
++			break;
++		}
++	}
++
++	return nFree;
++}
++
++int yaffs_GetNumberOfFreeChunks(yaffs_Device *dev)
++{
++	/* This is what we report to the outside world */
++
++	int nFree;
++	int nDirtyCacheChunks;
++	int blocksForCheckpoint;
++	int i;
++
++#if 1
++	nFree = dev->nFreeChunks;
++#else
++	nFree = yaffs_CountFreeChunks(dev);
++#endif
++
++	nFree += dev->nDeletedFiles;
++
++	/* Now count the number of dirty chunks in the cache and subtract those */
++
++	for (nDirtyCacheChunks = 0, i = 0; i < dev->nShortOpCaches; i++) {
++		if (dev->srCache[i].dirty)
++			nDirtyCacheChunks++;
++	}
++
++	nFree -= nDirtyCacheChunks;
++
++	nFree -= ((dev->nReservedBlocks + 1) * dev->nChunksPerBlock);
++
++	/* Now we figure out how much to reserve for the checkpoint and report that... */
++	blocksForCheckpoint = yaffs_CalcCheckpointBlocksRequired(dev) - dev->blocksInCheckpoint;
++	if (blocksForCheckpoint < 0)
++		blocksForCheckpoint = 0;
++
++	nFree -= (blocksForCheckpoint * dev->nChunksPerBlock);
++
++	if (nFree < 0)
++		nFree = 0;
++
++	return nFree;
++
++}
++
++static int yaffs_freeVerificationFailures;
++
++static void yaffs_VerifyFreeChunks(yaffs_Device *dev)
++{
++	int counted;
++	int difference;
++
++	if (yaffs_SkipVerification(dev))
++		return;
++
++	counted = yaffs_CountFreeChunks(dev);
++
++	difference = dev->nFreeChunks - counted;
++
++	if (difference) {
++		T(YAFFS_TRACE_ALWAYS,
++		  (TSTR("Freechunks verification failure %d %d %d" TENDSTR),
++		   dev->nFreeChunks, counted, difference));
++		yaffs_freeVerificationFailures++;
++	}
++}
++
++/*---------------------------------------- YAFFS test code ----------------------*/
++
++#define yaffs_CheckStruct(structure, syze, name) \
++	do { \
++		if (sizeof(structure) != syze) { \
++			T(YAFFS_TRACE_ALWAYS, (TSTR("%s should be %d but is %d\n" TENDSTR),\
++				name, syze, sizeof(structure))); \
++			return YAFFS_FAIL; \
++		} \
++	} while (0)
++
++static int yaffs_CheckStructures(void)
++{
++/*      yaffs_CheckStruct(yaffs_Tags,8,"yaffs_Tags"); */
++/*      yaffs_CheckStruct(yaffs_TagsUnion,8,"yaffs_TagsUnion"); */
++/*      yaffs_CheckStruct(yaffs_Spare,16,"yaffs_Spare"); */
++#ifndef CONFIG_YAFFS_TNODE_LIST_DEBUG
++	yaffs_CheckStruct(yaffs_Tnode, 2 * YAFFS_NTNODES_LEVEL0, "yaffs_Tnode");
++#endif
++#ifndef CONFIG_YAFFS_WINCE
++	yaffs_CheckStruct(yaffs_ObjectHeader, 512, "yaffs_ObjectHeader");
++#endif
++	return YAFFS_OK;
++}
+diff -Nur linux-2.6.32.orig/fs/yaffs2/yaffs_guts.h linux-2.6.32/fs/yaffs2/yaffs_guts.h
+--- linux-2.6.32.orig/fs/yaffs2/yaffs_guts.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.32/fs/yaffs2/yaffs_guts.h	2010-01-30 20:35:01.441990129 +0100
+@@ -0,0 +1,904 @@
++/*
++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2007 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YAFFS_GUTS_H__
++#define __YAFFS_GUTS_H__
++
++#include "devextras.h"
++#include "yportenv.h"
++
++#define YAFFS_OK	1
++#define YAFFS_FAIL  0
++
++/* Give us a  Y=0x59,
++ * Give us an A=0x41,
++ * Give us an FF=0xFF
++ * Give us an S=0x53
++ * And what have we got...
++ */
++#define YAFFS_MAGIC			0x5941FF53
++
++#define YAFFS_NTNODES_LEVEL0	  	16
++#define YAFFS_TNODES_LEVEL0_BITS	4
++#define YAFFS_TNODES_LEVEL0_MASK	0xf
++
++#define YAFFS_NTNODES_INTERNAL 		(YAFFS_NTNODES_LEVEL0 / 2)
++#define YAFFS_TNODES_INTERNAL_BITS 	(YAFFS_TNODES_LEVEL0_BITS - 1)
++#define YAFFS_TNODES_INTERNAL_MASK	0x7
++#define YAFFS_TNODES_MAX_LEVEL		6
++
++#ifndef CONFIG_YAFFS_NO_YAFFS1
++#define YAFFS_BYTES_PER_SPARE		16
++#define YAFFS_BYTES_PER_CHUNK		512
++#define YAFFS_CHUNK_SIZE_SHIFT		9
++#define YAFFS_CHUNKS_PER_BLOCK		32
++#define YAFFS_BYTES_PER_BLOCK		(YAFFS_CHUNKS_PER_BLOCK*YAFFS_BYTES_PER_CHUNK)
++#endif
++
++#define YAFFS_MIN_YAFFS2_CHUNK_SIZE 	1024
++#define YAFFS_MIN_YAFFS2_SPARE_SIZE	32
++
++#define YAFFS_MAX_CHUNK_ID		0x000FFFFF
++
++#define YAFFS_UNUSED_OBJECT_ID		0x0003FFFF
++
++#define YAFFS_ALLOCATION_NOBJECTS	100
++#define YAFFS_ALLOCATION_NTNODES	100
++#define YAFFS_ALLOCATION_NLINKS		100
++
++#define YAFFS_NOBJECT_BUCKETS		256
++
++
++#define YAFFS_OBJECT_SPACE		0x40000
++
++#define YAFFS_CHECKPOINT_VERSION 	3
++
++#ifdef CONFIG_YAFFS_UNICODE
++#define YAFFS_MAX_NAME_LENGTH		127
++#define YAFFS_MAX_ALIAS_LENGTH		79
++#else
++#define YAFFS_MAX_NAME_LENGTH		255
++#define YAFFS_MAX_ALIAS_LENGTH		159
++#endif
++
++#define YAFFS_SHORT_NAME_LENGTH		15
++
++/* Some special object ids for pseudo objects */
++#define YAFFS_OBJECTID_ROOT		1
++#define YAFFS_OBJECTID_LOSTNFOUND	2
++#define YAFFS_OBJECTID_UNLINKED		3
++#define YAFFS_OBJECTID_DELETED		4
++
++/* Pseudo object ids for checkpointing */
++#define YAFFS_OBJECTID_SB_HEADER	0x10
++#define YAFFS_OBJECTID_CHECKPOINT_DATA	0x20
++#define YAFFS_SEQUENCE_CHECKPOINT_DATA  0x21
++
++/* */
++
++#define YAFFS_MAX_SHORT_OP_CACHES	20
++
++#define YAFFS_N_TEMP_BUFFERS		6
++
++/* We limit the number of attempts at successfully saving a chunk of data.
++ * Small-page devices have 32 pages per block; large-page devices have 64.
++ * Default to something in the order of 5 to 10 blocks worth of chunks.
++ */
++#define YAFFS_WR_ATTEMPTS		(5*64)
++
++/* Sequence numbers are used in YAFFS2 to determine block allocation order.
++ * The range is limited slightly to help distinguish bad numbers from good.
++ * This also allows us to perhaps in the future use special numbers for
++ * special purposes.
++ * EFFFFF00 allows the allocation of 8 blocks per second (~1Mbytes) for 15 years,
++ * and is a larger number than the lifetime of a 2GB device.
++ */
++#define YAFFS_LOWEST_SEQUENCE_NUMBER	0x00001000
++#define YAFFS_HIGHEST_SEQUENCE_NUMBER	0xEFFFFF00
++
++/* Special sequence number for bad block that failed to be marked bad */
++#define YAFFS_SEQUENCE_BAD_BLOCK	0xFFFF0000
++
++/* ChunkCache is used for short read/write operations.*/
++typedef struct {
++	struct yaffs_ObjectStruct *object;
++	int chunkId;
++	int lastUse;
++	int dirty;
++	int nBytes;		/* Only valid if the cache is dirty */
++	int locked;		/* Can't push out or flush while locked. */
++#ifdef CONFIG_YAFFS_YAFFS2
++	__u8 *data;
++#else
++	__u8 data[YAFFS_BYTES_PER_CHUNK];
++#endif
++} yaffs_ChunkCache;
++
++
++
++/* Tags structures in RAM
++ * NB This uses bitfield. Bitfields should not straddle a u32 boundary otherwise
++ * the structure size will get blown out.
++ */
++
++#ifndef CONFIG_YAFFS_NO_YAFFS1
++typedef struct {
++	unsigned chunkId:20;
++	unsigned serialNumber:2;
++	unsigned byteCountLSB:10;
++	unsigned objectId:18;
++	unsigned ecc:12;
++	unsigned byteCountMSB:2;
++} yaffs_Tags;
++
++typedef union {
++	yaffs_Tags asTags;
++	__u8 asBytes[8];
++} yaffs_TagsUnion;
++
++#endif
++
++/* Stuff used for extended tags in YAFFS2 */
++
++typedef enum {
++	YAFFS_ECC_RESULT_UNKNOWN,
++	YAFFS_ECC_RESULT_NO_ERROR,
++	YAFFS_ECC_RESULT_FIXED,
++	YAFFS_ECC_RESULT_UNFIXED
++} yaffs_ECCResult;
++
++typedef enum {
++	YAFFS_OBJECT_TYPE_UNKNOWN,
++	YAFFS_OBJECT_TYPE_FILE,
++	YAFFS_OBJECT_TYPE_SYMLINK,
++	YAFFS_OBJECT_TYPE_DIRECTORY,
++	YAFFS_OBJECT_TYPE_HARDLINK,
++	YAFFS_OBJECT_TYPE_SPECIAL
++} yaffs_ObjectType;
++
++#define YAFFS_OBJECT_TYPE_MAX YAFFS_OBJECT_TYPE_SPECIAL
++
++typedef struct {
++
++	unsigned validMarker0;
++	unsigned chunkUsed;	/*  Status of the chunk: used or unused */
++	unsigned objectId;	/* If 0 then this is not part of an object (unused) */
++	unsigned chunkId;	/* If 0 then this is a header, else a data chunk */
++	unsigned byteCount;	/* Only valid for data chunks */
++
++	/* The following stuff only has meaning when we read */
++	yaffs_ECCResult eccResult;
++	unsigned blockBad;
++
++	/* YAFFS 1 stuff */
++	unsigned chunkDeleted;	/* The chunk is marked deleted */
++	unsigned serialNumber;	/* Yaffs1 2-bit serial number */
++
++	/* YAFFS2 stuff */
++	unsigned sequenceNumber;	/* The sequence number of this block */
++
++	/* Extra info if this is an object header (YAFFS2 only) */
++
++	unsigned extraHeaderInfoAvailable;	/* There is extra info available if this is not zero */
++	unsigned extraParentObjectId;	/* The parent object */
++	unsigned extraIsShrinkHeader;	/* Is it a shrink header? */
++	unsigned extraShadows;		/* Does this shadow another object? */
++
++	yaffs_ObjectType extraObjectType;	/* What object type? */
++
++	unsigned extraFileLength;		/* Length if it is a file */
++	unsigned extraEquivalentObjectId;	/* Equivalent object Id if it is a hard link */
++
++	unsigned validMarker1;
++
++} yaffs_ExtendedTags;
++
++/* Spare structure for YAFFS1 */
++typedef struct {
++	__u8 tagByte0;
++	__u8 tagByte1;
++	__u8 tagByte2;
++	__u8 tagByte3;
++	__u8 pageStatus;	/* set to 0 to delete the chunk */
++	__u8 blockStatus;
++	__u8 tagByte4;
++	__u8 tagByte5;
++	__u8 ecc1[3];
++	__u8 tagByte6;
++	__u8 tagByte7;
++	__u8 ecc2[3];
++} yaffs_Spare;
++
++/*Special structure for passing through to mtd */
++struct yaffs_NANDSpare {
++	yaffs_Spare spare;
++	int eccres1;
++	int eccres2;
++};
++
++/* Block data in RAM */
++
++typedef enum {
++	YAFFS_BLOCK_STATE_UNKNOWN = 0,
++
++	YAFFS_BLOCK_STATE_SCANNING,
++	YAFFS_BLOCK_STATE_NEEDS_SCANNING,
++	/* The block might have something on it (ie it is allocating or full, perhaps empty)
++	 * but it needs to be scanned to determine its true state.
++	 * This state is only valid during yaffs_Scan.
++	 * NB We tolerate empty because the pre-scanner might be incapable of deciding
++	 * However, if this state is returned on a YAFFS2 device, then we expect a sequence number
++	 */
++
++	YAFFS_BLOCK_STATE_EMPTY,
++	/* This block is empty */
++
++	YAFFS_BLOCK_STATE_ALLOCATING,
++	/* This block is partially allocated.
++	 * At least one page holds valid data.
++	 * This is the one currently being used for page
++	 * allocation. Should never be more than one of these
++	 */
++
++	YAFFS_BLOCK_STATE_FULL,
++	/* All the pages in this block have been allocated.
++	 */
++
++	YAFFS_BLOCK_STATE_DIRTY,
++	/* All pages have been allocated and deleted.
++	 * Erase me, reuse me.
++	 */
++
++	YAFFS_BLOCK_STATE_CHECKPOINT,
++	/* This block is assigned to holding checkpoint data.
++	 */
++
++	YAFFS_BLOCK_STATE_COLLECTING,
++	/* This block is being garbage collected */
++
++	YAFFS_BLOCK_STATE_DEAD
++	/* This block has failed and is not in use */
++} yaffs_BlockState;
++
++#define	YAFFS_NUMBER_OF_BLOCK_STATES (YAFFS_BLOCK_STATE_DEAD + 1)
++
++
++typedef struct {
++
++	int softDeletions:10;	/* number of soft deleted pages */
++	int pagesInUse:10;	/* number of pages in use */
++	unsigned blockState:4;	/* One of the above block states. NB use unsigned because enum is sometimes an int */
++	__u32 needsRetiring:1;	/* Data has failed on this block, need to get valid data off */
++				/* and retire the block. */
++	__u32 skipErasedCheck:1; /* If this is set we can skip the erased check on this block */
++	__u32 gcPrioritise:1; 	/* An ECC check or blank check has failed on this block.
++				   It should be prioritised for GC */
++	__u32 chunkErrorStrikes:3; /* How many times we've had ecc etc failures on this block and tried to reuse it */
++
++#ifdef CONFIG_YAFFS_YAFFS2
++	__u32 hasShrinkHeader:1; /* This block has at least one shrink object header */
++	__u32 sequenceNumber;	 /* block sequence number for yaffs2 */
++#endif
++
++} yaffs_BlockInfo;
++
++/* -------------------------- Object structure -------------------------------*/
++/* This is the object structure as stored on NAND */
++
++typedef struct {
++	yaffs_ObjectType type;
++
++	/* Apply to everything  */
++	int parentObjectId;
++	__u16 sum__NoLongerUsed;        /* checksum of name. No longer used */
++	YCHAR name[YAFFS_MAX_NAME_LENGTH + 1];
++
++	/* The following apply to directories, files, symlinks - not hard links */
++	__u32 yst_mode;         /* protection */
++
++#ifdef CONFIG_YAFFS_WINCE
++	__u32 notForWinCE[5];
++#else
++	__u32 yst_uid;
++	__u32 yst_gid;
++	__u32 yst_atime;
++	__u32 yst_mtime;
++	__u32 yst_ctime;
++#endif
++
++	/* File size  applies to files only */
++	int fileSize;
++
++	/* Equivalent object id applies to hard links only. */
++	int equivalentObjectId;
++
++	/* Alias is for symlinks only. */
++	YCHAR alias[YAFFS_MAX_ALIAS_LENGTH + 1];
++
++	__u32 yst_rdev;		/* device stuff for block and char devices (major/min) */
++
++#ifdef CONFIG_YAFFS_WINCE
++	__u32 win_ctime[2];
++	__u32 win_atime[2];
++	__u32 win_mtime[2];
++#else
++	__u32 roomToGrow[6];
++
++#endif
++	__u32 inbandShadowsObject;
++	__u32 inbandIsShrink;
++
++	__u32 reservedSpace[2];
++	int shadowsObject;	/* This object header shadows the specified object if > 0 */
++
++	/* isShrink applies to object headers written when we shrink the file (ie resize) */
++	__u32 isShrink;
++
++} yaffs_ObjectHeader;
++
++/*--------------------------- Tnode -------------------------- */
++
++union yaffs_Tnode_union {
++#ifdef CONFIG_YAFFS_TNODE_LIST_DEBUG
++	union yaffs_Tnode_union *internal[YAFFS_NTNODES_INTERNAL + 1];
++#else
++	union yaffs_Tnode_union *internal[YAFFS_NTNODES_INTERNAL];
++#endif
++/*	__u16 level0[YAFFS_NTNODES_LEVEL0]; */
++
++};
++
++typedef union yaffs_Tnode_union yaffs_Tnode;
++
++struct yaffs_TnodeList_struct {
++	struct yaffs_TnodeList_struct *next;
++	yaffs_Tnode *tnodes;
++};
++
++typedef struct yaffs_TnodeList_struct yaffs_TnodeList;
++
++/*------------------------  Object -----------------------------*/
++/* An object can be one of:
++ * - a directory (no data, has children links
++ * - a regular file (data.... not prunes :->).
++ * - a symlink [symbolic link] (the alias).
++ * - a hard link
++ */
++
++typedef struct {
++	__u32 fileSize;
++	__u32 scannedFileSize;
++	__u32 shrinkSize;
++	int topLevel;
++	yaffs_Tnode *top;
++} yaffs_FileStructure;
++
++typedef struct {
++	struct ylist_head children;     /* list of child links */
++} yaffs_DirectoryStructure;
++
++typedef struct {
++	YCHAR *alias;
++} yaffs_SymLinkStructure;
++
++typedef struct {
++	struct yaffs_ObjectStruct *equivalentObject;
++	__u32 equivalentObjectId;
++} yaffs_HardLinkStructure;
++
++typedef union {
++	yaffs_FileStructure fileVariant;
++	yaffs_DirectoryStructure directoryVariant;
++	yaffs_SymLinkStructure symLinkVariant;
++	yaffs_HardLinkStructure hardLinkVariant;
++} yaffs_ObjectVariant;
++
++struct yaffs_ObjectStruct {
++	__u8 deleted:1;		/* This should only apply to unlinked files. */
++	__u8 softDeleted:1;	/* it has also been soft deleted */
++	__u8 unlinked:1;	/* An unlinked file. The file should be in the unlinked directory.*/
++	__u8 fake:1;		/* A fake object has no presence on NAND. */
++	__u8 renameAllowed:1;	/* Some objects are not allowed to be renamed. */
++	__u8 unlinkAllowed:1;
++	__u8 dirty:1;		/* the object needs to be written to flash */
++	__u8 valid:1;		/* When the file system is being loaded up, this
++				 * object might be created before the data
++				 * is available (ie. file data records appear before the header).
++				 */
++	__u8 lazyLoaded:1;	/* This object has been lazy loaded and is missing some detail */
++
++	__u8 deferedFree:1;	/* For Linux kernel. Object is removed from NAND, but is
++				 * still in the inode cache. Free of object is defered.
++				 * until the inode is released.
++				 */
++	__u8 beingCreated:1;	/* This object is still being created so skip some checks. */
++
++	__u8 serial;		/* serial number of chunk in NAND. Cached here */
++	__u16 sum;		/* sum of the name to speed searching */
++
++	struct yaffs_DeviceStruct *myDev;       /* The device I'm on */
++
++	struct ylist_head hashLink;     /* list of objects in this hash bucket */
++
++	struct ylist_head hardLinks;    /* all the equivalent hard linked objects */
++
++	/* directory structure stuff */
++	/* also used for linking up the free list */
++	struct yaffs_ObjectStruct *parent;
++	struct ylist_head siblings;
++
++	/* Where's my object header in NAND? */
++	int hdrChunk;
++
++	int nDataChunks;	/* Number of data chunks attached to the file. */
++
++	__u32 objectId;		/* the object id value */
++
++	__u32 yst_mode;
++
++#ifdef CONFIG_YAFFS_SHORT_NAMES_IN_RAM
++	YCHAR shortName[YAFFS_SHORT_NAME_LENGTH + 1];
++#endif
++
++#ifndef __KERNEL__
++	__u32 inUse;
++#endif
++
++#ifdef CONFIG_YAFFS_WINCE
++	__u32 win_ctime[2];
++	__u32 win_mtime[2];
++	__u32 win_atime[2];
++#else
++	__u32 yst_uid;
++	__u32 yst_gid;
++	__u32 yst_atime;
++	__u32 yst_mtime;
++	__u32 yst_ctime;
++#endif
++
++	__u32 yst_rdev;
++
++#ifdef __KERNEL__
++	struct inode *myInode;
++
++#endif
++
++	yaffs_ObjectType variantType;
++
++	yaffs_ObjectVariant variant;
++
++};
++
++typedef struct yaffs_ObjectStruct yaffs_Object;
++
++struct yaffs_ObjectList_struct {
++	yaffs_Object *objects;
++	struct yaffs_ObjectList_struct *next;
++};
++
++typedef struct yaffs_ObjectList_struct yaffs_ObjectList;
++
++typedef struct {
++	struct ylist_head list;
++	int count;
++} yaffs_ObjectBucket;
++
++
++/* yaffs_CheckpointObject holds the definition of an object as dumped
++ * by checkpointing.
++ */
++
++typedef struct {
++	int structType;
++	__u32 objectId;
++	__u32 parentId;
++	int hdrChunk;
++	yaffs_ObjectType variantType:3;
++	__u8 deleted:1;
++	__u8 softDeleted:1;
++	__u8 unlinked:1;
++	__u8 fake:1;
++	__u8 renameAllowed:1;
++	__u8 unlinkAllowed:1;
++	__u8 serial;
++
++	int nDataChunks;
++	__u32 fileSizeOrEquivalentObjectId;
++} yaffs_CheckpointObject;
++
++/*--------------------- Temporary buffers ----------------
++ *
++ * These are chunk-sized working buffers. Each device has a few
++ */
++
++typedef struct {
++	__u8 *buffer;
++	int line;	/* track from whence this buffer was allocated */
++	int maxLine;
++} yaffs_TempBuffer;
++
++/*----------------- Device ---------------------------------*/
++
++struct yaffs_DeviceStruct {
++	struct ylist_head devList;
++	const char *name;
++
++	/* Entry parameters set up way early. Yaffs sets up the rest.*/
++	int nDataBytesPerChunk;	/* Should be a power of 2 >= 512 */
++	int nChunksPerBlock;	/* does not need to be a power of 2 */
++	int spareBytesPerChunk;	/* spare area size */
++	int startBlock;		/* Start block we're allowed to use */
++	int endBlock;		/* End block we're allowed to use */
++	int nReservedBlocks;	/* We want this tuneable so that we can reduce */
++				/* reserved blocks on NOR and RAM. */
++
++
++	/* Stuff used by the shared space checkpointing mechanism */
++	/* If this value is zero, then this mechanism is disabled */
++
++/*	int nCheckpointReservedBlocks; */ /* Blocks to reserve for checkpoint data */
++
++
++	int nShortOpCaches;	/* If <= 0, then short op caching is disabled, else
++				 * the number of short op caches (don't use too many)
++				 */
++
++	int useHeaderFileSize;	/* Flag to determine if we should use file sizes from the header */
++
++	int useNANDECC;		/* Flag to decide whether or not to use NANDECC */
++
++	void *genericDevice;	/* Pointer to device context
++				 * On an mtd this holds the mtd pointer.
++				 */
++	void *superBlock;
++
++	/* NAND access functions (Must be set before calling YAFFS)*/
++
++	int (*writeChunkToNAND) (struct yaffs_DeviceStruct *dev,
++					int chunkInNAND, const __u8 *data,
++					const yaffs_Spare *spare);
++	int (*readChunkFromNAND) (struct yaffs_DeviceStruct *dev,
++					int chunkInNAND, __u8 *data,
++					yaffs_Spare *spare);
++	int (*eraseBlockInNAND) (struct yaffs_DeviceStruct *dev,
++					int blockInNAND);
++	int (*initialiseNAND) (struct yaffs_DeviceStruct *dev);
++	int (*deinitialiseNAND) (struct yaffs_DeviceStruct *dev);
++
++#ifdef CONFIG_YAFFS_YAFFS2
++	int (*writeChunkWithTagsToNAND) (struct yaffs_DeviceStruct *dev,
++					 int chunkInNAND, const __u8 *data,
++					 const yaffs_ExtendedTags *tags);
++	int (*readChunkWithTagsFromNAND) (struct yaffs_DeviceStruct *dev,
++					  int chunkInNAND, __u8 *data,
++					  yaffs_ExtendedTags *tags);
++	int (*markNANDBlockBad) (struct yaffs_DeviceStruct *dev, int blockNo);
++	int (*queryNANDBlock) (struct yaffs_DeviceStruct *dev, int blockNo,
++			       yaffs_BlockState *state, __u32 *sequenceNumber);
++#endif
++
++	int isYaffs2;
++
++	/* The removeObjectCallback function must be supplied by OS flavours that
++	 * need it. The Linux kernel does not use this, but yaffs direct does use
++	 * it to implement the faster readdir
++	 */
++	void (*removeObjectCallback)(struct yaffs_ObjectStruct *obj);
++
++	/* Callback to mark the superblock dirsty */
++	void (*markSuperBlockDirty)(void *superblock);
++
++	int wideTnodesDisabled; /* Set to disable wide tnodes */
++
++	YCHAR *pathDividers;	/* String of legal path dividers */
++
++
++	/* End of stuff that must be set before initialisation. */
++
++	/* Checkpoint control. Can be set before or after initialisation */
++	__u8 skipCheckpointRead;
++	__u8 skipCheckpointWrite;
++
++	/* Runtime parameters. Set up by YAFFS. */
++
++	__u16 chunkGroupBits;	/* 0 for devices <= 32MB. else log2(nchunks) - 16 */
++	__u16 chunkGroupSize;	/* == 2^^chunkGroupBits */
++
++	/* Stuff to support wide tnodes */
++	__u32 tnodeWidth;
++	__u32 tnodeMask;
++
++	/* Stuff for figuring out file offset to chunk conversions */
++	__u32 chunkShift; /* Shift value */
++	__u32 chunkDiv;   /* Divisor after shifting: 1 for power-of-2 sizes */
++	__u32 chunkMask;  /* Mask to use for power-of-2 case */
++
++	/* Stuff to handle inband tags */
++	int inbandTags;
++	__u32 totalBytesPerChunk;
++
++#ifdef __KERNEL__
++
++	struct semaphore sem;	/* Semaphore for waiting on erasure.*/
++	struct semaphore grossLock;	/* Gross locking semaphore */
++	__u8 *spareBuffer;	/* For mtdif2 use. Don't know the size of the buffer
++				 * at compile time so we have to allocate it.
++				 */
++	void (*putSuperFunc) (struct super_block *sb);
++#endif
++
++	int isMounted;
++
++	int isCheckpointed;
++
++
++	/* Stuff to support block offsetting to support start block zero */
++	int internalStartBlock;
++	int internalEndBlock;
++	int blockOffset;
++	int chunkOffset;
++
++
++	/* Runtime checkpointing stuff */
++	int checkpointPageSequence;   /* running sequence number of checkpoint pages */
++	int checkpointByteCount;
++	int checkpointByteOffset;
++	__u8 *checkpointBuffer;
++	int checkpointOpenForWrite;
++	int blocksInCheckpoint;
++	int checkpointCurrentChunk;
++	int checkpointCurrentBlock;
++	int checkpointNextBlock;
++	int *checkpointBlockList;
++	int checkpointMaxBlocks;
++	__u32 checkpointSum;
++	__u32 checkpointXor;
++
++	int nCheckpointBlocksRequired; /* Number of blocks needed to store current checkpoint set */
++
++	/* Block Info */
++	yaffs_BlockInfo *blockInfo;
++	__u8 *chunkBits;	/* bitmap of chunks in use */
++	unsigned blockInfoAlt:1;	/* was allocated using alternative strategy */
++	unsigned chunkBitsAlt:1;	/* was allocated using alternative strategy */
++	int chunkBitmapStride;	/* Number of bytes of chunkBits per block.
++				 * Must be consistent with nChunksPerBlock.
++				 */
++
++	int nErasedBlocks;
++	int allocationBlock;	/* Current block being allocated off */
++	__u32 allocationPage;
++	int allocationBlockFinder;	/* Used to search for next allocation block */
++
++	/* Runtime state */
++	int nTnodesCreated;
++	yaffs_Tnode *freeTnodes;
++	int nFreeTnodes;
++	yaffs_TnodeList *allocatedTnodeList;
++
++	int isDoingGC;
++	int gcBlock;
++	int gcChunk;
++
++	int nObjectsCreated;
++	yaffs_Object *freeObjects;
++	int nFreeObjects;
++
++	int nHardLinks;
++
++	yaffs_ObjectList *allocatedObjectList;
++
++	yaffs_ObjectBucket objectBucket[YAFFS_NOBJECT_BUCKETS];
++
++	int nFreeChunks;
++
++	int currentDirtyChecker;	/* Used to find current dirtiest block */
++
++	__u32 *gcCleanupList;	/* objects to delete at the end of a GC. */
++	int nonAggressiveSkip;	/* GC state/mode */
++
++	/* Statistcs */
++	int nPageWrites;
++	int nPageReads;
++	int nBlockErasures;
++	int nErasureFailures;
++	int nGCCopies;
++	int garbageCollections;
++	int passiveGarbageCollections;
++	int nRetriedWrites;
++	int nRetiredBlocks;
++	int eccFixed;
++	int eccUnfixed;
++	int tagsEccFixed;
++	int tagsEccUnfixed;
++	int nDeletions;
++	int nUnmarkedDeletions;
++
++	int hasPendingPrioritisedGCs; /* We think this device might have pending prioritised gcs */
++
++	/* Special directories */
++	yaffs_Object *rootDir;
++	yaffs_Object *lostNFoundDir;
++
++	/* Buffer areas for storing data to recover from write failures TODO
++	 *      __u8            bufferedData[YAFFS_CHUNKS_PER_BLOCK][YAFFS_BYTES_PER_CHUNK];
++	 *      yaffs_Spare bufferedSpare[YAFFS_CHUNKS_PER_BLOCK];
++	 */
++
++	int bufferedBlock;	/* Which block is buffered here? */
++	int doingBufferedBlockRewrite;
++
++	yaffs_ChunkCache *srCache;
++	int srLastUse;
++
++	int cacheHits;
++
++	/* Stuff for background deletion and unlinked files.*/
++	yaffs_Object *unlinkedDir;	/* Directory where unlinked and deleted files live. */
++	yaffs_Object *deletedDir;	/* Directory where deleted objects are sent to disappear. */
++	yaffs_Object *unlinkedDeletion;	/* Current file being background deleted.*/
++	int nDeletedFiles;		/* Count of files awaiting deletion;*/
++	int nUnlinkedFiles;		/* Count of unlinked files. */
++	int nBackgroundDeletions;	/* Count of background deletions. */
++
++
++	/* Temporary buffer management */
++	yaffs_TempBuffer tempBuffer[YAFFS_N_TEMP_BUFFERS];
++	int maxTemp;
++	int tempInUse;
++	int unmanagedTempAllocations;
++	int unmanagedTempDeallocations;
++
++	/* yaffs2 runtime stuff */
++	unsigned sequenceNumber;	/* Sequence number of currently allocating block */
++	unsigned oldestDirtySequence;
++
++};
++
++typedef struct yaffs_DeviceStruct yaffs_Device;
++
++/* The static layout of block usage etc is stored in the super block header */
++typedef struct {
++	int StructType;
++	int version;
++	int checkpointStartBlock;
++	int checkpointEndBlock;
++	int startBlock;
++	int endBlock;
++	int rfu[100];
++} yaffs_SuperBlockHeader;
++
++/* The CheckpointDevice structure holds the device information that changes at runtime and
++ * must be preserved over unmount/mount cycles.
++ */
++typedef struct {
++	int structType;
++	int nErasedBlocks;
++	int allocationBlock;	/* Current block being allocated off */
++	__u32 allocationPage;
++	int nFreeChunks;
++
++	int nDeletedFiles;		/* Count of files awaiting deletion;*/
++	int nUnlinkedFiles;		/* Count of unlinked files. */
++	int nBackgroundDeletions;	/* Count of background deletions. */
++
++	/* yaffs2 runtime stuff */
++	unsigned sequenceNumber;	/* Sequence number of currently allocating block */
++	unsigned oldestDirtySequence;
++
++} yaffs_CheckpointDevice;
++
++
++typedef struct {
++	int structType;
++	__u32 magic;
++	__u32 version;
++	__u32 head;
++} yaffs_CheckpointValidity;
++
++
++/*----------------------- YAFFS Functions -----------------------*/
++
++int yaffs_GutsInitialise(yaffs_Device *dev);
++void yaffs_Deinitialise(yaffs_Device *dev);
++
++int yaffs_GetNumberOfFreeChunks(yaffs_Device *dev);
++
++int yaffs_RenameObject(yaffs_Object *oldDir, const YCHAR *oldName,
++		       yaffs_Object *newDir, const YCHAR *newName);
++
++int yaffs_Unlink(yaffs_Object *dir, const YCHAR *name);
++int yaffs_DeleteObject(yaffs_Object *obj);
++
++int yaffs_GetObjectName(yaffs_Object *obj, YCHAR *name, int buffSize);
++int yaffs_GetObjectFileLength(yaffs_Object *obj);
++int yaffs_GetObjectInode(yaffs_Object *obj);
++unsigned yaffs_GetObjectType(yaffs_Object *obj);
++int yaffs_GetObjectLinkCount(yaffs_Object *obj);
++
++int yaffs_SetAttributes(yaffs_Object *obj, struct iattr *attr);
++int yaffs_GetAttributes(yaffs_Object *obj, struct iattr *attr);
++
++/* File operations */
++int yaffs_ReadDataFromFile(yaffs_Object *obj, __u8 *buffer, loff_t offset,
++				int nBytes);
++int yaffs_WriteDataToFile(yaffs_Object *obj, const __u8 *buffer, loff_t offset,
++				int nBytes, int writeThrough);
++int yaffs_ResizeFile(yaffs_Object *obj, loff_t newSize);
++
++yaffs_Object *yaffs_MknodFile(yaffs_Object *parent, const YCHAR *name,
++				__u32 mode, __u32 uid, __u32 gid);
++int yaffs_FlushFile(yaffs_Object *obj, int updateTime);
++
++/* Flushing and checkpointing */
++void yaffs_FlushEntireDeviceCache(yaffs_Device *dev);
++
++int yaffs_CheckpointSave(yaffs_Device *dev);
++int yaffs_CheckpointRestore(yaffs_Device *dev);
++
++/* Directory operations */
++yaffs_Object *yaffs_MknodDirectory(yaffs_Object *parent, const YCHAR *name,
++				__u32 mode, __u32 uid, __u32 gid);
++yaffs_Object *yaffs_FindObjectByName(yaffs_Object *theDir, const YCHAR *name);
++int yaffs_ApplyToDirectoryChildren(yaffs_Object *theDir,
++				   int (*fn) (yaffs_Object *));
++
++yaffs_Object *yaffs_FindObjectByNumber(yaffs_Device *dev, __u32 number);
++
++/* Link operations */
++yaffs_Object *yaffs_Link(yaffs_Object *parent, const YCHAR *name,
++			 yaffs_Object *equivalentObject);
++
++yaffs_Object *yaffs_GetEquivalentObject(yaffs_Object *obj);
++
++/* Symlink operations */
++yaffs_Object *yaffs_MknodSymLink(yaffs_Object *parent, const YCHAR *name,
++				 __u32 mode, __u32 uid, __u32 gid,
++				 const YCHAR *alias);
++YCHAR *yaffs_GetSymlinkAlias(yaffs_Object *obj);
++
++/* Special inodes (fifos, sockets and devices) */
++yaffs_Object *yaffs_MknodSpecial(yaffs_Object *parent, const YCHAR *name,
++				 __u32 mode, __u32 uid, __u32 gid, __u32 rdev);
++
++/* Special directories */
++yaffs_Object *yaffs_Root(yaffs_Device *dev);
++yaffs_Object *yaffs_LostNFound(yaffs_Device *dev);
++
++#ifdef CONFIG_YAFFS_WINCE
++/* CONFIG_YAFFS_WINCE special stuff */
++void yfsd_WinFileTimeNow(__u32 target[2]);
++#endif
++
++#ifdef __KERNEL__
++
++void yaffs_HandleDeferedFree(yaffs_Object *obj);
++#endif
++
++/* Debug dump  */
++int yaffs_DumpObject(yaffs_Object *obj);
++
++void yaffs_GutsTest(yaffs_Device *dev);
++
++/* A few useful functions */
++void yaffs_InitialiseTags(yaffs_ExtendedTags *tags);
++void yaffs_DeleteChunk(yaffs_Device *dev, int chunkId, int markNAND, int lyn);
++int yaffs_CheckFF(__u8 *buffer, int nBytes);
++void yaffs_HandleChunkError(yaffs_Device *dev, yaffs_BlockInfo *bi);
++
++__u8 *yaffs_GetTempBuffer(yaffs_Device *dev, int lineNo);
++void yaffs_ReleaseTempBuffer(yaffs_Device *dev, __u8 *buffer, int lineNo);
++
++#endif
+diff -Nur linux-2.6.32.orig/fs/yaffs2/yaffsinterface.h linux-2.6.32/fs/yaffs2/yaffsinterface.h
+--- linux-2.6.32.orig/fs/yaffs2/yaffsinterface.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.32/fs/yaffs2/yaffsinterface.h	2010-01-30 20:35:01.492020092 +0100
+@@ -0,0 +1,21 @@
++/*
++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2007 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YAFFSINTERFACE_H__
++#define __YAFFSINTERFACE_H__
++
++int yaffs_Initialise(unsigned nBlocks);
++
++#endif
+diff -Nur linux-2.6.32.orig/fs/yaffs2/yaffs_mtdif1.c linux-2.6.32/fs/yaffs2/yaffs_mtdif1.c
+--- linux-2.6.32.orig/fs/yaffs2/yaffs_mtdif1.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.32/fs/yaffs2/yaffs_mtdif1.c	2010-01-30 20:35:01.534033438 +0100
+@@ -0,0 +1,365 @@
++/*
++ * YAFFS: Yet another FFS. A NAND-flash specific file system.
++ * yaffs_mtdif1.c  NAND mtd interface functions for small-page NAND.
++ *
++ * Copyright (C) 2002 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/*
++ * This module provides the interface between yaffs_nand.c and the
++ * MTD API.  This version is used when the MTD interface supports the
++ * 'mtd_oob_ops' style calls to read_oob and write_oob, circa 2.6.17,
++ * and we have small-page NAND device.
++ *
++ * These functions are invoked via function pointers in yaffs_nand.c.
++ * This replaces functionality provided by functions in yaffs_mtdif.c
++ * and the yaffs_TagsCompatability functions in yaffs_tagscompat.c that are
++ * called in yaffs_mtdif.c when the function pointers are NULL.
++ * We assume the MTD layer is performing ECC (useNANDECC is true).
++ */
++
++#include "yportenv.h"
++#include "yaffs_guts.h"
++#include "yaffs_packedtags1.h"
++#include "yaffs_tagscompat.h"	/* for yaffs_CalcTagsECC */
++
++#include "linux/kernel.h"
++#include "linux/version.h"
++#include "linux/types.h"
++#include "linux/mtd/mtd.h"
++
++/* Don't compile this module if we don't have MTD's mtd_oob_ops interface */
++#if (MTD_VERSION_CODE > MTD_VERSION(2, 6, 17))
++
++const char *yaffs_mtdif1_c_version = "$Id: yaffs_mtdif1.c,v 1.10 2009-03-09 07:41:10 charles Exp $";
++
++#ifndef CONFIG_YAFFS_9BYTE_TAGS
++# define YTAG1_SIZE 8
++#else
++# define YTAG1_SIZE 9
++#endif
++
++#if 0
++/* Use the following nand_ecclayout with MTD when using
++ * CONFIG_YAFFS_9BYTE_TAGS and the older on-NAND tags layout.
++ * If you have existing Yaffs images and the byte order differs from this,
++ * adjust 'oobfree' to match your existing Yaffs data.
++ *
++ * This nand_ecclayout scatters/gathers to/from the old-yaffs layout with the
++ * pageStatus byte (at NAND spare offset 4) scattered/gathered from/to
++ * the 9th byte.
++ *
++ * Old-style on-NAND format: T0,T1,T2,T3,P,B,T4,T5,E0,E1,E2,T6,T7,E3,E4,E5
++ * We have/need PackedTags1 plus pageStatus: T0,T1,T2,T3,T4,T5,T6,T7,P
++ * where Tn are the tag bytes, En are MTD's ECC bytes, P is the pageStatus
++ * byte and B is the small-page bad-block indicator byte.
++ */
++static struct nand_ecclayout nand_oob_16 = {
++	.eccbytes = 6,
++	.eccpos = { 8, 9, 10, 13, 14, 15 },
++	.oobavail = 9,
++	.oobfree = { { 0, 4 }, { 6, 2 }, { 11, 2 }, { 4, 1 } }
++};
++#endif
++
++/* Write a chunk (page) of data to NAND.
++ *
++ * Caller always provides ExtendedTags data which are converted to a more
++ * compact (packed) form for storage in NAND.  A mini-ECC runs over the
++ * contents of the tags meta-data; used to valid the tags when read.
++ *
++ *  - Pack ExtendedTags to PackedTags1 form
++ *  - Compute mini-ECC for PackedTags1
++ *  - Write data and packed tags to NAND.
++ *
++ * Note: Due to the use of the PackedTags1 meta-data which does not include
++ * a full sequence number (as found in the larger PackedTags2 form) it is
++ * necessary for Yaffs to re-write a chunk/page (just once) to mark it as
++ * discarded and dirty.  This is not ideal: newer NAND parts are supposed
++ * to be written just once.  When Yaffs performs this operation, this
++ * function is called with a NULL data pointer -- calling MTD write_oob
++ * without data is valid usage (2.6.17).
++ *
++ * Any underlying MTD error results in YAFFS_FAIL.
++ * Returns YAFFS_OK or YAFFS_FAIL.
++ */
++int nandmtd1_WriteChunkWithTagsToNAND(yaffs_Device *dev,
++	int chunkInNAND, const __u8 *data, const yaffs_ExtendedTags *etags)
++{
++	struct mtd_info *mtd = dev->genericDevice;
++	int chunkBytes = dev->nDataBytesPerChunk;
++	loff_t addr = ((loff_t)chunkInNAND) * chunkBytes;
++	struct mtd_oob_ops ops;
++	yaffs_PackedTags1 pt1;
++	int retval;
++
++	/* we assume that PackedTags1 and yaffs_Tags are compatible */
++	compile_time_assertion(sizeof(yaffs_PackedTags1) == 12);
++	compile_time_assertion(sizeof(yaffs_Tags) == 8);
++
++	dev->nPageWrites++;
++
++	yaffs_PackTags1(&pt1, etags);
++	yaffs_CalcTagsECC((yaffs_Tags *)&pt1);
++
++	/* When deleting a chunk, the upper layer provides only skeletal
++	 * etags, one with chunkDeleted set.  However, we need to update the
++	 * tags, not erase them completely.  So we use the NAND write property
++	 * that only zeroed-bits stick and set tag bytes to all-ones and
++	 * zero just the (not) deleted bit.
++	 */
++#ifndef CONFIG_YAFFS_9BYTE_TAGS
++	if (etags->chunkDeleted) {
++		memset(&pt1, 0xff, 8);
++		/* clear delete status bit to indicate deleted */
++		pt1.deleted = 0;
++	}
++#else
++	((__u8 *)&pt1)[8] = 0xff;
++	if (etags->chunkDeleted) {
++		memset(&pt1, 0xff, 8);
++		/* zero pageStatus byte to indicate deleted */
++		((__u8 *)&pt1)[8] = 0;
++	}
++#endif
++
++	memset(&ops, 0, sizeof(ops));
++	ops.mode = MTD_OOB_AUTO;
++	ops.len = (data) ? chunkBytes : 0;
++	ops.ooblen = YTAG1_SIZE;
++	ops.datbuf = (__u8 *)data;
++	ops.oobbuf = (__u8 *)&pt1;
++
++	retval = mtd->write_oob(mtd, addr, &ops);
++	if (retval) {
++		yaffs_trace(YAFFS_TRACE_MTD,
++			"write_oob failed, chunk %d, mtd error %d\n",
++			chunkInNAND, retval);
++	}
++	return retval ? YAFFS_FAIL : YAFFS_OK;
++}
++
++/* Return with empty ExtendedTags but add eccResult.
++ */
++static int rettags(yaffs_ExtendedTags *etags, int eccResult, int retval)
++{
++	if (etags) {
++		memset(etags, 0, sizeof(*etags));
++		etags->eccResult = eccResult;
++	}
++	return retval;
++}
++
++/* Read a chunk (page) from NAND.
++ *
++ * Caller expects ExtendedTags data to be usable even on error; that is,
++ * all members except eccResult and blockBad are zeroed.
++ *
++ *  - Check ECC results for data (if applicable)
++ *  - Check for blank/erased block (return empty ExtendedTags if blank)
++ *  - Check the PackedTags1 mini-ECC (correct if necessary/possible)
++ *  - Convert PackedTags1 to ExtendedTags
++ *  - Update eccResult and blockBad members to refect state.
++ *
++ * Returns YAFFS_OK or YAFFS_FAIL.
++ */
++int nandmtd1_ReadChunkWithTagsFromNAND(yaffs_Device *dev,
++	int chunkInNAND, __u8 *data, yaffs_ExtendedTags *etags)
++{
++	struct mtd_info *mtd = dev->genericDevice;
++	int chunkBytes = dev->nDataBytesPerChunk;
++	loff_t addr = ((loff_t)chunkInNAND) * chunkBytes;
++	int eccres = YAFFS_ECC_RESULT_NO_ERROR;
++	struct mtd_oob_ops ops;
++	yaffs_PackedTags1 pt1;
++	int retval;
++	int deleted;
++
++	dev->nPageReads++;
++
++	memset(&ops, 0, sizeof(ops));
++	ops.mode = MTD_OOB_AUTO;
++	ops.len = (data) ? chunkBytes : 0;
++	ops.ooblen = YTAG1_SIZE;
++	ops.datbuf = data;
++	ops.oobbuf = (__u8 *)&pt1;
++
++#if (MTD_VERSION_CODE < MTD_VERSION(2, 6, 20))
++	/* In MTD 2.6.18 to 2.6.19 nand_base.c:nand_do_read_oob() has a bug;
++	 * help it out with ops.len = ops.ooblen when ops.datbuf == NULL.
++	 */
++	ops.len = (ops.datbuf) ? ops.len : ops.ooblen;
++#endif
++	/* Read page and oob using MTD.
++	 * Check status and determine ECC result.
++	 */
++	retval = mtd->read_oob(mtd, addr, &ops);
++	if (retval) {
++		yaffs_trace(YAFFS_TRACE_MTD,
++			"read_oob failed, chunk %d, mtd error %d\n",
++			chunkInNAND, retval);
++	}
++
++	switch (retval) {
++	case 0:
++		/* no error */
++		break;
++
++	case -EUCLEAN:
++		/* MTD's ECC fixed the data */
++		eccres = YAFFS_ECC_RESULT_FIXED;
++		dev->eccFixed++;
++		break;
++
++	case -EBADMSG:
++		/* MTD's ECC could not fix the data */
++		dev->eccUnfixed++;
++		/* fall into... */
++	default:
++		rettags(etags, YAFFS_ECC_RESULT_UNFIXED, 0);
++		etags->blockBad = (mtd->block_isbad)(mtd, addr);
++		return YAFFS_FAIL;
++	}
++
++	/* Check for a blank/erased chunk.
++	 */
++	if (yaffs_CheckFF((__u8 *)&pt1, 8)) {
++		/* when blank, upper layers want eccResult to be <= NO_ERROR */
++		return rettags(etags, YAFFS_ECC_RESULT_NO_ERROR, YAFFS_OK);
++	}
++
++#ifndef CONFIG_YAFFS_9BYTE_TAGS
++	/* Read deleted status (bit) then return it to it's non-deleted
++	 * state before performing tags mini-ECC check. pt1.deleted is
++	 * inverted.
++	 */
++	deleted = !pt1.deleted;
++	pt1.deleted = 1;
++#else
++	deleted = (yaffs_CountBits(((__u8 *)&pt1)[8]) < 7);
++#endif
++
++	/* Check the packed tags mini-ECC and correct if necessary/possible.
++	 */
++	retval = yaffs_CheckECCOnTags((yaffs_Tags *)&pt1);
++	switch (retval) {
++	case 0:
++		/* no tags error, use MTD result */
++		break;
++	case 1:
++		/* recovered tags-ECC error */
++		dev->tagsEccFixed++;
++		if (eccres == YAFFS_ECC_RESULT_NO_ERROR)
++			eccres = YAFFS_ECC_RESULT_FIXED;
++		break;
++	default:
++		/* unrecovered tags-ECC error */
++		dev->tagsEccUnfixed++;
++		return rettags(etags, YAFFS_ECC_RESULT_UNFIXED, YAFFS_FAIL);
++	}
++
++	/* Unpack the tags to extended form and set ECC result.
++	 * [set shouldBeFF just to keep yaffs_UnpackTags1 happy]
++	 */
++	pt1.shouldBeFF = 0xFFFFFFFF;
++	yaffs_UnpackTags1(etags, &pt1);
++	etags->eccResult = eccres;
++
++	/* Set deleted state */
++	etags->chunkDeleted = deleted;
++	return YAFFS_OK;
++}
++
++/* Mark a block bad.
++ *
++ * This is a persistant state.
++ * Use of this function should be rare.
++ *
++ * Returns YAFFS_OK or YAFFS_FAIL.
++ */
++int nandmtd1_MarkNANDBlockBad(struct yaffs_DeviceStruct *dev, int blockNo)
++{
++	struct mtd_info *mtd = dev->genericDevice;
++	int blocksize = dev->nChunksPerBlock * dev->nDataBytesPerChunk;
++	int retval;
++
++	yaffs_trace(YAFFS_TRACE_BAD_BLOCKS, "marking block %d bad\n", blockNo);
++
++	retval = mtd->block_markbad(mtd, (loff_t)blocksize * blockNo);
++	return (retval) ? YAFFS_FAIL : YAFFS_OK;
++}
++
++/* Check any MTD prerequists.
++ *
++ * Returns YAFFS_OK or YAFFS_FAIL.
++ */
++static int nandmtd1_TestPrerequists(struct mtd_info *mtd)
++{
++	/* 2.6.18 has mtd->ecclayout->oobavail */
++	/* 2.6.21 has mtd->ecclayout->oobavail and mtd->oobavail */
++	int oobavail = mtd->ecclayout->oobavail;
++
++	if (oobavail < YTAG1_SIZE) {
++		yaffs_trace(YAFFS_TRACE_ERROR,
++			"mtd device has only %d bytes for tags, need %d\n",
++			oobavail, YTAG1_SIZE);
++		return YAFFS_FAIL;
++	}
++	return YAFFS_OK;
++}
++
++/* Query for the current state of a specific block.
++ *
++ * Examine the tags of the first chunk of the block and return the state:
++ *  - YAFFS_BLOCK_STATE_DEAD, the block is marked bad
++ *  - YAFFS_BLOCK_STATE_NEEDS_SCANNING, the block is in use
++ *  - YAFFS_BLOCK_STATE_EMPTY, the block is clean
++ *
++ * Always returns YAFFS_OK.
++ */
++int nandmtd1_QueryNANDBlock(struct yaffs_DeviceStruct *dev, int blockNo,
++	yaffs_BlockState *pState, __u32 *pSequenceNumber)
++{
++	struct mtd_info *mtd = dev->genericDevice;
++	int chunkNo = blockNo * dev->nChunksPerBlock;
++	loff_t addr = (loff_t)chunkNo * dev->nDataBytesPerChunk;
++	yaffs_ExtendedTags etags;
++	int state = YAFFS_BLOCK_STATE_DEAD;
++	int seqnum = 0;
++	int retval;
++
++	/* We don't yet have a good place to test for MTD config prerequisites.
++	 * Do it here as we are called during the initial scan.
++	 */
++	if (nandmtd1_TestPrerequists(mtd) != YAFFS_OK)
++		return YAFFS_FAIL;
++
++	retval = nandmtd1_ReadChunkWithTagsFromNAND(dev, chunkNo, NULL, &etags);
++	etags.blockBad = (mtd->block_isbad)(mtd, addr);
++	if (etags.blockBad) {
++		yaffs_trace(YAFFS_TRACE_BAD_BLOCKS,
++			"block %d is marked bad\n", blockNo);
++		state = YAFFS_BLOCK_STATE_DEAD;
++	} else if (etags.eccResult != YAFFS_ECC_RESULT_NO_ERROR) {
++		/* bad tags, need to look more closely */
++		state = YAFFS_BLOCK_STATE_NEEDS_SCANNING;
++	} else if (etags.chunkUsed) {
++		state = YAFFS_BLOCK_STATE_NEEDS_SCANNING;
++		seqnum = etags.sequenceNumber;
++	} else {
++		state = YAFFS_BLOCK_STATE_EMPTY;
++	}
++
++	*pState = state;
++	*pSequenceNumber = seqnum;
++
++	/* query always succeeds */
++	return YAFFS_OK;
++}
++
++#endif /*MTD_VERSION*/
+diff -Nur linux-2.6.32.orig/fs/yaffs2/yaffs_mtdif1.h linux-2.6.32/fs/yaffs2/yaffs_mtdif1.h
+--- linux-2.6.32.orig/fs/yaffs2/yaffs_mtdif1.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.32/fs/yaffs2/yaffs_mtdif1.h	2010-01-30 20:35:01.572068025 +0100
+@@ -0,0 +1,28 @@
++/*
++ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2007 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YAFFS_MTDIF1_H__
++#define __YAFFS_MTDIF1_H__
++
++int nandmtd1_WriteChunkWithTagsToNAND(yaffs_Device *dev, int chunkInNAND,
++	const __u8 *data, const yaffs_ExtendedTags *tags);
++
++int nandmtd1_ReadChunkWithTagsFromNAND(yaffs_Device *dev, int chunkInNAND,
++	__u8 *data, yaffs_ExtendedTags *tags);
++
++int nandmtd1_MarkNANDBlockBad(struct yaffs_DeviceStruct *dev, int blockNo);
++
++int nandmtd1_QueryNANDBlock(struct yaffs_DeviceStruct *dev, int blockNo,
++	yaffs_BlockState *state, __u32 *sequenceNumber);
++
++#endif
+diff -Nur linux-2.6.32.orig/fs/yaffs2/yaffs_mtdif2.c linux-2.6.32/fs/yaffs2/yaffs_mtdif2.c
+--- linux-2.6.32.orig/fs/yaffs2/yaffs_mtdif2.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.32/fs/yaffs2/yaffs_mtdif2.c	2010-01-30 20:35:01.611828597 +0100
+@@ -0,0 +1,246 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2007 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/* mtd interface for YAFFS2 */
++
++const char *yaffs_mtdif2_c_version =
++	"$Id: yaffs_mtdif2.c,v 1.23 2009-03-06 17:20:53 wookey Exp $";
++
++#include "yportenv.h"
++
++
++#include "yaffs_mtdif2.h"
++
++#include "linux/mtd/mtd.h"
++#include "linux/types.h"
++#include "linux/time.h"
++
++#include "yaffs_packedtags2.h"
++
++/* NB For use with inband tags....
++ * We assume that the data buffer is of size totalBytesPerChunk so that we can also
++ * use it to load the tags.
++ */
++int nandmtd2_WriteChunkWithTagsToNAND(yaffs_Device *dev, int chunkInNAND,
++				      const __u8 *data,
++				      const yaffs_ExtendedTags *tags)
++{
++	struct mtd_info *mtd = (struct mtd_info *)(dev->genericDevice);
++#if (MTD_VERSION_CODE > MTD_VERSION(2, 6, 17))
++	struct mtd_oob_ops ops;
++#else
++	size_t dummy;
++#endif
++	int retval = 0;
++
++	loff_t addr;
++
++	yaffs_PackedTags2 pt;
++
++	T(YAFFS_TRACE_MTD,
++	  (TSTR
++	   ("nandmtd2_WriteChunkWithTagsToNAND chunk %d data %p tags %p"
++	    TENDSTR), chunkInNAND, data, tags));
++
++
++	addr  = ((loff_t) chunkInNAND) * dev->totalBytesPerChunk;
++
++	/* For yaffs2 writing there must be both data and tags.
++	 * If we're using inband tags, then the tags are stuffed into
++	 * the end of the data buffer.
++	 */
++	if (!data || !tags)
++		BUG();
++	else if (dev->inbandTags) {
++		yaffs_PackedTags2TagsPart *pt2tp;
++		pt2tp = (yaffs_PackedTags2TagsPart *)(data + dev->nDataBytesPerChunk);
++		yaffs_PackTags2TagsPart(pt2tp, tags);
++	} else
++		yaffs_PackTags2(&pt, tags);
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++	ops.mode = MTD_OOB_AUTO;
++	ops.ooblen = (dev->inbandTags) ? 0 : sizeof(pt);
++	ops.len = dev->totalBytesPerChunk;
++	ops.ooboffs = 0;
++	ops.datbuf = (__u8 *)data;
++	ops.oobbuf = (dev->inbandTags) ? NULL : (void *)&pt;
++	retval = mtd->write_oob(mtd, addr, &ops);
++
++#else
++	if (!dev->inbandTags) {
++		retval =
++		    mtd->write_ecc(mtd, addr, dev->nDataBytesPerChunk,
++				   &dummy, data, (__u8 *) &pt, NULL);
++	} else {
++		retval =
++		    mtd->write(mtd, addr, dev->totalBytesPerChunk, &dummy,
++			       data);
++	}
++#endif
++
++	if (retval == 0)
++		return YAFFS_OK;
++	else
++		return YAFFS_FAIL;
++}
++
++int nandmtd2_ReadChunkWithTagsFromNAND(yaffs_Device *dev, int chunkInNAND,
++				       __u8 *data, yaffs_ExtendedTags *tags)
++{
++	struct mtd_info *mtd = (struct mtd_info *)(dev->genericDevice);
++#if (MTD_VERSION_CODE > MTD_VERSION(2, 6, 17))
++	struct mtd_oob_ops ops;
++#endif
++	size_t dummy;
++	int retval = 0;
++	int localData = 0;
++
++	loff_t addr = ((loff_t) chunkInNAND) * dev->totalBytesPerChunk;
++
++	yaffs_PackedTags2 pt;
++
++	T(YAFFS_TRACE_MTD,
++	  (TSTR
++	   ("nandmtd2_ReadChunkWithTagsFromNAND chunk %d data %p tags %p"
++	    TENDSTR), chunkInNAND, data, tags));
++
++	if (dev->inbandTags) {
++
++		if (!data) {
++			localData = 1;
++			data = yaffs_GetTempBuffer(dev, __LINE__);
++		}
++
++
++	}
++
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++	if (dev->inbandTags || (data && !tags))
++		retval = mtd->read(mtd, addr, dev->totalBytesPerChunk,
++				&dummy, data);
++	else if (tags) {
++		ops.mode = MTD_OOB_AUTO;
++		ops.ooblen = sizeof(pt);
++		ops.len = data ? dev->nDataBytesPerChunk : sizeof(pt);
++		ops.ooboffs = 0;
++		ops.datbuf = data;
++		ops.oobbuf = dev->spareBuffer;
++		retval = mtd->read_oob(mtd, addr, &ops);
++	}
++#else
++	if (!dev->inbandTags && data && tags) {
++
++		retval = mtd->read_ecc(mtd, addr, dev->nDataBytesPerChunk,
++					  &dummy, data, dev->spareBuffer,
++					  NULL);
++	} else {
++		if (data)
++			retval =
++			    mtd->read(mtd, addr, dev->nDataBytesPerChunk, &dummy,
++				      data);
++		if (!dev->inbandTags && tags)
++			retval =
++			    mtd->read_oob(mtd, addr, mtd->oobsize, &dummy,
++					  dev->spareBuffer);
++	}
++#endif
++
++
++	if (dev->inbandTags) {
++		if (tags) {
++			yaffs_PackedTags2TagsPart *pt2tp;
++			pt2tp = (yaffs_PackedTags2TagsPart *)&data[dev->nDataBytesPerChunk];
++			yaffs_UnpackTags2TagsPart(tags, pt2tp);
++		}
++	} else {
++		if (tags) {
++			memcpy(&pt, dev->spareBuffer, sizeof(pt));
++			yaffs_UnpackTags2(tags, &pt);
++		}
++	}
++
++	if (localData)
++		yaffs_ReleaseTempBuffer(dev, data, __LINE__);
++
++	if (tags && retval == -EBADMSG && tags->eccResult == YAFFS_ECC_RESULT_NO_ERROR)
++		tags->eccResult = YAFFS_ECC_RESULT_UNFIXED;
++	if (retval == 0)
++		return YAFFS_OK;
++	else
++		return YAFFS_FAIL;
++}
++
++int nandmtd2_MarkNANDBlockBad(struct yaffs_DeviceStruct *dev, int blockNo)
++{
++	struct mtd_info *mtd = (struct mtd_info *)(dev->genericDevice);
++	int retval;
++	T(YAFFS_TRACE_MTD,
++	  (TSTR("nandmtd2_MarkNANDBlockBad %d" TENDSTR), blockNo));
++
++	retval =
++	    mtd->block_markbad(mtd,
++			       blockNo * dev->nChunksPerBlock *
++			       dev->totalBytesPerChunk);
++
++	if (retval == 0)
++		return YAFFS_OK;
++	else
++		return YAFFS_FAIL;
++
++}
++
++int nandmtd2_QueryNANDBlock(struct yaffs_DeviceStruct *dev, int blockNo,
++			    yaffs_BlockState *state, __u32 *sequenceNumber)
++{
++	struct mtd_info *mtd = (struct mtd_info *)(dev->genericDevice);
++	int retval;
++
++	T(YAFFS_TRACE_MTD,
++	  (TSTR("nandmtd2_QueryNANDBlock %d" TENDSTR), blockNo));
++	retval =
++	    mtd->block_isbad(mtd,
++			     blockNo * dev->nChunksPerBlock *
++			     dev->totalBytesPerChunk);
++
++	if (retval) {
++		T(YAFFS_TRACE_MTD, (TSTR("block is bad" TENDSTR)));
++
++		*state = YAFFS_BLOCK_STATE_DEAD;
++		*sequenceNumber = 0;
++	} else {
++		yaffs_ExtendedTags t;
++		nandmtd2_ReadChunkWithTagsFromNAND(dev,
++						   blockNo *
++						   dev->nChunksPerBlock, NULL,
++						   &t);
++
++		if (t.chunkUsed) {
++			*sequenceNumber = t.sequenceNumber;
++			*state = YAFFS_BLOCK_STATE_NEEDS_SCANNING;
++		} else {
++			*sequenceNumber = 0;
++			*state = YAFFS_BLOCK_STATE_EMPTY;
++		}
++	}
++	T(YAFFS_TRACE_MTD,
++	  (TSTR("block is bad seq %d state %d" TENDSTR), *sequenceNumber,
++	   *state));
++
++	if (retval == 0)
++		return YAFFS_OK;
++	else
++		return YAFFS_FAIL;
++}
++
+diff -Nur linux-2.6.32.orig/fs/yaffs2/yaffs_mtdif2.h linux-2.6.32/fs/yaffs2/yaffs_mtdif2.h
+--- linux-2.6.32.orig/fs/yaffs2/yaffs_mtdif2.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.32/fs/yaffs2/yaffs_mtdif2.h	2010-01-30 20:35:01.663084824 +0100
+@@ -0,0 +1,29 @@
++/*
++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2007 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YAFFS_MTDIF2_H__
++#define __YAFFS_MTDIF2_H__
++
++#include "yaffs_guts.h"
++int nandmtd2_WriteChunkWithTagsToNAND(yaffs_Device *dev, int chunkInNAND,
++				const __u8 *data,
++				const yaffs_ExtendedTags *tags);
++int nandmtd2_ReadChunkWithTagsFromNAND(yaffs_Device *dev, int chunkInNAND,
++				__u8 *data, yaffs_ExtendedTags *tags);
++int nandmtd2_MarkNANDBlockBad(struct yaffs_DeviceStruct *dev, int blockNo);
++int nandmtd2_QueryNANDBlock(struct yaffs_DeviceStruct *dev, int blockNo,
++			yaffs_BlockState *state, __u32 *sequenceNumber);
++
++#endif
+diff -Nur linux-2.6.32.orig/fs/yaffs2/yaffs_mtdif.c linux-2.6.32/fs/yaffs2/yaffs_mtdif.c
+--- linux-2.6.32.orig/fs/yaffs2/yaffs_mtdif.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.32/fs/yaffs2/yaffs_mtdif.c	2010-01-30 20:35:01.701828807 +0100
+@@ -0,0 +1,241 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2007 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++const char *yaffs_mtdif_c_version =
++	"$Id: yaffs_mtdif.c,v 1.22 2009-03-06 17:20:51 wookey Exp $";
++
++#include "yportenv.h"
++
++
++#include "yaffs_mtdif.h"
++
++#include "linux/mtd/mtd.h"
++#include "linux/types.h"
++#include "linux/time.h"
++#include "linux/mtd/nand.h"
++
++#if (MTD_VERSION_CODE < MTD_VERSION(2, 6, 18))
++static struct nand_oobinfo yaffs_oobinfo = {
++	.useecc = 1,
++	.eccbytes = 6,
++	.eccpos = {8, 9, 10, 13, 14, 15}
++};
++
++static struct nand_oobinfo yaffs_noeccinfo = {
++	.useecc = 0,
++};
++#endif
++
++#if (MTD_VERSION_CODE > MTD_VERSION(2, 6, 17))
++static inline void translate_spare2oob(const yaffs_Spare *spare, __u8 *oob)
++{
++	oob[0] = spare->tagByte0;
++	oob[1] = spare->tagByte1;
++	oob[2] = spare->tagByte2;
++	oob[3] = spare->tagByte3;
++	oob[4] = spare->tagByte4;
++	oob[5] = spare->tagByte5 & 0x3f;
++	oob[5] |= spare->blockStatus == 'Y' ? 0 : 0x80;
++	oob[5] |= spare->pageStatus == 0 ? 0 : 0x40;
++	oob[6] = spare->tagByte6;
++	oob[7] = spare->tagByte7;
++}
++
++static inline void translate_oob2spare(yaffs_Spare *spare, __u8 *oob)
++{
++	struct yaffs_NANDSpare *nspare = (struct yaffs_NANDSpare *)spare;
++	spare->tagByte0 = oob[0];
++	spare->tagByte1 = oob[1];
++	spare->tagByte2 = oob[2];
++	spare->tagByte3 = oob[3];
++	spare->tagByte4 = oob[4];
++	spare->tagByte5 = oob[5] == 0xff ? 0xff : oob[5] & 0x3f;
++	spare->blockStatus = oob[5] & 0x80 ? 0xff : 'Y';
++	spare->pageStatus = oob[5] & 0x40 ? 0xff : 0;
++	spare->ecc1[0] = spare->ecc1[1] = spare->ecc1[2] = 0xff;
++	spare->tagByte6 = oob[6];
++	spare->tagByte7 = oob[7];
++	spare->ecc2[0] = spare->ecc2[1] = spare->ecc2[2] = 0xff;
++
++	nspare->eccres1 = nspare->eccres2 = 0; /* FIXME */
++}
++#endif
++
++int nandmtd_WriteChunkToNAND(yaffs_Device *dev, int chunkInNAND,
++			     const __u8 *data, const yaffs_Spare *spare)
++{
++	struct mtd_info *mtd = (struct mtd_info *)(dev->genericDevice);
++#if (MTD_VERSION_CODE > MTD_VERSION(2, 6, 17))
++	struct mtd_oob_ops ops;
++#endif
++	size_t dummy;
++	int retval = 0;
++
++	loff_t addr = ((loff_t) chunkInNAND) * dev->nDataBytesPerChunk;
++#if (MTD_VERSION_CODE > MTD_VERSION(2, 6, 17))
++	__u8 spareAsBytes[8]; /* OOB */
++
++	if (data && !spare)
++		retval = mtd->write(mtd, addr, dev->nDataBytesPerChunk,
++				&dummy, data);
++	else if (spare) {
++		if (dev->useNANDECC) {
++			translate_spare2oob(spare, spareAsBytes);
++			ops.mode = MTD_OOB_AUTO;
++			ops.ooblen = 8; /* temp hack */
++		} else {
++			ops.mode = MTD_OOB_RAW;
++			ops.ooblen = YAFFS_BYTES_PER_SPARE;
++		}
++		ops.len = data ? dev->nDataBytesPerChunk : ops.ooblen;
++		ops.datbuf = (u8 *)data;
++		ops.ooboffs = 0;
++		ops.oobbuf = spareAsBytes;
++		retval = mtd->write_oob(mtd, addr, &ops);
++	}
++#else
++	__u8 *spareAsBytes = (__u8 *) spare;
++
++	if (data && spare) {
++		if (dev->useNANDECC)
++			retval =
++			    mtd->write_ecc(mtd, addr, dev->nDataBytesPerChunk,
++					   &dummy, data, spareAsBytes,
++					   &yaffs_oobinfo);
++		else
++			retval =
++			    mtd->write_ecc(mtd, addr, dev->nDataBytesPerChunk,
++					   &dummy, data, spareAsBytes,
++					   &yaffs_noeccinfo);
++	} else {
++		if (data)
++			retval =
++			    mtd->write(mtd, addr, dev->nDataBytesPerChunk, &dummy,
++				       data);
++		if (spare)
++			retval =
++			    mtd->write_oob(mtd, addr, YAFFS_BYTES_PER_SPARE,
++					   &dummy, spareAsBytes);
++	}
++#endif
++
++	if (retval == 0)
++		return YAFFS_OK;
++	else
++		return YAFFS_FAIL;
++}
++
++int nandmtd_ReadChunkFromNAND(yaffs_Device *dev, int chunkInNAND, __u8 *data,
++			      yaffs_Spare *spare)
++{
++	struct mtd_info *mtd = (struct mtd_info *)(dev->genericDevice);
++#if (MTD_VERSION_CODE > MTD_VERSION(2, 6, 17))
++	struct mtd_oob_ops ops;
++#endif
++	size_t dummy;
++	int retval = 0;
++
++	loff_t addr = ((loff_t) chunkInNAND) * dev->nDataBytesPerChunk;
++#if (MTD_VERSION_CODE > MTD_VERSION(2, 6, 17))
++	__u8 spareAsBytes[8]; /* OOB */
++
++	if (data && !spare)
++		retval = mtd->read(mtd, addr, dev->nDataBytesPerChunk,
++				&dummy, data);
++	else if (spare) {
++		if (dev->useNANDECC) {
++			ops.mode = MTD_OOB_AUTO;
++			ops.ooblen = 8; /* temp hack */
++		} else {
++			ops.mode = MTD_OOB_RAW;
++			ops.ooblen = YAFFS_BYTES_PER_SPARE;
++		}
++		ops.len = data ? dev->nDataBytesPerChunk : ops.ooblen;
++		ops.datbuf = data;
++		ops.ooboffs = 0;
++		ops.oobbuf = spareAsBytes;
++		retval = mtd->read_oob(mtd, addr, &ops);
++		if (dev->useNANDECC)
++			translate_oob2spare(spare, spareAsBytes);
++	}
++#else
++	__u8 *spareAsBytes = (__u8 *) spare;
++
++	if (data && spare) {
++		if (dev->useNANDECC) {
++			/* Careful, this call adds 2 ints */
++			/* to the end of the spare data.  Calling function */
++			/* should allocate enough memory for spare, */
++			/* i.e. [YAFFS_BYTES_PER_SPARE+2*sizeof(int)]. */
++			retval =
++			    mtd->read_ecc(mtd, addr, dev->nDataBytesPerChunk,
++					  &dummy, data, spareAsBytes,
++					  &yaffs_oobinfo);
++		} else {
++			retval =
++			    mtd->read_ecc(mtd, addr, dev->nDataBytesPerChunk,
++					  &dummy, data, spareAsBytes,
++					  &yaffs_noeccinfo);
++		}
++	} else {
++		if (data)
++			retval =
++			    mtd->read(mtd, addr, dev->nDataBytesPerChunk, &dummy,
++				      data);
++		if (spare)
++			retval =
++			    mtd->read_oob(mtd, addr, YAFFS_BYTES_PER_SPARE,
++					  &dummy, spareAsBytes);
++	}
++#endif
++
++	if (retval == 0)
++		return YAFFS_OK;
++	else
++		return YAFFS_FAIL;
++}
++
++int nandmtd_EraseBlockInNAND(yaffs_Device *dev, int blockNumber)
++{
++	struct mtd_info *mtd = (struct mtd_info *)(dev->genericDevice);
++	__u32 addr =
++	    ((loff_t) blockNumber) * dev->nDataBytesPerChunk
++		* dev->nChunksPerBlock;
++	struct erase_info ei;
++	int retval = 0;
++
++	ei.mtd = mtd;
++	ei.addr = addr;
++	ei.len = dev->nDataBytesPerChunk * dev->nChunksPerBlock;
++	ei.time = 1000;
++	ei.retries = 2;
++	ei.callback = NULL;
++	ei.priv = (u_long) dev;
++
++	/* Todo finish off the ei if required */
++
++	sema_init(&dev->sem, 0);
++
++	retval = mtd->erase(mtd, &ei);
++
++	if (retval == 0)
++		return YAFFS_OK;
++	else
++		return YAFFS_FAIL;
++}
++
++int nandmtd_InitialiseNAND(yaffs_Device *dev)
++{
++	return YAFFS_OK;
++}
++
+diff -Nur linux-2.6.32.orig/fs/yaffs2/yaffs_mtdif.h linux-2.6.32/fs/yaffs2/yaffs_mtdif.h
+--- linux-2.6.32.orig/fs/yaffs2/yaffs_mtdif.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.32/fs/yaffs2/yaffs_mtdif.h	2010-01-30 20:35:01.733074983 +0100
+@@ -0,0 +1,32 @@
++/*
++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2007 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YAFFS_MTDIF_H__
++#define __YAFFS_MTDIF_H__
++
++#include "yaffs_guts.h"
++
++#if (MTD_VERSION_CODE < MTD_VERSION(2, 6, 18))
++extern struct nand_oobinfo yaffs_oobinfo;
++extern struct nand_oobinfo yaffs_noeccinfo;
++#endif
++
++int nandmtd_WriteChunkToNAND(yaffs_Device *dev, int chunkInNAND,
++			const __u8 *data, const yaffs_Spare *spare);
++int nandmtd_ReadChunkFromNAND(yaffs_Device *dev, int chunkInNAND, __u8 *data,
++			yaffs_Spare *spare);
++int nandmtd_EraseBlockInNAND(yaffs_Device *dev, int blockNumber);
++int nandmtd_InitialiseNAND(yaffs_Device *dev);
++#endif
+diff -Nur linux-2.6.32.orig/fs/yaffs2/yaffs_nand.c linux-2.6.32/fs/yaffs2/yaffs_nand.c
+--- linux-2.6.32.orig/fs/yaffs2/yaffs_nand.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.32/fs/yaffs2/yaffs_nand.c	2010-01-30 20:35:01.771938289 +0100
+@@ -0,0 +1,135 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2007 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++const char *yaffs_nand_c_version =
++	"$Id: yaffs_nand.c,v 1.10 2009-03-06 17:20:54 wookey Exp $";
++
++#include "yaffs_nand.h"
++#include "yaffs_tagscompat.h"
++#include "yaffs_tagsvalidity.h"
++
++#include "yaffs_getblockinfo.h"
++
++int yaffs_ReadChunkWithTagsFromNAND(yaffs_Device *dev, int chunkInNAND,
++					   __u8 *buffer,
++					   yaffs_ExtendedTags *tags)
++{
++	int result;
++	yaffs_ExtendedTags localTags;
++
++	int realignedChunkInNAND = chunkInNAND - dev->chunkOffset;
++
++	/* If there are no tags provided, use local tags to get prioritised gc working */
++	if (!tags)
++		tags = &localTags;
++
++	if (dev->readChunkWithTagsFromNAND)
++		result = dev->readChunkWithTagsFromNAND(dev, realignedChunkInNAND, buffer,
++						      tags);
++	else
++		result = yaffs_TagsCompatabilityReadChunkWithTagsFromNAND(dev,
++									realignedChunkInNAND,
++									buffer,
++									tags);
++	if (tags &&
++	   tags->eccResult > YAFFS_ECC_RESULT_NO_ERROR) {
++
++		yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, chunkInNAND/dev->nChunksPerBlock);
++		yaffs_HandleChunkError(dev, bi);
++	}
++
++	return result;
++}
++
++int yaffs_WriteChunkWithTagsToNAND(yaffs_Device *dev,
++						   int chunkInNAND,
++						   const __u8 *buffer,
++						   yaffs_ExtendedTags *tags)
++{
++	chunkInNAND -= dev->chunkOffset;
++
++
++	if (tags) {
++		tags->sequenceNumber = dev->sequenceNumber;
++		tags->chunkUsed = 1;
++		if (!yaffs_ValidateTags(tags)) {
++			T(YAFFS_TRACE_ERROR,
++			  (TSTR("Writing uninitialised tags" TENDSTR)));
++			YBUG();
++		}
++		T(YAFFS_TRACE_WRITE,
++		  (TSTR("Writing chunk %d tags %d %d" TENDSTR), chunkInNAND,
++		   tags->objectId, tags->chunkId));
++	} else {
++		T(YAFFS_TRACE_ERROR, (TSTR("Writing with no tags" TENDSTR)));
++		YBUG();
++	}
++
++	if (dev->writeChunkWithTagsToNAND)
++		return dev->writeChunkWithTagsToNAND(dev, chunkInNAND, buffer,
++						     tags);
++	else
++		return yaffs_TagsCompatabilityWriteChunkWithTagsToNAND(dev,
++								       chunkInNAND,
++								       buffer,
++								       tags);
++}
++
++int yaffs_MarkBlockBad(yaffs_Device *dev, int blockNo)
++{
++	blockNo -= dev->blockOffset;
++
++;
++	if (dev->markNANDBlockBad)
++		return dev->markNANDBlockBad(dev, blockNo);
++	else
++		return yaffs_TagsCompatabilityMarkNANDBlockBad(dev, blockNo);
++}
++
++int yaffs_QueryInitialBlockState(yaffs_Device *dev,
++						 int blockNo,
++						 yaffs_BlockState *state,
++						 __u32 *sequenceNumber)
++{
++	blockNo -= dev->blockOffset;
++
++	if (dev->queryNANDBlock)
++		return dev->queryNANDBlock(dev, blockNo, state, sequenceNumber);
++	else
++		return yaffs_TagsCompatabilityQueryNANDBlock(dev, blockNo,
++							     state,
++							     sequenceNumber);
++}
++
++
++int yaffs_EraseBlockInNAND(struct yaffs_DeviceStruct *dev,
++				  int blockInNAND)
++{
++	int result;
++
++	blockInNAND -= dev->blockOffset;
++
++
++	dev->nBlockErasures++;
++	result = dev->eraseBlockInNAND(dev, blockInNAND);
++
++	return result;
++}
++
++int yaffs_InitialiseNAND(struct yaffs_DeviceStruct *dev)
++{
++	return dev->initialiseNAND(dev);
++}
++
++
++
+diff -Nur linux-2.6.32.orig/fs/yaffs2/yaffs_nandemul2k.h linux-2.6.32/fs/yaffs2/yaffs_nandemul2k.h
+--- linux-2.6.32.orig/fs/yaffs2/yaffs_nandemul2k.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.32/fs/yaffs2/yaffs_nandemul2k.h	2010-01-30 20:35:01.811826092 +0100
+@@ -0,0 +1,39 @@
++/*
++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2007 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++/* Interface to emulated NAND functions (2k page size) */
++
++#ifndef __YAFFS_NANDEMUL2K_H__
++#define __YAFFS_NANDEMUL2K_H__
++
++#include "yaffs_guts.h"
++
++int nandemul2k_WriteChunkWithTagsToNAND(struct yaffs_DeviceStruct *dev,
++					int chunkInNAND, const __u8 *data,
++					const yaffs_ExtendedTags *tags);
++int nandemul2k_ReadChunkWithTagsFromNAND(struct yaffs_DeviceStruct *dev,
++					 int chunkInNAND, __u8 *data,
++					 yaffs_ExtendedTags *tags);
++int nandemul2k_MarkNANDBlockBad(struct yaffs_DeviceStruct *dev, int blockNo);
++int nandemul2k_QueryNANDBlock(struct yaffs_DeviceStruct *dev, int blockNo,
++			      yaffs_BlockState *state, __u32 *sequenceNumber);
++int nandemul2k_EraseBlockInNAND(struct yaffs_DeviceStruct *dev,
++				int blockInNAND);
++int nandemul2k_InitialiseNAND(struct yaffs_DeviceStruct *dev);
++int nandemul2k_GetBytesPerChunk(void);
++int nandemul2k_GetChunksPerBlock(void);
++int nandemul2k_GetNumberOfBlocks(void);
++
++#endif
+diff -Nur linux-2.6.32.orig/fs/yaffs2/yaffs_nand.h linux-2.6.32/fs/yaffs2/yaffs_nand.h
+--- linux-2.6.32.orig/fs/yaffs2/yaffs_nand.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.32/fs/yaffs2/yaffs_nand.h	2010-01-30 20:35:01.851862979 +0100
+@@ -0,0 +1,44 @@
++/*
++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2007 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YAFFS_NAND_H__
++#define __YAFFS_NAND_H__
++#include "yaffs_guts.h"
++
++
++
++int yaffs_ReadChunkWithTagsFromNAND(yaffs_Device *dev, int chunkInNAND,
++					__u8 *buffer,
++					yaffs_ExtendedTags *tags);
++
++int yaffs_WriteChunkWithTagsToNAND(yaffs_Device *dev,
++						int chunkInNAND,
++						const __u8 *buffer,
++						yaffs_ExtendedTags *tags);
++
++int yaffs_MarkBlockBad(yaffs_Device *dev, int blockNo);
++
++int yaffs_QueryInitialBlockState(yaffs_Device *dev,
++						int blockNo,
++						yaffs_BlockState *state,
++						unsigned *sequenceNumber);
++
++int yaffs_EraseBlockInNAND(struct yaffs_DeviceStruct *dev,
++				  int blockInNAND);
++
++int yaffs_InitialiseNAND(struct yaffs_DeviceStruct *dev);
++
++#endif
++
+diff -Nur linux-2.6.32.orig/fs/yaffs2/yaffs_packedtags1.c linux-2.6.32/fs/yaffs2/yaffs_packedtags1.c
+--- linux-2.6.32.orig/fs/yaffs2/yaffs_packedtags1.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.32/fs/yaffs2/yaffs_packedtags1.c	2010-01-30 20:35:01.902201298 +0100
+@@ -0,0 +1,50 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2007 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "yaffs_packedtags1.h"
++#include "yportenv.h"
++
++void yaffs_PackTags1(yaffs_PackedTags1 *pt, const yaffs_ExtendedTags *t)
++{
++	pt->chunkId = t->chunkId;
++	pt->serialNumber = t->serialNumber;
++	pt->byteCount = t->byteCount;
++	pt->objectId = t->objectId;
++	pt->ecc = 0;
++	pt->deleted = (t->chunkDeleted) ? 0 : 1;
++	pt->unusedStuff = 0;
++	pt->shouldBeFF = 0xFFFFFFFF;
++
++}
++
++void yaffs_UnpackTags1(yaffs_ExtendedTags *t, const yaffs_PackedTags1 *pt)
++{
++	static const __u8 allFF[] =
++	    { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
++0xff };
++
++	if (memcmp(allFF, pt, sizeof(yaffs_PackedTags1))) {
++		t->blockBad = 0;
++		if (pt->shouldBeFF != 0xFFFFFFFF)
++			t->blockBad = 1;
++		t->chunkUsed = 1;
++		t->objectId = pt->objectId;
++		t->chunkId = pt->chunkId;
++		t->byteCount = pt->byteCount;
++		t->eccResult = YAFFS_ECC_RESULT_NO_ERROR;
++		t->chunkDeleted = (pt->deleted) ? 0 : 1;
++		t->serialNumber = pt->serialNumber;
++	} else {
++		memset(t, 0, sizeof(yaffs_ExtendedTags));
++	}
++}
+diff -Nur linux-2.6.32.orig/fs/yaffs2/yaffs_packedtags1.h linux-2.6.32/fs/yaffs2/yaffs_packedtags1.h
+--- linux-2.6.32.orig/fs/yaffs2/yaffs_packedtags1.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.32/fs/yaffs2/yaffs_packedtags1.h	2010-01-30 20:35:01.951826961 +0100
+@@ -0,0 +1,37 @@
++/*
++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2007 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++/* This is used to pack YAFFS1 tags, not YAFFS2 tags. */
++
++#ifndef __YAFFS_PACKEDTAGS1_H__
++#define __YAFFS_PACKEDTAGS1_H__
++
++#include "yaffs_guts.h"
++
++typedef struct {
++	unsigned chunkId:20;
++	unsigned serialNumber:2;
++	unsigned byteCount:10;
++	unsigned objectId:18;
++	unsigned ecc:12;
++	unsigned deleted:1;
++	unsigned unusedStuff:1;
++	unsigned shouldBeFF;
++
++} yaffs_PackedTags1;
++
++void yaffs_PackTags1(yaffs_PackedTags1 *pt, const yaffs_ExtendedTags *t);
++void yaffs_UnpackTags1(yaffs_ExtendedTags *t, const yaffs_PackedTags1 *pt);
++#endif
+diff -Nur linux-2.6.32.orig/fs/yaffs2/yaffs_packedtags2.c linux-2.6.32/fs/yaffs2/yaffs_packedtags2.c
+--- linux-2.6.32.orig/fs/yaffs2/yaffs_packedtags2.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.32/fs/yaffs2/yaffs_packedtags2.c	2010-01-30 20:35:01.991823846 +0100
+@@ -0,0 +1,206 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2007 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "yaffs_packedtags2.h"
++#include "yportenv.h"
++#include "yaffs_tagsvalidity.h"
++
++/* This code packs a set of extended tags into a binary structure for
++ * NAND storage
++ */
++
++/* Some of the information is "extra" stuff which can be packed in to
++ * speed scanning
++ * This is defined by having the EXTRA_HEADER_INFO_FLAG set.
++ */
++
++/* Extra flags applied to chunkId */
++
++#define EXTRA_HEADER_INFO_FLAG	0x80000000
++#define EXTRA_SHRINK_FLAG	0x40000000
++#define EXTRA_SHADOWS_FLAG	0x20000000
++#define EXTRA_SPARE_FLAGS	0x10000000
++
++#define ALL_EXTRA_FLAGS		0xF0000000
++
++/* Also, the top 4 bits of the object Id are set to the object type. */
++#define EXTRA_OBJECT_TYPE_SHIFT (28)
++#define EXTRA_OBJECT_TYPE_MASK  ((0x0F) << EXTRA_OBJECT_TYPE_SHIFT)
++
++
++static void yaffs_DumpPackedTags2TagsPart(const yaffs_PackedTags2TagsPart *ptt)
++{
++	T(YAFFS_TRACE_MTD,
++	  (TSTR("packed tags obj %d chunk %d byte %d seq %d" TENDSTR),
++	   ptt->objectId, ptt->chunkId, ptt->byteCount,
++	   ptt->sequenceNumber));
++}
++static void yaffs_DumpPackedTags2(const yaffs_PackedTags2 *pt)
++{
++	yaffs_DumpPackedTags2TagsPart(&pt->t);
++}
++
++static void yaffs_DumpTags2(const yaffs_ExtendedTags *t)
++{
++	T(YAFFS_TRACE_MTD,
++	  (TSTR
++	   ("ext.tags eccres %d blkbad %d chused %d obj %d chunk%d byte %d del %d ser %d seq %d"
++	    TENDSTR), t->eccResult, t->blockBad, t->chunkUsed, t->objectId,
++	   t->chunkId, t->byteCount, t->chunkDeleted, t->serialNumber,
++	   t->sequenceNumber));
++
++}
++
++void yaffs_PackTags2TagsPart(yaffs_PackedTags2TagsPart *ptt,
++		const yaffs_ExtendedTags *t)
++{
++	ptt->chunkId = t->chunkId;
++	ptt->sequenceNumber = t->sequenceNumber;
++	ptt->byteCount = t->byteCount;
++	ptt->objectId = t->objectId;
++
++	if (t->chunkId == 0 && t->extraHeaderInfoAvailable) {
++		/* Store the extra header info instead */
++		/* We save the parent object in the chunkId */
++		ptt->chunkId = EXTRA_HEADER_INFO_FLAG
++			| t->extraParentObjectId;
++		if (t->extraIsShrinkHeader)
++			ptt->chunkId |= EXTRA_SHRINK_FLAG;
++		if (t->extraShadows)
++			ptt->chunkId |= EXTRA_SHADOWS_FLAG;
++
++		ptt->objectId &= ~EXTRA_OBJECT_TYPE_MASK;
++		ptt->objectId |=
++		    (t->extraObjectType << EXTRA_OBJECT_TYPE_SHIFT);
++
++		if (t->extraObjectType == YAFFS_OBJECT_TYPE_HARDLINK)
++			ptt->byteCount = t->extraEquivalentObjectId;
++		else if (t->extraObjectType == YAFFS_OBJECT_TYPE_FILE)
++			ptt->byteCount = t->extraFileLength;
++		else
++			ptt->byteCount = 0;
++	}
++
++	yaffs_DumpPackedTags2TagsPart(ptt);
++	yaffs_DumpTags2(t);
++}
++
++
++void yaffs_PackTags2(yaffs_PackedTags2 *pt, const yaffs_ExtendedTags *t)
++{
++	yaffs_PackTags2TagsPart(&pt->t, t);
++
++#ifndef YAFFS_IGNORE_TAGS_ECC
++	{
++		yaffs_ECCCalculateOther((unsigned char *)&pt->t,
++					sizeof(yaffs_PackedTags2TagsPart),
++					&pt->ecc);
++	}
++#endif
++}
++
++
++void yaffs_UnpackTags2TagsPart(yaffs_ExtendedTags *t,
++		yaffs_PackedTags2TagsPart *ptt)
++{
++
++	memset(t, 0, sizeof(yaffs_ExtendedTags));
++
++	yaffs_InitialiseTags(t);
++
++	if (ptt->sequenceNumber != 0xFFFFFFFF) {
++		t->blockBad = 0;
++		t->chunkUsed = 1;
++		t->objectId = ptt->objectId;
++		t->chunkId = ptt->chunkId;
++		t->byteCount = ptt->byteCount;
++		t->chunkDeleted = 0;
++		t->serialNumber = 0;
++		t->sequenceNumber = ptt->sequenceNumber;
++
++		/* Do extra header info stuff */
++
++		if (ptt->chunkId & EXTRA_HEADER_INFO_FLAG) {
++			t->chunkId = 0;
++			t->byteCount = 0;
++
++			t->extraHeaderInfoAvailable = 1;
++			t->extraParentObjectId =
++			    ptt->chunkId & (~(ALL_EXTRA_FLAGS));
++			t->extraIsShrinkHeader =
++			    (ptt->chunkId & EXTRA_SHRINK_FLAG) ? 1 : 0;
++			t->extraShadows =
++			    (ptt->chunkId & EXTRA_SHADOWS_FLAG) ? 1 : 0;
++			t->extraObjectType =
++			    ptt->objectId >> EXTRA_OBJECT_TYPE_SHIFT;
++			t->objectId &= ~EXTRA_OBJECT_TYPE_MASK;
++
++			if (t->extraObjectType == YAFFS_OBJECT_TYPE_HARDLINK)
++				t->extraEquivalentObjectId = ptt->byteCount;
++			else
++				t->extraFileLength = ptt->byteCount;
++		}
++	}
++
++	yaffs_DumpPackedTags2TagsPart(ptt);
++	yaffs_DumpTags2(t);
++
++}
++
++
++void yaffs_UnpackTags2(yaffs_ExtendedTags *t, yaffs_PackedTags2 *pt)
++{
++
++	yaffs_ECCResult eccResult = YAFFS_ECC_RESULT_NO_ERROR;
++
++	if (pt->t.sequenceNumber != 0xFFFFFFFF) {
++		/* Page is in use */
++#ifndef YAFFS_IGNORE_TAGS_ECC
++		{
++			yaffs_ECCOther ecc;
++			int result;
++			yaffs_ECCCalculateOther((unsigned char *)&pt->t,
++						sizeof
++						(yaffs_PackedTags2TagsPart),
++						&ecc);
++			result =
++			    yaffs_ECCCorrectOther((unsigned char *)&pt->t,
++						  sizeof
++						  (yaffs_PackedTags2TagsPart),
++						  &pt->ecc, &ecc);
++			switch (result) {
++			case 0:
++				eccResult = YAFFS_ECC_RESULT_NO_ERROR;
++				break;
++			case 1:
++				eccResult = YAFFS_ECC_RESULT_FIXED;
++				break;
++			case -1:
++				eccResult = YAFFS_ECC_RESULT_UNFIXED;
++				break;
++			default:
++				eccResult = YAFFS_ECC_RESULT_UNKNOWN;
++			}
++		}
++#endif
++	}
++
++	yaffs_UnpackTags2TagsPart(t, &pt->t);
++
++	t->eccResult = eccResult;
++
++	yaffs_DumpPackedTags2(pt);
++	yaffs_DumpTags2(t);
++
++}
++
+diff -Nur linux-2.6.32.orig/fs/yaffs2/yaffs_packedtags2.h linux-2.6.32/fs/yaffs2/yaffs_packedtags2.h
+--- linux-2.6.32.orig/fs/yaffs2/yaffs_packedtags2.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.32/fs/yaffs2/yaffs_packedtags2.h	2010-01-30 20:35:02.031973375 +0100
+@@ -0,0 +1,43 @@
++/*
++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2007 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++/* This is used to pack YAFFS2 tags, not YAFFS1tags. */
++
++#ifndef __YAFFS_PACKEDTAGS2_H__
++#define __YAFFS_PACKEDTAGS2_H__
++
++#include "yaffs_guts.h"
++#include "yaffs_ecc.h"
++
++typedef struct {
++	unsigned sequenceNumber;
++	unsigned objectId;
++	unsigned chunkId;
++	unsigned byteCount;
++} yaffs_PackedTags2TagsPart;
++
++typedef struct {
++	yaffs_PackedTags2TagsPart t;
++	yaffs_ECCOther ecc;
++} yaffs_PackedTags2;
++
++/* Full packed tags with ECC, used for oob tags */
++void yaffs_PackTags2(yaffs_PackedTags2 *pt, const yaffs_ExtendedTags *t);
++void yaffs_UnpackTags2(yaffs_ExtendedTags *t, yaffs_PackedTags2 *pt);
++
++/* Only the tags part (no ECC for use with inband tags */
++void yaffs_PackTags2TagsPart(yaffs_PackedTags2TagsPart *pt, const yaffs_ExtendedTags *t);
++void yaffs_UnpackTags2TagsPart(yaffs_ExtendedTags *t, yaffs_PackedTags2TagsPart *pt);
++#endif
+diff -Nur linux-2.6.32.orig/fs/yaffs2/yaffs_qsort.c linux-2.6.32/fs/yaffs2/yaffs_qsort.c
+--- linux-2.6.32.orig/fs/yaffs2/yaffs_qsort.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.32/fs/yaffs2/yaffs_qsort.c	2010-01-30 20:35:02.071829765 +0100
+@@ -0,0 +1,163 @@
++/*
++ * Copyright (c) 1992, 1993
++ *	The Regents of the University of California.  All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ * 1. Redistributions of source code must retain the above copyright
++ *    notice, this list of conditions and the following disclaimer.
++ * 2. Redistributions in binary form must reproduce the above copyright
++ *    notice, this list of conditions and the following disclaimer in the
++ *    documentation and/or other materials provided with the distribution.
++ * 3. Neither the name of the University nor the names of its contributors
++ *    may be used to endorse or promote products derived from this software
++ *    without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
++ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
++ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
++ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
++ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
++ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
++ * SUCH DAMAGE.
++ */
++
++#include "yportenv.h"
++/* #include <linux/string.h> */
++
++/*
++ * Qsort routine from Bentley & McIlroy's "Engineering a Sort Function".
++ */
++#define swapcode(TYPE, parmi, parmj, n) do { 		\
++	long i = (n) / sizeof (TYPE); 			\
++	register TYPE *pi = (TYPE *) (parmi); 		\
++	register TYPE *pj = (TYPE *) (parmj); 		\
++	do { 						\
++		register TYPE	t = *pi;		\
++		*pi++ = *pj;				\
++		*pj++ = t;				\
++	} while (--i > 0);				\
++} while (0)
++
++#define SWAPINIT(a, es) swaptype = ((char *)a - (char *)0) % sizeof(long) || \
++	es % sizeof(long) ? 2 : es == sizeof(long) ? 0 : 1;
++
++static __inline void
++swapfunc(char *a, char *b, int n, int swaptype)
++{
++	if (swaptype <= 1)
++		swapcode(long, a, b, n);
++	else
++		swapcode(char, a, b, n);
++}
++
++#define yswap(a, b) do {					\
++	if (swaptype == 0) {				\
++		long t = *(long *)(a);			\
++		*(long *)(a) = *(long *)(b);		\
++		*(long *)(b) = t;			\
++	} else						\
++		swapfunc(a, b, es, swaptype);		\
++} while (0)
++
++#define vecswap(a, b, n) 	if ((n) > 0) swapfunc(a, b, n, swaptype)
++
++static __inline char *
++med3(char *a, char *b, char *c, int (*cmp)(const void *, const void *))
++{
++	return cmp(a, b) < 0 ?
++		(cmp(b, c) < 0 ? b : (cmp(a, c) < 0 ? c : a))
++		: (cmp(b, c) > 0 ? b : (cmp(a, c) < 0 ? a : c));
++}
++
++#ifndef min
++#define min(a, b) (((a) < (b)) ? (a) : (b))
++#endif
++
++void
++yaffs_qsort(void *aa, size_t n, size_t es,
++	int (*cmp)(const void *, const void *))
++{
++	char *pa, *pb, *pc, *pd, *pl, *pm, *pn;
++	int d, r, swaptype, swap_cnt;
++	register char *a = aa;
++
++loop:	SWAPINIT(a, es);
++	swap_cnt = 0;
++	if (n < 7) {
++		for (pm = (char *)a + es; pm < (char *) a + n * es; pm += es)
++			for (pl = pm; pl > (char *) a && cmp(pl - es, pl) > 0;
++			     pl -= es)
++				yswap(pl, pl - es);
++		return;
++	}
++	pm = (char *)a + (n / 2) * es;
++	if (n > 7) {
++		pl = (char *)a;
++		pn = (char *)a + (n - 1) * es;
++		if (n > 40) {
++			d = (n / 8) * es;
++			pl = med3(pl, pl + d, pl + 2 * d, cmp);
++			pm = med3(pm - d, pm, pm + d, cmp);
++			pn = med3(pn - 2 * d, pn - d, pn, cmp);
++		}
++		pm = med3(pl, pm, pn, cmp);
++	}
++	yswap(a, pm);
++	pa = pb = (char *)a + es;
++
++	pc = pd = (char *)a + (n - 1) * es;
++	for (;;) {
++		while (pb <= pc && (r = cmp(pb, a)) <= 0) {
++			if (r == 0) {
++				swap_cnt = 1;
++				yswap(pa, pb);
++				pa += es;
++			}
++			pb += es;
++		}
++		while (pb <= pc && (r = cmp(pc, a)) >= 0) {
++			if (r == 0) {
++				swap_cnt = 1;
++				yswap(pc, pd);
++				pd -= es;
++			}
++			pc -= es;
++		}
++		if (pb > pc)
++			break;
++		yswap(pb, pc);
++		swap_cnt = 1;
++		pb += es;
++		pc -= es;
++	}
++	if (swap_cnt == 0) {  /* Switch to insertion sort */
++		for (pm = (char *) a + es; pm < (char *) a + n * es; pm += es)
++			for (pl = pm; pl > (char *) a && cmp(pl - es, pl) > 0;
++			     pl -= es)
++				yswap(pl, pl - es);
++		return;
++	}
++
++	pn = (char *)a + n * es;
++	r = min(pa - (char *)a, pb - pa);
++	vecswap(a, pb - r, r);
++	r = min((long)(pd - pc), (long)(pn - pd - es));
++	vecswap(pb, pn - r, r);
++	r = pb - pa;
++	if (r > es)
++		yaffs_qsort(a, r / es, es, cmp);
++	r = pd - pc;
++	if (r > es) {
++		/* Iterate rather than recurse to save stack space */
++		a = pn - r;
++		n = r / es;
++		goto loop;
++	}
++/*		yaffs_qsort(pn - r, r / es, es, cmp);*/
++}
+diff -Nur linux-2.6.32.orig/fs/yaffs2/yaffs_qsort.h linux-2.6.32/fs/yaffs2/yaffs_qsort.h
+--- linux-2.6.32.orig/fs/yaffs2/yaffs_qsort.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.32/fs/yaffs2/yaffs_qsort.h	2010-01-30 20:35:02.131826281 +0100
+@@ -0,0 +1,23 @@
++/*
++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2007 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++
++#ifndef __YAFFS_QSORT_H__
++#define __YAFFS_QSORT_H__
++
++extern void yaffs_qsort(void *const base, size_t total_elems, size_t size,
++			int (*cmp)(const void *, const void *));
++
++#endif
+diff -Nur linux-2.6.32.orig/fs/yaffs2/yaffs_tagscompat.c linux-2.6.32/fs/yaffs2/yaffs_tagscompat.c
+--- linux-2.6.32.orig/fs/yaffs2/yaffs_tagscompat.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.32/fs/yaffs2/yaffs_tagscompat.c	2010-01-30 20:35:02.173084965 +0100
+@@ -0,0 +1,541 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2007 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "yaffs_guts.h"
++#include "yaffs_tagscompat.h"
++#include "yaffs_ecc.h"
++#include "yaffs_getblockinfo.h"
++
++static void yaffs_HandleReadDataError(yaffs_Device *dev, int chunkInNAND);
++#ifdef NOTYET
++static void yaffs_CheckWrittenBlock(yaffs_Device *dev, int chunkInNAND);
++static void yaffs_HandleWriteChunkOk(yaffs_Device *dev, int chunkInNAND,
++				     const __u8 *data,
++				     const yaffs_Spare *spare);
++static void yaffs_HandleUpdateChunk(yaffs_Device *dev, int chunkInNAND,
++				    const yaffs_Spare *spare);
++static void yaffs_HandleWriteChunkError(yaffs_Device *dev, int chunkInNAND);
++#endif
++
++static const char yaffs_countBitsTable[256] = {
++	0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4,
++	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
++	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
++	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
++	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
++	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
++	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
++	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
++	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
++	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
++	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
++	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
++	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
++	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
++	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
++	4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8
++};
++
++int yaffs_CountBits(__u8 x)
++{
++	int retVal;
++	retVal = yaffs_countBitsTable[x];
++	return retVal;
++}
++
++/********** Tags ECC calculations  *********/
++
++void yaffs_CalcECC(const __u8 *data, yaffs_Spare *spare)
++{
++	yaffs_ECCCalculate(data, spare->ecc1);
++	yaffs_ECCCalculate(&data[256], spare->ecc2);
++}
++
++void yaffs_CalcTagsECC(yaffs_Tags *tags)
++{
++	/* Calculate an ecc */
++
++	unsigned char *b = ((yaffs_TagsUnion *) tags)->asBytes;
++	unsigned i, j;
++	unsigned ecc = 0;
++	unsigned bit = 0;
++
++	tags->ecc = 0;
++
++	for (i = 0; i < 8; i++) {
++		for (j = 1; j & 0xff; j <<= 1) {
++			bit++;
++			if (b[i] & j)
++				ecc ^= bit;
++		}
++	}
++
++	tags->ecc = ecc;
++
++}
++
++int yaffs_CheckECCOnTags(yaffs_Tags *tags)
++{
++	unsigned ecc = tags->ecc;
++
++	yaffs_CalcTagsECC(tags);
++
++	ecc ^= tags->ecc;
++
++	if (ecc && ecc <= 64) {
++		/* TODO: Handle the failure better. Retire? */
++		unsigned char *b = ((yaffs_TagsUnion *) tags)->asBytes;
++
++		ecc--;
++
++		b[ecc / 8] ^= (1 << (ecc & 7));
++
++		/* Now recvalc the ecc */
++		yaffs_CalcTagsECC(tags);
++
++		return 1;	/* recovered error */
++	} else if (ecc) {
++		/* Wierd ecc failure value */
++		/* TODO Need to do somethiong here */
++		return -1;	/* unrecovered error */
++	}
++
++	return 0;
++}
++
++/********** Tags **********/
++
++static void yaffs_LoadTagsIntoSpare(yaffs_Spare *sparePtr,
++				yaffs_Tags *tagsPtr)
++{
++	yaffs_TagsUnion *tu = (yaffs_TagsUnion *) tagsPtr;
++
++	yaffs_CalcTagsECC(tagsPtr);
++
++	sparePtr->tagByte0 = tu->asBytes[0];
++	sparePtr->tagByte1 = tu->asBytes[1];
++	sparePtr->tagByte2 = tu->asBytes[2];
++	sparePtr->tagByte3 = tu->asBytes[3];
++	sparePtr->tagByte4 = tu->asBytes[4];
++	sparePtr->tagByte5 = tu->asBytes[5];
++	sparePtr->tagByte6 = tu->asBytes[6];
++	sparePtr->tagByte7 = tu->asBytes[7];
++}
++
++static void yaffs_GetTagsFromSpare(yaffs_Device *dev, yaffs_Spare *sparePtr,
++				yaffs_Tags *tagsPtr)
++{
++	yaffs_TagsUnion *tu = (yaffs_TagsUnion *) tagsPtr;
++	int result;
++
++	tu->asBytes[0] = sparePtr->tagByte0;
++	tu->asBytes[1] = sparePtr->tagByte1;
++	tu->asBytes[2] = sparePtr->tagByte2;
++	tu->asBytes[3] = sparePtr->tagByte3;
++	tu->asBytes[4] = sparePtr->tagByte4;
++	tu->asBytes[5] = sparePtr->tagByte5;
++	tu->asBytes[6] = sparePtr->tagByte6;
++	tu->asBytes[7] = sparePtr->tagByte7;
++
++	result = yaffs_CheckECCOnTags(tagsPtr);
++	if (result > 0)
++		dev->tagsEccFixed++;
++	else if (result < 0)
++		dev->tagsEccUnfixed++;
++}
++
++static void yaffs_SpareInitialise(yaffs_Spare *spare)
++{
++	memset(spare, 0xFF, sizeof(yaffs_Spare));
++}
++
++static int yaffs_WriteChunkToNAND(struct yaffs_DeviceStruct *dev,
++				int chunkInNAND, const __u8 *data,
++				yaffs_Spare *spare)
++{
++	if (chunkInNAND < dev->startBlock * dev->nChunksPerBlock) {
++		T(YAFFS_TRACE_ERROR,
++		  (TSTR("**>> yaffs chunk %d is not valid" TENDSTR),
++		   chunkInNAND));
++		return YAFFS_FAIL;
++	}
++
++	dev->nPageWrites++;
++	return dev->writeChunkToNAND(dev, chunkInNAND, data, spare);
++}
++
++static int yaffs_ReadChunkFromNAND(struct yaffs_DeviceStruct *dev,
++				   int chunkInNAND,
++				   __u8 *data,
++				   yaffs_Spare *spare,
++				   yaffs_ECCResult *eccResult,
++				   int doErrorCorrection)
++{
++	int retVal;
++	yaffs_Spare localSpare;
++
++	dev->nPageReads++;
++
++	if (!spare && data) {
++		/* If we don't have a real spare, then we use a local one. */
++		/* Need this for the calculation of the ecc */
++		spare = &localSpare;
++	}
++
++	if (!dev->useNANDECC) {
++		retVal = dev->readChunkFromNAND(dev, chunkInNAND, data, spare);
++		if (data && doErrorCorrection) {
++			/* Do ECC correction */
++			/* Todo handle any errors */
++			int eccResult1, eccResult2;
++			__u8 calcEcc[3];
++
++			yaffs_ECCCalculate(data, calcEcc);
++			eccResult1 =
++			    yaffs_ECCCorrect(data, spare->ecc1, calcEcc);
++			yaffs_ECCCalculate(&data[256], calcEcc);
++			eccResult2 =
++			    yaffs_ECCCorrect(&data[256], spare->ecc2, calcEcc);
++
++			if (eccResult1 > 0) {
++				T(YAFFS_TRACE_ERROR,
++				  (TSTR
++				   ("**>>yaffs ecc error fix performed on chunk %d:0"
++				    TENDSTR), chunkInNAND));
++				dev->eccFixed++;
++			} else if (eccResult1 < 0) {
++				T(YAFFS_TRACE_ERROR,
++				  (TSTR
++				   ("**>>yaffs ecc error unfixed on chunk %d:0"
++				    TENDSTR), chunkInNAND));
++				dev->eccUnfixed++;
++			}
++
++			if (eccResult2 > 0) {
++				T(YAFFS_TRACE_ERROR,
++				  (TSTR
++				   ("**>>yaffs ecc error fix performed on chunk %d:1"
++				    TENDSTR), chunkInNAND));
++				dev->eccFixed++;
++			} else if (eccResult2 < 0) {
++				T(YAFFS_TRACE_ERROR,
++				  (TSTR
++				   ("**>>yaffs ecc error unfixed on chunk %d:1"
++				    TENDSTR), chunkInNAND));
++				dev->eccUnfixed++;
++			}
++
++			if (eccResult1 || eccResult2) {
++				/* We had a data problem on this page */
++				yaffs_HandleReadDataError(dev, chunkInNAND);
++			}
++
++			if (eccResult1 < 0 || eccResult2 < 0)
++				*eccResult = YAFFS_ECC_RESULT_UNFIXED;
++			else if (eccResult1 > 0 || eccResult2 > 0)
++				*eccResult = YAFFS_ECC_RESULT_FIXED;
++			else
++				*eccResult = YAFFS_ECC_RESULT_NO_ERROR;
++		}
++	} else {
++		/* Must allocate enough memory for spare+2*sizeof(int) */
++		/* for ecc results from device. */
++		struct yaffs_NANDSpare nspare;
++
++		memset(&nspare, 0, sizeof(nspare));
++
++		retVal = dev->readChunkFromNAND(dev, chunkInNAND, data,
++					(yaffs_Spare *) &nspare);
++		memcpy(spare, &nspare, sizeof(yaffs_Spare));
++		if (data && doErrorCorrection) {
++			if (nspare.eccres1 > 0) {
++				T(YAFFS_TRACE_ERROR,
++				  (TSTR
++				   ("**>>mtd ecc error fix performed on chunk %d:0"
++				    TENDSTR), chunkInNAND));
++			} else if (nspare.eccres1 < 0) {
++				T(YAFFS_TRACE_ERROR,
++				  (TSTR
++				   ("**>>mtd ecc error unfixed on chunk %d:0"
++				    TENDSTR), chunkInNAND));
++			}
++
++			if (nspare.eccres2 > 0) {
++				T(YAFFS_TRACE_ERROR,
++				  (TSTR
++				   ("**>>mtd ecc error fix performed on chunk %d:1"
++				    TENDSTR), chunkInNAND));
++			} else if (nspare.eccres2 < 0) {
++				T(YAFFS_TRACE_ERROR,
++				  (TSTR
++				   ("**>>mtd ecc error unfixed on chunk %d:1"
++				    TENDSTR), chunkInNAND));
++			}
++
++			if (nspare.eccres1 || nspare.eccres2) {
++				/* We had a data problem on this page */
++				yaffs_HandleReadDataError(dev, chunkInNAND);
++			}
++
++			if (nspare.eccres1 < 0 || nspare.eccres2 < 0)
++				*eccResult = YAFFS_ECC_RESULT_UNFIXED;
++			else if (nspare.eccres1 > 0 || nspare.eccres2 > 0)
++				*eccResult = YAFFS_ECC_RESULT_FIXED;
++			else
++				*eccResult = YAFFS_ECC_RESULT_NO_ERROR;
++
++		}
++	}
++	return retVal;
++}
++
++#ifdef NOTYET
++static int yaffs_CheckChunkErased(struct yaffs_DeviceStruct *dev,
++				  int chunkInNAND)
++{
++	static int init;
++	static __u8 cmpbuf[YAFFS_BYTES_PER_CHUNK];
++	static __u8 data[YAFFS_BYTES_PER_CHUNK];
++	/* Might as well always allocate the larger size for */
++	/* dev->useNANDECC == true; */
++	static __u8 spare[sizeof(struct yaffs_NANDSpare)];
++
++	dev->readChunkFromNAND(dev, chunkInNAND, data, (yaffs_Spare *) spare);
++
++	if (!init) {
++		memset(cmpbuf, 0xff, YAFFS_BYTES_PER_CHUNK);
++		init = 1;
++	}
++
++	if (memcmp(cmpbuf, data, YAFFS_BYTES_PER_CHUNK))
++		return YAFFS_FAIL;
++	if (memcmp(cmpbuf, spare, 16))
++		return YAFFS_FAIL;
++
++	return YAFFS_OK;
++
++}
++#endif
++
++/*
++ * Functions for robustisizing
++ */
++
++static void yaffs_HandleReadDataError(yaffs_Device *dev, int chunkInNAND)
++{
++	int blockInNAND = chunkInNAND / dev->nChunksPerBlock;
++
++	/* Mark the block for retirement */
++	yaffs_GetBlockInfo(dev, blockInNAND + dev->blockOffset)->needsRetiring = 1;
++	T(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
++	  (TSTR("**>>Block %d marked for retirement" TENDSTR), blockInNAND));
++
++	/* TODO:
++	 * Just do a garbage collection on the affected block
++	 * then retire the block
++	 * NB recursion
++	 */
++}
++
++#ifdef NOTYET
++static void yaffs_CheckWrittenBlock(yaffs_Device *dev, int chunkInNAND)
++{
++}
++
++static void yaffs_HandleWriteChunkOk(yaffs_Device *dev, int chunkInNAND,
++				     const __u8 *data,
++				     const yaffs_Spare *spare)
++{
++}
++
++static void yaffs_HandleUpdateChunk(yaffs_Device *dev, int chunkInNAND,
++				    const yaffs_Spare *spare)
++{
++}
++
++static void yaffs_HandleWriteChunkError(yaffs_Device *dev, int chunkInNAND)
++{
++	int blockInNAND = chunkInNAND / dev->nChunksPerBlock;
++
++	/* Mark the block for retirement */
++	yaffs_GetBlockInfo(dev, blockInNAND)->needsRetiring = 1;
++	/* Delete the chunk */
++	yaffs_DeleteChunk(dev, chunkInNAND, 1, __LINE__);
++}
++
++static int yaffs_VerifyCompare(const __u8 *d0, const __u8 *d1,
++			       const yaffs_Spare *s0, const yaffs_Spare *s1)
++{
++
++	if (memcmp(d0, d1, YAFFS_BYTES_PER_CHUNK) != 0 ||
++	    s0->tagByte0 != s1->tagByte0 ||
++	    s0->tagByte1 != s1->tagByte1 ||
++	    s0->tagByte2 != s1->tagByte2 ||
++	    s0->tagByte3 != s1->tagByte3 ||
++	    s0->tagByte4 != s1->tagByte4 ||
++	    s0->tagByte5 != s1->tagByte5 ||
++	    s0->tagByte6 != s1->tagByte6 ||
++	    s0->tagByte7 != s1->tagByte7 ||
++	    s0->ecc1[0] != s1->ecc1[0] ||
++	    s0->ecc1[1] != s1->ecc1[1] ||
++	    s0->ecc1[2] != s1->ecc1[2] ||
++	    s0->ecc2[0] != s1->ecc2[0] ||
++	    s0->ecc2[1] != s1->ecc2[1] || s0->ecc2[2] != s1->ecc2[2]) {
++		return 0;
++	}
++
++	return 1;
++}
++#endif				/* NOTYET */
++
++int yaffs_TagsCompatabilityWriteChunkWithTagsToNAND(yaffs_Device *dev,
++						int chunkInNAND,
++						const __u8 *data,
++						const yaffs_ExtendedTags *eTags)
++{
++	yaffs_Spare spare;
++	yaffs_Tags tags;
++
++	yaffs_SpareInitialise(&spare);
++
++	if (eTags->chunkDeleted)
++		spare.pageStatus = 0;
++	else {
++		tags.objectId = eTags->objectId;
++		tags.chunkId = eTags->chunkId;
++
++		tags.byteCountLSB = eTags->byteCount & 0x3ff;
++
++		if (dev->nDataBytesPerChunk >= 1024)
++			tags.byteCountMSB = (eTags->byteCount >> 10) & 3;
++		else
++			tags.byteCountMSB = 3;
++
++
++		tags.serialNumber = eTags->serialNumber;
++
++		if (!dev->useNANDECC && data)
++			yaffs_CalcECC(data, &spare);
++
++		yaffs_LoadTagsIntoSpare(&spare, &tags);
++
++	}
++
++	return yaffs_WriteChunkToNAND(dev, chunkInNAND, data, &spare);
++}
++
++int yaffs_TagsCompatabilityReadChunkWithTagsFromNAND(yaffs_Device *dev,
++						     int chunkInNAND,
++						     __u8 *data,
++						     yaffs_ExtendedTags *eTags)
++{
++
++	yaffs_Spare spare;
++	yaffs_Tags tags;
++	yaffs_ECCResult eccResult = YAFFS_ECC_RESULT_UNKNOWN;
++
++	static yaffs_Spare spareFF;
++	static int init;
++
++	if (!init) {
++		memset(&spareFF, 0xFF, sizeof(spareFF));
++		init = 1;
++	}
++
++	if (yaffs_ReadChunkFromNAND
++	    (dev, chunkInNAND, data, &spare, &eccResult, 1)) {
++		/* eTags may be NULL */
++		if (eTags) {
++
++			int deleted =
++			    (yaffs_CountBits(spare.pageStatus) < 7) ? 1 : 0;
++
++			eTags->chunkDeleted = deleted;
++			eTags->eccResult = eccResult;
++			eTags->blockBad = 0;	/* We're reading it */
++			/* therefore it is not a bad block */
++			eTags->chunkUsed =
++			    (memcmp(&spareFF, &spare, sizeof(spareFF)) !=
++			     0) ? 1 : 0;
++
++			if (eTags->chunkUsed) {
++				yaffs_GetTagsFromSpare(dev, &spare, &tags);
++
++				eTags->objectId = tags.objectId;
++				eTags->chunkId = tags.chunkId;
++				eTags->byteCount = tags.byteCountLSB;
++
++				if (dev->nDataBytesPerChunk >= 1024)
++					eTags->byteCount |= (((unsigned) tags.byteCountMSB) << 10);
++
++				eTags->serialNumber = tags.serialNumber;
++			}
++		}
++
++		return YAFFS_OK;
++	} else {
++		return YAFFS_FAIL;
++	}
++}
++
++int yaffs_TagsCompatabilityMarkNANDBlockBad(struct yaffs_DeviceStruct *dev,
++					    int blockInNAND)
++{
++
++	yaffs_Spare spare;
++
++	memset(&spare, 0xff, sizeof(yaffs_Spare));
++
++	spare.blockStatus = 'Y';
++
++	yaffs_WriteChunkToNAND(dev, blockInNAND * dev->nChunksPerBlock, NULL,
++			       &spare);
++	yaffs_WriteChunkToNAND(dev, blockInNAND * dev->nChunksPerBlock + 1,
++			       NULL, &spare);
++
++	return YAFFS_OK;
++
++}
++
++int yaffs_TagsCompatabilityQueryNANDBlock(struct yaffs_DeviceStruct *dev,
++					  int blockNo,
++					  yaffs_BlockState *state,
++					  __u32 *sequenceNumber)
++{
++
++	yaffs_Spare spare0, spare1;
++	static yaffs_Spare spareFF;
++	static int init;
++	yaffs_ECCResult dummy;
++
++	if (!init) {
++		memset(&spareFF, 0xFF, sizeof(spareFF));
++		init = 1;
++	}
++
++	*sequenceNumber = 0;
++
++	yaffs_ReadChunkFromNAND(dev, blockNo * dev->nChunksPerBlock, NULL,
++				&spare0, &dummy, 1);
++	yaffs_ReadChunkFromNAND(dev, blockNo * dev->nChunksPerBlock + 1, NULL,
++				&spare1, &dummy, 1);
++
++	if (yaffs_CountBits(spare0.blockStatus & spare1.blockStatus) < 7)
++		*state = YAFFS_BLOCK_STATE_DEAD;
++	else if (memcmp(&spareFF, &spare0, sizeof(spareFF)) == 0)
++		*state = YAFFS_BLOCK_STATE_EMPTY;
++	else
++		*state = YAFFS_BLOCK_STATE_NEEDS_SCANNING;
++
++	return YAFFS_OK;
++}
+diff -Nur linux-2.6.32.orig/fs/yaffs2/yaffs_tagscompat.h linux-2.6.32/fs/yaffs2/yaffs_tagscompat.h
+--- linux-2.6.32.orig/fs/yaffs2/yaffs_tagscompat.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.32/fs/yaffs2/yaffs_tagscompat.h	2010-01-30 20:35:02.213084840 +0100
+@@ -0,0 +1,39 @@
++/*
++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2007 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YAFFS_TAGSCOMPAT_H__
++#define __YAFFS_TAGSCOMPAT_H__
++
++#include "yaffs_guts.h"
++int yaffs_TagsCompatabilityWriteChunkWithTagsToNAND(yaffs_Device *dev,
++						int chunkInNAND,
++						const __u8 *data,
++						const yaffs_ExtendedTags *tags);
++int yaffs_TagsCompatabilityReadChunkWithTagsFromNAND(yaffs_Device *dev,
++						int chunkInNAND,
++						__u8 *data,
++						yaffs_ExtendedTags *tags);
++int yaffs_TagsCompatabilityMarkNANDBlockBad(struct yaffs_DeviceStruct *dev,
++					    int blockNo);
++int yaffs_TagsCompatabilityQueryNANDBlock(struct yaffs_DeviceStruct *dev,
++					  int blockNo,
++					  yaffs_BlockState *state,
++					  __u32 *sequenceNumber);
++
++void yaffs_CalcTagsECC(yaffs_Tags *tags);
++int yaffs_CheckECCOnTags(yaffs_Tags *tags);
++int yaffs_CountBits(__u8 byte);
++
++#endif
+diff -Nur linux-2.6.32.orig/fs/yaffs2/yaffs_tagsvalidity.c linux-2.6.32/fs/yaffs2/yaffs_tagsvalidity.c
+--- linux-2.6.32.orig/fs/yaffs2/yaffs_tagsvalidity.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.32/fs/yaffs2/yaffs_tagsvalidity.c	2010-01-30 20:35:02.251827798 +0100
+@@ -0,0 +1,28 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2007 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "yaffs_tagsvalidity.h"
++
++void yaffs_InitialiseTags(yaffs_ExtendedTags *tags)
++{
++	memset(tags, 0, sizeof(yaffs_ExtendedTags));
++	tags->validMarker0 = 0xAAAAAAAA;
++	tags->validMarker1 = 0x55555555;
++}
++
++int yaffs_ValidateTags(yaffs_ExtendedTags *tags)
++{
++	return (tags->validMarker0 == 0xAAAAAAAA &&
++		tags->validMarker1 == 0x55555555);
++
++}
+diff -Nur linux-2.6.32.orig/fs/yaffs2/yaffs_tagsvalidity.h linux-2.6.32/fs/yaffs2/yaffs_tagsvalidity.h
+--- linux-2.6.32.orig/fs/yaffs2/yaffs_tagsvalidity.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.32/fs/yaffs2/yaffs_tagsvalidity.h	2010-01-30 20:35:02.292761803 +0100
+@@ -0,0 +1,24 @@
++/*
++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2007 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++
++#ifndef __YAFFS_TAGS_VALIDITY_H__
++#define __YAFFS_TAGS_VALIDITY_H__
++
++#include "yaffs_guts.h"
++
++void yaffs_InitialiseTags(yaffs_ExtendedTags *tags);
++int yaffs_ValidateTags(yaffs_ExtendedTags *tags);
++#endif
+diff -Nur linux-2.6.32.orig/fs/yaffs2/yportenv.h linux-2.6.32/fs/yaffs2/yportenv.h
+--- linux-2.6.32.orig/fs/yaffs2/yportenv.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.32/fs/yaffs2/yportenv.h	2010-01-30 20:35:02.332076176 +0100
+@@ -0,0 +1,203 @@
++/*
++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2007 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++
++#ifndef __YPORTENV_H__
++#define __YPORTENV_H__
++
++/*
++ * Define the MTD version in terms of Linux Kernel versions
++ * This allows yaffs to be used independantly of the kernel
++ * as well as with it.
++ */
++
++#define MTD_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))
++
++#if defined CONFIG_YAFFS_WINCE
++
++#include "ywinceenv.h"
++
++#elif defined __KERNEL__
++
++#include "moduleconfig.h"
++
++/* Linux kernel */
++
++#include <linux/version.h>
++#define MTD_VERSION_CODE LINUX_VERSION_CODE
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19))
++#include <linux/config.h>
++#endif
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/sched.h>
++#include <linux/string.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++
++#define YCHAR char
++#define YUCHAR unsigned char
++#define _Y(x)     x
++#define yaffs_strcat(a, b)     strcat(a, b)
++#define yaffs_strcpy(a, b)     strcpy(a, b)
++#define yaffs_strncpy(a, b, c) strncpy(a, b, c)
++#define yaffs_strncmp(a, b, c) strncmp(a, b, c)
++#define yaffs_strlen(s)	       strlen(s)
++#define yaffs_sprintf	       sprintf
++#define yaffs_toupper(a)       toupper(a)
++
++#define Y_INLINE inline
++
++#define YAFFS_LOSTNFOUND_NAME		"lost+found"
++#define YAFFS_LOSTNFOUND_PREFIX		"obj"
++
++/* #define YPRINTF(x) printk x */
++#define YMALLOC(x) kmalloc(x, GFP_NOFS)
++#define YFREE(x)   kfree(x)
++#define YMALLOC_ALT(x) vmalloc(x)
++#define YFREE_ALT(x)   vfree(x)
++#define YMALLOC_DMA(x) YMALLOC(x)
++
++/* KR - added for use in scan so processes aren't blocked indefinitely. */
++#define YYIELD() schedule()
++
++#define YAFFS_ROOT_MODE			0666
++#define YAFFS_LOSTNFOUND_MODE		0666
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++#define Y_CURRENT_TIME CURRENT_TIME.tv_sec
++#define Y_TIME_CONVERT(x) (x).tv_sec
++#else
++#define Y_CURRENT_TIME CURRENT_TIME
++#define Y_TIME_CONVERT(x) (x)
++#endif
++
++#define yaffs_SumCompare(x, y) ((x) == (y))
++#define yaffs_strcmp(a, b) strcmp(a, b)
++
++#define TENDSTR "\n"
++#define TSTR(x) KERN_WARNING x
++#define TCONT(x) x
++#define TOUT(p) printk p
++
++#define yaffs_trace(mask, fmt, args...) \
++	do { if ((mask) & (yaffs_traceMask|YAFFS_TRACE_ERROR)) \
++		printk(KERN_WARNING "yaffs: " fmt, ## args); \
++	} while (0)
++
++#define compile_time_assertion(assertion) \
++	({ int x = __builtin_choose_expr(assertion, 0, (void)0); (void) x; })
++
++#elif defined CONFIG_YAFFS_DIRECT
++
++#define MTD_VERSION_CODE MTD_VERSION(2, 6, 22)
++
++/* Direct interface */
++#include "ydirectenv.h"
++
++#elif defined CONFIG_YAFFS_UTIL
++
++/* Stuff for YAFFS utilities */
++
++#include "stdlib.h"
++#include "stdio.h"
++#include "string.h"
++
++#include "devextras.h"
++
++#define YMALLOC(x) malloc(x)
++#define YFREE(x)   free(x)
++#define YMALLOC_ALT(x) malloc(x)
++#define YFREE_ALT(x) free(x)
++
++#define YCHAR char
++#define YUCHAR unsigned char
++#define _Y(x)     x
++#define yaffs_strcat(a, b)     strcat(a, b)
++#define yaffs_strcpy(a, b)     strcpy(a, b)
++#define yaffs_strncpy(a, b, c) strncpy(a, b, c)
++#define yaffs_strlen(s)	       strlen(s)
++#define yaffs_sprintf	       sprintf
++#define yaffs_toupper(a)       toupper(a)
++
++#define Y_INLINE inline
++
++/* #define YINFO(s) YPRINTF(( __FILE__ " %d %s\n",__LINE__,s)) */
++/* #define YALERT(s) YINFO(s) */
++
++#define TENDSTR "\n"
++#define TSTR(x) x
++#define TOUT(p) printf p
++
++#define YAFFS_LOSTNFOUND_NAME		"lost+found"
++#define YAFFS_LOSTNFOUND_PREFIX		"obj"
++/* #define YPRINTF(x) printf x */
++
++#define YAFFS_ROOT_MODE				0666
++#define YAFFS_LOSTNFOUND_MODE		0666
++
++#define yaffs_SumCompare(x, y) ((x) == (y))
++#define yaffs_strcmp(a, b) strcmp(a, b)
++
++#else
++/* Should have specified a configuration type */
++#error Unknown configuration
++
++#endif
++
++/* see yaffs_fs.c */
++extern unsigned int yaffs_traceMask;
++extern unsigned int yaffs_wr_attempts;
++
++/*
++ * Tracing flags.
++ * The flags masked in YAFFS_TRACE_ALWAYS are always traced.
++ */
++
++#define YAFFS_TRACE_OS			0x00000002
++#define YAFFS_TRACE_ALLOCATE		0x00000004
++#define YAFFS_TRACE_SCAN		0x00000008
++#define YAFFS_TRACE_BAD_BLOCKS		0x00000010
++#define YAFFS_TRACE_ERASE		0x00000020
++#define YAFFS_TRACE_GC			0x00000040
++#define YAFFS_TRACE_WRITE		0x00000080
++#define YAFFS_TRACE_TRACING		0x00000100
++#define YAFFS_TRACE_DELETION		0x00000200
++#define YAFFS_TRACE_BUFFERS		0x00000400
++#define YAFFS_TRACE_NANDACCESS		0x00000800
++#define YAFFS_TRACE_GC_DETAIL		0x00001000
++#define YAFFS_TRACE_SCAN_DEBUG		0x00002000
++#define YAFFS_TRACE_MTD			0x00004000
++#define YAFFS_TRACE_CHECKPOINT		0x00008000
++
++#define YAFFS_TRACE_VERIFY		0x00010000
++#define YAFFS_TRACE_VERIFY_NAND		0x00020000
++#define YAFFS_TRACE_VERIFY_FULL		0x00040000
++#define YAFFS_TRACE_VERIFY_ALL		0x000F0000
++
++
++#define YAFFS_TRACE_ERROR		0x40000000
++#define YAFFS_TRACE_BUG			0x80000000
++#define YAFFS_TRACE_ALWAYS		0xF0000000
++
++
++#define T(mask, p) do { if ((mask) & (yaffs_traceMask | YAFFS_TRACE_ALWAYS)) TOUT(p); } while (0)
++
++#ifndef YBUG
++#define YBUG() do {T(YAFFS_TRACE_BUG, (TSTR("==>> yaffs bug: " __FILE__ " %d" TENDSTR), __LINE__)); } while (0)
++#endif
++
++#endif

+ 4 - 8
target/qemu-x86/Makefile

@@ -12,7 +12,7 @@ kernel-install:
 		$(BIN_DIR)/${ADK_TARGET}-${FS}-kernel
 
 createinit:
-	@-rm $(LINUX_DIR)/usr/initramfs_data.cpio.*
+	@-rm $(LINUX_DIR)/usr/initramfs_data.cpio.* $(MAKE_TRACE)
 	$(SED) 's#^CONFIG_INITRAMFS_SOURCE.*#CONFIG_INITRAMFS_SOURCE="${BUILD_DIR}/${INITRAMFS_PIGGYBACK}"#' \
 		$(LINUX_DIR)/.config
 	echo 'CONFIG_INITRAMFS_ROOT_UID=0' >> $(LINUX_DIR)/.config
@@ -29,7 +29,7 @@ imageinstall: $(BIN_DIR)/$(ROOTFSTARBALL)
 	@echo "Use following command to create a QEMU Image:"
 	@echo "sudo ./scripts/create-image.sh -f ${ADK_TARGET_ROOTFS} qemu-${CPU_ARCH}.img $(BIN_DIR)/$(ROOTFSTARBALL)"
 	@echo "Start qemu with following command line:"
-	@echo 'qemu -nographic -net user,hostfwd=tcp::2222-:22 -net nic,model=rtl8139 -kernel $(BIN_DIR)/${ADK_TARGET}-${FS}-kernel qemu-${CPU_ARCH}.img'
+	@echo 'qemu -nographic -net user,hostfwd=tcp::2222-:22 -net nic,model=e1000 -kernel $(BIN_DIR)/${ADK_TARGET}-${FS}-kernel qemu-${CPU_ARCH}.img'
 	@echo 'Login as user root with password linux123 via ssh or console'
 endif
 
@@ -37,19 +37,15 @@ ifeq ($(FS),initramfs)
 imageinstall: $(BIN_DIR)/$(INITRAMFS)
 	@echo 'The kernel file is: ${BIN_DIR}/${ADK_TARGET}-${FS}-kernel'
 	@echo 'The initramfs image is: ${BIN_DIR}/${INITRAMFS}'
-	@echo 'You need a dummy qemu.img, create it with:'
-	@echo 'qemu-img create -f raw qemu.img 1M'
 	@echo "Start qemu with following command line:"
-	@echo 'qemu -nographic -net user,hostfwd=tcp::2222-:22 -net nic,model=rtl8139 -kernel $(BIN_DIR)/${ADK_TARGET}-${FS}-kernel -initrd ${BIN_DIR}/${INITRAMFS} qemu.img'
+	@echo 'qemu -nographic -net user,hostfwd=tcp::2222-:22 -net nic,model=e1000 -kernel $(BIN_DIR)/${ADK_TARGET}-${FS}-kernel -initrd ${BIN_DIR}/${INITRAMFS}'
 	@echo 'Login as user root with password linux123 via ssh or console'
 endif
 
 ifeq ($(FS),initramfs-piggyback)
 imageinstall: ${BUILD_DIR}/${INITRAMFS_PIGGYBACK} createinit
 	@echo 'The kernel+initramfs file is: ${BIN_DIR}/${ADK_TARGET}-${FS}-kernel'
-	@echo 'You need a dummy qemu.img, create it with:'
-	@echo 'qemu-img create -f raw qemu.img 1M'
 	@echo "Start qemu with following command line:"
-	@echo 'qemu -nographic -net user,hostfwd=tcp::2222-:22 -net nic,model=rtl8139 -kernel $(BIN_DIR)/${ADK_TARGET}-${FS}-kernel qemu.img'
+	@echo 'qemu -nographic -net user,hostfwd=tcp::2222-:22 -net nic,model=e1000 -kernel $(BIN_DIR)/${ADK_TARGET}-${FS}-kernel'
 	@echo 'Login as user root with password linux123 via ssh or console'
 endif

+ 63 - 24
target/qemu-x86/kernel.config

@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.32
-# Fri Feb  5 13:30:34 2010
+# Linux kernel version: 2.6.33
+# Thu Feb 25 21:06:10 2010
 #
 # CONFIG_64BIT is not set
 CONFIG_X86_32=y
@@ -61,9 +61,11 @@ CONFIG_LOCALVERSION=""
 CONFIG_HAVE_KERNEL_GZIP=y
 CONFIG_HAVE_KERNEL_BZIP2=y
 CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
 CONFIG_KERNEL_GZIP=y
 # CONFIG_KERNEL_BZIP2 is not set
 # CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
 CONFIG_SYSVIPC_SYSCTL=y
@@ -78,6 +80,7 @@ CONFIG_POSIX_MQUEUE_SYSCTL=y
 #
 CONFIG_TREE_RCU=y
 # CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_TINY_RCU is not set
 # CONFIG_RCU_TRACE is not set
 CONFIG_RCU_FANOUT=32
 # CONFIG_RCU_FANOUT_EXACT is not set
@@ -95,6 +98,7 @@ CONFIG_INITRAMFS_SOURCE=""
 CONFIG_RD_GZIP=y
 # CONFIG_RD_BZIP2 is not set
 # CONFIG_RD_LZMA is not set
+# CONFIG_RD_LZO is not set
 CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_SYSCTL=y
 CONFIG_ANON_INODES=y
@@ -121,7 +125,7 @@ CONFIG_HAVE_PERF_EVENTS=y
 #
 # Kernel Performance Events And Counters
 #
-# CONFIG_PERF_EVENTS is not set
+CONFIG_PERF_EVENTS=y
 # CONFIG_PERF_COUNTERS is not set
 # CONFIG_VM_EVENT_COUNTERS is not set
 # CONFIG_PCI_QUIRKS is not set
@@ -139,6 +143,8 @@ CONFIG_HAVE_KRETPROBES=y
 CONFIG_HAVE_ARCH_TRACEHOOK=y
 CONFIG_HAVE_DMA_ATTRS=y
 CONFIG_HAVE_DMA_API_DEBUG=y
+CONFIG_HAVE_HW_BREAKPOINT=y
+CONFIG_HAVE_USER_RETURN_NOTIFIER=y
 
 #
 # GCOV-based kernel profiling
@@ -163,14 +169,41 @@ CONFIG_BLOCK=y
 # IO Schedulers
 #
 CONFIG_IOSCHED_NOOP=y
-# CONFIG_IOSCHED_AS is not set
 # CONFIG_IOSCHED_DEADLINE is not set
 # CONFIG_IOSCHED_CFQ is not set
-# CONFIG_DEFAULT_AS is not set
 # CONFIG_DEFAULT_DEADLINE is not set
 # CONFIG_DEFAULT_CFQ is not set
 CONFIG_DEFAULT_NOOP=y
 CONFIG_DEFAULT_IOSCHED="noop"
+# CONFIG_INLINE_SPIN_TRYLOCK is not set
+# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK is not set
+# CONFIG_INLINE_SPIN_LOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
+CONFIG_INLINE_SPIN_UNLOCK=y
+# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
+CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
+# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_READ_TRYLOCK is not set
+# CONFIG_INLINE_READ_LOCK is not set
+# CONFIG_INLINE_READ_LOCK_BH is not set
+# CONFIG_INLINE_READ_LOCK_IRQ is not set
+# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
+CONFIG_INLINE_READ_UNLOCK=y
+# CONFIG_INLINE_READ_UNLOCK_BH is not set
+CONFIG_INLINE_READ_UNLOCK_IRQ=y
+# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_WRITE_TRYLOCK is not set
+# CONFIG_INLINE_WRITE_LOCK is not set
+# CONFIG_INLINE_WRITE_LOCK_BH is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
+CONFIG_INLINE_WRITE_UNLOCK=y
+# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
+CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
+# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
+# CONFIG_MUTEX_SPIN_ON_OWNER is not set
 # CONFIG_FREEZER is not set
 
 #
@@ -215,10 +248,9 @@ CONFIG_M586=y
 # CONFIG_GENERIC_CPU is not set
 CONFIG_X86_GENERIC=y
 CONFIG_X86_CPU=y
-CONFIG_X86_L1_CACHE_BYTES=64
-CONFIG_X86_INTERNODE_CACHE_BYTES=64
+CONFIG_X86_INTERNODE_CACHE_SHIFT=6
 CONFIG_X86_CMPXCHG=y
-CONFIG_X86_L1_CACHE_SHIFT=5
+CONFIG_X86_L1_CACHE_SHIFT=6
 CONFIG_X86_XADD=y
 # CONFIG_X86_PPRO_FENCE is not set
 CONFIG_X86_F00F_BUG=y
@@ -228,8 +260,7 @@ CONFIG_X86_BSWAP=y
 CONFIG_X86_POPAD_OK=y
 CONFIG_X86_ALIGNMENT_16=y
 CONFIG_X86_INTEL_USERCOPY=y
-CONFIG_X86_CMPXCHG64=y
-CONFIG_X86_MINIMUM_CPU_FAMILY=5
+CONFIG_X86_MINIMUM_CPU_FAMILY=4
 CONFIG_PROCESSOR_SELECT=y
 CONFIG_CPU_SUP_INTEL=y
 # CONFIG_CPU_SUP_CYRIX_32 is not set
@@ -254,7 +285,6 @@ CONFIG_PREEMPT_NONE=y
 # CONFIG_MICROCODE is not set
 # CONFIG_X86_MSR is not set
 # CONFIG_X86_CPUID is not set
-# CONFIG_X86_CPU_DEBUG is not set
 CONFIG_NOHIGHMEM=y
 # CONFIG_HIGHMEM4G is not set
 # CONFIG_HIGHMEM64G is not set
@@ -269,6 +299,7 @@ CONFIG_PAGE_OFFSET=0xC0000000
 CONFIG_ARCH_FLATMEM_ENABLE=y
 CONFIG_ARCH_SPARSEMEM_ENABLE=y
 CONFIG_ARCH_SELECT_MEMORY_MODEL=y
+CONFIG_ILLEGAL_POINTER_VALUE=0
 CONFIG_SELECT_MEMORY_MODEL=y
 CONFIG_FLATMEM_MANUAL=y
 # CONFIG_DISCONTIGMEM_MANUAL is not set
@@ -282,8 +313,6 @@ CONFIG_SPLIT_PTLOCK_CPUS=4
 CONFIG_ZONE_DMA_FLAG=1
 CONFIG_BOUNCE=y
 CONFIG_VIRT_TO_BUS=y
-CONFIG_HAVE_MLOCK=y
-CONFIG_HAVE_MLOCKED_PAGE_BIT=y
 # CONFIG_KSM is not set
 CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
 # CONFIG_X86_CHECK_BIOS_CORRUPTION is not set
@@ -365,8 +394,6 @@ CONFIG_UNIX=y
 CONFIG_INET=y
 # CONFIG_IP_MULTICAST is not set
 # CONFIG_IP_ADVANCED_ROUTER is not set
-# CONFIG_ASK_IP_FIB_HASH is not set
-# CONFIG_IP_FIB_TRIE is not set
 CONFIG_IP_FIB_HASH=y
 # CONFIG_IP_PNP is not set
 # CONFIG_NET_IPIP is not set
@@ -423,9 +450,6 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
 # CONFIG_AF_RXRPC is not set
 CONFIG_WIRELESS=y
 # CONFIG_CFG80211 is not set
-CONFIG_CFG80211_DEFAULT_PS_VALUE=0
-# CONFIG_WIRELESS_OLD_REGULATORY is not set
-# CONFIG_WIRELESS_EXT is not set
 # CONFIG_LIB80211 is not set
 
 #
@@ -460,6 +484,10 @@ CONFIG_BLK_DEV=y
 # CONFIG_BLK_DEV_UMEM is not set
 # CONFIG_BLK_DEV_COW_COMMON is not set
 # CONFIG_BLK_DEV_LOOP is not set
+
+#
+# DRBD disabled because PROC_FS, INET or CONNECTOR not selected
+#
 # CONFIG_BLK_DEV_NBD is not set
 # CONFIG_BLK_DEV_SX8 is not set
 # CONFIG_BLK_DEV_RAM is not set
@@ -558,15 +586,16 @@ CONFIG_ATA_PIIX=y
 # CONFIG_PATA_NS87415 is not set
 # CONFIG_PATA_OPTI is not set
 # CONFIG_PATA_OPTIDMA is not set
+# CONFIG_PATA_PDC2027X is not set
 # CONFIG_PATA_PDC_OLD is not set
 # CONFIG_PATA_RADISYS is not set
 # CONFIG_PATA_RDC is not set
 # CONFIG_PATA_RZ1000 is not set
 # CONFIG_PATA_SC1200 is not set
 # CONFIG_PATA_SERVERWORKS is not set
-# CONFIG_PATA_PDC2027X is not set
 # CONFIG_PATA_SIL680 is not set
 # CONFIG_PATA_SIS is not set
+# CONFIG_PATA_TOSHIBA is not set
 # CONFIG_PATA_VIA is not set
 # CONFIG_PATA_WINBOND is not set
 # CONFIG_PATA_PLATFORM is not set
@@ -583,7 +612,7 @@ CONFIG_ATA_PIIX=y
 #
 
 #
-# See the help texts for more information.
+# The newer stack is recommended.
 #
 # CONFIG_FIREWIRE is not set
 # CONFIG_IEEE1394 is not set
@@ -625,8 +654,10 @@ CONFIG_NETDEV_1000=y
 # CONFIG_NETDEV_10000 is not set
 # CONFIG_TR is not set
 CONFIG_WLAN=y
-# CONFIG_WLAN_PRE80211 is not set
-# CONFIG_WLAN_80211 is not set
+# CONFIG_AIRO is not set
+# CONFIG_ATMEL is not set
+# CONFIG_PRISM54 is not set
+# CONFIG_HOSTAP is not set
 
 #
 # Enable WiMAX (Networking options) to see the WiMAX drivers
@@ -650,6 +681,7 @@ CONFIG_WLAN=y
 CONFIG_INPUT=y
 # CONFIG_INPUT_FF_MEMLESS is not set
 # CONFIG_INPUT_POLLDEV is not set
+# CONFIG_INPUT_SPARSEKMAP is not set
 
 #
 # Userland interfaces
@@ -689,6 +721,7 @@ CONFIG_SERIO_I8042=y
 # CONFIG_SERIO_PCIPS2 is not set
 CONFIG_SERIO_LIBPS2=y
 # CONFIG_SERIO_RAW is not set
+# CONFIG_SERIO_ALTERA_PS2 is not set
 # CONFIG_GAMEPORT is not set
 
 #
@@ -923,7 +956,9 @@ CONFIG_RTC_DRV_CMOS=y
 # CONFIG_RTC_DRV_M48T86 is not set
 # CONFIG_RTC_DRV_M48T35 is not set
 # CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_MSM6242 is not set
 # CONFIG_RTC_DRV_BQ4802 is not set
+# CONFIG_RTC_DRV_RP5C01 is not set
 # CONFIG_RTC_DRV_V3020 is not set
 
 #
@@ -1070,7 +1105,7 @@ CONFIG_MAGIC_SYSRQ=y
 # CONFIG_DEBUG_FS is not set
 # CONFIG_HEADERS_CHECK is not set
 # CONFIG_DEBUG_KERNEL is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_BUGVERBOSE=y
 # CONFIG_DEBUG_MEMORY_INIT is not set
 CONFIG_ARCH_WANT_FRAME_POINTERS=y
 CONFIG_FRAME_POINTER=y
@@ -1116,7 +1151,11 @@ CONFIG_DEFAULT_IO_DELAY_TYPE=0
 # CONFIG_KEYS is not set
 # CONFIG_SECURITY is not set
 # CONFIG_SECURITYFS is not set
-# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+# CONFIG_DEFAULT_SECURITY_SELINUX is not set
+# CONFIG_DEFAULT_SECURITY_SMACK is not set
+# CONFIG_DEFAULT_SECURITY_TOMOYO is not set
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_DEFAULT_SECURITY=""
 # CONFIG_CRYPTO is not set
 
 #

+ 2 - 2
target/qemu-x86/target.mk

@@ -1,7 +1,7 @@
 ARCH:=			x86
 CPU_ARCH:=		i586
-KERNEL_VERSION:=	2.6.32
+KERNEL_VERSION:=	2.6.33
 KERNEL_RELEASE:=	1
-KERNEL_MD5SUM:=		260551284ac224c3a43c4adac7df4879
+KERNEL_MD5SUM:=		c3883760b18d50e8d78819c54d579b00
 TARGET_OPTIMIZATION:=	-Os -pipe
 TARGET_CFLAGS_ARCH:=    -march=i586