/* snumbd26.c
   Copyright (C) Acronis, 2004
   Written by Vladimir Simonov
   $Id: snumbd26.c 1155907 2016-01-14 12:55:07Z marina $
*/
#ifdef HAVE_LINUX_CONFIG
#include <linux/config.h>
#elif defined(HAVE_LINUX_AUTOCONF)
#include <linux/autoconf.h>
#elif defined(HAVE_GENERATED_AUTOCONF)
#include <generated/autoconf.h>
#else
#warning "neither linux/config.h nor linux/autoconf.h or generated/autoconf.h found"
#endif
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/version.h>

#if LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0)
#include <asm/system.h>
#endif
#include <asm/uaccess.h>
#include <asm/bitops.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/smp.h>

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/interrupt.h> /* for in_interrupt */
#include <linux/poll.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#ifdef HAVE_IOCTL32_CONVERSIONS
#include <linux/ioctl32.h>
#endif
#include "snumbd.h"

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
#define sn_request_queue request_queue_t
#else
#define sn_request_queue struct request_queue
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
#define sn_bio_endio(x, y, z) bio_endio(x, y, z)
#else
#ifdef HAVE_BIO_ENDIO_2ARGS
#define sn_bio_endio(x, y, z) bio_endio(x, z)
#else
#define sn_bio_endio(x, y, z) bio_endio(x)
#endif /* HAVE_BIO_ENDIO_2ARGS */
#endif /* LINUX_VERSION_CODE */

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7)
#define USE_KERNEL_THREAD
#endif

#ifdef HAVE_KMAP_ATOMIC_2ARGS
#define sn_kmap_atomic(a) kmap_atomic(a, KM_USER0)
#define sn_kunmap_atomic(a) kunmap_atomic(a, KM_USER0)
#else /* 1 argument */
#define sn_kmap_atomic(a) kmap_atomic(a)
#define sn_kunmap_atomic(a) kunmap_atomic(a)
#endif

#ifdef HAVE_ASM_HAVE_SET_MB
#define sn_set_mb set_mb
#else
#define sn_set_mb smp_store_mb
#endif

#ifndef HAVE_REQ_WRITE
#define REQ_WRITE       (1 << BIO_RW)
#endif

#define MAX_MINOR	255
#define DEBUG		0

#define DEBUG_API	(1 << 1)
#define DEBUG_ALLOC	(1 << 2)
#define DEBUG_BH	(1 << 3)
#define DEBUG_BHQUE	(1 << 4)
#define DEBUG_CACHE	(1 << 5)
#define DEBUG_FAST	(1 << 6)
#define DEBUG_INTERNALS	(1 << 7)
#define DEBUG_DUMP	(1 << 8)

#define DEBUG_LEVEL 	(DEBUG_API)

#if DEBUG
#define inline		/* disable inlining in debug builds */
#define sa_debug(level, fmt, arg...)					\
	do {								\
		static const char *func = __FUNCTION__;			\
		if (((level) & DEBUG_LEVEL) && snumbd_printk_rate_limit())\
			printk(KERN_DEBUG "%s(%s,%d): " fmt, func,	\
				current->comm, current->pid, ##arg);	\
	} while (0)
#else
#define sa_debug(level, fmt, arg...) do { } while (0)
#endif

#define sa_kdebug(fmt, arg...)					\
	do {							\
		static const char *func= __FUNCTION__;		\
		if (snumbd_printk_rate_limit())			\
			printk(KERN_DEBUG "%s(%s,%d): " fmt, func,\
			current->comm, current->pid, ##arg);	\
	} while (0)
#define sa_info(fmt, arg...)					\
	do {							\
		static const char *func = __FUNCTION__;		\
		if (snumbd_printk_rate_limit())			\
			printk(KERN_INFO "%s(%s,%d): " fmt, func,\
			current->comm, current->pid, ##arg);	\
	} while (0)
#define sa_warn(fmt, arg...)					\
	do {							\
		static const char *func = __FUNCTION__;		\
		if (snumbd_printk_rate_limit())			\
			printk(KERN_WARNING "%s(%s,%d): " fmt, func,\
			current->comm, current->pid, ##arg);	\
	} while (0)
#define sa_error(fmt, arg...)					\
	do {							\
		static const char *func = __FUNCTION__;		\
		if (snumbd_printk_rate_limit())			\
			printk(KERN_ERR "%s(%s,%d): " fmt, func,\
			current->comm, current->pid, ##arg);	\
	} while (0)


#define sa_BUG(fmt, arg...)					\
	do {							\
		static const char *func = __FUNCTION__;		\
		printk(KERN_CRIT "%s(%s,%d): " fmt, func,	\
			current->comm, current->pid, ##arg);	\
		BUG();						\
	} while (0)


#if defined(__x86_64) && defined(CONFIG_COMPAT) && !defined(HAVE_COMPAT_IOCTL) 
#define HAVE_IOCTL32_CONVERSION 
#endif 

static int snumbd_init_ok;
static int snumbdctl_major;
static int snumbd_major;

static wait_queue_head_t resolver_queue;		/* deadlock resolver */

#ifndef USE_KERNEL_THREAD
static struct task_struct *resolver_task;
#else
static pid_t resolver_thread_pid;
#endif

static int resolver_task_continue = 1;

static LIST_HEAD(sessions_list);
static LIST_HEAD(notinited_list);
static int sessions_count;
/* sessions_list & notinited_list protection */
#ifdef HAVE_SPIN_LOCK_UNLOCKED
static spinlock_t sessions_lock = SPIN_LOCK_UNLOCKED;
#else
static DEFINE_SPINLOCK(sessions_lock);
#endif


#ifdef __GFP_HIGHIO
#define GFP_SNAPHIGH	(__GFP_IO | __GFP_HIGHIO | __GFP_FS | __GFP_HIGHMEM)
#else
#define GFP_SNAPHIGH	(__GFP_IO | __GFP_FS | __GFP_HIGHMEM)
#endif

#define TIMER_INTERVAL (5*HZ)

#ifndef HAVE_FMODE_T
typedef unsigned int fmode_t;
#endif

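/*
 * Per-session state.  Each open of the control device allocates one
 * session (snumbdctl_open); session_init() then registers a gendisk for
 * it, and all I/O on that disk is forwarded to the owning user-space
 * process through a single shared (mmap-ed) page, s_mpage.
 */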
struct session_struct {
	struct list_head	s_list;		/* under sessions_lock */
	dev_t			s_kdev;
	unsigned long long 	s_scount;	/* sectors count */

	volatile unsigned int	s_state;
	atomic_t		s_users;
	int 			s_ro;		/* read-only mode */
	unsigned int		s_hpid;

	wait_queue_head_t 	s_select_wait;
	wait_queue_head_t 	s_io_wait;

	sn_request_queue *	s_queue;
	struct semaphore        s_iosem;	/* make_request serialization */
	struct gendisk *	s_disk;

	int 			s_cmd;		/* user level cmd */
	unsigned long long	s_sector;	/* sector of the current request */
	unsigned int		s_offset;	/* byte offset within s_sector */
	struct bio_vec *	s_bio_vec;	/* current bio segment */

	spinlock_t		s_misc_lock;		/* protects from here to */
							/* s_vma */
	unsigned int		s_ioctlcnt;		/* state data */
	unsigned int		s_ioctlcnt_prev;
	struct timer_list 	s_timer;		/* heartbeat */

	struct vm_area_struct *	s_vma;
	struct page *		s_mpage;		/* mmapped page */

	struct semaphore        s_sem;			/* user space requests
							   serialization */

	unsigned int		s_gpages;	/* got pages */
	unsigned int		s_ppages;	/* put pages */
	unsigned int		s_reads;	/* total reads count */
	pid_t			s_apgrp;	/* allowed pgrp */
};

static void close_session(struct session_struct *s, int do_free);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
#include <linux/nsproxy.h>
static pid_t sn_current_pgrp(void)
{
	if (!current->nsproxy)
		return 1;

#ifdef HAVE_PID_NS_CHILDREN
	return task_pgrp_nr_ns(current, current->nsproxy->pid_ns_for_children);
#else
	return task_pgrp_nr_ns(current, current->nsproxy->pid_ns);
#endif
}
#else
static pid_t sn_current_pgrp(void)
{
	return process_group(current);
}
#endif

static int snumbd_printk_rate_limit(void)
{
	static unsigned long count, last;

	if (jiffies - last > HZ)
		count = 0;
	if (count >= 10)
		return 0;
	last = jiffies;
	count++;
	return 1;
}

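/*
 * Wait until we are the only remaining user of the session.  While
 * waiting, s_sem is dropped and the I/O waiters are woken so that a
 * request still stuck waiting for a user-space answer fails with
 * SNUM_DEADLOCK_ERR instead of blocking teardown forever.  Returns with
 * sessions_lock held.
 */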
static void wait_for_users(struct session_struct *s)
{
	spin_lock(&sessions_lock);
	while (!atomic_dec_and_test(&s->s_users)) {
		atomic_inc(&s->s_users);
		spin_unlock(&sessions_lock);
		if (s->s_state == SNUM_REQ_RECV)
			/* no answer from userspace */
			sn_set_mb(s->s_state, SNUM_DEADLOCK_ERR);
		up(&s->s_sem);
		if (snumbd_printk_rate_limit())
			sa_debug(DEBUG_API, "s=%p users=%d state=%d\n", s,
					atomic_read(&s->s_users), s->s_state);
		if (waitqueue_active(&s->s_io_wait))
			wake_up_all(&s->s_io_wait);
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);
		down(&s->s_sem);
		spin_lock(&sessions_lock);
	}
	atomic_inc(&s->s_users);
}

static inline int is_session_alive(struct session_struct *sess)
{
	struct list_head *tmp;
	spin_lock(&sessions_lock);
	list_for_each(tmp, &sessions_list) {
		struct session_struct *s;
		s = list_entry(tmp, struct session_struct, s_list);
		if (s == sess) {
			spin_unlock(&sessions_lock);
			return 1;
		}
	}
	spin_unlock(&sessions_lock);
	return 0;
}

static inline struct session_struct *find_deadlocked(void)
{
	struct list_head *tmp;
	list_for_each(tmp, &sessions_list) {
		struct session_struct *s;
		s = list_entry(tmp, struct session_struct, s_list);
		if (s->s_state == SNUM_DEADLOCK_ERR) {
			atomic_inc(&s->s_users);
			return s;
		}
	}
	return NULL;
}
#if 0
static inline struct session_struct *find_by_dev(dev_t dev)
{
	struct list_head *tmp;
	list_for_each(tmp, &sessions_list) {
		struct session_struct *s;
		s = list_entry(tmp, struct session_struct, s_list);
		if (s->s_kdev == dev) {
			atomic_inc(&s->s_users);
			return s;
		}
	}
	return NULL;
}
#endif
static inline int get_free_minor(void)
{
	dev_t dev;
	int minor;
	struct list_head *tmp;

	minor = 0;
repeate:
	minor++;
	dev = MKDEV(snumbd_major, minor);
	list_for_each(tmp, &sessions_list) {
		struct session_struct *s;
		s = list_entry(tmp, struct session_struct, s_list);
		if (s->s_kdev == dev)
			goto repeate;
	}
	return minor;
}

static int snumbd_ioctl_blk(struct block_device *bdev, fmode_t mode, unsigned cmd,
                                                        unsigned long arg)
{
#if DEBUG
	struct session_struct *s = bdev->bd_disk->private_data;
	if (s)
		sa_debug(DEBUG_API, "s=%p dev=%x\n", s, s->s_kdev);
#endif
	return -ENOTTY;
}

static int snumbd_open_blk(struct block_device *bdev, fmode_t mode)
{
	int users;
	pid_t pgrp;
	struct session_struct *s = bdev->bd_disk->private_data;
	if (!s)
		return -ENOTTY;
	pgrp = sn_current_pgrp();
	/*
	Allow only programs in the device creator's process group to open
	the device.  This eliminates problems with device access (extra
	references) from udev, multipathd, automount and others.
	*/
	if (pgrp != s->s_apgrp) {
		sa_info("Disable access (%d,%d)...\n", pgrp, s->s_apgrp);
		return -EACCES;
	}
	users = atomic_read(&s->s_users);
	sa_debug(DEBUG_API, "s=%p dev=%x users=%d\n", s, s->s_kdev, users);
	atomic_inc(&s->s_users);
	if (users < 3) {
		bd_set_size(bdev, s->s_scount << 9);
		set_blocksize(bdev, 512);
		set_device_ro(bdev, (s->s_ro != 0));
	}
	return 0;
}

static int snumbd_release_blk(struct gendisk *disk, fmode_t mode)
{
	struct session_struct *s = disk->private_data;
	if (!s)
		return -ENOTTY;
	sa_debug(DEBUG_API, "s=%p dev=%x\n", s, s->s_kdev);
	atomic_dec(&s->s_users);
	return 0;
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
static int snumbd_open(struct inode *inode, struct file *file)
{
	struct block_device *bdev;
	bdev = inode->i_bdev;
	return snumbd_open_blk(bdev, 1);
}

static int snumbd_release(struct inode *inode, struct file *file)
{
	struct gendisk *disk;
	disk = inode->i_bdev->bd_disk;
	return snumbd_release_blk(disk, 1);
}

static int snumbd_ioctl(struct inode *inode, struct file *file, unsigned cmd,
							unsigned long arg)
{
	struct block_device *bdev;
	bdev = inode->i_bdev;
	return snumbd_ioctl_blk(bdev, 1, cmd, arg);
}
#else
#define snumbd_open snumbd_open_blk
#define snumbd_ioctl snumbd_ioctl_blk
#define snumbd_release snumbd_release_blk
#endif

static struct block_device_operations snumbd_bdops = {
	.owner =	THIS_MODULE,
	.open =		snumbd_open,
	.ioctl =	snumbd_ioctl,
	.release = 	snumbd_release,
};

#ifdef HAVE_MAKE_REQUEST_INT
#define MAKE_REQUEST_EXIT_STATUS 0
#define MAKE_REQUEST_RETURN_VALUE int
#else
#define MAKE_REQUEST_EXIT_STATUS
#define MAKE_REQUEST_RETURN_VALUE void
#endif

static MAKE_REQUEST_RETURN_VALUE snumbd_make_request(sn_request_queue *q, struct bio *bio);

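/*
 * Allocate a request queue and a single-minor gendisk for the session,
 * wire snumbd_make_request() into the queue and publish the disk.  The
 * extra s_users reference taken here stands for the disk itself and is
 * dropped in unregister_device().
 */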
static int register_device(struct session_struct * s, int ro)
{
	int ret;
	ret = -ENOMEM;
	sa_debug(DEBUG_API, "s=%p\n", s);
	s->s_queue = blk_alloc_queue(GFP_KERNEL);
	if (!s->s_queue)
		return ret;
	s->s_disk = alloc_disk(1);
	if (!s->s_disk)
		goto out;
	s->s_disk->major = MAJOR(s->s_kdev);
	s->s_disk->first_minor = MINOR(s->s_kdev);
	sprintf(s->s_disk->disk_name, SNUMBD_NAME"%dd", MINOR(s->s_kdev));
	s->s_disk->private_data = s;
	
	s->s_disk->fops = &snumbd_bdops;
	atomic_inc(&s->s_users);
	sa_debug(DEBUG_INTERNALS, "s=%p(%d) users=%d\n", s, s->s_state,
						atomic_read(&s->s_users));
	s->s_disk->queue = s->s_queue;
	set_capacity(s->s_disk, s->s_scount);
	s->s_ro = ro;
	blk_queue_make_request(s->s_queue, snumbd_make_request);
	add_disk(s->s_disk);
	return 0;
out:
	blk_cleanup_queue(s->s_queue);
	s->s_queue = NULL;
	return ret;
}

static void unregister_device(struct session_struct * s)
{
	sa_debug(DEBUG_API, "s=%p\n", s);
	if (s->s_disk) {
		sa_debug(DEBUG_INTERNALS, "s=%p(%d) users=%d\n", s, s->s_state,
						atomic_read(&s->s_users));
		s->s_disk->private_data = NULL;
		del_gendisk(s->s_disk);
		put_disk(s->s_disk);
		atomic_dec(&s->s_users);
		s->s_disk = NULL;
	}
	if (s->s_queue) {
		blk_cleanup_queue(s->s_queue);
		s->s_queue = NULL;
	}
	return;
}

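/*
 * Resolver worker: find sessions marked SNUM_DEADLOCK_ERR by the
 * heartbeat timer, wake everybody sleeping on their wait queues and
 * repeat a few times with a one second pause so stuck requests can
 * unwind.
 */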
static void do_resolver(void)
{
	struct session_struct *s;
	int count;
	count = 0;
	sa_debug(DEBUG_API, "\n");

repeate:
	spin_lock(&sessions_lock);
	s = find_deadlocked();
	if (!s) {
		spin_unlock(&sessions_lock);
		sa_debug(DEBUG_INTERNALS, "No deadlocked session found.\n");
		return;
	}
	spin_unlock(&sessions_lock);
	sa_info("Real cleanup started(%d,%d)...\n", s->s_state,
					atomic_read(&s->s_users));
	atomic_dec(&s->s_users);
	if (waitqueue_active(&s->s_io_wait))
		wake_up_all(&s->s_io_wait);
	if (waitqueue_active(&s->s_select_wait))
		wake_up_all(&s->s_select_wait);
	count++;
	set_current_state(TASK_INTERRUPTIBLE);
	schedule_timeout(HZ);
	if (count < 3)
		goto repeate;
}

static int resolver_loop(void *flag)
{
	sa_debug(DEBUG_API, "\n");
	init_waitqueue_head(&resolver_queue);
#ifdef USE_KERNEL_THREAD
	daemonize("snumbdd");
#endif
	while (resolver_task_continue) {
		wait_event_interruptible(resolver_queue,
				(resolver_task_continue == 0));
		if (resolver_task_continue)
			do_resolver();
		else
			break;
	}
	sa_debug(DEBUG_API, "exiting\n");
	return 0;
}

static void heartbeat_timer_func(unsigned long __data);

static inline void set_session_timer(struct session_struct *s)
{
	sa_debug(DEBUG_INTERNALS, "s=%p users=%d\n", s,
				atomic_read(&s->s_users));

	spin_lock(&s->s_misc_lock);
	if (!s->s_timer.function) {
		init_timer(&s->s_timer);
		s->s_timer.function = &heartbeat_timer_func;
		s->s_timer.data = (unsigned long) s;
		s->s_timer.expires = jiffies + TIMER_INTERVAL;
		s->s_ioctlcnt_prev = s->s_ioctlcnt;
		add_timer(&s->s_timer);
	}
	spin_unlock(&s->s_misc_lock);
}

static inline void reset_session_timer(struct session_struct *s)
{
	sa_debug(DEBUG_INTERNALS, "s=%p users=%d\n", s,
				atomic_read(&s->s_users));
	spin_lock(&s->s_misc_lock);
	if (s->s_timer.function) {
		del_timer_sync(&s->s_timer);
		s->s_timer.function = NULL;
	}
	spin_unlock(&s->s_misc_lock);
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)
#define SN_NOPAGE_SIGBUS NOPAGE_SIGBUS
#else
#define SN_NOPAGE_SIGBUS VM_FAULT_ERROR
#endif

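/*
 * Heartbeat timer, armed by set_session_timer() while a request is in
 * the hands of user space.  If no control ioctl has bumped s_ioctlcnt
 * within TIMER_INTERVAL, the user-space side is assumed dead: the
 * session is marked SNUM_DEADLOCK_ERR and the resolver thread and all
 * waiters are woken.
 */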
static void heartbeat_timer_func(unsigned long __data)
{
	struct session_struct *s;

	s = (struct session_struct *) __data;
	if (IS_ERROR_STATE(s->s_state))
		return;
	if (s->s_ioctlcnt != s->s_ioctlcnt_prev) {
		sa_debug(DEBUG_API, "s=%p\n", s);
		mod_timer(&s->s_timer, jiffies + TIMER_INTERVAL);
		s->s_ioctlcnt_prev = s->s_ioctlcnt;
		return;
	}
	sn_set_mb(s->s_state, SNUM_DEADLOCK_ERR);
	sa_info("Deadlock detected. Finishing(%d,%d)...\n", s->s_state,
					atomic_read(&s->s_users));
	if (waitqueue_active(&resolver_queue))
		wake_up_all(&resolver_queue);
	if (waitqueue_active(&s->s_io_wait))
		wake_up_all(&s->s_io_wait);
	if (waitqueue_active(&s->s_select_wait))
		wake_up_all(&s->s_select_wait);
#ifdef USE_KERNEL_THREAD
	kill_proc(resolver_thread_pid, SIGKILL, 1);
#endif
}

#if 0
static void session_stat(struct sn_state *sn)
{
	sa_info("dev=%x:%x, state=%d, blksize=%d, mmapsize=%d\n",
				sn->major, sn->minor, sn->state,
				sn->blksize, sn->mmapsize);

	sa_info("psize=%u, pstrt=%u, mshft=%d, ioctls=%u\n",
				sn->partsize, sn->partstrt, sn->minorshft,
				sn->ioctlcnt);

	sa_info("bhpgs=%d, bhcnt=%d, abhs=%u, fbhs=%u, dbhs=%u\n",
				sn->bhpages, sn->bhcount,
				sn->abhs, sn->fbhs, sn->dbhs);

	sa_info("gpgs=%u, ppgs=%u, emmax=%d, emmin=%d, emcur=%d, cached=%d\n", 
				sn->gpages, sn->ppages, sn->emmax, sn->emmin,
				sn->emcur, sn->cachepages);

	sa_info("rblocks=%u, cblocks=%u, rcblocks=%u, rwcolls=%u\n",
				sn->rblocks, sn->cblocks, sn->rcblocks,
				sn->rwcolls);
}
#endif

static void fill_state(struct session_struct *s, struct snumbd_state *out)
{
	out->version =  (SNUMBD_VMAJOR << 16) + (SNUMBD_VMINOR << 8) +
							SNUMBD_VSUBMINOR;
	out->major = MAJOR(s->s_kdev);
	out->minor = MINOR(s->s_kdev);
	out->state = s->s_state;
	out->hpid = s->s_hpid;

	out->scount = s->s_scount;
	out->mmapsize = PAGE_SIZE;

	out->ioctlcnt = s->s_ioctlcnt;
	out->users = atomic_read(&s->s_users);
}

static int sn_notify_reboot(struct notifier_block *unused1, unsigned long code,
				void *unused2)
{
	if ((code == SYS_DOWN) || (code == SYS_HALT) ||
			(code == SYS_POWER_OFF)) {
#ifndef USE_KERNEL_THREAD
		if (resolver_task) {
			resolver_task_continue = 0;
			kthread_stop(resolver_task);
			resolver_task = NULL;
		}
#else
		if (resolver_thread_pid > 0) {
			resolver_task_continue = 0;
			kill_proc(resolver_thread_pid, SIGKILL, 1);
			resolver_thread_pid = 0;
		}
#endif
	}
	return NOTIFY_DONE;
}

struct notifier_block sn_notifier = {
	.notifier_call  = sn_notify_reboot,
	.next           = NULL,
	.priority       = INT_MAX, /* before any real devices */
};

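/*
 * Tear down a session: wait for all other users to go away (failing a
 * pending request with SNUM_DEADLOCK_ERR if user space never answers),
 * stop the heartbeat timer, unregister the block device and release the
 * shared page.  Depending on do_free the session is either parked back
 * on notinited_list or freed.
 */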
static void close_session(struct session_struct *s, int do_free)
{
	down(&s->s_sem);
	sa_debug(DEBUG_API, "s=%p, state=%d, users=%d, do_free=%d\n", s,
				s->s_state, atomic_read(&s->s_users), do_free);
	if (s->s_disk)
		atomic_dec(&s->s_users); /* "disk" user */
	wait_for_users(s);
	if (s->s_disk)
		atomic_inc(&s->s_users); /* for further unregister_device */

	spin_lock(&s->s_misc_lock);
	if (s->s_timer.function) {
		del_timer_sync(&s->s_timer);
		s->s_timer.function = NULL;
	}

	spin_unlock(&s->s_misc_lock);
	spin_unlock(&sessions_lock);

	unregister_device(s);

	spin_lock(&sessions_lock);
	list_del_init(&s->s_list);
	s->s_kdev = 0;
	if (!do_free)
		list_add(&s->s_list, &notinited_list);
	else
		sessions_count--;
	spin_unlock(&sessions_lock);

	sn_set_mb(s->s_state, SNUM_NOTINITED);
	if (s->s_mpage) {
		sa_debug(DEBUG_ALLOC, "s=%p, page_release=%p(%d)\n", s,
				s->s_mpage, page_count(s->s_mpage));
		s->s_ppages++;
		page_cache_release(s->s_mpage);
		s->s_mpage = NULL;
	}
	up(&s->s_sem);
	if (do_free)
		kfree(s);
}

#if !defined(BIO_EOPNOTSUPP)
static void clean_bio(struct bio *bio)
{
	int i;
	for (i = 0; i < bio->bi_vcnt; i++) {
		char *addr;
		struct bio_vec *vec = &bio->bi_io_vec[i];
		addr = sn_kmap_atomic(vec->bv_page);
		memset(addr + vec->bv_offset, 0, vec->bv_len);
		sn_kunmap_atomic(addr);
	}
}
#endif

static inline sector_t sn_bio_bi_sector(struct bio *bio)
{
#ifdef HAVE_BVEC_ITER
	return bio->bi_iter.bi_sector;
#else
	return bio->bi_sector;
#endif
}

static inline unsigned int sn_bio_bi_size(struct bio *bio)
{
#ifdef HAVE_BVEC_ITER
	return bio->bi_iter.bi_size;
#else
	return bio->bi_size;
#endif
}

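/*
 * make_request handler of the virtual disk.  Requests are served
 * synchronously, one bio segment at a time: for every bio_vec the
 * session is put into SNUM_WAKEUP_REQ, the user-space daemon is woken
 * through s_select_wait (poll/read) and we sleep on s_io_wait until it
 * answers with SNUM_DATA_READY (write or SNUMBDCTL_DATAREADY) or the
 * session enters an error state.  s_iosem serializes concurrent bios.
 */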
static MAKE_REQUEST_RETURN_VALUE snumbd_make_request(sn_request_queue *q, struct bio *bio)
{
	struct session_struct *s;
	int i;
	unsigned len;
	
	s = (struct session_struct *)(bio->bi_bdev->bd_disk->private_data);
	if (!s) {
		sa_warn("Can't find session, bio=%p dev=%x.\n", bio,
			bio->bi_bdev->bd_dev);
		sn_bio_endio(bio, 0, -EIO);
		return MAKE_REQUEST_EXIT_STATUS;
	}
	if (!is_session_alive(s)) {
		sa_warn("Not our session! bio=%p dev=%x.\n", bio,
			bio->bi_bdev->bd_dev);
		sn_bio_endio(bio, 0, -EIO);
		return MAKE_REQUEST_EXIT_STATUS;
	}
	len = 0;
	atomic_inc(&s->s_users);
	if (s->s_state < SNUM_INITED) {
		sa_warn("Session is not inited, bio=%p dev=%x.\n", bio, bio->bi_bdev->bd_dev);
		sn_bio_endio(bio, 0, -EIO);
		atomic_dec(&s->s_users);
		return MAKE_REQUEST_EXIT_STATUS;
	}
	down(&s->s_iosem);
	sa_debug(DEBUG_API, "Start s=%p, state=%d, users=%d\n", s,
				s->s_state, atomic_read(&s->s_users));
	if (IS_ERROR_STATE(s->s_state)) {
#if defined(BIO_EOPNOTSUPP)
		bio->bi_flags |= 1 << BIO_EOPNOTSUPP;
		goto out_ok;
#else
		clean_bio(bio);
		goto out_up;
#endif
	}

	for (i = 0; i < bio->bi_vcnt; i++) {
		s->s_cmd = (bio->bi_rw & REQ_WRITE) ? WRITE_DATA : READ_DATA;
		s->s_bio_vec = &bio->bi_io_vec[i];
		s->s_sector = sn_bio_bi_sector(bio) + (len >> 9);
		s->s_offset = len % 512;
		sn_set_mb(s->s_state, SNUM_WAKEUP_REQ);
		wake_up_all(&s->s_select_wait);
		sa_debug(DEBUG_INTERNALS, "s=%p state=%d %s(%lu) sector=%llu"
				" bv_len=%d offset=%d\n", s, s->s_state,
				(bio->bi_rw & REQ_WRITE) ? "WRITE" : "READ",
				bio->bi_rw,
				sn_bio_bi_sector(bio), bio->bi_io_vec[i].bv_len,
				bio->bi_io_vec[i].bv_offset);
		set_session_timer(s);
		wait_event(s->s_io_wait, s->s_state == SNUM_DATA_READY ||
					IS_ERROR_STATE(s->s_state));
		reset_session_timer(s);
		if (IS_ERROR_STATE(s->s_state)) {
#if defined(BIO_EOPNOTSUPP)
			bio->bi_flags |= 1 << BIO_EOPNOTSUPP;
			goto out_ok;
#else
			clean_bio(bio);
			goto out_up;
#endif
		}
		len += bio->bi_io_vec[i].bv_len;
		sn_set_mb(s->s_state, SNUM_INITED);
	}
out_ok:
	s->s_bio_vec = NULL;
	up(&s->s_iosem);
	atomic_dec(&s->s_users);
	sn_bio_endio(bio, sn_bio_bi_size(bio), 0);
	return MAKE_REQUEST_EXIT_STATUS;

#if !defined(BIO_EOPNOTSUPP)
out_up:
	s->s_bio_vec = NULL;
	up(&s->s_iosem);
	atomic_dec(&s->s_users);
	sn_bio_endio(bio, len, -EIO);
	return MAKE_REQUEST_EXIT_STATUS;
#endif
}

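/*
 * SNUMBDCTL_INIT backend: allocate the shared transfer page, pick a
 * free minor, move the session from notinited_list to sessions_list and
 * register a block device of 'size' 512-byte sectors (read-only if 'ro'
 * is set).  Only the creator's process group may open the resulting
 * device later (see snumbd_open_blk()).
 */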
static int session_init(struct session_struct * s, unsigned long long size,
								int ro)
{
	int ret;
	int minor;
//	char buf[32];

	sa_debug(DEBUG_API, "len=%llu.\n", size);
	ret = -EINVAL;
	down(&s->s_sem);
	if (s->s_state != SNUM_NOTINITED)
		goto out;

	ret = -ENOMEM;
	s->s_mpage = alloc_page(GFP_KERNEL);
	if (!s->s_mpage)
		goto out;
	s->s_scount = size;

	init_waitqueue_head(&s->s_select_wait);
	init_waitqueue_head(&s->s_io_wait);

	spin_lock(&sessions_lock);
	minor = get_free_minor();
	ret = -ENODEV;
	if (minor > MAX_MINOR) {
		spin_unlock(&sessions_lock);
		goto out;
	}
	list_del_init(&s->s_list);
	s->s_kdev = MKDEV(snumbd_major, minor);
	list_add_tail(&s->s_list, &sessions_list);
//	snprintf(buf, sizeof(buf), SNUMBD_NAME"%d", sessions_count);
	spin_unlock(&sessions_lock);
	ret = register_device(s, ro);
	if (ret) {
		spin_lock(&sessions_lock);
		list_del_init(&s->s_list);
		s->s_kdev = 0;
		list_add(&s->s_list, &notinited_list);
		spin_unlock(&sessions_lock);
		unregister_device(s);
		goto out_free;
	}

	s->s_apgrp = sn_current_pgrp();
	sa_kdebug("OK. kdev=%x:%x, len=%llu s=%p pgrp=%d.\n", MAJOR(s->s_kdev),
				MINOR(s->s_kdev), s->s_scount, s, s->s_apgrp);
	s->s_hpid = current->pid;
	sn_set_mb(s->s_state, SNUM_INITED);
	goto out;

out_free:
	page_cache_release(s->s_mpage);
	s->s_mpage = NULL;
out:
	up(&s->s_sem);
	return ret;
}

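/*
 * Hand the pending block request to user space: a struct snumbd_req
 * describing command, sector, offset and length is copied to the
 * caller's buffer, and for WRITE_DATA the payload is copied from the
 * bio page into the shared mmap page as well.  Called from read(2) on
 * the control device and from the SNUMBDCTL_REQ ioctl.
 */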
static int session_req(struct session_struct *s, unsigned int size,
								void *req)
{
	int ret;
	struct snumbd_req kreq;

	sa_debug(DEBUG_API, "s=%p, kdev=%x, size=%u, cmd=%d, state=%d, "
				"users=%d.\n", s, s->s_kdev, size, s->s_cmd,
				s->s_state, atomic_read(&s->s_users));
	down(&s->s_sem);
	sn_set_mb(s->s_state, SNUM_REQ_RECV);
	kreq.cmd = s->s_cmd;
	kreq.sno = s->s_sector;
	kreq.offset = s->s_offset;
	kreq.len = s->s_bio_vec->bv_len;

	if (s->s_cmd == WRITE_DATA) {
		char *kaddr;
		kaddr = sn_kmap_atomic(s->s_bio_vec->bv_page);
		memcpy(page_address(s->s_mpage), kaddr +
			s->s_bio_vec->bv_offset, s->s_bio_vec->bv_len);
		sn_kunmap_atomic(kaddr);
	}
	if (size > sizeof(kreq))
		size = sizeof(kreq);
	ret = copy_to_user(req, &kreq, size);
	if (ret)
		ret = -EACCES;

	up(&s->s_sem);
	return ret;
}

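/*
 * Completion path: user space reports that the request was served.  For
 * READ_DATA the data is copied from the shared mmap page into the bio
 * page; a command with ERROR_FLAG set puts the session into
 * SNUM_SESSION_ERR instead.  Either way the sleeper in
 * snumbd_make_request() is woken via s_io_wait.
 */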
static int session_dataready(struct session_struct *s, unsigned int size,
							const void *req)
{
	int ret;
	struct snumbd_req kreq;
	sa_debug(DEBUG_API, "s=%p kdev=%x, size=%u, cmd=%d, state=%d, "
				"users=%d.\n", s, s->s_kdev, size, s->s_cmd,
				s->s_state, atomic_read(&s->s_users));
	if (size > sizeof(kreq))
		size = sizeof(kreq);
	down(&s->s_sem);
	ret = copy_from_user(&kreq, req, size);
	if (ret) {
		ret = -EACCES;
		goto out;
	}
	if (kreq.cmd & ERROR_FLAG) {
		ret = -ENOSPC;
		sn_set_mb(s->s_state, SNUM_SESSION_ERR);
		wake_up_all(&s->s_io_wait);
		goto out;
	}
	ret = -EINVAL;
	if (s->s_state != SNUM_REQ_RECV)
		goto out;
	if (s->s_cmd == READ_DATA) {
		char *kaddr;
		kaddr = sn_kmap_atomic(s->s_bio_vec->bv_page);
		memcpy(kaddr + s->s_bio_vec->bv_offset,
			page_address(s->s_mpage), s->s_bio_vec->bv_len);
		sn_kunmap_atomic(kaddr);
	}
	sn_set_mb(s->s_state, SNUM_DATA_READY);
	wake_up_all(&s->s_io_wait);
	ret = 0;

out:
	up(&s->s_sem);
	return ret;
}

static int session_state(struct session_struct *s, int size, void *state)
{
	struct snumbd_state st;
	int ret;

	atomic_inc(&s->s_users);
	fill_state(s, &st);
	atomic_dec(&s->s_users);
	spin_lock(&sessions_lock);
	st.sessions = sessions_count;
	spin_unlock(&sessions_lock);

	if (size > sizeof(st))
		size = sizeof(st);
	ret = copy_to_user(state, &st, size);
	if (ret)
		ret = -EACCES;

	return ret;
}

static int session_states(struct session_struct *s, int size, void *state)
{
	struct snumbd_state st;
	struct snumbd_state *out;
	struct list_head *tmp;
	int len;
	int ret;

	sa_debug(DEBUG_API, "s=%p, size=%d, state=%p\n", s, size, state);
	out = state;
	len = 0;
	ret = -ENOSPC;
	spin_lock(&sessions_lock);
	list_for_each(tmp, &sessions_list) {
		struct session_struct *ss;
		ss = list_entry(tmp, struct session_struct, s_list);
		fill_state(ss, &st);
		st.sessions = sessions_count;
		if (size - len < sizeof(st))
			goto err_unlock;
		sa_debug(DEBUG_INTERNALS, "out=%p, len=%d\n", out, len);
		ret = copy_to_user(out, &st, sizeof(st));
		if (ret) {
			ret = -EACCES;
			goto err_unlock;
		}
		len += sizeof(st);
		out++;
	}
	list_for_each(tmp, &notinited_list) {
		struct session_struct *s;
		s = list_entry(tmp, struct session_struct, s_list);
		fill_state(s, &st);
		st.sessions = sessions_count;
		if (size - len < sizeof(st))
			goto err_unlock;
		sa_debug(DEBUG_INTERNALS, "out=%p, len=%d\n", out, len);
		ret = copy_to_user(out, &st, sizeof(st));
		if (ret) {
			ret = -EACCES;
			goto err_unlock;
		}
		len += sizeof(st);
		out++;
	}
	ret = 0;

err_unlock:
	spin_unlock(&sessions_lock);
	return ret;
}

static int snumbdctl3_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	int err;
	struct session_struct * ss;

	if (!snumbd_init_ok)
		return -EPERM;
	ss = file->private_data;
	if (!ss)
		return -EINVAL;
	err = -EFAULT;

	spin_lock(&ss->s_misc_lock);
	ss->s_ioctlcnt++;
	spin_unlock(&ss->s_misc_lock);

	switch(cmd) {
	    case SNUMBDCTL_INIT: {
			struct snumbdctl_init s;
			if (copy_from_user(&s, (void *)arg, sizeof(s)))
				break;
			err = session_init(ss, s.scount, s.dev_ro);
		}
		break;
	    case SNUMBDCTL_REQ: {
			struct snumbdctl_req s;
			if (copy_from_user(&s, (void *)arg, sizeof(s)))
				break;
			err = session_req(ss, s.size, s.req);
		}
		break;
	    case SNUMBDCTL_DATAREADY: {
			struct snumbdctl_dataready s;
			if (copy_from_user(&s, (void *)arg, sizeof(s)))
				break;
			err = session_dataready(ss, s.size, s.req);
		}
		break;
	    case SNUMBDCTL_STATE: {
			struct snumbdctl_state s;
			if (copy_from_user(&s, (void *)arg, sizeof(s)))
				break;
			err = session_state(ss, s.size, s.state);
		}
		break;
	    case SNUMBDCTL_STATES: {
			struct snumbdctl_state s;
			if (copy_from_user(&s, (void *)arg, sizeof(s)))
				break;
			err = session_states(ss, s.size, s.state);
		}
		break;
	    default:
		err = -ENOTTY;
		break;
	}
	sa_debug(DEBUG_API, "err=%d\n", -err);
	return err;
}
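
/*
 * Rough shape of the user-space daemon loop, for illustration only (the
 * real structure layouts come from snumbd.h):
 *
 *   fd = open(control device node, O_RDWR);
 *   ioctl(fd, SNUMBDCTL_INIT, &init);             // register the block device
 *   page = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
 *               fd, (minor - 1) * PAGE_SIZE);     // shared transfer page
 *   for (;;) {
 *       poll for POLLIN on fd;                    // a request is pending
 *       read(fd, &req, sizeof(req));              // fetch struct snumbd_req
 *       serve req, exchanging data through page;
 *       write(fd, &req, sizeof(req));             // signal data ready
 *   }
 */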
#ifndef HAVE_UNLOCKED_IOCTL
static int snumbdctl4_ioctl(struct inode *ino, struct file *file, unsigned int cmd,
		unsigned long arg)
{
	return snumbdctl3_ioctl(file, cmd, arg);
}
#endif /* !HAVE_UNLOCKED_IOCTL */
#ifdef HAVE_IOCTL32_CONVERSION
static int
snumbdctl_compat_ioctl(unsigned int fd, unsigned int cmd,
			unsigned long arg, struct file *filep)
{
	return snumbdctl3_ioctl(filep, cmd, arg);
}
#endif

#ifdef HAVE_COMPAT_IOCTL
static long
snumbdctl_compat_ioctl(struct file *filep, unsigned int cmd,
			unsigned long arg)
{
	return snumbdctl3_ioctl(filep, cmd, arg);
}
#endif

static int snumbdctl_open(struct inode *ino, struct file *file)
{
	struct session_struct * s;

	sa_debug(DEBUG_API,"%s\n","enter");
	if (!snumbd_init_ok)
		return -EPERM;
	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;
	if (!try_module_get(THIS_MODULE)) {
		kfree(s);
		return -ENODEV;
	}
	memset(s, 0, sizeof(*s));
	INIT_LIST_HEAD(&s->s_list);
	sema_init(&s->s_sem, 1);
	sema_init(&s->s_iosem, 1);
	spin_lock_init(&s->s_misc_lock);
	atomic_set(&s->s_users, 1);

	spin_lock(&sessions_lock);
	list_add(&s->s_list, &notinited_list);
	sessions_count++;
	spin_unlock(&sessions_lock);

	file->private_data = s;
	sa_kdebug("%s s=%p\n", "OK", s);
	return 0;
}

static int snumbdctl_release(struct inode *ino, struct file *file)
{
	struct session_struct * s;

	s = file->private_data;
	if (!s)
		return -EINVAL;
	file->private_data = NULL;
	sa_debug(DEBUG_API,"%s\n","enter");

	close_session(s, 1);
	module_put(THIS_MODULE);
	sa_kdebug("%s s=%p\n", "OK", s);
	return 0;
}

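/*
 * Fault handler for the control device mapping: every fault in the
 * single-page VMA resolves to the session's shared transfer page
 * s_mpage, so kernel and daemon always see the same memory.
 */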
static struct page * snumbdctl_vm_nopage(struct vm_area_struct * vma,
					unsigned long address, int *unused)
{
	struct session_struct *s;
	
	if (!vma->vm_file) {
		sa_warn("vma does not have a file attached.%s", "\n");
		return (struct page *)SN_NOPAGE_SIGBUS;
	}
	s = vma->vm_file->private_data;
	sa_debug(DEBUG_API,"s=%p, vma=%p, address=%lx, pgoff=%lu\n", s, vma,
			address, vma->vm_pgoff);

	if (address - vma->vm_start >= PAGE_SIZE) {
		sa_warn("Incorrect address.%s", "\n");
		return (struct page *)SN_NOPAGE_SIGBUS;
	}
	get_page(s->s_mpage);
	sa_debug(DEBUG_ALLOC, "s=%p, nopage=%p(%d)\n", s, s->s_mpage,
					page_count(s->s_mpage));
	s->s_gpages++;

	return s->s_mpage;
}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
static int snumbdctl_vm_fault(struct vm_area_struct * vma,
					struct vm_fault *vmf)
{
	unsigned long address = (unsigned long) vmf->virtual_address;
	
	vmf->page = snumbdctl_vm_nopage(vma, address, 0);
	if (vmf->page == (struct page *)SN_NOPAGE_SIGBUS)
		return VM_FAULT_ERROR;
	return 0;
}
#endif

static void snumbdctl_vm_close(struct vm_area_struct * vma)
{
	struct session_struct *s;
	
	if (!vma->vm_file) {
		sa_warn("vma does not have a file attached.%s", "\n");
		return;
	}
	s = vma->vm_file->private_data;
	sa_debug(DEBUG_API,"s=%p, vma=%p, state=%d, users=%d\n", s, vma,
			s->s_state, atomic_read(&s->s_users));

	if (s->s_mpage) {
		sa_debug(DEBUG_ALLOC, "s=%p, put page=%p(%d)\n", s, s->s_mpage,
							page_count(s->s_mpage));
		/* page was put by upper level */
		s->s_ppages++;
	}
	s->s_vma = NULL;
}

static struct vm_operations_struct snumbdctl_vm_ops = {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)
	nopage:	snumbdctl_vm_nopage,
#else
	fault:	snumbdctl_vm_fault,
#endif
	close:	snumbdctl_vm_close,
};

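/*
 * mmap of the control device: exactly one shared page per session,
 * mapped at a page offset equal to the device minor minus one, and only
 * after the session has been initialized.
 */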
static int snumbdctl_mmap(struct file * file, struct vm_area_struct * vma)
{
	struct session_struct *s;
	int ret;

	s = file->private_data;
	sa_debug(DEBUG_API,"s=%p, vma=%p,%lx-%lx %lx %lx\n", s, vma,
						vma->vm_start, vma->vm_end,
						vma->vm_flags, vma->vm_pgoff);
	if (!s)
		return -EBADF;
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	ret = -EINVAL;
	down(&s->s_sem);
	if (s->s_vma || s->s_state < SNUM_INITED ||
			vma->vm_pgoff != MINOR(s->s_kdev) - 1)
		goto out_up;

	ret = -ENOMEM;
	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		goto out_up;

	ret = 0;
	s->s_vma = vma;
	vma->vm_ops = &snumbdctl_vm_ops;

out_up:
	up(&s->s_sem);
	return ret;
}

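/*
 * read(2) on the control device blocks until the kernel side has posted
 * a request (SNUM_WAKEUP_REQ) and then returns the struct snumbd_req
 * descriptor via session_req().  The daemon may poll() the same fd to
 * wait for that state instead.
 */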
ssize_t
snumbdctl_read(struct file * filp, char * buf, size_t count, loff_t *ppos)
{
	struct session_struct *s;
	ssize_t ret;

	if (count != sizeof(struct snumbd_req))
		return -EINVAL;
	s = filp->private_data;
	sa_debug(DEBUG_API,"s=%p, buf=%p, count=%zu, ppos=%lld, state=%d\n",
			s, buf, count, *ppos, s->s_state);
	if (!s)
		return -EBADF;
	atomic_inc(&s->s_users);
	if (IS_ERROR_STATE(s->s_state)) {
		atomic_dec(&s->s_users);
		return -EIO;
	}

	down(&s->s_sem);
	if (s->s_state == SNUM_WAKEUP_REQ)
		goto send_req;
	wait_event_interruptible(s->s_select_wait, s->s_state == SNUM_WAKEUP_REQ
					|| IS_ERROR_STATE(s->s_state));
	if (signal_pending(current) || IS_ERROR_STATE(s->s_state))
		goto out_io;

send_req:
	up(&s->s_sem);
	ret = session_req(s, count, buf);
	atomic_dec(&s->s_users);
	if (!ret)
		ret = count;
	return ret;

out_io:
	up(&s->s_sem);
	atomic_dec(&s->s_users);
	return -EIO;
}

ssize_t
snumbdctl_write(struct file *filp, const char *buf, size_t count, loff_t *ppos)
{
	struct session_struct *s;
	ssize_t ret;

	s = filp->private_data;
	sa_debug(DEBUG_API,"s=%p, buf=%p, count=%zu, ppos=%lld, state=%d\n",
			s, buf, count, *ppos, s->s_state);
	if (!s)
		return -EBADF;
	ret = session_dataready(s, count, buf);
	if (!ret)
		ret = count;
	return ret;
}

static unsigned int snumbdctl_poll(struct file *filp, poll_table *wait)
{
	struct session_struct *s;
	unsigned int mask;

	s = filp->private_data;
	sa_debug(DEBUG_API, "s=%p\n", s);
	if (!s || IS_ERROR_STATE(s->s_state))
		return POLLERR;
	poll_wait(filp, &s->s_select_wait, wait);
	down(&s->s_sem);
	mask = 0;
	if (s->s_state == SNUM_WAKEUP_REQ)
		mask = POLLIN | POLLRDNORM;
	up(&s->s_sem);
	return mask;
}

static struct file_operations snumbdctl_fops = {
#ifdef HAVE_UNLOCKED_IOCTL
	unlocked_ioctl: snumbdctl3_ioctl,
#else
	ioctl: snumbdctl4_ioctl,
#endif
	open: snumbdctl_open,
	read: snumbdctl_read,
	write: snumbdctl_write,
	poll: snumbdctl_poll,
	mmap:  snumbdctl_mmap,
	release: snumbdctl_release,
#ifdef HAVE_COMPAT_IOCTL
	compat_ioctl: snumbdctl_compat_ioctl,
#endif
};

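/*
 * Module init: start the deadlock-resolver thread, register the control
 * character device and the snumbd block major, and install a reboot
 * notifier so the resolver is stopped cleanly on shutdown.
 */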
static int __init snumbd_init(void)
{
	int ret;

#ifndef USE_KERNEL_THREAD 
	resolver_task = kthread_create(resolver_loop, NULL, "snumbdd");
	if (IS_ERR(resolver_task)) {
		ret = PTR_ERR(resolver_task);
		goto out_info;
	}
	wake_up_process(resolver_task);
#else
	resolver_thread_pid = kernel_thread(resolver_loop, NULL, 0);
	if (resolver_thread_pid < 0) {
		ret = resolver_thread_pid;
		goto out_info;
	}
#endif
	snumbdctl_fops.owner = THIS_MODULE;
	ret = register_chrdev(0, SNUMBDCTL_NAME, &snumbdctl_fops);
	if (ret < 0)
		goto out_notify;
	snumbdctl_major = ret;
	ret = register_blkdev(0, SNUMBD_NAME);
	if (ret < 0)
		goto out_unreg_chr;
	snumbd_major = ret;
	register_reboot_notifier(&sn_notifier);
#ifdef HAVE_IOCTL32_CONVERSION
	register_ioctl32_conversion(SNUMBDCTL_INIT, snumbdctl_compat_ioctl);
	register_ioctl32_conversion(SNUMBDCTL_STOP, snumbdctl_compat_ioctl);
	register_ioctl32_conversion(SNUMBDCTL_REQ, snumbdctl_compat_ioctl);
	register_ioctl32_conversion(SNUMBDCTL_DATAREADY, snumbdctl_compat_ioctl);
	register_ioctl32_conversion(SNUMBDCTL_STATE, snumbdctl_compat_ioctl);
	register_ioctl32_conversion(SNUMBDCTL_STATES, snumbdctl_compat_ioctl);
#endif
	snumbd_init_ok = 1;
	ret = 0;
out_info:
	sa_info("Snumbd(v.%d.%d.%d) init %s. Ctl major %d, blk major %d.\n",
				SNUMBD_VMAJOR, SNUMBD_VMINOR, SNUMBD_VSUBMINOR,
				snumbd_init_ok ? "OK" : "failed",
				snumbdctl_major, snumbd_major);
	return ret;

out_unreg_chr:
	unregister_chrdev(snumbdctl_major, SNUMBDCTL_NAME);
out_notify:
	sn_notify_reboot(NULL, SYS_DOWN, NULL);
	goto out_info;
}

static void __exit snumbd_exit(void)
{
	unregister_reboot_notifier(&sn_notifier);
	sn_notify_reboot(NULL, SYS_DOWN, NULL);
	unregister_chrdev(snumbdctl_major, SNUMBDCTL_NAME);
	unregister_blkdev(snumbd_major, SNUMBD_NAME);
#ifdef HAVE_IOCTL32_CONVERSION 
	unregister_ioctl32_conversion(SNUMBDCTL_INIT);
	unregister_ioctl32_conversion(SNUMBDCTL_STOP);
	unregister_ioctl32_conversion(SNUMBDCTL_REQ);
	unregister_ioctl32_conversion(SNUMBDCTL_DATAREADY);
	unregister_ioctl32_conversion(SNUMBDCTL_STATE);
	unregister_ioctl32_conversion(SNUMBDCTL_STATES);
#endif
	sa_info("Snumbd unloading...%s", "\n");
}

module_init(snumbd_init);
module_exit(snumbd_exit);
MODULE_AUTHOR("Acronis");
MODULE_DESCRIPTION("Acronis User Mode Block Device");
MODULE_LICENSE("Proprietary");
MODULE_VERSION(SNUMBD_COMMON_MOD_VERSION);
MODULE_INFO(supported, "external");
