/*
 * iobuf.c
 *
 * Keep track of the general-purpose IO-buffer structures used to
 * represent abstract kernel-space io buffers (kiobufs).
 */

#include <linux/init.h>
#include <linux/iobuf.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/tqueue.h>

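/* Slab cache from which all struct kiobuf allocations are made. */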
static kmem_cache_t *kiobuf_cachep;
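
/*
 * Typical usage, as a sketch (the page-mapping and IO-submission
 * steps live elsewhere in the kernel, e.g. map_user_kiobuf()):
 *
 *	struct kiobuf *iobuf;
 *
 *	if (alloc_kiovec(1, &iobuf))
 *		return -ENOMEM;
 *	... map pages into the kiobuf and submit the IO ...
 *	kiobuf_wait_for_io(iobuf);
 *	free_kiovec(1, &iobuf);
 */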
      
      
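/*
 * Complete one unit of IO against a kiobuf: record -EIO on the first
 * failure, and when the last outstanding IO finishes, invoke the
 * end_io callback (if any) and wake up anyone waiting on the kiobuf.
 */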
void end_kio_request(struct kiobuf *kiobuf, int uptodate)
{
	if (!uptodate && !kiobuf->errno)
		kiobuf->errno = -EIO;

	if (atomic_dec_and_test(&kiobuf->io_count)) {
		if (kiobuf->end_io)
			kiobuf->end_io(kiobuf);
		wake_up(&kiobuf->wait_queue);
	}
}
      
      
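/*
 * One-time boot initialisation: create the slab cache from which all
 * kiobuf structures will be allocated.
 */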
void __init kiobuf_setup(void)
{
	kiobuf_cachep = kmem_cache_create("kiobuf",
					  sizeof(struct kiobuf),
					  0,
					  SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (!kiobuf_cachep)
		panic("Cannot create kernel iobuf cache\n");
}
      
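/*
 * Initialise a fresh kiobuf: clear it and point the page list at the
 * static map_array embedded in the structure, which can hold up to
 * KIO_STATIC_PAGES entries.
 */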
void kiobuf_init(struct kiobuf *iobuf)
{
	memset(iobuf, 0, sizeof(*iobuf));
	init_waitqueue_head(&iobuf->wait_queue);
	iobuf->array_len = KIO_STATIC_PAGES;
	iobuf->maplist   = iobuf->map_array;
}
      
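/*
 * Allocate and initialise nr kiobufs, storing the pointers in bufp.
 * On failure, the kiobufs already allocated (bufp[0] .. bufp[i-1])
 * are freed and -ENOMEM is returned.
 */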
int alloc_kiovec(int nr, struct kiobuf **bufp)
{
	int i;
	struct kiobuf *iobuf;

	for (i = 0; i < nr; i++) {
		iobuf = kmem_cache_alloc(kiobuf_cachep, SLAB_KERNEL);
		if (!iobuf) {
			free_kiovec(i, bufp);
			return -ENOMEM;
		}
		kiobuf_init(iobuf);
		bufp[i] = iobuf;
	}

	return 0;
}
      
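/*
 * Free nr kiobufs, unlocking any pages still locked in them and
 * releasing any page list that was expanded beyond the static array.
 */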
void free_kiovec(int nr, struct kiobuf **bufp)
{
	int i;
	struct kiobuf *iobuf;

	for (i = 0; i < nr; i++) {
		iobuf = bufp[i];
		if (iobuf->locked)
			unlock_kiovec(1, &iobuf);
		if (iobuf->array_len > KIO_STATIC_PAGES)
			kfree(iobuf->maplist);
		kmem_cache_free(kiobuf_cachep, iobuf);
	}
}
      
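/*
 * Grow a kiobuf's page list so it can hold at least 'wanted' pages.
 * kmalloc() with GFP_KERNEL may sleep, so the length is re-checked
 * after the allocation in case the kiobuf grew in the meantime.
 */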
int expand_kiobuf(struct kiobuf *iobuf, int wanted)
{
	struct page **maplist;

	if (iobuf->array_len >= wanted)
		return 0;

	maplist = (struct page **)
		kmalloc(wanted * sizeof(struct page *), GFP_KERNEL);
	if (!maplist)
		return -ENOMEM;

	/* Did it grow while we waited? */
	if (iobuf->array_len >= wanted) {
		kfree(maplist);
		return 0;
	}

	memcpy(maplist, iobuf->maplist, iobuf->array_len * sizeof(struct page *));

	if (iobuf->array_len > KIO_STATIC_PAGES)
		kfree(iobuf->maplist);

	iobuf->maplist   = maplist;
	iobuf->array_len = wanted;
	return 0;
}
      
      
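/*
 * Sleep (uninterruptibly) until all IO outstanding against the kiobuf
 * has completed.  The task state is set before io_count is re-tested
 * so that a wake-up from end_kio_request() cannot be lost between the
 * test and the call to schedule().
 */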
void kiobuf_wait_for_io(struct kiobuf *kiobuf)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

	if (atomic_read(&kiobuf->io_count) == 0)
		return;

	add_wait_queue(&kiobuf->wait_queue, &wait);
repeat:
	run_task_queue(&tq_disk);
	set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	if (atomic_read(&kiobuf->io_count) != 0) {
		schedule();
		goto repeat;
	}
	tsk->state = TASK_RUNNING;
	remove_wait_queue(&kiobuf->wait_queue, &wait);
}