linux 1.0 内核注解 linux/fs/inode.c
时间:2009-05-05 来源:taozhijiangscu
/********************************************
*Created By: 陶治江
*Date: 2009年5月2日1:15:13
********************************************/
#include <linux/stat.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <asm/system.h>

/* The in-core inode cache is a chained hash table keyed on
 * (device, inode number); "updating" serialises concurrent lookups
 * against get_empty_inode()'s list shuffling. */
static struct inode_hash_entry {
	struct inode * inode;
	int updating;
} hash_table[NR_IHASH];		/* NR_IHASH == 131 in Linux 1.0 */

/* Head of the circular doubly-linked list of all in-core inodes.
 * NOTE(review): this declaration had been swallowed into a `//`
 * comment in the pasted source; restored so the file compiles. */
static struct inode * first_inode;
static struct wait_queue * inode_wait = NULL;
static int nr_inodes = 0, nr_free_inodes = 0;
/* Hash a (device, inode number) pair into a bucket index. */
static inline int const hashfn(dev_t dev, unsigned int i)
{
	unsigned int mixed = dev ^ i;

	return mixed % NR_IHASH;
}
/* Map a (device, inode number) pair to its hash_table entry. */
static inline struct inode_hash_entry * const hash(dev_t dev, int i)
{
	return &hash_table[hashfn(dev, i)];
}
/* Link "inode" into the circular inode list just before the current
 * head, then make it the new head.  Assumes first_inode is non-NULL
 * (grow_inodes() seeds the first, self-linked element). */
static void insert_inode_free(struct inode *inode)
{
	struct inode *head = first_inode;
	struct inode *tail = head->i_prev;

	inode->i_next = head;
	inode->i_prev = tail;
	head->i_prev = inode;
	tail->i_next = inode;
	first_inode = inode;
}
/* Unlink "inode" from the i_prev/i_next list, advancing the head
 * pointer past it if necessary, and NULL out its link fields. */
static void remove_inode_free(struct inode *inode)
{
	struct inode *nxt = inode->i_next;
	struct inode *prv = inode->i_prev;

	if (first_inode == inode)
		first_inode = nxt;
	if (nxt)
		nxt->i_prev = prv;
	if (prv)
		prv->i_next = nxt;
	inode->i_next = NULL;
	inode->i_prev = NULL;
}
/* Push "inode" onto the front of its hash chain (i_hash_prev/next). */
void insert_inode_hash(struct inode *inode)
{
	struct inode_hash_entry *bucket = hash(inode->i_dev, inode->i_ino);
	struct inode *old_head = bucket->inode;

	inode->i_hash_next = old_head;
	inode->i_hash_prev = NULL;
	if (old_head)
		old_head->i_hash_prev = inode;
	bucket->inode = inode;
}

/* Unlink "inode" from its hash chain and clear its chain pointers. */
static void remove_inode_hash(struct inode *inode)
{
	struct inode_hash_entry *bucket = hash(inode->i_dev, inode->i_ino);
	struct inode *nxt = inode->i_hash_next;
	struct inode *prv = inode->i_hash_prev;

	if (bucket->inode == inode)	/* chain head needs adjusting */
		bucket->inode = nxt;
	if (nxt)
		nxt->i_hash_prev = prv;
	if (prv)
		prv->i_hash_next = nxt;
	inode->i_hash_prev = NULL;
	inode->i_hash_next = NULL;
}
/* Move "inode" to the tail of the list (just before first_inode),
 * i.e. make it the last candidate for reuse. */
static void put_last_free(struct inode *inode)
{
	struct inode *head;
	struct inode *tail;

	remove_inode_free(inode);
	head = first_inode;		/* read AFTER remove: head may have moved */
	tail = head->i_prev;
	inode->i_prev = tail;
	tail->i_next = inode;
	inode->i_next = head;
	head->i_prev = inode;
}
/* Allocate one fresh kernel page, carve it into struct inode slots and
 * add every slot to the inode list.  Silently does nothing when no
 * page is available. */
void grow_inodes(void)
{
	struct inode * inode;
	int i;

	if (!(inode = (struct inode*) get_free_page(GFP_KERNEL)))
		return;
	i=PAGE_SIZE / sizeof(struct inode);
	nr_inodes += i;
	nr_free_inodes += i;
	/* Very first inode: seed the circular list with one
	 * self-linked head, consuming one slot (hence the i--). */
	if (!first_inode)
		inode->i_next = inode->i_prev = first_inode = inode++, i--;
	for ( ; i ; i-- )
		insert_inode_free(inode++);
}
/* Initialise the inode cache: clear the hash table and empty the list.
 * The (start, end) memory range is unused here; start is handed
 * straight back to the caller (boot-time init convention). */
unsigned long inode_init(unsigned long start, unsigned long end)
{
	memset(hash_table, 0, sizeof(hash_table));
	first_inode = NULL;
	return start;
}

static void __wait_on_inode(struct inode *);

/* Sleep until the inode is unlocked (no-op when it is not locked). */
static inline void wait_on_inode(struct inode * inode)
{
	if (inode->i_lock)
		__wait_on_inode(inode);
}

/* Wait for any current holder, then take the lock ourselves. */
static inline void lock_inode(struct inode * inode)
{
	wait_on_inode(inode);
	inode->i_lock = 1;
}

/* Release the lock and wake every process sleeping on this inode. */
static inline void unlock_inode(struct inode * inode)
{
	inode->i_lock = 0;
	wake_up(&inode->i_wait);
}

/* Reset an inode to a pristine state and put it back on the list.
 * The wait-queue head must survive the memset: processes may already
 * be queued on i_wait, so it is saved and restored around the wipe
 * (the volatile casts keep the compiler from caching/eliding it). */
void clear_inode(struct inode * inode)
{
	struct wait_queue * wait;

	wait_on_inode(inode);
	remove_inode_hash(inode);
	remove_inode_free(inode);
	wait = ((volatile struct inode *) inode)->i_wait;
	/* A still-referenced inode becomes free here, so bump the
	 * free-inode count. */
	if (inode->i_count)
		nr_free_inodes++;
	memset(inode,0,sizeof(*inode));
	((volatile struct inode *) inode)->i_wait = wait;
	insert_inode_free(inode);
}
//1表示没有挂载,空闲的
//返回0表示可能被挂载了
/* May "dev" be mounted?  Returns 1 when no inode of the device is in
 * use (stale cached ones are cleared on the way), 0 when any inode is
 * referenced, dirty or locked. */
int fs_may_mount(dev_t dev)
{
	struct inode * inode, * next;
	int i;

	next = first_inode;
	for (i = nr_inodes ; i > 0 ; i--) {
		inode = next;
		/* clear_inode() relinks the list, so grab the successor
		 * before possibly clearing this inode. */
		next = inode->i_next; /* clear_inode() changes the queues.. */
		if (inode->i_dev != dev)
			continue;
		if (inode->i_count || inode->i_dirt || inode->i_lock)
			return 0;
		clear_inode(inode);
	}
	return 1;
}

/* May "dev" be unmounted?  Returns 1 when nothing on the device is
 * busy; the mount root itself is allowed exactly one reference (the
 * mount itself). */
int fs_may_umount(dev_t dev, struct inode * mount_root)
{
	struct inode * inode;
	int i;

	inode = first_inode;
	for (i=0 ; i < nr_inodes ; i++, inode = inode->i_next)
	{
		if (inode->i_dev != dev || !inode->i_count)
			continue;
		if (inode == mount_root && inode->i_count == 1)
			continue;	/* the mount point's own reference */
		return 0;	/* something on the device is busy */
	}
	return 1;
}
/* May "dev" be remounted read-only?  Returns 1 iff no regular file on
 * the device is currently open for writing. */
int fs_may_remount_ro(dev_t dev)
{
	struct file * file;
	int i;

	/* Check that no files are currently opened for writing. */
	for (file = first_file, i=0; i<nr_files; i++, file=file->f_next)
	{
		if (!file->f_count || !file->f_inode ||
		    file->f_inode->i_dev != dev)
			continue;
		if (S_ISREG(file->f_inode->i_mode) && (file->f_mode & 2))
			return 0;	/* open for write: refuse */
	}
	return 1;
}

/* Flush a dirty inode through the filesystem's write_inode hook.
 * Filesystems without such a hook just get the dirty bit cleared. */
static void write_inode(struct inode * inode)
{
	if (!inode->i_dirt)
		return;
	wait_on_inode(inode);
	/* Someone else may have written it while we slept. */
	if (!inode->i_dirt)
		return;
	if (!inode->i_sb || !inode->i_sb->s_op || !inode->i_sb->s_op->write_inode) {
		inode->i_dirt = 0;
		return;
	}
	/* Plain assignment instead of lock_inode(): wait_on_inode()
	 * above already guaranteed the lock is free. */
	inode->i_lock = 1;
	inode->i_sb->s_op->write_inode(inode);
	unlock_inode(inode);
}
static void read_inode(struct inode * inode)
{
lock_inode(inode);
if (inode->i_sb && inode->i_sb->s_op && inode->i_sb->s_op->read_inode)
inode->i_sb->s_op->read_inode(inode);
unlock_inode(inode);
} //对于chown, chmod, utime, and truncate等这些可能改变节点信息的
//操作中调用的,这里保证一定能被调用(不像write_inode) :-(
int notify_change(int flags, struct inode * inode)
{
if (inode->i_sb && inode->i_sb->s_op &&
inode->i_sb->s_op->notify_change)
return inode->i_sb->s_op->notify_change(flags, inode);
return 0;
} /*
* bmap is needed for demand-loading and paging: if this function
* doesn't exist for a filesystem, then those things are impossible:
* executables cannot be run from the filesystem etc...
*
* This isn't as bad as it sounds: the read-routines might still work,
* so the filesystem would be otherwise ok (for example, you might have
* a DOS filesystem, which doesn't lend itself to bmap very well, but
* you could still transfer files to/from the filesystem)
*/
int bmap(struct inode * inode, int block)
{
if (inode->i_op && inode->i_op->bmap)
return inode->i_op->bmap(inode,block);
return 0;
} //怎么感觉就是删除节点的啊
/* Throw away every unused cached inode of a removed device.  Busy
 * inodes are only reported, not cleared. */
void invalidate_inodes(dev_t dev)
{
	struct inode * inode, * next;
	int i;

	next = first_inode;
	for(i = nr_inodes ; i > 0 ; i--)
	{
		inode = next;
		/* clear_inode() relinks the list; save the successor. */
		next = inode->i_next;
		if (inode->i_dev != dev)
			continue;
		if (inode->i_count || inode->i_dirt || inode->i_lock) {
			printk("VFS: inode busy on removed device %d/%d\n", MAJOR(dev), MINOR(dev));
			continue;
		}
		clear_inode(inode);
	}
}

/* Write all dirty inodes (of one device, or of every device when
 * dev == 0) back to disk.  The list is walked twice (nr_inodes*2):
 * write_inode() may sleep, during which earlier entries can be
 * re-dirtied. */
void sync_inodes(dev_t dev)
{
	int i;
	struct inode * inode;

	inode = first_inode;
	for(i = 0; i < nr_inodes*2; i++, inode = inode->i_next) {
		if (dev && inode->i_dev != dev)
			continue;
		wait_on_inode(inode);
		if (inode->i_dirt)
			write_inode(inode);
	}
}
/* Release one reference to an inode ("put" it back).  When the last
 * reference goes away the inode is flushed if dirty, pipe memory is
 * freed, and the filesystem's put_inode hook may dispose of it. */
void iput(struct inode * inode)
{
	if (!inode)
		return;
	wait_on_inode(inode);
	/* The caller owes at least one reference; a zero count here
	 * means someone is putting an already-free inode. */
	if (!inode->i_count) {
		printk("VFS: iput: trying to free free inode\n");
		printk("VFS: device %d/%d, inode %lu, mode=0%07o\n",
			MAJOR(inode->i_rdev), MINOR(inode->i_rdev),
			inode->i_ino, inode->i_mode);
		return;
	}
	if (inode->i_pipe)
		wake_up_interruptible(&PIPE_WAIT(*inode));
repeat:
	if (inode->i_count>1) {
		inode->i_count--;	/* not the last reference: just drop it */
		return;
	}
	/* Last reference (i_count == 1, not yet decremented). */
	wake_up(&inode_wait);
	if (inode->i_pipe) {
		unsigned long page = (unsigned long) PIPE_BASE(*inode);
		PIPE_BASE(*inode) = NULL;
		free_page(page);
	}
	if (inode->i_sb && inode->i_sb->s_op && inode->i_sb->s_op->put_inode) {
		inode->i_sb->s_op->put_inode(inode);
		/* put_inode deleted the file (no links left): done. */
		if (!inode->i_nlink)
			return;
	}
	if (inode->i_dirt) {
		/* Flush first; write_inode() may sleep, so retest the
		 * reference count from the top afterwards. */
		write_inode(inode);
		wait_on_inode(inode);
		goto repeat;
	}
	/* Truly idle now. */
	inode->i_count--;
	nr_free_inodes++;
	return;
}

/* Hand out an unused in-core inode, growing the cache when fewer than
 * a quarter of all inodes are free.  May sleep. */
struct inode * get_empty_inode(void)
{
	struct inode * inode, * best;
	int i;

	if (nr_inodes < NR_INODE && nr_free_inodes < (nr_inodes >> 2))
		grow_inodes();
repeat:
	inode = first_inode;
	best = NULL;
	for (i = 0; i<nr_inodes; inode = inode->i_next, i++)
	{
		if (!inode->i_count) {
			if (!best)	/* remember the first unused one */
				best = inode;
			/* A clean, unlocked inode is ideal: take it. */
			if (!inode->i_dirt && !inode->i_lock) {
				best = inode;
				break;
			}
		}
	}
	/* Only a dirty/locked candidate (or none at all): prefer
	 * growing the cache over sleeping, while we still may. */
	if (!best || best->i_dirt || best->i_lock)
		if (nr_inodes < NR_INODE) {
			grow_inodes();
			goto repeat;
		}
	inode = best;
	if (!inode) {
		printk("VFS: No free inodes - contact Linus\n");
		sleep_on(&inode_wait);
		goto repeat;
	}
	if (inode->i_lock) {
		wait_on_inode(inode);
		goto repeat;
	}
	if (inode->i_dirt) {
		write_inode(inode);
		goto repeat;
	}
	/* Someone grabbed it while we slept above. */
	if (inode->i_count)
		goto repeat;
	/* Initialise the fresh inode. */
	clear_inode(inode);
	inode->i_count = 1;
	inode->i_nlink = 1;
	inode->i_sem.count = 1;
	nr_free_inodes--;
	if (nr_free_inodes < 0) {
		printk ("VFS: get_empty_inode: bad free inode count.\n");
		nr_free_inodes = 0;
	}
	return inode;
}
/* Build an in-core inode for an unnamed pipe: one page of buffer and
 * a reference count of 2 (one reader end + one writer end). */
struct inode * get_pipe_inode(void)
{
	struct inode * inode;
	extern struct inode_operations pipe_inode_operations;

	if (!(inode = get_empty_inode()))
		return NULL;
	/* One page (PAGE_SIZE) of pipe buffer. */
	if (!(PIPE_BASE(*inode) = (char*) __get_free_page(GFP_USER))) {
		iput(inode);
		return NULL;
	}
	inode->i_op = &pipe_inode_operations;
	inode->i_count = 2;	/* sum of readers/writers */
	PIPE_WAIT(*inode) = NULL;
	PIPE_START(*inode) = PIPE_LEN(*inode) = 0;
	PIPE_RD_OPENERS(*inode) = PIPE_WR_OPENERS(*inode) = 0;
	PIPE_READERS(*inode) = PIPE_WRITERS(*inode) = 1;
	PIPE_LOCK(*inode) = 0;
	inode->i_pipe = 1;
	inode->i_mode |= S_IFIFO | S_IRUSR | S_IWUSR;
	inode->i_uid = current->euid;
	inode->i_gid = current->egid;
	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	return inode;
}

/* Look up inode "nr" on superblock "sb", following mount points. */
struct inode * iget(struct super_block * sb,int nr)
{
	return __iget(sb,nr,1);
}
//创建一个新的
/* Core inode lookup: return the cached inode (sb->s_dev, nr) or read
 * it in from disk.  crossmntp != 0 makes a covered mount point resolve
 * to the mounted filesystem's root.  h->updating plus update_wait
 * serialise lookups against the list reshuffling that
 * get_empty_inode() can cause while sleeping. */
struct inode * __iget(struct super_block * sb, int nr, int crossmntp)
{
	static struct wait_queue * update_wait = NULL;
	struct inode_hash_entry * h;
	struct inode * inode;
	struct inode * empty = NULL;

	if (!sb)
		panic("VFS: iget with sb==NULL");
	h = hash(sb->s_dev, nr);
repeat:
	for (inode = h->inode; inode ; inode = inode->i_hash_next)
		if (inode->i_dev == sb->s_dev && inode->i_ino == nr)
			goto found_it;
	/* Not cached: allocate a spare inode, then search again —
	 * get_empty_inode() may have slept, racing with another lookup
	 * of the same inode. */
	if (!empty)
	{
		h->updating++;
		empty = get_empty_inode();
		if (!--h->updating)
			wake_up(&update_wait);
		if (empty)
			goto repeat;
		return (NULL);	/* no memory for a new inode */
	}
	/* Still absent after the second scan: initialise the spare
	 * inode and read it from disk. */
	inode = empty;
	inode->i_sb = sb;
	inode->i_dev = sb->s_dev;
	inode->i_ino = nr;
	inode->i_flags = sb->s_flags;
	put_last_free(inode);
	insert_inode_hash(inode);
	read_inode(inode);
	goto return_it;
found_it:
	if (!inode->i_count)	/* was unused: one fewer free inode */
		nr_free_inodes--;
	inode->i_count++;
	wait_on_inode(inode);
	/* Paranoia: the identity may have changed while we slept. */
	if (inode->i_dev != sb->s_dev || inode->i_ino != nr) {
		printk("Whee.. inode changed from under us. Tell Linus\n");
		iput(inode);
		goto repeat;
	}
	/* Crossing a mount point: hand back the mounted filesystem's
	 * root instead of the covered inode. */
	if (crossmntp && inode->i_mount) {
		struct inode * tmp = inode->i_mount;
		tmp->i_count++;
		iput(inode);
		inode = tmp;
		wait_on_inode(inode);
	}
	/* The spare inode turned out to be unnecessary: put it back. */
	if (empty)
		iput(empty);
return_it:
	while (h->updating)
		sleep_on(&update_wait);
	return inode;
}
//的锁被接触后,这个函数才能返回~~~
/* Sleep uninterruptibly until inode->i_lock drops.  Classic
 * wait-queue loop: the task state is re-set before every check so a
 * wake-up arriving between the test and schedule() is not lost. */
static void __wait_on_inode(struct inode * inode)
{
	struct wait_queue wait = { current, NULL };

	add_wait_queue(&inode->i_wait, &wait);
repeat:
	current->state = TASK_UNINTERRUPTIBLE;
	if (inode->i_lock) {
		schedule();
		goto repeat;
	}
	remove_wait_queue(&inode->i_wait, &wait);
	current->state = TASK_RUNNING;
}
文档地址:http://blogimg.chinaunix.net/blog/upfile2/090503233203.pdf
*Created By: 陶治江
*Date: 2009年5月2日1:15:13
********************************************/
#include <linux/stat.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <asm/system.h>

/* The in-core inode cache is a chained hash table keyed on
 * (device, inode number); "updating" serialises concurrent lookups
 * against get_empty_inode()'s list shuffling. */
static struct inode_hash_entry {
	struct inode * inode;
	int updating;
} hash_table[NR_IHASH];		/* NR_IHASH == 131 in Linux 1.0 */

/* Head of the circular doubly-linked list of all in-core inodes.
 * NOTE(review): this declaration had been swallowed into a `//`
 * comment in the pasted source; restored so the file compiles. */
static struct inode * first_inode;
static struct wait_queue * inode_wait = NULL;
static int nr_inodes = 0, nr_free_inodes = 0;
/* Hash a (device, inode number) pair into a bucket index. */
static inline int const hashfn(dev_t dev, unsigned int i)
{
	unsigned int mixed = dev ^ i;

	return mixed % NR_IHASH;
}
/* Map a (device, inode number) pair to its hash_table entry. */
static inline struct inode_hash_entry * const hash(dev_t dev, int i)
{
	return &hash_table[hashfn(dev, i)];
}
/* Link "inode" into the circular inode list just before the current
 * head, then make it the new head.  Assumes first_inode is non-NULL
 * (grow_inodes() seeds the first, self-linked element). */
static void insert_inode_free(struct inode *inode)
{
	struct inode *head = first_inode;
	struct inode *tail = head->i_prev;

	inode->i_next = head;
	inode->i_prev = tail;
	head->i_prev = inode;
	tail->i_next = inode;
	first_inode = inode;
}
/* Unlink "inode" from the i_prev/i_next list, advancing the head
 * pointer past it if necessary, and NULL out its link fields. */
static void remove_inode_free(struct inode *inode)
{
	struct inode *nxt = inode->i_next;
	struct inode *prv = inode->i_prev;

	if (first_inode == inode)
		first_inode = nxt;
	if (nxt)
		nxt->i_prev = prv;
	if (prv)
		prv->i_next = nxt;
	inode->i_next = NULL;
	inode->i_prev = NULL;
}
/* Push "inode" onto the front of its hash chain (i_hash_prev/next). */
void insert_inode_hash(struct inode *inode)
{
	struct inode_hash_entry *bucket = hash(inode->i_dev, inode->i_ino);
	struct inode *old_head = bucket->inode;

	inode->i_hash_next = old_head;
	inode->i_hash_prev = NULL;
	if (old_head)
		old_head->i_hash_prev = inode;
	bucket->inode = inode;
}

/* Unlink "inode" from its hash chain and clear its chain pointers. */
static void remove_inode_hash(struct inode *inode)
{
	struct inode_hash_entry *bucket = hash(inode->i_dev, inode->i_ino);
	struct inode *nxt = inode->i_hash_next;
	struct inode *prv = inode->i_hash_prev;

	if (bucket->inode == inode)	/* chain head needs adjusting */
		bucket->inode = nxt;
	if (nxt)
		nxt->i_hash_prev = prv;
	if (prv)
		prv->i_hash_next = nxt;
	inode->i_hash_prev = NULL;
	inode->i_hash_next = NULL;
}
/* Move "inode" to the tail of the list (just before first_inode),
 * i.e. make it the last candidate for reuse. */
static void put_last_free(struct inode *inode)
{
	struct inode *head;
	struct inode *tail;

	remove_inode_free(inode);
	head = first_inode;		/* read AFTER remove: head may have moved */
	tail = head->i_prev;
	inode->i_prev = tail;
	tail->i_next = inode;
	inode->i_next = head;
	head->i_prev = inode;
}
void grow_inodes(void)
{
struct inode * inode;
int i; if (!(inode = (struct inode*) get_free_page(GFP_KERNEL)))
return; i=PAGE_SIZE / sizeof(struct inode);
nr_inodes += i;
nr_free_inodes += i; if (!first_inode) //第一次,设置头部,注意,这里的i自减了一次哦
inode->i_next = inode->i_prev = first_inode = inode++, i--; for ( ; i ; i-- )
insert_inode_free(inode++);
} //参数用来干什么的???
unsigned long inode_init(unsigned long start, unsigned long end)
{
memset(hash_table, 0, sizeof(hash_table)); //表全部清零NULL
first_inode = NULL;
return start; //??
} static void __wait_on_inode(struct inode *); static inline void wait_on_inode(struct inode * inode)
{
if (inode->i_lock)
__wait_on_inode(inode);
} static inline void lock_inode(struct inode * inode)
{
wait_on_inode(inode);
inode->i_lock = 1;
} static inline void unlock_inode(struct inode * inode)
{
inode->i_lock = 0;
wake_up(&inode->i_wait); //唤醒等待的进程
} void clear_inode(struct inode * inode)
{
struct wait_queue * wait; wait_on_inode(inode);
remove_inode_hash(inode);
remove_inode_free(inode);
wait = ((volatile struct inode *) inode)->i_wait;
//如果先前是被引用的,这里释放了,所以空闲的节点应该增加一项
if (inode->i_count)
nr_free_inodes++;
memset(inode,0,sizeof(*inode));
/*Note the volatile
*同文件结构一样,这里也不破坏等待的进程队列哦
*就是前面的memset可以清除一切,但是不能断裂等待队列
*当然先从空闲inode中删除,这里再加入也是这个原因啊~*/
((volatile struct inode *) inode)->i_wait = wait;
insert_inode_free(inode);
} //下面两个函数都是放回0表示被挂载了,返回
//1表示没有挂载,空闲的
//返回0表示可能被挂载了
int fs_may_mount(dev_t dev)
{
struct inode * inode, * next;
int i; next = first_inode;
for (i = nr_inodes ; i > 0 ; i--) {
inode = next;
/*下面使用了clear_inode函数,这个函数调用了
*remove_inode_hash和remove_inode_free以及后来
*有调用insert_inode_free了,所以进行了这项操作
*之后具体的节点的顺序是不确定的,解决的方法是
*从头开始,但是这样很不实际,所以这里在操作之前
*先保存了下面一个要操作的节点的地址,应该是很好的选择 :-) indeed*/
next = inode->i_next; /* clear_inode() changes the queues.. */
if (inode->i_dev != dev)
continue;
if (inode->i_count || inode->i_dirt || inode->i_lock)
return 0; //返回表明节点可能被使用了
clear_inode(inode);
}
return 1;
} int fs_may_umount(dev_t dev, struct inode * mount_root)
{
struct inode * inode;
int i; inode = first_inode;
//这里没有进行clear_inode操作,所以比较简单了
for (i=0 ; i < nr_inodes ; i++, inode = inode->i_next)
{
if (inode->i_dev != dev || !inode->i_count)
continue;
if (inode == mount_root && inode->i_count == 1)
continue; //根设备比较特殊处理了,当然不会返回0了啊
return 0; //被挂载,忙
}
return 1; //可能没有被挂载
} //返回1表示可能只读的
int fs_may_remount_ro(dev_t dev)
{
struct file * file;
int i; /* Check that no files are currently opened for writing. */
for (file = first_file, i=0; i<nr_files; i++, file=file->f_next)
{
if (!file->f_count || !file->f_inode ||
file->f_inode->i_dev != dev)
continue;
if (S_ISREG(file->f_inode->i_mode) && (file->f_mode & 2)) //可写,No
return 0;
}
return 1;
} static void write_inode(struct inode * inode)
{
if (!inode->i_dirt) //没有脏,不用写
return;
wait_on_inode(inode);
if (!inode->i_dirt) //等待后很多事情都可能发生的哦
return;
if (!inode->i_sb || !inode->i_sb->s_op || !inode->i_sb->s_op->write_inode) {
inode->i_dirt = 0; //没有写操作就复位i_dirt了,真爱骗人...
return;
}
inode->i_lock = 1; //为什么不调用lock_inode呢???
//呃,反正调用lock_inode()就是多了wait_on_inode,这里已经得到
//所有权了,一样的~~
inode->i_sb->s_op->write_inode(inode);
unlock_inode(inode);
} //读取的过程中进行了节点的锁与解锁的操作
static void read_inode(struct inode * inode)
{
lock_inode(inode);
if (inode->i_sb && inode->i_sb->s_op && inode->i_sb->s_op->read_inode)
inode->i_sb->s_op->read_inode(inode);
unlock_inode(inode);
} //对于chown, chmod, utime, and truncate等这些可能改变节点信息的
//操作中调用的,这里保证一定能被调用(不像write_inode) :-(
int notify_change(int flags, struct inode * inode)
{
if (inode->i_sb && inode->i_sb->s_op &&
inode->i_sb->s_op->notify_change)
return inode->i_sb->s_op->notify_change(flags, inode);
return 0;
} /*
* bmap is needed for demand-loading and paging: if this function
* doesn't exist for a filesystem, then those things are impossible:
* executables cannot be run from the filesystem etc...
*
* This isn't as bad as it sounds: the read-routines might still work,
* so the filesystem would be otherwise ok (for example, you might have
* a DOS filesystem, which doesn't lend itself to bmap very well, but
* you could still transfer files to/from the filesystem)
*/
int bmap(struct inode * inode, int block)
{
if (inode->i_op && inode->i_op->bmap)
return inode->i_op->bmap(inode,block);
return 0;
} //怎么感觉就是删除节点的啊
void invalidate_inodes(dev_t dev)
{
struct inode * inode, * next;
int i; next = first_inode;
for(i = nr_inodes ; i > 0 ; i--)
{
inode = next;
next = inode->i_next; //有clear_inode,那么这种操作就是必须的
if (inode->i_dev != dev)
continue;
if (inode->i_count || inode->i_dirt || inode->i_lock) {
printk("VFS: inode busy on removed device %d/%d\n", MAJOR(dev), MINOR(dev));
continue;
}
clear_inode(inode);
}
} void sync_inodes(dev_t dev) //同步
{
int i;
struct inode * inode; inode = first_inode;
for(i = 0; i < nr_inodes*2; i++, inode = inode->i_next) { /*可能是两遍操作哈*/
if (dev && inode->i_dev != dev)
continue;
wait_on_inode(inode);
if (inode->i_dirt) //脏了就调用底层的写节点函数
write_inode(inode);
}
} //放回节点
void iput(struct inode * inode)
{
if (!inode)
return;
wait_on_inode(inode);
//释放已经是空闲的节点,因为在调用这个函数的时候
//本身要递减i_count引用次数,所以本身不能为0
if (!inode->i_count) {
printk("VFS: iput: trying to free free inode\n");
printk("VFS: device %d/%d, inode %lu, mode=0%07o\n",
MAJOR(inode->i_rdev), MINOR(inode->i_rdev),
inode->i_ino, inode->i_mode);
return;
}
if (inode->i_pipe) /*I do not know how to deal with pipe.*/
wake_up_interruptible(&PIPE_WAIT(*inode));
repeat:
if (inode->i_count>1) {
inode->i_count--; //只是递减
return;
}
//这里表示先前i_count==1,但是还没有递减
wake_up(&inode_wait);
if (inode->i_pipe) {
unsigned long page = (unsigned long) PIPE_BASE(*inode);
PIPE_BASE(*inode) = NULL;
free_page(page);
}
if (inode->i_sb && inode->i_sb->s_op && inode->i_sb->s_op->put_inode) {
inode->i_sb->s_op->put_inode(inode);
if (!inode->i_nlink) //到文件的连接数
return;
}
//呃~~~ ??
if (inode->i_dirt) {
//脏了就写,因为在写节点的地方有锁和解
//锁的操作,所以这里要等待操作
write_inode(inode);
wait_on_inode(inode);
goto repeat;
}
//真正空闲的节点了
inode->i_count--;
nr_free_inodes++;
return;
} struct inode * get_empty_inode(void)
{
struct inode * inode, * best;
int i; if (nr_inodes < NR_INODE && nr_free_inodes < (nr_inodes >> 2))
//这里的节点的添加具有另外的一个条件了:
//空闲节点的数目不到总的节点数目的1/4了
grow_inodes();
repeat:
inode = first_inode;
best = NULL;
for (i = 0; i<nr_inodes; inode = inode->i_next, i++)
{
if (!inode->i_count) {
if (!best) //收下先要有
best = inode;
/*没有脏的也没有被锁的节点当然是最好的节点了
*到达这里就可以立即获取节点了*/
if (!inode->i_dirt && !inode->i_lock) {
best = inode; //This is the best!
break;
}
}
}
/*这里的代码要注意了,因为从break跳出后没有经过睡眠,所以
*如果是好的节点就应该保持了那个状态了,否则就说明没有找到
*最好的节点了,增加节点的数目是有必要的
*如果节点的使用过多导致不能创建新的节点了,那么下面的各项
*判断睡眠的操作还是有必要的了,只能用他们了*/
if (!best || best->i_dirt || best->i_lock)
if (nr_inodes < NR_INODE) {
grow_inodes();
goto repeat;
}
inode = best;
if (!inode) {
printk("VFS: No free inodes - contact Linus\n");
sleep_on(&inode_wait);
goto repeat;
}
if (inode->i_lock) {
wait_on_inode(inode);
goto repeat;
}
if (inode->i_dirt) {
write_inode(inode);
goto repeat;
}
if (inode->i_count) /*又被抢了...*/
goto repeat;
/*初始化节点*/
clear_inode(inode);
inode->i_count = 1;
inode->i_nlink = 1;
inode->i_sem.count = 1;
nr_free_inodes--;
if (nr_free_inodes < 0) {
printk ("VFS: get_empty_inode: bad free inode count.\n");
nr_free_inodes = 0;
}
return inode;
} //管道是一个特殊的东东··
struct inode * get_pipe_inode(void)
{
struct inode * inode;
extern struct inode_operations pipe_inode_operations; if (!(inode = get_empty_inode()))
return NULL;
//管道通信的缓冲区(4K)
if (!(PIPE_BASE(*inode) = (char*) __get_free_page(GFP_USER))) {
iput(inode);
return NULL;
}
inode->i_op = &pipe_inode_operations;
inode->i_count = 2; /* sum of readers/writers */
PIPE_WAIT(*inode) = NULL;
PIPE_START(*inode) = PIPE_LEN(*inode) = 0;
PIPE_RD_OPENERS(*inode) = PIPE_WR_OPENERS(*inode) = 0;
PIPE_READERS(*inode) = PIPE_WRITERS(*inode) = 1;
PIPE_LOCK(*inode) = 0;
inode->i_pipe = 1;
inode->i_mode |= S_IFIFO | S_IRUSR | S_IWUSR;
inode->i_uid = current->euid;
inode->i_gid = current->egid;
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
return inode;
} struct inode * iget(struct super_block * sb,int nr)
{
return __iget(sb,nr,1);
} //获得指定sb的i节点,如果有就返回,如果没有就
//创建一个新的
struct inode * __iget(struct super_block * sb, int nr, int crossmntp)
{
static struct wait_queue * update_wait = NULL;
struct inode_hash_entry * h;
struct inode * inode;
struct inode * empty = NULL; if (!sb)
panic("VFS: iget with sb==NULL");
h = hash(sb->s_dev, nr);
repeat:
for (inode = h->inode; inode ; inode = inode->i_hash_next)
if (inode->i_dev == sb->s_dev && inode->i_ino == nr)
goto found_it;
if (!empty)
{
h->updating++;
empty = get_empty_inode();
if (!--h->updating)
wake_up(&update_wait);
if (empty)
goto repeat;
return (NULL); //empty is empty
}
//对空闲的empty节点进行初始化操作,实际就是复制一份了啊
inode = empty;
inode->i_sb = sb;
inode->i_dev = sb->s_dev;
inode->i_ino = nr;
inode->i_flags = sb->s_flags;
put_last_free(inode);
insert_inode_hash(inode);
read_inode(inode);
goto return_it; found_it:
if (!inode->i_count) //空闲的节点,新节点
nr_free_inodes--;
inode->i_count++;
wait_on_inode(inode);
//保守检测
if (inode->i_dev != sb->s_dev || inode->i_ino != nr) {
printk("Whee.. inode changed from under us. Tell Linus\n");
iput(inode);
goto repeat;
}
//呃,不太了解,放回一次然后再使用,干什么?
//有资料说是跨挂接点
if (crossmntp && inode->i_mount) {
struct inode * tmp = inode->i_mount;
tmp->i_count++;
iput(inode);
inode = tmp;
wait_on_inode(inode);
}
//先前申请的emtpy新节点可能没有被使用,如果是就放回
if (empty)
iput(empty); return_it:
while (h->updating)
sleep_on(&update_wait);
return inode;
} //对节点的等待操作,为不可中断的等待状态的,直到所有
//的锁被接触后,这个函数才能返回~~~
static void __wait_on_inode(struct inode * inode)
{
struct wait_queue wait = { current, NULL }; add_wait_queue(&inode->i_wait, &wait);
repeat:
current->state = TASK_UNINTERRUPTIBLE;
if (inode->i_lock) {
schedule();
goto repeat;
}
remove_wait_queue(&inode->i_wait, &wait);
current->state = TASK_RUNNING;
}
文档地址:http://blogimg.chinaunix.net/blog/upfile2/090503233203.pdf
相关阅读 更多 +