/*
 * Workqueue example
 * Date: 2009-05-18  Source: @sky
 */
/*
 * Workqueue demo: a "sender" work item allocates ten buffers and appends
 * them to a spinlock-protected doubly linked list; a "recver" work item,
 * running on a second workqueue, polls the list, prints each buffer's
 * payload, and frees it.
 */
#include <linux/workqueue.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/delay.h>

#define err(msg) printk(KERN_ALERT "%s\n", msg)

/* Mask for rounding allocation sizes up to unsigned long alignment. */
#define DATAALIGN (sizeof(unsigned long) - 1)

static void sendwork(struct work_struct *arg);
static void recvwork(struct work_struct *arg);

/*
 * List head: its first two members mirror buff_struct so the head can pose
 * as a sentinel node in the circular list; the lock protects the whole list.
 */
struct buff_struct_head {
	struct buff_struct *prev;
	struct buff_struct *next;
	spinlock_t lock;
};

struct buff_struct {
	struct buff_struct *prev;
	struct buff_struct *next;
	char data[];	/* C99 flexible array member (was data[0], a GNU extension) */
};

static struct buff_struct_head head = {
	.prev = (struct buff_struct *)&head,
	.next = (struct buff_struct *)&head,
	/* SPIN_LOCK_UNLOCKED was removed in Linux 2.6.39; use the per-lock
	 * static initializer instead. */
	.lock = __SPIN_LOCK_UNLOCKED(head.lock),
};

static struct workqueue_struct *sender;
static struct workqueue_struct *recver;

static DECLARE_WORK(sender_work, sendwork);
static DECLARE_WORK(recver_work, recvwork);

/* Append @newsk at the tail of @list under the list lock (IRQ-safe). */
static void buff_queue_tail(struct buff_struct_head *list, struct buff_struct *newsk)
{
	struct buff_struct *tail, *sentinel;
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	sentinel = (struct buff_struct *)list;	/* head poses as a node */
	tail = sentinel->prev;
	newsk->next = sentinel;
	newsk->prev = tail;
	sentinel->prev = tail->next = newsk;
	spin_unlock_irqrestore(&list->lock, flags);
}

/* Detach and return the first node of @list, or NULL if the list is empty. */
static struct buff_struct *buff_dequeue(struct buff_struct_head *list)
{
	struct buff_struct *first, *sentinel, *result = NULL;
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	sentinel = (struct buff_struct *)list;
	first = sentinel->next;
	if (first != sentinel) {	/* list non-empty */
		result = first;
		sentinel->next = first->next;
		first->next->prev = sentinel;
		result->next = result->prev = NULL;
	}
	spin_unlock_irqrestore(&list->lock, flags);
	return result;
}

/* Producer: allocate ten buffers holding "abcdefg" and queue them. */
static void sendwork(struct work_struct *arg)
{
	static const char data[] = "abcdefg";
	struct buff_struct *t;
	int i;
	/* sizeof(data) includes the NUL terminator, so termination no longer
	 * depends on alignment-rounding slack: the original computed
	 * sizeof + strlen(data), which leaves no room for the NUL whenever
	 * strlen(data) is a multiple of sizeof(unsigned long). */
	size_t len = (sizeof(*t) + sizeof(data) + DATAALIGN) & ~DATAALIGN;

	for (i = 0; i < 10; i++) {
		t = kzalloc(len, GFP_KERNEL);	/* zeroed: data stays NUL-terminated */
		if (!t) {
			err("kmalloc");
			return;
		}
		memcpy(t->data, data, sizeof(data) - 1);
		buff_queue_tail(&head, t);
	}
}

/* Consumer: poll the list until ten buffers have been printed and freed. */
static void recvwork(struct work_struct *arg)
{
	struct buff_struct *t;
	int count = 0;

	while (1) {
		t = buff_dequeue(&head);
		if (t) {
			printk(KERN_ALERT "%s\n", t->data);
			kfree(t);
			if (++count == 10)
				break;
		}
		/* NOTE(review): if the producer delivers fewer than ten
		 * buffers (allocation failure) this loop never terminates and
		 * flush_workqueue() at exit will hang; acceptable for a demo,
		 * but a production version should bound the wait. */
		msleep(10);
	}
}

static int workqueue_init(void)
{
	/* Original never checked these: a failed create_workqueue() would
	 * make queue_work() dereference NULL. */
	sender = create_workqueue("sender");
	if (!sender)
		return -ENOMEM;
	recver = create_workqueue("recver");
	if (!recver) {
		destroy_workqueue(sender);
		return -ENOMEM;
	}
	queue_work(sender, &sender_work);
	queue_work(recver, &recver_work);
	return 0;
}

static void workqueue_exit(void)
{
	struct buff_struct *t;

	flush_workqueue(sender);
	flush_workqueue(recver);
	/* Original only flushed; the workqueues themselves were leaked. */
	destroy_workqueue(sender);
	destroy_workqueue(recver);
	/* Release any buffers the consumer did not drain. */
	while ((t = buff_dequeue(&head)) != NULL)
		kfree(t);
	printk(KERN_ALERT "workqueue_exit\n");
}

module_init(workqueue_init);
module_exit(workqueue_exit);
MODULE_LICENSE("GPL");
/*
 * Removed: this span was an exact second copy of the entire module
 * (a web-scrape duplication artifact). Compiling it would redefine every
 * symbol — structs, the list head, both workqueues, all functions, and
 * module_init/module_exit — producing hard errors. The single definition
 * earlier in the file is authoritative.
 */
/* (web page trailer: "related reading / more") */