loopback.c
Date: 2009-08-06  Source: snriyt
/*------------------- loopback_net_ops -------------------*/
struct pernet_operations __net_initdata loopback_net_ops = {
	.init = loopback_net_init,
	.exit = loopback_net_exit,
};
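The .exit hook is not quoted in this walkthrough. For reference, a sketch of loopback_net_exit as it appears in kernels of this vintage: it simply unregisters the namespace's lo device, and the destructor installed by loopback_setup() then frees it.
/*--------- loopback_net_exit (sketch) ------------------*/
static __net_exit void loopback_net_exit(struct net *net)
{
	struct net_device *dev = net->loopback_dev;

	unregister_netdev(dev);	/* dev->destructor (loopback_dev_free) releases the memory */
}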
/*---------- module initialization -----------------*/
static __net_init int loopback_net_init(struct net *net)
{
	/* allocate a net_device with no private area; loopback_setup() initializes it */
	struct net_device *dev = alloc_netdev(0, "lo", loopback_setup);
	int err = -ENOMEM;

	if (!dev)
		return err;
	dev_net_set(dev, net);	/* dev->nd_net = hold_net(net): atomic_inc(&net->use_count) */
	err = register_netdev(dev);
	if (err)
		free_netdev(dev);
	else
		net->loopback_dev = dev;
	return err;
}
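loopback_net_init() ties the device to its namespace with dev_net_set(). A sketch of that helper as defined in include/linux/netdevice.h around this kernel version; the CONFIG_NET_NS guard is my recollection of the era, not quoted from this article's source.
/*--------- dev_net_set (sketch) ------------------*/
static inline void dev_net_set(struct net_device *dev, struct net *net)
{
#ifdef CONFIG_NET_NS
	release_net(dev->nd_net);	/* drop the reference on the old namespace */
	dev->nd_net = hold_net(net);	/* hold_net(): atomic_inc(&net->use_count) */
#endif
}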
/*--------------- loopback_setup() performs the device initialization -----------------*/
static void loopback_setup(struct net_device *dev)
{
	/* 16 KB of payload + 20-byte IP header + 20-byte TCP header + 12 bytes of TCP options */
	dev->mtu		= (16 * 1024) + 20 + 20 + 12;
	dev->hard_header_len	= ETH_HLEN;		/* 14 */
	dev->addr_len		= ETH_ALEN;		/* 6 */
	dev->tx_queue_len	= 0;
	dev->type		= ARPHRD_LOOPBACK;	/* 0x0001 */
	dev->flags		= IFF_LOOPBACK;
	dev->features		= NETIF_F_SG | NETIF_F_FRAGLIST
				  | NETIF_F_TSO
				  | NETIF_F_NO_CSUM
				  | NETIF_F_HIGHDMA
				  | NETIF_F_LLTX
				  | NETIF_F_NETNS_LOCAL;
	dev->ethtool_ops	= &loopback_ethtool_ops;
	dev->header_ops		= &eth_header_ops;
	dev->netdev_ops		= &loopback_ops;
	dev->destructor		= loopback_dev_free;
}
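loopback_setup() installs loopback_dev_free as the destructor, but the article does not quote it. A sketch of it from the same file: it undoes loopback_dev_init() by freeing the per-CPU counters, then frees the device itself.
/*--------- loopback_dev_free (sketch) ------------------*/
static void loopback_dev_free(struct net_device *dev)
{
	struct pcpu_lstats *lstats = dev->ml_priv;

	free_percpu(lstats);	/* undo the alloc_percpu() from loopback_dev_init() */
	free_netdev(dev);
}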
/*------- loopback_ops defines the operations of the loopback device -------------*/
static const struct net_device_ops loopback_ops = {
	.ndo_init	= loopback_dev_init,
	.ndo_start_xmit	= loopback_xmit,
	.ndo_get_stats	= loopback_get_stats,
};
/*--------- loopback_dev_init ------------------*/
static int loopback_dev_init(struct net_device *dev)
{
	/* struct pcpu_lstats { unsigned long packets; unsigned long bytes; }; */
	struct pcpu_lstats *lstats;	/* a pointer, not a struct: alloc_percpu() returns one */

	/* alloc_percpu(type) expands to (type *)__alloc_percpu(sizeof(type),
	 * __alignof__(type)), which ultimately calls kzalloc(size, GFP_KERNEL) */
	lstats = alloc_percpu(struct pcpu_lstats);
	if (!lstats)
		return -ENOMEM;

	dev->ml_priv = lstats;
	return 0;
}
/*--------------------- loopback_xmit --------------*/
static int loopback_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct pcpu_lstats *pcpu_lstats, *lb_stats;

	skb_orphan(skb);	/* detach the skb from its owning socket (see the sketch below) */

	skb->protocol = eth_type_trans(skb, dev);

	/* it's OK to use per_cpu_ptr() because BHs are off */
	pcpu_lstats = dev->ml_priv;
	lb_stats = per_cpu_ptr(pcpu_lstats, smp_processor_id());
	lb_stats->bytes += skb->len;
	lb_stats->packets++;

	/* netif_rx() takes a packet from a device driver and queues it for
	 * the upper (protocol) layers to process. It always succeeds; the
	 * buffer may still be dropped later, for congestion control or by
	 * the protocol layers. */
	netif_rx(skb);

	return 0;
}
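skb_orphan() deserves a note: it detaches the buffer from its sending socket, so looped-back traffic stops being charged against the sender's socket buffer. A sketch of the inline as it looked in include/linux/skbuff.h of this era:
/*--------- skb_orphan (sketch) ------------------*/
static inline void skb_orphan(struct sk_buff *skb)
{
	if (skb->destructor)
		skb->destructor(skb);	/* releases the sender's socket accounting */
	skb->destructor = NULL;
	skb->sk = NULL;
}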
/*--------------- net/core/dev.c: netif_rx() ----------------*/
int netif_rx(struct sk_buff *skb)
{
	struct softnet_data *queue;
	unsigned long flags;

	/* if netpoll wants it, pretend we never saw it */
	if (netpoll_rx(skb))
		return NET_RX_DROP;

	if (!skb->tstamp.tv64)
		net_timestamp(skb);

	/*
	 * The code is rearranged so that the path is the most
	 * short when CPU is congested, but is still operating.
	 */
	local_irq_save(flags);
	queue = &__get_cpu_var(softnet_data);

	__get_cpu_var(netdev_rx_stat).total++;
	if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
		if (queue->input_pkt_queue.qlen) {
enqueue:
			__skb_queue_tail(&queue->input_pkt_queue, skb);
			local_irq_restore(flags);
			return NET_RX_SUCCESS;
		}

		napi_schedule(&queue->backlog);
		goto enqueue;
	}

	__get_cpu_var(netdev_rx_stat).dropped++;
	local_irq_restore(flags);

	kfree_skb(skb);
	return NET_RX_DROP;
}
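This whole path can be exercised from user space: any datagram sent to 127.0.0.1 goes through loopback_xmit() and netif_rx() above before landing back in the socket layer. A minimal self-contained test program (port 40000 is an arbitrary choice for illustration):
/*--------- user-space check: one UDP datagram to ourselves ------------------*/
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <unistd.h>

int main(void)
{
	int s = socket(AF_INET, SOCK_DGRAM, 0);
	struct sockaddr_in addr;
	char buf[16];
	ssize_t n;

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_port = htons(40000);		/* arbitrary test port */
	inet_pton(AF_INET, "127.0.0.1", &addr.sin_addr);

	if (s < 0 || bind(s, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		return 1;
	sendto(s, "ping", 4, 0, (struct sockaddr *)&addr, sizeof(addr));
	n = recv(s, buf, sizeof(buf), 0);	/* delivered via loopback_xmit -> netif_rx */
	printf("got %zd bytes back over lo\n", n);
	close(s);
	return 0;
}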
/*-------------- loopback_get_stats collects the statistics -----------*/
static struct net_device_stats *loopback_get_stats(struct net_device *dev)
{
	const struct pcpu_lstats *pcpu_lstats;
	struct net_device_stats *stats = &dev->stats;
	unsigned long bytes = 0;
	unsigned long packets = 0;
	int i;

	pcpu_lstats = dev->ml_priv;
	for_each_possible_cpu(i) {	/* sum the counters of every possible CPU */
		const struct pcpu_lstats *lb_stats;

		lb_stats = per_cpu_ptr(pcpu_lstats, i);
		bytes += lb_stats->bytes;
		packets += lb_stats->packets;
	}
	/* on the loopback device every packet sent is also received,
	 * so the rx and tx counters are identical */
	stats->rx_packets = packets;
	stats->tx_packets = packets;
	stats->rx_bytes = bytes;
	stats->tx_bytes = bytes;
	return stats;
}
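The totals computed here are what surface in /sys/class/net/lo/statistics/ (and in ifconfig). A small user-space reader to confirm that rx and tx stay in lockstep on lo:
/*--------- user-space check: lo rx/tx byte counters match ------------------*/
#include <stdio.h>

static unsigned long read_counter(const char *path)
{
	unsigned long v = 0;
	FILE *f = fopen(path, "r");

	if (f) {
		if (fscanf(f, "%lu", &v) != 1)
			v = 0;
		fclose(f);
	}
	return v;
}

int main(void)
{
	/* loopback_get_stats() copies the same sums into both directions */
	printf("lo rx_bytes=%lu tx_bytes=%lu\n",
	       read_counter("/sys/class/net/lo/statistics/rx_bytes"),
	       read_counter("/sys/class/net/lo/statistics/tx_bytes"));
	return 0;
}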
/*--------- loopback_ethtool_ops -------------*/
static u32 always_on(struct net_device *dev)
{
	return 1;
}

static const struct ethtool_ops loopback_ethtool_ops = {
	.get_link	= always_on,
	.set_tso	= ethtool_op_set_tso,
	.get_tx_csum	= always_on,
	.get_sg		= always_on,
	.get_rx_csum	= always_on,
};
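always_on() is what answers the ETHTOOL_GLINK query, which is why "ethtool lo" always reports link detected. The same query issued directly from C, using the standard SIOCETHTOOL ioctl and struct ethtool_value from the kernel UAPI:
/*--------- user-space check: ETHTOOL_GLINK on lo ------------------*/
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct ethtool_value ev = { .cmd = ETHTOOL_GLINK };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "lo", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&ev;

	if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return 1;
	printf("lo link: %u\n", ev.data);	/* 1, via always_on() */
	close(fd);
	return 0;
}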