static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
                                 struct net_device *dev,
                                 struct netdev_queue *txq)
{
    spinlock_t *root_lock = qdisc_lock(q);
    bool contended;
    int rc;

    qdisc_pkt_len_init(skb);
    qdisc_calculate_pkt_len(skb, q);
    /*
     * Heuristic to force contended enqueues to serialize on a
     * separate lock before trying to get qdisc main lock.
     * This permits __QDISC___STATE_RUNNING owner to get the lock more
     * often and dequeue packets faster.
     */
    contended = qdisc_is_running(q); /* is another CPU already dequeuing this qdisc? */
    if (unlikely(contended))
        spin_lock(&q->busylock);

    spin_lock(root_lock);
    if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
        kfree_skb(skb);
        rc = NET_XMIT_DROP;
    } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) && /* bypass allowed and no backlog: try to send directly */
               qdisc_run_begin(q)) {
        /*
         * This is a work-conserving queue; there are no old skbs
         * waiting to be sent out; and the qdisc is not running -
         * xmit the skb directly.
         */
        qdisc_bstats_update(q, skb);

        if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
            if (unlikely(contended)) {
                spin_unlock(&q->busylock);
                contended = false;
            }
            __qdisc_run(q); /* nonzero return: the qdisc still holds packets, so drain the queue */
        } else
            qdisc_run_end(q); /* transmission completed normally: clear the running state */

        rc = NET_XMIT_SUCCESS;
    } else {
        rc = q->enqueue(skb, q) & NET_XMIT_MASK; /* qdisc running or backlogged: enqueue the skb on the qdisc */
        if (qdisc_run_begin(q)) { /* try to claim the running state; if we win, drain the queue */
            if (unlikely(contended)) {
                spin_unlock(&q->busylock);
                contended = false;
            }
            __qdisc_run(q); /* transmit the packets buffered in the qdisc queue */
        }
    }
    spin_unlock(root_lock);
    if (unlikely(contended))
        spin_unlock(&q->busylock);
    return rc;
}
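The bypass logic above hinges on qdisc_run_begin()/qdisc_run_end(), which mark a single owner of the dequeue loop so that only one CPU drains the qdisc at a time. For reference, in kernels of this generation these helpers in include/net/sch_generic.h look roughly as follows; the exact representation of the running state changed across versions, so treat this as a sketch rather than the definitive source:

/* Sketch of the running-state helpers (include/net/sch_generic.h,
 * version-dependent); __state is protected by the qdisc root lock. */
static inline bool qdisc_is_running(const struct Qdisc *qdisc)
{
    return (qdisc->__state & __QDISC___STATE_RUNNING) ? true : false;
}

static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
    if (qdisc_is_running(qdisc))
        return false;               /* someone else already owns the dequeue loop */
    qdisc->__state |= __QDISC___STATE_RUNNING;
    return true;                    /* we now own the dequeue loop */
}

static inline void qdisc_run_end(struct Qdisc *qdisc)
{
    qdisc->__state &= ~__QDISC___STATE_RUNNING;
}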
2. The __qdisc_run function
void __qdisc_run(struct Qdisc *q)
{
    int quota = weight_p;
    int packets;

    while (qdisc_restart(q, &packets)) { /* keep dequeuing and transmitting in a loop */
        /*
         * Ordered by possible occurrence: Postpone processing if
         * 1. we've exceeded packet quota
         * 2. another process needs the CPU;
         */
        quota -= packets;
        if (quota <= 0 || need_resched()) { /* quota exhausted or reschedule pending: defer to the TX softirq and stop */
            __netif_schedule(q);
            break;
        }
    }

    qdisc_run_end(q); /* clear the qdisc running state */
}
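For completeness, qdisc_restart(), the loop body above, dequeues one batch from the qdisc and hands it to the driver via sch_direct_xmit(). A sketch matching the six-argument sch_direct_xmit() seen earlier (net/sched/sch_generic.c; details vary by kernel version):

/* Sketch of qdisc_restart(): dequeue a packet (or GSO segment list)
 * and pass it down; a nonzero return tells __qdisc_run() to keep going. */
static inline int qdisc_restart(struct Qdisc *q, int *packets)
{
    struct netdev_queue *txq;
    struct net_device *dev;
    spinlock_t *root_lock;
    struct sk_buff *skb;
    bool validate;

    /* dequeue the next packet from the qdisc */
    skb = dequeue_skb(q, &validate, packets);
    if (unlikely(!skb))
        return 0;   /* queue empty: stop the __qdisc_run loop */

    root_lock = qdisc_lock(q);
    dev = qdisc_dev(q);
    txq = skb_get_tx_queue(dev, skb);

    /* hand the packet to the driver under the TX queue lock */
    return sch_direct_xmit(skb, q, dev, txq, root_lock, validate);
}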
Below the qdisc layer the driver takes over. In ixgbe, the per-device private structure ties together the TX/RX rings, the interrupt vectors, and the NAPI contexts:

/* board specific private data structure */
struct ixgbe_adapter {
    /* TX rings */
    struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp;
    /* RX rings */
    struct ixgbe_ring *rx_ring[MAX_RX_QUEUES];
    /* each q_vector embeds a napi_struct; presumably the q_vectors
     * pair up one-to-one with the msix_entries below, one per
     * interrupt vector */
    struct ixgbe_q_vector *q_vector[MAX_Q_VECTORS];
    /* presumably the MSI-X entries backing those per-vector interrupts */
    struct msix_entry *msix_entries;
};
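The one-to-one pairing of q_vector and msix_entries can be seen in the driver's MSI-X setup. A heavily trimmed sketch of ixgbe_request_msix_irqs() from ixgbe_main.c (error paths and IRQ naming omitted; consult the actual driver for details):

/* Trimmed sketch: each q_vector is bound to its msix_entry by index,
 * and the entry's vector number is what request_irq() takes. */
static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
{
    int vector, err;

    for (vector = 0; vector < adapter->num_q_vectors; vector++) {
        struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
        struct msix_entry *entry = &adapter->msix_entries[vector];

        /* one IRQ per vector, with the q_vector as handler context */
        err = request_irq(entry->vector, ixgbe_msix_clean_rings, 0,
                          q_vector->name, q_vector);
        if (err)
            return err;
    }
    return 0;
}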
struct ixgbe_q_vector {
    struct ixgbe_adapter *adapter;
#ifdef CONFIG_IXGBE_DCA
    int cpu;            /* CPU for DCA */
#endif
    u16 v_idx;          /* index of q_vector within array, also used for
                         * finding the bit in EICR and friends that
                         * represents the vector for this ring */
    u16 itr;            /* Interrupt throttle rate written to EITR */
    struct ixgbe_ring_container rx, tx;

    struct napi_struct napi; /* embedded NAPI context for this vector */
    cpumask_t affinity_mask;
    int numa_node;
    struct rcu_head rcu;    /* to avoid race with update stats on free */
    char name[IFNAMSIZ + 9];

    /* for dynamic allocation of rings associated with this q_vector */
    struct ixgbe_ring ring[0] ____cacheline_internodealigned_in_smp;
};
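The embedded napi member is registered with the stack when the q_vector is allocated. Schematically (an excerpt trimmed from ixgbe_alloc_q_vector(); ixgbe_poll is the driver's poll callback and 64 the default NAPI weight):

/* Trimmed excerpt: hook this vector's NAPI context into the stack */
netif_napi_add(adapter->netdev, &q_vector->napi,
               ixgbe_poll, 64);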
struct napi_struct {
    /* The poll_list must only be managed by the entity which
     * changes the state of the NAPI_STATE_SCHED bit. This means
     * whoever atomically sets that bit can add this napi_struct
     * to the per-cpu poll_list, and whoever clears that bit
     * can remove from the list right before clearing the bit.
     */
    struct list_head poll_list;

    unsigned long state;
    int weight;
    unsigned int gro_count;
    int (*poll)(struct napi_struct *, int); /* driver-supplied poll callback */
#ifdef CONFIG_NETPOLL
    spinlock_t poll_lock;
    int poll_owner;
#endif
    struct net_device *dev;
    struct sk_buff *gro_list;
    struct sk_buff *skb;
    struct list_head dev_list;
};
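To see how the poll callback and weight interact, here is a minimal hypothetical poll implementation following the standard NAPI contract; the mydrv_* names are placeholders invented for illustration, not real ixgbe functions:

/* hypothetical per-queue context; only the napi member matters here */
struct mydrv_queue {
    struct napi_struct napi;
    /* ... ring pointers, counters ... */
};

static int mydrv_clean_rx_ring(struct mydrv_queue *ring, int budget); /* hypothetical */
static void mydrv_enable_queue_irq(struct mydrv_queue *ring);         /* hypothetical */

/* Process at most `budget` packets. If we did less than budget, the
 * ring is drained: leave polled mode and re-enable the queue's IRQ. */
static int mydrv_poll(struct napi_struct *napi, int budget)
{
    struct mydrv_queue *ring = container_of(napi, struct mydrv_queue, napi);
    int work_done = mydrv_clean_rx_ring(ring, budget);

    if (work_done < budget) {
        napi_complete(napi);          /* done: clear NAPI_STATE_SCHED */
        mydrv_enable_queue_irq(ring); /* let the next packet raise an IRQ */
    }
    return work_done; /* returning == budget keeps us on the poll list */
}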