skbuff: Move rxhash and vlan_tci to consolidate holes in sk_buff

This change helps to reduce the overall size of the sk_buff by moving
rxhash and vlan_tci so that the u16 values and u8 bitfields can be better
combined to create only one hole instead of multiple.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Stephen Ko <stephen.s.ko@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
commit 4031ae6edb (parent 8a0da21be8)
Author:    Alexander Duyck <alexander.h.duyck@intel.com>
Date:      2012-01-27 06:22:53 +00:00
Committer: Jeff Kirsher <jeffrey.t.kirsher@intel.com>

--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -438,6 +438,11 @@ struct sk_buff {
 #endif
 
 	int			skb_iif;
+
+	__u32			rxhash;
+
+	__u16			vlan_tci;
+
 #ifdef CONFIG_NET_SCHED
 	__u16			tc_index;	/* traffic control index */
 #ifdef CONFIG_NET_CLS_ACT
@@ -445,8 +450,6 @@ struct sk_buff {
 #endif
 #endif
 
-	__u32			rxhash;
-
 	__u16			queue_mapping;
 	kmemcheck_bitfield_begin(flags2);
 #ifdef CONFIG_IPV6_NDISC_NODETYPE
@@ -470,8 +473,6 @@ struct sk_buff {
 		__u32		dropcount;
 	};
 
-	__u16			vlan_tci;
-
 	sk_buff_data_t		transport_header;
 	sk_buff_data_t		network_header;
 	sk_buff_data_t		mac_header;
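
For illustration only, here is a minimal userspace sketch of the layout effect
this commit relies on. The struct and field names below are hypothetical and
far simpler than the real sk_buff; the offsets in the comments assume a
typical LP64 ABI with natural alignment.

#include <stdio.h>
#include <stdint.h>

/* Scattered ordering: each narrow field sits next to a wider one, so the
 * compiler inserts padding after tc_index, flags, and vlan_tci. */
struct scattered {
	int      skb_iif;        /* bytes 0..3                          */
	uint16_t tc_index;       /* 4..5, then a 2-byte hole            */
	uint32_t rxhash;         /* 8..11                               */
	uint16_t queue_mapping;  /* 12..13                              */
	uint8_t  flags;          /* 14, then a 1-byte hole              */
	uint32_t dropcount;      /* 16..19                              */
	uint16_t vlan_tci;       /* 20..21, then 2 bytes of tail padding */
};                               /* sizeof == 24, 5 padding bytes       */

/* Consolidated ordering, mirroring the commit: rxhash and vlan_tci move next
 * to the other u16/u8 members, so only one padding byte remains. */
struct consolidated {
	int      skb_iif;        /* 0..3                                */
	uint32_t rxhash;         /* 4..7                                */
	uint16_t vlan_tci;       /* 8..9                                */
	uint16_t tc_index;       /* 10..11                              */
	uint16_t queue_mapping;  /* 12..13                              */
	uint8_t  flags;          /* 14, then a single 1-byte hole       */
	uint32_t dropcount;      /* 16..19                              */
};                               /* sizeof == 20, 1 padding byte        */

int main(void)
{
	printf("scattered:    %zu bytes\n", sizeof(struct scattered));
	printf("consolidated: %zu bytes\n", sizeof(struct consolidated));
	return 0;
}

On the real structure, the same effect can be inspected with pahole from the
dwarves package (for example, pahole -C sk_buff vmlinux), which reports the
size, alignment, and every padding hole in a struct layout.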