/*******************************************************************************
Copyright (C) Marvell International Ltd. and its affiliates
This software file (the "File") is owned and distributed by Marvell
International Ltd. and/or its affiliates ("Marvell") under the following
alternative licensing terms. Once you have made an election to distribute the
File under one of the following license alternatives, please (i) delete this
introductory statement regarding license alternatives, (ii) delete the two
license alternatives that you have not elected to use and (iii) preserve the
Marvell copyright notice above.
********************************************************************************
Marvell GPL License Option
If you received this File from Marvell, you may opt to use, redistribute and/or
modify this File in accordance with the terms and conditions of the General
Public License Version 2, June 1991 (the "GPL License"), a copy of which is
available along with the File in the license.txt file or by writing to the Free
Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
DISCLAIMED. The GPL License provides additional details about this warranty
disclaimer.
*******************************************************************************/
#ifndef __mv_netdev_h__
#define __mv_netdev_h__
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <net/ip.h>
#include "os/mvOs.h"
/******************************************************
 * driver statistics control                          *
 ******************************************************/
#ifdef CONFIG_MV_ETH_STAT_ERR
#define STAT_ERR(c) c
#else
#define STAT_ERR(c)
#endif
#ifdef CONFIG_MV_ETH_STAT_INF
#define STAT_INFO(c) c
#else
#define STAT_INFO(c)
#endif
#ifdef CONFIG_MV_ETH_STAT_DBG
#define STAT_DBG(c) c
#else
#define STAT_DBG(c)
#endif
#ifdef CONFIG_MV_ETH_STAT_DIST
#define STAT_DIST(c) c
#else
#define STAT_DIST(c)
#endif
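/*
 * Each STAT_xxx(c) macro above expands to the statement 'c' when the
 * matching CONFIG_MV_ETH_STAT_xxx option is enabled and to nothing
 * otherwise, so the statistics counters compile away entirely in
 * builds that disable them. Typical use (as elsewhere in this file):
 *
 *	STAT_DBG(pp->stats.rx_netif++);
 */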
#ifdef CONFIG_MV_ETH_PNC
extern unsigned int mv_eth_pnc_ctrl_en;
int mv_eth_ctrl_pnc(int en);
#endif /* CONFIG_MV_ETH_PNC */
extern int mv_ctrl_txdone;
/****************************************************************************
* Rx buffer size: MTU + 2(Marvell Header) + 4(VLAN) + 14(MAC hdr) + 4(CRC) *
****************************************************************************/
#define RX_PKT_SIZE(mtu) \
	MV_ALIGN_UP((mtu) + 2 + 4 + ETH_HLEN + 4, CPU_D_CACHE_LINE_SIZE)
#define RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD)
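/*
 * Worked example (assuming a 32-byte CPU data cache line): for an MTU
 * of 1500, RX_PKT_SIZE(1500) = ALIGN_UP(1500 + 2 + 4 + 14 + 4, 32) =
 * ALIGN_UP(1524, 32) = 1536, and RX_BUF_SIZE(1536) adds NET_SKB_PAD
 * bytes of headroom on top of that.
 */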
#ifdef CONFIG_NET_SKB_RECYCLE
extern int mv_ctrl_recycle;
#define mv_eth_is_recycle() (mv_ctrl_recycle)
int mv_eth_skb_recycle(struct sk_buff *skb);
#else
#define mv_eth_is_recycle() 0
#endif /* CONFIG_NET_SKB_RECYCLE */
/******************************************************
 * interrupt control                                  *
 ******************************************************/
#ifdef CONFIG_MV_ETH_TXDONE_ISR
#define MV_ETH_TXDONE_INTR_MASK (((1 << CONFIG_MV_ETH_TXQ) - 1) << NETA_CAUSE_TXQ_SENT_DESC_OFFS)
#else
#define MV_ETH_TXDONE_INTR_MASK 0
#endif
#define MV_ETH_MISC_SUM_INTR_MASK (NETA_CAUSE_TX_ERR_SUM_MASK | NETA_CAUSE_MISC_SUM_MASK)
#define MV_ETH_RX_INTR_MASK (((1 << CONFIG_MV_ETH_RXQ) - 1) << NETA_CAUSE_RXQ_OCCUP_DESC_OFFS)
#define NETA_RX_FL_DESC_MASK (NETA_RX_F_DESC_MASK|NETA_RX_L_DESC_MASK)
/* NAPI CPU default group */
#define CPU_GROUP_DEF 0
#define MV_ETH_TRYLOCK(lock, flags) \
	(in_interrupt() ? spin_trylock((lock)) : \
	 spin_trylock_irqsave((lock), (flags)))

#define MV_ETH_LOCK(lock, flags) \
{ \
	if (in_interrupt()) \
		spin_lock((lock)); \
	else \
		spin_lock_irqsave((lock), (flags)); \
}

#define MV_ETH_UNLOCK(lock, flags) \
{ \
	if (in_interrupt()) \
		spin_unlock((lock)); \
	else \
		spin_unlock_irqrestore((lock), (flags)); \
}

#define MV_ETH_LIGHT_LOCK(flags) \
	if (!in_interrupt()) \
		local_irq_save(flags);

#define MV_ETH_LIGHT_UNLOCK(flags) \
	if (!in_interrupt()) \
		local_irq_restore(flags);

#define mv_eth_lock(txq_ctrl, flags) \
{ \
	if (txq_ctrl->flags & MV_ETH_F_TX_SHARED) \
		MV_ETH_LOCK(&txq_ctrl->queue_lock, flags) \
	else \
		MV_ETH_LIGHT_LOCK(flags) \
}

#define mv_eth_unlock(txq_ctrl, flags) \
{ \
	if (txq_ctrl->flags & MV_ETH_F_TX_SHARED) \
		MV_ETH_UNLOCK(&txq_ctrl->queue_lock, flags) \
	else \
		MV_ETH_LIGHT_UNLOCK(flags) \
}
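/*
 * Illustrative locking pattern (a sketch, not code from this driver):
 * mv_eth_lock()/mv_eth_unlock() take the per-queue spinlock only when
 * the TXQ is shared between CPUs (MV_ETH_F_TX_SHARED); otherwise they
 * fall back to the cheaper local-IRQ save/restore:
 *
 *	unsigned long flags = 0;
 *
 *	mv_eth_lock(txq_ctrl, flags);
 *	... manipulate txq_ctrl ...
 *	mv_eth_unlock(txq_ctrl, flags);
 */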
/******************************************************
 * rx / tx queues                                     *
 ******************************************************/
/*
* Debug statistics
*/
struct txq_stats {
#ifdef CONFIG_MV_ETH_STAT_ERR
	u32 txq_err;
#endif /* CONFIG_MV_ETH_STAT_ERR */
#ifdef CONFIG_MV_ETH_STAT_DBG
	u32 txq_tx;
	u32 txq_txdone;
#endif /* CONFIG_MV_ETH_STAT_DBG */
};

struct port_stats {
#ifdef CONFIG_MV_ETH_STAT_ERR
	u32 rx_error;
	u32 tx_timeout;
	u32 netif_stop;
	u32 ext_stack_empty;
	u32 ext_stack_full;
	u32 netif_wake;
	u32 state_err;
#endif /* CONFIG_MV_ETH_STAT_ERR */
#ifdef CONFIG_MV_ETH_STAT_INF
	u32 irq;
	u32 irq_err;
	u32 poll[CONFIG_NR_CPUS];
	u32 poll_exit[CONFIG_NR_CPUS];
	u32 tx_fragment;
	u32 tx_done;
	u32 tx_done_timer;
	u32 cleanup_timer;
	u32 link;
	u32 netdev_stop;
#ifdef CONFIG_MV_ETH_RX_SPECIAL
	u32 rx_special;
#endif /* CONFIG_MV_ETH_RX_SPECIAL */
#ifdef CONFIG_MV_ETH_TX_SPECIAL
	u32 tx_special;
#endif /* CONFIG_MV_ETH_TX_SPECIAL */
#endif /* CONFIG_MV_ETH_STAT_INF */
#ifdef CONFIG_MV_ETH_STAT_DBG
	u32 rxq[CONFIG_MV_ETH_RXQ];
	u32 rxq_fill[CONFIG_MV_ETH_RXQ];
	u32 rx_netif;
	u32 rx_nfp;
	u32 rx_nfp_drop;
	u32 rx_gro;
	u32 rx_gro_bytes;
	u32 rx_drop_sw;
	u32 rx_csum_hw;
	u32 rx_csum_sw;
	u32 tx_csum_hw;
	u32 tx_csum_sw;
	u32 tx_skb_free;
	u32 tx_sg;
	u32 tx_tso;
	u32 tx_tso_bytes;
	u32 ext_stack_put;
	u32 ext_stack_get;
#endif /* CONFIG_MV_ETH_STAT_DBG */
};
/* Defines the type of data saved in the shadow queue: SKB, eth_pbuf, or nothing */
#define MV_ETH_SHADOW_SKB 0x1
#define MV_ETH_SHADOW_EXT 0x2
/* Masks used for pp->flags */
#define MV_ETH_F_STARTED_BIT 0
#define MV_ETH_F_SWITCH_BIT 1 /* port is connected to the Switch using the Gateway driver */
#define MV_ETH_F_MH_BIT 2
#define MV_ETH_F_NO_PAD_BIT 3
#define MV_ETH_F_DBG_RX_BIT 4
#define MV_ETH_F_DBG_TX_BIT 5
#define MV_ETH_F_EXT_SWITCH_BIT 6 /* port is connected to the Switch without the Gateway driver */
#define MV_ETH_F_CONNECT_LINUX_BIT 7 /* port is connected to Linux netdevice */
#define MV_ETH_F_LINK_UP_BIT 8
#define MV_ETH_F_DBG_DUMP_BIT 9
#define MV_ETH_F_DBG_ISR_BIT 10
#define MV_ETH_F_DBG_POLL_BIT 11
#define MV_ETH_F_NFP_EN_BIT 12
#define MV_ETH_F_SUSPEND_BIT 13
#define MV_ETH_F_STARTED_OLD_BIT 14 /* STARTED_BIT value before suspend */
#define MV_ETH_F_STARTED (1 << MV_ETH_F_STARTED_BIT)
#define MV_ETH_F_SWITCH (1 << MV_ETH_F_SWITCH_BIT)
#define MV_ETH_F_MH (1 << MV_ETH_F_MH_BIT)
#define MV_ETH_F_NO_PAD (1 << MV_ETH_F_NO_PAD_BIT)
#define MV_ETH_F_DBG_RX (1 << MV_ETH_F_DBG_RX_BIT)
#define MV_ETH_F_DBG_TX (1 << MV_ETH_F_DBG_TX_BIT)
#define MV_ETH_F_EXT_SWITCH (1 << MV_ETH_F_EXT_SWITCH_BIT)
#define MV_ETH_F_CONNECT_LINUX (1 << MV_ETH_F_CONNECT_LINUX_BIT)
#define MV_ETH_F_LINK_UP (1 << MV_ETH_F_LINK_UP_BIT)
#define MV_ETH_F_DBG_DUMP (1 << MV_ETH_F_DBG_DUMP_BIT)
#define MV_ETH_F_DBG_ISR (1 << MV_ETH_F_DBG_ISR_BIT)
#define MV_ETH_F_DBG_POLL (1 << MV_ETH_F_DBG_POLL_BIT)
#define MV_ETH_F_NFP_EN (1 << MV_ETH_F_NFP_EN_BIT)
#define MV_ETH_F_SUSPEND (1 << MV_ETH_F_SUSPEND_BIT)
#define MV_ETH_F_STARTED_OLD (1 << MV_ETH_F_STARTED_OLD_BIT)
/* Masks used for cpu_ctrl->flags */
#define MV_ETH_F_TX_DONE_TIMER_BIT 0
#define MV_ETH_F_CLEANUP_TIMER_BIT 1
#define MV_ETH_F_TX_DONE_TIMER (1 << MV_ETH_F_TX_DONE_TIMER_BIT) /* 0x01 */
#define MV_ETH_F_CLEANUP_TIMER (1 << MV_ETH_F_CLEANUP_TIMER_BIT) /* 0x02 */
/* Masks used for tx_queue->flags */
#define MV_ETH_F_TX_SHARED_BIT 0
#define MV_ETH_F_TX_SHARED (1 << MV_ETH_F_TX_SHARED_BIT) /* 0x01 */
/* One of three TXQ states */
#define MV_ETH_TXQ_FREE 0
#define MV_ETH_TXQ_CPU 1
#define MV_ETH_TXQ_HWF 2
#define MV_ETH_TXQ_INVALID 0xFF
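/*
 * A TXQ presumably moves from FREE to CPU when a CPU claims it for
 * software transmit (mv_eth_ctrl_txq_cpu_own()) or to HWF when it is
 * claimed for hardware forwarding (mv_eth_ctrl_txq_hwf_own()); see the
 * prototypes below.
 */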
struct mv_eth_tx_spec {
	u32 hw_cmd;	/* tx_desc offset = 0xC */
	u16 flags;
	u8 txp;
	u8 txq;
#ifdef CONFIG_MV_ETH_TX_SPECIAL
	void (*tx_func)(u8 *data, int size, struct mv_eth_tx_spec *tx_spec);
#endif
};
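/*
 * In the tx_queue below, shadow_txq appears to be a software ring that
 * mirrors the HW descriptor ring: the transmit path records an skb or
 * extra-buffer cookie at shadow_txq_put_i for each descriptor it fills
 * (tagged MV_ETH_SHADOW_SKB / MV_ETH_SHADOW_EXT above), and tx-done
 * processing reads entries back at shadow_txq_get_i to free the
 * buffers; see the mv_eth_shadow_inc_get/put helpers further down.
 */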
struct tx_queue {
	MV_NETA_TXQ_CTRL *q;
	u8 cpu_owner[CONFIG_NR_CPUS];	/* counter */
	u8 hwf_rxp;
	u8 txp;
	u8 txq;
	int txq_size;
	int txq_count;
	int bm_only;
	u32 *shadow_txq;	/* can hold an MV_ETH_PKT * or a struct sk_buff * */
	int shadow_txq_put_i;
	int shadow_txq_get_i;
	struct txq_stats stats;
	spinlock_t queue_lock;
	MV_U32 txq_done_pkts_coal;
	unsigned long flags;
	int nfpCounter;
};
struct rx_queue {
	MV_NETA_RXQ_CTRL *q;
	int rxq_size;
	int missed;
	MV_U32 rxq_pkts_coal;
	MV_U32 rxq_time_coal;
};

struct dist_stats {
	u32 *rx_dist;
	int rx_dist_size;
	u32 *tx_done_dist;
	int tx_done_dist_size;
	u32 *tx_tso_dist;
	int tx_tso_dist_size;
};

struct cpu_ctrl {
	MV_U8 cpuTxqMask;
	MV_U8 cpuRxqMask;
	MV_U8 cpuTxqOwner;
	MV_U8 txq_tos_map[256];
	MV_U32 causeRxTx;
	struct napi_struct *napi;
	int napiCpuGroup;
	int txq;
	struct timer_list tx_done_timer;
	struct timer_list cleanup_timer;
	unsigned long flags;
};
struct eth_port {
	int port;
	MV_NETA_PORT_CTRL *port_ctrl;
	struct rx_queue *rxq_ctrl;
	struct tx_queue *txq_ctrl;
	int txp_num;
	struct net_device *dev;
	rwlock_t rwlock;
	struct bm_pool *pool_long;
	int pool_long_num;
#ifdef CONFIG_MV_ETH_BM_CPU
	struct bm_pool *pool_short;
	int pool_short_num;
#endif /* CONFIG_MV_ETH_BM_CPU */
	struct napi_struct *napiGroup[CONFIG_MV_ETH_NAPI_GROUPS];
	unsigned long flags;	/* MH, TIMER, etc. */
	u32 hw_cmd;	/* offset 0xC in the TX descriptor */
	int txp;
	u16 tx_mh;	/* 2B MH */
	struct port_stats stats;
	struct dist_stats dist_stats;
	int weight;
	MV_STACK *extArrStack;
	int extBufSize;
	spinlock_t extLock;
#ifdef CONFIG_MV_ETH_TOOL
	__u16 speed_cfg;
	__u8 duplex_cfg;
	__u8 autoneg_cfg;
	__u16 advertise_cfg;
#endif /* CONFIG_MV_ETH_TOOL */
#ifdef CONFIG_MV_ETH_RX_CSUM_OFFLOAD
	MV_U32 rx_csum_offload;
#endif /* CONFIG_MV_ETH_RX_CSUM_OFFLOAD */
#ifdef CONFIG_MV_ETH_RX_SPECIAL
	void (*rx_special_proc)(int port, int rxq, struct net_device *dev,
				struct sk_buff *skb, struct neta_rx_desc *rx_desc);
#endif /* CONFIG_MV_ETH_RX_SPECIAL */
#ifdef CONFIG_MV_ETH_TX_SPECIAL
	int (*tx_special_check)(int port, struct net_device *dev, struct sk_buff *skb,
				struct mv_eth_tx_spec *tx_spec_out);
#endif /* CONFIG_MV_ETH_TX_SPECIAL */
	MV_U32 cpuMask;
	MV_U32 rx_indir_table[256];
	struct cpu_ctrl *cpu_config[CONFIG_NR_CPUS];
	MV_U32 sgmii_serdes;
	int wol_mode;
};

struct eth_netdev {
	u16 tx_vlan_mh;	/* 2B MH */
	u16 vlan_grp_id;	/* VLAN group ID */
	u16 port_map;	/* switch port map */
	u16 link_map;	/* switch port link map */
	u16 cpu_port;	/* switch CPU port */
	u16 group;
};

struct eth_dev_priv {
	struct eth_port *port_p;
	struct eth_netdev *netdev_p;
};
#define MV_ETH_PRIV(dev) (((struct eth_dev_priv *)(netdev_priv(dev)))->port_p)
#define MV_DEV_PRIV(dev) (((struct eth_dev_priv *)(netdev_priv(dev)))->netdev_p)
#define MV_DEV_STAT(dev) (&((dev)->stats))
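/*
 * Usage sketch (illustrative only): a netdev callback recovers the
 * driver's per-port and per-netdev context from the net_device private
 * area via these macros, e.g.:
 *
 *	struct eth_port *pp = MV_ETH_PRIV(dev);	// per-port state
 *	struct eth_netdev *dp = MV_DEV_PRIV(dev);	// switch netdev state
 */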
/* define which Switch ports are relevant */
#define SWITCH_CONNECTED_PORTS_MASK 0x7F
#define MV_SWITCH_ID_0 0
#define MV_ETH_PORT_0 0
#define MV_ETH_PORT_1 1
struct pool_stats {
#ifdef CONFIG_MV_ETH_STAT_ERR
	u32 skb_alloc_oom;
	u32 stack_empty;
	u32 stack_full;
#endif /* CONFIG_MV_ETH_STAT_ERR */
#ifdef CONFIG_MV_ETH_STAT_DBG
	u32 bm_put;
	u32 stack_put;
	u32 stack_get;
	u32 skb_alloc_ok;
	u32 skb_recycled_ok;
	u32 skb_recycled_err;
#endif /* CONFIG_MV_ETH_STAT_DBG */
};

struct bm_pool {
	int pool;
	int capacity;
	int buf_num;
	int pkt_size;
	MV_ULONG physAddr;
	u32 *bm_pool;
	MV_STACK *stack;
	spinlock_t lock;
	u32 port_map;
	int missed;	/* FIXME: move to stats */
	struct pool_stats stats;
};
#ifdef CONFIG_MV_ETH_BM_CPU
#define MV_ETH_BM_POOLS MV_BM_POOLS
#define mv_eth_pool_bm(p) (p->bm_pool)
#define mv_eth_txq_bm(q) (q->bm_only)
#else
#define MV_ETH_BM_POOLS CONFIG_MV_ETH_PORTS_NUM
#define mv_eth_pool_bm(p) 0
#define mv_eth_txq_bm(q) 0
#endif /* CONFIG_MV_ETH_BM_CPU */
#ifdef CONFIG_MV_ETH_BM
MV_STATUS mv_eth_bm_config_get(void);
int mv_eth_bm_config_pkt_size_get(int pool);
int mv_eth_bm_config_pkt_size_set(int pool, int pkt_size);
int mv_eth_bm_config_short_pool_get(int port);
int mv_eth_bm_config_short_buf_num_get(int port);
int mv_eth_bm_config_long_pool_get(int port);
int mv_eth_bm_config_long_buf_num_get(int port);
void mv_eth_bm_config_print(void);
#endif /* CONFIG_MV_ETH_BM */
void mv_eth_stack_print(int port, MV_BOOL isPrintElements);
extern struct bm_pool mv_eth_pool[MV_ETH_BM_POOLS];
extern struct eth_port **mv_eth_ports;
static inline void mv_eth_interrupts_unmask(struct eth_port *pp)
{
	/* unmask interrupts */
	if (!(pp->flags & (MV_ETH_F_SWITCH | MV_ETH_F_EXT_SWITCH)))
		MV_REG_WRITE(NETA_INTR_MISC_MASK_REG(pp->port), NETA_CAUSE_LINK_CHANGE_MASK);

	MV_REG_WRITE(NETA_INTR_NEW_MASK_REG(pp->port),
		     (MV_ETH_MISC_SUM_INTR_MASK |
		      MV_ETH_TXDONE_INTR_MASK |
		      MV_ETH_RX_INTR_MASK));
}

static inline void mv_eth_interrupts_mask(struct eth_port *pp)
{
	/* clear all ethernet port interrupts */
	MV_REG_WRITE(NETA_INTR_MISC_CAUSE_REG(pp->port), 0);
	MV_REG_WRITE(NETA_INTR_OLD_CAUSE_REG(pp->port), 0);

	/* mask all ethernet port interrupts */
	MV_REG_WRITE(NETA_INTR_NEW_MASK_REG(pp->port), 0);
	MV_REG_WRITE(NETA_INTR_OLD_MASK_REG(pp->port), 0);
	MV_REG_WRITE(NETA_INTR_MISC_MASK_REG(pp->port), 0);
}

static inline void mv_eth_txq_update_shared(struct tx_queue *txq_ctrl, struct eth_port *pp)
{
	int numOfRefCpu, cpu;
	struct cpu_ctrl *cpuCtrl;

	numOfRefCpu = 0;

	for_each_possible_cpu(cpu) {
		cpuCtrl = pp->cpu_config[cpu];

		if (txq_ctrl->cpu_owner[cpu] == 0)
			cpuCtrl->cpuTxqOwner &= ~(1 << txq_ctrl->txq);
		else {
			numOfRefCpu++;
			cpuCtrl->cpuTxqOwner |= (1 << txq_ctrl->txq);
		}
	}

	if ((txq_ctrl->nfpCounter != 0) || (numOfRefCpu > 1))
		txq_ctrl->flags |= MV_ETH_F_TX_SHARED;
	else
		txq_ctrl->flags &= ~MV_ETH_F_TX_SHARED;
}

static inline int mv_eth_ctrl_is_tx_enabled(struct eth_port *pp)
{
	if (!pp)
		return -ENODEV;

	if (pp->flags & MV_ETH_F_CONNECT_LINUX)
		return 1;

	return 0;
}
static inline struct neta_tx_desc *mv_eth_tx_desc_get(struct tx_queue *txq_ctrl, int num)
{
	/* Are there enough TX descriptors left to send the packet? */
	if ((txq_ctrl->txq_count + num) >= txq_ctrl->txq_size) {
		/*
		printk(KERN_ERR "eth_tx: txq_ctrl->txq=%d - no_resource: txq_count=%d, txq_size=%d, num=%d\n",
			txq_ctrl->txq, txq_ctrl->txq_count, txq_ctrl->txq_size, num);
		*/
		STAT_ERR(txq_ctrl->stats.txq_err++);
		return NULL;
	}

	return mvNetaTxqNextDescGet(txq_ctrl->q);
}
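/*
 * Typical call pattern (sketch only): reserve the descriptor before
 * filling it, and bail out when the ring is exhausted (txq_err is
 * counted by mv_eth_tx_desc_get() itself):
 *
 *	struct neta_tx_desc *tx_desc = mv_eth_tx_desc_get(txq_ctrl, frags);
 *	if (tx_desc == NULL)
 *		goto out;	// hypothetical error path
 */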
static inline void mv_eth_tx_desc_flush(struct neta_tx_desc *tx_desc)
{
#if defined(MV_CPU_BE)
	mvNetaTxqDescSwap(tx_desc);
#endif /* MV_CPU_BE */

	mvOsCacheLineFlush(NULL, tx_desc);
}

static inline void *mv_eth_extra_pool_get(struct eth_port *pp)
{
	void *ext_buf;

	spin_lock(&pp->extLock);
	if (mvStackIndex(pp->extArrStack) == 0) {
		STAT_ERR(pp->stats.ext_stack_empty++);
		ext_buf = mvOsMalloc(CONFIG_MV_ETH_EXTRA_BUF_SIZE);
	} else {
		STAT_DBG(pp->stats.ext_stack_get++);
		ext_buf = (void *)mvStackPop(pp->extArrStack);
	}
	spin_unlock(&pp->extLock);

	return ext_buf;
}

static inline int mv_eth_extra_pool_put(struct eth_port *pp, void *ext_buf)
{
	spin_lock(&pp->extLock);
	if (mvStackIsFull(pp->extArrStack)) {
		STAT_ERR(pp->stats.ext_stack_full++);
		spin_unlock(&pp->extLock);
		mvOsFree(ext_buf);
		return 1;
	}
	mvStackPush(pp->extArrStack, (MV_U32)ext_buf);
	STAT_DBG(pp->stats.ext_stack_put++);
	spin_unlock(&pp->extLock);

	return 0;
}

static inline void mv_eth_add_cleanup_timer(struct cpu_ctrl *cpuCtrl)
{
	if (test_and_set_bit(MV_ETH_F_CLEANUP_TIMER_BIT, &(cpuCtrl->flags)) == 0) {
		cpuCtrl->cleanup_timer.expires = jiffies + ((HZ * CONFIG_MV_ETH_CLEANUP_TIMER_PERIOD) / 1000); /* ms */
		add_timer_on(&cpuCtrl->cleanup_timer, smp_processor_id());
	}
}

static inline void mv_eth_add_tx_done_timer(struct cpu_ctrl *cpuCtrl)
{
	if (test_and_set_bit(MV_ETH_F_TX_DONE_TIMER_BIT, &(cpuCtrl->flags)) == 0) {
		cpuCtrl->tx_done_timer.expires = jiffies + ((HZ * CONFIG_MV_ETH_TX_DONE_TIMER_PERIOD) / 1000); /* ms */
		add_timer_on(&cpuCtrl->tx_done_timer, smp_processor_id());
	}
}
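/*
 * Both helpers above arm a per-CPU deferred-work timer at most once:
 * the test_and_set_bit() guard prevents re-adding the timer while its
 * flag bit is still set (the timer handler is expected to clear the
 * bit). The CONFIG_MV_ETH_*_TIMER_PERIOD values are in milliseconds,
 * hence the (HZ * period) / 1000 conversion to jiffies.
 */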
static inline void mv_eth_shadow_inc_get(struct tx_queue *txq)
{
	txq->shadow_txq_get_i++;
	if (txq->shadow_txq_get_i == txq->txq_size)
		txq->shadow_txq_get_i = 0;
}

static inline void mv_eth_shadow_inc_put(struct tx_queue *txq)
{
	txq->shadow_txq_put_i++;
	if (txq->shadow_txq_put_i == txq->txq_size)
		txq->shadow_txq_put_i = 0;
}

static inline void mv_eth_shadow_dec_put(struct tx_queue *txq)
{
	if (txq->shadow_txq_put_i == 0)
		txq->shadow_txq_put_i = txq->txq_size - 1;
	else
		txq->shadow_txq_put_i--;
}
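/*
 * Wrap-around example: with txq_size = 4 and shadow_txq_put_i = 3,
 * mv_eth_shadow_inc_put() advances the index to 0, and a subsequent
 * mv_eth_shadow_dec_put() (e.g. to unwind a descriptor that could not
 * be filled) moves it back to 3.
 */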
/* Free pkt + skb pair */
static inline void mv_eth_pkt_free(struct eth_pbuf *pkt)
{
	struct sk_buff *skb = (struct sk_buff *)pkt->osInfo;

#ifdef CONFIG_NET_SKB_RECYCLE
	skb->skb_recycle = NULL;
	skb->hw_cookie = NULL;
#endif /* CONFIG_NET_SKB_RECYCLE */

	dev_kfree_skb_any(skb);
	mvOsFree(pkt);
}

static inline int mv_eth_pool_put(struct bm_pool *pool, struct eth_pbuf *pkt)
{
	unsigned long flags = 0;

	MV_ETH_LOCK(&pool->lock, flags);
	if (mvStackIsFull(pool->stack)) {
		STAT_ERR(pool->stats.stack_full++);
		MV_ETH_UNLOCK(&pool->lock, flags);

		/* free pkt+skb */
		mv_eth_pkt_free(pkt);
		return 1;
	}
	mvStackPush(pool->stack, (MV_U32)pkt);
	STAT_DBG(pool->stats.stack_put++);
	MV_ETH_UNLOCK(&pool->lock, flags);

	return 0;
}

/* Pass pkt to BM Pool or RXQ ring */
static inline void mv_eth_rxq_refill(struct eth_port *pp, int rxq,
				     struct eth_pbuf *pkt, struct bm_pool *pool, struct neta_rx_desc *rx_desc)
{
	if (mv_eth_pool_bm(pool)) {
		/* Refill BM pool */
		STAT_DBG(pool->stats.bm_put++);
		mvBmPoolPut(pkt->pool, (MV_ULONG)pkt->physAddr);
		mvOsCacheLineInv(NULL, rx_desc);
	} else {
		/* Refill Rx descriptor */
		STAT_DBG(pp->stats.rxq_fill[rxq]++);
		mvNetaRxDescFill(rx_desc, pkt->physAddr, (MV_U32)pkt);
	}
}
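/*
 * Note the asymmetry above: in the BM case the buffer goes back to the
 * hardware pool and the RX descriptor's cache line is only invalidated,
 * while in the software case the descriptor itself is rewritten with
 * the buffer's physical address and cookie.
 */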
#ifdef CONFIG_MV_ETH_SWITCH
struct mv_eth_switch_config {
	int mtu;
	int netdev_max;
	int netdev_cfg;
	unsigned char mac_addr[CONFIG_MV_ETH_SWITCH_NETDEV_NUM][MV_MAC_ADDR_SIZE];
	u16 board_port_map[CONFIG_MV_ETH_SWITCH_NETDEV_NUM];
};
extern int mv_eth_switch_netdev_first, mv_eth_switch_netdev_last;
extern struct mv_eth_switch_config switch_net_config[CONFIG_MV_ETH_PORTS_NUM];
extern struct net_device **mv_net_devs;
int mv_eth_switch_config_get(int use_existing_config, int port);
int mv_eth_switch_set_mac_addr(struct net_device *dev, void *mac);
void mv_eth_switch_set_multicast_list(struct net_device *dev);
int mv_eth_switch_change_mtu(struct net_device *dev, int mtu);
int mv_eth_switch_start(struct net_device *dev);
int mv_eth_switch_stop(struct net_device *dev);
void mv_eth_switch_status_print(int port);
int mv_eth_switch_port_add(struct net_device *dev, int port);
int mv_eth_switch_port_del(struct net_device *dev, int port);
#endif /* CONFIG_MV_ETH_SWITCH */
/******************************************************
 * Function prototypes                                *
 ******************************************************/
int mv_eth_stop(struct net_device *dev);
int mv_eth_change_mtu(struct net_device *dev, int mtu);
int mv_eth_check_mtu_internals(struct net_device *dev, int mtu);
int mv_eth_check_mtu_valid(struct net_device *dev, int mtu);
int mv_eth_set_mac_addr(struct net_device *dev, void *mac);
void mv_eth_set_multicast_list(struct net_device *dev);
int mv_eth_open(struct net_device *dev);
int mv_eth_port_suspend(int port);
int mv_eth_port_resume(int port);
int mv_eth_resume_clock(int port);
int mv_eth_suspend_clock(int port);
int mv_eth_suspend_internals(struct eth_port *pp);
int mv_eth_resume_internals(struct eth_port *pp, int mtu);
int mv_eth_restore_registers(struct eth_port *pp, int mtu);
void mv_eth_win_init(int port);
int mv_eth_resume_network_interfaces(struct eth_port *pp);
int mv_eth_wol_mode_set(int port, int mode);
int mv_eth_cpu_txq_mask_set(int port, int cpu, int txqMask);
irqreturn_t mv_eth_isr(int irq, void *dev_id);
int mv_eth_start_internals(struct eth_port *pp, int mtu);
int mv_eth_stop_internals(struct eth_port *pp);
int mv_eth_change_mtu_internals(struct net_device *netdev, int mtu);
int mv_eth_rx_reset(int port);
int mv_eth_txp_reset(int port, int txp);
MV_STATUS mv_eth_rx_ptks_coal_set(int port, int rxq, MV_U32 value);
MV_STATUS mv_eth_rx_time_coal_set(int port, int rxq, MV_U32 value);
MV_STATUS mv_eth_tx_done_ptks_coal_set(int port, int txp, int txq, MV_U32 value);
struct eth_port *mv_eth_port_by_id(unsigned int port);
struct net_device *mv_eth_netdev_by_id(unsigned int idx);
bool mv_eth_netdev_find(unsigned int if_index);
void mv_eth_mac_show(int port);
void mv_eth_tos_map_show(int port);
int mv_eth_rxq_tos_map_set(int port, int rxq, unsigned char tos);
int mv_eth_txq_tos_map_set(int port, int txq, int cpu, unsigned int tos);
int mv_eth_napi_set_cpu_affinity(int port, int group, int affinity);
int mv_eth_napi_set_rxq_affinity(int port, int group, int rxq);
void mv_eth_napi_group_show(int port);
int mv_eth_rxq_vlan_prio_set(int port, int rxq, unsigned char prio);
void mv_eth_vlan_prio_show(int port);
void mv_eth_netdev_print(struct net_device *netdev);
void mv_eth_status_print(void);
void mv_eth_port_status_print(unsigned int port);
void mv_eth_port_stats_print(unsigned int port);
void mv_eth_set_noqueue(struct net_device *dev, int enable);
void mv_eth_ctrl_hwf(int en);
int mv_eth_ctrl_recycle(int en);
void mv_eth_ctrl_txdone(int num);
int mv_eth_ctrl_tx_mh(int port, u16 mh);
int mv_eth_ctrl_tx_cmd(int port, u32 cmd);
int mv_eth_ctrl_txq_cpu_def(int port, int txp, int txq, int cpu);
int mv_eth_ctrl_txq_mode_get(int port, int txp, int txq, int *rx_port);
int mv_eth_ctrl_txq_cpu_own(int port, int txp, int txq, int add, int cpu);
int mv_eth_ctrl_txq_hwf_own(int port, int txp, int txq, int rxp);
int mv_eth_ctrl_flag(int port, u32 flag, u32 val);
int mv_eth_ctrl_txq_size_set(int port, int txp, int txq, int value);
int mv_eth_ctrl_rxq_size_set(int port, int rxq, int value);
int mv_eth_ctrl_port_buf_num_set(int port, int long_num, int short_num);
int mv_eth_ctrl_pool_size_set(int pool, int pkt_size);
int mv_eth_ctrl_set_poll_rx_weight(int port, u32 weight);
int mv_eth_shared_set(int port, int txp, int txq, int value);
void mv_eth_tx_desc_print(struct neta_tx_desc *desc);
void mv_eth_pkt_print(struct eth_pbuf *pkt);
void mv_eth_rx_desc_print(struct neta_rx_desc *desc);
void mv_eth_skb_print(struct sk_buff *skb);
void mv_eth_link_status_print(int port);
#ifdef CONFIG_MV_PON
typedef MV_BOOL(*PONLINKSTATUSPOLLFUNC)(void); /* prototype for PON link status polling function */
typedef void (*PONLINKSTATUSNOTIFYFUNC)(MV_BOOL state); /* prototype for PON link status notification function */
MV_BOOL mv_pon_link_status(void);
void mv_pon_link_state_register(PONLINKSTATUSPOLLFUNC poll_func, PONLINKSTATUSNOTIFYFUNC *notify_func);
void mv_pon_ctrl_omci_type(MV_U16 type);
void mv_pon_ctrl_omci_rx_gh(int en);
void mv_pon_omci_print(void);
#endif /* CONFIG_MV_PON */
#ifdef CONFIG_MV_ETH_TX_SPECIAL
void mv_eth_tx_special_check_func(int port, int (*func)(int port, struct net_device *dev,
							struct sk_buff *skb, struct mv_eth_tx_spec *tx_spec_out));
#endif /* CONFIG_MV_ETH_TX_SPECIAL */
#ifdef CONFIG_MV_ETH_RX_SPECIAL
void mv_eth_rx_special_proc_func(int port, void (*func)(int port, int rxq, struct net_device *dev,
							struct sk_buff *skb, struct neta_rx_desc *rx_desc));
#endif /* CONFIG_MV_ETH_RX_SPECIAL */
int mv_eth_poll(struct napi_struct *napi, int budget);
void mv_eth_link_event(struct eth_port *pp, int print);
int mv_eth_rx_policy(u32 cause);
int mv_eth_refill(struct eth_port *pp, int rxq,
		  struct eth_pbuf *pkt, struct bm_pool *pool, struct neta_rx_desc *rx_desc);
u32 mv_eth_txq_done(struct eth_port *pp, struct tx_queue *txq_ctrl);
u32 mv_eth_tx_done_gbe(struct eth_port *pp, u32 cause_tx_done, int *tx_todo);
u32 mv_eth_tx_done_pon(struct eth_port *pp, int *tx_todo);
#ifdef CONFIG_MV_ETH_RX_DESC_PREFETCH
struct neta_rx_desc *mv_eth_rx_prefetch(struct eth_port *pp,
					MV_NETA_RXQ_CTRL *rx_ctrl, int rx_done, int rx_todo);
#endif /* CONFIG_MV_ETH_RX_DESC_PREFETCH */
#ifdef CONFIG_MV_ETH_BM
void *mv_eth_bm_pool_create(int pool, int capacity, MV_ULONG *physAddr);
#endif /* CONFIG_MV_ETH_BM */
#if defined(CONFIG_MV_ETH_HWF) && !defined(CONFIG_MV_ETH_BM_CPU)
MV_STATUS mv_eth_hwf_bm_create(int port, int mtuPktSize);
void mv_hwf_bm_dump(void);
#endif /* CONFIG_MV_ETH_HWF && !CONFIG_MV_ETH_BM_CPU */
#ifdef CONFIG_MV_ETH_NFP
int mv_eth_nfp_ctrl(struct net_device *dev, int en);
int mv_eth_nfp_ext_ctrl(struct net_device *dev, int en);
int mv_eth_nfp_ext_add(struct net_device *dev, int port);
int mv_eth_nfp_ext_del(struct net_device *dev);
MV_STATUS mv_eth_nfp(struct eth_port *pp, int rxq, struct neta_rx_desc *rx_desc,
		     struct eth_pbuf *pkt, struct bm_pool *pool);
#endif /* CONFIG_MV_ETH_NFP */
#endif /* __mv_netdev_h__ */