/*
<:copyright-BRCM:2016:DUAL/GPL:standard

   Broadcom Proprietary and Confidential.(c) 2016 Broadcom
   All Rights Reserved

Unless you and Broadcom execute a separate written software license
agreement governing use of this software, this software is licensed
to you under the terms of the GNU General Public License version 2
(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
with the following added to such license:

   As a special exception, the copyright holders of this software give
   you permission to link this software with independent modules, and
   to copy and distribute the resulting executable under terms of your
   choice, provided that you also meet, for each linked independent
   module, the terms and conditions of the license of that module.
   An independent module is a module which is not derived from this
   software. The special exception does not apply to any modifications
   of the software.

Not withstanding the above, under no circumstances may you combine
this software in any way with any other Broadcom software provided
under a license other than the GPL, without Broadcom's express prior
written consent.

:>
 */
#include "bcmtr_pcie.h"
#include "bcmolt_tr_pcie_specific.h"

/*
   Synchronization flow of the DMA database between the Host and Maple.
   The process takes place after Maple is loaded and runs from DDR.
   ============================================================
   Host                                 Maple
   ============================================================
   write to SRAM its TX and RX queue size
   write to SRAM DDR_FINISH indication
   wait for PRM_BIT from Maple          run from DDR
                                        get from SRAM host tx and rx queue size
                                        calls pre_connect
                                        write to SRAM opaque data
                                        write to SRAM PRM_BIT
   read from SRAM opaque data           calls connect
   clear PRM_BIT from Maple             wait for PRM_BIT from Host
   calls pre_connect
   calls connect
   write to SRAM PRM_BIT
   register rx interrupt handler        register rx interrupt handler
   ============================================================
   Both are now ready to send/receive packets through DMA:
   ============================================================
*/
/* misc local_bd info structure
 _____________________________________________________
|___31 - 25_____|__24___|___23 - 16_____|___15 - 0____|
|___reserved____|__OWN__|__channel ID___|___length____|
*/
#define BCM_PCIED_BD_OWNERSHIP_SHIFT 24
#define BCM_PCIED_BD_OWNERSHIP_MASK (0x1UL << BCM_PCIED_BD_OWNERSHIP_SHIFT)

#define BCM_PCIED_BD_CHANNEL_ID_SHIFT 16
#define BCM_PCIED_BD_CHANNEL_ID_MASK (0xffUL << BCM_PCIED_BD_CHANNEL_ID_SHIFT)

#define BCM_PCIED_BD_PKT_LENGTH_MASK 0xffff
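
/* Example (editorial sketch): composing and parsing a local_bd word with the
   masks above, mirroring what bcmtr_pcie_send()/bcmtr_pcie_receive() do.
   The endianness conversion and the LOCAL_OWNER/REMOTE_OWNER values are
   defined further below.

       uint32_t bd = (uint32_t)length                              // bits 15:0
           | ((uint32_t)channel << BCM_PCIED_BD_CHANNEL_ID_SHIFT)  // bits 23:16
           | ((uint32_t)owner << BCM_PCIED_BD_OWNERSHIP_SHIFT);    // bit 24

       uint32_t len = bd & BCM_PCIED_BD_PKT_LENGTH_MASK;
       uint32_t chn = (bd & BCM_PCIED_BD_CHANNEL_ID_MASK) >> BCM_PCIED_BD_CHANNEL_ID_SHIFT;
       uint32_t own = (bd & BCM_PCIED_BD_OWNERSHIP_MASK) >> BCM_PCIED_BD_OWNERSHIP_SHIFT;
*/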
/* length_and_isrenable */
#define BCM_PCIED_INTR_ENABLE_SHIFT 31
#define BCM_PCIED_INTR_ENABLE_MASK (0x1UL << BCM_PCIED_INTR_ENABLE_SHIFT)

#define BCM_PCIED_TRANSFER_SIZE_MASK 0x01ffffffUL
#define BCM_PCIED_TRANSFER_SIZE_SHIFT 0

/* last_next_indicator */
#define BCM_PCIED_LAST_RECORD_SHIFT 31
#define BCM_PCIED_LAST_RECORD_MASK (0x1UL << BCM_PCIED_LAST_RECORD_SHIFT)

#define BCM_PCIED_NEXT_CONTINOUS_SHIFT 2
#define BCM_PCIED_NEXT_CONTINOUS_MASK (0x1UL << BCM_PCIED_NEXT_CONTINOUS_SHIFT)

#define BCM_PCIE_ALL_DMA_INTERRUPTS_MASK (DMA_RX_DONE_MASK | DMA_RX_ERROR_MASK | DMA_TX_DONE_MASK | DMA_TX_ERROR_MASK)

/* packet descriptor - used by the HW DMA mechanism; the layout is defined by HW, do not change it */
typedef struct
{
    uint32_t ddr_buff_address_low;  /* Maple address - word[0] */
    uint32_t pcie_pkt_address_low;  /* Host address  - word[1] */
    uint32_t pcie_pkt_address_high; /*               - word[2] */
    uint32_t length_and_isrenable;  /* bit 31 - interrupt enable; bits 24:0 - transfer length */
    uint32_t last_next_indicator;   /* bit 31 - last indicator; bit 30 - direction; bit 2 - next descriptor, if 1 -> continuous */
    uint32_t next_pd_address_low;
    uint32_t next_pd_address_high;
    uint32_t ddr_buff_address_high; /*               - word[7] */
} bcm_pcied_pd;

/* transfer unit for TX/RX */
typedef struct
{
    bcm_pcied_pd data_pd;            /* PD used to transfer the data packet */
    bcm_pcied_pd remote_to_local_pd; /* PD used to transfer the shadow_rbd field from the local CPU
                                        to the local_bd field on the peer CPU */
} bcm_pcied_tu;
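
/* Note (editorial): as implied by the pointer arithmetic in bcmtr_pcie_connect(),
   each TU ring is laid out in the Maple DDR window as the TU array immediately
   followed by one owner word per entry (a sketch, not a HW-mandated struct):

       bcm_pcied_tu ring[q_length];    // data_pd + remote_to_local_pd per entry
       uint32_t     owner[q_length];   // LOCAL_OWNER / REMOTE_OWNER per entry

   The tx_owner and peer_tx_owner pointers of the driver structure below point
   into these per-ring owner arrays. */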

/* Opaque data; in the current implementation it is sent from Maple to the Host only.
   It provides the offsets, from the start of the Maple DDR window, of the tx and rx
   TU rings (tx and rx in terms of the Host). */
struct bcmtr_pcie_opaque_data
{
    uint32_t tx_tu_ring_offset;
    uint32_t rx_tu_ring_offset;
};

/* main structure of the driver */
typedef struct
{
    bcm_pcied_tu *tx_tu_ring; /* TX DMA ring */
    bcm_pcied_tu *rx_tu_ring; /* RX DMA ring */

    uint32_t *local_bd;   /* used for receive; updated by the peer on its tx */
    uint32_t *shadow_rbd; /* used by the local side in tx as a staging buffer for updating the remote's local_bd */

    uint32_t *tx_owner;      /* used by the local side in tx to check that a TU is owned locally; updated by local and peer */
    uint32_t *peer_tx_owner; /* used in rx to update the tx_owner of the remote side */

    bcmos_buf **tx_nbuff_save; /* array of network buffer pointers, used by TX to store sent buffers */
    bcmos_buf **rx_nbuff_save; /* array of network buffer pointers, used by RX to store buffers allocated for rx */

    uint32_t current_tx; /* index of the current tx tu */
    uint32_t conf_tx;    /* index of the last reclaimed tx tu */
    uint32_t current_rx; /* index of the current rx tu */
    int32_t prev_tx;

    uint32_t max_tx_index;
    uint32_t max_rx_index;

    uint32_t max_mtu;

    unsigned long ddr_win_base;
    bcm_pcied_isr_data isrdata; /* includes pcie register base, id and irq number */

    TX_LOCKDEF

    /* counters */
    uint32_t rx_counter;
    uint32_t tx_counter;
    uint32_t rx_pcie_empty_counter;
    uint32_t tx_pcie_full_counter;

    /* saved meta-data */
    uint32_t txq_length;
    uint32_t rxq_length;
    uint32_t *tx_tu_ring_orig; /* TX DMA ring (original allocation) */
    uint32_t *rx_tu_ring_orig; /* RX DMA ring (original allocation) */

} bcm_pcied_comm_data;

f_bcmtr_int bcmtr_pcie_rx_irq_handler;
f_bcmtr_int bcmtr_pcie_tx_irq_handler;

static void default_tx_done_callback(uint8_t device, bcmos_buf *buf);

static f_bcmtr_done tx_done_handler = default_tx_done_callback;

/* used for error messages */
#if defined(__KERNEL__)
#define pcie_print(fmt, args...) printk("%s: %d: " fmt, __FUNCTION__ , __LINE__, ##args)
#else
#define pcie_print bcmos_printf
#endif

#define LOCAL_OWNER 0
#define REMOTE_OWNER 1

#define INCREMENT_RECEIVED(device) bcmtr_pcie_data[device].rx_counter++
#define INCREMENT_TRANSMITED(device) bcmtr_pcie_data[device].tx_counter++

/* advance an index into a DMA ring, wrapping at maxindex */
#define NEXT_INDEX(maxindex, index) \
do{ \
    index++; \
    if (index > maxindex) \
        index = 0; \
} while(0)

/* calculate a register address from the register base and the offset */
#define PCI_REG_ADDRESS(reg) (uint32_t *)(bcmtr_pcie_data[device].isrdata.pcie_reg_base + reg)

/* Alias to avoid the confusing "BCM_ERR_OK" define - an "error" should not be "ok" (D.B.) */
#define BCMTR_SUCCESS BCM_ERR_OK

#define BCMTR_PARANOID_CHECK() \
    do { \
        if (!bcmtr_pcie_data) \
            return BCM_ERR_NORES; \
        if (device >= bcmtr_max_devices_number)\
            return BCM_ERR_RANGE; \
    } while(0)

#define BCMTR_PARANOID_CHECK_TYPE(t) \
    do { \
        if (!bcmtr_pcie_data) \
            return (t)BCM_ERR_NORES; \
        if (device >= bcmtr_max_devices_number)\
            return (t)BCM_ERR_RANGE; \
    } while(0)

#define BCMTR_PARANOID_CHECK_EXT() \
    do { \
        if (!bcmtr_pcie_data) \
            return BCM_ERR_NORES; \
        if (device >= bcmtr_max_devices_number)\
            return BCM_ERR_RANGE; \
        if (!bcmtr_pcie_data[device].isrdata.pcie_reg_base)\
            return BCM_ERR_NORES; \
    } while(0)

/* device data array */
static bcm_pcied_comm_data *bcmtr_pcie_data;

static uint32_t bcmtr_max_devices_number;

/* stop one dma engine */
static inline void stop_dma(uint8_t device)
{
    volatile uint32_t value;

    value = bcm_pci_read32(PCI_REG_ADDRESS((uint32_t)DESCRIPTOR_CONTROL));
    value &= ~DMA_TX_SW_DESC_LIST_CTRL_STS_TX_DMA_RUN_STOP_MASK;
    bcm_pci_write32(PCI_REG_ADDRESS(DESCRIPTOR_CONTROL), value);
}

/* Default tx-done callback */
static void default_tx_done_callback(uint8_t device, bcmos_buf *buf)
{
    bcmos_free(buf);
}

/* free all allocated buffers */
static bcmos_errno free_buffers(uint8_t device, const char *error_string)
{
    uint32_t i;
    bcm_pcied_comm_data *current_device = &bcmtr_pcie_data[device];

    pcie_print("%s", error_string);

    if (current_device->rx_nbuff_save)
    {
        for (i = 0; i < current_device->rxq_length; i++)
        {
            if (current_device->rx_nbuff_save[i])
                bcmos_buf_free(current_device->rx_nbuff_save[i]);
        }
        bcmos_free(current_device->rx_nbuff_save);
    }

    if (current_device->tx_nbuff_save)
    {
        for (i = 0; i < current_device->txq_length; i++)
        {
            if (current_device->tx_nbuff_save[i])
                bcmos_buf_free(current_device->tx_nbuff_save[i]);
        }
        bcmos_free(current_device->tx_nbuff_save);
    }

    if (current_device->local_bd)
        bcmos_dma_free(device, current_device->local_bd);

    if (current_device->shadow_rbd)
        bcmos_dma_free(device, current_device->shadow_rbd);

    DESTROY_TXLOCK(device);

    bcmtr_pcie_free_rings(device, current_device->tx_tu_ring_orig, current_device->rx_tu_ring_orig);

    memset(&bcmtr_pcie_data[device], 0, sizeof(bcm_pcied_comm_data));

    return BCM_ERR_NOMEM;
}

bcmos_errno bcmtr_pcie_init(uint8_t max_devices)
{
    bcmtr_max_devices_number = max_devices;

    bcmtr_pcie_data = (bcm_pcied_comm_data *)bcmos_calloc(bcmtr_max_devices_number * sizeof(bcm_pcied_comm_data));
    if (!bcmtr_pcie_data)
    {
        pcie_print("Driver cannot be initialized\n");
        return BCM_ERR_NOMEM;
    }

    return BCMTR_SUCCESS;
}
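
/* Example (editorial sketch): typical host-side bring-up order, following the
   synchronization flow described at the top of this file. The cfg field names
   are those used by bcmtr_pcie_pre_connect() below; the SRAM handshake itself
   is handled outside this driver and is only hinted at in the comments.

       bcmtr_pcie_pre_connect_cfg cfg = {0};
       bcmtr_pcie_opaque_data local_data, peer_data;

       bcmtr_pcie_init(1);                           // once, for all devices
       cfg.txq_size = 128;                           // example sizes only
       cfg.rxq_size = 128;
       cfg.max_mtu  = 2048;
       // cfg.rx_irq, cfg.pcie_reg_base and cfg.ddr_win_base are platform-specific
       bcmtr_pcie_pre_connect(0, &cfg, &local_data); // publish local_data via SRAM
       // ... exchange opaque data / PRM_BIT with the peer over SRAM ...
       bcmtr_pcie_connect(0, &peer_data);            // peer_data as read from SRAM
*/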

bcmos_errno bcmtr_pcie_pre_connect(uint8_t device, const bcmtr_pcie_pre_connect_cfg *cfg, bcmtr_pcie_opaque_data *opaque_data)
{
    bcm_pcied_comm_data *current_device;
    bcmos_errno ret_code;
    uint32_t i;

    BCMTR_PARANOID_CHECK();

    if (!cfg)
    {
        pcie_print("Second parameter (config) is a NULL pointer\n");
        return BCM_ERR_NULL;
    }

    if (!opaque_data)
    {
        pcie_print("Return area pointer (third parameter) is NULL\n");
        return BCM_ERR_NULL;
    }

    current_device = &bcmtr_pcie_data[device];

    CREATE_TXLOCK(device);

    /* copy the user's data to the internal database */
    current_device->txq_length = cfg->txq_size;
    current_device->rxq_length = cfg->rxq_size;
    current_device->max_tx_index = cfg->txq_size - 1;
    current_device->max_rx_index = cfg->rxq_size - 1;
    current_device->max_mtu = cfg->max_mtu;
    current_device->isrdata.device = device;
    current_device->isrdata.rx_irq = cfg->rx_irq;
    current_device->isrdata.pcie_reg_base = cfg->pcie_reg_base;
    current_device->ddr_win_base = cfg->ddr_win_base;
    current_device->prev_tx = -1;

    /*************************/
    /* allocate the database */
    /*************************/

    bcmtr_pcie_specific_init(current_device->isrdata.pcie_reg_base);

    ret_code = bcmtr_create_tu_rings(current_device->txq_length, current_device->rxq_length,
            (void **)&current_device->tx_tu_ring, (void **)&current_device->rx_tu_ring,
            (void **)&current_device->tx_tu_ring_orig, (void **)&current_device->rx_tu_ring_orig,
            current_device->isrdata.pcie_reg_base);
    if (ret_code != BCMTR_SUCCESS)
    {
        pcie_print("Failed to create transfer unit rings : (error=%d)\n", ret_code);
        return ret_code;
    }

    /* allocate the array of local_bd for the RX chain */
    current_device->local_bd = (uint32_t *)bcmos_dma_alloc(device, sizeof(*current_device->local_bd) * current_device->rxq_length);
    if (!current_device->local_bd)
        return free_buffers(device, "Failed to allocate local_bd for RX ring\n");

    /* set the remote side as owner of all local_bd */
    for (i = 0; i < current_device->rxq_length; i++)
        *(uint32_t*)(&current_device->local_bd[i]) =
            BCMOS_ENDIAN_CPU_TO_LITTLE_U32((uint32_t)(REMOTE_OWNER << BCM_PCIED_BD_OWNERSHIP_SHIFT));

    /* allocate the array of shadow_rbd for the TX chain */
    current_device->shadow_rbd = (uint32_t *)bcmos_dma_alloc(device, sizeof(*current_device->shadow_rbd) * current_device->txq_length);
    if (!current_device->shadow_rbd)
        return free_buffers(device, "Failed to allocate shadow_rbd for TX ring\n");
    /* clear all shadow_rbd; not strictly needed, but useful for debugging */
    memset(current_device->shadow_rbd, 0, sizeof(*current_device->shadow_rbd) * current_device->txq_length);

    /* allocate the network buffer pointer arrays */
    current_device->tx_nbuff_save = (bcmos_buf**)bcmos_calloc(sizeof(bcmos_buf*) * current_device->txq_length);
    if (!current_device->tx_nbuff_save)
        return free_buffers(device, "Failed to allocate array for TX buffers pointers\n");

    current_device->rx_nbuff_save = (bcmos_buf**)bcmos_calloc(sizeof(bcmos_buf*) * current_device->rxq_length);
    if (!current_device->rx_nbuff_save)
        return free_buffers(device, "Failed to allocate array for RX buffers pointers\n");

    /* Update the return value; tx and rx are crossed for the peer.
       If the corresponding area was not allocated, send zero to the peer to indicate it. */
    if (current_device->tx_tu_ring)
        opaque_data->rx_tu_ring_offset = (unsigned long)current_device->tx_tu_ring - current_device->ddr_win_base;

    if (current_device->rx_tu_ring)
        opaque_data->tx_tu_ring_offset = (unsigned long)current_device->rx_tu_ring - current_device->ddr_win_base;

    return BCMTR_SUCCESS;
}

bcmos_errno bcmtr_pcie_connect(uint8_t device, const bcmtr_pcie_opaque_data *opaque_data)
{
    uint32_t i;
    bcm_pcied_comm_data *current_device;
    bcm_pcied_tu *tu_ptr = NULL;
    uint8_t *pkt_ptr;

    BCMTR_PARANOID_CHECK();

    current_device = &bcmtr_pcie_data[device];

    /* Update the local tu_ring pointers; no need to cross them here since that was
       already done in pre-connect. Update only the pointers that are still NULL. */
    if (!current_device->rx_tu_ring)
        current_device->rx_tu_ring = (bcm_pcied_tu*)(opaque_data->rx_tu_ring_offset + current_device->ddr_win_base);
    else
    {
        /* set next to point to the beginning of the ring */
        tu_ptr = &current_device->rx_tu_ring[current_device->max_rx_index];
        bcm_pci_write32(&(tu_ptr->remote_to_local_pd.next_pd_address_low), (uint32_t)bcmos_virt_to_phys(current_device->rx_tu_ring));
    }

    if (!current_device->tx_tu_ring)
        current_device->tx_tu_ring = (bcm_pcied_tu*)(opaque_data->tx_tu_ring_offset + current_device->ddr_win_base);
    else
    {
        /* set next to point to the beginning of the ring */
        tu_ptr = &current_device->tx_tu_ring[current_device->max_tx_index];
        bcm_pci_write32(&(tu_ptr->remote_to_local_pd.next_pd_address_low), (uint32_t)bcmos_virt_to_phys(current_device->tx_tu_ring));
    }

    /* Now all tu_rings are allocated and synchronized; time to set tx_owner and peer_tx_owner. */
    current_device->tx_owner = (uint32_t*)((unsigned long)current_device->tx_tu_ring + sizeof(bcm_pcied_tu) * current_device->txq_length);
    current_device->peer_tx_owner = (uint32_t*)((unsigned long)current_device->rx_tu_ring + sizeof(bcm_pcied_tu) * current_device->rxq_length);

    /* preallocate rx net buffers */
    for (i = 0; i < current_device->rxq_length; i++)
    {
        current_device->rx_nbuff_save[i] = bcmos_buf_alloc(current_device->max_mtu);
        if (!current_device->rx_nbuff_save[i])
            return free_buffers(device, "Failed to allocate buffer for RX\n");
    }
    /**********************************************
     OK, now we are ready to initialize the tu_rings.
     NOTE: everything related to creating the rings (last_next_indicator,
     next_pd_address_low/high) was already done by bcmtr_create_tu_rings.
    ***********************************************/

    /* Fill the TX ring; fill only the fields this side "knows",
       all peer-related pointers will be updated by the peer. */
    for (i = 0, tu_ptr = current_device->tx_tu_ring; i < current_device->txq_length; i++, tu_ptr++)
    {
        /* fill the remote_to_local_pd - only the last indicator */
        if (i == current_device->max_tx_index)
            bcm_pci_write32(&(tu_ptr->remote_to_local_pd.last_next_indicator), BCM_PCIED_LAST_RECORD_MASK);
        else
            bcm_pci_write32(&(tu_ptr->remote_to_local_pd.last_next_indicator), (BCM_PCIED_LAST_RECORD_MASK | BCM_PCIED_NEXT_CONTINOUS_MASK));

        /************************
         fill data_pd
         - leave length_and_isrenable at zero, it will be set during tx
         - leave the source data buffer pointers at zero, they will be set during tx
         - the dest. data buffer will be set by the peer
        *************************/

        /************************
         fill remote_to_local_pd
         the dest. data buffer will be set by the peer
        *************************/
        bcmtr_set_source_buffer_address((uint32_t *)&tu_ptr->remote_to_local_pd, &current_device->shadow_rbd[i]);

        /* set the next indicator for the current tu */
        bcm_pci_write32(&tu_ptr->data_pd.last_next_indicator, BCM_PCIED_NEXT_CONTINOUS_MASK);
        bcm_pci_write32(&(tu_ptr->remote_to_local_pd.last_next_indicator), BCM_PCIED_NEXT_CONTINOUS_MASK);

        /* set the length and interrupt indicator */
        bcm_pci_write32(&tu_ptr->remote_to_local_pd.length_and_isrenable, (BCM_PCIED_INTR_ENABLE_MASK | (sizeof(uint32_t) << BCM_PCIED_TRANSFER_SIZE_SHIFT)));
    }

    /* Fill the RX ring; fill only the fields this side "knows",
       all peer-related pointers will be updated by the peer. */
    for (i = 0, tu_ptr = current_device->rx_tu_ring; i < current_device->rxq_length; i++, tu_ptr++)
    {
        /* fill the remote_to_local_pd - only the last indicator */
        if (i == current_device->max_rx_index)
            bcm_pci_write32(&(tu_ptr->remote_to_local_pd.last_next_indicator), BCM_PCIED_LAST_RECORD_MASK);
        else
            bcm_pci_write32(&(tu_ptr->remote_to_local_pd.last_next_indicator), (BCM_PCIED_LAST_RECORD_MASK | BCM_PCIED_NEXT_CONTINOUS_MASK));

        /************************
         fill data_pd
         - leave length_and_isrenable at zero, it will be set by the peer during tx
         - leave the source data buffer pointers at zero, they will be set by the peer during tx
        *************************/

        /* take the data pointer from the nbuf saved in rx_nbuff_save */
        pkt_ptr = bcmos_buf_data(current_device->rx_nbuff_save[i]);

        /* invalidate the cache for the data buffers */
        bcmos_prepare_for_dma_read(pkt_ptr, current_device->max_mtu);

        /* set the destination data buffer */
        bcmtr_set_dest_buffer_address((uint32_t *)&tu_ptr->data_pd, pkt_ptr);

        /************************
         fill remote_to_local_pd
         - remote_to_local_pd.length_and_isrenable is already set by the peer
         - last_next_indicator will be set by the peer during tx
        ************************/

        /* set the destination for the BD */
        bcmtr_set_dest_buffer_address((uint32_t *)&tu_ptr->remote_to_local_pd, &current_device->local_bd[i]);

        /* set the next-pd indicator for both parts of the transfer unit */
        bcm_pci_write32(&tu_ptr->data_pd.last_next_indicator, BCM_PCIED_NEXT_CONTINOUS_MASK);
        bcm_pci_write32(&tu_ptr->remote_to_local_pd.last_next_indicator, BCM_PCIED_NEXT_CONTINOUS_MASK);

        /* set the length and interrupt indicator */
        bcm_pci_write32(&tu_ptr->remote_to_local_pd.length_and_isrenable, (BCM_PCIED_INTR_ENABLE_MASK | (sizeof(uint32_t) << BCM_PCIED_TRANSFER_SIZE_SHIFT)));

    }
#ifndef SIMULATION_BUILD
    bcmtr_connect_isr(&current_device->isrdata);
#endif

    /* Clear and disable all interrupts at L2 */
    bcm_pci_write32(PCI_REG_ADDRESS(DMA_INTR_MASK_SET), BCM_PCIE_ALL_DMA_INTERRUPTS_MASK);
    bcm_pci_write32(PCI_REG_ADDRESS(DMA_INTR_CLEAR), BCM_PCIE_ALL_DMA_INTERRUPTS_MASK);

    /* Enable L2 interrupts at L1 */
    bcm_pci_write32(PCI_REG_ADDRESS(DMA_INTR1_MASK_CLEAR), PCIE_L2_INTR_MASK);

    return BCMTR_SUCCESS;
}

bcmos_errno bcmtr_pcie_send(uint8_t device, uint8_t channel, bcmos_buf *net_buff)
{
    bcm_pcied_comm_data *current_device;
    bcm_pcied_tu *current_tu;
    uint32_t current_tx;
    uint32_t next_tx;
    uint32_t length;
    uint8_t *pkt_ptr;
#ifdef CHECK_PARAM
    BCMTR_PARANOID_CHECK();

    if (!net_buff)
    {
        pcie_print("Network buffer is null\n");
        return BCM_ERR_NULL;
    }
#endif

    current_device = &bcmtr_pcie_data[device];

    length = bcmos_buf_length(net_buff);
    if ((length > current_device->max_mtu) || (length == 0))
    {
        pcie_print("Packet length %d error (MTU=%d)\n", length, current_device->max_mtu);
        return BCM_ERR_RANGE;
    }

    LOCK_TX();

    current_tx = current_device->current_tx;

    /* check the owner in the tx_owner list */
    if (bcm_pci_read32(&current_device->tx_owner[current_tx]) != LOCAL_OWNER)
    {
        UNLOCK_TX();
        bcmtr_pcie_data[device].tx_pcie_full_counter++;
        return BCM_ERR_QUEUE_FULL;
    }

    /* Do not forget to change the owner */
    bcm_pci_write32(&current_device->tx_owner[current_tx], REMOTE_OWNER);

    /* Prepare the shadow_rbd */
    *(uint32_t*)(&current_device->shadow_rbd[current_tx]) =
        BCMOS_ENDIAN_CPU_TO_LITTLE_U32((uint32_t)(length | channel << BCM_PCIED_BD_CHANNEL_ID_SHIFT | LOCAL_OWNER << BCM_PCIED_BD_OWNERSHIP_SHIFT));

    current_tu = &current_device->tx_tu_ring[current_tx];

    /* take the data buffer from the network buffer */
    pkt_ptr = bcmos_buf_data(net_buff);

    /* set the data_pd source data buffer */
    bcmtr_set_source_buffer_address((uint32_t *)&current_tu->data_pd, pkt_ptr);

#ifndef HARDWARE_TEST1
    /* flush the cache for the data buffer */
    bcmos_prepare_for_dma_write(pkt_ptr, length);
#endif

    /* set the data_pd length_and_isrenable (no need to enable the interrupt for data_pd) */
    bcm_pci_write32(&current_tu->data_pd.length_and_isrenable, length << BCM_PCIED_TRANSFER_SIZE_SHIFT);

    /* set the 'last_indicator' bit in the current TU; takes care of wrapping */
    if (current_tx == current_device->max_tx_index)
        bcm_pci_write32(&current_tu->remote_to_local_pd.last_next_indicator, BCM_PCIED_LAST_RECORD_MASK);
    else
        bcm_pci_write32(&current_tu->remote_to_local_pd.last_next_indicator, BCM_PCIED_LAST_RECORD_MASK | BCM_PCIED_NEXT_CONTINOUS_MASK);

    bcmos_barrier();

    /* Clear the 'last_indicator' bit in the previous TU; takes care of wrapping.
       Skipped on the very first transmission after connect (prev_tx == -1),
       when there is no previous TU and indexing the ring with -1 would write
       out of bounds. */
    if (current_device->prev_tx >= 0)
    {
        if (current_device->prev_tx != current_device->max_tx_index)
            bcm_pci_write32(&(current_device->tx_tu_ring[current_device->prev_tx].remote_to_local_pd.last_next_indicator), BCM_PCIED_NEXT_CONTINOUS_MASK);
        else
            bcm_pci_write32(&(current_device->tx_tu_ring[current_device->prev_tx].remote_to_local_pd.last_next_indicator), 0);
    }

    bcmos_barrier();

    /* set the WAKE bit in the DMA registers */
    bcm_pci_write32(PCI_REG_ADDRESS(WAKEUP_DMA), DMA_TX_WAKE_CTRL_WAKE_MASK);

    INCREMENT_TRANSMITED(device);

    next_tx = current_tx + 1;
    if (next_tx > current_device->max_tx_index)
        next_tx = 0;

#ifndef HARDWARE_TEST1
    /* release the previous network buffer */
    if (current_device->tx_nbuff_save[current_tx])
    {
        tx_done_handler(device, current_device->tx_nbuff_save[current_tx]);
        current_device->conf_tx = next_tx;
    }

    /* store the network buffer pointer */
    current_device->tx_nbuff_save[current_device->current_tx] = net_buff;
#endif
    /* move the current_tx index */
    current_device->prev_tx = (int32_t)current_device->current_tx;
    current_device->current_tx = next_tx;

    UNLOCK_TX();
    return BCMTR_SUCCESS;
}
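
/* Example (editorial sketch): sending one message on channel 0. 'payload' and
   'len' are hypothetical caller data; any buffer from bcmos_buf_alloc() with
   0 < length <= max_mtu works. On success the driver owns the buffer until the
   tx-done callback runs.

       bcmos_buf *buf = bcmos_buf_alloc(len);
       memcpy(bcmos_buf_data(buf), payload, len);
       bcmos_buf_length_set(buf, len);
       if (bcmtr_pcie_send(0, 0, buf) == BCM_ERR_QUEUE_FULL)
       {
           bcmtr_pcie_tx_collect(0);  // reclaim completed TUs, then retry or drop
       }
*/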

/** Reclaim buffers that have already been transmitted
 * \param[in] device Maple device index
 * \returns number of reclaimed TX buffers (>= 0) or a bcmos_errno error code (< 0)
 */
int bcmtr_pcie_tx_collect(uint8_t device)
{
    bcm_pcied_comm_data *current_device;
    uint32_t conf_tx;
    int n = 0;

#ifdef CHECK_PARAM
    BCMTR_PARANOID_CHECK_TYPE(int);
#endif

    current_device = &bcmtr_pcie_data[device];

    LOCK_TX();

    conf_tx = current_device->conf_tx;
    while (bcm_pci_read32(&current_device->tx_owner[conf_tx]) == LOCAL_OWNER &&
           current_device->tx_nbuff_save[conf_tx])
    {
        ++n;

        /* release the transmitted network buffer */
        tx_done_handler(device, current_device->tx_nbuff_save[conf_tx]);
        current_device->tx_nbuff_save[conf_tx] = NULL;

        conf_tx++;
        if (conf_tx > current_device->max_tx_index)
            conf_tx = 0;
    }
    current_device->conf_tx = conf_tx;

    UNLOCK_TX();

    return n;
}
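
/* Example (editorial sketch): tx_collect is typically driven from the TX-done
   interrupt path or from a periodic timer:

       int reclaimed = bcmtr_pcie_tx_collect(device);
       if (reclaimed < 0)
           pcie_print("tx collect failed: %d\n", reclaimed);
*/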

/*
   Receive a data packet.
   Returns the channel and a pointer to the network buffer containing the data.
*/
bcmos_errno bcmtr_pcie_receive(uint8_t device, uint8_t *channel, bcmos_buf **buf)
{
    bcm_pcied_comm_data *current_device;
    bcm_pcied_tu *current_tu;
    uint32_t bd_info;
    uint32_t length;
    uint32_t current_rx;

#ifndef HARDWARE_TEST1
    bcmos_buf *net_ptr = NULL;
    uint8_t *pkt_ptr = NULL;
#endif

#ifdef CHECK_PARAM
    BCMTR_PARANOID_CHECK_EXT();
    if (!channel || !buf)
        return BCM_ERR_NULL;
#endif
    *buf = NULL;

    current_device = &bcmtr_pcie_data[device];

    current_rx = current_device->current_rx;
    current_tu = &current_device->rx_tu_ring[current_rx];

    /* Read the local_bd into a local variable */
    bd_info = BCMOS_ENDIAN_LITTLE_TO_CPU_U32(*(uint32_t*)(&current_device->local_bd[current_rx]));

    /* check the owner in the local_bd; it is updated by the peer */
    if ((bd_info & BCM_PCIED_BD_OWNERSHIP_MASK) >> BCM_PCIED_BD_OWNERSHIP_SHIFT != LOCAL_OWNER)
    {
        bcmtr_pcie_data[device].rx_pcie_empty_counter++;
        return BCM_ERR_QUEUE_EMPTY;
    }

    /* change the owner in the local_bd to the remote side */
    *(uint32_t*)(&current_device->local_bd[current_rx]) =
        BCMOS_ENDIAN_CPU_TO_LITTLE_U32((uint32_t)((REMOTE_OWNER << BCM_PCIED_BD_OWNERSHIP_SHIFT) | bd_info));

    /* take the packet length from the local_bd */
    length = bd_info & BCM_PCIED_BD_PKT_LENGTH_MASK;

    if ((length == 0) || (length > current_device->max_mtu))
    {
        pcie_print("Packet length error : %d\n", length);

        /* update the remote side */
        bcm_pci_write32(&current_device->peer_tx_owner[current_rx], LOCAL_OWNER);

        /* move the current rx index */
        NEXT_INDEX(current_device->max_rx_index, current_device->current_rx);

        return BCM_ERR_MSG_ERROR;
    }

    /* update the packet channel id */
    *channel = (bd_info & BCM_PCIED_BD_CHANNEL_ID_MASK) >> BCM_PCIED_BD_CHANNEL_ID_SHIFT;

#ifndef HARDWARE_TEST1
    /* allocate a new buffer to receive the next packet */
    net_ptr = bcmos_buf_alloc(current_device->max_mtu);

    /* If the allocation failed, clean up */
    if (!net_ptr)
    {
        /* update the remote side */
        bcm_pci_write32(&current_device->peer_tx_owner[current_rx], LOCAL_OWNER);

        /* move the current rx index */
        NEXT_INDEX(current_device->max_rx_index, current_device->current_rx);

        return BCM_ERR_NOMEM;
    }
#endif
    /* take the data pointer */
    *buf = current_device->rx_nbuff_save[current_rx];

    /* invalidate the cache for the received network buffer */
    bcmos_prepare_for_dma_read(bcmos_buf_data(*buf), length);

    /* fill the network buffer */
    bcmos_buf_length_set(*buf, length);

    /* update statistics */
    INCREMENT_RECEIVED(device);

#ifndef HARDWARE_TEST1
    pkt_ptr = bcmos_buf_data(net_ptr);

    /* invalidate the cache for the new network buffer */
    bcmos_prepare_for_dma_read(pkt_ptr, current_device->max_mtu);

    /* replace the rx network buffer with the new one */
    current_device->rx_nbuff_save[current_rx] = net_ptr;

    /* set the data_pd destination data buffer */
    bcmtr_set_dest_buffer_address((uint32_t *)&current_tu->data_pd, pkt_ptr);
#endif
    /* update the remote side */
    bcm_pci_write32(&current_device->peer_tx_owner[current_rx], LOCAL_OWNER);

    /* move the current rx index */
    NEXT_INDEX(current_device->max_rx_index, current_device->current_rx);

    return BCMTR_SUCCESS;
}
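
/* Example (editorial sketch): draining received packets, e.g. from the rx
   interrupt callback or a polling thread. handle_packet() is a hypothetical
   consumer that takes ownership of the buffer.

       uint8_t channel;
       bcmos_buf *buf;
       while (bcmtr_pcie_receive(0, &channel, &buf) == BCM_ERR_OK)
           handle_packet(channel, buf);
       // BCM_ERR_QUEUE_EMPTY just means there is nothing more to read
*/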

/* enable level 2 RX interrupts */
bcmos_errno bcmtr_pcie_rxint_enable(uint8_t device)
{
#ifdef CHECK_PARAM
    BCMTR_PARANOID_CHECK_EXT();
#endif
    bcm_pci_write32(PCI_REG_ADDRESS(DMA_INTR_MASK_CLEAR), DMA_RX_ERROR_MASK | DMA_RX_DONE_MASK);

    return BCMTR_SUCCESS;
}

/* disable level 2 RX interrupts */
bcmos_errno bcmtr_pcie_rxint_disable(uint8_t device)
{
#ifdef CHECK_PARAM
    BCMTR_PARANOID_CHECK_EXT();
#endif
    bcm_pci_write32(PCI_REG_ADDRESS(DMA_INTR_MASK_SET), DMA_RX_ERROR_MASK | DMA_RX_DONE_MASK);

    return BCMTR_SUCCESS;
}

bcmos_errno bcmtr_pcie_rxint_clear(uint8_t device)
{
#ifdef CHECK_PARAM
    BCMTR_PARANOID_CHECK_EXT();
#endif
    bcm_pci_write32(PCI_REG_ADDRESS(DMA_INTR_CLEAR), DMA_RX_ERROR_MASK | DMA_RX_DONE_MASK);

    return BCMTR_SUCCESS;
}

/* Enable level 2 "TX handled" interrupt */
bcmos_errno bcmtr_pcie_txint_enable(uint8_t device)
{
#ifdef CHECK_PARAM
    BCMTR_PARANOID_CHECK_EXT();
#endif
    bcm_pci_write32(PCI_REG_ADDRESS(DMA_INTR_MASK_CLEAR), DMA_TX_ERROR_MASK | DMA_TX_DONE_MASK);

    return BCMTR_SUCCESS;
}

/* Disable level 2 "TX handled" interrupt */
bcmos_errno bcmtr_pcie_txint_disable(uint8_t device)
{
#ifdef CHECK_PARAM
    BCMTR_PARANOID_CHECK_EXT();
#endif
    bcm_pci_write32(PCI_REG_ADDRESS(DMA_INTR_MASK_SET), DMA_TX_ERROR_MASK | DMA_TX_DONE_MASK);

    return BCMTR_SUCCESS;
}

/* Clear level 2 "TX handled" interrupt */
bcmos_errno bcmtr_pcie_txint_clear(uint8_t device)
{
#ifdef CHECK_PARAM
    BCMTR_PARANOID_CHECK_EXT();
#endif
    bcm_pci_write32(PCI_REG_ADDRESS(DMA_INTR_CLEAR), DMA_TX_ERROR_MASK | DMA_TX_DONE_MASK);

    return BCMTR_SUCCESS;
}

bcmos_errno bcmtr_pcie_disconnect(uint8_t device)
{
    BCMTR_PARANOID_CHECK_EXT();

    stop_dma(device);

    /* disable interrupts */
    bcmtr_pcie_rxint_disable(device);
    bcmtr_pcie_txint_disable(device);

    /* clear DMA interrupts */
    bcmtr_pcie_rxint_clear(device);
    bcmtr_pcie_txint_clear(device);

    /* Disable in the L1 controller */
    bcm_pci_write32(PCI_REG_ADDRESS(DMA_INTR1_MASK_SET), PCIE_L2_INTR_MASK);

    /* Free the irq. On the host, if the irq is shared, it must not be freed. */
    bcmtr_pcie_free_irq(bcmtr_pcie_data[device].isrdata.rx_irq, &bcmtr_pcie_data[device].isrdata);

    free_buffers(device, "PCIE disconnected\n");

    return BCMTR_SUCCESS;
}

bcmos_errno bcmtr_pcie_rx_irq_cblk_register(f_bcmtr_int rx_isr_clbk)
{
    bcmtr_pcie_rx_irq_handler = rx_isr_clbk;

    return BCMTR_SUCCESS;
}

bcmos_errno bcmtr_pcie_rx_irq_cblk_unregister(void)
{
    bcmtr_pcie_rx_irq_handler = NULL;

    return BCMTR_SUCCESS;
}

bcmos_errno bcmtr_pcie_tx_irq_cblk_register(f_bcmtr_int tx_isr_clbk)
{
    bcmtr_pcie_tx_irq_handler = tx_isr_clbk;

    return BCMTR_SUCCESS;
}

bcmos_errno bcmtr_pcie_tx_irq_cblk_unregister(void)
{
    bcmtr_pcie_tx_irq_handler = NULL;

    return BCMTR_SUCCESS;
}

bcmos_errno bcmtr_pcie_tx_done_cblk_register(f_bcmtr_done tx_done_cb)
{
    tx_done_handler = tx_done_cb;

    return BCMTR_SUCCESS;
}

bcmos_errno bcmtr_pcie_tx_done_cblk_unregister(void)
{
    tx_done_handler = default_tx_done_callback;

    return BCMTR_SUCCESS;
}

void bcmtr_pcie_exit(void)
{
    uint32_t i;

    if (bcmtr_pcie_data)
    {
        for (i = 0; i < bcmtr_max_devices_number; i++)
        {
            bcmtr_pcie_disconnect(i);
        }
        bcmos_free(bcmtr_pcie_data);
    }
    bcmtr_pcie_data = NULL;
}

bcmos_errno bcmtr_pcie_get_statistics(uint8_t device, uint32_t clear, bcm_pcied_stat *stat)
{
    bcm_pcied_comm_data *current_device = &bcmtr_pcie_data[device];

    BCMTR_PARANOID_CHECK();

    if (!stat)
        return BCM_ERR_NULL;

    stat->rx_counter = current_device->rx_counter;
    stat->tx_counter = current_device->tx_counter;
    stat->rx_done_isr_counter = current_device->isrdata.rx_done_num;
    stat->rx_err_isr_counter = current_device->isrdata.rx_err_num;
    stat->tx_done_isr_counter = current_device->isrdata.tx_done_num;
    stat->tx_err_isr_counter = current_device->isrdata.tx_err_num;
    stat->rx_pcie_empty_counter = current_device->rx_pcie_empty_counter;
    stat->tx_pcie_full_counter = current_device->tx_pcie_full_counter;
    if (clear)
    {
        current_device->rx_counter = 0;
        current_device->tx_counter = 0;
        current_device->isrdata.rx_done_num = 0;
        current_device->isrdata.rx_err_num = 0;
        current_device->isrdata.tx_done_num = 0;
        current_device->isrdata.tx_err_num = 0;
        current_device->rx_pcie_empty_counter = 0;
        current_device->tx_pcie_full_counter = 0;
    }

    return BCMTR_SUCCESS;
}
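
/* Example (editorial sketch): reading and clearing the counters, e.g. from a
   CLI "stats" command.

       bcm_pcied_stat stat;
       if (bcmtr_pcie_get_statistics(0, 1, &stat) == BCM_ERR_OK)  // 1 = clear after read
           pcie_print("rx=%u tx=%u tx_full=%u rx_empty=%u\n",
               stat.rx_counter, stat.tx_counter,
               stat.tx_pcie_full_counter, stat.rx_pcie_empty_counter);
*/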

/*=============================================*/

/* used by dump procedures - may write to std output or user buffer */
#define pcie_bprint(buffer, fmt, args...) \
    {\
        if (!buffer)\
            pcie_print(fmt, ##args);\
        else\
            sprintf(buffer, fmt, ##args);\
    }

static inline void dump_bd(char *output, char *title, uint32_t bd)
{
    uint32_t owner, chn, length;

    owner = (bd & BCM_PCIED_BD_OWNERSHIP_MASK) >> BCM_PCIED_BD_OWNERSHIP_SHIFT;
    chn = (bd & BCM_PCIED_BD_CHANNEL_ID_MASK) >> BCM_PCIED_BD_CHANNEL_ID_SHIFT;
    length = bd & BCM_PCIED_BD_PKT_LENGTH_MASK;

    pcie_bprint(output, "\t%s: (0x%08x) owner = %s channel = 0x%x(%d) length = 0x%x(%d)\n",
        title, (unsigned int)bd, owner == 0 ? "local" : "peer", (unsigned int)chn, chn, (unsigned int)length, length);
}

static inline void dump_owner(char *output, char *title, uint32_t *current_entry)
{
    uint32_t owner, entry;

    entry = bcm_pci_read32(current_entry);
    owner = (entry & BCM_PCIED_BD_OWNERSHIP_MASK) >> BCM_PCIED_BD_OWNERSHIP_SHIFT;
    pcie_bprint(output, "\t%s: owner = %s\n", title, owner == 0 ? "local" : "peer");
}

static inline void dump_pd(char *output, bcm_pcied_pd *current_pd)
{
    uint32_t ddrl, ddrh, pktl, pkth, len, last, nextl, nexth;

    ddrl = bcm_pci_read32((uint32_t *)&current_pd->ddr_buff_address_low);
    pktl = bcm_pci_read32((uint32_t *)&current_pd->pcie_pkt_address_low);
    pkth = bcm_pci_read32((uint32_t *)&current_pd->pcie_pkt_address_high);
    len = bcm_pci_read32((uint32_t *)&current_pd->length_and_isrenable);
    last = bcm_pci_read32((uint32_t *)&current_pd->last_next_indicator);
    nextl = bcm_pci_read32((uint32_t *)&current_pd->next_pd_address_low);
    nexth = bcm_pci_read32((uint32_t *)&current_pd->next_pd_address_high);
    ddrh = bcm_pci_read32((uint32_t *)&current_pd->ddr_buff_address_high);

    pcie_bprint(output,
        "\t\t%-20s = 0x%08x\n"
        "\t\t%-20s = 0x%08x\n"
        "\t\t%-20s = 0x%08x\n"
        "\t\t%-20s = 0x%08x\n"
        "\t\t%-20s = 0x%08x\n"
        "\t\t%-20s = 0x%08x\n"
        "\t\t%-20s = 0x%08x\n"
        "\t\t%-20s = 0x%08x\n",
        "ddr_low", ddrl,
        "pkt_low", pktl,
        "pkt_high", pkth,
        "length_and_isrenable", len,
        "last_next_indicator", last,
        "next_low", nextl,
        "next_high", nexth,
        "ddr_high", ddrh);
}

static int32_t dump_one_tx(char *output, uint8_t device, uint32_t current_index)
{
    bcm_pcied_tu *current_tu;
    bcm_pcied_comm_data *current_device;
    char *buffer = output;

    current_device = &bcmtr_pcie_data[device];
    current_tu = &current_device->tx_tu_ring[current_index];
    dump_bd(buffer, "SBD", BCMOS_ENDIAN_LITTLE_TO_CPU_U32(*(uint32_t*)&current_device->shadow_rbd[current_index]));
    if (buffer)
        buffer += strlen(buffer);

    dump_owner(buffer, "tx_owner", &current_device->tx_owner[current_index]);
    if (buffer)
        buffer += strlen(buffer);

    pcie_bprint(buffer, "\tPD data = 0x%lx\n", (unsigned long)&current_tu->data_pd);
    if (buffer)
        buffer += strlen(buffer);

    dump_pd(buffer, &current_tu->data_pd);
    if (buffer)
        buffer += strlen(buffer);

    pcie_bprint(buffer, "\tPD shadow_rbd = 0x%lx\n", (unsigned long)&current_tu->remote_to_local_pd);
    if (buffer)
        buffer += strlen(buffer);

    dump_pd(buffer, &current_tu->remote_to_local_pd);
    if (buffer)
        buffer += strlen(buffer);

    pcie_bprint(buffer, "\tTX netbuff = 0x%lx\n", (unsigned long)current_device->tx_nbuff_save[current_index]);

    if (output)
        return strlen(output);

    return 0;
}

static int32_t dump_one_rx(char *output, uint8_t device, uint32_t current_index)
{
    bcm_pcied_tu *current_tu;
    bcm_pcied_comm_data *current_device;
    char *buffer = output;

    current_device = &bcmtr_pcie_data[device];
    current_tu = &current_device->rx_tu_ring[current_index];

    dump_bd(buffer, "BD", BCMOS_ENDIAN_LITTLE_TO_CPU_U32(*(uint32_t*)&current_device->local_bd[current_index]));
    if (buffer)
        buffer += strlen(buffer);

    dump_owner(buffer, "peer_tx_owner", &current_device->peer_tx_owner[current_index]);
    if (buffer)
        buffer += strlen(buffer);

    pcie_bprint(buffer, "\tPD data = 0x%lx\n", (unsigned long)&current_tu->data_pd);
    if (buffer)
        buffer += strlen(buffer);

    dump_pd(buffer, &current_tu->data_pd);
    if (buffer)
        buffer += strlen(buffer);

    pcie_bprint(buffer, "\tPD shadow_rbd = 0x%lx\n", (unsigned long)&current_tu->remote_to_local_pd);
    if (buffer)
        buffer += strlen(buffer);

    dump_pd(buffer, &current_tu->remote_to_local_pd);
    if (buffer)
        buffer += strlen(buffer);

    pcie_bprint(buffer, "\tRX netbuff = 0x%lx\n", (unsigned long)current_device->rx_nbuff_save[current_index]);

    if (output)
        return strlen(output);
    return 0;
}

bcmos_errno bcmtr_pcie_tx_dump(char *output, uint8_t device, int32_t start, int32_t number_of_entries)
{
    int32_t i;
    int32_t len = 0;
    char *buffer = output;
    int32_t from = start;
    int32_t to;

    pcie_bprint(buffer, "Dump Tx ring\n");
    if (!bcmtr_pcie_data)
    {
        pcie_print("Driver is not initialized\n");
        return BCM_ERR_NORES;
    }

    if (device >= bcmtr_max_devices_number)
    {
        pcie_print("Device parameter is greater than the maximum number of devices (%d)\n", bcmtr_max_devices_number);
        return BCM_ERR_RANGE;
    }

    if (device != bcmtr_pcie_data[device].isrdata.device)
    {
        pcie_print("*** Data Corrupted ***\n");
        return BCM_ERR_RANGE;
    }

    if (start == -1) /* from the current index, number_of_entries entries */
        from = bcmtr_pcie_data[device].current_tx;

    to = from + number_of_entries;

    if (to == from)
        to++;

    if (to > bcmtr_pcie_data[device].txq_length)
        to = bcmtr_pcie_data[device].txq_length;

    if (buffer)
        len = strlen(buffer);

    for (i = from; i < to; i++)
    {
        if (buffer)
            buffer = output + len; /* move buffer to the next free space */

        if (i == bcmtr_pcie_data[device].current_tx)
        {
            pcie_bprint(buffer, "Current = %d\n", i);
        }
        else
        {
            pcie_bprint(buffer, "Index = %d\n", i);
        }

        if (buffer)
        {
            /* add the last line to len - buffer contains only the last line */
            len += strlen(buffer);
            buffer = output + len;
        }
        /* dump_one_tx returns the length of the lines it printed */
        len += dump_one_tx(buffer, device, i);
    }

    return BCM_ERR_OK;
}

bcmos_errno bcmtr_pcie_rx_dump(char *output, uint8_t device, int32_t start, int32_t number_of_entries)
{
    int32_t i;
    int32_t len = 0;
    char *buffer = output;
    int32_t from = start;
    int32_t to;

    pcie_bprint(buffer, "Dump Rx ring\n");
    if (!bcmtr_pcie_data)
    {
        pcie_print("Driver is not initialized\n");
        return BCM_ERR_NORES;
    }

    if (device >= bcmtr_max_devices_number)
    {
        pcie_print("Device parameter is greater than the maximum number of devices (%d)\n", bcmtr_max_devices_number);
        return BCM_ERR_RANGE;
    }

    if (device != bcmtr_pcie_data[device].isrdata.device)
    {
        pcie_print("*** Data Corrupted ***\n");
        return BCM_ERR_RANGE;
    }

    if (start == -1) /* from the current index, number_of_entries entries */
        from = bcmtr_pcie_data[device].current_rx;
    to = from + number_of_entries;
    if (to == from)
        to++;
    if (to > bcmtr_pcie_data[device].rxq_length)
        to = bcmtr_pcie_data[device].rxq_length;

    if (buffer)
        len = strlen(buffer);
    for (i = from; i < to; i++)
    {
        if (buffer)
            buffer = output + len;
        if (i == bcmtr_pcie_data[device].current_rx)
        {
            pcie_bprint(buffer, "Current = %d\n", i);
        }
        else
        {
            pcie_bprint(buffer, "Index = %d\n", i);
        }
        if (buffer)
        {
            /* add the last line to len - buffer contains only the last line */
            len += strlen(buffer);
            buffer = output + len;
        }
        /* dump_one_rx returns the length of the lines it printed */
        len += dump_one_rx(buffer, device, i);
    }

    return BCM_ERR_OK;
}

#ifdef __KERNEL__
EXPORT_SYMBOL(bcmtr_pcie_receive);
EXPORT_SYMBOL(bcmtr_pcie_init);
EXPORT_SYMBOL(bcmtr_pcie_exit);
EXPORT_SYMBOL(bcmtr_pcie_pre_connect);
EXPORT_SYMBOL(bcmtr_pcie_connect);
EXPORT_SYMBOL(bcmtr_pcie_disconnect);
EXPORT_SYMBOL(bcmtr_pcie_rx_irq_cblk_register);
EXPORT_SYMBOL(bcmtr_pcie_rx_irq_cblk_unregister);
EXPORT_SYMBOL(bcmtr_pcie_tx_irq_cblk_register);
EXPORT_SYMBOL(bcmtr_pcie_tx_irq_cblk_unregister);
EXPORT_SYMBOL(bcmtr_pcie_send);
EXPORT_SYMBOL(bcmtr_pcie_tx_dump);
EXPORT_SYMBOL(bcmtr_pcie_rx_dump);
EXPORT_SYMBOL(bcmtr_pcie_rxint_enable);
EXPORT_SYMBOL(bcmtr_pcie_rxint_disable);
EXPORT_SYMBOL(bcmtr_pcie_rxint_clear);
EXPORT_SYMBOL(bcmtr_pcie_rx_irq_handler);
EXPORT_SYMBOL(bcmtr_pcie_tx_irq_handler);
EXPORT_SYMBOL(bcmtr_pcie_get_statistics);
#endif
1256