| /* |
| <:copyright-BRCM:2016:DUAL/GPL:standard |
| |
| Broadcom Proprietary and Confidential. (c) 2016 Broadcom |
| All Rights Reserved |
| |
| Unless you and Broadcom execute a separate written software license |
| agreement governing use of this software, this software is licensed |
| to you under the terms of the GNU General Public License version 2 |
| (the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php, |
| with the following added to such license: |
| |
| As a special exception, the copyright holders of this software give |
| you permission to link this software with independent modules, and |
| to copy and distribute the resulting executable under terms of your |
| choice, provided that you also meet, for each linked independent |
| module, the terms and conditions of the license of that module. |
| An independent module is a module which is not derived from this |
| software. The special exception does not apply to any modifications |
| of the software. |
| |
| Notwithstanding the above, under no circumstances may you combine |
| this software in any way with any other Broadcom software provided |
| under a license other than the GPL, without Broadcom's express prior |
| written consent. |
| |
| :> |
| */ |
| #include "bcmtr_pcie.h" |
| #include "bcmolt_tr_pcie_specific.h" |
| |
| |
| /* |
| Synchronization flow of the DMA database between the Host and Maple. |
| The process takes place after Maple is loaded and runs from DDR. |
| ================================================================== |
| Host                               Maple |
| ================================================================== |
| write to SRAM its TX and RX queue sizes |
| write to SRAM DDR_FINISH indication |
| wait for PRM_BIT from Maple        run from DDR |
|                                    get from SRAM host tx and rx queue sizes |
|                                    calls pre_connect |
|                                    write to SRAM opaque data |
|                                    write to SRAM PRM_BIT |
| read from SRAM opaque data         calls connect |
| clear PRM_BIT from Maple           wait for PRM_BIT from Host |
| calls pre_connect |
| calls connect |
| write to SRAM PRM_BIT |
| register rx interrupt handler      register rx interrupt handler |
| ================================================================== |
| Both ready to send/receive packets through DMA |
| ================================================================== |
| */ |
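| /* |
| Hypothetical host-side bring-up sketch based on the flow above. The SRAM |
| handshake helpers (sram_*, wait_for_prm_bit_from_maple) are platform-specific |
| placeholders, not part of this driver: |
| |
| bcmtr_pcie_pre_connect_cfg cfg; |
| bcmtr_pcie_opaque_data local_opaque, remote_opaque; |
| |
| bcmtr_pcie_init(1); |
| ... fill cfg (queue sizes, irq, register base, DDR window) ... |
| sram_write_queue_sizes(cfg.txq_size, cfg.rxq_size); |
| sram_write_ddr_finish(); |
| wait_for_prm_bit_from_maple(); |
| sram_read_opaque_data(&remote_opaque); |
| sram_clear_prm_bit(); |
| bcmtr_pcie_pre_connect(0, &cfg, &local_opaque); |
| bcmtr_pcie_connect(0, &remote_opaque); |
| sram_write_prm_bit(); |
| bcmtr_pcie_rxint_enable(0); |
| */ |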
| /* misc local_bd info word layout |
|  _____________________________________________________ |
| |___31 - 25_____|__24___|____23 - 16____|____15 - 0___| |
| |___reserved____|__OWN__|__channel ID___|___length____| |
| */ |
| #define BCM_PCIED_BD_OWNERSHIP_SHIFT 24 |
| #define BCM_PCIED_BD_OWNERSHIP_MASK (0x1UL << BCM_PCIED_BD_OWNERSHIP_SHIFT) |
| |
| #define BCM_PCIED_BD_CHANNEL_ID_SHIFT 16 |
| #define BCM_PCIED_BD_CHANNEL_ID_MASK (0xffUL << BCM_PCIED_BD_CHANNEL_ID_SHIFT) |
| |
| #define BCM_PCIED_BD_PKT_LENGTH_MASK 0xffff |
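| |
| /* |
| Illustrative sketch (not used by the driver): composing and decomposing a BD |
| word with the masks above, mirroring bcmtr_pcie_send() and bcmtr_pcie_receive(): |
| |
| uint32_t bd = length; |
| bd |= (uint32_t)channel << BCM_PCIED_BD_CHANNEL_ID_SHIFT; |
| bd |= (uint32_t)owner << BCM_PCIED_BD_OWNERSHIP_SHIFT; |
| |
| length = bd & BCM_PCIED_BD_PKT_LENGTH_MASK; |
| channel = (bd & BCM_PCIED_BD_CHANNEL_ID_MASK) >> BCM_PCIED_BD_CHANNEL_ID_SHIFT; |
| owner = (bd & BCM_PCIED_BD_OWNERSHIP_MASK) >> BCM_PCIED_BD_OWNERSHIP_SHIFT; |
| */ |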
| |
| /* length_and_isrenable */ |
| #define BCM_PCIED_INTR_ENABLE_SHIFT 31 |
| #define BCM_PCIED_INTR_ENABLE_MASK (0x1UL << BCM_PCIED_INTR_ENABLE_SHIFT) |
| |
| #define BCM_PCIED_TRANSFER_SIZE_MASK 0x01ffffffUL |
| #define BCM_PCIED_TRANSFER_SIZE_SHIFT 0 |
| |
| /* last_next_indicator */ |
| #define BCM_PCIED_LAST_RECORD_SHIFT 31 |
| #define BCM_PCIED_LAST_RECORD_MASK (0x1UL << BCM_PCIED_LAST_RECORD_SHIFT) |
| |
| |
| #define BCM_PCIED_NEXT_CONTINOUS_SHIFT 2 |
| #define BCM_PCIED_NEXT_CONTINOUS_MASK (0x1UL << BCM_PCIED_NEXT_CONTINOUS_SHIFT) |
| |
| #define BCM_PCIE_ALL_DMA_INTERRUPTS_MASK (DMA_RX_DONE_MASK | DMA_RX_ERROR_MASK | DMA_TX_DONE_MASK | DMA_TX_ERROR_MASK) |
| |
| /* packet descriptor - used by the HW DMA mechanism; the layout is defined by HW, do not change it */ |
| typedef struct |
| { |
| uint32_t ddr_buff_address_low; /* Maple address - word[0]*/ |
| uint32_t pcie_pkt_address_low; /* Host address - word[1]*/ |
| uint32_t pcie_pkt_address_high; /* - word[2]*/ |
| uint32_t length_and_isrenable; /* bit 31 - interrupt enable; bits 24:0 - transfer length (see BCM_PCIED_TRANSFER_SIZE_MASK) */ |
| uint32_t last_next_indicator; /* bit 31 - last indicator; bit 30 - direction; bit 2 - set when the next descriptor is contiguous */ |
| uint32_t next_pd_address_low; |
| uint32_t next_pd_address_high; |
| uint32_t ddr_buff_address_high; /* - word[7]*/ |
| } bcm_pcied_pd; |
| |
| /* transfer unit for TX/RX */ |
| typedef struct |
| { |
| bcm_pcied_pd data_pd; /* PD used to transfer data packet */ |
| bcm_pcied_pd remote_to_local_pd; /* PD used to transfer shadow_rbd field from local CPU |
| to local_bd field on peer CPU */ |
| } bcm_pcied_tu; |
| |
| /* Opaque data; in the current implementation it is sent from Maple to the Host only. |
| It provides the offsets, from the start of the Maple DDR window, of the tx and rx TU rings (tx and rx in terms of the Host). |
| */ |
| struct bcmtr_pcie_opaque_data |
| { |
| uint32_t tx_tu_ring_offset; |
| uint32_t rx_tu_ring_offset; |
| }; |
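| |
| /* |
| For example, on connect each side turns the peer's offsets back into local |
| pointers relative to its own view of the Maple DDR window (a sketch of what |
| bcmtr_pcie_connect() does below): |
| |
| rx_tu_ring = (bcm_pcied_tu *)(opaque_data->rx_tu_ring_offset + ddr_win_base); |
| tx_tu_ring = (bcm_pcied_tu *)(opaque_data->tx_tu_ring_offset + ddr_win_base); |
| */ |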
| |
| /* main structure of the driver */ |
| typedef struct |
| { |
| bcm_pcied_tu *tx_tu_ring; /* TX DMA ring */ |
| bcm_pcied_tu *rx_tu_ring; /* RX DMA ring */ |
| |
| uint32_t *local_bd; /* used for receive; updated by the peer on its tx */ |
| uint32_t *shadow_rbd; /* used by the local side in tx as a staging buffer for updating the peer's local_bd */ |
| |
| uint32_t *tx_owner; /* used by the local side in tx to check that the TU is locally owned; updated by both local and peer */ |
| uint32_t *peer_tx_owner; /* used in rx to update the peer's tx_owner */ |
| |
| bcmos_buf **tx_nbuff_save; /* array of network buffer pointers, used by TX to store sent buffers */ |
| bcmos_buf **rx_nbuff_save; /* array of network buffer pointers, used by RX to store buffers pre-allocated for rx */ |
| |
| uint32_t current_tx; /* index of current tx tu */ |
| uint32_t conf_tx; /* index of the last reclaimed tx tu */ |
| uint32_t current_rx; /* index of current rx tu */ |
| int32_t prev_tx; /* index of the previous tx tu; -1 until the first send */ |
| |
| uint32_t max_tx_index; |
| uint32_t max_rx_index; |
| |
| uint32_t max_mtu; |
| |
| unsigned long ddr_win_base; /* local base address of the Maple DDR window */ |
| bcm_pcied_isr_data isrdata; /* includes pcie register base, id and irq number */ |
| |
| TX_LOCKDEF |
| |
| /* counters */ |
| uint32_t rx_counter; |
| uint32_t tx_counter; |
| uint32_t rx_pcie_empty_counter; |
| uint32_t tx_pcie_full_counter; |
| |
| /* saved meta-data */ |
| uint32_t txq_length; |
| uint32_t rxq_length; |
| uint32_t *tx_tu_ring_orig; /* TX DMA ring */ |
| uint32_t *rx_tu_ring_orig; /* RX DMA ring */ |
| |
| } bcm_pcied_comm_data; |
| |
| f_bcmtr_int bcmtr_pcie_rx_irq_handler; /* set via bcmtr_pcie_rx_irq_cblk_register() */ |
| f_bcmtr_int bcmtr_pcie_tx_irq_handler; /* set via bcmtr_pcie_tx_irq_cblk_register() */ |
| |
| static void default_tx_done_callback(uint8_t device, bcmos_buf *buf); |
| |
| static f_bcmtr_done tx_done_handler = default_tx_done_callback; |
| |
| /* used for error messages */ |
| #if defined(__KERNEL__) |
| #define pcie_print(fmt, args...) printk("%s: %d: " fmt, __FUNCTION__ , __LINE__, ##args) |
| #else |
| #define pcie_print bcmos_printf |
| #endif |
| |
| #define LOCAL_OWNER 0 |
| #define REMOTE_OWNER 1 |
| |
| #define INCREMENT_RECEIVED(device) bcmtr_pcie_data[device].rx_counter++ |
| #define INCREMENT_TRANSMITED(device) bcmtr_pcie_data[device].tx_counter++ |
| |
| /* set next index into a DMA ring */ |
| #define NEXT_INDEX(maxindex, index) \ |
| do{ \ |
| index++; \ |
| if (index > maxindex) \ |
| index = 0; \ |
| } while(0) |
| |
| /* calculate a register address from the register base and the register offset */ |
| #define PCI_REG_ADDRESS(reg) (uint32_t *)(bcmtr_pcie_data[device].isrdata.pcie_reg_base + reg) |
| |
| /* Alias to avoid using the BCM_ERR_OK define directly; an "error" code named "ok" is confusing (D.B.) */ |
| #define BCMTR_SUCCESS BCM_ERR_OK |
| |
| #define BCMTR_PARANOID_CHECK() \ |
| do { \ |
| if (!bcmtr_pcie_data) \ |
| return BCM_ERR_NORES; \ |
| if (device >= bcmtr_max_devices_number)\ |
| return BCM_ERR_RANGE; \ |
| } while(0) |
| |
| #define BCMTR_PARANOID_CHECK_TYPE(t) \ |
| do { \ |
| if (!bcmtr_pcie_data) \ |
| return (t)BCM_ERR_NORES; \ |
| if (device >= bcmtr_max_devices_number)\ |
| return (t)BCM_ERR_RANGE; \ |
| } while(0) |
| |
| #define BCMTR_PARANOID_CHECK_EXT() \ |
| do { \ |
| if (!bcmtr_pcie_data) \ |
| return BCM_ERR_NORES; \ |
| if (device >= bcmtr_max_devices_number)\ |
| return BCM_ERR_RANGE; \ |
| if (!bcmtr_pcie_data[device].isrdata.pcie_reg_base)\ |
| return BCM_ERR_NORES; \ |
| } while(0) |
| |
| /* device data array */ |
| static bcm_pcied_comm_data *bcmtr_pcie_data; |
| |
| static uint32_t bcmtr_max_devices_number; |
| |
| /* stop the DMA engine of one device (clears the TX run/stop bit) */ |
| static inline void stop_dma(uint8_t device) |
| { |
| volatile uint32_t value; |
| |
| value = bcm_pci_read32(PCI_REG_ADDRESS(DESCRIPTOR_CONTROL)); |
| value &= ~DMA_TX_SW_DESC_LIST_CTRL_STS_TX_DMA_RUN_STOP_MASK; |
| bcm_pci_write32(PCI_REG_ADDRESS(DESCRIPTOR_CONTROL), value); |
| } |
| |
| /* Default tx-done callback */ |
| static void default_tx_done_callback(uint8_t device, bcmos_buf *buf) |
| { |
| bcmos_free(buf); |
| } |
| |
| /* free all allocated buffers */ |
| static bcmos_errno free_buffers(uint8_t device, const char *error_string) |
| { |
| uint32_t i; |
| bcm_pcied_comm_data *current_device = &bcmtr_pcie_data[device]; |
| |
| pcie_print("%s", error_string); |
| |
| if (current_device->rx_nbuff_save) |
| { |
| for (i = 0; i < current_device->rxq_length; i++) |
| { |
| if (current_device->rx_nbuff_save[i]) |
| bcmos_buf_free(current_device->rx_nbuff_save[i]); |
| } |
| bcmos_free(current_device->rx_nbuff_save); |
| } |
| |
| if (current_device->tx_nbuff_save) |
| { |
| for (i = 0; i < current_device->txq_length; i++) |
| { |
| if (current_device->tx_nbuff_save[i]) |
| bcmos_buf_free(current_device->tx_nbuff_save[i]); |
| } |
| bcmos_free(current_device->tx_nbuff_save); |
| } |
| |
| if (current_device->local_bd) |
| bcmos_dma_free(device, current_device->local_bd); |
| |
| if (current_device->shadow_rbd) |
| bcmos_dma_free(device, current_device->shadow_rbd); |
| |
| DESTROY_TXLOCK(device); |
| |
| bcmtr_pcie_free_rings(device, current_device->tx_tu_ring_orig, current_device->rx_tu_ring_orig); |
| |
| memset(&bcmtr_pcie_data[device], 0, sizeof(bcm_pcied_comm_data)); |
| |
| return BCM_ERR_NOMEM; |
| } |
| |
| bcmos_errno bcmtr_pcie_init(uint8_t max_devices) |
| { |
| bcmtr_max_devices_number = max_devices; |
| |
| bcmtr_pcie_data = (bcm_pcied_comm_data *)bcmos_calloc(bcmtr_max_devices_number * sizeof(bcm_pcied_comm_data)); |
| if (!bcmtr_pcie_data) |
| { |
| pcie_print("Driver cannot be initialized\n"); |
| return BCM_ERR_NOMEM; |
| } |
| |
| return BCMTR_SUCCESS; |
| } |
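| |
| /* |
| Hypothetical usage sketch: initialize the driver for a single device before |
| any pre_connect/connect calls: |
| |
| if (bcmtr_pcie_init(1) != BCMTR_SUCCESS) |
| return BCM_ERR_NOMEM; |
| */ |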
| |
| bcmos_errno bcmtr_pcie_pre_connect(uint8_t device, const bcmtr_pcie_pre_connect_cfg *cfg, bcmtr_pcie_opaque_data *opaque_data) |
| { |
| bcm_pcied_comm_data *current_device; |
| bcmos_errno ret_code; |
| uint32_t i; |
| |
| BCMTR_PARANOID_CHECK(); |
| |
| if (!cfg) |
| { |
| pcie_print("Second parameter(config) is NULL pointer\n"); |
| return BCM_ERR_NULL; |
| } |
| |
| if (!opaque_data) |
| { |
| pcie_print("Return area pointer(Third parameter) is NULL\n"); |
| return BCM_ERR_NULL; |
| } |
| |
| current_device = &bcmtr_pcie_data[device]; |
| |
| CREATE_TXLOCK(device); |
| |
| /* copy the user's configuration into the internal database */ |
| current_device->txq_length = cfg->txq_size; |
| current_device->rxq_length = cfg->rxq_size; |
| current_device->max_tx_index = cfg->txq_size - 1; |
| current_device->max_rx_index = cfg->rxq_size - 1; |
| current_device->max_mtu = cfg->max_mtu; |
| current_device->isrdata.device = device; |
| current_device->isrdata.rx_irq = cfg->rx_irq; |
| current_device->isrdata.pcie_reg_base = cfg->pcie_reg_base; |
| current_device->ddr_win_base = cfg->ddr_win_base; |
| current_device->prev_tx = -1; |
| |
| /**********************/ |
| /* allocate the database */ |
| /**********************/ |
| |
| bcmtr_pcie_specific_init(current_device->isrdata.pcie_reg_base); |
| |
| ret_code = bcmtr_create_tu_rings(current_device->txq_length, current_device->rxq_length, |
| (void **)¤t_device->tx_tu_ring, (void **)¤t_device->rx_tu_ring, |
| (void **)¤t_device->tx_tu_ring_orig, (void **)¤t_device->rx_tu_ring_orig, |
| current_device->isrdata.pcie_reg_base); |
| if (ret_code != BCMTR_SUCCESS) |
| { |
| pcie_print("Failed to create transfer unit rings : (error=%d)\n",ret_code); |
| return ret_code; |
| } |
| |
| /* allocate array of local_bd for RX chain */ |
| current_device->local_bd = (uint32_t *)bcmos_dma_alloc(device, sizeof(*current_device->local_bd) * current_device->rxq_length); |
| if (!current_device->local_bd) |
| return free_buffers(device, "Failed to allocate local_bd for RX ring\n"); |
| |
| /* set remote side as owner for all local_bd */ |
| for (i = 0; i < current_device->rxq_length; i++) |
| *(uint32_t*)(¤t_device->local_bd[i])= |
| BCMOS_ENDIAN_CPU_TO_LITTLE_U32((uint32_t)(REMOTE_OWNER << BCM_PCIED_BD_OWNERSHIP_SHIFT)); |
| |
| /* allocate array of shadow_rbd for the TX chain */ |
| current_device->shadow_rbd = (uint32_t *)bcmos_dma_alloc(device, sizeof(*current_device->shadow_rbd) * current_device->txq_length); |
| if (!current_device->shadow_rbd ) |
| return free_buffers(device, "Failed to allocate shadow_rbd for TX ring\n"); |
| /* clear all shadow_rbd entries; not strictly needed, but useful for debugging */ |
| memset(current_device->shadow_rbd, 0, sizeof(*current_device->shadow_rbd) * current_device->txq_length); |
| |
| /* allocate network buffer pointer arrays */ |
| current_device->tx_nbuff_save = (bcmos_buf**)bcmos_calloc(sizeof(bcmos_buf*) * current_device->txq_length); |
| if (!current_device->tx_nbuff_save) |
| return free_buffers(device,"Failed to allocate array for TX buffers pointers\n"); |
| |
| current_device->rx_nbuff_save = (bcmos_buf**)bcmos_calloc(sizeof(bcmos_buf*) * current_device->rxq_length); |
| if (!current_device->rx_nbuff_save) |
| return free_buffers(device, "Failed to allocate array for RX buffers pointers\n"); |
| |
| /* Update the return value; tx and rx are crossed for the peer. |
| If we did not allocate the corresponding area, send zero to indicate it to the peer. |
| */ |
| if (current_device->tx_tu_ring) |
| opaque_data->rx_tu_ring_offset = (unsigned long)current_device->tx_tu_ring - current_device->ddr_win_base; |
| |
| if (current_device->rx_tu_ring) |
| opaque_data->tx_tu_ring_offset = (unsigned long)current_device->rx_tu_ring - current_device->ddr_win_base; |
| |
| return BCMTR_SUCCESS; |
| } |
| |
| bcmos_errno bcmtr_pcie_connect(uint8_t device, const bcmtr_pcie_opaque_data *opaque_data) |
| { |
| uint32_t i; |
| bcm_pcied_comm_data *current_device; |
| bcm_pcied_tu *tu_ptr = NULL; |
| uint8_t *pkt_ptr; |
| |
| BCMTR_PARANOID_CHECK(); |
| |
| current_device = &bcmtr_pcie_data[device]; |
| |
| /* Update the local tu_ring pointers; no need to cross tx/rx since that was already done in pre_connect. |
| Set only the pointers that are still zero (not allocated locally). */ |
| if (!current_device->rx_tu_ring) |
| current_device->rx_tu_ring = (bcm_pcied_tu*)(opaque_data->rx_tu_ring_offset + current_device->ddr_win_base); |
| else |
| { |
| /* set next to point to the beginning of the ring */ |
| tu_ptr = ¤t_device->rx_tu_ring[current_device->max_rx_index]; |
| bcm_pci_write32(&(tu_ptr->remote_to_local_pd.next_pd_address_low), (uint32_t)bcmos_virt_to_phys(current_device->rx_tu_ring)); |
| } |
| |
| if (!current_device->tx_tu_ring) |
| current_device->tx_tu_ring = (bcm_pcied_tu*)(opaque_data->tx_tu_ring_offset + current_device->ddr_win_base); |
| else |
| { |
| /* set next to point to the beginning of the ring */ |
| tu_ptr = ¤t_device->tx_tu_ring[current_device->max_tx_index]; |
| bcm_pci_write32(&(tu_ptr->remote_to_local_pd.next_pd_address_low), (uint32_t)bcmos_virt_to_phys(current_device->tx_tu_ring)); |
| } |
| |
| /* now all tu_rings are allocated and synchronized; time to set tx_owner and peer_tx_owner */ |
| current_device->tx_owner = (uint32_t*)((unsigned long)current_device->tx_tu_ring + sizeof(bcm_pcied_tu) * current_device->txq_length); |
| current_device->peer_tx_owner = (uint32_t*)((unsigned long)current_device->rx_tu_ring + sizeof(bcm_pcied_tu) * current_device->rxq_length); |
| |
| /* preallocate rx net buffers */ |
| for (i = 0; i < current_device->rxq_length; i++) |
| { |
| current_device->rx_nbuff_save[i] = bcmos_buf_alloc(current_device->max_mtu); |
| if (!current_device->rx_nbuff_save[i]) |
| return free_buffers(device, "Failed to allocate buffer for RX\n"); |
| } |
| /********************************************** |
| OK, now we are ready to initialize the tu_rings. |
| NOTE: everything related to creating the rings (last_next_indicator, |
| next_pd_address_low/high) was already done by bcmtr_create_tu_rings. |
| ***********************************************/ |
| |
| /* Fill the TX ring; fill only the fields known locally, |
| all peer-related pointers will be updated by the peer */ |
| for (i = 0, tu_ptr = current_device->tx_tu_ring; i < current_device->txq_length; i++, tu_ptr++) |
| { |
| /* fill the remote_to_local_pd - only last indicator */ |
| if (i == current_device->max_tx_index) |
| bcm_pci_write32(&(tu_ptr->remote_to_local_pd.last_next_indicator), BCM_PCIED_LAST_RECORD_MASK); |
| else |
| bcm_pci_write32(&(tu_ptr->remote_to_local_pd.last_next_indicator), (BCM_PCIED_LAST_RECORD_MASK | BCM_PCIED_NEXT_CONTINOUS_MASK)); |
| |
| /************************ |
| fill data_pd |
| - leave length_and_isrenable at zero; it will be set during tx |
| - leave the source data buffer pointer at zero; it will be set during tx |
| - the dest. data buffer will be set by the peer |
| *************************/ |
| |
| /************************ |
| fill remote_to_local_pd |
| the dest. data buffer will be set by the peer |
| *************************/ |
| bcmtr_set_source_buffer_address((uint32_t *)&tu_ptr->remote_to_local_pd, ¤t_device->shadow_rbd[i]); |
| |
| /* set the continuous-next indicator for both parts of the current tu */ |
| bcm_pci_write32(&tu_ptr->data_pd.last_next_indicator, BCM_PCIED_NEXT_CONTINOUS_MASK); |
| bcm_pci_write32(&(tu_ptr->remote_to_local_pd.last_next_indicator), BCM_PCIED_NEXT_CONTINOUS_MASK); |
| |
| /* set the length and interrupt indicator */ |
| bcm_pci_write32(&tu_ptr->remote_to_local_pd.length_and_isrenable, (BCM_PCIED_INTR_ENABLE_MASK | (sizeof(uint32_t) << BCM_PCIED_TRANSFER_SIZE_SHIFT))); |
| } |
| |
| /* Fill the RX ring; fill only the fields known locally, |
| all peer-related pointers will be updated by the peer */ |
| for (i = 0, tu_ptr = current_device->rx_tu_ring; i < current_device->rxq_length; i++, tu_ptr++) |
| { |
| /* fill the remote_to_local_pd - only last indicator */ |
| if (i == current_device->max_rx_index) |
| bcm_pci_write32(&(tu_ptr->remote_to_local_pd.last_next_indicator), BCM_PCIED_LAST_RECORD_MASK); |
| else |
| bcm_pci_write32(&(tu_ptr->remote_to_local_pd.last_next_indicator), (BCM_PCIED_LAST_RECORD_MASK | BCM_PCIED_NEXT_CONTINOUS_MASK)); |
| |
| /************************ |
| fill data_pd |
| - leave length_and_isrenable at zero; it will be set by the peer during tx |
| - leave the source data buffer pointer at zero; it will be set by the peer during tx |
| *************************/ |
| |
| /* take data pointer from nbuf saved in rx_nbuff_save*/ |
| pkt_ptr = bcmos_buf_data(current_device->rx_nbuff_save[i]); |
| |
| /* invalidate cache for the data buffers */ |
| bcmos_prepare_for_dma_read(pkt_ptr, current_device->max_mtu); |
| |
| /* set destination data buffer */ |
| bcmtr_set_dest_buffer_address((uint32_t *)&tu_ptr->data_pd, pkt_ptr); |
| |
| /************************ |
| fill remote_to_local_pd |
| - length_and_isrenable is set below, with the interrupt enabled |
| - last_next_indicator will be set by the peer during tx |
| ************************/ |
| |
| /* set destination for the BD */ |
| bcmtr_set_dest_buffer_address((uint32_t *)&tu_ptr->remote_to_local_pd, ¤t_device->local_bd[i]); |
| |
| /* set the continuous-next indicator for both parts of the transfer unit */ |
| bcm_pci_write32(&tu_ptr->data_pd.last_next_indicator, BCM_PCIED_NEXT_CONTINOUS_MASK); |
| bcm_pci_write32(&tu_ptr->remote_to_local_pd.last_next_indicator, BCM_PCIED_NEXT_CONTINOUS_MASK); |
| |
| /* set the length and interrupt indicator */ |
| bcm_pci_write32(&tu_ptr->remote_to_local_pd.length_and_isrenable, (BCM_PCIED_INTR_ENABLE_MASK | (sizeof(uint32_t) << BCM_PCIED_TRANSFER_SIZE_SHIFT))); |
| |
| } |
| #ifndef SIMULATION_BUILD |
| bcmtr_connect_isr(¤t_device->isrdata); |
| #endif |
| |
| /* Clear and disable all interrupts at L2 */ |
| bcm_pci_write32(PCI_REG_ADDRESS(DMA_INTR_MASK_SET), BCM_PCIE_ALL_DMA_INTERRUPTS_MASK); |
| bcm_pci_write32(PCI_REG_ADDRESS(DMA_INTR_CLEAR), BCM_PCIE_ALL_DMA_INTERRUPTS_MASK); |
| |
| /* Enable L2 interrupts at L1 */ |
| bcm_pci_write32(PCI_REG_ADDRESS(DMA_INTR1_MASK_CLEAR), PCIE_L2_INTR_MASK); |
| |
| return BCMTR_SUCCESS; |
| } |
| |
| bcmos_errno bcmtr_pcie_send(uint8_t device, uint8_t channel, bcmos_buf *net_buff) |
| { |
| bcm_pcied_comm_data *current_device; |
| bcm_pcied_tu *current_tu; |
| uint32_t current_tx; |
| uint32_t next_tx; |
| uint32_t length; |
| uint8_t *pkt_ptr; |
| #ifdef CHECK_PARAM |
| BCMTR_PARANOID_CHECK(); |
| |
| if(!net_buff) |
| { |
| pcie_print("Network buffer is null\n"); |
| return BCM_ERR_NULL; |
| } |
| #endif |
| |
| current_device = &bcmtr_pcie_data[device]; |
| |
| length = bcmos_buf_length(net_buff); |
| if((length > current_device->max_mtu) || (length == 0)) |
| { |
| pcie_print("Packet length %d error (MTU=%d)\n", length, current_device->max_mtu); |
| return BCM_ERR_RANGE; |
| } |
| |
| LOCK_TX(); |
| |
| current_tx = current_device->current_tx; |
| |
| /* check owner in tx_owner list*/ |
| if(bcm_pci_read32(¤t_device->tx_owner[current_tx]) != LOCAL_OWNER) |
| { |
| UNLOCK_TX(); |
| bcmtr_pcie_data[device].tx_pcie_full_counter++; |
| return BCM_ERR_QUEUE_FULL; |
| } |
| |
| /* Do not forget to change the owner */ |
| bcm_pci_write32(¤t_device->tx_owner[current_tx], REMOTE_OWNER); |
| |
| /* Prepare shadow_rbd */ |
| *(uint32_t*)(¤t_device->shadow_rbd[current_tx])= |
| BCMOS_ENDIAN_CPU_TO_LITTLE_U32((uint32_t)(length | channel << BCM_PCIED_BD_CHANNEL_ID_SHIFT | LOCAL_OWNER << BCM_PCIED_BD_OWNERSHIP_SHIFT)); |
| |
| current_tu = ¤t_device->tx_tu_ring[current_tx]; |
| |
| /* take the data pointer from the network buffer */ |
| pkt_ptr = bcmos_buf_data(net_buff); |
| |
| /* set data_pd source data buffer */ |
| bcmtr_set_source_buffer_address((uint32_t *)¤t_tu->data_pd, pkt_ptr); |
| |
| #ifndef HARDWARE_TEST1 |
| /* flush the cache for the data buffer */ |
| bcmos_prepare_for_dma_write(pkt_ptr, length); |
| #endif |
| |
| /* set data_pd length_and_isrenable (no need to enable the interrupt for data_pd) */ |
| bcm_pci_write32(¤t_tu->data_pd.length_and_isrenable, length << BCM_PCIED_TRANSFER_SIZE_SHIFT); |
| |
| /* set the 'last indicator' bit in the current TU; takes care of wrapping */ |
| if(current_tx == current_device->max_tx_index) |
| bcm_pci_write32(¤t_tu->remote_to_local_pd.last_next_indicator, BCM_PCIED_LAST_RECORD_MASK); |
| else |
| bcm_pci_write32(¤t_tu->remote_to_local_pd.last_next_indicator, BCM_PCIED_LAST_RECORD_MASK | BCM_PCIED_NEXT_CONTINOUS_MASK); |
| |
| bcmos_barrier(); |
| |
| /* clear the 'last indicator' bit in the previous TU (skipped on the very first send, |
| when prev_tx is still -1); takes care of wrapping */ |
| if (current_device->prev_tx >= 0) |
| { |
| if (current_device->prev_tx != current_device->max_tx_index) |
| bcm_pci_write32(&(current_device->tx_tu_ring[current_device->prev_tx].remote_to_local_pd.last_next_indicator), BCM_PCIED_NEXT_CONTINOUS_MASK); |
| else |
| bcm_pci_write32(&(current_device->tx_tu_ring[current_device->prev_tx].remote_to_local_pd.last_next_indicator), 0); |
| } |
| |
| bcmos_barrier(); |
| |
| /* set WAKE bit in DMA registers */ |
| bcm_pci_write32(PCI_REG_ADDRESS(WAKEUP_DMA), DMA_TX_WAKE_CTRL_WAKE_MASK); |
| |
| INCREMENT_TRANSMITED(device); |
| |
| next_tx = current_tx + 1; |
| if (next_tx > current_device->max_tx_index) |
| next_tx = 0; |
| |
| #ifndef HARDWARE_TEST1 |
| /* release previous network buffer */ |
| if (current_device->tx_nbuff_save[current_tx]) |
| { |
| tx_done_handler(device, current_device->tx_nbuff_save[current_tx]); |
| current_device->conf_tx = next_tx; |
| } |
| |
| /* store network buffer pointer */ |
| current_device->tx_nbuff_save[current_device->current_tx] = net_buff; |
| #endif |
| /* move current_tx index */ |
| current_device->prev_tx = (int32_t)current_device->current_tx; |
| current_device->current_tx = next_tx; |
| |
| UNLOCK_TX(); |
| return BCMTR_SUCCESS; |
| } |
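| |
| /* |
| Hypothetical usage sketch: transmit a buffer and reclaim completed descriptors |
| when the ring is full ("my_channel" and the retry policy are placeholders): |
| |
| bcmos_buf *buf = bcmos_buf_alloc(len); |
| ... fill the payload, then set the length ... |
| bcmos_buf_length_set(buf, len); |
| if (bcmtr_pcie_send(device, my_channel, buf) == BCM_ERR_QUEUE_FULL) |
| { |
| bcmtr_pcie_tx_collect(device); |
| ... retry bcmtr_pcie_send() or drop the packet ... |
| } |
| */ |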
| |
| /** Reclaim buffers that have already been transmitted |
| * \param[in] device Maple device index |
| * \returns: number of reclaimed TX buffers >= 0 or bcmos_errno error code <0 |
| */ |
| int bcmtr_pcie_tx_collect(uint8_t device) |
| { |
| bcm_pcied_comm_data *current_device; |
| uint32_t conf_tx; |
| int n = 0; |
| |
| #ifdef CHECK_PARAM |
| BCMTR_PARANOID_CHECK_TYPE(int); |
| #endif |
| |
| current_device = &bcmtr_pcie_data[device]; |
| |
| LOCK_TX(); |
| |
| conf_tx = current_device->conf_tx; |
| while (bcm_pci_read32(¤t_device->tx_owner[conf_tx]) == LOCAL_OWNER && |
| current_device->tx_nbuff_save[conf_tx]) |
| { |
| ++n; |
| |
| /* release previous network buffer */ |
| tx_done_handler(device, current_device->tx_nbuff_save[conf_tx]); |
| current_device->tx_nbuff_save[conf_tx] = NULL; |
| |
| conf_tx++; |
| if (conf_tx > current_device->max_tx_index) |
| conf_tx = 0; |
| } |
| current_device->conf_tx = conf_tx; |
| |
| UNLOCK_TX(); |
| |
| return n; |
| } |
| |
| /* |
| Receive a data packet. |
| Returns the channel and a pointer to the network buffer containing the data. |
| */ |
| bcmos_errno bcmtr_pcie_receive(uint8_t device, uint8_t *channel, bcmos_buf **buf) |
| { |
| bcm_pcied_comm_data *current_device; |
| bcm_pcied_tu *current_tu; |
| uint32_t bd_info; |
| uint32_t length; |
| uint32_t current_rx; |
| |
| #ifndef HARDWARE_TEST1 |
| bcmos_buf *net_ptr = NULL; |
| uint8_t *pkt_ptr = NULL; |
| #endif |
| |
| #ifdef CHECK_PARAM |
| BCMTR_PARANOID_CHECK_EXT(); |
| if (!channel || !buf) |
| return BCM_ERR_NULL; |
| #endif |
| *buf = NULL; |
| |
| current_device = &bcmtr_pcie_data[device]; |
| |
| current_rx = current_device->current_rx; |
| current_tu = ¤t_device->rx_tu_ring[current_rx]; |
| |
| /* Read local_bd to local variable */ |
| bd_info = BCMOS_ENDIAN_LITTLE_TO_CPU_U32(*(uint32_t*)(¤t_device->local_bd[current_rx])); |
| |
| /* check owner in local_bd, it is updated by the peer */ |
| if ((bd_info & BCM_PCIED_BD_OWNERSHIP_MASK) >> BCM_PCIED_BD_OWNERSHIP_SHIFT != LOCAL_OWNER) |
| { |
| bcmtr_pcie_data[device].rx_pcie_empty_counter++; |
| return BCM_ERR_QUEUE_EMPTY; |
| } |
| |
| /* change the owner in local_bd to the remote side */ |
| *(uint32_t*)(¤t_device->local_bd[current_rx])= |
| BCMOS_ENDIAN_CPU_TO_LITTLE_U32((uint32_t)((REMOTE_OWNER << BCM_PCIED_BD_OWNERSHIP_SHIFT) | bd_info)); |
| |
| /* take the packet length from local_bd */ |
| length = bd_info & BCM_PCIED_BD_PKT_LENGTH_MASK; |
| |
| if ((length == 0) || (length > current_device->max_mtu)) |
| { |
| pcie_print("Packet length error : %d\n", length); |
| |
| /* update remote side */ |
| bcm_pci_write32(¤t_device->peer_tx_owner[current_rx], LOCAL_OWNER); |
| |
| /* move current rx */ |
| NEXT_INDEX(current_device->max_rx_index, current_device->current_rx); |
| |
| return BCM_ERR_MSG_ERROR; |
| } |
| |
| /* update packet channel id */ |
| *channel = (bd_info & BCM_PCIED_BD_CHANNEL_ID_MASK) >> BCM_PCIED_BD_CHANNEL_ID_SHIFT; |
| |
| #ifndef HARDWARE_TEST1 |
| /* allocate new buffer to receive packet */ |
| net_ptr = bcmos_buf_alloc(current_device->max_mtu); |
| |
| /* If allocation fails, do clean-up */ |
| if (!net_ptr) |
| { |
| /* update remote side */ |
| bcm_pci_write32(¤t_device->peer_tx_owner[current_rx], LOCAL_OWNER); |
| |
| /* move current rx */ |
| NEXT_INDEX(current_device->max_rx_index, current_device->current_rx); |
| |
| return BCM_ERR_NOMEM; |
| } |
| #endif |
| /* take the data pointer */ |
| *buf = current_device->rx_nbuff_save[current_rx]; |
| |
| /* invalidate the cache for the received network buffer */ |
| bcmos_prepare_for_dma_read(bcmos_buf_data(*buf), length); |
| |
| /* fill network buffer */ |
| bcmos_buf_length_set(*buf, length); |
| |
| /* update statistics */ |
| INCREMENT_RECEIVED(device); |
| |
| #ifndef HARDWARE_TEST1 |
| pkt_ptr = bcmos_buf_data(net_ptr); |
| |
| /* invalidate the cache for the new network buffer */ |
| bcmos_prepare_for_dma_read(pkt_ptr, current_device->max_mtu); |
| |
| /* update rx network buffer with the new packet */ |
| current_device->rx_nbuff_save[current_rx] = net_ptr; |
| |
| /* set data_pd destination data buffer*/ |
| bcmtr_set_dest_buffer_address((uint32_t *)¤t_tu->data_pd, pkt_ptr); |
| #endif |
| /* update remote side */ |
| bcm_pci_write32(¤t_device->peer_tx_owner[current_rx], LOCAL_OWNER); |
| |
| /* move current rx */ |
| NEXT_INDEX(current_device->max_rx_index, current_device->current_rx); |
| |
| return BCMTR_SUCCESS; |
| } |
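| |
| /* |
| Hypothetical usage sketch: drain the RX ring, e.g. from the deferred half of |
| the rx interrupt ("deliver" is a placeholder for the caller's dispatch): |
| |
| uint8_t channel; |
| bcmos_buf *buf; |
| while (bcmtr_pcie_receive(device, &channel, &buf) == BCMTR_SUCCESS) |
| deliver(channel, buf); |
| bcmtr_pcie_rxint_clear(device); |
| bcmtr_pcie_rxint_enable(device); |
| */ |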
| |
| /* enable level 2 RX interrupts */ |
| bcmos_errno bcmtr_pcie_rxint_enable(uint8_t device) |
| { |
| #ifdef CHECK_PARAM |
| BCMTR_PARANOID_CHECK_EXT(); |
| #endif |
| bcm_pci_write32(PCI_REG_ADDRESS(DMA_INTR_MASK_CLEAR), DMA_RX_ERROR_MASK | DMA_RX_DONE_MASK); |
| |
| return BCMTR_SUCCESS; |
| } |
| |
| /* disable level 2 RX interrupts */ |
| bcmos_errno bcmtr_pcie_rxint_disable(uint8_t device) |
| { |
| #ifdef CHECK_PARAM |
| BCMTR_PARANOID_CHECK_EXT(); |
| #endif |
| bcm_pci_write32(PCI_REG_ADDRESS(DMA_INTR_MASK_SET), DMA_RX_ERROR_MASK | DMA_RX_DONE_MASK); |
| |
| return BCMTR_SUCCESS; |
| } |
| |
| bcmos_errno bcmtr_pcie_rxint_clear(uint8_t device) |
| { |
| #ifdef CHECK_PARAM |
| BCMTR_PARANOID_CHECK_EXT(); |
| #endif |
| bcm_pci_write32(PCI_REG_ADDRESS(DMA_INTR_CLEAR), DMA_RX_ERROR_MASK | DMA_RX_DONE_MASK); |
| |
| return BCMTR_SUCCESS; |
| } |
| |
| /* Enable Level 2 "TX handled" interrupt */ |
| bcmos_errno bcmtr_pcie_txint_enable(uint8_t device) |
| { |
| #ifdef CHECK_PARAM |
| BCMTR_PARANOID_CHECK_EXT(); |
| #endif |
| bcm_pci_write32(PCI_REG_ADDRESS(DMA_INTR_MASK_CLEAR), DMA_TX_ERROR_MASK | DMA_TX_DONE_MASK); |
| |
| return BCMTR_SUCCESS; |
| } |
| |
| /* Disable level 2 "TX handled" interrupt */ |
| bcmos_errno bcmtr_pcie_txint_disable(uint8_t device) |
| { |
| #ifdef CHECK_PARAM |
| BCMTR_PARANOID_CHECK_EXT(); |
| #endif |
| bcm_pci_write32(PCI_REG_ADDRESS(DMA_INTR_MASK_SET), DMA_TX_ERROR_MASK | DMA_TX_DONE_MASK); |
| |
| return BCMTR_SUCCESS; |
| } |
| |
| /* Clear level 2 "TX handled" interrupt */ |
| bcmos_errno bcmtr_pcie_txint_clear(uint8_t device) |
| { |
| #ifdef CHECK_PARAM |
| BCMTR_PARANOID_CHECK_EXT(); |
| #endif |
| bcm_pci_write32(PCI_REG_ADDRESS(DMA_INTR_CLEAR), DMA_TX_ERROR_MASK | DMA_TX_DONE_MASK); |
| |
| return BCMTR_SUCCESS; |
| } |
| |
| bcmos_errno bcmtr_pcie_disconnect(uint8_t device) |
| { |
| BCMTR_PARANOID_CHECK_EXT(); |
| |
| stop_dma(device); |
| |
| /* disable interrupts */ |
| bcmtr_pcie_rxint_disable(device); |
| bcmtr_pcie_txint_disable(device); |
| |
| /* clear DMA interrupts */ |
| bcmtr_pcie_rxint_clear(device); |
| bcmtr_pcie_txint_clear(device); |
| |
| /* Disable in L1 controller */ |
| bcm_pci_write32(PCI_REG_ADDRESS(DMA_INTR1_MASK_SET), PCIE_L2_INTR_MASK); |
| |
| /* free the irq; on the host, if the irq is shared, it must not be freed here */ |
| bcmtr_pcie_free_irq(bcmtr_pcie_data[device].isrdata.rx_irq, &bcmtr_pcie_data[device].isrdata); |
| |
| free_buffers(device, "PCIE disconnected\n"); |
| |
| return BCMTR_SUCCESS; |
| } |
| |
| bcmos_errno bcmtr_pcie_rx_irq_cblk_register(f_bcmtr_int rx_isr_clbk) |
| { |
| bcmtr_pcie_rx_irq_handler = rx_isr_clbk; |
| |
| return BCMTR_SUCCESS; |
| } |
| |
| bcmos_errno bcmtr_pcie_rx_irq_cblk_unregister(void) |
| { |
| bcmtr_pcie_rx_irq_handler = NULL; |
| |
| return BCMTR_SUCCESS; |
| } |
| |
| bcmos_errno bcmtr_pcie_tx_irq_cblk_register(f_bcmtr_int tx_isr_clbk) |
| { |
| bcmtr_pcie_tx_irq_handler = tx_isr_clbk; |
| |
| return BCMTR_SUCCESS; |
| } |
| |
| bcmos_errno bcmtr_pcie_tx_irq_cblk_unregister(void) |
| { |
| bcmtr_pcie_tx_irq_handler = NULL; |
| |
| return BCMTR_SUCCESS; |
| } |
| |
| bcmos_errno bcmtr_pcie_tx_done_cblk_register(f_bcmtr_done tx_done_cb) |
| { |
| tx_done_handler = tx_done_cb; |
| |
| return BCMTR_SUCCESS; |
| } |
| |
| bcmos_errno bcmtr_pcie_tx_done_cblk_unregister(void) |
| { |
| tx_done_handler = default_tx_done_callback; |
| |
| return BCMTR_SUCCESS; |
| } |
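| |
| /* |
| Hypothetical example: a tx-done callback that recycles buffers into a |
| caller-owned pool instead of freeing them ("my_pool_put" is a placeholder): |
| |
| static void my_tx_done(uint8_t device, bcmos_buf *buf) |
| { |
| my_pool_put(buf); |
| } |
| |
| bcmtr_pcie_tx_done_cblk_register(my_tx_done); |
| */ |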
| |
| void bcmtr_pcie_exit(void) |
| { |
| uint32_t i; |
| |
| if (bcmtr_pcie_data) |
| { |
| for (i = 0; i < bcmtr_max_devices_number; i++) |
| { |
| bcmtr_pcie_disconnect(i); |
| } |
| bcmos_free(bcmtr_pcie_data); |
| } |
| bcmtr_pcie_data = NULL; |
| } |
| |
| bcmos_errno bcmtr_pcie_get_statistics(uint8_t device, uint32_t clear, bcm_pcied_stat *stat) |
| { |
| bcm_pcied_comm_data *current_device; |
| |
| BCMTR_PARANOID_CHECK(); |
| |
| if (!stat) |
| return BCM_ERR_NULL; |
| |
| current_device = &bcmtr_pcie_data[device]; |
| |
| stat->rx_counter = current_device->rx_counter; |
| stat->tx_counter = current_device->tx_counter; |
| stat->rx_done_isr_counter = current_device->isrdata.rx_done_num; |
| stat->rx_err_isr_counter = current_device->isrdata.rx_err_num; |
| stat->tx_done_isr_counter = current_device->isrdata.tx_done_num; |
| stat->tx_err_isr_counter = current_device->isrdata.tx_err_num; |
| stat->rx_pcie_empty_counter = current_device->rx_pcie_empty_counter; |
| stat->tx_pcie_full_counter = current_device->tx_pcie_full_counter; |
| if (clear) |
| { |
| current_device->rx_counter = 0; |
| current_device->tx_counter = 0; |
| current_device->isrdata.rx_done_num = 0; |
| current_device->isrdata.rx_err_num = 0; |
| current_device->isrdata.tx_done_num = 0; |
| current_device->isrdata.tx_err_num = 0; |
| current_device->rx_pcie_empty_counter = 0; |
| current_device->tx_pcie_full_counter = 0; |
| } |
| |
| return BCMTR_SUCCESS; |
| } |
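| |
| /* |
| Hypothetical usage sketch: read-and-clear the counters of one device: |
| |
| bcm_pcied_stat stat; |
| if (bcmtr_pcie_get_statistics(device, 1, &stat) == BCMTR_SUCCESS) |
| pcie_print("rx=%u tx=%u rx_empty=%u tx_full=%u\n", |
| stat.rx_counter, stat.tx_counter, |
| stat.rx_pcie_empty_counter, stat.tx_pcie_full_counter); |
| */ |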
| |
| /*=============================================*/ |
| |
| /* used by dump procedures; may write to standard output or to a user-supplied buffer */ |
| #define pcie_bprint(buffer, fmt, args...) \ |
| {\ |
| if (!buffer)\ |
| pcie_print(fmt, ##args);\ |
| else\ |
| sprintf(buffer, fmt, ##args);\ |
| } |
| |
| static inline void dump_bd(char *output, char *title, uint32_t bd) |
| { |
| uint32_t owner, chn, length; |
| |
| owner = (bd & BCM_PCIED_BD_OWNERSHIP_MASK) >> BCM_PCIED_BD_OWNERSHIP_SHIFT; |
| chn = (bd & BCM_PCIED_BD_CHANNEL_ID_MASK) >> BCM_PCIED_BD_CHANNEL_ID_SHIFT; |
| length = bd & BCM_PCIED_BD_PKT_LENGTH_MASK; |
| |
| pcie_bprint(output, "\t%s: (0x%08x) owner = %s channel = 0x%x(%d) length = 0x%x(%d)\n", |
| title, (unsigned int)bd, owner == 0 ? "local" : "peer", (unsigned int)chn, chn, (unsigned int)length, length); |
| } |
| |
| static inline void dump_owner(char *output, char *title, uint32_t *current_entry) |
| { |
| uint32_t owner, entry; |
| |
| entry = bcm_pci_read32(current_entry); |
| owner = (entry & BCM_PCIED_BD_OWNERSHIP_MASK) >> BCM_PCIED_BD_OWNERSHIP_SHIFT; |
| pcie_bprint(output, "\t%s: owner = %s\n", title, owner == 0 ? "local" : "peer"); |
| } |
| |
| static inline void dump_pd(char *output, bcm_pcied_pd *current_pd) |
| { |
| uint32_t ddrl,ddrh,pktl,pkth,len,last,nextl,nexth; |
| |
| ddrl = bcm_pci_read32((uint32_t *)¤t_pd->ddr_buff_address_low); |
| pktl = bcm_pci_read32((uint32_t *)¤t_pd->pcie_pkt_address_low); |
| pkth = bcm_pci_read32((uint32_t *)¤t_pd->pcie_pkt_address_high); |
| len = bcm_pci_read32((uint32_t *)¤t_pd->length_and_isrenable); |
| last = bcm_pci_read32((uint32_t *)¤t_pd->last_next_indicator); |
| nextl = bcm_pci_read32((uint32_t *)¤t_pd->next_pd_address_low); |
| nexth = bcm_pci_read32((uint32_t *)¤t_pd->next_pd_address_high); |
| ddrh = bcm_pci_read32((uint32_t *)¤t_pd->ddr_buff_address_high); |
| |
| pcie_bprint(output, |
| "\t\t%-20s = 0x%08x\n" |
| "\t\t%-20s = 0x%08x\n" |
| "\t\t%-20s = 0x%08x\n" |
| "\t\t%-20s = 0x%08x\n" |
| "\t\t%-20s = 0x%08x\n" |
| "\t\t%-20s = 0x%08x\n" |
| "\t\t%-20s = 0x%08x\n" |
| "\t\t%-20s = 0x%08x\n", |
| "ddr_low", ddrl, |
| "pkt_low", pktl, |
| "pkt_high", pkth, |
| "length_and_isrenable", len, |
| "last_next_indicator", last, |
| "next_low", nextl, |
| "next_high", nexth, |
| "ddr_high", ddrh); |
| } |
| |
| |
| static int32_t dump_one_tx(char *output, uint8_t device, uint32_t current_index) |
| { |
| bcm_pcied_tu *current_tu; |
| bcm_pcied_comm_data *current_device; |
| char *buffer = output; |
| |
| current_device = &bcmtr_pcie_data[device]; |
| current_tu = ¤t_device->tx_tu_ring[current_index]; |
| dump_bd(buffer, "SBD", BCMOS_ENDIAN_LITTLE_TO_CPU_U32(*(uint32_t*)¤t_device->shadow_rbd[current_index])); |
| if (buffer) |
| buffer += strlen(buffer); |
| |
| dump_owner(buffer, "tx_owner", ¤t_device->tx_owner[current_index]); |
| if (buffer) |
| buffer += strlen(buffer); |
| |
| pcie_bprint(buffer, "\tPD data = 0x%lx\n", (unsigned long)¤t_tu->data_pd); |
| if (buffer) |
| buffer += strlen(buffer); |
| |
| dump_pd(buffer, ¤t_tu->data_pd); |
| if (buffer) |
| buffer += strlen(buffer); |
| |
| pcie_bprint(buffer, "\tPD shadow_rbd = 0x%lx\n", (unsigned long)¤t_tu->remote_to_local_pd); |
| if (buffer) |
| buffer += strlen(buffer); |
| |
| dump_pd(buffer, ¤t_tu->remote_to_local_pd); |
| if (buffer) |
| buffer += strlen(buffer); |
| |
| pcie_bprint(buffer, "\tTX netbuff = 0x%lx\n",(unsigned long)current_device->tx_nbuff_save[current_index]); |
| |
| if (output) |
| return strlen(output); |
| |
| return 0; |
| } |
| |
| static int32_t dump_one_rx(char *output, uint8_t device, uint32_t current_index) |
| { |
| bcm_pcied_tu *current_tu; |
| bcm_pcied_comm_data *current_device; |
| char *buffer = output; |
| |
| current_device = &bcmtr_pcie_data[device]; |
| current_tu = ¤t_device->rx_tu_ring[current_index]; |
| |
| dump_bd(buffer, "BD", BCMOS_ENDIAN_LITTLE_TO_CPU_U32(*(uint32_t*)¤t_device->local_bd[current_index])); |
| if (buffer) |
| buffer += strlen(buffer); |
| |
| dump_owner(buffer, "peer_tx_owner", ¤t_device->peer_tx_owner[current_index]); |
| if (buffer) |
| buffer += strlen(buffer); |
| |
| pcie_bprint(buffer, "\tPD data = 0x%lx\n", (unsigned long)¤t_tu->data_pd); |
| if (buffer) |
| buffer += strlen(buffer); |
| |
| dump_pd(buffer, ¤t_tu->data_pd); |
| if (buffer) |
| buffer += strlen(buffer); |
| |
| pcie_bprint(buffer, "\tPD shadow_rbd = 0x%lx\n", (unsigned long)¤t_tu->remote_to_local_pd); |
| if (buffer) |
| buffer += strlen(buffer); |
| |
| dump_pd(buffer, ¤t_tu->remote_to_local_pd); |
| if (buffer) |
| buffer += strlen(buffer); |
| |
| pcie_bprint(buffer, "\tRX netbuff = 0x%lx\n",(unsigned long)current_device->rx_nbuff_save[current_index]); |
| |
| if (output) |
| return strlen(output); |
| return 0; |
| } |
| |
| bcmos_errno bcmtr_pcie_tx_dump(char *output, uint8_t device, int32_t start, int32_t number_of_entries) |
| { |
| int32_t i; |
| int32_t len = 0; |
| char *buffer = output; |
| int32_t from = start; |
| int32_t to; |
| |
| pcie_bprint(buffer, "Dump Tx ring\n"); |
| if (!bcmtr_pcie_data) |
| { |
| pcie_print("Driver is not initialized\n"); |
| return BCM_ERR_NORES; |
| } |
| |
| if (device >= bcmtr_max_devices_number) |
| { |
| pcie_print("Device parameter is greater than maximum devices(%d)\n", bcmtr_max_devices_number); |
| return BCM_ERR_RANGE; |
| } |
| |
| if (device != bcmtr_pcie_data[device].isrdata.device) |
| { |
| pcie_print("*** Data Corrupted ***\n"); |
| return BCM_ERR_RANGE; |
| } |
| |
| if (start == -1) /* from current, number of entries */ |
| from = bcmtr_pcie_data[device].current_tx; |
| |
| to = from + number_of_entries; |
| |
| if (to == from) |
| to++; |
| |
| if (to > bcmtr_pcie_data[device].txq_length) |
| to = bcmtr_pcie_data[device].txq_length; |
| |
| if (buffer) |
| len = strlen(buffer); |
| |
| for (i = from; i < to; i++) |
| { |
| if (buffer) |
| buffer = output + len; /* move buffer to the next space */ |
| |
| if (i == bcmtr_pcie_data[device].current_tx) |
| { |
| pcie_bprint(buffer, "Current = %d\n", i); |
| } |
| else |
| { |
| pcie_bprint(buffer, "Index = %d\n", i); |
| } |
| |
| if (buffer) |
| { |
| /* add to len the next line - buffer contains only the last line */ |
| len += strlen(buffer); |
| buffer = output + len; |
| } |
| /* returns the length of the last lines */ |
| len += dump_one_tx(buffer, device, i); |
| } |
| |
| return BCM_ERR_OK; |
| } |
| |
| bcmos_errno bcmtr_pcie_rx_dump(char *output, uint8_t device, int32_t start, int32_t number_of_entries) |
| { |
| int32_t i; |
| int32_t len = 0; |
| char *buffer = output; |
| int32_t from = start; |
| int32_t to; |
| |
| pcie_bprint(buffer, "Dump Rx ring\n"); |
| if (!bcmtr_pcie_data) |
| { |
| pcie_print("Driver is not initialized\n"); |
| return BCM_ERR_NORES; |
| } |
| |
| if (device >= bcmtr_max_devices_number) |
| { |
| pcie_print("Device parameter is greater than maximum devices(%d)\n", bcmtr_max_devices_number); |
| return BCM_ERR_RANGE; |
| } |
| |
| if (device != bcmtr_pcie_data[device].isrdata.device) |
| { |
| pcie_print("*** Data Corrupted ***\n"); |
| return BCM_ERR_RANGE; |
| } |
| |
| if (start == -1) /* from current, number of entries */ |
| from = bcmtr_pcie_data[device].current_rx; |
| to = from + number_of_entries; |
| if (to == from) |
| to++; |
| if (to > bcmtr_pcie_data[device].rxq_length) |
| to = bcmtr_pcie_data[device].rxq_length; |
| |
| if (buffer) |
| len = strlen(buffer); |
| for (i = from; i < to; i++) |
| { |
| if (buffer) |
| buffer = output + len; |
| if (i == bcmtr_pcie_data[device].current_rx) |
| { |
| pcie_bprint(buffer, "Current = %d\n", i); |
| } |
| else |
| { |
| pcie_bprint(buffer, "Index = %d\n", i); |
| } |
| if (buffer) |
| { |
| /* add to len the next line - buffer contains only the last line */ |
| len += strlen(buffer); |
| buffer = output + len; |
| } |
| /* returns the length of the last lines */ |
| len += dump_one_rx(buffer, device, i); |
| } |
| |
| return BCM_ERR_OK; |
| } |
| |
| #ifdef __KERNEL__ |
| EXPORT_SYMBOL(bcmtr_pcie_receive); |
| EXPORT_SYMBOL(bcmtr_pcie_init); |
| EXPORT_SYMBOL(bcmtr_pcie_exit); |
| EXPORT_SYMBOL(bcmtr_pcie_pre_connect); |
| EXPORT_SYMBOL(bcmtr_pcie_connect); |
| EXPORT_SYMBOL(bcmtr_pcie_disconnect); |
| EXPORT_SYMBOL(bcmtr_pcie_rx_irq_cblk_register); |
| EXPORT_SYMBOL(bcmtr_pcie_rx_irq_cblk_unregister); |
| EXPORT_SYMBOL(bcmtr_pcie_tx_irq_cblk_register); |
| EXPORT_SYMBOL(bcmtr_pcie_tx_irq_cblk_unregister); |
| EXPORT_SYMBOL(bcmtr_pcie_send); |
| EXPORT_SYMBOL(bcmtr_pcie_tx_dump); |
| EXPORT_SYMBOL(bcmtr_pcie_rx_dump); |
| EXPORT_SYMBOL(bcmtr_pcie_rxint_enable); |
| EXPORT_SYMBOL(bcmtr_pcie_rxint_disable); |
| EXPORT_SYMBOL(bcmtr_pcie_rxint_clear); |
| EXPORT_SYMBOL(bcmtr_pcie_rx_irq_handler); |
| EXPORT_SYMBOL(bcmtr_pcie_tx_irq_handler); |
| EXPORT_SYMBOL(bcmtr_pcie_get_statistics); |
| #endif |
| |