blob: 624101e064274bc3fdf666118c1a7004a854d1b5 [file] [log] [blame]
Shad Ansari2f7f9be2017-06-07 13:34:53 -07001/*
2<:copyright-BRCM:2016:DUAL/GPL:standard
3
4 Broadcom Proprietary and Confidential.(c) 2016 Broadcom
5 All Rights Reserved
6
7Unless you and Broadcom execute a separate written software license
8agreement governing use of this software, this software is licensed
9to you under the terms of the GNU General Public License version 2
10(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
11with the following added to such license:
12
13 As a special exception, the copyright holders of this software give
14 you permission to link this software with independent modules, and
15 to copy and distribute the resulting executable under terms of your
16 choice, provided that you also meet, for each linked independent
17 module, the terms and conditions of the license of that module.
18 An independent module is a module which is not derived from this
19 software. The special exception does not apply to any modifications
20 of the software.
21
22Not withstanding the above, under no circumstances may you combine
23this software in any way with any other Broadcom software provided
24under a license other than the GPL, without Broadcom's express prior
25written consent.
26
27:>
28 */
29#include <bcmtr_pcie.h>
30#include <bcmolt_tr_pcie_specific.h>
31#include "bcmtr_pcie_sw_queue.h"
32
/*
 * bcmtr_pcie_sw_queue.c
 * Software layer on top of the low-level PCIe driver that adds support
 * for s/w queues and per-channel priority
 */
38typedef struct
39{
40 bcmos_buf_queue txq; /* Transmit queue */
41 bcmos_buf_queue rxq; /* Receive queue */
42 uint32_t max_hwq_size;
43 uint32_t max_swq_size;
44 bcmtr_swq_rx_cb rx_cb; /* Optional rx_cb for callback-based RX buffer delivery */
45} pcie_swq;
46
47static pcie_swq swq_info[BCMTR_MAX_OLTS][BCMTR_PCIE_PRTY__NUM_OF];
48static uint32_t hwq_occupancy[BCMTR_MAX_OLTS]; /* Number of unacknowledged buffers in hw tx queue */
49static bcmos_bool swq_initialized[BCMTR_MAX_OLTS];
50static bcmos_fastlock tx_lock[BCMTR_MAX_OLTS];
51static bcmos_fastlock rx_lock[BCMTR_MAX_OLTS];
52
53#define BCMTR_SWQ_GET_RETURN_IF_ERROR(device,prty,swq) \
54 do { \
55 if (device >= BCMTR_MAX_OLTS) \
56 return BCM_ERR_PARM; \
57 swq = &swq_info[device][prty]; \
58 } while (0)
59
60static inline long _bcmtr_swq_tx_lock(uint8_t device)
61{
62 return bcmos_fastlock_lock(&tx_lock[device]);
63}
64
65static inline void _bcmtr_swq_tx_unlock(uint8_t device, long flags)
66{
67 bcmos_fastlock_unlock(&tx_lock[device], flags);
68}
69
70static inline long _bcmtr_swq_rx_lock(uint8_t device)
71{
72 return bcmos_fastlock_lock(&rx_lock[device]);
73}
74
75static inline void _bcmtr_swq_rx_unlock(uint8_t device, long flags)
76{
77 bcmos_fastlock_unlock(&rx_lock[device], flags);
78}
79
80/** Tx done callback.
81 * Must be called under tx_lock
82 */
83static void _bcmtr_swq_tx_done_cb(uint8_t device, bcmos_buf *buf)
84{
85 BUG_ON(!hwq_occupancy[device]);
86 --hwq_occupancy[device];
87 bcmos_buf_free(buf);
88}
89
90/* Initialize PCI software queue module */
91bcmos_errno bcmtr_swq_init(void)
92{
93 return bcmtr_pcie_tx_done_cblk_register(_bcmtr_swq_tx_done_cb);
94}
95
96/* Cleanup software queue module
97 */
98void bcmtr_swq_exit(void)
99{
100 int i;
101
102 /* Unregister from bcmtr_pcie driver */
103 bcmtr_pcie_tx_done_cblk_unregister();
104
105 for (i = 0; i < BCMTR_MAX_OLTS; i++)
106 bcmtr_swq_device_exit(i);
107}
108
109/* Initialize PCI software queue module */
110bcmos_errno bcmtr_swq_device_init(uint8_t device)
111{
112 bcmtr_pcie_prty prty;
113
114 if (device >= BCMTR_MAX_OLTS)
115 return BCM_ERR_PARM;
116
117 if (swq_initialized[device])
118 return BCM_ERR_ALREADY;
119
120 bcmos_fastlock_init(&tx_lock[device], 0);
121 bcmos_fastlock_init(&rx_lock[device], 0);
122 for (prty = 0; prty < BCMTR_PCIE_PRTY__NUM_OF; prty++)
123 {
124 pcie_swq *swq = &swq_info[device][prty];
125 bcmos_buf_queue_init(&swq->txq);
126 bcmos_buf_queue_init(&swq->rxq);
127 swq->rx_cb = NULL;
128 swq->max_hwq_size = swq->max_swq_size = 0;
129 }
130 swq_initialized[device] = BCMOS_TRUE;
131
132 return BCM_ERR_OK;
133}
134
135/* Cleanup software queue module */
136void bcmtr_swq_device_exit(uint8_t device)
137{
138 bcmtr_pcie_prty prty;
139
140 if (!swq_initialized[device])
141 return;
142
143 for (prty = 0; prty < BCMTR_PCIE_PRTY__NUM_OF; prty++)
144 {
145 pcie_swq *swq = &swq_info[device][prty];
146 bcmos_buf *buf;
147
148 while ((buf=bcmos_buf_queue_get(&swq->txq)))
149 bcmos_buf_free(buf);
150 while ((buf=bcmos_buf_queue_get(&swq->rxq)))
151 bcmos_buf_free(buf);
152 }
153 swq_initialized[device] = BCMOS_FALSE;
154
155 return;
156}
157
158/** Send buffer to the peer
159 * \param[in] device Maple device index
160 * \param[in] channel Channel id (opaque to the bcmtr_pcie driver)
161 * \param[in] buf Buffer to be transferred
162 * \returns: 0 in case of success or error code < 0
163 */
164bcmos_errno bcmtr_swq_send(uint8_t device, uint8_t channel, bcmos_buf *buf)
165{
166 bcmtr_pcie_prty prty = (channel >= BCMTR_SWQ_FIRST_URGENT_CHANNEL) ?
167 BCMTR_PCIE_PRTY_URGENT : BCMTR_PCIE_PRTY_NORMAL;
168 pcie_swq *swq;
169 bcmos_bool was_empty;
170 bcmos_bool hw_queue_full;
171 bcmos_errno err;
172 long flags;
173
174 BCMTR_SWQ_GET_RETURN_IF_ERROR(device, prty, swq);
175
176 /* Store channel in the buffer */
177 bcmos_buf_channel_set(buf, channel);
178
179 /* Prevent concurrent access to the queue */
180 flags = _bcmtr_swq_tx_lock(device);
181
182 /* Store q-was-empty status */
183 was_empty = bcmos_buf_queue_is_empty(&swq->txq);
184
185 /* Check if max h/w queue occupancy isn't exceeded. If it isn't and s/w queue is empty
186 * submit directly to the h/w queue.
187 */
188 hw_queue_full = (swq->max_hwq_size && hwq_occupancy[device] >= swq->max_hwq_size);
189 if (was_empty && !hw_queue_full)
190 {
191 ++hwq_occupancy[device];
192 _bcmtr_swq_tx_unlock(device, flags);
193 err = bcmtr_pcie_send(device, channel, buf);
194 if (err)
195 {
196 flags = _bcmtr_swq_tx_lock(device);
197 --hwq_occupancy[device];
198 /* If sw q is enabled, enque the buffer, otherwise, just return */
199 if (swq->max_swq_size || swq->max_hwq_size)
200 {
201 bcmos_buf_queue_put(&swq->txq, buf);
202 err = BCM_ERR_OK;
203 }
204 _bcmtr_swq_tx_unlock(device, flags);
205 }
206 }
207 else
208 {
209 bcmos_buf_queue_put(&swq->txq, buf);
210 _bcmtr_swq_tx_unlock(device, flags);
211 err = BCM_ERR_OK;
212 }
213
214
215 return err;
216}
217
218/** Receive packet from device
219 * \param[in] device Maple device index
220 * \param[in] prty Priority
221 * \param[out] channel message channel from the BD
222 * \param[out] buf pointer to network buffer containing the
223 * received packet
224 * \returns: 0 in case of success or error code < 0
225 */
226bcmos_errno bcmtr_swq_receive(uint8_t device, bcmtr_pcie_prty prty, uint8_t *channel, bcmos_buf **buf)
227{
228 pcie_swq *swq;
229 long flags;
230 bcmos_errno err;
231
232 BCMTR_SWQ_GET_RETURN_IF_ERROR(device, prty, swq);
233
234 /* Peevent concurent access to the queue */
235 flags = _bcmtr_swq_rx_lock(device);
236 *buf = bcmos_buf_queue_get(&swq->rxq);
237 if (*buf)
238 {
239 *channel = bcmos_buf_channel(*buf);
240 err = BCM_ERR_OK;
241 }
242 else
243 {
244 err = BCM_ERR_QUEUE_EMPTY;
245 }
246 _bcmtr_swq_rx_unlock(device, flags);
247
248 return err;
249}
250
251/** Configure TX queue.
252 */
253bcmos_errno bcmtr_swq_tx_queue_cfg(uint8_t device, bcmtr_pcie_prty prty, uint32_t hardq_size, uint32_t softq_size)
254{
255 pcie_swq *swq;
256
257 BCMTR_SWQ_GET_RETURN_IF_ERROR(device, prty, swq);
258
259 swq->max_hwq_size = hardq_size;
260 swq->max_swq_size = softq_size;
261
262 return BCM_ERR_OK;
263}
264
265/** Register for "data received indication"
266 * \param[in] device Maple device index
267 * \param[in] prty Priority
268 * \param[in] cb Callback pointer
269 * \returns: 0 in case of success or error code < 0
270 */
271bcmos_errno bcmtr_swq_rx_cb_register(uint8_t device, bcmtr_pcie_prty prty, bcmtr_swq_rx_cb rx_cb)
272{
273 if (device >= BCMTR_MAX_OLTS)
274 return BCM_ERR_PARM;
275 swq_info[device][prty].rx_cb = rx_cb;
276
277 return BCM_ERR_OK;
278}
279
280/** Unregister "data received indication" callback
281 * \param[in] device Maple device index
282 * \param[in] prty Priority
283 * \returns: 0 in case of success or error code < 0
284 */
285bcmos_errno bcmtr_swq_rx_cb_unregister(uint8_t device, bcmtr_pcie_prty prty)
286{
287 if (device >= BCMTR_MAX_OLTS)
288 return BCM_ERR_PARM;
289 swq_info[device][prty].rx_cb = NULL;
290
291 return BCM_ERR_OK;
292}
293
294/* Fetch data from the hw to the sw queue. */
295void bcmtr_swq_rx_poll(uint8_t device, uint32_t nbuf[])
296{
297 uint8_t channel;
298 bcmos_buf *buf;
299 int n[BCMTR_PCIE_PRTY__NUM_OF] = {};
300 long flags;
301 bcmos_errno err;
302
303 do
304 {
305 bcmtr_pcie_prty prty;
306 pcie_swq *swq;
307
308 err = bcmtr_pcie_receive(device, &channel, &buf);
309 if (err != BCM_ERR_OK)
310 break;
311 prty = (channel >= BCMTR_SWQ_FIRST_URGENT_CHANNEL) ?
312 BCMTR_PCIE_PRTY_URGENT : BCMTR_PCIE_PRTY_NORMAL;
313 /* If callback based delivery - deliver buffer now, otherwise, place on s/w queue */
314 swq = &swq_info[device][prty];
315 if (swq->rx_cb)
316 {
317 swq->rx_cb(device, channel, buf);
318 }
319 else
320 {
321 bcmos_buf_channel_set(buf, channel);
322 flags = _bcmtr_swq_rx_lock(device);
323 bcmos_buf_queue_put(&swq->rxq, buf);
324 _bcmtr_swq_rx_unlock(device, flags);
325 }
326 ++n[prty];
327 } while (BCMOS_TRUE);
328
329
330 nbuf[0] = n[0];
331 nbuf[1] = n[1];
332}
333
334/* Submit data from the sw TX queue to the h/w */
335static void _bcmtr_swq_tx_submit_prty(uint8_t device, bcmtr_pcie_prty prty)
336{
337 bcmos_errno err = BCM_ERR_OK;
338 pcie_swq *swq;
339 bcmos_buf *buf;
340 uint8_t channel;
341 bcmos_bool hw_queue_full;
342 long flags;
343
344 swq = &swq_info[device][prty];
345 do
346 {
347 flags = _bcmtr_swq_tx_lock(device);
348
349 /* Check if not over limit */
350 hw_queue_full = (swq->max_hwq_size && hwq_occupancy[device] >= swq->max_hwq_size);
351 if (hw_queue_full)
352 {
353 _bcmtr_swq_tx_unlock(device, flags);
354 break;
355 }
356
357 /* Get from s/w queue and submit to the h/w queue */
358 buf = bcmos_buf_queue_peek(&swq->rxq);
359 _bcmtr_swq_tx_unlock(device, flags);
360 if (!buf)
361 break;
362
363 channel = bcmos_buf_channel(buf);
364 err = bcmtr_pcie_send(device, channel, buf);
365 if (err != BCM_ERR_OK)
366 break;
367
368 flags = _bcmtr_swq_tx_lock(device);
369 ++hwq_occupancy[device];
370 bcmos_buf_queue_get(&swq->txq);
371 _bcmtr_swq_tx_unlock(device, flags);
372
373 } while (BCMOS_TRUE);
374}
375
376/* Submit data from the sw TX queue to the h/w */
377void bcmtr_swq_tx_submit(uint8_t device)
378{
379 if (bcmtr_pcie_tx_collect(device) > 0)
380 {
381 _bcmtr_swq_tx_submit_prty(device, BCMTR_PCIE_PRTY_URGENT);
382 _bcmtr_swq_tx_submit_prty(device, BCMTR_PCIE_PRTY_NORMAL);
383 }
384}
385
386#ifdef __KERNEL__
387EXPORT_SYMBOL(bcmtr_swq_tx_queue_cfg);
388EXPORT_SYMBOL(bcmtr_swq_send);
389#endif