  1/*
2<:copyright-BRCM:2016:DUAL/GPL:standard
3
4 Broadcom Proprietary and Confidential.(c) 2016 Broadcom
5 All Rights Reserved
6
7Unless you and Broadcom execute a separate written software license
8agreement governing use of this software, this software is licensed
9to you under the terms of the GNU General Public License version 2
10(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
11with the following added to such license:
12
13 As a special exception, the copyright holders of this software give
14 you permission to link this software with independent modules, and
15 to copy and distribute the resulting executable under terms of your
16 choice, provided that you also meet, for each linked independent
17 module, the terms and conditions of the license of that module.
18 An independent module is a module which is not derived from this
19 software. The special exception does not apply to any modifications
20 of the software.
21
22Not withstanding the above, under no circumstances may you combine
23this software in any way with any other Broadcom software provided
24under a license other than the GPL, without Broadcom's express prior
25written consent.
26
27:>
28 */
29
30#include "bcmos_system.h"
31
32/* task control blocks */
33STAILQ_HEAD(task_list, bcmos_task) task_list = STAILQ_HEAD_INITIALIZER(task_list);
34
35/* module control blocks */
36bcmos_module *bcmos_modules[BCMOS_MODULE_ID__NUM_OF];
37
38/* event control blocks */
39bcmos_event *bcmos_events[BCMOS_EVENT_ID__NUM_OF];
40
41/* Global trace level */
42bcmos_trace_level bcmos_sys_trace_level = BCMOS_TRACE_LEVEL_ERROR;
43
44/* Global OS mutex */
45bcmos_mutex bcmos_res_lock;
46
47/* Total memory occupied by block pools */
48uint32_t bcmos_total_blk_pool_size;
49
50/* Total memory occupied by message pools */
51uint32_t bcmos_total_msg_pool_size;
52
 53f_bcmolt_sw_error_handler sw_error_handler; /* Software error handler callback */
54
55/* Block pools */
56static STAILQ_HEAD(blk_pool_list, bcmos_blk_pool) blk_pool_list = STAILQ_HEAD_INITIALIZER(blk_pool_list);
57
58/* Message pools */
59static STAILQ_HEAD(msg_pool_list, bcmos_blk_pool) msg_pool_list = STAILQ_HEAD_INITIALIZER(msg_pool_list);
60
61/* Message queues */
62static STAILQ_HEAD(msg_queue_list, bcmos_msg_queue) msg_queue_list = STAILQ_HEAD_INITIALIZER(msg_queue_list);
63
64/* Message queue groups */
65static STAILQ_HEAD(msg_qgroup_list, bcmos_msg_qgroup) msg_qgroup_list = STAILQ_HEAD_INITIALIZER(msg_qgroup_list);
66
67/* Lock used to protect msg registration / deregistration */
68static bcmos_fastlock bcmos_msg_register_lock;
69
70/* Shutdown mode: when this is set, we expect message handler deregistration to happen while messages are still being
71 * sent/received. We should handle this gracefully. */
72static bcmos_bool bcmos_msg_shutdown_mode = BCMOS_FALSE;
73
74/* Timer management block */
75typedef struct bcmos_timer_pool
76{
77 bcmos_fastlock lock; /* Pool protection lock */
78 bcmos_sys_timer sys_timer; /* System timer handle */
79 uint32_t num_active_timers; /* Number of active timers in the pool */
80#ifdef BCMOS_TIMER_RB_TREE
81 RB_HEAD(bcmos_timers, bcmos_timer) pool; /* Timer pool. RB tree */
82#else
83 TAILQ_HEAD(bcmos_timers, bcmos_timer) pool; /* Timer pool: TAILQ */
84#endif
85} bcmos_timer_pool;
86
87static int32_t _bcmos_timer_compare(bcmos_timer *t1, bcmos_timer *t2);
88
89/*
 90 * Macros for RB-TREE and TAILQ-based timer pool implementations
91 */
92
93#ifdef BCMOS_TIMER_RB_TREE
94
95#ifdef __arm__
96/* ARM compiler doesn't like unused inline functions. Disable the warning */
97#pragma diag_suppress 177
98#endif
99
100/* Generate RB tree functions */
101RB_GENERATE_INLINE(bcmos_timers, bcmos_timer, entry, _bcmos_timer_compare)
102
103#define TMR_POOL_INIT(tmr_pool) RB_INIT(&(tmr_pool)->pool)
104#define TMR_POOL_INSERT(tmr_pool, tmr) RB_INSERT(bcmos_timers, &(tmr_pool)->pool, tmr)
105#define TMR_POOL_REMOVE(tmr_pool, tmr) RB_REMOVE(bcmos_timers, &(tmr_pool)->pool, tmr)
106#define TMR_POOL_FIRST(tmr_pool) RB_MIN(bcmos_timers, &(tmr_pool)->pool)
107
108#else
109
110#define TMR_POOL_INIT(tmr_pool) TAILQ_INIT(&(tmr_pool)->pool)
111
112#define TMR_POOL_INSERT(tmr_pool, tmr) \
113 do { \
114 bcmos_timer *_last = TAILQ_LAST(&(tmr_pool)->pool, bcmos_timers); \
115 if (_last) \
116 { \
117 if (_bcmos_timer_compare(tmr, _last) >= 0) \
118 { \
119 TAILQ_INSERT_TAIL(&(tmr_pool)->pool, tmr, entry); \
120 } \
121 else \
122 { \
123 bcmos_timer *_t; \
124 uint32_t iter = 0; \
125 TAILQ_FOREACH(_t, &(tmr_pool)->pool, entry) \
126 { \
127 BUG_ON(iter >= (tmr_pool)->num_active_timers);\
128 ++iter; \
129 if (_bcmos_timer_compare(tmr, _t) <= 0) \
130 { \
131 TAILQ_INSERT_BEFORE(_t, tmr, entry); \
132 break; \
133 } \
134 } \
135 } \
136 } \
137 else \
138 { \
139 TAILQ_INSERT_HEAD(&(tmr_pool)->pool, tmr, entry); \
140 } \
141 ++(tmr_pool)->num_active_timers; \
142 } while (0)
143
144#define TMR_POOL_REMOVE(tmr_pool, tmr) \
145 do { \
146 BUG_ON(!(tmr_pool)->num_active_timers); \
147 TAILQ_REMOVE(&(tmr_pool)->pool, tmr, entry); \
148 TAILQ_NEXT(tmr, entry) = NULL; \
149 --(tmr_pool)->num_active_timers; \
150 } while (0)
151
152#define TMR_POOL_FIRST(tmr_pool) TAILQ_FIRST(&(tmr_pool)->pool)
153
154#endif
155
156#define BCMOS_TIMER_IS_RUNNING(tmr) ((tmr->flags & BCMOS_TIMER_FLAG_RUNNING) != 0)
157#define BCMOS_TIMER_IS_EXPIRED(tmr) ((tmr->flags & BCMOS_TIMER_FLAG_EXPIRED) != 0)
158#define BCMOS_TIMER_IS_VALID(tmr) ((tmr->flags & BCMOS_TIMER_FLAG_VALID) != 0)
159#define BCMOS_TIMER_IS_ACTIVE(tmr) ((tmr->flags & BCMOS_TIMER_FLAG_ACTIVE) != 0)
160
161static bcmos_bool bcmos_initialized;
162static bcmos_timer_pool tmr_pool;
163static void _sys_timer_handler(void *data);
164
165/*
166 * Print variables
167 */
168
169static bcmos_print_redirect_mode print_redirect_mode;
170static bcmos_print_redirect_cb print_redirect_cb;
171static void *print_redirect_cb_data;
172static bcmos_mutex bcmos_print_lock;
173#ifdef BCMOS_BUF_POOL_SIZE
174static bcmos_blk_pool sys_buf_pool;
175static bcmos_errno bcmos_buf_pool_create(void);
176#endif
177
178/** Initialize system library
179 * \ingroup system
180 * Must be called before any other system function
181 * \returns 0=OK or error code <0
182 */
183bcmos_errno bcmos_init(void)
184{
185 bcmos_errno rc;
186
187 if (bcmos_initialized)
188 return BCM_ERR_OK;
189 rc = bcmos_sys_init();
190 bcmos_mutex_create(&bcmos_res_lock, 0, "res_lock");
191 bcmos_mutex_create(&bcmos_print_lock, 0, "print_lock");
192 TMR_POOL_INIT(&tmr_pool);
193 bcmos_fastlock_init(&tmr_pool.lock, 0);
194 bcmos_fastlock_init(&bcmos_msg_register_lock, 0);
195 rc = rc ? rc : bcmos_sys_timer_create(&tmr_pool.sys_timer, _sys_timer_handler, &tmr_pool);
196
197 /* Create default buffer pool if so requested by compilation options */
198#ifdef BCMOS_BUF_POOL_SIZE
199 rc = rc ? rc : bcmos_buf_pool_create();
200#endif
201
202 if (!rc)
203 {
204 bcmos_initialized = BCMOS_TRUE;
205 }
206
207 return rc;
208}
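
/* Illustrative usage sketch (not part of the original file, compiled out): a typical
 * bring-up/tear-down sequence. bcmos_init() must run before any other bcmos_* service
 * and bcmos_exit() undoes it; the error-handling policy shown here is an assumption
 * of this example, not something this library mandates.
 */
#if 0
static bcmos_errno example_os_bringup(void)
{
    bcmos_errno rc = bcmos_init();
    if (rc != BCM_ERR_OK)
        return rc;              /* system services are unusable - bail out */

    /* ... create tasks, modules, queues and timers here ... */

    bcmos_exit();               /* tear everything down on shutdown */
    return BCM_ERR_OK;
}
#endif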
209
210/** Cleanup system library
211 * \ingroup system
212 */
213void bcmos_exit(void)
214{
215 if (!bcmos_initialized)
216 return;
217 bcmos_sys_timer_destroy(&tmr_pool.sys_timer);
218 bcmos_mutex_destroy(&bcmos_print_lock);
219 bcmos_mutex_destroy(&bcmos_res_lock);
220#ifdef BCMOS_BUF_POOL_SIZE
221 bcmos_blk_pool_reset(&sys_buf_pool);
222 bcmos_blk_pool_destroy(&sys_buf_pool);
223#endif
224 bcmos_sys_exit();
225 bcmos_initialized = BCMOS_FALSE;
226}
227
228/*
229 * Common task services
230 */
231
232/* Query task info */
233bcmos_errno bcmos_task_query(const bcmos_task *task, bcmos_task_parm *parm)
234{
235 if (task == NULL || task->magic != BCMOS_TASK_MAGIC || parm == NULL)
236 {
237 return BCM_ERR_PARM;
238 }
239 *parm = task->parm;
240 return BCM_ERR_OK;
241}
242
243/** Task iterator
244 * \param[in] prev Previous task. *prev==NULL - get first
245 * \return: BCM_ERR_OK, BCM_ERR_NOENT, BCM_ERR_NO_MORE
246 */
247bcmos_errno bcmos_task_get_next(bcmos_task **prev)
248{
249 bcmos_task *task;
250 if (prev == NULL)
251 {
252 return BCM_ERR_PARM;
253 }
254 task = *prev;
255 if (task && task->magic != BCMOS_TASK_MAGIC)
256 {
257 return BCM_ERR_PARM;
258 }
259 if (task)
260 {
261 task = STAILQ_NEXT(task, list);
262 }
263 else
264 {
265 task = STAILQ_FIRST(&task_list);
266 }
267 *prev = task;
268 if (!task)
269 {
270 return BCM_ERR_NO_MORE;
271 }
272 return BCM_ERR_OK;
273}
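
/* Illustrative usage sketch (not part of the original file, compiled out): walking the
 * global task list with the iterator above and printing each task name. bcmos_printf()
 * is assumed to be provided by the print services of this OS abstraction.
 */
#if 0
static void example_dump_tasks(void)
{
    bcmos_task *task = NULL;    /* NULL means "start from the first task" */
    bcmos_task_parm parm;

    while (bcmos_task_get_next(&task) == BCM_ERR_OK)
    {
        if (bcmos_task_query(task, &parm) == BCM_ERR_OK)
            bcmos_printf("task: %s\n", parm.name);
    }
}
#endif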
274
275/*
276 * Message queue
277 */
278
279static void _bcmos_msgq_notify(bcmos_msg_queue *q, const char *txt)
280{
281 BCMOS_TRACE_INFO("Msg queue %s: %s\n", q->q.parm.name, txt);
282}
283
284/* Decrement in-queue statistics */
285static inline void _bcmos_msgq_stat_dec(bcmos_msg_queue_nw *q)
286{
287 bcmos_msg_queue_stat *stat = &q->stat;
288 uint32_t old_in = stat->msg_in;
289
290 if (stat->msg_in == 0)
291 {
292 BCMOS_TRACE_ERR("Attempt to decrement statistics for an empty queue ('%s')\n", q->parm.name);
293 }
294 else
295 {
296 --stat->msg_in;
297 ++stat->msg_received;
298 }
299
300 if (old_in == q->parm.low_wm)
301 {
302 q->parm.notify((bcmos_msg_queue *)q, "becomes uncongested");
303 stat->is_congested = BCMOS_FALSE;
304 }
305}
306
307/* Increment in-queue statistics */
308static inline void _bcmos_msgq_stat_inc(bcmos_msg_queue_nw *q)
309{
310 bcmos_msg_queue_stat *stat = &q->stat;
311 uint32_t old_in = stat->msg_in;
312
313 ++stat->msg_in;
314 ++stat->msg_sent;
315 if (old_in == q->parm.high_wm)
316 {
317 q->parm.notify((bcmos_msg_queue *)q, "becomes congested");
318 stat->is_congested = BCMOS_TRUE;
319 }
320 if (stat->is_congested)
321 ++stat->msg_almost_full;
322}
323
324static void _bcmos_qgroup_notify(bcmos_msg_qgroup *qgroup, const char *txt)
325{
 326 BCMOS_TRACE_INFO("Msg queue group %s: %s\n", qgroup->parm.name, txt);
327}
328
329/* Decrement in-queue statistics for queue group */
330static inline void _bcmos_qgroup_stat_dec(bcmos_msg_qgroup *qgroup)
331{
332 bcmos_msg_queue_stat *stat = &qgroup->stat;
333 uint32_t old_in = stat->msg_in;
334
335 if (stat->msg_in == 0)
336 {
337 BCMOS_TRACE_ERR("Attempt to decrement statistics for an empty queue ('%s')\n", qgroup->parm.name);
338 }
339 else
340 {
341 --stat->msg_in;
342 ++stat->msg_received;
343 }
344
345 if (old_in == qgroup->parm.low_wm)
346 {
347 qgroup->parm.notify(qgroup, "becomes uncongested");
348 stat->is_congested = BCMOS_FALSE;
349 }
350}
351
 352/* Increment in-queue statistics for queue group */
353static inline void _bcmos_qgroup_stat_inc(bcmos_msg_qgroup *qgroup)
354{
355 bcmos_msg_queue_stat *stat = &qgroup->stat;
356 uint32_t old_in = stat->msg_in;
357
358 ++stat->msg_in;
359 ++stat->msg_sent;
360 if (old_in == qgroup->parm.high_wm)
361 {
362 qgroup->parm.notify(qgroup, "becomes congested");
363 stat->is_congested = BCMOS_TRUE;
364 }
365 if (stat->is_congested)
366 ++stat->msg_almost_full;
367}
368
369
370/* Get message from queue.
 371 * The urgent queue is checked first, then the regular queue
372 * Must be called under lock!
373 * Returns msg or NULL if queue is empty
374 */
375static inline bcmos_msg *_bcmos_msg_get(bcmos_msg_queue_nw *q)
376{
377 bcmos_msg *msg;
378
379 msg = STAILQ_FIRST(&q->msgl_urg);
380 if (msg)
381 {
382 STAILQ_REMOVE_HEAD(&q->msgl_urg, next);
383 _bcmos_msgq_stat_dec(q);
384 return msg;
385 }
386
387 msg = STAILQ_FIRST(&q->msgl);
388 if (msg)
389 {
390 STAILQ_REMOVE_HEAD(&q->msgl, next);
391 _bcmos_msgq_stat_dec(q);
392 }
393
394 return msg;
395}
396
397/* Put message on queue.
398 * Must be called under lock!
399 * Returns error in case of queue overflow
400 */
401static inline bcmos_errno _bcmos_msg_put(bcmos_msg_queue_nw *q, bcmos_msg *msg, bcmos_msg_send_flags flags)
402{
403 /* Overflow check */
404 if (q->stat.msg_in >= q->parm.size)
405 {
406 if (!(flags & BCMOS_MSG_SEND_NOLIMIT))
407 {
408 ++q->stat.msg_discarded;
409 return BCM_ERR_OVERFLOW;
410 }
411 }
412 /* Put onto the relevant queue */
413 if ((flags & BCMOS_MSG_SEND_URGENT))
414 STAILQ_INSERT_TAIL(&q->msgl_urg, msg, next);
415 else
416 STAILQ_INSERT_TAIL(&q->msgl, msg, next);
417 _bcmos_msgq_stat_inc(q);
418
419 return BCM_ERR_OK;
420}
421
422/* Create message queue without wait support */
423static void bcmos_msg_queue_nw_init(bcmos_msg_queue_nw *q, const bcmos_msg_queue_parm *parm)
424{
425 q->parm = *parm;
426 STAILQ_INIT(&q->msgl);
427 STAILQ_INIT(&q->msgl_urg);
428 bcmos_fastlock_init(&q->lock, parm->flags);
429 memset(&q->stat, 0, sizeof(q->stat));
430 q->flags = 0;
431 if (!q->parm.size)
432 q->parm.size = BCMOS_MSG_QUEUE_SIZE_UNLIMITED;
433 if (!q->parm.high_wm)
434 q->parm.high_wm = BCMOS_MSG_QUEUE_SIZE_UNLIMITED;
435 if (!q->parm.low_wm)
436 q->parm.low_wm = q->parm.high_wm;
437 if (!q->parm.notify)
438 q->parm.notify = _bcmos_msgq_notify;
439}
440
441/* Destroy message list */
442static void bcmos_msg_list_destroy(bcmos_msg_list *l)
443{
444 bcmos_msg *msg, *tmp;
445 STAILQ_FOREACH_SAFE(msg, l, next, tmp)
446 {
447 STAILQ_REMOVE_HEAD(l, next);
448 bcmos_msg_free(msg);
449 }
450}
451
452#ifdef BCMOS_MSG_QUEUE_REMOTE_SUPPORT
453
454/* Remote queue support - common part of create/destroy */
455
456static bcmos_errno _bcmos_msg_queue_destroy_remote_ep(bcmos_msg_queue *queue)
457{
458 if (queue->q.parm.close)
459 queue->q.parm.close(queue);
460 bcmos_mutex_destroy(&queue->send_lock);
461 if (queue->send_buf)
462 bcmos_free(queue->send_buf);
463 if (queue->recv_buf)
464 bcmos_free(queue->recv_buf);
465 return BCM_ERR_OK;
466}
467
468static bcmos_errno _bcmos_msg_queue_create_remote_ep(bcmos_msg_queue *queue)
469{
470 bcmos_errno rc;
471
472 /* Allocate tx and rx buffers */
473 if (!queue->q.parm.max_mtu)
474 {
475 queue->q.parm.max_mtu = BCMOS_MSG_QUEUE_DEFAULT_MAX_MTU;
476 }
477 queue->send_buf = bcmos_calloc(queue->q.parm.max_mtu);
478 if (!queue->send_buf)
479 {
480 BCMOS_TRACE_RETURN(BCM_ERR_NOMEM, "Can't allocate send_buf\n");
481 }
482
483 queue->recv_buf = bcmos_calloc(queue->q.parm.max_mtu);
484 if (!queue->recv_buf)
485 {
486 bcmos_free(queue->send_buf);
487 BCMOS_TRACE_RETURN(BCM_ERR_NOMEM, "Can't allocate recv_buf\n");
488 }
489 bcmos_mutex_create(&queue->send_lock, 0, queue->q.parm.name);
490
491 switch (queue->q.parm.ep_type)
492 {
493#ifdef BCMOS_MSG_QUEUE_DOMAIN_SOCKET
494 case BCMOS_MSG_QUEUE_EP_DOMAIN_SOCKET:
495 rc = bcmos_sys_msg_queue_domain_socket_open(queue);
496 break;
497#endif
498#ifdef BCMOS_MSG_QUEUE_UDP_SOCKET
499 case BCMOS_MSG_QUEUE_EP_UDP_SOCKET:
500 rc = bcmos_sys_msg_queue_udp_socket_open(queue);
501 break;
502#endif
503#ifdef BCMOS_MSG_QUEUE_USER_DEFINED
504 case BCMOS_MSG_QUEUE_EP_USER_DEFINED:
 505 if (queue->q.parm.open == NULL || queue->q.parm.close == NULL || queue->q.parm.send == NULL || queue->q.parm.recv == NULL)
506 {
507 rc = BCM_ERR_PARM;
508 break;
509 }
 510 rc = queue->q.parm.open(queue);
511 break;
512#endif
513 default:
514 rc = BCM_ERR_PARM;
515 break;
516 }
517
518 if (rc)
519 {
520 _bcmos_msg_queue_destroy_remote_ep(queue);
521 }
522
523 return rc;
524}
525
526#endif /* #ifdef BCMOS_MSG_QUEUE_REMOTE_SUPPORT */
527
528/* Create message queue. */
529bcmos_errno bcmos_msg_queue_create(bcmos_msg_queue *queue, const bcmos_msg_queue_parm *parm)
530{
531 bcmos_errno rc;
532
533 if (!queue || !parm)
534 BCMOS_TRACE_RETURN(BCM_ERR_PARM, "queue %p, parm %p\n", queue, parm);
535 memset(queue, 0, sizeof(*queue));
536 queue->q.parm = *parm;
537
538 if (parm->ep_type == BCMOS_MSG_QUEUE_EP_LOCAL)
539 {
540 rc = bcmos_sem_create(&queue->m, 0, parm->flags, queue->q.parm.name);
541 if (!rc)
542 bcmos_msg_queue_nw_init(&queue->q, parm);
543 }
544 else
545#ifdef BCMOS_MSG_QUEUE_REMOTE_SUPPORT
546 {
547 rc = _bcmos_msg_queue_create_remote_ep(queue);
548 }
549#else
550 {
551 rc = BCM_ERR_PARM;
552 }
553#endif
554
555 if (rc)
556 return rc;
557
558 queue->magic = BCMOS_MSG_QUEUE_VALID;
559 /* Copy name to make sure that it is not released - in case it was on the stack */
560 if (queue->q.parm.name)
561 {
562 strncpy(queue->name, queue->q.parm.name, sizeof(queue->name) - 1);
563 queue->q.parm.name = queue->name;
564 }
565 bcmos_mutex_lock(&bcmos_res_lock);
566 STAILQ_INSERT_TAIL(&msg_queue_list, queue, list);
567 bcmos_mutex_unlock(&bcmos_res_lock);
568 return BCM_ERR_OK;
569}
570
571/* Destroy queue */
572bcmos_errno bcmos_msg_queue_destroy(bcmos_msg_queue *queue)
573{
574 bcmos_errno rc = BCM_ERR_OK;
575
576 if (!queue || queue->magic != BCMOS_MSG_QUEUE_VALID)
577 {
578 BCMOS_TRACE_RETURN(BCM_ERR_PARM, "queue handle is invalid\n");
579 }
580 queue->magic = BCMOS_MSG_QUEUE_DELETED;
581 bcmos_mutex_lock(&bcmos_res_lock);
582 STAILQ_REMOVE(&msg_queue_list, queue, bcmos_msg_queue, list);
583 bcmos_mutex_unlock(&bcmos_res_lock);
584
585 if (queue->q.parm.ep_type == BCMOS_MSG_QUEUE_EP_LOCAL)
586 {
587 bcmos_sem_destroy(&queue->m);
588 /* Release all pending messages */
589 bcmos_msg_list_destroy(&queue->q.msgl_urg);
590 bcmos_msg_list_destroy(&queue->q.msgl);
591 }
592#ifdef BCMOS_MSG_QUEUE_REMOTE_SUPPORT
593 else
594 {
595 rc = _bcmos_msg_queue_destroy_remote_ep(queue);
596 }
597#endif
598
599 return rc;
600}
601
602/* Get queue info */
603bcmos_errno bcmos_msg_queue_query(const bcmos_msg_queue *queue, bcmos_msg_queue_info *info)
604{
605 if (!queue || !info)
606 BCMOS_TRACE_RETURN(BCM_ERR_PARM, "queue %p, info %p\n", queue, info);
607 info->stat = queue->q.stat;
608 info->parm = queue->q.parm;
609 return BCM_ERR_OK;
610}
611
612/* Message queue iterator */
613bcmos_errno bcmos_msg_queue_get_next(const bcmos_msg_queue **prev)
614{
615 const bcmos_msg_queue *queue;
616
617 if (prev == NULL)
618 {
619 return BCM_ERR_PARM;
620 }
621 queue = *prev;
622 if (queue && queue->magic != BCMOS_MSG_QUEUE_VALID)
623 {
624 return BCM_ERR_PARM;
625 }
626 if (queue)
627 {
628 queue = STAILQ_NEXT(queue, list);
629 }
630 else
631 {
632 queue = STAILQ_FIRST(&msg_queue_list);
633 }
634 *prev = queue;
635 if (!queue)
636 {
637 return BCM_ERR_NO_MORE;
638 }
639 return BCM_ERR_OK;
640}
641
642#ifdef BCMOS_MSG_QUEUE_REMOTE_SUPPORT
643
644/* Pack / unpack message header.
645 * In the future we might adopt bcmolt_buf service into OS abstraction and use it
646 * for packing / unpacking.
647 */
648
649void bcmos_msg_hdr_pack(const bcmos_msg *msg, uint8_t *buf, uint32_t data_size)
650{
651 uint16_t val16;
652 uint32_t val32;
653 val16 = BCMOS_ENDIAN_CPU_TO_BIG_U16(msg->type);
654 memcpy(buf, &val16, sizeof(val16));
655 buf[2] = (uint8_t)msg->instance;
656 buf[3] = (uint8_t)msg->sender;
657 val32 = BCMOS_ENDIAN_CPU_TO_BIG_U32(data_size);
658 memcpy(&buf[4], &val32, sizeof(val32));
659}
660
661void bcmos_msg_hdr_unpack(const uint8_t *buf, bcmos_msg *msg)
662{
663 uint16_t val16;
664 uint32_t val32;
665 memcpy(&val16, buf, sizeof(val16));
666 val16 = BCMOS_ENDIAN_BIG_TO_CPU_U16(val16);
667 msg->type = (bcmos_msg_id)val16;
668 msg->instance = (bcmos_msg_instance)buf[2];
669 msg->sender = (bcmos_module_id)buf[3];
670 memcpy(&val32, &buf[4], sizeof(val32));
671 msg->size = BCMOS_ENDIAN_BIG_TO_CPU_U32(val32); /* can be overwritten by unpacker */
672 msg->handler = NULL;
673 msg->send_flags = 0;
674}
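
/* Wire layout implied by bcmos_msg_hdr_pack() / bcmos_msg_hdr_unpack() above
 * (8 bytes, multi-byte fields in big-endian/network order):
 *
 *   bytes 0..1  msg->type      (uint16, big-endian)
 *   byte  2     msg->instance  (uint8)
 *   byte  3     msg->sender    (uint8)
 *   bytes 4..7  data_size      (uint32, big-endian)
 */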
675
676/* Send message to remote EP wrapper */
677static bcmos_errno _bcmos_msg_send_to_remote_ep(bcmos_msg_queue *queue, bcmos_msg *msg, bcmos_msg_send_flags flags)
678{
679 uint8_t *buf = NULL;
680 uint32_t buf_length = 0;
681 bcmos_errno rc;
682
683 bcmos_mutex_lock(&queue->send_lock);
684 rc = queue->q.parm.pack(queue, msg, &buf, &buf_length);
685 rc = rc ? rc : queue->q.parm.send(queue, buf, buf_length);
686 bcmos_mutex_unlock(&queue->send_lock);
687
 688 /* Release the message if it was sent successfully, or on error unless BCMOS_MSG_SEND_NO_FREE_ON_ERROR is set */
689 if (rc == BCM_ERR_OK || !(flags & BCMOS_MSG_SEND_NO_FREE_ON_ERROR))
690 {
691 bcmos_msg_free(msg);
692 }
693 if (buf && queue->q.parm.free_packed)
694 {
695 queue->q.parm.free_packed(queue, buf);
696 }
697 if (rc)
698 {
699 ++queue->q.stat.msg_discarded;
700 }
701 else
702 {
703 ++queue->q.stat.msg_sent;
704 }
705
706 return rc;
707}
708
709/* Receive message from remote EP wrapper */
710static bcmos_errno _bcmos_msg_recv_from_remote_ep(bcmos_msg_queue *queue, uint32_t timeout, bcmos_msg **msg)
711{
712 uint8_t *buf = NULL;
713 uint32_t buf_length = 0;
714 bcmos_errno rc;
715
716 rc = queue->q.parm.recv(queue, timeout, &buf, &buf_length);
717 rc = rc ? rc : queue->q.parm.unpack(queue, buf, buf_length, msg);
718 if (buf && queue->q.parm.free_packed)
719 {
720 queue->q.parm.free_packed(queue, buf);
721 }
722 if (!rc)
723 {
724 ++queue->q.stat.msg_received;
725 }
726
727 return rc;
728}
729
730#endif /* BCMOS_MSG_QUEUE_REMOTE_SUPPORT */
731
732/* Send message to queue */
733bcmos_errno bcmos_msg_send(bcmos_msg_queue *queue, bcmos_msg *msg, bcmos_msg_send_flags flags)
734{
735 long lock_flags;
736 bcmos_errno rc;
737
738#ifdef BCMOS_MSG_QUEUE_REMOTE_SUPPORT
739 if (queue->q.parm.ep_type != BCMOS_MSG_QUEUE_EP_LOCAL)
740 {
741 return _bcmos_msg_send_to_remote_ep(queue, msg, flags);
742 }
743#endif
744
745 lock_flags = bcmos_fastlock_lock(&queue->q.lock);
746 rc = _bcmos_msg_put(&queue->q, msg, flags);
747 if (rc)
748 {
749 bcmos_fastlock_unlock(&queue->q.lock, lock_flags);
750 if (!(flags & BCMOS_MSG_SEND_NO_FREE_ON_ERROR))
751 {
752 bcmos_msg_free(msg);
753 }
754 BCMOS_TRACE_ERR("Overflow: Queue %s\n", queue->q.parm.name);
755 return rc;
756 }
757
758 /* Success */
759 if (queue->is_waiting)
760 {
761 /* Kick waiting task */
762 queue->is_waiting = BCMOS_FALSE;
763 bcmos_fastlock_unlock(&queue->q.lock, lock_flags);
764 bcmos_sem_post(&queue->m);
765 }
766 else
767 {
768 bcmos_fastlock_unlock(&queue->q.lock, lock_flags);
769 }
770
771 return BCM_ERR_OK;
772}
773
774/* Send message to module (internal) - doesn't post any semaphores so it's safe to call under a fastlock */
775static bcmos_errno _bcmos_msg_send_to_module(
776 bcmos_module_id module_id,
777 bcmos_msg *msg,
778 bcmos_msg_send_flags flags,
779 bcmos_sem **sem_to_post)
780{
781 bcmos_module *module = _bcmos_module_get(module_id);
782 bcmos_task *task;
783 long lock_flags, q_lock_flags;
784 uint32_t active_modules;
785 bcmos_errno rc;
786 *sem_to_post = NULL;
787
788 if (!module || !msg->handler)
789 {
790 if (!module)
791 {
792 BCMOS_TRACE_ERR("Module %d doesn't exist\n", module_id);
793 rc = BCM_ERR_NOENT;
794 }
795 else
796 {
797 BCMOS_TRACE_ERR("msg->handler is not set. msg->type=%d\n", msg->type);
798 rc = BCM_ERR_PARM;
799 }
800 if (!(flags & BCMOS_MSG_SEND_NO_FREE_ON_ERROR))
801 {
802 bcmos_msg_free(msg);
803 }
804 return rc;
805 }
806
807 task = module->my_task;
808 lock_flags = bcmos_fastlock_lock(&task->active_lock);
809 q_lock_flags = bcmos_fastlock_lock(&module->msgq.lock);
810
811 rc = _bcmos_msg_put(&module->msgq, msg, flags);
812 if (rc)
813 {
814 bcmos_fastlock_unlock(&module->msgq.lock, q_lock_flags);
815 bcmos_fastlock_unlock(&task->active_lock, lock_flags);
816
817 /* Queue overflow */
818 if (!(flags & BCMOS_MSG_SEND_NO_FREE_ON_ERROR))
819 bcmos_msg_free(msg);
820
821 BCMOS_TRACE_ERR("Overflow: Queue %s\n", module->parm.qparm.name);
822
823 return rc;
824 }
825 bcmos_fastlock_unlock(&module->msgq.lock, q_lock_flags);
826
 827 /* Success. Update the task's active_modules mask and kick the task */
828 active_modules = task->active_modules;
829 task->active_modules |= (1 << module->idx);
830 bcmos_fastlock_unlock(&task->active_lock, lock_flags);
831
832 /* Notify caller to kick task if there is a chance it was waiting */
833 if (!active_modules)
834 *sem_to_post = &task->active_sem;
835
836 return BCM_ERR_OK;
837}
838
839/* Send message to module */
840bcmos_errno bcmos_msg_send_to_module(bcmos_module_id module_id, bcmos_msg *msg, bcmos_msg_send_flags flags)
841{
842 bcmos_sem *sem_to_post;
843 bcmos_errno err = _bcmos_msg_send_to_module(module_id, msg, flags, &sem_to_post);
844 if (sem_to_post)
845 bcmos_sem_post(sem_to_post);
846 return err;
847}
848
849/* Get message from the head of message queue */
850bcmos_errno bcmos_msg_recv(bcmos_msg_queue *queue, uint32_t timeout, bcmos_msg **msg)
851{
852 long lock_flags;
853
854 if (!queue || !msg)
855 BCMOS_TRACE_RETURN(BCM_ERR_PARM, "queue %p, msg %p\n", queue, msg);
856
857#ifdef BCMOS_MSG_QUEUE_REMOTE_SUPPORT
858 if (queue->q.parm.ep_type != BCMOS_MSG_QUEUE_EP_LOCAL)
859 {
860 return _bcmos_msg_recv_from_remote_ep(queue, timeout, msg);
861 }
862#endif
863
864 lock_flags = bcmos_fastlock_lock(&queue->q.lock);
865 *msg = _bcmos_msg_get(&queue->q);
866 if (*msg)
867 {
868 bcmos_fastlock_unlock(&queue->q.lock, lock_flags);
869 return BCM_ERR_OK;
870 }
871 if (!timeout)
872 {
873 bcmos_fastlock_unlock(&queue->q.lock, lock_flags);
874 return BCM_ERR_NOENT;
875 }
876
877 /* Receive with wait */
878 queue->is_waiting = BCMOS_TRUE;
879 bcmos_fastlock_unlock(&queue->q.lock, lock_flags);
880
881 /* wait for it */
882 bcmos_sem_wait(&queue->m, timeout);
883 lock_flags = bcmos_fastlock_lock(&queue->q.lock);
884 *msg = _bcmos_msg_get(&queue->q);
885 queue->is_waiting = BCMOS_FALSE;
886 bcmos_fastlock_unlock(&queue->q.lock, lock_flags);
887 if (!*msg)
888 return BCM_ERR_TIMEOUT;
889
890 return BCM_ERR_OK;
891}
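
/* Illustrative usage sketch (not part of the original file, compiled out): a local
 * message queue round-trip. The queue name and size are hypothetical; only the
 * bcmos_* calls and parameter fields used below come from this file. The message is
 * handed over to the queue by bcmos_msg_send() and handed back by bcmos_msg_recv().
 */
#if 0
static bcmos_errno example_queue_round_trip(bcmos_msg *msg)
{
    bcmos_msg_queue q;
    bcmos_msg_queue_parm qp;
    bcmos_msg *received = NULL;
    bcmos_errno rc;

    memset(&qp, 0, sizeof(qp));
    qp.name = "example_q";
    qp.size = 64;                          /* 0 would mean BCMOS_MSG_QUEUE_SIZE_UNLIMITED */
    qp.ep_type = BCMOS_MSG_QUEUE_EP_LOCAL;

    rc = bcmos_msg_queue_create(&q, &qp);
    if (rc != BCM_ERR_OK)
        return rc;

    /* On error the message is freed automatically unless BCMOS_MSG_SEND_NO_FREE_ON_ERROR is set */
    rc = bcmos_msg_send(&q, msg, (bcmos_msg_send_flags)0);

    /* timeout 0 = no wait; the message was just queued, so this returns it immediately */
    rc = rc ? rc : bcmos_msg_recv(&q, 0, &received);

    bcmos_msg_queue_destroy(&q);
    return rc;
}
#endif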
892
893
894/*
895 * Queue group support
896 */
897
898/* Create message queue group. */
899bcmos_errno bcmos_msg_qgroup_create(bcmos_msg_qgroup *qgroup, const bcmos_msg_qgroup_parm *parm)
900{
901 bcmos_errno rc;
902 bcmos_qgroup_prty prty;
903
 904 if (!qgroup || !parm || !parm->nqueues || parm->nqueues > 32)
 905 BCMOS_TRACE_RETURN(BCM_ERR_PARM, "qgroup %p, parm %p nqueues %u\n", qgroup, parm, parm ? parm->nqueues : 0);
906 memset(qgroup, 0, sizeof(*qgroup));
907 rc = bcmos_sem_create(&qgroup->m, 0, parm->flags, NULL);
908 if (rc)
909 return rc;
910 qgroup->parm = *parm;
911 bcmos_fastlock_init(&qgroup->lock, parm->flags);
912 if (!qgroup->parm.size)
913 qgroup->parm.size = BCMOS_MSG_QUEUE_SIZE_UNLIMITED;
914 if (!qgroup->parm.high_wm)
915 qgroup->parm.high_wm = BCMOS_MSG_QUEUE_SIZE_UNLIMITED;
916 if (!qgroup->parm.low_wm)
917 qgroup->parm.low_wm = qgroup->parm.high_wm;
918 if (!qgroup->parm.notify)
919 qgroup->parm.notify = _bcmos_qgroup_notify;
920 qgroup->msgl = bcmos_calloc(sizeof(bcmos_msg_list) * (uint32_t)parm->nqueues);
921 if (!qgroup->msgl)
922 {
923 bcmos_msg_qgroup_destroy(qgroup);
924 return BCM_ERR_NOMEM;
925 }
926 for (prty = 0; prty < qgroup->parm.nqueues; prty++)
927 {
928 STAILQ_INIT(&qgroup->msgl[prty]);
929 }
930 /* Copy name to make sure that it is not released - in case it was on the stack */
931 if (qgroup->parm.name)
932 {
 933 strncpy(qgroup->name, qgroup->parm.name, sizeof(qgroup->name) - 1);
934 qgroup->parm.name = qgroup->name;
935 }
936 qgroup->magic = BCMOS_MSG_QGROUP_VALID;
937 bcmos_mutex_lock(&bcmos_res_lock);
938 STAILQ_INSERT_TAIL(&msg_qgroup_list, qgroup, list);
939 bcmos_mutex_unlock(&bcmos_res_lock);
940
941 return BCM_ERR_OK;
942}
943
944/** Destroy queue group
945 *
946 * \param[in] qgroup Queue group handle
947 * \returns 0=OK or error code <0
948 */
949bcmos_errno bcmos_msg_qgroup_destroy(bcmos_msg_qgroup *qgroup)
950{
951 bcmos_qgroup_prty prty;
952
953 if (!qgroup || qgroup->magic != BCMOS_MSG_QGROUP_VALID)
954 {
955 BCMOS_TRACE_RETURN(BCM_ERR_PARM, "queue group handle is invalid\n");
956 }
957
958 qgroup->magic = BCMOS_MSG_QGROUP_DELETED;
959
960 bcmos_mutex_lock(&bcmos_res_lock);
961 STAILQ_REMOVE(&msg_qgroup_list, qgroup, bcmos_msg_qgroup, list);
962 bcmos_mutex_unlock(&bcmos_res_lock);
963
964 bcmos_sem_destroy(&qgroup->m);
965
966 /* Release all pending messages */
967 if (qgroup->msgl)
968 {
969 for (prty = 0; prty < qgroup->parm.nqueues; prty++)
970 {
971 bcmos_msg_list_destroy(&qgroup->msgl[prty]);
972 }
973 bcmos_free(qgroup->msgl);
974 }
975 return BCM_ERR_OK;
976
977}
978
979/** Get queue group info */
980bcmos_errno bcmos_msg_qgroup_query(const bcmos_msg_qgroup *qgroup, bcmos_msg_qgroup_info *info)
981{
982 if (!qgroup || !info)
983 return BCM_ERR_PARM;
984
985 info->parm = qgroup->parm;
986 info->stat = qgroup->stat;
987
988 return BCM_ERR_OK;
989}
990
991/** Message queue group iterator
992 * \param[in] prev Previous queue group. *prev==NULL - get first
993 * \return: BCM_ERR_OK, BCM_ERR_NOENT, BCM_ERR_NO_MORE
994 */
995bcmos_errno bcmos_msg_qgroup_get_next(const bcmos_msg_qgroup **prev)
996{
997 const bcmos_msg_qgroup *qgrp;
998
999 if (prev == NULL)
1000 {
1001 return BCM_ERR_PARM;
1002 }
1003 qgrp = *prev;
1004 if (qgrp && qgrp->magic != BCMOS_MSG_QGROUP_VALID)
1005 {
1006 return BCM_ERR_PARM;
1007 }
1008 if (qgrp)
1009 {
1010 qgrp = STAILQ_NEXT(qgrp, list);
1011 }
1012 else
1013 {
1014 qgrp = STAILQ_FIRST(&msg_qgroup_list);
1015 }
1016 *prev = qgrp;
1017 if (!qgrp)
1018 {
1019 return BCM_ERR_NO_MORE;
1020 }
1021 return BCM_ERR_OK;
1022}
1023
1024/* get message from non-empty queue group queue */
1025static inline bcmos_msg *_bcmos_qgroup_msg_get(bcmos_msg_qgroup *qgroup, bcmos_qgroup_prty prty)
1026{
1027 bcmos_msg *msg;
1028 msg = STAILQ_FIRST(&qgroup->msgl[prty]);
1029 BUG_ON(!msg);
1030 STAILQ_REMOVE_HEAD(&qgroup->msgl[prty], next);
1031 if (STAILQ_EMPTY(&qgroup->msgl[prty]))
1032 {
1033 qgroup->active_mask &= ~(1 << prty);
1034 }
1035 return msg;
1036}
1037
1038/* Send message to queue group */
1039bcmos_errno bcmos_msg_send_to_qgroup(bcmos_msg_qgroup *qgroup, bcmos_msg *msg, bcmos_qgroup_prty prty,
1040 bcmos_msg_send_flags flags)
1041{
1042 long lock_flags;
1043 bcmos_errno rc = BCM_ERR_OK;
1044
1045 if (prty >= qgroup->parm.nqueues)
1046 {
1047 if (!(flags & BCMOS_MSG_SEND_NO_FREE_ON_ERROR))
1048 {
1049 bcmos_msg_free(msg);
1050 }
1051 return BCM_ERR_PARM;
1052 }
1053 msg->send_flags = flags;
1054
1055 lock_flags = bcmos_fastlock_lock(&qgroup->lock);
1056
1057 /* Put into the relevant queue */
1058 STAILQ_INSERT_TAIL(&qgroup->msgl[prty], msg, next);
1059 qgroup->active_mask |= (1 << prty);
1060 _bcmos_qgroup_stat_inc(qgroup);
1061
1062 /* Overflow check */
1063 if ((qgroup->stat.msg_in > qgroup->parm.size))
1064 {
1065 bcmos_msg *m;
1066 bcmos_qgroup_prty i;
1067 /* Find the lowest-priority queue with data and discard the head message.
1068 * The loop always finds something because we've just added a message
1069 */
1070 for (i = qgroup->parm.nqueues - 1; (qgroup->active_mask & (1 << i)) == 0; i--)
1071 ;
1072 m = _bcmos_qgroup_msg_get(qgroup, i);
1073 --qgroup->stat.msg_in;
1074 ++qgroup->stat.msg_discarded;
1075 if (!(m->send_flags & BCMOS_MSG_SEND_NO_FREE_ON_ERROR))
1076 {
1077 bcmos_msg_free(m);
1078 }
1079 rc = BCM_ERR_OVERFLOW;
1080 }
1081
1082 /* Kick waiting task */
1083 if (qgroup->is_waiting)
1084 {
1085 qgroup->is_waiting = BCMOS_FALSE;
1086 bcmos_fastlock_unlock(&qgroup->lock, lock_flags);
1087 bcmos_sem_post(&qgroup->m);
1088 }
1089 else
1090 {
1091 bcmos_fastlock_unlock(&qgroup->lock, lock_flags);
1092 }
1093
1094 return rc;
1095
1096}
1097
1098/* Get highest priority message from queue group */
1099bcmos_errno bcmos_msg_recv_from_qgroup(bcmos_msg_qgroup *qgroup, uint32_t timeout, bcmos_msg **msg)
1100{
1101 long lock_flags;
1102 bcmos_qgroup_prty prty;
1103
1104 if (!qgroup || !msg)
1105 BCMOS_TRACE_RETURN(BCM_ERR_PARM, "qgroup %p, msg %p\n", qgroup, msg);
1106
1107 lock_flags = bcmos_fastlock_lock(&qgroup->lock);
1108 if (!qgroup->active_mask)
1109 {
1110 if (!timeout)
1111 {
1112 bcmos_fastlock_unlock(&qgroup->lock, lock_flags);
1113 return BCM_ERR_NOENT;
1114 }
1115
1116 /* Receive with wait */
1117 qgroup->is_waiting = BCMOS_TRUE;
1118 bcmos_fastlock_unlock(&qgroup->lock, lock_flags);
1119
1120 /* wait for it */
1121 bcmos_sem_wait(&qgroup->m, timeout);
1122
1123 /* Done waiting. Either got a message or a timeout */
1124 lock_flags = bcmos_fastlock_lock(&qgroup->lock);
1125 qgroup->is_waiting = BCMOS_FALSE;
1126 if (!qgroup->active_mask)
1127 {
1128 bcmos_fastlock_unlock(&qgroup->lock, lock_flags);
1129 return BCM_ERR_TIMEOUT;
1130 }
1131 }
1132
1133 prty = (bcmos_qgroup_prty)(ffs(qgroup->active_mask) - 1);
1134 BUG_ON(prty >= qgroup->parm.nqueues);
1135 *msg = _bcmos_qgroup_msg_get(qgroup, prty);
1136 _bcmos_qgroup_stat_dec(qgroup);
1137 bcmos_fastlock_unlock(&qgroup->lock, lock_flags);
1138
1139 return BCM_ERR_OK;
1140}
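
/* Illustrative usage sketch (not part of the original file, compiled out): a
 * 2-priority queue group. Priority 0 is the highest and is always drained first by
 * bcmos_msg_recv_from_qgroup(); the group name, sizes and the way the two messages
 * are obtained are hypothetical.
 */
#if 0
static bcmos_errno example_qgroup_usage(bcmos_msg *urgent_msg, bcmos_msg *normal_msg)
{
    bcmos_msg_qgroup qg;
    bcmos_msg_qgroup_parm qgp;
    bcmos_msg *msg = NULL;
    bcmos_errno rc;

    memset(&qgp, 0, sizeof(qgp));
    qgp.name = "example_qgrp";
    qgp.nqueues = 2;                       /* 1..32 priority queues */

    rc = bcmos_msg_qgroup_create(&qg, &qgp);
    if (rc != BCM_ERR_OK)
        return rc;

    rc = bcmos_msg_send_to_qgroup(&qg, normal_msg, 1, (bcmos_msg_send_flags)0);
    rc = rc ? rc : bcmos_msg_send_to_qgroup(&qg, urgent_msg, 0, (bcmos_msg_send_flags)0);

    /* Returns urgent_msg first even though it was queued second */
    rc = rc ? rc : bcmos_msg_recv_from_qgroup(&qg, 0 /* no wait */, &msg);

    bcmos_msg_qgroup_destroy(&qg);
    return rc;
}
#endif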
1141
1142
1143/*
1144 * Message registration and dispatch
1145 */
1146
1147/* Hash entry */
1148typedef struct msg_hash_entry msg_hash_entry;
1149
1150struct msg_hash_entry
1151{
1152 /* Key */
1153 uint32_t key; /* msg_type + instance */
1154
1155 /* Value */
1156 bcmos_module_id module_id;
1157 F_bcmos_msg_handler handler;
1158
1159 /* List of entries with the same key */
1160 SLIST_ENTRY(msg_hash_entry) list;
1161};
1162
1163/* Hash table */
1164static SLIST_HEAD(msg_hash, msg_hash_entry) msg_hash_table[BCMOS_MSG_HASH_SIZE];
1165
1166/* Make hash key from msg_type and instance */
1167static inline uint32_t _bcmos_msg_hash_key(bcmos_msg_id msg_type, bcmos_msg_instance instance)
1168{
1169 return ((uint32_t)instance << 16) | (uint32_t)msg_type;
1170}
1171
1172/* Hash function */
1173static inline uint32_t _bcmos_msg_hash_func(uint32_t key)
1174{
1175 key ^= (key >> 9);
1176 key ^= (key << 3);
1177 key ^= (key >> 15);
1178 return key % BCMOS_MSG_HASH_SIZE;
1179}
1180
1181/* Find entry in hash */
1182static inline msg_hash_entry *_bcmos_msg_hash_find(bcmos_msg_id msg_type, bcmos_msg_instance instance)
1183{
1184 uint32_t key = _bcmos_msg_hash_key(msg_type, instance);
1185 uint32_t hash = _bcmos_msg_hash_func(key);
1186 msg_hash_entry *entry;
1187 SLIST_FOREACH(entry, &msg_hash_table[hash], list)
1188 {
1189 if (entry->key == key)
1190 break;
1191 }
1192 return entry;
1193}
1194
1195/* Register message_type+instance --> module+handler */
1196bcmos_errno bcmos_msg_register(bcmos_msg_id msg_type, bcmos_msg_instance instance,
1197 bcmos_module_id module_id, F_bcmos_msg_handler handler)
1198{
1199 uint32_t key = _bcmos_msg_hash_key(msg_type, instance);
1200 uint32_t hash = _bcmos_msg_hash_func(key);
1201 msg_hash_entry *entry;
1202 long lock_flags;
1203
1204 if (!handler)
1205 return BCM_ERR_PARM;
1206
1207 entry = bcmos_calloc(sizeof(*entry));
1208 if (!entry)
1209 return BCM_ERR_NOMEM;
1210
1211 entry->key = key;
1212 entry->module_id = module_id;
1213 entry->handler = handler;
1214 lock_flags = bcmos_fastlock_lock(&bcmos_msg_register_lock);
1215 if (_bcmos_msg_hash_find(msg_type, instance) != NULL)
1216 {
1217 bcmos_fastlock_unlock(&bcmos_msg_register_lock, lock_flags);
1218 bcmos_free(entry);
1219 return BCM_ERR_ALREADY;
1220 }
1221 SLIST_INSERT_HEAD(&msg_hash_table[hash], entry, list);
1222 bcmos_fastlock_unlock(&bcmos_msg_register_lock, lock_flags);
1223
1224 return BCM_ERR_OK;
1225}
1226
1227bcmos_errno bcmos_msg_unregister(bcmos_msg_id msg_type, bcmos_msg_instance instance, bcmos_module_id module_id)
1228{
1229 uint32_t key = _bcmos_msg_hash_key(msg_type, instance);
1230 uint32_t hash = _bcmos_msg_hash_func(key);
1231 msg_hash_entry *entry;
1232 long lock_flags;
1233
1234 lock_flags = bcmos_fastlock_lock(&bcmos_msg_register_lock);
1235 entry = _bcmos_msg_hash_find(msg_type, instance);
1236 if (!entry)
1237 {
1238 bcmos_fastlock_unlock(&bcmos_msg_register_lock, lock_flags);
1239 return BCM_ERR_NOENT;
1240 }
1241
1242 if (entry->module_id != module_id)
1243 {
1244 bcmos_fastlock_unlock(&bcmos_msg_register_lock, lock_flags);
1245 return BCM_ERR_INVALID_OP;
1246 }
1247
1248 SLIST_REMOVE(&msg_hash_table[hash], entry, msg_hash_entry, list);
1249 bcmos_fastlock_unlock(&bcmos_msg_register_lock, lock_flags);
1250 bcmos_free(entry);
1251
1252 return BCM_ERR_OK;
1253}
1254
1255void bcmos_msg_shutdown_mode_set(bcmos_bool shutdown_mode)
1256{
1257 bcmos_msg_shutdown_mode = shutdown_mode;
1258}
1259
1260bcmos_bool bcmos_msg_shutdown_mode_get(void)
1261{
1262 return bcmos_msg_shutdown_mode;
1263}
1264
1265/* Dispatch message to registered module */
1266bcmos_errno bcmos_msg_dispatch(bcmos_msg *msg, bcmos_msg_send_flags flags)
1267{
1268 bcmos_errno err;
1269
1270 if (unlikely(bcmos_msg_shutdown_mode))
1271 {
1272 /* In shutdown mode, we need to acquire the same lock used to protect bcmos_msg_register() /
1273 * bcmos_msg_unregister(), since we must support calling these functions concurrently. */
1274 msg_hash_entry *entry;
1275 bcmos_sem *sem_to_post = NULL;
1276 long lock_flags = bcmos_fastlock_lock(&bcmos_msg_register_lock);
1277 entry = _bcmos_msg_hash_find(msg->type, msg->instance);
1278
1279 if (entry)
1280 {
1281 msg->handler = entry->handler;
1282 err = _bcmos_msg_send_to_module(entry->module_id, msg, flags, &sem_to_post);
1283 }
1284 else
1285 {
1286 /* Not found. Release automatically if requested. */
1287 if (!(flags & BCMOS_MSG_SEND_NO_FREE_ON_ERROR))
1288 bcmos_msg_free(msg);
1289 err = BCM_ERR_OK;
1290 }
1291
1292 bcmos_fastlock_unlock(&bcmos_msg_register_lock, lock_flags);
1293 if (sem_to_post)
1294 bcmos_sem_post(sem_to_post);
1295 }
1296 else
1297 {
1298 msg_hash_entry *entry = _bcmos_msg_hash_find(msg->type, msg->instance);
1299
1300 if (entry)
1301 {
1302 msg->handler = entry->handler;
1303 err = bcmos_msg_send_to_module(entry->module_id, msg, flags);
1304 }
1305 else
1306 {
1307 /* Not found. Release automatically if requested. */
1308 BCMOS_TRACE_ERR("Can't dispatch unregistered msg %d:%d\n", msg->type, msg->instance);
1309 if (!(flags & BCMOS_MSG_SEND_NO_FREE_ON_ERROR))
1310 bcmos_msg_free(msg);
1311 err = BCM_ERR_NOENT;
1312 }
1313 }
1314
1315 return err;
1316}
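
/* Illustrative usage sketch (not part of the original file, compiled out): binding a
 * (message type, instance) pair to a module and dispatching by type.
 * EXAMPLE_MODULE_ID and example_dispatch_handler are hypothetical; bcmos_msg_dispatch()
 * looks the pair up in the hash above, fills msg->handler and forwards the message to
 * the owning module's queue.
 */
#if 0
static void example_dispatch_handler(bcmos_module_id module_id, bcmos_msg *msg)
{
    /* Runs in the context of the task that owns EXAMPLE_MODULE_ID */
    bcmos_msg_free(msg);
}

static bcmos_errno example_register_and_dispatch(bcmos_msg *msg)
{
    bcmos_errno rc;

    rc = bcmos_msg_register(msg->type, msg->instance, EXAMPLE_MODULE_ID, example_dispatch_handler);
    if (rc != BCM_ERR_OK && rc != BCM_ERR_ALREADY)
        return rc;

    return bcmos_msg_dispatch(msg, (bcmos_msg_send_flags)0);
}
#endif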
1317
1318/*
1319 * Task management
1320 */
1321
1322/*
1323 * Default task handler
1324 */
1325/*lint -e{632,633,634}
1326 * There are a few warnings about implicit bcmos_errno conversion to int.
1327 * It is too bothersome for now to change the task handler prototype everywhere,
1328 * and the warning is harmless.
1329 */
1330int bcmos_dft_task_handler(long data)
1331{
1332 bcmos_task *task = (bcmos_task *)data;
1333 long flags = 0, q_flags = 0;
1334 uint32_t active_modules;
1335 int last_module = 0; /* 1-based last handled module index */
1336 bcmos_module *module;
1337 bcmos_msg *msg;
1338 int rc;
1339
1340
1341 /* Set / validate task timeout */
1342 rc = bcmos_task_timeout_set(task, task->parm.msg_wait_timeout, task->parm.timeout_handler);
1343 if (rc)
1344 {
1345 return rc;
1346 }
1347
1348 /* Call init callback if any */
1349 if (task->parm.init_handler)
1350 {
1351 rc = task->parm.init_handler(task->parm.data);
1352 if (rc)
1353 {
1354 BCMOS_TRACE_ERR("Task %s: init_handler returned error %s (%d)\n",
1355 task->parm.name, bcmos_strerror((bcmos_errno)rc), rc);
1356 bcmos_task_destroy(task);
1357 return rc;
1358 }
1359 }
1360
1361 /* Wait for module activity */
1362 while (!task->destroy_request)
1363 {
1364 task->current_module = BCMOS_MODULE_ID_NONE;
1365
1366 /* Wait for module activity */
1367 rc = bcmos_sem_wait(&task->active_sem, task->parm.msg_wait_timeout);
1368 if (rc == BCM_ERR_TIMEOUT)
1369 {
1370 F_bcmos_task_handler timeout_handler = task->parm.timeout_handler;
1371
1372 /* Handle possible race condition */
1373 if (!timeout_handler)
1374 continue;
1375 rc = timeout_handler(data);
1376 if (rc != BCM_ERR_OK)
1377 {
1378 BCMOS_TRACE_ERR("Task %s: terminated by timeout_handler. error %s (%d)\n",
1379 task->parm.name, bcmos_strerror(rc), rc);
1380 break;
1381 }
1382 /* Keep waiting */
1383 continue;
1384 }
1385
1386 /* RR active modules */
1387 do
1388 {
1389 flags = bcmos_fastlock_lock(&task->active_lock);
1390 active_modules = (task->active_modules >> last_module);
1391 if (!active_modules)
1392 {
1393 last_module = 0;
1394 active_modules = task->active_modules;
1395 if (!active_modules)
1396 {
1397 /* No modules with work to do */
1398 bcmos_fastlock_unlock(&task->active_lock, flags);
1399 continue;
1400 }
1401 }
1402 last_module += ffs(active_modules);
1403 BUG_ON(last_module > BCMOS_MAX_MODULES_PER_TASK);
1404 module = task->modules[last_module - 1];
1405 BUG_ON(!module);
1406
1407 q_flags = bcmos_fastlock_lock(&module->msgq.lock);
1408 /* Get message from the module's message queue */
1409 msg = _bcmos_msg_get(&module->msgq);
1410 if (!msg)
1411 {
1412 bcmos_fastlock_unlock(&module->msgq.lock, q_flags);
1413 task->active_modules &= ~(1 << (last_module - 1));
1414 bcmos_fastlock_unlock(&task->active_lock, flags);
1415 continue;
1416 }
1417
1418 bcmos_fastlock_unlock(&module->msgq.lock, q_flags);
1419 bcmos_fastlock_unlock(&task->active_lock, flags);
1420
1421 /* Handle the message */
1422 if (msg->handler)
1423 {
1424 task->current_module = module->id;
1425 msg->handler(module->id, msg);
1426 }
1427 else
1428 {
1429 BCMOS_TRACE_ERR("msg->handler is not set. msg->type=%d\n", msg->type);
1430 bcmos_msg_free(msg);
1431 }
1432
1433 } while (task->active_modules);
1434 }
1435
1436 return 0;
1437}
1438
1439/* Set task message timeout.
1440 * The function is only supported in integration mode
1441 */
1442bcmos_errno bcmos_task_timeout_set(bcmos_task *task, uint32_t timeout, F_bcmos_task_handler timeout_handler)
1443{
1444 if (task->parm.handler)
1445 {
1446 BCMOS_TRACE_ERR("%s: The function is only supported in integration mode (task handler == NULL)\n", task->parm.name);
1447 return BCM_ERR_NOT_SUPPORTED;
1448 }
1449 if ((timeout && timeout != BCMOS_WAIT_FOREVER) && !timeout_handler)
1450 {
1451 BCMOS_TRACE_ERR("%s: timeout_handler is not set\n", task->parm.name);
1452 return BCM_ERR_PARM;
1453 }
1454
1455 /* 0 means FOREVER here */
1456 if (!timeout)
1457 timeout = BCMOS_WAIT_FOREVER;
1458
1459 task->parm.timeout_handler = timeout_handler;
1460 task->parm.msg_wait_timeout = timeout;
1461
1462 return BCM_ERR_OK;
1463}
1464
1465/*
1466 * Module
1467 */
1468
1469/* Register module */
1470bcmos_errno bcmos_module_create(bcmos_module_id module_id, bcmos_task *task, bcmos_module_parm *parm)
1471{
1472 bcmos_module *module;
1473 bcmos_errno rc = BCM_ERR_OK;
1474 int i;
1475
1476 if ((unsigned)module_id >= (unsigned)BCMOS_MODULE_ID__NUM_OF || !parm)
1477 BCMOS_TRACE_RETURN(BCM_ERR_PARM, "module %d, parm %p\n", module_id, parm);
1478 if (!task)
1479 BCMOS_TRACE_RETURN(BCM_ERR_NOT_SUPPORTED, "No task\n");
1480 if (bcmos_modules[module_id])
1481 BCMOS_TRACE_RETURN(BCM_ERR_ALREADY, "module_id %d\n", module_id);
1482
1483 module = bcmos_calloc(sizeof(bcmos_module));
1484 if (!module)
1485 BCMOS_TRACE_RETURN(BCM_ERR_NOMEM, "module_id %d\n", module_id);
1486 module->id = module_id;
1487 module->my_task = task;
1488 module->parm = *parm;
1489 module->context = (void *)parm->data;
1490 bcmos_msg_queue_nw_init(&module->msgq, &parm->qparm);
1491 /* Copy name to make sure that it is not released - in case it was on the stack */
1492 if (module->parm.qparm.name)
1493 {
1494 strncpy(module->name, module->parm.qparm.name, sizeof(module->name) - 1);
1495 module->parm.qparm.name = module->name;
1496 }
1497
 1498 /* Assign the module an index within the owning task */
1499 for (i = 0; i < BCMOS_MAX_MODULES_PER_TASK; i++)
1500 {
1501 if (!task->modules[i])
1502 {
1503 task->modules[i] = module;
1504 module->idx = i;
1505 break;
1506 }
1507 }
1508 if (i == BCMOS_MAX_MODULES_PER_TASK)
1509 {
1510 bcmos_free(module);
1511 BCMOS_TRACE_RETURN(BCM_ERR_TOO_MANY, "module_id %d\n", module_id);
1512 }
1513
1514 bcmos_modules[module_id] = module;
1515
1516 /* Init module */
1517 if (parm->init)
1518 {
1519 rc = parm->init(parm->data);
1520 if (rc)
1521 bcmos_module_destroy(module_id);
1522 }
1523
1524 return rc;
1525}
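
/* Illustrative usage sketch (not part of the original file, compiled out): registering
 * a module on an existing task and sending it a message. The task is assumed to have
 * been created elsewhere with the default handler (bcmos_dft_task_handler);
 * EXAMPLE_MODULE_ID, the queue name and example_module_msg_handler are hypothetical.
 */
#if 0
static void example_module_msg_handler(bcmos_module_id module_id, bcmos_msg *msg)
{
    /* Executed in the owning task's context by bcmos_dft_task_handler() */
    bcmos_msg_free(msg);
}

static bcmos_errno example_module_setup(bcmos_task *task, bcmos_msg *msg)
{
    bcmos_module_parm mparm;
    bcmos_errno rc;

    memset(&mparm, 0, sizeof(mparm));
    mparm.qparm.name = "example_module_q";
    mparm.qparm.size = 32;

    rc = bcmos_module_create(EXAMPLE_MODULE_ID, task, &mparm);
    if (rc != BCM_ERR_OK)
        return rc;

    msg->handler = example_module_msg_handler;
    return bcmos_msg_send_to_module(EXAMPLE_MODULE_ID, msg, (bcmos_msg_send_flags)0);
}
#endif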
1526
1527/* Un-register module */
1528bcmos_errno bcmos_module_destroy(bcmos_module_id module_id)
1529{
1530 bcmos_module *module = _bcmos_module_get(module_id);
1531 bcmos_task *task;
1532 long lock_flags, q_lock_flags;
1533 bcmos_msg_list msgl_urg, msgl;
1534
1535 if (!module)
1536 BCMOS_TRACE_RETURN(BCM_ERR_NOENT, "module_id %d\n", module_id);
1537
1538 task = module->my_task;
1539 lock_flags = bcmos_fastlock_lock(&task->active_lock);
1540 task->modules[module->idx] = NULL;
1541 task->active_modules &= ~(1 << module->idx);
1542
 1543 /* Because we are not allowed to free memory (via bcmos_free()) while interrupts are locked,
 1544 * we only detach the message lists here (via STAILQ_INIT()) and free them outside the locked section. */
1545 q_lock_flags = bcmos_fastlock_lock(&module->msgq.lock);
1546 msgl_urg = module->msgq.msgl_urg;
1547 msgl = module->msgq.msgl;
1548 STAILQ_INIT(&module->msgq.msgl_urg);
1549 STAILQ_INIT(&module->msgq.msgl);
1550 bcmos_fastlock_unlock(&module->msgq.lock, q_lock_flags);
1551 bcmos_fastlock_unlock(&task->active_lock, lock_flags);
1552
1553 bcmos_msg_list_destroy(&msgl_urg);
1554 bcmos_msg_list_destroy(&msgl);
1555
1556 if (module->parm.exit)
1557 module->parm.exit(module->parm.data);
1558 bcmos_modules[module_id] = NULL;
1559 bcmos_free(module);
1560
1561 return BCM_ERR_OK;
1562}
1563
1564/* Get current module id in the current task */
1565bcmos_module_id bcmos_module_current(void)
1566{
1567 bcmos_task *task = bcmos_task_current();
1568
1569 if (!task)
1570 return BCMOS_MODULE_ID_NONE;
1571 return task->current_module;
1572}
1573
1574/* Get module context set by bcmos_module_context_set() */
1575void *bcmos_module_context(bcmos_module_id module_id)
1576{
1577 bcmos_module *module = _bcmos_module_get(module_id);
1578 if (!module)
1579 return NULL;
1580 return module->context;
1581}
1582
1583/* Set module context */
1584bcmos_errno bcmos_module_context_set(bcmos_module_id module_id, void *context)
1585{
1586 bcmos_module *module = _bcmos_module_get(module_id);
1587
1588 if (!module)
1589 BCMOS_TRACE_RETURN(BCM_ERR_NOENT, "module_id %d\n", module_id);
1590 module->context = context;
1591 return BCM_ERR_OK;
1592}
1593
1594/* Query module info */
1595bcmos_errno bcmos_module_query(bcmos_module_id module_id, const bcmos_task **task, bcmos_msg_queue_info *info)
1596{
1597 bcmos_module *module = _bcmos_module_get(module_id);
1598
1599 if (!module)
1600 {
1601 return BCM_ERR_NOENT;
1602 }
1603 if (task)
1604 {
1605 *task = module->my_task;
1606 }
1607 if (info)
1608 {
1609 info->parm = module->parm.qparm;
1610 info->stat = module->msgq.stat;
1611 }
1612 return BCM_ERR_OK;
1613}
1614
1615/*
1616 * Events
1617 */
1618
1619/* This function handles event arrival in module context */
1620static void _bcmos_ev_in_module_handler(bcmos_module_id module_id, bcmos_msg *msg)
1621{
1622 bcmos_event *ev = _bcmos_msg_to_event(msg);
1623 uint32_t active_bits;
1624 long lock_flags;
1625
1626 active_bits = ev->active_bits & ev->parm.mask;
1627 ev->parm.handler(ev->id, active_bits);
1628
1629 lock_flags = bcmos_fastlock_lock(&ev->lock);
1630 ev->active_bits &= ~active_bits;
1631 ev->is_waiting = BCMOS_TRUE;
1632 bcmos_fastlock_unlock(&ev->lock, lock_flags);
1633}
1634
1635/* Release event message. Only called in exceptional situations,
1636 * such as module queue destroy. Do nothing.
1637 */
1638static void _bcmos_ev_msg_release(bcmos_msg *msg)
1639{
1640}
1641
1642/* Create event set */
1643bcmos_errno bcmos_event_create(bcmos_event_id event_id, bcmos_event_parm *parm)
1644{
1645 bcmos_event *ev;
1646 bcmos_module *module = NULL;
1647 bcmos_errno rc;
1648
1649 if ((unsigned)event_id >= (unsigned)BCMOS_EVENT_ID__NUM_OF)
1650 BCMOS_TRACE_RETURN(BCM_ERR_PARM, "event_id %d\n", event_id);
1651
1652 if (_bcmos_event_get(event_id))
1653 BCMOS_TRACE_RETURN(BCM_ERR_ALREADY, "event_id %d\n", event_id);
1654
1655 if (parm && parm->module_id != BCMOS_MODULE_ID_NONE)
1656 {
1657 module = _bcmos_module_get(parm->module_id);
1658 if (!module)
1659 BCMOS_TRACE_RETURN(BCM_ERR_NOENT, "module_id %d\n", parm->module_id);
1660 if (!parm->handler || !parm->mask)
1661 BCMOS_TRACE_RETURN(BCM_ERR_PARM, "event_id %d, handler %p, mask %x\n", event_id, parm->handler, parm->mask);
1662 }
1663
1664 ev = bcmos_calloc(sizeof(bcmos_event));
1665 if (!ev)
1666 BCMOS_TRACE_RETURN(BCM_ERR_NOMEM, "event_id %d\n", event_id);
1667
1668 ev->id = event_id;
1669 if (parm)
1670 ev->parm = *parm;
1671 bcmos_fastlock_init(&ev->lock, ev->parm.flags);
1672 {
1673 rc = bcmos_sem_create(&ev->m, 0, ev->parm.flags, ev->parm.name);
1674 if (rc)
1675 {
1676 bcmos_free(ev);
1677 return rc;
1678 }
1679 }
1680
1681 /* Initialize event message in integration mode */
1682 if (ev->parm.module_id != BCMOS_MODULE_ID_NONE)
1683 {
1684 ev->msg.handler = _bcmos_ev_in_module_handler;
1685 ev->msg.release = _bcmos_ev_msg_release;
1686 ev->msg.sender = BCMOS_MODULE_ID_NONE;
1687 ev->msg.type = BCMOS_MSG_ID_INTERNAL_EVENT;
1688 ev->is_waiting = BCMOS_TRUE;
1689 }
1690
1691 /* Copy name to make sure that it is not released - in case it was on the stack */
1692 if (ev->parm.name)
1693 {
1694 strncpy(ev->name, ev->parm.name, sizeof(ev->name) - 1);
1695 ev->parm.name = ev->name;
1696 }
1697
1698 bcmos_events[event_id] = ev;
1699
1700 return BCM_ERR_OK;
1701}
1702
1703/* Destroy event set created by bcmos_event_create() */
1704bcmos_errno bcmos_event_destroy(bcmos_event_id event_id)
1705{
1706 return BCM_ERR_NOT_SUPPORTED;
1707}
1708
1709/* Raise event */
1710bcmos_errno bcmos_event_raise(bcmos_event_id event_id, uint32_t active_bits)
1711{
1712 bcmos_event *ev = _bcmos_event_get(event_id);
1713 long lock_flags;
1714
1715 if (!ev)
1716 BCMOS_TRACE_RETURN(BCM_ERR_NOENT, "event_id %d\n", event_id);
1717
1718 lock_flags = bcmos_fastlock_lock(&ev->lock);
1719 ev->active_bits |= active_bits;
1720 if (ev->is_waiting && (ev->active_bits & ev->parm.mask))
1721 {
1722 ev->is_waiting = BCMOS_FALSE;
1723 bcmos_fastlock_unlock(&ev->lock, lock_flags);
1724 if (ev->parm.module_id != BCMOS_MODULE_ID_NONE)
1725 bcmos_msg_send_to_module(ev->parm.module_id, &ev->msg, BCMOS_MSG_SEND_URGENT | BCMOS_MSG_SEND_NOLIMIT);
1726 else
1727 bcmos_sem_post(&ev->m);
1728 }
1729 else
1730 {
1731 bcmos_fastlock_unlock(&ev->lock, lock_flags);
1732 }
1733
1734 return BCM_ERR_OK;
1735}
1736
1737/* Wait for event */
1738bcmos_errno bcmos_event_recv(bcmos_event_id event_id, uint32_t mask,
1739 uint32_t timeout, uint32_t *active_bits)
1740{
1741 bcmos_event *ev = _bcmos_event_get(event_id);
1742 long lock_flags;
1743
1744 BUG_ON(!active_bits);
1745 if (!ev)
1746 BCMOS_TRACE_RETURN(BCM_ERR_NOENT, "event_id %d is not registered\n", event_id);
1747
1748 lock_flags = bcmos_fastlock_lock(&ev->lock);
1749 *active_bits = ev->active_bits & mask;
1750 if (*active_bits)
1751 {
1752 ev->active_bits &= ~ *active_bits;
1753 bcmos_fastlock_unlock(&ev->lock, lock_flags);
1754 return BCM_ERR_OK;
1755 }
1756 if (!timeout)
1757 {
1758 bcmos_fastlock_unlock(&ev->lock, lock_flags);
1759 return BCM_ERR_NOENT;
1760 }
1761
1762 /* recv with wait */
1763 ev->is_waiting = BCMOS_TRUE;
1764 bcmos_fastlock_unlock(&ev->lock, lock_flags);
1765
1766 /* wait for it */
1767 bcmos_sem_wait(&ev->m, timeout);
1768 /* Either got event or timeout */
1769 lock_flags = bcmos_fastlock_lock(&ev->lock);
1770 *active_bits = ev->active_bits & mask;
1771 ev->active_bits &= ~ *active_bits;
1772 ev->is_waiting = BCMOS_FALSE;
1773 bcmos_fastlock_unlock(&ev->lock, lock_flags);
 1774 /* If we waited forever and got an event that does not match the mask we are waiting on
 1775 * (the only way to reach this point when waiting forever), avoid returning BCM_ERR_TIMEOUT - it is not an error. */
1776 if (timeout != BCMOS_WAIT_FOREVER && !*active_bits)
1777 return BCM_ERR_TIMEOUT;
1778 return BCM_ERR_OK;
1779}
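
/* Illustrative usage sketch (not part of the original file, compiled out): a raw
 * (non-module) event set used as a simple bit-mask wakeup. EXAMPLE_EVENT_ID and the
 * bit values are hypothetical; with module_id == BCMOS_MODULE_ID_NONE the receiver
 * blocks on the event's internal semaphore instead of being delivered to a module.
 */
#if 0
#define EXAMPLE_EV_BIT_RX   0x1
#define EXAMPLE_EV_BIT_TX   0x2

static bcmos_errno example_event_wait(void)
{
    bcmos_event_parm eparm;
    uint32_t bits = 0;
    bcmos_errno rc;

    memset(&eparm, 0, sizeof(eparm));
    eparm.module_id = BCMOS_MODULE_ID_NONE;    /* raw mode: no module delivery */
    eparm.name = "example_ev";

    rc = bcmos_event_create(EXAMPLE_EVENT_ID, &eparm);
    if (rc != BCM_ERR_OK)
        return rc;

    /* Typically raised from another task or from interrupt context */
    bcmos_event_raise(EXAMPLE_EVENT_ID, EXAMPLE_EV_BIT_RX);

    /* Returns immediately because the RX bit is already pending */
    return bcmos_event_recv(EXAMPLE_EVENT_ID, EXAMPLE_EV_BIT_RX | EXAMPLE_EV_BIT_TX,
        BCMOS_WAIT_FOREVER, &bits);
}
#endif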
1780
1781/*
1782 * Timer
1783 */
1784
1785/* compare timestamps minding wrap-around
1786 * returns delta >= 0 if ts1 >= ts2
1787 */
1788static inline int32_t _bcmos_timer_ts_delta(uint32_t ts1, uint32_t ts2)
1789{
1790 int32_t delta = (int)(ts1 - ts2);
1791 return delta;
1792}
1793
1794static int32_t _bcmos_timer_compare(struct bcmos_timer *t1, struct bcmos_timer *t2)
1795{
1796 int32_t delta = _bcmos_timer_ts_delta(t1->expire_at, t2->expire_at);
1797#if defined(BCMOS_TIMER_RB_TREE) && !defined(BCMOS_TIMER_RB_TREE_LIST)
1798 /* FreeBSD RB tree implementation doesn't support 2 nodes with the same key */
1799 if (!delta)
1800 delta = 1;
1801#endif
1802 return delta;
1803}
1804
1805static inline void _bcmos_start_system_timer(bcmos_timer *head)
1806{
1807 if (head)
1808 {
1809 int32_t delay = _bcmos_timer_ts_delta(head->expire_at, bcmos_timestamp());
1810 /* Handle rare race condition when next timer expired while we were fiddling
1811 * with the pool. Just give it 1 more "tick". System handler handles all timers
1812 * expired (<now .. now + PRECISION/2)
1813 */
1814 if (delay <= 0)
1815 {
1816 delay = BCMOS_TIMER_PRECISION_US / 2;
1817 }
1818 bcmos_sys_timer_start(&tmr_pool.sys_timer, delay);
1819 }
1820 else
1821 {
1822 bcmos_sys_timer_stop(&tmr_pool.sys_timer);
1823 }
1824}
1825
1826/*
1827 * Timer pool: RB tree or TAILQ-based implementation
1828 */
1829static void _bcmos_timer_pool_insert(bcmos_timer *timer, uint32_t delay, bcmos_bool start_sys_timer)
1830{
1831 long flags;
1832 bcmos_timer *head;
1833
1834 flags = bcmos_fastlock_lock(&tmr_pool.lock);
1835 if (BCMOS_TIMER_IS_RUNNING(timer))
1836 {
1837 bcmos_fastlock_unlock(&tmr_pool.lock, flags);
1838 return;
1839 }
1840 timer->period = timer->parm.periodic ? delay : 0;
1841 timer->expire_at = BCMOS_ROUND_UP(bcmos_timestamp() + delay, BCMOS_TIMER_PRECISION_US / 2);
1842 TMR_POOL_INSERT(&tmr_pool, timer);
1843 timer->flags &= ~BCMOS_TIMER_FLAG_EXPIRED;
1844 timer->flags |= BCMOS_TIMER_FLAG_RUNNING;
1845
1846 /* If new timer is at the top - kick system timer */
1847 if (start_sys_timer)
1848 {
1849 head = TMR_POOL_FIRST(&tmr_pool);
1850 if (timer == head)
1851 {
1852 _bcmos_start_system_timer(head);
1853 }
1854 }
1855 bcmos_fastlock_unlock(&tmr_pool.lock, flags);
1856}
1857
1858static void _bcmos_timer_stop(bcmos_timer *timer)
1859{
1860 long flags;
1861 bcmos_bool was_top;
1862 bcmos_msg_queue_nw *queue;
1863
1864 /* First take running timer out of the active pool */
1865 flags = bcmos_fastlock_lock(&tmr_pool.lock);
1866 timer->period = 0; /* Prevent periodic timer restart */
1867 if (BCMOS_TIMER_IS_RUNNING(timer))
1868 {
1869 timer->flags &= ~BCMOS_TIMER_FLAG_RUNNING;
1870 was_top = (timer == TMR_POOL_FIRST(&tmr_pool));
1871 TMR_POOL_REMOVE(&tmr_pool, timer);
1872
1873 /* If timer was the top - stop/restart system timer */
1874 if (was_top)
1875 {
1876 _bcmos_start_system_timer(TMR_POOL_FIRST(&tmr_pool));
1877 }
1878 }
1879 bcmos_fastlock_unlock(&tmr_pool.lock, flags);
1880
1881 /* Now timer is not in the active pool. Perhaps it is already in
1882 * destination module's queue. Take it out if yes.
1883 */
1884 queue = timer->queue;
1885 if (queue)
1886 {
1887 flags = bcmos_fastlock_lock(&queue->lock);
1888 /* Check queue again because the previous check was unprotected */
1889 if (timer->queue)
1890 {
1891 bcmos_msg_list *msg_list = ((timer->parm.flags & BCMOS_TIMER_PARM_FLAGS_NON_URGENT))
1892 ? &queue->msgl : &queue->msgl_urg;
1893 if (STAILQ_REMOVE_SAFE(msg_list, &timer->msg, bcmos_msg, next) != NULL)
1894 {
1895 _bcmos_msgq_stat_dec(queue);
1896 }
1897 timer->queue = NULL;
1898 }
1899 timer->flags &= ~BCMOS_TIMER_FLAG_EXPIRED;
1900 bcmos_fastlock_unlock(&queue->lock, flags);
1901 }
1902
1903 /* If timer has already expired and we weren't able to stop it -
1904 * wait for expiration callback to finish before leaving _bcmos_timer_stop()
1905 */
1906 if (BCMOS_TIMER_IS_EXPIRED(timer))
1907 {
1908 bcmos_task *t = bcmos_task_current();
1909
1910 /* Skip wait if timer is being stopped / restarted from inside the handler */
1911 if (t != timer->task)
1912 {
1913 while (BCMOS_TIMER_IS_EXPIRED(timer) && BCMOS_TIMER_IS_VALID(timer))
1914 {
1915 bcmos_usleep(1000);
1916 }
1917 timer->flags &= ~BCMOS_TIMER_FLAG_EXPIRED;
1918 }
1919 }
1920}
1921
1922
1923/* System timer expiration handler.
1924 * Execute all timers that expired and restart system timer
1925 */
1926static void _sys_timer_handler(void *data)
1927{
1928 bcmos_timer_pool *pool = (bcmos_timer_pool *)data;
1929 bcmos_timer *timer;
1930 bcmos_timer_rc rc;
1931 long flags;
1932
1933 BUG_ON(pool != &tmr_pool);
1934 flags = bcmos_fastlock_lock(&pool->lock);
 1935 while ((timer = TMR_POOL_FIRST(pool)) != NULL)
1936 {
1937 /* Stop when reached timer that hasn't expired yet */
1938 if (_bcmos_timer_ts_delta(timer->expire_at, bcmos_timestamp()) > BCMOS_TIMER_PRECISION_US / 2)
1939 break;
1940 timer->flags |= BCMOS_TIMER_FLAG_EXPIRED;
1941 timer->flags &= ~BCMOS_TIMER_FLAG_RUNNING;
1942 /* IT: Barrier here ? */
1943 TMR_POOL_REMOVE(pool, timer);
1944
1945 /* Execute handler. Unlock first and re-lock in the end
1946 * It is safe to unlock here because the top loop starts from MIN every time
1947 */
1948 bcmos_fastlock_unlock(&pool->lock, flags);
1949 rc = timer->handler(timer, timer->parm.data);
1950 if (!timer->parm.owner)
1951 {
1952 if (rc == BCMOS_TIMER_OK && timer->period)
1953 {
1954 uint32_t interval = timer->period;
1955 timer->period = 0;
1956 _bcmos_timer_pool_insert(timer, interval, BCMOS_FALSE);
1957 }
1958 else
1959 {
1960 timer->flags &= ~BCMOS_TIMER_FLAG_EXPIRED;
1961 }
1962 }
1963 flags = bcmos_fastlock_lock(&pool->lock);
1964 }
1965 /* Finally kick system timer */
1966 _bcmos_start_system_timer(timer);
1967 bcmos_fastlock_unlock(&pool->lock, flags);
1968}
1969
1970/* Send timer expiration to the target module as urgent message.
1971 * _bcmos_timer_in_module_handler() will get called in the module context
1972 */
1973static bcmos_timer_rc _bcmos_timer_send_to_module_handler(bcmos_timer *timer, long data)
1974{
1975 bcmos_errno rc;
1976 bcmos_module *module = _bcmos_module_get(timer->parm.owner);
1977 bcmos_msg_send_flags send_flags;
1978 if (!module)
1979 {
1980 /* Shouldn't happen, unless the module was destroyed */
1981 BCMOS_TRACE_ERR("_bcmos_timer_send_to_module_handler() -- no module=%u (timer->parm.handler=0x%p)\n", timer->parm.owner, timer->parm.handler);
1982 timer->flags &= ~BCMOS_TIMER_FLAG_EXPIRED;
1983 return BCMOS_TIMER_STOP; /* will restart in module context if necessary */
1984 }
1985 timer->queue = &module->msgq;
1986 send_flags = BCMOS_MSG_SEND_NOLIMIT;
1987 if (!((timer->parm.flags & BCMOS_TIMER_PARM_FLAGS_NON_URGENT)))
1988 send_flags |= BCMOS_MSG_SEND_URGENT;
1989 rc = bcmos_msg_send_to_module(timer->parm.owner, &timer->msg, send_flags);
1990 if (rc)
1991 {
1992 /* Shouldn't happen, unless the module was destroyed. Very short race condition here */
1993 timer->queue = NULL;
1994 timer->flags &= ~BCMOS_TIMER_FLAG_EXPIRED;
1995 BCMOS_TRACE_ERR("_bcmos_timer_send_to_module_handler() --> %d\n", rc);
1996 }
1997 return BCMOS_TIMER_STOP; /* will restart in module context if necessary */
1998}
1999
2000/* This function handles timer expiration in module context */
2001static void _bcmos_timer_in_module_handler(bcmos_module_id module_id, bcmos_msg *msg)
2002{
2003 bcmos_timer *timer = _bcmos_msg_to_timer(msg);
2004 bcmos_module *module = _bcmos_module_get(timer->parm.owner);
2005 bcmos_timer_rc rc;
2006
2007 /* Call timer's callback function and restart the timer if necessary */
2008 timer->queue = NULL;
2009    /* module can't be NULL here. It is checked anyway to keep the static code analyzer happy */
2010 timer->task = module ? module->my_task : (bcmos_task*)NULL;
2011 rc = timer->parm.handler(timer, timer->parm.data);
2012 timer->task = NULL;
2013 if (rc == BCMOS_TIMER_OK && timer->period)
2014 _bcmos_timer_pool_insert(timer, timer->period, BCMOS_TRUE);
2015 else
2016 timer->flags &= ~BCMOS_TIMER_FLAG_EXPIRED;
2017}
2018
2019/* Release timer message. Only called in exceptional situations,
2020 * such as module queue destruction. Intentionally does nothing.
2021 */
2022static void _bcmos_timer_msg_release(bcmos_msg *msg)
2023{
2024}
2025
2026/* Create timer */
2027bcmos_errno bcmos_timer_create(bcmos_timer *timer, bcmos_timer_parm *parm)
2028{
2029 if (!timer || !parm || !parm->handler)
2030 BCMOS_TRACE_RETURN(BCM_ERR_PARM, "timer %p, parm %p handler %p\n", timer, parm, parm ? parm->handler : NULL);
2031 if (parm->owner != BCMOS_MODULE_ID_NONE && _bcmos_module_get(parm->owner) == NULL)
2032 BCMOS_TRACE_RETURN(BCM_ERR_NOENT, "module_id %d\n", parm->owner);
2033 memset(timer, 0, sizeof(bcmos_timer));
2034 timer->parm = *parm;
2035 if (parm->owner == BCMOS_MODULE_ID_NONE)
2036 timer->handler = parm->handler;
2037 else
2038 {
2039 timer->handler = _bcmos_timer_send_to_module_handler;
2040 timer->msg.handler = _bcmos_timer_in_module_handler;
2041 timer->msg.release = _bcmos_timer_msg_release;
2042 timer->msg.sender = BCMOS_MODULE_ID_NONE;
2043 timer->msg.type = BCMOS_MSG_ID_INTERNAL_TIMER;
2044 }
2045 timer->flags |= BCMOS_TIMER_FLAG_VALID;
2046 return BCM_ERR_OK;
2047}
2048
2049/* Destroy timer */
2050void bcmos_timer_destroy(bcmos_timer *timer)
2051{
2052 BUG_ON(!timer);
2053 bcmos_timer_stop(timer);
2054 timer->flags &= ~BCMOS_TIMER_FLAG_VALID;
2055}
2056
2057/* (Re)start timer */
2058void bcmos_timer_start(bcmos_timer *timer, uint32_t delay)
2059{
2060 BUG_ON(!timer);
2061 BUG_ON(!BCMOS_TIMER_IS_VALID(timer));
2062 if ((int32_t)delay < 0)
2063 {
2064 BCMOS_TRACE_ERR("Attempt to start timer (%s) for period longer than 2^31-1. Reduced to 2^31-1\n",
2065 timer->parm.name ? timer->parm.name : "*unnamed*");
2066 delay = 0x7fffffff;
2067 }
2068
2069 if (BCMOS_TIMER_IS_RUNNING(timer) || BCMOS_TIMER_IS_EXPIRED(timer))
2070 {
2071 _bcmos_timer_stop(timer);
2072 }
2073 _bcmos_timer_pool_insert(timer, delay, BCMOS_TRUE);
2074}
2075
2076/* Stop timer if running */
2077void bcmos_timer_stop(bcmos_timer *timer)
2078{
2079 BUG_ON(!timer);
2080 _bcmos_timer_stop(timer);
2081}
2082
2083/** Set timer handler */
2084bcmos_errno bcmos_timer_handler_set(bcmos_timer *timer, F_bcmos_timer_handler handler, long data)
2085{
2086 BUG_ON(!timer);
2087 BUG_ON(!handler);
2088 timer->parm.handler = handler;
2089 timer->parm.data = data;
2090 if (timer->parm.owner == BCMOS_MODULE_ID_NONE)
2091 timer->handler = handler;
2092 return BCM_ERR_OK;
2093}
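
/* Illustrative usage sketch (not part of the build): create a one-shot timer that runs in
 * timer context (owner == BCMOS_MODULE_ID_NONE), start it and later stop/destroy it.
 * BCMOS_USAGE_EXAMPLES is a hypothetical guard macro that is never defined in this tree,
 * and the delay value assumes microsecond units (see BCMOS_TIMER_PRECISION_US above).
 */
#ifdef BCMOS_USAGE_EXAMPLES
static bcmos_timer_rc _example_timer_cb(bcmos_timer *timer, long data)
{
    bcmos_printf("timer %s expired, data=%ld\n",
        timer->parm.name ? timer->parm.name : "*unnamed*", data);
    return BCMOS_TIMER_STOP; /* one-shot: do not re-arm */
}

static void _example_timer_usage(void)
{
    static bcmos_timer example_timer;
    bcmos_timer_parm parm =
    {
        .name = "example_tmr",
        .owner = BCMOS_MODULE_ID_NONE,  /* run handler in timer context, not in a module */
        .handler = _example_timer_cb,
        .data = 42
    };

    if (bcmos_timer_create(&example_timer, &parm) != BCM_ERR_OK)
        return;
    bcmos_timer_start(&example_timer, 500000); /* 500 ms, assuming microsecond units */
    /* ... later ... */
    bcmos_timer_stop(&example_timer);
    bcmos_timer_destroy(&example_timer);
}
#endif /* BCMOS_USAGE_EXAMPLES */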
2094
2095/*
2096 * Block memory pool
2097 */
2098
2099/* Memory block structure:
2100 * - bcmos_memblk
2101 * - blk_size bytes of user data
2102 * - [magic - for block overflow-corruption check]
2103 * - [padding to align to pointer size]
2104 */
2105
2106struct bcmos_memblk
2107{
2108 STAILQ_ENTRY(bcmos_memblk) next; /**< Next block pointer */
2109    bcmos_blk_pool *pool;               /**< Pool that owns the block */
2110#ifdef BCMOS_MEM_DEBUG
2111    uint32_t magic;                     /**< Magic number */
2112#define BCMOS_MEM_MAGIC_ALLOC (('m'<<24) | ('b' << 16) | ('l' << 8) | 'k')
2113#define BCMOS_MEM_MAGIC_FREE (('m'<<24) | ('b' << 16) | ('l' << 8) | '~')
2114#define BCMOS_MEM_MAGIC_SUFFIX (('m'<<24) | ('b' << 16) | ('l' << 8) | 's')
2115    uint32_t lineno;                    /**< Line number where the block was allocated/released. FFU */
2116#endif
2117};
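
/* Illustrative sketch (not part of the build): the user pointer handed out by the allocator
 * sits immediately after the bcmos_memblk header, so the header can be recovered by
 * subtracting sizeof(bcmos_memblk). This mirrors the arithmetic used by
 * bcmos_blk_pool_alloc() / bcmos_blk_pool_free() below. BCMOS_USAGE_EXAMPLES is a
 * hypothetical guard macro that is never defined in this tree.
 */
#ifdef BCMOS_USAGE_EXAMPLES
static bcmos_memblk *_example_user_ptr_to_memblk(void *user_ptr)
{
    /* Same offset calculation as bcmos_blk_pool_free() */
    return (bcmos_memblk *)((long)user_ptr - sizeof(bcmos_memblk));
}
#endif /* BCMOS_USAGE_EXAMPLES */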
2118
2119/* Create block memory pool */
2120bcmos_errno bcmos_blk_pool_create(bcmos_blk_pool *pool, const bcmos_blk_pool_parm *parm)
2121{
2122 uint32_t blk_size;
2123
2124 if (!pool || !parm || !parm->blk_size)
2125 {
2126 BCMOS_TRACE_RETURN(BCM_ERR_PARM, "pool %p, parm %p, blk_size=%u, num_blks=%u\n",
2127 pool, parm, parm ? parm->blk_size : 0, parm ? parm->num_blks : 0);
2128 }
2129    if ((parm->num_blks && parm->pool_size) || (!parm->num_blks && !parm->pool_size))
2130 {
2131 BCMOS_TRACE_RETURN(BCM_ERR_PARM, "One and only one of num_blks (%u) and pool_size (%u) must be set\n",
2132 parm->num_blks, parm->pool_size);
2133 }
2134 if (parm->num_blks && parm->start != NULL)
2135 {
2136 BCMOS_TRACE_RETURN(BCM_ERR_PARM, "num_blks!=0 can't be used with start!=NULL. Use byte size instead\n");
2137 }
2138
2139 BCM_MEMZERO_STRUCT(pool);
2140 pool->parm = *parm;
2141
2142 /* Copy name to make sure that it is not released - in case it was on the stack */
2143 if (pool->parm.name)
2144 {
2145 strncpy(pool->name, pool->parm.name, sizeof(pool->name) - 1);
2146 pool->parm.name = pool->name;
2147 }
2148
2149 /*
2150 * Calculate total block size in bytes, including overheads
2151 */
2152 /* Round up block size to the nearest 32-bit word to make MAGIC check cheaper.
2153 * It doesn't affect the actual overhead size because of the final
2154 * rounding up to pointer size.
2155 */
2156 pool->parm.blk_size = BCMOS_ROUND_UP(pool->parm.blk_size, sizeof(uint32_t));
2157 blk_size = pool->parm.blk_size + sizeof(bcmos_memblk);
2158#ifdef BCMOS_MEM_DEBUG
2159 blk_size += sizeof(uint32_t); /* room for magic after user data block */
2160#endif
2161 blk_size = BCMOS_ROUND_UP(blk_size, sizeof(void *));
2162
2163 /* Derive num_blks / pool_size from one another */
2164 if (pool->parm.num_blks)
2165 {
2166 pool->parm.pool_size = parm->num_blks * blk_size;
2167 }
2168 else
2169 {
2170 pool->parm.num_blks = pool->parm.pool_size / blk_size;
2171 if (!pool->parm.num_blks)
2172 {
2173 BCMOS_TRACE_RETURN(BCM_ERR_PARM, "pool_size (%u) is too small\n", parm->pool_size);
2174 }
2175 }
2176
2177 /* Allocate memory for the pool if "start" is not set */
2178 pool->start = pool->parm.start;
2179 if (!pool->start)
2180 {
2181 pool->start = bcmos_alloc(pool->parm.pool_size);
2182 if (!pool->start)
2183 {
2184 BCMOS_TRACE_RETURN(BCM_ERR_NOMEM, "Can't allocate memory for block pool %s\n", parm->name);
2185 }
2186 }
2187
2188 bcmos_fastlock_init(&pool->lock, parm->flags);
2189
2190 /* Put all blocks on free list */
2191 bcmos_blk_pool_reset(pool);
2192
2193 pool->magic = BCMOS_BLK_POOL_VALID;
2194 if (!(pool->parm.flags & BCMOS_BLK_POOL_FLAG_MSG_POOL))
2195 {
2196 bcmos_mutex_lock(&bcmos_res_lock);
2197 STAILQ_INSERT_TAIL(&blk_pool_list, pool, list);
2198 bcmos_total_blk_pool_size += pool->parm.pool_size;
2199 bcmos_mutex_unlock(&bcmos_res_lock);
2200 }
2201
2202 return BCM_ERR_OK;
2203}
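
/* Illustrative sketch (not part of the build): the two mutually exclusive ways to size a
 * block pool - by block count (the pool allocates its own memory) or by byte size over a
 * caller-supplied region. BCMOS_USAGE_EXAMPLES is a hypothetical guard macro and
 * example_region / example_pool1 / example_pool2 are hypothetical objects used only here.
 */
#ifdef BCMOS_USAGE_EXAMPLES
static bcmos_blk_pool example_pool1, example_pool2;
static uint8_t example_region[16 * 1024];

static void _example_blk_pool_create(void)
{
    bcmos_blk_pool_parm by_count =
    {
        .name = "example_by_count",
        .blk_size = 128,
        .num_blks = 64          /* pool_size is derived; memory is bcmos_alloc()-ed */
    };
    bcmos_blk_pool_parm by_size =
    {
        .name = "example_by_size",
        .blk_size = 128,
        .pool_size = sizeof(example_region),
        .start = example_region /* num_blks is derived from pool_size / (blk_size + overhead) */
    };

    bcmos_blk_pool_create(&example_pool1, &by_count);
    bcmos_blk_pool_create(&example_pool2, &by_size);
}
#endif /* BCMOS_USAGE_EXAMPLES */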
2204
2205/* Destroy memory pool */
2206bcmos_errno bcmos_blk_pool_destroy(bcmos_blk_pool *pool)
2207{
2208 if (!pool || pool->magic != BCMOS_BLK_POOL_VALID)
2209 {
2210 BCMOS_TRACE_RETURN(BCM_ERR_PARM, "pool handle is invalid\n");
2211 }
2212    if (pool->stat.allocated - pool->stat.released)
2213    {
2214        BCMOS_TRACE_RETURN(BCM_ERR_STATE, "%u blocks are still allocated from the pool %s\n",
2215            pool->stat.allocated - pool->stat.released, pool->parm.name);
2216 }
2217 if (!(pool->parm.flags & BCMOS_BLK_POOL_FLAG_MSG_POOL))
2218 {
2219 bcmos_mutex_lock(&bcmos_res_lock);
2220 STAILQ_REMOVE(&blk_pool_list, pool, bcmos_blk_pool, list);
2221 bcmos_total_blk_pool_size -= pool->parm.pool_size;
2222 bcmos_mutex_unlock(&bcmos_res_lock);
2223 }
2224 if (!pool->parm.start && pool->start)
2225 {
2226 bcmos_free(pool->start);
2227 }
2228 pool->magic = BCMOS_BLK_POOL_DELETED;
2229 return BCM_ERR_OK;
2230}
2231
2232/** Release all blocks in the memory pool. Block content is not affected */
2233void bcmos_blk_pool_reset(bcmos_blk_pool *pool)
2234{
2235 uint32_t blk_size;
2236 bcmos_memblk *blk;
2237 uint32_t i;
2238
2239 STAILQ_INIT(&pool->free_list);
2240
2241 blk_size = pool->parm.blk_size + sizeof(bcmos_memblk);
2242#ifdef BCMOS_MEM_DEBUG
2243 blk_size += sizeof(uint32_t); /* room for magic after user data block */
2244#endif
2245 blk_size = BCMOS_ROUND_UP(blk_size, sizeof(void *));
2246
2247 /* Put all blocks on free list */
2248 blk = (bcmos_memblk *)pool->start;
2249 for (i = 0; i < pool->parm.num_blks; i++)
2250 {
2251 blk->pool = pool;
2252 STAILQ_INSERT_TAIL(&pool->free_list, blk, next);
2253#ifdef BCMOS_MEM_DEBUG
2254 *(uint32_t*)((long)blk + sizeof(bcmos_memblk) + pool->parm.blk_size) = BCMOS_MEM_MAGIC_SUFFIX;
2255 blk->lineno = 0;
2256 blk->magic = BCMOS_MEM_MAGIC_FREE;
2257#endif
2258 blk = (bcmos_memblk *)((long)blk + blk_size);
2259 }
2260
2261 /* Init statistics */
2262 memset(&pool->stat, 0, sizeof(pool->stat));
2263 pool->stat.free = pool->parm.num_blks;
2264}
2265
2266/* Allocate block from block memory pool */
2267void *bcmos_blk_pool_alloc(bcmos_blk_pool *pool)
2268{
2269 bcmos_memblk *blk;
2270 long flags;
2271
2272#ifdef BCMOS_MEM_DEBUG
2273 BUG_ON(!pool);
2274 BUG_ON(pool->magic != BCMOS_BLK_POOL_VALID);
2275#endif
2276 flags = bcmos_fastlock_lock(&pool->lock);
2277 blk = STAILQ_FIRST(&pool->free_list);
2278 if (blk)
2279 {
2280 STAILQ_REMOVE_HEAD(&pool->free_list, next);
2281 ++pool->stat.allocated;
2282#ifdef BCMOS_MEM_DEBUG
2283 blk->magic = BCMOS_MEM_MAGIC_ALLOC;
2284#endif
2285 bcmos_fastlock_unlock(&pool->lock, flags);
2286 return (void *)(blk + 1);
2287 }
2288
2289 /* No memory */
2290 ++pool->stat.alloc_failed;
2291 bcmos_fastlock_unlock(&pool->lock, flags);
2292 return NULL;
2293}
2294
2295/* Allocate block from block memory pool and zero the block */
2296void *bcmos_blk_pool_calloc(bcmos_blk_pool *pool)
2297{
2298 void *ptr = bcmos_blk_pool_alloc(pool);
2299 if (ptr)
2300 {
2301 memset(ptr, 0, pool->parm.blk_size);
2302 }
2303 return ptr;
2304}
2305
2306/* Release a block allocated using bcmos_blk_pool_alloc() or bcmos_blk_pool_calloc() */
2307void bcmos_blk_pool_free(void *ptr)
2308{
2309 bcmos_memblk *blk;
2310 bcmos_blk_pool *pool;
2311 long flags;
2312
2313 blk = (bcmos_memblk *)((long)ptr - sizeof(bcmos_memblk));
2314 pool = blk->pool;
2315#ifdef BCMOS_MEM_DEBUG
2316 BUG_ON(blk->magic != BCMOS_MEM_MAGIC_ALLOC);
2317 BUG_ON(pool == NULL);
2318 BUG_ON(pool->magic != BCMOS_BLK_POOL_VALID);
2319 BUG_ON(*(uint32_t *)((long)ptr + pool->parm.blk_size) != BCMOS_MEM_MAGIC_SUFFIX);
2320 blk->magic = BCMOS_MEM_MAGIC_FREE;
2321#endif
2322 flags = bcmos_fastlock_lock(&pool->lock);
2323 STAILQ_INSERT_HEAD(&pool->free_list, blk, next);
2324 ++pool->stat.released;
2325 bcmos_fastlock_unlock(&pool->lock, flags);
2326}
2327
2328/* Get pool info */
2329bcmos_errno bcmos_blk_pool_query(const bcmos_blk_pool *pool, bcmos_blk_pool_info *info)
2330{
2331 if (!pool || !info)
2332 {
2333 return BCM_ERR_PARM;
2334 }
2335 info->parm = pool->parm;
2336 info->stat = pool->stat;
2337 info->stat.free = pool->parm.num_blks - (info->stat.allocated - info->stat.released);
2338 return BCM_ERR_OK;
2339}
2340
2341/* Block pool iterator */
2342bcmos_errno bcmos_blk_pool_get_next(const bcmos_blk_pool **prev)
2343{
2344 const bcmos_blk_pool *pool;
2345
2346 if (prev == NULL)
2347 {
2348 return BCM_ERR_PARM;
2349 }
2350 pool = *prev;
2351 if (pool && pool->magic != BCMOS_BLK_POOL_VALID)
2352 {
2353 return BCM_ERR_PARM;
2354 }
2355 if (pool)
2356 {
2357 pool = STAILQ_NEXT(pool, list);
2358 }
2359 else
2360 {
2361 pool = STAILQ_FIRST(&blk_pool_list);
2362 }
2363 *prev = pool;
2364 if (!pool)
2365 {
2366 return BCM_ERR_NO_MORE;
2367 }
2368 return BCM_ERR_OK;
2369}
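
/* Illustrative sketch (not part of the build): allocate and release blocks and walk all
 * registered block pools with the iterator. BCMOS_USAGE_EXAMPLES is a hypothetical guard
 * macro; example_pool1 is the hypothetical pool from the previous sketch.
 */
#ifdef BCMOS_USAGE_EXAMPLES
static void _example_blk_pool_usage(void)
{
    const bcmos_blk_pool *it = NULL;
    void *blk = bcmos_blk_pool_calloc(&example_pool1);   /* zero-initialized block */

    if (blk)
        bcmos_blk_pool_free(blk);                        /* the block "remembers" its pool */

    /* Iterate over all non-message block pools */
    while (bcmos_blk_pool_get_next(&it) == BCM_ERR_OK)
    {
        bcmos_blk_pool_info info;
        if (bcmos_blk_pool_query(it, &info) == BCM_ERR_OK)
            bcmos_printf("pool %s: %u free blocks\n", info.parm.name, info.stat.free);
    }
}
#endif /* BCMOS_USAGE_EXAMPLES */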
2370
2371/*
2372 * Message pool
2373 */
2374
2375/* release message callback */
2376static void _bcmos_msg_pool_release(bcmos_msg *msg)
2377{
2378 if (msg->data_release)
2379 msg->data_release(msg);
2380 bcmos_blk_pool_free(msg);
2381}
2382
2383/* Create message pool */
2384bcmos_errno bcmos_msg_pool_create(bcmos_msg_pool *pool, const bcmos_msg_pool_parm *parm)
2385{
2386 bcmos_blk_pool_parm pool_parm = {};
2387 bcmos_memblk *blk;
2388 bcmos_errno err;
2389
2390 if (!pool || !parm || !parm->size)
2391 {
2392 return BCM_ERR_PARM;
2393 }
2394 BCM_MEMZERO_STRUCT(pool);
2395 pool->parm = *parm;
2396 pool_parm.num_blks = parm->size;
2397 pool_parm.blk_size = parm->data_size + sizeof(bcmos_msg);
2398 pool_parm.flags = parm->flags | BCMOS_BLK_POOL_FLAG_MSG_POOL;
2399 pool_parm.name = parm->name;
2400
2401 /* Create underlying block pool */
2402 err = bcmos_blk_pool_create(&pool->blk_pool, &pool_parm);
2403 if (err)
2404 {
2405 return err;
2406 }
2407 pool->parm.name = pool->blk_pool.name;
2408
2409 /* Pre-initialize all messages */
2410 STAILQ_FOREACH(blk, &pool->blk_pool.free_list, next)
2411 {
2412 bcmos_msg *msg = (bcmos_msg *)(blk + 1);
2413 msg->data = (void *)(msg + 1);
2414 msg->size = pool->parm.data_size;
2415 msg->release = _bcmos_msg_pool_release;
2416 msg->data_release = parm->data_release;
2417 }
2418 bcmos_mutex_lock(&bcmos_res_lock);
2419 STAILQ_INSERT_TAIL(&msg_pool_list, &pool->blk_pool, list);
2420 bcmos_total_msg_pool_size += pool->blk_pool.parm.pool_size;
2421 bcmos_mutex_unlock(&bcmos_res_lock);
2422
2423 return BCM_ERR_OK;
2424}
2425
2426/** Destroy message pool */
2427bcmos_errno bcmos_msg_pool_destroy(bcmos_msg_pool *pool)
2428{
2429 bcmos_errno rc;
2430
2431 rc = bcmos_blk_pool_destroy(&pool->blk_pool);
2432 if (rc)
2433 return rc;
2434
2435 bcmos_mutex_lock(&bcmos_res_lock);
2436 STAILQ_REMOVE(&msg_pool_list, &pool->blk_pool, bcmos_blk_pool, list);
2437 bcmos_total_msg_pool_size -= pool->blk_pool.parm.pool_size;
2438 bcmos_mutex_unlock(&bcmos_res_lock);
2439
2440 pool->parm.size = pool->parm.data_size = 0;
2441 return BCM_ERR_OK;
2442}
2443
2444/* Allocate message from pool */
2445bcmos_msg *bcmos_msg_pool_alloc(bcmos_msg_pool *pool)
2446{
2447 return bcmos_blk_pool_alloc(&pool->blk_pool);
2448}
2449
2450/* Get pool info */
2451bcmos_errno bcmos_msg_pool_query(const bcmos_msg_pool *pool, bcmos_msg_pool_info *info)
2452{
2453 bcmos_blk_pool_info pool_info;
2454 bcmos_errno err;
2455
2456 if (!pool || !info)
2457 {
2458 return BCM_ERR_PARM;
2459 }
2460 err = bcmos_blk_pool_query(&pool->blk_pool, &pool_info);
2461 if (err)
2462 {
2463 return err;
2464 }
2465 info->parm = pool->parm;
2466 info->stat = pool_info.stat;
2467 return BCM_ERR_OK;
2468}
2469
2470/* Message pool iterator */
2471bcmos_errno bcmos_msg_pool_get_next(const bcmos_msg_pool **prev)
2472{
2473 const bcmos_msg_pool *pool;
2474
2475 if (prev == NULL)
2476 {
2477 return BCM_ERR_PARM;
2478 }
2479 pool = *prev;
2480 if (pool && pool->blk_pool.magic != BCMOS_BLK_POOL_VALID)
2481 {
2482 return BCM_ERR_PARM;
2483 }
2484 if (pool)
2485 {
2486 pool = container_of(STAILQ_NEXT(&pool->blk_pool, list), bcmos_msg_pool, blk_pool);
2487 }
2488 else
2489 {
2490 pool = container_of(STAILQ_FIRST(&msg_pool_list), bcmos_msg_pool, blk_pool);
2491 }
2492 *prev = pool;
2493 if (!pool)
2494 {
2495 return BCM_ERR_NO_MORE;
2496 }
2497 return BCM_ERR_OK;
2498}
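
/* Illustrative sketch (not part of the build): create a message pool, allocate a message
 * and return it to the pool through its release callback (normally invoked by the messaging
 * core after dispatch). BCMOS_USAGE_EXAMPLES is a hypothetical guard macro and
 * example_msg_pool is a hypothetical object used only here.
 */
#ifdef BCMOS_USAGE_EXAMPLES
static bcmos_msg_pool example_msg_pool;

static void _example_msg_pool_usage(void)
{
    bcmos_msg_pool_parm parm =
    {
        .name = "example_msg_pool",
        .size = 32,         /* number of pre-allocated messages */
        .data_size = 256    /* per-message payload size; msg->data points at it */
    };
    bcmos_msg *msg;

    if (bcmos_msg_pool_create(&example_msg_pool, &parm) != BCM_ERR_OK)
        return;
    msg = bcmos_msg_pool_alloc(&example_msg_pool);
    if (msg)
        msg->release(msg);  /* returns the message to the pool */
    bcmos_msg_pool_destroy(&example_msg_pool);
}
#endif /* BCMOS_USAGE_EXAMPLES */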
2499
2500/** Set up print redirection/cloning
2501 * \param[in] mode redirection/cloning mode
2502 * \param[in] cb redirection callback
2503 * \param[in] data opaque data to be passed to cb
2504 */
2505bcmos_errno bcmos_print_redirect(bcmos_print_redirect_mode mode, bcmos_print_redirect_cb cb, void *data)
2506{
2507 if (mode != BCMOS_PRINT_REDIRECT_MODE_NONE && cb == NULL)
2508 {
2509 BCMOS_TRACE_RETURN(BCM_ERR_PARM, "Redirection callback must be set\n");
2510 }
2511 print_redirect_mode = mode;
2512 if (mode == BCMOS_PRINT_REDIRECT_MODE_NONE)
2513 {
2514 print_redirect_cb = NULL;
2515 print_redirect_cb_data = NULL;
2516 }
2517 else
2518 {
2519 print_redirect_cb = cb;
2520 print_redirect_cb_data = data;
2521 }
2522 return BCM_ERR_OK;
2523}
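
/* Illustrative sketch (not part of the build): clone console output to an additional sink
 * via bcmos_print_redirect(). The callback signature matches the way print_redirect_cb is
 * invoked from bcmos_vprintf() below: (opaque data, format, va_list) returning the number of
 * characters printed. BCMOS_PRINT_REDIRECT_MODE_CLONE is assumed to be the "clone" member of
 * bcmos_print_redirect_mode, <stdio.h> is assumed to be pulled in via bcmos_system.h, and
 * BCMOS_USAGE_EXAMPLES is a hypothetical guard macro.
 */
#ifdef BCMOS_USAGE_EXAMPLES
static int _example_print_clone_cb(void *data, const char *format, va_list ap)
{
    FILE *log_file = (FILE *)data;
    return vfprintf(log_file, format, ap);
}

static void _example_print_redirect(FILE *log_file)
{
    bcmos_print_redirect(BCMOS_PRINT_REDIRECT_MODE_CLONE, _example_print_clone_cb, log_file);
}
#endif /* BCMOS_USAGE_EXAMPLES */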
2524
2525/* Print on the console with optional cloning / redirection */
2526/*lint -e{454}*/
2527int bcmos_vprintf(const char *format, va_list ap)
2528{
2529 int rc = 0;
2530 bcmos_bool protected_section = is_irq_mode() || is_irq_disabled();
2531
2532 /* Only protect if in task context */
2533 if (!protected_section)
2534 {
2535 bcmos_mutex_lock(&bcmos_print_lock);
2536 }
2537 if (print_redirect_mode != BCMOS_PRINT_REDIRECT_MODE_REDIRECT)
2538 {
2539 rc = bcmos_sys_vprintf(format, ap);
2540 }
2541 if (print_redirect_mode != BCMOS_PRINT_REDIRECT_MODE_NONE)
2542 {
2543 rc = print_redirect_cb(print_redirect_cb_data, format, ap);
2544 }
2545 if (!protected_section)
2546 {
2547 bcmos_mutex_unlock(&bcmos_print_lock);
2548 }
2549
2550 return rc;
2551}
2552/*lint -e{454}*/
2553
2554/* Print on the console with optional cloning / redirection */
2555int bcmos_printf(const char *format, ...)
2556{
2557 va_list args;
2558 int rc;
2559
2560 va_start(args, format);
2561 rc = bcmos_vprintf(format, args);
2562 va_end(args);
2563 return rc;
2564}
2565
2566#ifndef BCMOS_PUTCHAR_INLINE
2567/*lint -e{454}*/
2568void bcmos_putchar(int c)
2569{
2570 bcmos_bool protected_section = is_irq_mode() || is_irq_disabled();
2571
2572 /* Only protect if in task context */
2573 if (!protected_section)
2574 {
2575 bcmos_mutex_lock(&bcmos_print_lock);
2576 }
2577 putchar(c);
2578 fflush(stdout);
2579 if (!protected_section)
2580 {
2581 bcmos_mutex_unlock(&bcmos_print_lock);
2582 }
2583}
2584/*lint +e{454}*/
2585#endif
2586
2587#ifndef BCMOS_BUF_OS_SPECIFIC
2588
2589/*
2590 * Buffer allocation/release
2591 */
2592
2593#ifdef BCMOS_BUF_POOL_SIZE
2594/** Create the system buffer pool.
2595 * The number of buffers and the buffer size are taken from the
2596 * BCMOS_BUF_POOL_SIZE and BCMOS_BUF_POOL_BUF_SIZE compile-time settings;
2597 * there are no run-time parameters.
2598 * \returns 0=OK or error <0
2599 */
2600static bcmos_errno bcmos_buf_pool_create(void)
2601{
2602 bcmos_blk_pool_parm pool_parm =
2603 {
2604 .name = "sysbuf"
2605 };
2606 bcmos_errno rc;
2607
2608#ifndef BCMOS_BUF_POOL_BUF_SIZE
2609#error BCMOS_BUF_POOL_BUF_SIZE must be defined
2610#endif
2611
2612    /* If buffer memory must be allocated by bcmos_dma_alloc(), allocate
2613     * memory for the whole pool here */
2614 pool_parm.blk_size = BCMOS_BUF_POOL_BUF_SIZE + sizeof(bcmos_buf) + BCMTR_BUF_EXTRA_HEADROOM +
2615 2*BCMOS_BUF_DATA_ALIGNMENT + BCMOS_BUF_DATA_GUARD;
2616
2617#ifdef BCMOS_BUF_IN_DMA_MEM
2618 pool_parm.pool_size = (pool_parm.blk_size + sizeof(bcmos_memblk)) * BCMOS_BUF_POOL_SIZE;
2619 pool_parm.start = bcmos_dma_alloc(0, pool_parm.pool_size);
2620 if (!pool_parm.start)
2621 return BCM_ERR_NOMEM;
2622#else
2623 pool_parm.num_blks = BCMOS_BUF_POOL_SIZE;
2624#endif
2625
2626 rc = bcmos_blk_pool_create(&sys_buf_pool, &pool_parm);
2627 if (rc)
2628 {
2629 if (pool_parm.start)
2630 bcmos_dma_free(0, pool_parm.start);
2631 }
2632
2633 return rc;
2634}
2635#endif
2636
2637/* Allocate buffer */
2638bcmos_buf *bcmos_buf_alloc(uint32_t size)
2639{
2640    /* Allocate an extra 2 * BCMOS_BUF_DATA_ALIGNMENT bytes to make sure that neither the data start
2641     * nor the data end falls in the middle of a cache line
2642     */
2643 bcmos_buf *buf;
2644
2645/* Allocate from the pool */
2646#ifdef BCMOS_BUF_POOL_SIZE
2647
2648 if (size > BCMOS_BUF_POOL_BUF_SIZE)
2649 {
2650 BCMOS_TRACE_ERR("Attempt to allocate buffer bigger than buffer pool block size. %u > %u\n",
2651 size, BCMOS_BUF_POOL_BUF_SIZE);
2652 return NULL;
2653 }
2654 buf = bcmos_blk_pool_alloc(&sys_buf_pool);
2655
2656#else /* else of #if BCMOS_BUF_POOL_SIZE */
2657 {
2658 uint32_t alloc_size = sizeof(bcmos_buf) + size + BCMTR_BUF_EXTRA_HEADROOM +
2659 2*BCMOS_BUF_DATA_ALIGNMENT - 1 + BCMOS_BUF_DATA_GUARD;
2660#ifdef BCMOS_BUF_DATA_UNIT_SIZE
2661#if BCMOS_BUF_DATA_UNIT_SIZE & (BCMOS_BUF_DATA_UNIT_SIZE - 1)
2662#error BCMOS_BUF_DATA_UNIT_SIZE must be a power of 2
2663#endif
2664 alloc_size = BCMOS_ROUND_UP(alloc_size, BCMOS_BUF_DATA_UNIT_SIZE);
2665#endif
2666#ifdef BCMOS_BUF_IN_DMA_MEM
2667 buf = bcmos_dma_alloc(0, alloc_size);
2668#else
2669 buf = bcmos_alloc(alloc_size);
2670#endif
2671 }
2672#endif /* end of #if BCMOS_BUF_POOL_SIZE */
2673
2674 if (!buf)
2675 return NULL;
2676 buf->start = (uint8_t *)(buf + 1) + BCMOS_BUF_DATA_GUARD;
2677 buf->data = (uint8_t *)(BCMOS_ROUND_UP((long)buf->start + BCMTR_BUF_EXTRA_HEADROOM, BCMOS_BUF_DATA_ALIGNMENT));
2678 buf->size = size + (buf->data - buf->start);
2679 buf->len = 0;
2680#ifdef BCMOS_BUF_POOL_SIZE
2681 buf->pool = &sys_buf_pool;
2682#else
2683 buf->pool = NULL;
2684#endif
2685 return buf;
2686}
2687
2688/* Release buffer */
2689void bcmos_buf_free(bcmos_buf *buf)
2690{
2691
2692#ifdef BCMOS_BUF_POOL_SIZE
2693 /* Buffer might have been allocated from the system pool */
2694 if (buf->pool)
2695 {
2696 bcmos_blk_pool_free(buf);
2697 return;
2698 }
2699#endif
2700
2701#ifdef BCMOS_BUF_IN_DMA_MEM
2702 bcmos_dma_free(0, buf);
2703#else
2704 bcmos_free(buf);
2705#endif
2706}
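
/* Illustrative sketch (not part of the build): allocate a network buffer, fill part of it and
 * release it. Only fields that this file itself manipulates (data, size, len) are touched.
 * BCMOS_USAGE_EXAMPLES is a hypothetical guard macro that is never defined in this tree.
 */
#ifdef BCMOS_USAGE_EXAMPLES
static void _example_buf_usage(void)
{
    bcmos_buf *buf = bcmos_buf_alloc(1500);
    if (!buf)
        return;
    memset(buf->data, 0, 64);   /* buf->data is aligned to BCMOS_BUF_DATA_ALIGNMENT, see bcmos_buf_alloc() */
    buf->len = 64;              /* number of valid bytes */
    bcmos_buf_free(buf);        /* returns to sys_buf_pool or to heap/DMA memory as appropriate */
}
#endif /* BCMOS_USAGE_EXAMPLES */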
2707
2708#endif
2709
2710EXPORT_SYMBOL(bcmos_init);
2711
2712EXPORT_SYMBOL(bcmos_msg_queue_create);
2713EXPORT_SYMBOL(bcmos_msg_queue_destroy);
2714EXPORT_SYMBOL(bcmos_msg_queue_query);
2715EXPORT_SYMBOL(bcmos_msg_queue_get_next);
2716EXPORT_SYMBOL(bcmos_msg_send);
2717EXPORT_SYMBOL(bcmos_msg_send_to_module);
2718EXPORT_SYMBOL(bcmos_msg_recv);
2719EXPORT_SYMBOL(bcmos_msg_register);
2720EXPORT_SYMBOL(bcmos_msg_unregister);
2721EXPORT_SYMBOL(bcmos_msg_dispatch);
2722
2723EXPORT_SYMBOL(bcmos_msg_qgroup_create);
2724EXPORT_SYMBOL(bcmos_msg_qgroup_destroy);
2725EXPORT_SYMBOL(bcmos_msg_qgroup_query);
2726EXPORT_SYMBOL(bcmos_msg_recv_from_qgroup);
2727EXPORT_SYMBOL(bcmos_msg_send_to_qgroup);
2728
2729EXPORT_SYMBOL(bcmos_task_timeout_set);
2730EXPORT_SYMBOL(bcmos_task_get_next);
2731
2732EXPORT_SYMBOL(bcmos_module_create);
2733EXPORT_SYMBOL(bcmos_module_destroy);
2734EXPORT_SYMBOL(bcmos_module_current);
2735EXPORT_SYMBOL(bcmos_module_context);
2736EXPORT_SYMBOL(bcmos_module_context_set);
2737EXPORT_SYMBOL(bcmos_module_query);
2738
2739EXPORT_SYMBOL(bcmos_event_create);
2740EXPORT_SYMBOL(bcmos_event_destroy);
2741EXPORT_SYMBOL(bcmos_event_raise);
2742EXPORT_SYMBOL(bcmos_event_recv);
2743
2744EXPORT_SYMBOL(bcmos_timer_create);
2745EXPORT_SYMBOL(bcmos_timer_destroy);
2746EXPORT_SYMBOL(bcmos_timer_start);
2747EXPORT_SYMBOL(bcmos_timer_stop);
2748EXPORT_SYMBOL(bcmos_timer_handler_set);
2749
2750EXPORT_SYMBOL(bcmos_blk_pool_create);
2751EXPORT_SYMBOL(bcmos_blk_pool_destroy);
2752EXPORT_SYMBOL(bcmos_blk_pool_reset);
2753EXPORT_SYMBOL(bcmos_blk_pool_alloc);
2754EXPORT_SYMBOL(bcmos_blk_pool_calloc);
2755EXPORT_SYMBOL(bcmos_blk_pool_free);
2756EXPORT_SYMBOL(bcmos_blk_pool_query);
2757EXPORT_SYMBOL(bcmos_blk_pool_get_next);
2758
2759EXPORT_SYMBOL(bcmos_msg_pool_create);
2760EXPORT_SYMBOL(bcmos_msg_pool_alloc);
2761EXPORT_SYMBOL(bcmos_msg_pool_query);
2762EXPORT_SYMBOL(bcmos_msg_pool_destroy);
2763EXPORT_SYMBOL(bcmos_msg_pool_get_next);
2764
2765EXPORT_SYMBOL(bcmos_print_redirect);
2766EXPORT_SYMBOL(bcmos_printf);
2767EXPORT_SYMBOL(bcmos_vprintf);
2768
2769#ifndef BCMOS_BUF_OS_SPECIFIC
2770EXPORT_SYMBOL(bcmos_buf_alloc);
2771EXPORT_SYMBOL(bcmos_buf_free);
2772#endif