FreeCalypso freecalypso-citrine: riviera/rvm/rvm_swe_hdlr.c @ revision 0:75a11d740a02
changeset: initial import of gsm-fw from freecalypso-sw rev 1033:5ab737ac3ad7
author:    Mychaela Falconia <falcon@freecalypso.org>
date:      Thu, 09 Jun 2016 00:02:41 +0000
1 /** | |
2 * | |
3 * @file rvm_swe_hdlr.c | |
4 * | |
5 * This file contains the functions related to SWEs management within RVM. | |
6 * | |
7 * @author David Lamy-Charrier (d-lamy@ti.com) | |
8 * @version 0.1 | |
9 * | |
10 */ | |
11 | |
12 /* | |
13 * Revision History: | |
14 * | |
15 * 10/26/2001 David Lamy-Charrier Create for Riviera 1.6. | |
16 * | |
17 * (C) Copyright 2001 by Texas Instruments Incorporated, All Rights Reserved | |
18 */ | |
19 #include <stdio.h> | |
20 #include "../rvf/rvf_env.h" | |
21 #include "rvm_gen.h" | |
22 #include "rvm_api.h" | |
23 #include "rvm_i.h" | |
24 | |
25 #include "../rvf/rvf_i.h" /* ET2 rvf private invocation API */ | |
26 #include "../rvf/rvf_api.h" /* A-M-E-N-D-E-D! */ | |
27 | |
28 /* temporary inclusion for HCI problem on WINDOWS */ | |
29 /* TO DO: remove it. */ | |
30 #include "rvm_use_id_list.h" | |
31 | |
32 #include <string.h> | |
33 | |
34 extern T_RVM_CONST_SWE_INFO RVM_SWE_GET_INFO_ARRAY[]; | |
35 | |
36 extern T_RVM_USE_ID * RVM_TYPE2_SWE_GROUPS[]; | |
37 | |
38 extern BOOLEAN rvm_allocated_task_id [MAX_RVF_TASKS]; | |
39 | |
40 extern T_RVM_KNOWN_SWE * rvm_swe_array; | |
41 | |
42 | |
43 /* private */ | |
44 T_RVM_RETURN _fatal(T_RVM_PROCESSING_SWE* appli, UINT8 rm); | |
45 | |
46 /*********************************************************************** | |
47 * Function _resolve_t2_grouping (private) | |
48 * | |
49 * Description Resolves the type-2 group directives for the given SWE list and returns the group count | |
50 *************************************************************************/ | |
51 UINT8 _resolve_t2_grouping(T_RVM_PROCESSING_SWE* appli, T_RVM_GROUP_DIRECTIVE* gd) { | |
52 T_RVM_INFO_SWE swe_info; | |
53 T_RVM_PROCESSING_SWE* cur_swe = appli; | |
54 UINT8 i=0, j=0, k=0; | |
55 | |
56 for(; cur_swe != NULL;) { | |
57 UINT8 swe_index = cur_swe->swe_id; | |
58 | |
59 rvm_swe_array[swe_index].swe_get_info(&swe_info); | |
60 | |
61 if (rvm_swe_array[swe_index].swe_state !=SWE_RUNNING && //== SWE_NOT_STARTED && | |
62 swe_info.swe_type==RVM_SWE_TYPE_2) { | |
63 | |
64 for(i=0; i<MAX_GRPS; i++) { | |
65 if(swe_info.type_info.type2.swe_group_directive == gd[i].group_directive) { | |
66 for(k=0; gd[i].hosted_swe_db_index[k]!=0; k++); | |
67 if(k<MAX_COMPOSITES) { | |
68 gd[i].hosted_swe_db_index[k]=swe_index; | |
69 } else { | |
70 /* TO DO ... ERROR !!! */ | |
71 } | |
72 // RVM_TRACE_WARNING_PARAM("rvm.SweHndlr.resolve_t2_grouping(), appended to grp entry , nb=",\ | |
73 // (UINT32)swe_index); | |
74 //printf("rvm.SweHndlr.resolve_t2_grouping(): appended %d to group: %d\n",gd[i].hosted_swe_db_index[k], gd[i].host_task_addr); | |
75 | |
76 break; | |
77 } else if( swe_info.type_info.type2.swe_group_directive != gd[i].group_directive && | |
78 gd[i].host_task_addr==0 ) { | |
79 | |
80 /* Constraint: expects all group priorities and stack sizes to be equal. | |
81 * An additional method must be used to set the highest entity priority or to resolve conflicts. */ | |
82 gd[i].host_task_addr=RVF_INVALID_ADDR_ID; //rvm_allocate_task_id(1); | |
83 gd[i].group_directive=swe_info.type_info.type2.swe_group_directive; | |
84 gd[i].task_priority=swe_info.type_info.type2.priority; | |
85 gd[i].stack_size=swe_info.type_info.type2.stack_size; | |
86 | |
87 gd[i].hosted_swe_db_index[0]=swe_index; | |
88 j++; | |
89 // RVM_TRACE_WARNING_PARAM("rvm.SweHndlr.resolve_t2_grouping(), created grp entry , nb=",\ | |
90 // (UINT32)swe_index); | |
91 //printf("rvm.SweHndlr.resolve_t2_grouping(): created host group: %d AND append %d\n",gd[i].host_task_addr, gd[i].hosted_swe_db_index[0]); | |
92 break; | |
93 } | |
94 } | |
95 | |
96 } else RVM_TRACE_WARNING_PARAM("rvm.SweHndlr.resolve_t2_grouping(), SWE Not type 2: ", rvm_swe_array[swe_index].swe_use_id); | |
97 cur_swe = cur_swe->next_swe; /* process next SWE */ | |
98 } | |
99 //printf("rvm.SweHndlr.resolve_t2_grouping(): total group count: %d\n", j); | |
100 | |
101 //for(i=0; i<j; i++) /* de'bugger only!! */ | |
102 // for(k=0; k<MAX_COMPOSITES && gd[i].hosted_swe_db_index[k]!=0; k++) | |
103 // printf("host addr: %d, T2 swe_db_index %d\n", | |
104 // gd[i].host_task_addr, gd[i].hosted_swe_db_index[k]); | |
105 | |
106 return j; | |
107 } | |
108 | |
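/*
 * Illustrative sketch, not part of the original source (guarded by #if 0 so
 * it is never compiled): how a caller is expected to use
 * _resolve_t2_grouping(). It mirrors the sequence in rvm_launch_appli()
 * further below: the directive table is cleared, the SWE list ("appli", as
 * built by rvm_build_swe_list()) is scanned for type-2 entities, and the
 * returned group count is handed to rvm_verify_memory_requirement() and
 * rvm_initialize_swe(). The helper name and variable names are illustrative.
 */
#if 0
static void launch_with_type2_groups(T_RVM_PROCESSING_SWE * appli)
{
	T_RVM_GROUP_DIRECTIVE directives[MAX_GRPS];
	UINT8 group_count, n;

	for (n = 0; n < MAX_GRPS; n++) {
		directives[n].group_directive = 0;
		directives[n].host_task_addr  = 0;
		directives[n].stack_size      = 0;
		memset(directives[n].hosted_swe_db_index, 0,
		       sizeof(UINT8) * MAX_COMPOSITES);
	}

	group_count = _resolve_t2_grouping(appli, directives);

	/* the directive table and its count then feed the memory check and
	   the initialization step, as rvm_launch_appli() does below */
	if (rvm_verify_memory_requirement(appli, directives, group_count) == RVM_OK)
		rvm_initialize_swe(appli, directives, group_count);
}
#endif
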
109 /******************************************************************************* | |
110 ** Function rvm_allocate_task_id | |
111 ** | |
112 ** Description Internal function which allocates the first available | |
113 ** task id to a SWE being created. | |
114 *******************************************************************************/ | |
115 T_RVM_TASK_ID rvm_allocate_task_id(UINT8 isRealTask) { | |
116 /* UINT8 i=0; */ | |
117 | |
118 /* Find the 1st free task id | |
119 If we reach the max: all task ids are allocated => not possible to start SWE.*/ | |
120 /* while (rvm_allocated_task_id[i] == TRUE) | |
121 { | |
122 i++; | |
123 if (i == MAX_RVF_TASKS) | |
124 return RVF_INVALID_TASK; | |
125 }*/ | |
126 | |
127 /* Lock task id and return its value. */ | |
128 /* rvm_allocated_task_id[i] = TRUE; */ | |
129 /* return ((T_RVM_TASK_ID) i); */ | |
130 return (T_RVM_TASK_ID) rvf_allocate_task_id(isRealTask); /* A-M-E-N-D-E-D! */ | |
131 } | |
132 | |
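/*
 * Illustrative sketch, not part of the original source (#if 0, never
 * compiled): the two ways rvm_allocate_task_id() is used in this file.
 * A real task id (argument 1) is requested for type-3/type-4 entities that
 * run in their own task, a virtual one (argument 0) for type-2 entities
 * hosted by a group task; the result is checked against RVF_INVALID_ADDR_ID
 * as in rvm_set_swe_info(). The helper name is illustrative.
 */
#if 0
static void allocate_task_id_example(void)
{
	T_RVM_TASK_ID real_id    = rvm_allocate_task_id(1); /* own task      */
	T_RVM_TASK_ID virtual_id = rvm_allocate_task_id(0); /* hosted entity */

	if (real_id == RVF_INVALID_ADDR_ID || virtual_id == RVF_INVALID_ADDR_ID) {
		/* no task id left: rvm_set_swe_info() returns RVM_INTERNAL_ERR here */
	}
}
#endif
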
133 | |
134 /******************************************************************************* | |
135 ** | |
136 ** Function rvm_set_swe_info | |
137 ** | |
138 ** Description This function calls the set_info function of each SWE required | |
139 ** to start a specified SWE. | |
140 ** | |
141 ** Parameters: T_RVM_PROCESSING_SWE * appli: list of required SWEs with their parameters. | |
142 ** | |
143 ** Returns T_RVM_RETURN: RVM_OK if successful. | |
144 ** | |
145 *******************************************************************************/ | |
146 T_RVM_RETURN rvm_set_swe_info(T_RVM_PROCESSING_SWE * appli) | |
147 { | |
148 T_RVM_PROCESSING_SWE * cur_swe = appli; | |
149 UINT8 i; | |
150 T_RVF_MB_ID _bk_id_table[RVM_MAX_NB_MEM_BK]; | |
151 | |
152 /* for each SWE in the list */ | |
153 while( cur_swe != NULL ) | |
154 { | |
155 UINT8 swe_index = cur_swe->swe_id; | |
156 | |
157 if (rvm_swe_array[swe_index].swe_state != SWE_RUNNING) | |
158 /* Call the set_info function for only those for which MB were just created */ | |
159 { | |
160 /* First build return path */ | |
161 T_RVM_INFO_SWE swe_info; | |
162 T_RV_RETURN_PATH return_path[RVM_MAX_NB_LINKED_SWE]; | |
163 T_RVM_USE_ID linked_swe_use_id[RVM_MAX_NB_LINKED_SWE]; | |
164 UINT8 nb_linked_swe = 0; | |
165 | |
166 rvm_swe_array[swe_index].swe_get_info(&swe_info); | |
167 | |
168 switch( swe_info.swe_type) | |
169 { | |
170 case(RVM_SWE_TYPE_1): | |
171 { nb_linked_swe = swe_info.type_info.type1.nb_linked_swe; | |
172 memcpy( linked_swe_use_id, swe_info.type_info.type1.linked_swe_id, RVM_MAX_NB_LINKED_SWE * sizeof(T_RVM_USE_ID) ); | |
173 if(rvm_swe_array[swe_index].swe_state != SWE_NOT_STARTED) { | |
174 for(i=0;i<swe_info.type_info.type1.nb_mem_bank; i++) { | |
175 rvf_get_mb_id((char*)&swe_info.type_info.type1.mem_bank[i], | |
176 &_bk_id_table[i]); | |
177 } | |
178 } | |
179 break; | |
180 } | |
181 case(RVM_SWE_TYPE_2): | |
182 { nb_linked_swe = swe_info.type_info.type2.nb_linked_swe; | |
183 memcpy( linked_swe_use_id, swe_info.type_info.type2.linked_swe_id, RVM_MAX_NB_LINKED_SWE * sizeof(T_RVM_USE_ID) ); | |
184 if((rvm_swe_array[cur_swe->swe_id].swe_addr_id = rvm_allocate_task_id(0))==RVF_INVALID_ADDR_ID) { | |
185 return RVM_INTERNAL_ERR; | |
186 } | |
187 if(rvm_swe_array[swe_index].swe_state != SWE_NOT_STARTED) { | |
188 for(i=0;i<swe_info.type_info.type2.nb_mem_bank; i++) { | |
189 rvf_get_mb_id((char*)&swe_info.type_info.type2.mem_bank[i], | |
190 &_bk_id_table[i]); | |
191 } | |
192 } | |
193 break; | |
194 } | |
195 case(RVM_SWE_TYPE_3): | |
196 { nb_linked_swe = swe_info.type_info.type3.nb_linked_swe; | |
197 memcpy( linked_swe_use_id, swe_info.type_info.type3.linked_swe_id, RVM_MAX_NB_LINKED_SWE * sizeof(T_RVM_USE_ID) ); | |
198 if((rvm_swe_array[cur_swe->swe_id].swe_addr_id = rvm_allocate_task_id(1))==RVF_INVALID_ADDR_ID) { | |
199 return RVM_INTERNAL_ERR; | |
200 } | |
201 if(rvm_swe_array[swe_index].swe_state != SWE_NOT_STARTED) { | |
202 for(i=0;i<swe_info.type_info.type3.nb_mem_bank; i++) { | |
203 rvf_get_mb_id((char*)&swe_info.type_info.type3.mem_bank[i], | |
204 &_bk_id_table[i]); | |
205 } | |
206 } | |
207 break; | |
208 } | |
209 case(RVM_SWE_TYPE_4): | |
210 { nb_linked_swe = swe_info.type_info.type4.nb_linked_swe; | |
211 memcpy( linked_swe_use_id, swe_info.type_info.type4.linked_swe_id, RVM_MAX_NB_LINKED_SWE * sizeof(T_RVM_USE_ID) ); | |
212 if((rvm_swe_array[cur_swe->swe_id].swe_addr_id = rvm_allocate_task_id(1))==RVF_INVALID_ADDR_ID) { | |
213 return RVM_INTERNAL_ERR; | |
214 } | |
215 if(rvm_swe_array[swe_index].swe_state != SWE_NOT_STARTED) { | |
216 for(i=0;i<swe_info.type_info.type4.nb_mem_bank; i++) { | |
217 rvf_get_mb_id((char*)&swe_info.type_info.type4.mem_bank[i], | |
218 &_bk_id_table[i]); | |
219 } | |
220 } | |
221 break; | |
222 } | |
223 } | |
224 rvm_swe_array[cur_swe->swe_id].swe_return_path.addr_id=rvm_swe_array[cur_swe->swe_id].swe_addr_id; | |
225 | |
226 for (i=0; i < nb_linked_swe; i++) | |
227 { | |
228 UINT8 linked_swe_index; | |
229 if (rvm_get_swe_index(&linked_swe_index, linked_swe_use_id[i]) != RVM_OK) | |
230 { | |
231 return RVM_INTERNAL_ERR; | |
232 } | |
233 return_path[i].callback_func = rvm_swe_array[linked_swe_index].swe_return_path.callback_func; | |
234 /* TO DO: manage addr_id for GROUP_MEMBER SWEs */ | |
235 return_path[i].addr_id = rvm_swe_array[linked_swe_index].swe_addr_id; | |
236 } | |
237 | |
238 | |
239 if (cur_swe->rvm_functions.set_info != NULL ) { | |
240 if(rvm_swe_array[swe_index].swe_state == SWE_NOT_STARTED) { | |
241 cur_swe->rvm_functions.set_info(rvm_swe_array[cur_swe->swe_id].swe_addr_id, \ | |
242 return_path, \ | |
243 cur_swe->bk_id_table, \ | |
244 rvm_error); | |
245 } else { | |
246 cur_swe->rvm_functions.set_info(rvm_swe_array[cur_swe->swe_id].swe_addr_id, \ | |
247 return_path, \ | |
248 _bk_id_table, \ | |
249 rvm_error); | |
250 } | |
251 } | |
252 } | |
253 | |
254 cur_swe = cur_swe->next_swe; /* process next SWE */ | |
255 } | |
256 return RVM_OK; | |
257 } | |
258 | |
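/*
 * Illustrative sketch, not part of the original source (#if 0, never
 * compiled): the return-path construction performed for each SWE in
 * rvm_set_swe_info() above, pulled out of the per-type switch. For every
 * linked SWE the callback comes from the known-SWE database and the address
 * id is the one allocated for that SWE. The helper name is illustrative.
 */
#if 0
static T_RVM_RETURN build_linked_return_paths(T_RVM_USE_ID * linked_swe_use_id,
					      UINT8 nb_linked_swe,
					      T_RV_RETURN_PATH * return_path)
{
	UINT8 i, linked_swe_index;

	for (i = 0; i < nb_linked_swe; i++) {
		if (rvm_get_swe_index(&linked_swe_index, linked_swe_use_id[i]) != RVM_OK)
			return RVM_INTERNAL_ERR;
		return_path[i].callback_func =
			rvm_swe_array[linked_swe_index].swe_return_path.callback_func;
		return_path[i].addr_id = rvm_swe_array[linked_swe_index].swe_addr_id;
	}
	return RVM_OK;
}
#endif
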
259 | |
260 /******************************************************************************* | |
261 ** | |
262 ** Function rvm_initialize_swe | |
263 ** | |
264 ** Description This function initializes all the required SWEs which are not yet running. | |
265 ** It also creates the tasks in a suspended state. | |
266 ** Then it resumes the tasks and calls the start function of each SWE. | |
267 ** | |
268 ** Parameters: T_RVM_PROCESSING_SWE * appli: list of required SWEs with their parameters. | |
269 ** | |
270 ** Returns T_RVM_RETURN: RVM_OK if successful. | |
271 ** | |
272 *******************************************************************************/ | |
273 T_RVM_RETURN rvm_initialize_swe( T_RVM_PROCESSING_SWE * appli, | |
274 T_RVM_GROUP_DIRECTIVE* gd, | |
275 UINT8 t2cnt) { | |
276 T_RVM_PROCESSING_SWE * cur_swe = appli; | |
277 UINT8 i=0, j=0; | |
278 UINT16 tuid=0; | |
279 T_RVF_BUFFER* stack_ptr=NULL; | |
280 T_RVM_INFO_SWE swe_info; | |
281 | |
282 #ifdef _WINDOWS | |
283 BOOLEAN hci_started = FALSE; | |
284 #endif | |
285 | |
286 /* for each SWE in the list, initialize it */ | |
287 while( cur_swe != NULL ) | |
288 { | |
289 UINT8 swe_index = cur_swe->swe_id; | |
290 | |
291 if ( rvm_swe_array[swe_index].swe_state != SWE_RUNNING) | |
292 { | |
293 /* call its init function */ | |
294 if (cur_swe->rvm_functions.init) | |
295 { | |
296 if (cur_swe->rvm_functions.init() != RVM_OK) | |
297 { | |
298 rvf_send_trace("RVM: Error Calling init function of swe nb ", 43, \ | |
299 (UINT32)swe_index, RV_TRACE_LEVEL_ERROR, RVM_USE_ID ); | |
300 } | |
301 } | |
302 } | |
303 cur_swe = cur_swe->next_swe; | |
304 } | |
305 | |
306 | |
307 /* for each SWE in the list, create the task if necessary. */ | |
308 cur_swe = appli; | |
309 while( cur_swe != NULL ) | |
310 { | |
311 UINT8 swe_index = cur_swe->swe_id; | |
312 | |
313 if ( rvm_swe_array[swe_index].swe_state != SWE_RUNNING) { | |
314 /* start the task if necessary in SUSPEND mode */ | |
315 | |
316 if ( cur_swe->swe_type == RVM_SWE_TYPE_4) { | |
317 /* allocate a buffer for the stack */ | |
318 if ( rvm_allocate_stack_buffer( cur_swe->stack_size, | |
319 &rvm_swe_array[swe_index].stack_ptr) != RVM_OK) { | |
320 | |
321 rvf_send_trace("RVM: Error allocating stack nb:", 31, (UINT32)rvm_swe_array[swe_index].swe_addr_id, RV_TRACE_LEVEL_ERROR, RVM_USE_ID); | |
322 | |
323 /* TO DO: manage the error case */ | |
324 return RVF_MEMORY_ERR; | |
325 } | |
326 | |
327 /* start the task in suspend mode */ | |
328 if (rvf_create_task((TASKPTR) cur_swe->rvm_functions.core, \ | |
329 (UINT8)rvm_swe_array[swe_index].swe_addr_id,\ | |
330 rvm_swe_array[swe_index].swe_name, \ | |
331 rvm_swe_array[swe_index].stack_ptr, \ | |
332 cur_swe->stack_size, \ | |
333 cur_swe->priority, \ | |
334 ET4_TASK,\ | |
335 DEFAULT_TIME_SLICING, \ | |
336 SUSPEND ) != RV_OK) { | |
337 | |
338 rvf_send_trace("RVM: Error Creating Task nb:", 28, (UINT32)rvm_swe_array[swe_index].swe_addr_id, RV_TRACE_LEVEL_ERROR, RVM_USE_ID); | |
339 } | |
340 | |
341 rvf_setRtAddrSweIndex(rvm_swe_array[swe_index].swe_addr_id, | |
342 swe_index); | |
343 | |
344 rvf_send_trace("RVM: Created task nb ", 21, (UINT32)rvm_swe_array[swe_index].swe_addr_id, RV_TRACE_LEVEL_DEBUG_LOW, RVM_USE_ID); | |
345 | |
346 } else if (cur_swe->swe_type == RVM_SWE_TYPE_3) { | |
347 /* allocate a buffer for the stack */ | |
348 if ( rvm_allocate_stack_buffer( cur_swe->stack_size, | |
349 &rvm_swe_array[swe_index].stack_ptr) != RVM_OK) { | |
350 | |
351 rvf_send_trace("RVM: Error allocating stack nb:", 31, (UINT32)rvm_swe_array[swe_index].swe_addr_id, RV_TRACE_LEVEL_ERROR, RVM_USE_ID); | |
352 /* TO DO: manage the error case */ | |
353 return RVF_MEMORY_ERR; | |
354 } | |
355 | |
356 /* start the task in suspend mode */ | |
357 if (rvf_create_task((TASKPTR)rvm_t3_proxy, \ | |
358 (UINT8)rvm_swe_array[swe_index].swe_addr_id,\ | |
359 rvm_swe_array[swe_index].swe_name, \ | |
360 rvm_swe_array[swe_index].stack_ptr, \ | |
361 cur_swe->stack_size, \ | |
362 cur_swe->priority, \ | |
363 ET3_TASK,\ | |
364 DEFAULT_TIME_SLICING, \ | |
365 SUSPEND ) != RV_OK) { | |
366 | |
367 rvf_send_trace("RVM: Error Creating E3 Task nb:", 31, (UINT32)rvm_swe_array[swe_index].swe_addr_id, RV_TRACE_LEVEL_ERROR, RVM_USE_ID); | |
368 } | |
369 | |
370 rvf_register_t3_handlers(rvm_swe_array[swe_index].swe_addr_id, | |
371 cur_swe->rvm_functions.handle_message, /* traverse list hence: cur_swe->rvm_functions */ | |
372 cur_swe->rvm_functions.handle_timer ); | |
373 | |
374 rvf_setRtAddrSweIndex(rvm_swe_array[swe_index].swe_addr_id, | |
375 swe_index); | |
376 | |
377 rvf_send_trace("RVM: Created task nb ", 21, (UINT32)rvm_swe_array[swe_index].swe_addr_id, RV_TRACE_LEVEL_DEBUG_LOW, RVM_USE_ID); | |
378 | |
379 | |
380 } | |
381 } | |
382 cur_swe = cur_swe->next_swe; /* process next SWE */ | |
383 } | |
384 /* resolve T2 grouping */ | |
385 for(i=0; i<t2cnt; i++) { | |
386 gd[i].host_task_addr=rvf_resolveHostingAddrId(gd[i]); | |
387 if( gd[i].host_task_addr==RVF_INVALID_ADDR_ID) { | |
388 | |
389 if ( rvm_allocate_stack_buffer( gd[i].stack_size, &stack_ptr) != RVM_OK){ | |
390 /* TO DO: manage the error case - ABORT & Clean-up if one or more linked Ent. fail */ | |
391 //break; | |
392 return RVF_MEMORY_ERR; | |
393 } | |
394 | |
395 gd[i].host_task_addr=rvm_allocate_task_id(1); | |
396 rvf_create_task((TASKPTR)rvm_t2_proxy, | |
397 gd[i].host_task_addr, // | |
398 "hosting_task", | |
399 stack_ptr, | |
400 gd[i].stack_size, | |
401 gd[i].task_priority, | |
402 ET2_HOST_TASK, | |
403 DEFAULT_TIME_SLICING, | |
404 SUSPEND); | |
405 | |
406 rvf_associateGrpToHost(gd[i].host_task_addr, gd[i].group_directive); | |
407 } | |
408 | |
409 for(j=0; j<MAX_COMPOSITES && gd[i].hosted_swe_db_index[j]!=0; j++) { | |
410 /* create virtual task for each "hosted_swe_db_index[]" */ | |
411 rvm_swe_array[gd[i].hosted_swe_db_index[j]].swe_get_info(&swe_info); | |
412 | |
413 rvf_create_virtual_task(swe_info.type_info.type2.handle_message, | |
414 swe_info.type_info.type2.handle_timer, | |
415 rvm_swe_array[gd[i].hosted_swe_db_index[j]].swe_addr_id, | |
416 gd[i].host_task_addr, | |
417 rvm_swe_array[gd[i].hosted_swe_db_index[j]].swe_name, | |
418 rvm_swe_array[gd[i].hosted_swe_db_index[j]].swe_priority, | |
419 ET2_VTASK); | |
420 rvf_setRtAddrSweIndex(rvm_swe_array[gd[i].hosted_swe_db_index[j]].swe_addr_id, | |
421 gd[i].hosted_swe_db_index[j]); | |
422 | |
423 /* register each with associate host */ | |
424 rvf_registerToHost( gd[i].host_task_addr, | |
425 rvm_swe_array[gd[i].hosted_swe_db_index[j]].swe_addr_id); | |
426 } | |
427 | |
428 } | |
429 | |
430 /* resume all hosting tasks... */ | |
431 for(i=0; i<t2cnt; i++) rvf_resume_task((UINT8)gd[i].host_task_addr); | |
432 | |
433 /* start composites or virtual tasks */ | |
434 for(i=0; i<t2cnt; i++) { | |
435 rvm_start_group_req((UINT8)gd[i].host_task_addr, | |
436 gd[i].hosted_swe_db_index); | |
437 } | |
438 | |
439 /* for each SWE in the list, start it if necessary. */ | |
440 for(cur_swe = appli; cur_swe != NULL; ) { | |
441 UINT8 swe_index = cur_swe->swe_id; | |
442 | |
443 if ( rvm_swe_array[swe_index].swe_state != SWE_RUNNING) { | |
444 /* if the SWE is a task, resume it */ | |
445 if ( (cur_swe->swe_type == RVM_SWE_TYPE_3) | |
446 || (cur_swe->swe_type == RVM_SWE_TYPE_4) ) { | |
447 | |
448 /* TO DO: check the return value */ | |
449 if(rvf_resume_task((UINT8)rvm_swe_array[swe_index].swe_addr_id )!=RVF_OK) { | |
450 RVM_TRACE_WARNING("RVM: ERROR! UNABLE TO RESUME SWE"); | |
451 return RVF_INTERNAL_ERR; | |
452 } | |
453 rvf_send_trace("RVM: Resumed task nb ", 21, (UINT32)rvm_swe_array[swe_index].swe_addr_id, RV_TRACE_LEVEL_DEBUG_LOW, RVM_USE_ID); | |
454 rvf_send_trace("RVM: Resumed SWE ", 17, (UINT32)rvm_swe_array[swe_index].swe_use_id, RV_TRACE_LEVEL_DEBUG_LOW, RVM_USE_ID); | |
455 | |
456 #ifdef _WINDOWS | |
457 if (rvm_swe_array[swe_index].swe_use_id == HCI_USE_ID ) { | |
458 hci_started = TRUE; | |
459 } | |
460 #endif | |
461 | |
462 } else if(cur_swe->swe_type==RVM_SWE_TYPE_1) { /* A-M-E-N-D-E-D! */ | |
463 | |
464 /* call its init function */ | |
465 if (cur_swe->rvm_functions.start) { | |
466 if (cur_swe->rvm_functions.start() != RVM_OK) { | |
467 rvf_send_trace("RVM: Error Calling start function of swe nb ", 44, \ | |
468 (UINT32)swe_index, RV_TRACE_LEVEL_ERROR, RVM_USE_ID); | |
469 } | |
470 } | |
471 } | |
472 } | |
473 | |
474 /* increment the number of using SWEs and record the using appli */ | |
475 /* DOES NOT DEPEND ON THE STATE */ | |
476 /*rvm_swe_array[swe_index].swe_get_info(&swe_info); | |
477 switch( swe_info.swe_type) { | |
478 case RVM_SWE_TYPE_1: | |
479 if(!swe_info.type_info.type1.nb_linked_swe) rvm_swe_array[swe_index].nb_using_appli=0; | |
480 break; | |
481 case RVM_SWE_TYPE_2: | |
482 if(!swe_info.type_info.type2.nb_linked_swe) rvm_swe_array[swe_index].nb_using_appli=0; | |
483 break; | |
484 case RVM_SWE_TYPE_3: | |
485 if(!swe_info.type_info.type3.nb_linked_swe) rvm_swe_array[swe_index].nb_using_appli=0; | |
486 break; | |
487 case RVM_SWE_TYPE_4: | |
488 if(!swe_info.type_info.type4.nb_linked_swe) rvm_swe_array[swe_index].nb_using_appli=0; | |
489 break; | |
490 default: rvm_swe_array[swe_index].nb_using_appli=0; | |
491 }*/ | |
492 | |
493 // if(rvm_swe_array[swe_index].nb_using_appli) { | |
494 // rvm_swe_array[swe_index].using_appli[rvm_swe_array[swe_index].nb_using_appli++] = appli->swe_id; | |
495 // | |
496 // } | |
497 | |
498 if(rvm_swe_array[appli->swe_id].nb_using_appli<RVM_MAX_SWE_USING ) { | |
499 rvm_swe_array[appli->swe_id].using_appli[rvm_swe_array[appli->swe_id].nb_using_appli++]=swe_index; | |
500 } else { | |
501 RVM_TRACE_WARNING_PARAM("RVM: Unable to track: 'Using Appli' list is full, nb=", appli->swe_id); | |
502 } | |
503 | |
504 cur_swe = cur_swe->next_swe; /* process next SWE */ | |
505 } | |
506 | |
507 for(cur_swe=appli; cur_swe!=NULL; ) { | |
508 rvm_swe_array[cur_swe->swe_id].swe_state = SWE_RUNNING; | |
509 cur_swe = cur_swe->next_swe; | |
510 } | |
511 | |
512 #ifdef _WINDOWS | |
513 if (hci_started == TRUE) { | |
514 rvf_delay(RVF_MS_TO_TICKS(1000)); | |
515 } | |
516 #endif | |
517 | |
518 return RVM_OK; | |
519 } | |
520 | |
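/*
 * Illustrative sketch, not part of the original source (#if 0, never
 * compiled): the lifecycle rvm_initialize_swe() applies to one type-3
 * entity, condensed into a single sequence. The task is created suspended
 * around the rvm_t3_proxy core, its message/timer handlers are registered,
 * and it is resumed only once the whole list has been prepared. The helper
 * name is illustrative; "cur_swe" and "swe_index" stand for the loop
 * variables used above.
 */
#if 0
static T_RVM_RETURN start_type3_entity(T_RVM_PROCESSING_SWE * cur_swe, UINT8 swe_index)
{
	if (rvm_allocate_stack_buffer(cur_swe->stack_size,
				      &rvm_swe_array[swe_index].stack_ptr) != RVM_OK)
		return RVF_MEMORY_ERR;

	if (rvf_create_task((TASKPTR) rvm_t3_proxy,
			    (UINT8) rvm_swe_array[swe_index].swe_addr_id,
			    rvm_swe_array[swe_index].swe_name,
			    rvm_swe_array[swe_index].stack_ptr,
			    cur_swe->stack_size,
			    cur_swe->priority,
			    ET3_TASK,
			    DEFAULT_TIME_SLICING,
			    SUSPEND) != RV_OK)
		return RVM_INTERNAL_ERR;

	rvf_register_t3_handlers(rvm_swe_array[swe_index].swe_addr_id,
				 cur_swe->rvm_functions.handle_message,
				 cur_swe->rvm_functions.handle_timer);
	rvf_setRtAddrSweIndex(rvm_swe_array[swe_index].swe_addr_id, swe_index);

	/* later, once every entity in the list is prepared: */
	return (rvf_resume_task((UINT8) rvm_swe_array[swe_index].swe_addr_id) == RVF_OK)
		? RVM_OK : RVF_INTERNAL_ERR;
}
#endif
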
521 | |
522 /******************************************************************************* | |
523 ** | |
524 ** Function rvm_stop_swe_list | |
525 ** | |
526 ** Description This function will call the stop functions when possible. | |
527 ** | |
528 ** Parameters: T_RVM_PROCESSING_SWE * appli: list of required SWEs with their parameters. | |
529 ** | |
530 ** Returns T_RVM_OK if all stop requests are issued successfully, | |
531 ** else T_RVM_INTERNAL_ERR (in which case some SWEs are not stopped). | |
532 ** | |
533 *******************************************************************************/ | |
534 T_RVM_RETURN rvm_stop_swe_list( T_RVM_PROCESSING_SWE * appli, T_RV_HDR* hdr) | |
535 { | |
536 T_RVM_PROCESSING_SWE * cur_swe = appli; | |
537 T_RVM_INFO_SWE swe_info; | |
538 volatile T_RVM_RETURN rvm_ret_value = RVM_OK; | |
539 T_RVM_STOP_MSG* p_msg=NULL; | |
540 UINT8 i=0; | |
541 | |
542 | |
543 /* for each SWE in the list */ | |
544 while (cur_swe != NULL ) | |
545 { | |
546 UINT8 swe_index = cur_swe->swe_id; | |
547 | |
548 /* If nb_using_appli > 1, SWE cannot be stopped */ | |
549 /* if (rvm_swe_array[swe_index].nb_using_appli > 1) { | |
550 cur_swe = cur_swe->next_swe; | |
551 continue; | |
552 } | |
553 // If nb_using_appli == 1 but using_appli != appli, SWE cannot be stopped | |
554 if ((rvm_swe_array[swe_index].nb_using_appli == 1) && \ | |
555 (rvm_swe_array[swe_index].using_appli[0] != appli->swe_id)) { | |
556 cur_swe = cur_swe->next_swe; | |
557 continue; | |
558 } | |
559 */ | |
560 if (cur_swe->swe_type==RVM_SWE_TYPE_1) { //cater for de-init of lib | |
561 if(cur_swe->rvm_functions.stop1)cur_swe->rvm_functions.stop1(); | |
562 if(cur_swe->rvm_functions.kill)cur_swe->rvm_functions.kill(); | |
563 cur_swe = cur_swe->next_swe; | |
564 continue; | |
565 } | |
566 if (cur_swe->swe_type==RVM_SWE_TYPE_4) { // etype 4 restriction | |
567 RVM_TRACE_WARNING_PARAM("RVM: Stop & Kill is not applicable to Type 4 entities, nb=", (UINT32)swe_index); | |
568 for (rvm_swe_array[swe_index].nb_using_appli=0,i=0; i<RVM_MAX_SWE_USING; i++) { //reset using appli - workaround! | |
569 rvm_swe_array[swe_index].using_appli[i] = RVM_INVALID_SWE_INDEX; | |
570 } | |
571 cur_swe = cur_swe->next_swe; | |
572 continue; | |
573 } | |
574 /* Retrieve stop function with a get_info */ | |
575 if (rvm_swe_array[swe_index].swe_get_info == NULL) | |
576 { | |
577 RVM_TRACE_WARNING_PARAM("RVM: SWE with no get info, cannot be stopped, nb=", (UINT32)swe_index); | |
578 cur_swe = cur_swe->next_swe; | |
579 rvm_ret_value = RVM_INTERNAL_ERR; | |
580 continue; | |
581 } | |
582 rvm_swe_array[swe_index].swe_get_info( &swe_info); | |
583 | |
584 if (cur_swe->rvm_functions.stop == NULL) { | |
585 RVM_TRACE_WARNING_PARAM("RVM: SWE with no stop function, cannot be stopped, nb=", (UINT32)swe_index); | |
586 cur_swe = cur_swe->next_swe; | |
587 continue; | |
588 } | |
589 | |
590 if (rvf_get_buf( rvm_mem_bank, sizeof(T_RVM_STOP_MSG), (void **)&p_msg) == RVF_RED ) { | |
591 RVM_TRACE_WARNING_PARAM("RVM: Unable to create STOP msg, nb=", (UINT32)swe_index); | |
592 cur_swe = cur_swe->next_swe; | |
593 continue; | |
594 } | |
595 | |
596 p_msg->header.msg_id = RVM_STOP_MSG; | |
597 p_msg->header.src_addr_id = hdr->src_addr_id; | |
598 p_msg->header.dest_addr_id = hdr->dest_addr_id; | |
599 // p_msg->header.callback_func = hdr->callback_func; | |
600 p_msg->rp.callback_func = ((T_RVM_STOP_MSG*)hdr)->rp.callback_func; | |
601 p_msg->status = SWE_STOPPING; | |
602 p_msg->swe_num = swe_index; //((T_RVM_STOP_MSG*)hdr)->swe_num; | |
603 | |
604 if ( rvf_send_msg( rvm_swe_array[swe_index].swe_addr_id, p_msg) != RVF_OK) { | |
605 rvm_ret_value = RVM_INTERNAL_ERR; | |
606 cur_swe = cur_swe->next_swe; | |
607 continue; | |
608 } | |
609 | |
610 rvm_swe_array[swe_index].swe_state=SWE_STOPPING; | |
611 | |
612 /*printf("SHUTDOWN: SWE %s nb %d USING APPLI= %d\n",rvm_swe_array[swe_index].swe_name, swe_index, rvm_swe_array[swe_index].nb_using_appli); | |
613 for(i=0; i<rvm_swe_array[swe_index].nb_using_appli; i++)printf(" %d, ", rvm_swe_array[swe_index].using_appli[i]); | |
614 printf("\n");*/ | |
615 | |
616 for (rvm_swe_array[swe_index].nb_using_appli=0,i=0; i<RVM_MAX_SWE_USING; i++) { //reset using appli - workaround! | |
617 rvm_swe_array[swe_index].using_appli[i] = RVM_INVALID_SWE_INDEX; | |
618 } | |
619 | |
620 /*printf("SHUTDOWN: SWE %s nb %d USING APPLI= %d\n",rvm_swe_array[swe_index].swe_name, swe_index, rvm_swe_array[swe_index].nb_using_appli); | |
621 for(i=0; i<rvm_swe_array[swe_index].nb_using_appli; i++)printf(" %d, ", rvm_swe_array[swe_index].using_appli[i]); | |
622 printf("\n");*/ | |
623 | |
624 /* Stop SWE - amended to ASYNC */ | |
625 /* TO DO: for type 2 and 3 SWEs, send a message to the host to call the stop function */ | |
626 //cur_swe->rvm_functions.stop(NULL); | |
627 | |
628 /* Proceed to the next SWE */ | |
629 cur_swe = cur_swe->next_swe; | |
630 } | |
631 | |
632 return rvm_ret_value; | |
633 } | |
634 | |
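/*
 * Illustrative sketch, not part of the original source (#if 0, never
 * compiled): the asynchronous stop request that rvm_stop_swe_list() above
 * builds for one entity. The message is allocated from rvm_mem_bank,
 * carries the caller's return path and the database index of the entity,
 * and is posted to the entity's own address id; the entity is expected to
 * answer later (see rvm_swe_has_stopped()). The helper name is illustrative;
 * "hdr" and "swe_index" stand for the function's parameter and loop variable.
 */
#if 0
static T_RVM_RETURN send_stop_request(T_RV_HDR * hdr, UINT8 swe_index)
{
	T_RVM_STOP_MSG * stop_msg;

	if (rvf_get_buf(rvm_mem_bank, sizeof(T_RVM_STOP_MSG),
			(void **) &stop_msg) == RVF_RED)
		return RVM_INTERNAL_ERR;

	stop_msg->header.msg_id       = RVM_STOP_MSG;
	stop_msg->header.src_addr_id  = hdr->src_addr_id;
	stop_msg->header.dest_addr_id = hdr->dest_addr_id;
	stop_msg->rp.callback_func    = ((T_RVM_STOP_MSG *) hdr)->rp.callback_func;
	stop_msg->status              = SWE_STOPPING;
	stop_msg->swe_num             = swe_index;

	if (rvf_send_msg(rvm_swe_array[swe_index].swe_addr_id, stop_msg) != RVF_OK)
		return RVM_INTERNAL_ERR;

	rvm_swe_array[swe_index].swe_state = SWE_STOPPING;
	return RVM_OK;
}
#endif
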
635 | |
636 /******************************************************************************* | |
637 ** | |
638 ** Function rvm_suspend_swe_tasks | |
639 ** | |
640 ** Description This function will suspend all SWEs that are tasks. | |
641 ** | |
642 ** Parameters: T_RVM_PROCESSING_SWE * appli: list of required SWEs with their parameters. | |
643 ** | |
644 ** Returns T_RVM_OK if all suspensions are successful, | |
645 ** else T_RVM_INTERNAL_ERR (in which case some SWEs are not suspended). | |
646 ** | |
647 *******************************************************************************/ | |
648 T_RVM_RETURN rvm_suspend_swe_tasks( T_RVM_PROCESSING_SWE * appli) | |
649 { | |
650 T_RVM_PROCESSING_SWE * cur_swe = appli; | |
651 T_RVM_INFO_SWE swe_info; | |
652 volatile T_RVM_RETURN rvm_ret_value = RVM_OK; | |
653 | |
654 /* for each SWE in the list */ | |
655 while (cur_swe != NULL ) | |
656 { | |
657 UINT8 swe_index = cur_swe->swe_id; | |
658 | |
659 /* If nb_using_appli > 1, SWE cannot be stopped */ | |
660 if (rvm_swe_array[swe_index].nb_using_appli > 1) | |
661 { | |
662 cur_swe = cur_swe->next_swe; | |
663 continue; | |
664 } | |
665 /* If nb_using_appli == 1 but using_appli != appli, SWE cannot be stopped */ | |
666 if ((rvm_swe_array[swe_index].nb_using_appli == 1) && \ | |
667 (rvm_swe_array[swe_index].using_appli[0] != appli->swe_id)) | |
668 { | |
669 cur_swe = cur_swe->next_swe; | |
670 continue; | |
671 } | |
672 | |
673 /* Retrieve task info with a get_info */ | |
674 if (rvm_swe_array[swe_index].swe_get_info == NULL) | |
675 { | |
676 RVM_TRACE_WARNING_PARAM("RVM: SWE with no get info, cannot be stopped, nb=", (UINT32)swe_index); | |
677 cur_swe = cur_swe->next_swe; | |
678 rvm_ret_value = RVM_INTERNAL_ERR; | |
679 continue; | |
680 } | |
681 rvm_swe_array[swe_index].swe_get_info( &swe_info); | |
682 | |
683 /* If SWE is not a task, continue */ | |
684 /* TO DO: manage group member SWEs */ | |
685 if ( (swe_info.swe_type == RVM_SWE_TYPE_1) || | |
686 (swe_info.swe_type == RVM_SWE_TYPE_2) ) | |
687 { | |
688 cur_swe = cur_swe->next_swe; | |
689 continue; | |
690 } | |
691 | |
692 /* Suspend SWE task */ | |
693 rvf_suspend_task( (UINT8)rvm_swe_array[swe_index].swe_return_path.addr_id); | |
694 RVM_TRACE_DEBUG_LOW_PARAM("RVM: Suspended task nb ", (UINT32) (rvm_swe_array[swe_index].swe_return_path.addr_id & 0x000000FF) ); | |
695 | |
696 /* Proceed to the next SWE */ | |
697 cur_swe = cur_swe->next_swe; | |
698 } | |
699 | |
700 return rvm_ret_value; | |
701 } | |
702 | |
703 | |
704 /******************************************************************************* | |
705 ** | |
706 ** Function rvm_kill_swe_list | |
707 ** | |
708 ** Description This function will call the kill functions when possible. | |
709 ** It will also delete the task, the stack and the used MBs. | |
710 ** | |
711 ** Parameters: T_RVM_PROCESSING_SWE * appli: list of required SWEs with their parameters. | |
712 ** | |
713 ** Returns T_RVM_OK if everything is successful, | |
714 ** else T_RVM_INTERNAL_ERR (in which case some SWEs are not killed). | |
715 ** | |
716 *******************************************************************************/ | |
717 T_RVM_RETURN rvm_kill_swe_list( T_RVM_PROCESSING_SWE * appli) | |
718 { | |
719 T_RVM_PROCESSING_SWE * cur_swe = appli; | |
720 T_RVM_INFO_SWE swe_info; | |
721 volatile T_RVM_RETURN rvm_ret_value = RVM_OK; | |
722 | |
723 /* for each SWE in the list */ | |
724 while (cur_swe != NULL ) | |
725 { | |
726 UINT8 swe_index = cur_swe->swe_id; | |
727 | |
728 /* If nb_using_appli > 1, SWE cannot be killed */ | |
729 if (rvm_swe_array[swe_index].nb_using_appli > 1) | |
730 { | |
731 cur_swe = cur_swe->next_swe; | |
732 continue; | |
733 } | |
734 | |
735 /* If nb_using_appli == 1 but using_appli != appli, SWE cannot be killed */ | |
736 if ((rvm_swe_array[swe_index].nb_using_appli == 1) && \ | |
737 (rvm_swe_array[swe_index].using_appli[0] != appli->swe_id)) | |
738 { | |
739 cur_swe = cur_swe->next_swe; | |
740 continue; | |
741 } | |
742 | |
743 /* Retrieve kill function with a get_info */ | |
744 if (rvm_swe_array[swe_index].swe_get_info == NULL) | |
745 { | |
746 RVM_TRACE_WARNING_PARAM("RVM: SWE with no get info, cannot be killed, nb=", (UINT32)swe_index); | |
747 cur_swe = cur_swe->next_swe; | |
748 rvm_ret_value = RVM_INTERNAL_ERR; | |
749 continue; | |
750 } | |
751 rvm_swe_array[swe_index].swe_get_info( &swe_info); | |
752 | |
753 if (cur_swe->rvm_functions.kill == NULL) | |
754 { | |
755 RVM_TRACE_WARNING_PARAM("RVM: SWE with no kill function, cannot be killed, nb=", (UINT32)swe_index); | |
756 cur_swe = cur_swe->next_swe; | |
757 rvm_ret_value = RVM_INTERNAL_ERR; | |
758 continue; | |
759 } | |
760 | |
761 /* Kill SWE */ | |
762 cur_swe->rvm_functions.kill(); | |
763 | |
764 /* TO DO: manage group member SWEs */ | |
765 /* If the SWE is a task, the task should be deleted, as well as its stack */ | |
766 if ( (swe_info.swe_type == RVM_SWE_TYPE_3) || | |
767 (swe_info.swe_type == RVM_SWE_TYPE_4) ) | |
768 { | |
769 rvf_exit_task((UINT8)(rvm_swe_array[swe_index].swe_return_path.addr_id)); | |
770 rvf_free_buf(rvm_swe_array[swe_index].stack_ptr); | |
771 RVM_TRACE_DEBUG_LOW_PARAM("RVM: Deleted task nb ", (UINT32)(rvm_swe_array[swe_index].swe_return_path.addr_id & 0x000000FF)); | |
772 rvf_free_sys_resources(rvm_swe_array[swe_index].swe_addr_id, 2); | |
773 | |
774 } else if(swe_info.swe_type == RVM_SWE_TYPE_2) { | |
775 rvf_free_sys_resources(rvm_swe_array[swe_index].swe_addr_id, 0); | |
776 } | |
777 | |
778 /* Proceed to the next SWE */ | |
779 cur_swe = cur_swe->next_swe; | |
780 } | |
781 | |
782 return rvm_ret_value; | |
783 } | |
784 | |
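/*
 * Illustrative sketch, not part of the original source (#if 0, never
 * compiled): the two teardown paths used by rvm_kill_swe_list() above and
 * rvm_kill_swe() below once an entity's kill function has returned. A
 * task-type entity (type 3/4) has its task deleted and its stack freed
 * before the system resources tied to its address id are released; a hosted
 * type-2 entity only releases its system resources. The helper name is
 * illustrative.
 */
#if 0
static void teardown_after_kill(T_RVM_INFO_SWE * swe_info, UINT8 swe_index)
{
	if ((swe_info->swe_type == RVM_SWE_TYPE_3) ||
	    (swe_info->swe_type == RVM_SWE_TYPE_4)) {
		rvf_exit_task((UINT8) rvm_swe_array[swe_index].swe_return_path.addr_id);
		rvf_free_buf(rvm_swe_array[swe_index].stack_ptr);
		rvf_free_sys_resources(rvm_swe_array[swe_index].swe_addr_id, 2);
	} else if (swe_info->swe_type == RVM_SWE_TYPE_2) {
		rvf_free_sys_resources(rvm_swe_array[swe_index].swe_addr_id, 0);
	}
}
#endif
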
785 | |
786 /******************************************************************************* | |
787 ** | |
788 ** Function rvm_launch_appli | |
789 ** | |
790 ** Description Called by the main RVM task to start a specified known application | |
791 ** | |
792 ** Parameters: T_RVM_MSG msg: containing the return path and the index of the | |
793 ** application to start in the array of known SWEs. | |
794 ** | |
795 ** Returns None | |
796 ** | |
797 *******************************************************************************/ | |
798 void rvm_launch_appli( T_RVM_MSG * msg_Ptr) { | |
799 T_RVM_GROUP_DIRECTIVE GroupDirectives[MAX_GRPS]; | |
800 UINT8 gdCount=0; | |
801 T_RVM_PROCESSING_SWE * appli = NULL; /* pointer to the first element of the list */ | |
802 T_RV_RETURN_PATH appli_return_path; | |
803 UINT8 i,j=0; | |
804 | |
805 for(i=0; i<MAX_GRPS; i++) { | |
806 GroupDirectives[i].group_directive=0; | |
807 GroupDirectives[i].host_task_addr=0; | |
808 GroupDirectives[i].stack_size=0; | |
809 memset(&GroupDirectives[i].hosted_swe_db_index, 0, (sizeof(UINT8)*MAX_COMPOSITES)); | |
810 } | |
811 | |
812 /* store the return path of the caller */ | |
813 appli_return_path.callback_func = msg_Ptr->rp.callback_func; | |
814 appli_return_path.addr_id = msg_Ptr->header.src_addr_id; | |
815 | |
816 /* recursively call all get_info functions and build the list of running swe */ | |
817 if ( rvm_build_swe_list( &appli, msg_Ptr->swe_num, 0) != RVM_OK ) | |
818 { | |
819 /* Display error message | |
820 error case: use the return_path to inform the caller that an error occurs*/ | |
821 rvm_snd_msg_to_upper(RVM_START_APPLI, RVM_INVALID_PARAMETER, msg_Ptr->swe_num, appli_return_path); | |
822 RVM_TRACE_ERROR("RVM: SWE list built error"); | |
823 return; | |
824 } | |
825 | |
826 gdCount=_resolve_t2_grouping(appli, GroupDirectives); | |
827 | |
828 if(!appli) { | |
829 // error case: use return_path to inform the caller about memory lack | |
830 // Unlock state of SWE and free memory | |
831 RVM_TRACE_WARNING_PARAM("RVM: ABORTED, Stand-alone ENTITY start request!", (UINT32)msg_Ptr->swe_num); | |
832 rvm_snd_msg_to_upper(RVM_START_APPLI, RVM_NOT_READY, msg_Ptr->swe_num, appli_return_path); | |
833 rvm_delete_used_memory (appli); | |
834 return; | |
835 } | |
836 | |
837 RVM_TRACE_DEBUG_HIGH("RVM: SWE list built success"); | |
838 RVM_TRACE_DEBUG_HIGH_PARAM("RVM: trying to launch SWE", rvm_swe_array[appli->swe_id].swe_use_id); | |
839 | |
840 /* check if there is enough available memory */ | |
841 if ( rvm_verify_memory_requirement( appli, GroupDirectives, gdCount) != RVM_OK) | |
842 { | |
843 /* error case: use return_path to inform the caller about memory lack */ | |
844 /* Unlock state of SWE and free memory */ | |
845 RVM_TRACE_WARNING_PARAM("RVM: SWE not enough memory: unable to launch Appli nb", (UINT32)appli->swe_id); | |
846 rvm_snd_msg_to_upper(RVM_START_APPLI, RVM_MEMORY_ERR, msg_Ptr->swe_num, appli_return_path); | |
847 rvm_delete_used_memory (appli); | |
848 return; | |
849 } | |
850 | |
851 /* allocates memory banks */ | |
852 if ( rvm_allocate_mb( appli) != RVM_OK ) | |
853 { /* error case: use return_path to inform the caller about memory lack */ | |
854 rvm_delete_used_memory (appli); | |
855 rvm_snd_msg_to_upper(RVM_START_APPLI, RVM_MEMORY_ERR, msg_Ptr->swe_num, appli_return_path); | |
856 RVM_TRACE_WARNING("RVM: SWE memory bank allocation error - launch aborted!"); | |
857 return; | |
858 } | |
859 RVM_TRACE_DEBUG_LOW("RVM: SWE memory bank allocation success"); | |
860 | |
861 /* call set_info function for each SWE */ | |
862 if ( rvm_set_swe_info( appli) != RVM_OK) | |
863 { /* error case: use return_path to inform the caller that an error occurs */ | |
864 RVM_TRACE_WARNING("RVM: SWE set info functions error"); | |
865 _fatal(appli, 0); | |
866 rvm_delete_created_mb(appli); | |
867 rvm_delete_used_memory (appli); | |
868 rvm_snd_msg_to_upper(RVM_START_APPLI, RVM_INTERNAL_ERR, msg_Ptr->swe_num, appli_return_path); | |
869 return; | |
870 } | |
871 RVM_TRACE_DEBUG_LOW("RVM: SWE set info functions called"); | |
872 | |
873 | |
874 /* call the init and start functions */ | |
875 if ( rvm_initialize_swe( appli, GroupDirectives, gdCount) != RVM_OK) | |
876 { /* error case: use return_path to inform the caller that an error occurs */ | |
877 RVM_TRACE_WARNING("RVM: SWE initialization error"); | |
878 rvm_snd_msg_to_upper(RVM_START_APPLI, RVM_INTERNAL_ERR, msg_Ptr->swe_num, appli_return_path); | |
879 _fatal(appli, 2); | |
880 rvm_delete_created_mb(appli); | |
881 rvm_delete_used_memory (appli); | |
882 return; | |
883 } | |
884 RVM_TRACE_DEBUG_LOW("RVM: SWE initialization success"); | |
885 | |
886 /* build a message and send the response to the caller */ | |
887 /* send a result using the return_path */ | |
888 rvm_snd_msg_to_upper(RVM_START_APPLI, RVM_OK, msg_Ptr->swe_num, appli_return_path); | |
889 | |
890 | |
891 /* and store the return_path */ | |
892 rvm_swe_array[ msg_Ptr->swe_num ].mmi_return_path.callback_func = msg_Ptr->rp.callback_func; | |
893 rvm_swe_array[ msg_Ptr->swe_num ].mmi_return_path.addr_id = msg_Ptr->header.src_addr_id; | |
894 | |
895 /* Once Everything is back in stand-by, release used memory */ | |
896 rvm_delete_used_memory (appli); | |
897 } | |
898 | |
899 | |
900 /******************************************************************************* | |
901 ** | |
902 ** Function rvm_stop_appli | |
903 ** | |
904 ** Description Called by the main RVM task to stop a specified known application | |
905 ** | |
906 ** Parameters: T_RVM_MSG msg: containing the return path and the index of the | |
907 ** application to stop in the array of known SWEs. | |
908 ** | |
909 ** Returns None | |
910 ** | |
911 *******************************************************************************/ | |
912 void rvm_stop_appli( T_RVM_STOP_MSG* msg_Ptr) { | |
913 T_RVM_PROCESSING_SWE * appli = NULL; /* pointer to the first element of the list */ | |
914 T_RVM_RETURN ret_value; | |
915 UINT8 swe_idx = 200; | |
916 T_RV_RETURN_PATH appli_return_path; | |
917 | |
918 appli_return_path.callback_func = msg_Ptr->rp.callback_func; | |
919 appli_return_path.addr_id = msg_Ptr->header.src_addr_id; | |
920 | |
921 | |
922 RVM_TRACE_DEBUG_HIGH_PARAM("RVM: trying to stop Appli nb ", (UINT32)swe_idx); | |
923 | |
924 if (rvm_swe_array[msg_Ptr->swe_num].nb_using_appli > 1) { | |
925 RVM_TRACE_WARNING_PARAM("RVM: SWE has dependencies, nb=", (UINT32)msg_Ptr->swe_num); | |
926 return; | |
927 } | |
928 // ??? If nb_using_appli == 1 but using_appli != appli, SWE cannot be stopped | |
929 if ((rvm_swe_array[msg_Ptr->swe_num].nb_using_appli == 1) && \ | |
930 (rvm_swe_array[msg_Ptr->swe_num].using_appli[0] != msg_Ptr->swe_num)) { | |
931 RVM_TRACE_WARNING_PARAM("RVM: SWE has dependencies, nb=", (UINT32)msg_Ptr->swe_num); | |
932 return; | |
933 } | |
934 | |
935 | |
936 /* TO DO : REBUILD SWE LIST !!!! */ | |
937 if ( rvm_build_swe_list( &appli, msg_Ptr->swe_num, 1) != RVM_OK ) | |
938 { | |
939 /* Display error message | |
940 error case: use the return_path to inform the caller that an error occurs*/ | |
941 rvm_snd_msg_to_upper(RVM_START_APPLI, RVM_INVALID_PARAMETER, msg_Ptr->swe_num, appli_return_path); | |
942 RVM_TRACE_ERROR("RVM: SWE list built error"); | |
943 return; | |
944 } | |
945 | |
946 /* Stop all swe in the list that are used only once */ | |
947 if ((ret_value = rvm_stop_swe_list(appli, (T_RV_HDR*)msg_Ptr)) != RVM_OK ) | |
948 { | |
949 /* Display error message | |
950 TO DO: error case: use the return_path to inform the caller that an error occurs */ | |
951 RVM_TRACE_WARNING_PARAM("RVM: Error in SWE stop", (UINT32)ret_value); | |
952 return; | |
953 } | |
954 | |
955 rvm_delete_used_memory (appli); | |
956 | |
957 RVM_TRACE_DEBUG_LOW("RVM: SWE stop broadcast!"); | |
958 | |
959 } | |
960 | |
961 // NOTE: presently no timeout exists; if the entity fails to reply | |
962 // to the stop request with rvm_swe_stopped(), RVM does not kill it. | |
963 void rvm_swe_has_stopped(T_RVM_STOP_MSG* msg) { | |
964 T_RVM_STOP_MSG* p_msg=(T_RVM_STOP_MSG*)msg; | |
965 T_RV_RETURN_PATH appli_return_path; | |
966 | |
967 appli_return_path.callback_func = msg->rp.callback_func; | |
968 appli_return_path.addr_id = msg->header.src_addr_id; | |
969 | |
970 if(msg->status!=SWE_STOPPING) { | |
971 // inform upper of problem | |
972 rvm_snd_msg_to_upper(RVM_STOP_APPLI, RVM_INVALID_PARAMETER, msg->swe_num, appli_return_path); | |
973 RVM_TRACE_ERROR("RVM: Entity declines STOP REQ"); | |
974 rvf_free_msg((T_RV_HDR*)msg); | |
975 return; | |
976 } | |
977 | |
978 // cont. with shutdown - MUST DO ERROR CASE ! | |
979 rvm_shutdown_swe(p_msg->swe_num); | |
980 | |
981 // set stopped status | |
982 rvm_swe_array[p_msg->swe_num].swe_state=SWE_KILLED; //SWE_STOPPING; | |
983 | |
984 /* build a message and send the response to the caller */ | |
985 /* send a result using the return_path */ | |
986 if(rvm_get_mb_level(p_msg->swe_num) ){ | |
987 rvm_snd_msg_to_upper(RVM_STOP_APPLI, RV_MEMORY_REMAINING, msg->swe_num, appli_return_path); | |
988 } else { | |
989 rvm_snd_msg_to_upper(RVM_STOP_APPLI, RVM_OK, msg->swe_num, appli_return_path); | |
990 } | |
991 | |
992 /* and store the return_path */ | |
993 rvm_swe_array[ msg->swe_num ].mmi_return_path.callback_func = msg->rp.callback_func; | |
994 rvm_swe_array[ msg->swe_num ].mmi_return_path.addr_id = msg->header.src_addr_id; | |
995 | |
996 } | |
997 | |
998 void rvm_shutdown_swe(UINT8 index) { //should ret. ok or fail | |
999 rvm_suspend_swe(index); | |
1000 rvm_kill_swe(index); | |
1001 } | |
1002 | |
1003 void rvm_suspend_swe(UINT8 swe_index) { | |
1004 volatile T_RVM_RETURN rvm_ret_value = RVM_OK; | |
1005 T_RVM_INFO_SWE swe_info; | |
1006 | |
1007 /* ??? If nb_using_appli > 1, SWE cannot be stopped | |
1008 if (rvm_swe_array[swe_index].nb_using_appli > 1) { | |
1009 RVM_TRACE_WARNING_PARAM("RVM-SUSPEND: SWE has dependencies, nb=", (UINT32)swe_index); | |
1010 } | |
1011 // ??? If nb_using_appli == 1 but using_appli != appli, SWE cannot be stopped | |
1012 if ((rvm_swe_array[swe_index].nb_using_appli == 1) && \ | |
1013 (rvm_swe_array[swe_index].using_appli[0] != swe_index)) { | |
1014 RVM_TRACE_WARNING_PARAM("RVM-SUSPEND: SWE has dependencies, nb=", (UINT32)swe_index); | |
1015 }*/ | |
1016 | |
1017 /* Retrieve task info with a get_info */ | |
1018 if (rvm_swe_array[swe_index].swe_get_info == NULL) { | |
1019 RVM_TRACE_WARNING_PARAM("RVM: SWE with no get info, cannot be stopped, nb=", (UINT32)swe_index); | |
1020 rvm_ret_value = RVM_INTERNAL_ERR; | |
1021 return; | |
1022 } | |
1023 rvm_swe_array[swe_index].swe_get_info( &swe_info); | |
1024 | |
1025 /* If SWE is not a task, continue */ | |
1026 /* TO DO: manage group member SWEs */ | |
1027 if ( (swe_info.swe_type == RVM_SWE_TYPE_1) || | |
1028 (swe_info.swe_type == RVM_SWE_TYPE_2) ) { | |
1029 return; | |
1030 } | |
1031 | |
1032 /* Suspend SWE task */ | |
1033 rvf_suspend_task( (UINT8)rvm_swe_array[swe_index].swe_return_path.addr_id); | |
1034 RVM_TRACE_DEBUG_LOW_PARAM("RVM: Suspended task nb ", (UINT32) (rvm_swe_array[swe_index].swe_return_path.addr_id & 0x000000FF) ); | |
1035 } | |
1036 | |
1037 T_RVM_RETURN rvm_kill_swe(UINT8 swe_index) { | |
1038 T_RVM_INFO_SWE swe_info; | |
1039 volatile T_RVM_RETURN rvm_ret_value = RVM_OK; | |
1040 UINT8 isVirtual=0; | |
1041 T_RVF_G_ADDR_ID gid=RVF_INVALID_ADDR_ID; | |
1042 UINT8 isIdle=0; | |
1043 UINT8 i=0; | |
1044 | |
1045 /* If nb_using_appli > 1, SWE cannot be killed | |
1046 if (rvm_swe_array[swe_index].nb_using_appli > 1) return rvm_ret_value; | |
1047 | |
1048 // If nb_using_appli == 1 but using_appli != appli, SWE cannot be killed | |
1049 if ((rvm_swe_array[swe_index].nb_using_appli == 1) && \ | |
1050 (rvm_swe_array[swe_index].using_appli[0] != swe_index)) { | |
1051 RVM_TRACE_WARNING_PARAM("RVM-KILL: SWE has dependencies, nb=", (UINT32)swe_index); | |
1052 return rvm_ret_value; | |
1053 }*/ | |
1054 | |
1055 /* Retrieve kill function with a get_info */ | |
1056 if (rvm_swe_array[swe_index].swe_get_info == NULL){ | |
1057 RVM_TRACE_WARNING_PARAM("RVM-KILL: SWE has no get_info function, cannot be killed, nb=", (UINT32)swe_index); | |
1058 return RVM_INTERNAL_ERR; /* must not call a NULL get_info pointer below */ | |
1059 } | |
1060 | |
1061 rvm_swe_array[swe_index].swe_get_info(&swe_info); | |
1062 switch( swe_info.swe_type) { | |
1063 case RVM_SWE_TYPE_1: | |
1064 // if(swe_info.type_info.type1.kill) swe_info.type_info.type1.kill() ; | |
1065 isVirtual=1; | |
1066 break; | |
1067 case RVM_SWE_TYPE_2: | |
1068 gid=resolveHostAddrId(rvm_swe_array[swe_index].swe_addr_id); | |
1069 rvf_unregisterFromHost(gid, rvm_swe_array[swe_index].swe_addr_id); | |
1070 rvf_isHostingTaskIdle(gid, &isIdle); | |
1071 if(isIdle) { // Deferred suspend of hosting task: | |
1072 rvf_suspend_task(gid); | |
1073 rvf_exit_task(gid); | |
1074 rvf_free_sys_resources(gid, 2); | |
1075 } | |
1076 if(swe_info.type_info.type2.kill) swe_info.type_info.type2.kill(); | |
1077 isVirtual=1; | |
1078 break; | |
1079 case RVM_SWE_TYPE_3: | |
1080 if(swe_info.type_info.type3.kill) swe_info.type_info.type3.kill(); | |
1081 break; | |
1082 case RVM_SWE_TYPE_4: | |
1083 if(swe_info.type_info.type4.kill) swe_info.type_info.type4.kill(); | |
1084 break; | |
1085 default: | |
1086 RVM_TRACE_WARNING_PARAM("RVM: SWE with no kill function, cannot be killed, nb=", (UINT32)swe_index); | |
1087 } | |
1088 | |
1089 if(!isVirtual) { | |
1090 rvf_exit_task((UINT8)(rvm_swe_array[swe_index].swe_return_path.addr_id)); | |
1091 rvf_free_buf(rvm_swe_array[swe_index].stack_ptr); | |
1092 RVM_TRACE_DEBUG_LOW_PARAM("RVM: Deleted task nb ", (UINT32)(rvm_swe_array[swe_index].swe_return_path.addr_id & 0x000000FF)); | |
1093 rvf_free_sys_resources(rvm_swe_array[swe_index].swe_addr_id, 2); | |
1094 } else { | |
1095 rvf_free_sys_resources(rvm_swe_array[swe_index].swe_addr_id, 0); | |
1096 } | |
1097 | |
1098 return rvm_ret_value; | |
1099 } | |
1100 | |
1101 UINT8 rvm_get_mb_level(UINT8 swe_index) { | |
1102 T_RVM_INFO_SWE swe_info; | |
1103 INT8 i=0; | |
1104 UINT8 isUsed=0; | |
1105 | |
1106 rvm_swe_array[swe_index].swe_get_info(&swe_info); | |
1107 switch( swe_info.swe_type) { | |
1108 case RVM_SWE_TYPE_1: | |
1109 if(swe_info.type_info.type1.nb_mem_bank!=0) | |
1110 for(i=0; i<swe_info.type_info.type1.nb_mem_bank; i++) { | |
1111 rvf_mb_is_used(swe_info.type_info.type1.mem_bank[i].bank_name, &isUsed); | |
1112 if(isUsed) return isUsed; | |
1113 } | |
1114 return isUsed; | |
1115 case RVM_SWE_TYPE_2: | |
1116 if(swe_info.type_info.type2.nb_mem_bank!=0) | |
1117 for(i=0; i<swe_info.type_info.type2.nb_mem_bank; i++) { | |
1118 rvf_mb_is_used(swe_info.type_info.type2.mem_bank[i].bank_name, &isUsed); | |
1119 if(isUsed) return isUsed; | |
1120 } | |
1121 return isUsed; | |
1122 case RVM_SWE_TYPE_3: | |
1123 if(swe_info.type_info.type3.nb_mem_bank!=0) | |
1124 for(i=0; i<swe_info.type_info.type3.nb_mem_bank; i++) { | |
1125 rvf_mb_is_used(swe_info.type_info.type3.mem_bank[i].bank_name, &isUsed); | |
1126 if(isUsed) return isUsed; | |
1127 } | |
1128 return isUsed; | |
1129 case RVM_SWE_TYPE_4: | |
1130 if(swe_info.type_info.type4.nb_mem_bank!=0) | |
1131 for(i=0; i<swe_info.type_info.type4.nb_mem_bank; i++) { | |
1132 rvf_mb_is_used(swe_info.type_info.type4.mem_bank[i].bank_name, &isUsed); | |
1133 if(isUsed) return isUsed; | |
1134 } | |
1135 return isUsed; | |
1136 default: RVM_TRACE_DEBUG_LOW("RVM: Error rvm_get_mb_level()"); | |
1137 return isUsed; | |
1138 } | |
1139 } | |
1140 | |
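/*
 * Illustrative sketch, not part of the original source (#if 0, never
 * compiled): how rvm_swe_has_stopped() above uses rvm_get_mb_level() to
 * choose the status reported to the caller once an entity has been shut
 * down; a non-zero level means at least one of the entity's memory banks
 * still holds allocations. The helper name is illustrative; "swe_num" and
 * "appli_return_path" stand for the locals used in rvm_swe_has_stopped().
 */
#if 0
static void report_stop_result(UINT8 swe_num, T_RV_RETURN_PATH appli_return_path)
{
	if (rvm_get_mb_level(swe_num))
		rvm_snd_msg_to_upper(RVM_STOP_APPLI, RV_MEMORY_REMAINING,
				     swe_num, appli_return_path);
	else
		rvm_snd_msg_to_upper(RVM_STOP_APPLI, RVM_OK,
				     swe_num, appli_return_path);
}
#endif
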
1141 void rvm_shut_down_appli( T_RVM_MSG * msg_Ptr) { | |
1142 T_RVM_PROCESSING_SWE * appli = NULL; /* pointer to the first element of the list */ | |
1143 T_RVM_RETURN ret_value; | |
1144 UINT8 swe_idx = 200; | |
1145 T_RV_RETURN_PATH appli_return_path; | |
1146 | |
1147 | |
1148 appli_return_path.callback_func = msg_Ptr->rp.callback_func; | |
1149 appli_return_path.addr_id = msg_Ptr->header.src_addr_id; | |
1150 | |
1151 | |
1152 RVM_TRACE_DEBUG_HIGH_PARAM("RVM: trying to stop Appli nb ", (UINT32)swe_idx); | |
1153 | |
1154 /* TO DO : REBUILD SWE LIST !!!! */ | |
1155 if ( rvm_build_swe_list( &appli, msg_Ptr->swe_num, 1) != RVM_OK ) | |
1156 { | |
1157 /* Display error message | |
1158 error case: use the return_path to inform the caller that an error occurs*/ | |
1159 rvm_snd_msg_to_upper(RVM_START_APPLI, RVM_INVALID_PARAMETER, msg_Ptr->swe_num, appli_return_path); | |
1160 RVM_TRACE_ERROR("RVM: SWE list built error"); | |
1161 return; | |
1162 } | |
1163 | |
1164 /* Stop all swe in the list that are used only once */ | |
1165 if ((ret_value = rvm_stop_swe_list(appli, (T_RV_HDR*)msg_Ptr)) != RVM_OK ) | |
1166 { | |
1167 /* Display error message | |
1168 TO DO: error case: use the return_path to inform the caller that an error occurs */ | |
1169 RVM_TRACE_WARNING_PARAM("RVM: Error in SWE stop", (UINT32)ret_value); | |
1170 return; | |
1171 } | |
1172 RVM_TRACE_DEBUG_LOW("RVM: SWE stop success"); | |
1173 | |
1174 | |
1175 /* Suspend all swe that are tasks */ | |
1176 if ((ret_value = rvm_suspend_swe_tasks(appli)) != RVM_OK ) | |
1177 { | |
1178 /* Display error message | |
1179 TO DO: error case: use the return_path to inform the caller that an error occurs */ | |
1180 RVM_TRACE_WARNING_PARAM("RVM: Error in tasks suspension", (UINT32)ret_value); | |
1181 return; | |
1182 } | |
1183 RVM_TRACE_DEBUG_LOW("RVM: SWE tasks suspended"); | |
1184 | |
1185 /* Kill all SWEs */ | |
1186 if ((ret_value = rvm_kill_swe_list(appli)) != RVM_OK) | |
1187 { | |
1188 /* Display error message | |
1189 TO DO: error case: use the return_path to inform the caller that an error occurs */ | |
1190 RVM_TRACE_WARNING_PARAM("RVM: Error in SWE killing", (UINT32)ret_value); | |
1191 return; | |
1192 } | |
1193 RVM_TRACE_DEBUG_LOW("RVM: SWE kill success"); | |
1194 | |
1195 | |
1196 /* Delete the swe Memory Banks */ | |
1197 rvm_delete_created_mb(appli); | |
1198 | |
1199 /* Delete memory used and restore NOT_STARTED states */ | |
1200 if ((ret_value = rvm_clean_env(appli)) != RVM_OK) | |
1201 { | |
1202 /* Display error message | |
1203 TO DO: error case: use the return_path to inform the caller that an error occurs */ | |
1204 RVM_TRACE_WARNING_PARAM("RVM: Error in Memory cleaning", (UINT32)ret_value); | |
1205 return; | |
1206 } | |
1207 RVM_TRACE_DEBUG_LOW("RVM: Memory cleaning success"); | |
1208 | |
1209 | |
1210 /* build a message and send the response to the caller */ | |
1211 /* send a result using the return_path */ | |
1212 rvm_snd_msg_to_upper(RVM_STOP_APPLI, RVM_OK, msg_Ptr->swe_num, appli_return_path); | |
1213 | |
1214 | |
1215 /* and store the return_path */ | |
1216 rvm_swe_array[ msg_Ptr->swe_num ].mmi_return_path.callback_func = msg_Ptr->rp.callback_func; | |
1217 rvm_swe_array[ msg_Ptr->swe_num ].mmi_return_path.addr_id = msg_Ptr->header.src_addr_id; | |
1218 } | |
1219 | |
1220 T_RVM_RETURN _fatal( T_RVM_PROCESSING_SWE * appli, UINT8 rm) { | |
1221 T_RVM_PROCESSING_SWE * cur_swe = NULL; | |
1222 // T_RVM_INFO_SWE swe_info; | |
1223 | |
1224 RVM_TRACE_DEBUG_LOW("RVM: Fatality handler: reclaiming system resources!"); | |
1225 /* free all appli's system resources */ | |
1226 for (cur_swe = appli; cur_swe!=NULL; cur_swe = cur_swe->next_swe) { /* advance, otherwise this loop never terminates */ | |
1227 if(rvm_swe_array[cur_swe->swe_id].swe_state!=SWE_RUNNING) | |
1228 rvf_free_sys_resources(rvm_swe_array[cur_swe->swe_id].swe_addr_id, rm); | |
1229 } | |
1230 | |
1231 | |
1232 return RVM_OK; | |
1233 } | |
1234 | |
1235 /******************************************************************************* | |
1236 ** | |
1237 ** Function rvm_generic_swe_core | |
1238 ** | |
1239 ** Description This is the main task core used for GROUP_MEMBER SWEs hosting | |
1240 ** and for SINGLE SWEs. | |
1241 ** | |
1242 ** Parameters: useless, may be for future evolutions if Nucleus really | |
1243 ** supports it. | |
1244 ** | |
1245 ** Returns None | |
1246 ** | |
1247 *******************************************************************************/ | |
1248 T_RVM_RETURN rvm_generic_swe_core(void) | |
1249 { | |
1250 return RVM_OK; | |
1251 } |