comparison src/gpf/osl/os_mem_fl.c @ 6:8b2a9a374324

src/gpf: addition of Magnetite src/gpf2
author Mychaela Falconia <falcon@freecalypso.org>
date Sun, 15 Jul 2018 08:15:49 +0000
parents
children 82ae724ca0d7
/*
 * This C module is a reconstruction based on the disassembly of
 * os_mem.obj in frame_na7_db_fl.lib from the Leonardo package.
 */

/* set of included headers from COFF symtab: */
#include <stdio.h>
#include <string.h>
#include "nucleus.h"
#include "typedefs.h"
#include "os.h"
#include "gdi.h"
#include "os_types.h"
#include "os_glob.h"

extern T_OS_PART_GRP_TABLE_ENTRY PartGrpTable[];
extern T_OS_MEM_POOL_TABLE_ENTRY MemPoolTable[];
extern T_OS_POOL_BORDER PoolBorder[];

OS_HANDLE os_ext_pool_handle;
OS_HANDLE os_int_pool_handle;

static USHORT NumOfMemoryPools;
static NU_SEMAPHORE MemSemCB;
static NU_MEMORY_POOL mem_pool_head;

GLOBAL LONG
os_SetPoolHandles(OS_HANDLE ext_pool_handle, OS_HANDLE int_pool_handle)
{
        os_ext_pool_handle = ext_pool_handle;
        os_int_pool_handle = int_pool_handle;
        return(OS_OK);
}

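/*
 * os_GetPartitionPoolEntry() is an iterator over all partition pools:
 * FIRST_ENTRY restarts the walk at the head of group 0, NEXT_ENTRY
 * advances to the next pool in the current group, or to the head of
 * the following group once the current one is exhausted.  The
 * iteration state lives in static variables.
 */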
static int
os_GetPartitionPoolEntry(USHORT Index, T_OS_PART_POOL **pool)
{
        static T_OS_PART_POOL *part_pool;
        static int grp_hndl;

        switch (Index) {
        case FIRST_ENTRY:
                grp_hndl = 0;
                *pool = part_pool = PartGrpTable[0].grp_head;
                return(OS_OK);
        case NEXT_ENTRY:
                if (part_pool->next) {
                        *pool = part_pool = part_pool->next;
                        return(OS_OK);
                }
                grp_hndl++;
                if (PartGrpTable[grp_hndl].grp_head) {
                        *pool = part_pool = PartGrpTable[grp_hndl].grp_head;
                        return(OS_OK);
                } else
                        return(OS_ERROR);
        default:
                return(OS_ERROR);
        }
}

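/*
 * os_PartitionInformation() retrieves the Nucleus information for the
 * partition pool selected by Handle (FIRST_ENTRY/NEXT_ENTRY iteration)
 * and formats a one-line summary into the caller-provided Buffer.
 */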
GLOBAL LONG
os_PartitionInformation(USHORT Handle, char *Buffer)
{
        T_OS_PART_POOL *pool;
        OPTION SuspendType;
        UNSIGNED PoolSize;
        UNSIGNED PartitionSize;
        UNSIGNED Available;
        UNSIGNED Waiting;
        UNSIGNED Allocated;
        VOID *pStartAddress;
        NU_TASK *First;
        CHAR Name[NU_MAX_NAME];

        if (os_GetPartitionPoolEntry(Handle, &pool) == OS_ERROR)
                return(OS_ERROR);
        if (NU_Partition_Pool_Information(&pool->pcb, Name, &pStartAddress,
                                          &PoolSize, &PartitionSize,
                                          &Available, &Allocated,
                                          &SuspendType, &Waiting, &First)
            != NU_SUCCESS)
                return(OS_ERROR);
        sprintf(Buffer,
                "Name:%s Addr:%lx PoolSize:%ld PartSize:%ld Free:%ld Used:%ld",
                Name, (UNSIGNED) pStartAddress, PoolSize, PartitionSize,
                Available, Allocated);
        return(OS_OK);
}

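/*
 * os_GetMemoryPoolEntry() maps an iteration index (FIRST_ENTRY,
 * NEXT_ENTRY or an explicit pool index) to a memory pool handle,
 * returning OS_ERROR once the end of the pool table is reached.
 */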
static int
os_GetMemoryPoolEntry(USHORT Index, OS_HANDLE *Handle)
{
        static USHORT Idx;

        switch (Index) {
        case FIRST_ENTRY:
                Idx = 0;
                break;
        case NEXT_ENTRY:
                Idx++;
                break;
        default:
                Idx = Index;
        }
        if (Idx == NumOfMemoryPools)
                return(OS_ERROR);
        *Handle = Idx;
        return(OS_OK);
}

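/*
 * os_MemoryInformation() retrieves the Nucleus information for the
 * selected dynamic memory pool and formats a one-line summary into
 * the caller-provided Buffer.
 */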
GLOBAL LONG
os_MemoryInformation(USHORT Index, char *Buffer)
{
        OS_HANDLE Handle;
        OPTION SuspendType;
        UNSIGNED Size, Min, Available, Waiting;
        VOID *pStartAddress;
        NU_TASK *First;
        CHAR Name[NU_MAX_NAME];

        if (os_GetMemoryPoolEntry(Index, &Handle) == OS_ERROR)
                return(OS_ERROR);
        if (NU_Memory_Pool_Information(MemPoolTable[Handle].pcb, Name,
                                       &pStartAddress, &Size, &Min,
                                       &Available, &SuspendType, &Waiting,
                                       &First)
            != NU_SUCCESS)
                return(OS_ERROR);
        sprintf(Buffer,
                "Heapname:%s Addr:%lx Size:%ld Min:%ld Free:%ld Suspend:%d",
                Name, (UNSIGNED) pStartAddress, Size, Min, Available,
                SuspendType);
        return(OS_OK);
}

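/*
 * os_MemInit() creates the semaphore protecting pool creation and
 * resets the partition group and pool border tables; entry 0 of the
 * memory pool table is bound to the statically allocated pool head.
 */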
GLOBAL LONG
os_MemInit(void)
{
        USHORT i;

        if (NU_Create_Semaphore(&MemSemCB, "MEMSEM", 1, NU_PRIORITY)
            != NU_SUCCESS)
                return(OS_ERROR);
        for (i = 0; i <= MaxPoolGroups; i++) {
                PoolBorder[i].Start = (char *)0xFFFFFFFF;
                PoolBorder[i].End = (char *)0;
                PartGrpTable[i].grp_head = 0;
                PartGrpTable[i].name[0] = 0;
        }
        MemPoolTable[0].pcb = &mem_pool_head;
        return(OS_OK);
}

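/*
 * os_InitPartitionCheck() temporarily allocates every partition in the
 * given pool and writes GUARD_PATTERN into the last word of each one,
 * presumably so that later checks can detect partition overruns; all
 * partitions and the scratch pointer array are released again before
 * returning.
 */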
void
os_InitPartitionCheck(T_OS_PART_POOL *pool)
{
        unsigned **Buffer, offset;
        USHORT i, k;

        NU_Allocate_Memory(MemPoolTable[0].pcb, (VOID **) &Buffer,
                           pool->pcb.pm_available * sizeof(unsigned *),
                           NU_NO_SUSPEND);
        offset = pool->pcb.pm_partition_size / sizeof(unsigned) - 1;
        for (i = 0; ; i++) {
                if (NU_Allocate_Partition(&pool->pcb, (VOID **)(Buffer + i),
                                          NU_NO_SUSPEND)
                    != NU_SUCCESS)
                        break;
                Buffer[i][offset] = GUARD_PATTERN;
        }
        for (k = 0; k < i; k++)
                if (NU_Deallocate_Partition(Buffer[k]) != NU_SUCCESS)
                        break;
        NU_Deallocate_Memory(Buffer);
}

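/*
 * os_GetPrimpoolCB() returns a pointer to the Nucleus partition pool
 * control block of pool #id within group #grp, or NULL if the group
 * or the requested pool does not exist.
 */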
GLOBAL const ULONG *
os_GetPrimpoolCB(int grp, int id)
{
        T_OS_PART_POOL *pool;
        int i;

        pool = PartGrpTable[grp].grp_head;
        if (!pool)
                return(0);
        if (id < 0)
                return(0);
        for (i = 0; i < id; i++) {
                pool = pool->next;
                if (!pool)
                        return(0);
        }
        return (const ULONG *) &pool->pcb;
}

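/*
 * os_GetPartitionPoolStatus() reports the free and allocated partition
 * counts of the first pool in the given group whose partitions are
 * large enough for the requested size; if no pool matches, the size is
 * 0, or Nucleus reports an error, both counts are zeroed and OS_ERROR
 * is returned.
 */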
GLOBAL LONG
os_GetPartitionPoolStatus(ULONG size, OS_HANDLE gr_hndl,
                          USHORT *m_free, USHORT *m_alloc)
{
        T_OS_PART_POOL *pool;
        UNSIGNED dummy, allocated, available;
        CHAR Name[NU_MAX_NAME];

        for (pool = PartGrpTable[gr_hndl].grp_head; pool; pool = pool->next) {
                if (!size)
                        break;
                if (size > pool->size)
                        continue;
                if (NU_Partition_Pool_Information(&pool->pcb, Name,
                                                  (VOID **)&dummy, &dummy,
                                                  &dummy, &available,
                                                  &allocated, (OPTION *)&dummy,
                                                  &dummy, (NU_TASK **)&dummy)
                    != NU_SUCCESS)
                        break;
                *m_alloc = allocated;
                *m_free = available;
                return(OS_OK);
        }
        *m_alloc = 0;
        *m_free = 0;
        return(OS_ERROR);
}

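/*
 * os_GetPartitionGroupHandle() looks up a partition group by name and
 * returns its index in PartGrpTable through GroupHandle.
 */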
GLOBAL LONG
os_GetPartitionGroupHandle(OS_HANDLE Caller, char *Name, OS_HANDLE *GroupHandle)
{
        int i;

        for (i = 0; i <= MaxPoolGroups; i++) {
                if (!PartGrpTable[i].grp_head)
                        continue;
                if (strncmp(Name, PartGrpTable[i].name, RESOURCE_NAMELEN-1))
                        continue;
                *GroupHandle = i;
                return(OS_OK);
        }
        return(OS_ERROR);
}

GLOBAL LONG
os_DeallocateMemory(OS_HANDLE TaskHandle, T_VOID_STRUCT *Buffer)
{
        if (NU_Deallocate_Memory(Buffer) == NU_SUCCESS)
                return(OS_OK);
        else
                return(OS_ERROR);
}

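/*
 * os_AllocateMemory() allocates Size bytes from the dynamic memory
 * pool given by PoolHandle.  A requested infinite suspension
 * (0xFFFFFFFF) is first attempted as a one-tick wait; if that times
 * out, the call retries with a true infinite suspension and reports
 * OS_WAITED on eventual success.  Failures return OS_TIMEOUT or
 * OS_ERROR with *Buffer set to NULL.
 */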
GLOBAL LONG
os_AllocateMemory(OS_HANDLE TaskHandle, T_VOID_STRUCT **Buffer, ULONG Size,
                  ULONG Suspend, OS_HANDLE PoolHandle)
{
        int ret, sts;

        if (Suspend == 0xFFFFFFFF)
                Suspend = 1;
        ret = OS_OK;
        for (;;) {
                sts = NU_Allocate_Memory(MemPoolTable[PoolHandle].pcb,
                                         (VOID **) Buffer, Size, Suspend);
                switch (sts) {
                case NU_SUCCESS:
                        return(ret);
                case NU_INVALID_SUSPEND:
                        Suspend = 0;
                        continue;
                case NU_NO_MEMORY:
                case NU_TIMEOUT:
                        if (Suspend == 1) {
                                Suspend = 0xFFFFFFFF;
                                ret = OS_WAITED;
                                continue;
                        } else {
                                *Buffer = 0;
                                return(OS_TIMEOUT);
                        }
                default:
                        /*
                         * Disassembly reveals that the original code
                         * has an endless loop here, the equivalent
                         * of continue. My guess is that they simply
                         * forgot the default case, and so control
                         * falls onto the closing brace of the switch
                         * and then onto the closing brace of the for
                         * loop. But I prefer better error handling,
                         * hence the present addition. - Space Falcon
                         */
                        *Buffer = 0;
                        return(OS_ERROR);
                }
        }
}

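/*
 * os_CreatePartitionPool() creates a partition pool of Num partitions
 * of Size bytes each at Addr and attaches it to the partition group
 * named GroupName, creating that group if it does not exist yet.  Each
 * partition is created 4 bytes larger than Size, apparently to make
 * room for the guard word written by os_InitPartitionCheck(), and the
 * group boundaries in PoolBorder[] are extended to cover the new
 * pool's memory.
 */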
GLOBAL LONG
os_CreatePartitionPool(OS_HANDLE TaskHandle, char *GroupName, void *Addr,
                       USHORT Num, ULONG Size, OS_HANDLE *GroupHandle)
{
        STATUS sts;
        T_OS_PART_POOL *part_group_head, *opool, *npool;
        USHORT part_group;
        USHORT i, j;
        char PoolName[8], *cp;

        sts = NU_Obtain_Semaphore(&MemSemCB, NU_SUSPEND);
        j = 0;
        part_group_head = 0;
        for (i = 0; i <= MaxPoolGroups; i++) {
                if (!PartGrpTable[i].grp_head || !PartGrpTable[i].name[0])
                        break;
                if (!strncmp(GroupName, PartGrpTable[i].name,
                             RESOURCE_NAMELEN - 1)) {
                        part_group_head = PartGrpTable[i].grp_head;
                        opool = part_group_head;
                        j++;
                        while (opool->next) {
                                opool = opool->next;
                                j++;
                        }
                        break;
                }
        }
        /*
         * This error check logic has been modified from the original
         * faithful reconstruction by Space Falcon. In the original code
         * if MaxPoolGroups had been reached and the for loop above
         * never broke, the code would proceed to overwrite pool #0
         * instead of catching the error.
         */
        if (i > MaxPoolGroups) {
release_sem_return_err:
                if (sts == NU_SUCCESS)
                        NU_Release_Semaphore(&MemSemCB);
                return(OS_ERROR);
        }
        part_group = i;
        if (!part_group_head) {
                strncpy(PartGrpTable[part_group].name, GroupName,
                        RESOURCE_NAMELEN);
                PartGrpTable[part_group].name[RESOURCE_NAMELEN-1] = 0;
        }
        if (os_AllocateMemory(OS_NOTASK, (T_VOID_STRUCT **) &npool,
                              sizeof(T_OS_PART_POOL), OS_NO_SUSPEND,
                              os_ext_pool_handle) != OS_OK)
                goto release_sem_return_err;
        sprintf(PoolName, "POOL%1d%1d", part_group + 1, j);
        Size &= ~3;
        npool->pool_mem = Addr;
#if 0
        /*
         * FreeCalypso: in our first-attempt gcc-built firmwares we needed to
         * bzero the PM_PCB before calling NU_Create_Partition_Pool() to
         * prevent the possibility of Nucleus error checker failing the call
         * because the signature word happens to be there already. The issue
         * arose because we were using "raw" memory sections that weren't
         * zeroed out on boot like standard .bss, but in TI's original
         * architecture everything is zeroed out on boot, so we don't need
         * this additional zeroing here.
         */
        bzero(&npool->pcb, sizeof(NU_PARTITION_POOL));
#endif
        if (NU_Create_Partition_Pool(&npool->pcb, PoolName, npool->pool_mem,
                                     POOL_SIZE(Num, Size), Size + 4, NU_FIFO)
            != NU_SUCCESS)
                goto release_sem_return_err;
        if (!part_group_head)
                PartGrpTable[part_group].grp_head = npool;
        else
                opool->next = npool;
        npool->size = Size;
        npool->next = 0;
        *GroupHandle = part_group;
        cp = (char *) npool->pool_mem;
        if (PoolBorder[part_group].Start >= cp)
                PoolBorder[part_group].Start = cp;
        cp += POOL_SIZE(Num, Size);
        if (PoolBorder[part_group].End < cp)
                PoolBorder[part_group].End = cp;
        os_InitPartitionCheck(npool);
        if (sts == NU_SUCCESS)
                NU_Release_Semaphore(&MemSemCB);
        return(OS_OK);
}

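/*
 * os_CreatePartitionPool_fixed_pool_size() is a wrapper that computes
 * how many partitions of PartSize bytes (plus per-partition overhead)
 * fit into a pool of PoolSize bytes, creates the pool, and reports the
 * resulting partition count through NumCreated.
 */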
GLOBAL LONG
os_CreatePartitionPool_fixed_pool_size(OS_HANDLE TaskHandle, char *GroupName,
                                       void *Addr, USHORT PoolSize,
                                       ULONG PartSize, OS_HANDLE *GroupHandle,
                                       ULONG *NumCreated)
{
        USHORT num;

        num = PoolSize / (PartSize + PT_CHKOVERHEAD + PT_OVERHEAD);
        *NumCreated = num;
        return os_CreatePartitionPool(TaskHandle, GroupName, Addr, num,
                                      PartSize, GroupHandle);
}

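/*
 * os_CreateMemoryPool() creates a Nucleus dynamic memory pool of
 * PoolSize bytes at Addr, or returns the handle of an already existing
 * pool with the same name.  Pool #0 uses the statically allocated
 * control block set up by os_MemInit(); control blocks for further
 * pools are allocated from the external memory pool.
 */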
GLOBAL LONG
os_CreateMemoryPool(OS_HANDLE TaskHandle, char *Name, void *Addr,
                    ULONG PoolSize, OS_HANDLE *PoolHandle)
{
        STATUS sts;
        USHORT i;

        sts = NU_Obtain_Semaphore(&MemSemCB, NU_SUSPEND);
        for (i = 0; i < NumOfMemoryPools; i++)
                if (!strncmp(Name, MemPoolTable[i].name, RESOURCE_NAMELEN-1)) {
                        *PoolHandle = i;
                        if (sts == NU_SUCCESS)
                                NU_Release_Semaphore(&MemSemCB);
                        return(OS_OK);
                }
        if (i >= MaxMemoryPools) {
release_sem_return_err:
                if (sts == NU_SUCCESS)
                        NU_Release_Semaphore(&MemSemCB);
                return(OS_ERROR);
        }
        if (i) {
                if (os_AllocateMemory(OS_NOTASK,
                                      (T_VOID_STRUCT **) &MemPoolTable[i].pcb,
                                      sizeof(NU_MEMORY_POOL), OS_NO_SUSPEND,
                                      os_ext_pool_handle) != OS_OK)
                        goto release_sem_return_err;
#if 0
                /*
                 * FreeCalypso: in our first-attempt gcc-built firmwares we
                 * needed to bzero the DM_PCB before calling
                 * NU_Create_Memory_Pool() to prevent the possibility of
                 * Nucleus error checker failing the call because the signature
                 * word happens to be there already. The issue arose because
                 * we were using "raw" memory sections that weren't zeroed out
                 * on boot like standard .bss, but in TI's original architecture
                 * everything is zeroed out on boot, so we don't need this
                 * additional zeroing here.
                 */
                bzero(MemPoolTable[i].pcb, sizeof(NU_MEMORY_POOL));
#endif
        }
        if (NU_Create_Memory_Pool(MemPoolTable[i].pcb, Name, Addr, PoolSize,
                                  4, NU_FIFO) != NU_SUCCESS)
                goto release_sem_return_err;
        strncpy(MemPoolTable[i].name, Name, RESOURCE_NAMELEN);
        MemPoolTable[i].name[RESOURCE_NAMELEN-1] = 0;
        *PoolHandle = i;
        NumOfMemoryPools++;
        if (sts == NU_SUCCESS)
                NU_Release_Semaphore(&MemSemCB);
        return(OS_OK);
}