/*
 * Generational Conservative Garbage Collector for CMUCL x86.
 *
 * This code was written by Douglas T. Crosher, based on Public Domain
 * codes from Carnegie Mellon University. This code has been placed in
 * the public domain, and is provided 'as is'.
 *
 * Douglas Crosher, 1996, 1997, 1998, 1999.
 *
 */

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include "lisp.h"
#include "arch.h"
#include "internals.h"
#include "os.h"
#include "globals.h"
#include "interrupt.h"
#include "validate.h"
#include "lispregs.h"
#include "interr.h"
#include "gencgc.h"

/*
 * This value in a hash table hash-vector means that the key uses
 * EQ-based hashing.  That is, the key might be using EQ or EQL for
 * the test.  This MUST match the value used in hash-new.lisp!
 */
#define EQ_BASED_HASH_VALUE     0x80000000
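
/*
 * A minimal sketch (not used by the collector) of how a hash-vector
 * entry could be tested against EQ_BASED_HASH_VALUE; the function and
 * parameter names are hypothetical and for illustration only.
 */
#if 0
static inline int
eq_based_hash_entry_p(unsigned long hash_value)
{
    return hash_value == EQ_BASED_HASH_VALUE;
}
#endif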

#define gc_abort() lose("GC invariant lost!  File \"%s\", line %d\n", \
			__FILE__, __LINE__)

#if (defined(i386) || defined(__x86_64))

#define set_alloc_pointer(value) \
  SetSymbolValue (ALLOCATION_POINTER, (value))
#define get_alloc_pointer() \
  SymbolValue (ALLOCATION_POINTER)
#define get_binding_stack_pointer() \
  SymbolValue (BINDING_STACK_POINTER)
#define get_pseudo_atomic_atomic() \
  SymbolValue (PSEUDO_ATOMIC_ATOMIC)
#define set_pseudo_atomic_atomic() \
  SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (1))
#define clr_pseudo_atomic_atomic() \
  SetSymbolValue (PSEUDO_ATOMIC_ATOMIC, make_fixnum (0))
#define get_pseudo_atomic_interrupted() \
  SymbolValue (PSEUDO_ATOMIC_INTERRUPTED)
#define clr_pseudo_atomic_interrupted() \
  SetSymbolValue (PSEUDO_ATOMIC_INTERRUPTED, make_fixnum (0))
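
/*
 * Illustrative sketch only: the usual pseudo-atomic pattern built from
 * the macros above.  The real allocation paths live in the assembly
 * routines and the gc_alloc functions below; this helper is
 * hypothetical and is never compiled or called.
 */
#if 0
static void
pseudo_atomic_sketch(void)
{
    set_pseudo_atomic_atomic();
    /* ... allocation work that must not be interrupted by a GC ... */
    clr_pseudo_atomic_atomic();
    if (get_pseudo_atomic_interrupted()) {
	/* An interrupt arrived while we were pseudo-atomic; the runtime
	   would run the deferred handler here. */
	clr_pseudo_atomic_interrupted();
    }
}
#endif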

#define set_current_region_free(value) \
  SetSymbolValue(CURRENT_REGION_FREE_POINTER, (value))
#define set_current_region_end(value) \
  SetSymbolValue(CURRENT_REGION_END_ADDR, (value))
#define get_current_region_free() \
  SymbolValue(CURRENT_REGION_FREE_POINTER)

#define set_current_region_end(value) \
  SetSymbolValue(CURRENT_REGION_END_ADDR, (value))

#elif defined(sparc)

/*
 * current_dynamic_space_free_pointer contains the pseudo-atomic
 * stuff, so we need to preserve those bits when we give it a value.
 * This value better not have any bits set there either!
 */

/*
 * On sparc, we don't need to set the alloc_pointer in the code here
 * because the alloc pointer (current_dynamic_space_free_pointer) is
 * the same as *current-region-free-pointer* and is stored in
 * alloc-tn.
 */
#define set_alloc_pointer(value)
#define get_alloc_pointer() \
  ((unsigned long) current_dynamic_space_free_pointer & ~lowtag_Mask)
#define get_binding_stack_pointer() \
  (current_binding_stack_pointer)
#define get_pseudo_atomic_atomic() \
  ((unsigned long)current_dynamic_space_free_pointer & pseudo_atomic_Value)
#define set_pseudo_atomic_atomic() \
  (current_dynamic_space_free_pointer \
   = (lispobj*) ((unsigned long)current_dynamic_space_free_pointer | pseudo_atomic_Value))
#define clr_pseudo_atomic_atomic() \
  (current_dynamic_space_free_pointer \
   = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_Value))
#define get_pseudo_atomic_interrupted() \
  ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_InterruptedValue)
#define clr_pseudo_atomic_interrupted() \
  (current_dynamic_space_free_pointer \
   = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_InterruptedValue))

#define set_current_region_free(value) \
  current_dynamic_space_free_pointer = (lispobj*)((value) | ((long)current_dynamic_space_free_pointer & lowtag_Mask))

#define get_current_region_free() \
  ((long)current_dynamic_space_free_pointer & (~(lowtag_Mask)))

#define set_current_region_end(value) \
  SetSymbolValue(CURRENT_REGION_END_ADDR, (value))

#elif defined(DARWIN) && defined(__ppc__)
#ifndef pseudo_atomic_InterruptedValue
#define pseudo_atomic_InterruptedValue 1
#endif
#ifndef pseudo_atomic_Value
#define pseudo_atomic_Value 4
#endif

#define set_alloc_pointer(value) 
#define get_alloc_pointer() \
  ((unsigned long) current_dynamic_space_free_pointer & ~lowtag_Mask)
#define get_binding_stack_pointer() \
  (current_binding_stack_pointer)
#define get_pseudo_atomic_atomic() \
  ((unsigned long)current_dynamic_space_free_pointer & pseudo_atomic_Value)
#define set_pseudo_atomic_atomic() \
  (current_dynamic_space_free_pointer \
   = (lispobj*) ((unsigned long)current_dynamic_space_free_pointer | pseudo_atomic_Value))
#define clr_pseudo_atomic_atomic() \
  (current_dynamic_space_free_pointer \
   = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_Value))
#define get_pseudo_atomic_interrupted() \
  ((unsigned long) current_dynamic_space_free_pointer & pseudo_atomic_InterruptedValue)
#define clr_pseudo_atomic_interrupted() \
  (current_dynamic_space_free_pointer \
   = (lispobj*) ((unsigned long) current_dynamic_space_free_pointer & ~pseudo_atomic_InterruptedValue))

#define set_current_region_free(value) \
  current_dynamic_space_free_pointer = (lispobj*)((value) | ((long)current_dynamic_space_free_pointer & lowtag_Mask))

#define get_current_region_free() \
  ((long)current_dynamic_space_free_pointer & (~(lowtag_Mask)))

#define set_current_region_end(value) \
  SetSymbolValue(CURRENT_REGION_END_ADDR, (value))

#else
#error gencgc is not supported on this platform
#endif

/* Define for activating assertions.  */

/* Check for references to stack-allocated objects.  */

static void *invalid_stack_start, *invalid_stack_end;

static inline void
check_escaped_stack_object(lispobj * where, lispobj obj)
{
#if !defined(DARWIN) && !defined(__ppc__)
    void *p;

    if (Pointerp(obj)
	&& (p = (void *) PTR(obj),
	    (p >= (void *) control_stack
	     && p < (void *) control_stack_end))) {
	char *space;

	if (where >= (lispobj *) DYNAMIC_0_SPACE_START
	    && where < (lispobj *) (DYNAMIC_0_SPACE_START + dynamic_space_size))
	    space = "dynamic space";
	else if (where >= (lispobj *) STATIC_SPACE_START
		 && where <
		 (lispobj *) (STATIC_SPACE_START + static_space_size)) space =
		"static space";
	else if (where >= (lispobj *) READ_ONLY_SPACE_START
		 && where <
		 (lispobj *) (READ_ONLY_SPACE_START +
			      read_only_space_size)) space = "read-only space";
	else
	    space = NULL;

	/* GC itself uses some stack, so we can't tell exactly where the
	   invalid stack area starts.  Usually, it should be an error if a
	   reference to a stack-allocated object is found, although it
	   is valid to store a reference to a stack-allocated object
	   temporarily in another reachable object, as long as the
	   reference goes away at the end of a dynamic extent.  */

	if (p >= invalid_stack_start && p < invalid_stack_end)
	    lose("Escaped stack-allocated object 0x%08lx at %p in %s\n",
		 (unsigned long) obj, where, space);
#ifndef i386
	else if ((where >= (lispobj *) control_stack
		  && where < (lispobj *) (control_stack_end))
		 || (space == NULL)) {
	    /* Do nothing if the reference is from the control stack,
	       because that will happen, and that's ok.  Or if it's from
	       an unknown space (typically from scavenging an interrupt
	       context). */
	}
#endif

	else
	    fprintf(stderr,
		    "Reference to stack-allocated object 0x%08lx at %p in %s\n",
		    (unsigned long) obj, where,
		    space ? space : "Unknown space");
    }
#endif
}


#if defined(x86) && defined(SOLARIS)
#define DEFAULT_GC_ASSERT_LEVEL 1
#else
#define DEFAULT_GC_ASSERT_LEVEL 0
#endif

int gc_assert_level = DEFAULT_GC_ASSERT_LEVEL;

#define gc_assert(ex)		\
  do {				\
    if (!(ex)) gc_abort ();     \
  } while (0)


/*
 * The number of generations, an extra is added to this for use as a temp.
 */
#define NUM_GENERATIONS 6

/* Debugging variables. */

/*
 * The verbose level. All non-error messages are disabled at level 0,
 * and only a few rare messages are printed at level 1.
 */
unsigned gencgc_verbose = 0;
unsigned counters_verbose = 0;

/*
 * If true, then some debugging information is printed when scavenging
 * static (malloc'ed) arrays.
 */
boolean debug_static_array_p = 0;

/*
 * To enable the use of page protection to help avoid the scavenging
 * of pages that don't have pointers to younger generations.
 */
boolean enable_page_protection = TRUE;

/*
 * Hunt for pointers to old-space when GCing generations >= verify_gens.
 * Set to NUM_GENERATIONS to disable.
 */
int verify_gens = NUM_GENERATIONS;

/*
 * Enable a pre-scan verify of generation 0 before it's GCed.  (This
 * makes GC very, very slow, so don't enable this unless you really
 * need it!)
 */
int pre_verify_gen_0 = FALSE;

/*
 * Enable checking for bad pointers after gc_free_heap called from purify.
 */
#if 0 && defined(DARWIN)
int verify_after_free_heap = TRUE;
#else
int verify_after_free_heap = FALSE;
#endif

/*
 * Enable the printing of a note when code objects are found in the
 * dynamic space during a heap verify.
 */
boolean verify_dynamic_code_check = FALSE;

/*
 * Enable the checking of code objects for fixup errors after they are
 * transported.  (Only used for x86.)
 */
boolean check_code_fixups = FALSE;


/*
 * How to have a page zero-filled.  When a page is freed, we need to
 * either zero it immediately or mark it as needing to be zero-filled
 * when it is allocated later.
 */
enum gencgc_unmap_mode {
    /*
     * Unmap and mmap the region to get it zeroed when the page
     * is freed.
     */
    MODE_MAP,

    /*
     * Memset the region to 0 when it is freed.
     */
    MODE_MEMSET,

    /*
     * Call madvise to allow the kernel to free the memory if needed.
     * But when the region needs to be allocated, we will zero it if
     * necessary.
     */
    MODE_MADVISE,

    /*
     * Like madvise, except we don't actually call madvise and lazily
     * zero the region when it is allocated.
     */
    MODE_LAZY,
};
  
    
/*
 * Control how freed regions should be zeroed.  Default to MODE_LAZY
 * for all systems since tests indicate that it is much faster than
 * unmapping and re-mapping it to zero the region.  See enum
 * gencgc_unmap_mode for other possible options.
 *
 * XXX: Choose the appropriate mode for each OS/arch.
 *
 * To enable unmapping of a page and re-mmapping it to have it zero filled.
 * Note: this can waste a lot of swap on FreeBSD and Open/NetBSD(?) so
 * don't unmap.
 */

#if defined(DARWIN) || defined(__linux__) || defined(sparc)
enum gencgc_unmap_mode gencgc_unmap_zero = MODE_LAZY;
#else
enum gencgc_unmap_mode gencgc_unmap_zero = MODE_MEMSET;
#endif

/*
 * Enable checking that newly allocated regions are zero filled.
 */
#if 0 && defined(DARWIN)
boolean gencgc_zero_check = TRUE;
boolean gencgc_enable_verify_zero_fill = TRUE;
#else
boolean gencgc_zero_check = FALSE;
boolean gencgc_enable_verify_zero_fill = FALSE;
#endif

/*
 * Enable checking that free pages are zero filled during gc_free_heap
 * called after purify.
 */
#if 0 && defined(DARWIN)
boolean gencgc_zero_check_during_free_heap = TRUE;
#else
boolean gencgc_zero_check_during_free_heap = FALSE;
#endif

/*
 * For now, enable the zero check if gencgc_zero_check is true or if
 * gencgc_unmap_zero is MODE_LAZY or MODE_MADVISE.  XXX: Remove this
 * additional condition when we feel that gencgc_unmap_zero is good
 * enough.
 */

#define DO_GENCGC_ZERO_CHECK	(gencgc_zero_check)

/*
 * Only do the zero check during free_heap if both
 * gencgc_zero_check_during_free_heap is true and gencgc_unmap_zero is
 * MODE_MAP or MODE_MEMSET, because in the other modes unallocated
 * pages are not guaranteed to be zero filled.
 */
#define DO_GENCGC_ZERO_CHECK_DURING_FREE_HEAP	(gencgc_zero_check_during_free_heap)

/*
 * The minimum size for a large object.
 */
unsigned large_object_size = 4 * GC_PAGE_SIZE;

/*
 * Enable the filtering of stack/register pointers. This could reduce
 * the number of invalid pointers accepted. It will probably degrade
 * interrupt safety during object initialisation.
 */
boolean enable_pointer_filter = TRUE;

/*
 * The total bytes allocated. Seen by (dynamic-usage)
 */
unsigned long bytes_allocated = 0;

/*
 * The total amount of bytes ever allocated.  Not decreased by GC.
 */
volatile unsigned long long bytes_allocated_sum = 0;

/*
 * GC trigger; a value of 0xffffffff represents disabled.
 */
unsigned long auto_gc_trigger = 0xffffffff;

/*
 * Number of pages to reserve for heap overflow.  We want some space
 * available on the heap when we are close to a heap overflow, so we
 * can handle the overflow.  But how much do we really need?  I (rtoy)
 * think 256 pages is probably a decent amount.  (That's 1 MB for x86,
 * 2 MB for sparc, which has 8K pages.)
 */

unsigned long reserved_heap_pages = 256;

/*
 * The src. and dest. generations. Set before a GC starts scavenging.
 */
static int from_space;
static int new_space;

/*
 * GC structures and variables.
 */

/*
 * Number of pages within the dynamic heap, set up from the size of the
 * dynamic space.
 */
unsigned dynamic_space_pages;

/*
 * An array of page structures is statically allocated.
 * This helps quickly map between an address and its page structure.
 */
struct page *page_table;

/*
 * Heap base, needed for mapping addresses to page structures.
 */
static char *heap_base = NULL;

/*
 * Calculate the start address for the given page number.
 */
static char *
page_address(int page_num)
{
    return heap_base + GC_PAGE_SIZE * page_num;
}

/*
 * Find the page index within the page_table for the given address.
 * Returns -1 on failure.
 */
int
find_page_index(void *addr)
{
    int index = (char *) addr - heap_base;

    if (index >= 0) {
	index = (unsigned int) index / GC_PAGE_SIZE;
	if (index < dynamic_space_pages)
	    return index;
    }

    return -1;
}
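
/*
 * Illustrative sketch only (never compiled or called): the intended
 * round-trip between page_address and find_page_index for addresses
 * inside the dynamic space.
 */
#if 0
static void
page_index_round_trip_sketch(void)
{
    /* Any address within page 1 maps back to index 1 ... */
    gc_assert(find_page_index(heap_base + GC_PAGE_SIZE + 100) == 1);
    /* ... and the page's start address is recovered from the index. */
    gc_assert(page_address(1) == heap_base + GC_PAGE_SIZE);
}
#endif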

/*
 * This routine implements a write barrier used to record stores into
 * boxed regions outside of generation 0.  When such a store occurs
 * this routine will be automatically invoked by the page fault
 * handler.  If passed an address outside of the dynamic space, this
 * routine will return immediately with a value of 0.  Otherwise, the
 * page belonging to the address is made writable, the protection
 * change is recorded in the garbage collector page table, and a value
 * of 1 is returned.
 */
int
gc_write_barrier(void *addr)
{
    int page_index = find_page_index(addr);

    /* Check if the fault is within the dynamic space. */
    if (page_index == -1) {
	 return 0;
    }

    /* The page should have been marked write protected */
    if (!PAGE_WRITE_PROTECTED(page_index))
	 fprintf(stderr,
		 "*** Page fault in page not marked as write protected\n");

    /* Un-protect the page */
    os_protect((os_vm_address_t) page_address(page_index), GC_PAGE_SIZE, OS_VM_PROT_ALL);
    page_table[page_index].flags &= ~PAGE_WRITE_PROTECTED_MASK;
    page_table[page_index].flags |= PAGE_WRITE_PROTECT_CLEARED_MASK;

    return 1;
}
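
/*
 * Illustrative sketch only: how an OS-specific page-fault (SIGSEGV)
 * handler might use gc_write_barrier.  The handler name and signature
 * are hypothetical; the real hookup lives in the platform signal code.
 */
#if 0
static int
example_protection_fault_hook(void *fault_addr)
{
    /* Returns 1 when the fault hit a write-protected GC page that has
       now been un-protected, so the faulting store can be retried. */
    return gc_write_barrier(fault_addr);
}
#endif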

/*
 * A structure to hold the state of a generation.
 */
#define MEM_AGE_SHIFT 16
#define MEM_AGE_SCALE (1 << MEM_AGE_SHIFT)
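
/*
 * Worked example of this fixed-point age encoding (a sketch, assuming
 * the MEM_AGE_SHIFT of 16 above): an average age of 0.75 is stored as
 * 0.75 * MEM_AGE_SCALE = 49152, and gen_av_mem_age() below returns its
 * result in the same scale.
 */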

struct generation {

    /* The first page that gc_alloc checks on its next call. */
    int alloc_start_page;

    /* The first page that gc_alloc_unboxed checks on its next call. */
    int alloc_unboxed_start_page;

    /*
     * The first page that gc_alloc_large (boxed) considers on its next call.
     * Although it always allocates after the boxed_region.
     */
    int alloc_large_start_page;

    /*
     * The first page that gc_alloc_large (unboxed) considers on its next call.
     * Although it always allocates after the current_unboxed_region.
     */
    int alloc_large_unboxed_start_page;

    /* The bytes allocated to this generation. */
    int bytes_allocated;

    /* The number of bytes at which to trigger a GC */
    int gc_trigger;

    /* To calculate a new level for gc_trigger */
    int bytes_consed_between_gc;

    /* The number of GCs since the last raise. */
    int num_gc;

    /*
     * The average age after which a GC will raise objects to the
     * next generation.
     */
    int trigger_age;

    /*
     * The cumulative sum of the bytes allocated to this generation. It
     * is cleared after a GC on this generation, and updated before new
     * objects are added from a GC of a younger generation. Dividing by
     * the bytes_allocated will give the average age of the memory in
     * this generation since its last GC.
     */
    int cum_sum_bytes_allocated;

    /*
     * A minimum average memory age before a GC will occur helps prevent
     * a GC when a large number of new live objects have been added, in
     * which case a GC could be a waste of time.
     *
     * The age is represented as an integer between 0 and MEM_AGE_SCALE - 1,
     * corresponding to an age of 0 to (just less than) 1.
     */
    int min_av_mem_age;
};

/*
 * An array of generation structures. There needs to be one more
 * generation structure than actual generations as the oldest
 * generation is temporarily raised then lowered.
 */
static struct generation generations[NUM_GENERATIONS + 1];

/* Statistics about a generation, extracted from the generations
   array.  This gets returned to Lisp.
*/

struct generation_stats {
    int bytes_allocated;
    int gc_trigger;
    int bytes_consed_between_gc;
    int num_gc;
    int trigger_age;
    int cum_sum_bytes_allocated;
    int min_av_mem_age;
};

/*
 * The oldest generation that will currently be GCed by default.
 * Valid values are: 0, 1, ... (NUM_GENERATIONS - 1)
 *
 * A value of NUM_GENERATIONS - 1 enables GC on all generations; the
 * default is less.
 *
 * Setting this to 0 effectively disables the generational nature of
 * the GC. In some applications generational GC may not be useful
 * because there are no long-lived objects.
 *
 * An intermediate value could be handy after moving long-lived data
 * into an older generation so an unnecessary GC of this long-lived
 * data can be avoided.
 */
#define DEFAULT_OLDEST_GEN_TO_GC	3

unsigned int gencgc_oldest_gen_to_gc = DEFAULT_OLDEST_GEN_TO_GC;


/*
 * The maximum free page in the heap is maintained and used to update
 * ALLOCATION_POINTER which is used by the room function to limit its
 * search of the heap. XX Gencgc obviously needs to be better
 * integrated with the lisp code.
 *
 * Except on sparc and ppc, there's no ALLOCATION_POINTER, so it's
 * never updated.  So make this available (non-static).
 */
int last_free_page;

static void scan_weak_tables(void);
static void scan_weak_objects(void);

/*
 * Misc. heap functions.
 */

/*
 * Count the number of write protected pages within the given generation.
 */
static int
count_write_protect_generation_pages(int generation)
{
    int i;
    int cnt = 0;
    int mmask, mflags;

    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
	| PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK | generation;

    for (i = 0; i < last_free_page; i++)
	if (PAGE_FLAGS(i, mmask) == mflags)
	    cnt++;
    return cnt;
}

/*
 * Count the number of pages within the given generation.
 */
static int
count_generation_pages(int generation)
{
    int i;
    int cnt = 0;
    int mmask, mflags;

    mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | generation;

    for (i = 0; i < last_free_page; i++)
	if (PAGE_FLAGS(i, mmask) == mflags)
	    cnt++;
    return cnt;
}

/*
 * Count the number of dont_move pages.
 */
static int
count_dont_move_pages(void)
{
    int i;
    int cnt = 0;
    int mmask;

    mmask = PAGE_ALLOCATED_MASK | PAGE_DONT_MOVE_MASK;

    for (i = 0; i < last_free_page; i++)
	if (PAGE_FLAGS(i, mmask) == mmask)
	    cnt++;
    return cnt;
}

/*
 * Work through the pages and add up the number of bytes used for the
 * given generation.
 */
static int
generation_bytes_allocated(int generation)
{
    int i;
    int bytes_allocated = 0;
    int mmask, mflags;

    mmask = PAGE_ALLOCATED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | generation;

    for (i = 0; i < last_free_page; i++) {
	if (PAGE_FLAGS(i, mmask) == mflags)
	    bytes_allocated += page_table[i].bytes_used;
    }
    return bytes_allocated;
}

/*
 * Return the average age of the memory in a generation.
 */
static int
gen_av_mem_age(int gen)
{
    if (generations[gen].bytes_allocated == 0)
	return 0;

    return (((long long) generations[gen].cum_sum_bytes_allocated) << MEM_AGE_SHIFT) /
	generations[gen].bytes_allocated;
}


void
save_fpu_state(void* state)
{
#if defined(i386) || defined(__x86_64)
    if (fpu_mode == SSE2) {
        sse_save(state);
    } else {
        fpu_save(state);
    }
#else
    fpu_save(state);
#endif    
}

void
restore_fpu_state(void* state)
{
#if defined(i386) || defined(__x86_64)
    if (fpu_mode == SSE2) {
        sse_restore(state);
    } else {
        fpu_restore(state);
    }
#else
    fpu_restore(state);
#endif
}


/*
 * The verbose argument controls how much to print out:
 * 0 for normal level of detail; 1 for debugging.
 */
void
print_generation_stats(int verbose)
{
    int i, gens;

    FPU_STATE(fpu_state);
    
    /*
     * This code uses the FP instructions which may be set up for Lisp so
     * they need to be saved and reset for C.
     */

    save_fpu_state(fpu_state);

    /* Number of generations to print out. */
    if (verbose)
	gens = NUM_GENERATIONS + 1;
    else
	gens = NUM_GENERATIONS;

    /* Print the heap stats */
    fprintf(stderr, "          Page count (%d KB)\n", GC_PAGE_SIZE / 1024);
    fprintf(stderr,
	    "   Gen  Boxed Unboxed  LB   LUB    Alloc    Waste    Trigger   WP  GCs Mem-age\n");

    for (i = 0; i < gens; i++) {
	int j;
	int boxed_cnt = 0;
	int unboxed_cnt = 0;
	int large_boxed_cnt = 0;
	int large_unboxed_cnt = 0;

	for (j = 0; j < last_free_page; j++) {
	    int flags = page_table[j].flags;

	    if ((flags & PAGE_GENERATION_MASK) == i) {
		if (flags & PAGE_ALLOCATED_MASK) {
		    /*
		     * Count the number of boxed and unboxed pages within the
		     * given generation.
		     */
		    if (flags & PAGE_UNBOXED_MASK)
			if (flags & PAGE_LARGE_OBJECT_MASK)
			    large_unboxed_cnt++;
			else
			    unboxed_cnt++;
		    else if (flags & PAGE_LARGE_OBJECT_MASK)
			large_boxed_cnt++;
		    else
			boxed_cnt++;
		}
	    }
	}

        if (gc_assert_level > 0) {
            gc_assert(generations[i].bytes_allocated ==
                      generation_bytes_allocated(i));
        }

	fprintf(stderr, " %5d: %5d %5d %5d %5d %10d %6d %10d %4d %3d %7.4f\n",
		i, boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
		generations[i].bytes_allocated,
		GC_PAGE_SIZE * count_generation_pages(i) -
		generations[i].bytes_allocated, generations[i].gc_trigger,
		count_write_protect_generation_pages(i), generations[i].num_gc,
		(double)gen_av_mem_age(i) / MEM_AGE_SCALE);
    }
    fprintf(stderr, "   Total bytes alloc=%lu\n", bytes_allocated);

    restore_fpu_state(fpu_state);
}

/* Get statistics that are kept "on the fly" out of the generation
   array.
*/
void
get_generation_stats(int gen, struct generation_stats *stats)
{
    if (gen <= NUM_GENERATIONS) {
	stats->bytes_allocated = generations[gen].bytes_allocated;
	stats->gc_trigger = generations[gen].gc_trigger;
	stats->bytes_consed_between_gc =
	    generations[gen].bytes_consed_between_gc;
	stats->num_gc = generations[gen].num_gc;
	stats->trigger_age = generations[gen].trigger_age;
	stats->cum_sum_bytes_allocated =
	    generations[gen].cum_sum_bytes_allocated;
	stats->min_av_mem_age = generations[gen].min_av_mem_age;
    }
}

void
set_gc_trigger(int gen, int trigger)
{
    if (gen <= NUM_GENERATIONS) {
	generations[gen].gc_trigger = trigger;
    }
}

void
set_trigger_age(int gen, int trigger_age)
{
    if (gen <= NUM_GENERATIONS) {
	generations[gen].trigger_age = trigger_age;
    }
}

void
set_min_mem_age(int gen, double min_mem_age)
{
    if (gen <= NUM_GENERATIONS) {
	generations[gen].min_av_mem_age = min_mem_age * MEM_AGE_SCALE;
    }
}

/*
 * Allocation routines.
 *
 *
 * To support quick and inline allocation, regions of memory can be
 * allocated and then allocated from with just a free pointer and a
 * check against an end address.
 *
 * Since objects can be allocated to spaces with different properties
 * e.g. boxed/unboxed, generation, ages; there may need to be many
 * allocation regions.
 *
 * Each allocation region may start within a partly used page.
 * Many features of memory use are noted on a page wise basis,
 * E.g. the generation; so if a region starts within an existing
 * allocated page it must be consistent with this page.
 *
 * During the scavenging of the newspace, objects will be transported
 * into an allocation region, and pointers updated to point to this
 * allocation region. It is possible that these pointers will be
 * scavenged again before the allocation region is closed, E.g. due to
 * trans_list which jumps all over the place to cleanup the list. It
 * is important to be able to determine properties of all objects
 * pointed to when scavenging, E.g. to detect pointers to the
 * oldspace. Thus it's important that the allocation regions have the
 * correct properties set when allocated, and not just set when
 * closed.  The region allocation routines return regions with the
 * specified properties, and grab all the pages, setting their
 * properties appropriately, except that the amount used is not known.
 *
 * These regions are used to support quicker allocation using just a
 * free pointer. The actual space used by the region is not reflected
 * in the pages tables until it is closed. It can't be scavenged until
 * closed.
 *
 * When finished with the region it should be closed, which will
 * update the page tables for the actual space used returning unused
 * space. Further, it may be noted in the new regions, which is
 * necessary when scavenging the newspace.
 *
 * Large objects may be allocated directly without an allocation
 * region, the page tables are updated immediately.
 *
 * Unboxed objects don't contain pointers to other objects so don't need
 * scavenging. Further they can't contain pointers to younger
 * generations so WP is not needed.  By allocating pages to unboxed
 * objects the whole page never needs scavenging or write protecting.
 */
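
/*
 * A minimal sketch (never compiled or called) of the inline allocation
 * described above: bump the free pointer and check it against the end
 * address.  The real fast paths are the gc_alloc functions later in
 * this file; the helper below is hypothetical.
 */
#if 0
static void *
region_inline_alloc_sketch(struct alloc_region *region, int nbytes)
{
    char *new_free = (char *) region->free_pointer + nbytes;

    if (new_free <= (char *) region->end_addr) {
	void *result = region->free_pointer;

	region->free_pointer = new_free;
	return result;
    }
    return NULL;	/* Region exhausted: a new region must be opened. */
}
#endif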

/*
 * Only using two regions at present, both are for the current
 * newspace generation.
 */
struct alloc_region boxed_region;
struct alloc_region unboxed_region;

#if 0
/*
 * X hack. current lisp code uses the following. Need copying in/out.
 */
void *current_region_free_pointer;
void *current_region_end_addr;
#endif

/* The generation currently being allocated to. X */
static int gc_alloc_generation = 0;

extern void do_dynamic_space_overflow_warning(void);
extern void do_dynamic_space_overflow_error(void);

/* Handle heap overflow here, maybe. */
static void
handle_heap_overflow(const char *msg, int size)
{
    unsigned long heap_size_mb;

    if (msg) {
	fprintf(stderr, msg, size);
    }
#ifndef SPARSE_BLOCK_SIZE
#define SPARSE_BLOCK_SIZE (0)
#endif

    /* Figure out how many MB of heap we have */
    heap_size_mb = (dynamic_space_size + SPARSE_BLOCK_SIZE) >> 20;

    fprintf(stderr, " CMUCL has run out of dynamic heap space (%lu MB).\n",
	    heap_size_mb);
    /* Try to handle heap overflow somewhat gracefully if we can. */
#if defined(trap_DynamicSpaceOverflow) || defined(FEATURE_HEAP_OVERFLOW_CHECK)
    if (reserved_heap_pages == 0) {
	fprintf(stderr, "\n Returning to top-level.\n");
	do_dynamic_space_overflow_error();
    } else {
	fprintf(stderr,
		"  You can control heap size with the -dynamic-space-size commandline option.\n");
	do_dynamic_space_overflow_warning();
    }
#else
    print_generation_stats(1);

    exit(1);
#endif
}

/*
 * Enables debug messages for MODE_MADVISE and MODE_LAZY
 */
boolean gencgc_debug_madvise = FALSE;

static inline void
handle_madvise_first_page(int first_page)
{
    int flags = page_table[first_page].flags;
        
    if (gencgc_debug_madvise) {
        fprintf(stderr, "first_page = %d, FLAGS = %x, orig = %d",
                first_page, flags, page_table[first_page].bytes_used);
    }
    
    if (!PAGE_ALLOCATED(first_page)) {
        int *page_start = (int *) page_address(first_page);
        
        if (gencgc_debug_madvise) {
            fprintf(stderr, ": marker = %x", *page_start);
        }
        if (*page_start != 0) {
            memset(page_start, 0, GC_PAGE_SIZE);
        }
    }
    if (gencgc_debug_madvise) {
        fprintf(stderr, "\n");
    }
}

static void
handle_madvise_other_pages(int first_page, int last_page)
{
    int i;
    
    for (i = first_page + 1; i <= last_page; ++i) {
        if (!PAGE_ALLOCATED(i)) {
            int *page_start = (int *) page_address(i);

            if (gencgc_debug_madvise) {
                fprintf(stderr, "MADVISE page %d, FLAGS = %x: marker %x\n",
                        i, page_table[i].flags, *page_start);
            }
            if (*page_start != 0) {
                memset(page_start, 0, GC_PAGE_SIZE);
            }
        }
    }
}

/*
 * Find a new region with room for at least the given number of bytes.
 *
 * It starts looking at the current generation's alloc_start_page. So
 * may pick up from the previous region if there is enough space. This
 * keeps the allocation contiguous when scavenging the newspace.
 *
 * The alloc_region should have been closed by a call to
 * gc_alloc_update_page_tables, and will thus be in an empty state.
 *
 * To assist the scavenging functions, write protected pages are not
 * used. Free pages should not be write protected.
 *
 * It is critical to the conservative GC that the start of regions be
 * known. To help achieve this only small regions are allocated at a
 * time.
 *
 * During scavenging, pointers may be found that point within the
 * current region and the page generation must be set so pointers to
 * the from space can be recognised.  So the generation of pages in
 * the region are set to gc_alloc_generation.  To prevent another
 * allocation call using the same pages, all the pages in the region
 * are allocated, although they will initially be empty.
 */
static void
gc_alloc_new_region(int nbytes, int unboxed, struct alloc_region *alloc_region)
{
    int first_page;
    int last_page;
    int region_size;
    int restart_page;
    int bytes_found;
    int num_pages;
    int i;
    int mmask, mflags;

    /* Shut up some compiler warnings */
    last_page = bytes_found = 0;

#if 0
    fprintf(stderr, "alloc_new_region for %d bytes from gen %d\n",
	    nbytes, gc_alloc_generation);
#endif

    /* Check that the region is in a reset state. */
    if (gc_assert_level > 0) {
        gc_assert(alloc_region->first_page == 0
                  && alloc_region->last_page == -1
                  && alloc_region->free_pointer == alloc_region->end_addr);
    }

    if (unboxed)
	restart_page =
	    generations[gc_alloc_generation].alloc_unboxed_start_page;
    else
	restart_page = generations[gc_alloc_generation].alloc_start_page;

    /*
     * Search for a contiguous free region of at least nbytes with the
     * given properties: boxed/unboxed, generation. First setting up the
     * mask and matching flags.
     */

    mmask = PAGE_ALLOCATED_MASK | PAGE_WRITE_PROTECTED_MASK
	| PAGE_LARGE_OBJECT_MASK | PAGE_DONT_MOVE_MASK
	| PAGE_UNBOXED_MASK | PAGE_GENERATION_MASK;
    mflags = PAGE_ALLOCATED_MASK | (unboxed << PAGE_UNBOXED_SHIFT)
	| gc_alloc_generation;

    do {
	first_page = restart_page;

	/*
	 * First search for a page with at least 32 bytes free, that is
	 * not write protected, or marked dont_move.
	 */

	while (first_page < dynamic_space_pages) {
	    int flags = page_table[first_page].flags;

	    if (!(flags & PAGE_ALLOCATED_MASK)
		|| ((flags & mmask) == mflags &&
		    page_table[first_page].bytes_used < GC_PAGE_SIZE - 32))
		break;
	    first_page++;
	}

	/* Check for a failure */
	if (first_page >= dynamic_space_pages - reserved_heap_pages) {
#if 0
	    handle_heap_overflow("*A2 gc_alloc_new_region failed, nbytes=%d.\n",
				 nbytes);
#else
	    break;
#endif
	}

        if (gc_assert_level > 0) {
            gc_assert(!PAGE_WRITE_PROTECTED(first_page));
        }

#if 0
	fprintf(stderr, "  first_page=%d bytes_used=%d\n",
		first_page, page_table[first_page].bytes_used);
#endif

	/*
	 * Now search forward to calculate the available region size.  It
	 * tries to keep going until nbytes are found and the number of
	 * pages is greater than some level. This helps keep down the
	 * number of pages in a region.
	 */
	last_page = first_page;
	bytes_found = GC_PAGE_SIZE - page_table[first_page].bytes_used;
	num_pages = 1;
	while ((bytes_found < nbytes || num_pages < 2)
	       && last_page < dynamic_space_pages - 1
	       && !PAGE_ALLOCATED(last_page + 1)) {
	    last_page++;
	    num_pages++;
	    bytes_found += GC_PAGE_SIZE;
            if (gc_assert_level > 0) {
                gc_assert(!PAGE_WRITE_PROTECTED(last_page));
            }
	}

	region_size = (GC_PAGE_SIZE - page_table[first_page].bytes_used)
	    + GC_PAGE_SIZE * (last_page - first_page);

        if (gc_assert_level > 0) {
            gc_assert(bytes_found == region_size);
        }
#if 0
	fprintf(stderr, "  last_page=%d bytes_found=%d num_pages=%d\n",
		last_page, bytes_found, num_pages);
#endif

	restart_page = last_page + 1;
    }
    while (restart_page < dynamic_space_pages && bytes_found < nbytes);

    if (first_page >= dynamic_space_pages - reserved_heap_pages) {
	handle_heap_overflow("*A2 gc_alloc_new_region failed, nbytes=%d.\n",
			     nbytes);
    }

    /* Check for a failure */
    if (restart_page >= (dynamic_space_pages - reserved_heap_pages)
	&& bytes_found < nbytes) {
	handle_heap_overflow("*A1 gc_alloc_new_region failed, nbytes=%d.\n",
			     nbytes);
    }
#if 0
    fprintf(stderr,
	    "gc_alloc_new_region gen %d: %d bytes: from pages %d to %d: addr=%x\n",
	    gc_alloc_generation, bytes_found, first_page, last_page,
	    page_address(first_page));
#endif

    /* Setup the alloc_region. */
    alloc_region->first_page = first_page;
    alloc_region->last_page = last_page;
    alloc_region->start_addr = page_table[first_page].bytes_used
	+ page_address(first_page);
    alloc_region->free_pointer = alloc_region->start_addr;
    alloc_region->end_addr = alloc_region->start_addr + bytes_found;

    if ((gencgc_unmap_zero == MODE_MADVISE)
        || (gencgc_unmap_zero == MODE_LAZY)) {
        handle_madvise_first_page(first_page);
        handle_madvise_other_pages(first_page, last_page);
    }

    if (DO_GENCGC_ZERO_CHECK) {
	int *p;

	for (p = (int *) alloc_region->start_addr;
	     p < (int *) alloc_region->end_addr; p++)
	    if (*p != 0)
		fprintf(stderr, "** new region not zero @ %lx: %x\n",
			(unsigned long) p, *p);
    }

    /* Setup the pages. */

    /* The first page may have already been in use. */
    if (page_table[first_page].bytes_used == 0) {
	PAGE_FLAGS_UPDATE(first_page, mmask, mflags);
	page_table[first_page].first_object_offset = 0;
    }

    if (gc_assert_level > 0) {
        gc_assert(PAGE_ALLOCATED(first_page));
        gc_assert(PAGE_UNBOXED_VAL(first_page) == unboxed);
        gc_assert(PAGE_GENERATION(first_page) == gc_alloc_generation);
        gc_assert(!PAGE_LARGE_OBJECT(first_page));
    }
    