reload_saved_string_length = 0;
read_command_line_options (argc, argv);
+#ifndef ENABLE_SMP
setup_memory ((BLOCKS_TO_BYTES (option_heap_size)),
(BLOCKS_TO_BYTES (option_stack_size)),
(BLOCKS_TO_BYTES (option_constant_size)));
-#ifdef ENABLE_SMP
+#else
+ setup_memory ((BLOCKS_TO_BYTES (option_heap_size)),
+ (BLOCKS_TO_BYTES (option_constant_size)),
+ option_processor_count,
+ (BLOCKS_TO_BYTES (option_processor_heap_size)),
+ (BLOCKS_TO_BYTES (option_stack_size)));
setup_processors (option_processor_count);
#endif
SCHEME_OBJECT * block_end = ((CC_BLOCK_ADDR_END (block)) - 1);
return
((((HEAP_ADDRESS_P (block)) && (HEAP_ADDRESS_P (block_end)))
+#ifdef ENABLE_SMP
+ || ((SHARED_HEAP_ADDRESS_P (block))
+ && (SHARED_HEAP_ADDRESS_P (block_end)))
+#endif
|| ((ADDRESS_IN_CONSTANT_P (block))
&& (ADDRESS_IN_CONSTANT_P (block_end))))
&& (ENVIRONMENT_P (*block_end)));
(unsigned long)Free);
dump_heap_area_at (addr, heap_start, Free);
}
+#ifdef ENABLE_SMP
+ else if (shared_heap_start <= addr && addr < shared_heap_free)
+ {
+ outf_error ("Scanning shared heap (%#lx - %#lx):\n",
+ (unsigned long)shared_heap_start,
+ (unsigned long)shared_heap_free);
+ dump_heap_area_at (addr, shared_heap_start, shared_heap_free);
+ }
+#endif
else
{
outf_error ("%#lx: not a heap address\n", (unsigned long)addr);
{
bool c = verify_heap_area ("constants", constant_start, constant_alloc_next);
bool h = verify_heap_area ("heap", heap_start, Free);
+#ifdef ENABLE_SMP
+  bool s = verify_heap_area ("shared heap", shared_heap_start, shared_heap_free);
+ return (c && h && s);
+#else
return (c && h);
+#endif
}
#else /* !ENABLE_DEBUGGING_TOOLS */
extern SCHEME_OBJECT * heap_alloc_limit;
extern SCHEME_OBJECT * heap_start;
extern SCHEME_OBJECT * heap_end;
+#ifdef ENABLE_SMP
+extern SCHEME_OBJECT * p0_heap_start;
+extern SCHEME_OBJECT * shared_heap_start;
+extern SCHEME_OBJECT * shared_heap_free;
+extern SCHEME_OBJECT * shared_heap_end;
+#endif
extern SCHEME_OBJECT * stack_pointer;
extern SCHEME_OBJECT * stack_guard;
extern struct obstack scratch_obstack;
extern struct obstack ffi_obstack;
-extern unsigned long n_heap_blocks;
-extern unsigned long n_constant_blocks;
-extern unsigned long n_stack_blocks;
-
extern SCHEME_OBJECT * memory_block_start;
extern SCHEME_OBJECT * memory_block_end;
extern bool object_in_constant_space_p (SCHEME_OBJECT);
extern SCHEME_OBJECT * copy_to_constant_space (SCHEME_OBJECT *, unsigned long);
+#ifndef ENABLE_SMP
extern void setup_memory (unsigned long, unsigned long, unsigned long);
+#else
+extern void setup_memory (unsigned long, unsigned long,
+ int, unsigned long, unsigned long);
+#endif
extern void reset_memory (void);
/* Utilities for primitives */
static void finalize_fasl_header (unsigned long);
static bool write_fasl_file
(SCHEME_OBJECT *, SCHEME_OBJECT *, fasl_file_handle_t);
+
+#ifndef ENABLE_SMP
+#define HEAP_START heap_start
+#define HEAP_END heap_end
+#else
+#define HEAP_START shared_heap_start
+#define HEAP_END shared_heap_end
+#endif
\f
/* FASDUMP:
copy must have the global value cell of symbols set to UNBOUND.
Second, and worse, all the broken hearts created during the process
must be restored to their original values. This last is done by
- growing the copy of the object in the bottom of spare heap, keeping
- track of the locations of broken hearts and original contents at
- the top of the spare heap. */
+ keeping track of the locations of broken hearts and original
+ contents in a malloced/realloced array of fixup_ts. */
DEFINE_PRIMITIVE ("PRIMITIVE-FASDUMP", Prim_prim_fasdump, 3, 3,
"(OBJECT NAMESTRING FLAG)\n\
#ifdef ENABLE_SMP
assert (gc_processor == self);
#endif
- open_tospace (heap_start);
+ open_tospace (HEAP_START);
/* This must be _before_ the call to initialize_fixups(): */
transaction_record_action (tat_abort, abort_fasdump, 0);
initialize_fixups ();
static SCHEME_OBJECT * fasdump_saved_stack_guard;
static SCHEME_OBJECT * fasdump_saved_stack_start;
static SCHEME_OBJECT * fasdump_saved_stack_end;
+#ifdef ENABLE_SMP
+static SCHEME_OBJECT * fasdump_saved_shared_heap_free;
+static SCHEME_OBJECT * fasdump_saved_shared_heap_start;
+static SCHEME_OBJECT * fasdump_saved_shared_heap_end;
+#endif
static SCHEME_OBJECT * fasdump_saved_constant_alloc_next;
static SCHEME_OBJECT * fasdump_saved_constant_start;
static SCHEME_OBJECT * fasdump_saved_constant_end;
SAVE_GC_VAR (stack_guard);
SAVE_GC_VAR (stack_start);
SAVE_GC_VAR (stack_end);
+#ifdef ENABLE_SMP
+ SAVE_GC_VAR (shared_heap_free);
+ SAVE_GC_VAR (shared_heap_start);
+ SAVE_GC_VAR (shared_heap_end);
+#endif
SAVE_GC_VAR (constant_alloc_next);
SAVE_GC_VAR (constant_start);
SAVE_GC_VAR (constant_end);
COMPARE_GC_VAR (stack_guard);
COMPARE_GC_VAR (stack_start);
COMPARE_GC_VAR (stack_end);
+#ifdef ENABLE_SMP
+ COMPARE_GC_VAR (shared_heap_free);
+ COMPARE_GC_VAR (shared_heap_start);
+ COMPARE_GC_VAR (shared_heap_end);
+#endif
COMPARE_GC_VAR (constant_alloc_next);
COMPARE_GC_VAR (constant_start);
COMPARE_GC_VAR (constant_end);
fasdump_saved_heap_start,
((fasdump_saved_Free - fasdump_saved_heap_start)
* SIZEOF_SCHEME_OBJECT));
+#ifdef ENABLE_SMP
+ (void) mhash (ctx,
+ fasdump_saved_shared_heap_start,
+ ((fasdump_saved_shared_heap_free
+ - fasdump_saved_shared_heap_start)
+ * SIZEOF_SCHEME_OBJECT));
+#endif
return (mhash_end (ctx));
}
static
DEFINE_GC_HANDLER (handle_ephemeron)
{
- /* Count each one once by counting only if there is no borken heart. */
+ /* Count each one once by counting only if there is no broken heart. */
if (0 == (GC_PRECHECK_FROM (OBJECT_ADDRESS (object))))
dumped_ephemeron_count += 1;
return (gc_handle_unaligned_vector (scan, object));
Saves an image of the current world to the file NAMESTRING.\n\
When the file is reloaded, PROCEDURE is called with an argument of #F.")
{
- SCHEME_OBJECT * to = Free;
+ SCHEME_OBJECT * to;
SCHEME_OBJECT * prim_table_start;
SCHEME_OBJECT * c_code_table_start;
bool result;
CHECK_ARG (1, INTERPRETER_APPLICABLE_P);
CHECK_ARG (2, STRING_P);
- Primitive_GC_If_Needed (5);
+#ifndef ENABLE_SMP
+ to = Free;
+ Primitive_GC_If_Needed (6);
+#else
+ ENTER_CRITICAL_SECTION ("band dump");
+
+ smp_gc_start ();
+ open_tospace (shared_heap_start);
+
+ initialize_weak_chain ();
+ ephemeron_count = 0;
+
+ std_gc_pt1 ();
+ std_gc_pt2 ();
+
+ to = shared_heap_free;
+
+ smp_gc_finish ();
+ assert (Free == heap_start);
+ assert (SHARED_HEAP_AVAILABLE_P (6));
+#endif
initialize_fasl_header (true, true);
{
SCHEME_OBJECT comb;
to += (FASLHDR_C_CODE_TABLE_SIZE (fh));
#endif
- if (to > heap_end)
+ if (to > HEAP_END)
result = false;
else
{
export_c_code_table (c_code_table_start);
#endif
- (FASLHDR_HEAP_START (fh)) = heap_start;
+ (FASLHDR_HEAP_START (fh)) = HEAP_START;
(FASLHDR_HEAP_END (fh)) = prim_table_start;
(FASLHDR_CONSTANT_START (fh)) = constant_start;
(FASLHDR_CONSTANT_END (fh)) = constant_alloc_next;
reset_allocator_parameters
((FASLHDR_CONSTANT_SIZE (fh)), (FASLHDR_HEAP_RESERVED (fh)));
+#ifdef ENABLE_SMP
+ reset_processor_allocator (self);
+#endif
/* We cleared the heap; the ephemeron array is now bogus. */
ephemeron_array = SHARP_F;
result = (load_file (handle, 0));
typedef void gc_abort_handler_t (void);
typedef bool gc_walk_proc_t (SCHEME_OBJECT *, SCHEME_OBJECT *, void *);
+#ifndef ENABLE_SMP
extern void initialize_gc
(unsigned long, SCHEME_OBJECT **, SCHEME_OBJECT **,
gc_tospace_allocator_t *, gc_abort_handler_t * NORETURN);
+#else
+extern void initialize_gc
+ (unsigned long, gc_tospace_allocator_t *, gc_abort_handler_t * NORETURN);
+#endif
extern void resize_tospace (unsigned long);
extern void open_tospace (SCHEME_OBJECT *);
*/
\f
#include "object.h"
-#include "outf.h"
+#include "extern.h"
#include "gccode.h"
/* For ephemeron layout. */
/* For memory advice. */
#include "ostop.h"
-static SCHEME_OBJECT ** p_fromspace_start;
-static SCHEME_OBJECT ** p_fromspace_end;
static gc_tospace_allocator_t * gc_tospace_allocator;
static gc_abort_handler_t * gc_abort_handler NORETURN;
static SCHEME_OBJECT * current_scan;
static SCHEME_OBJECT current_object;
+#ifndef ENABLE_SMP
+static SCHEME_OBJECT ** p_fromspace_start;
+static SCHEME_OBJECT ** p_fromspace_end;
+
#define ADDRESS_IN_FROMSPACE_P(addr) \
((((void *) (addr)) >= ((void *) (*p_fromspace_start))) \
&& (((void *) (addr)) < ((void *) (*p_fromspace_end))))
+#else /* ENABLE_SMP */
+
+#define ADDRESS_IN_FROMSPACE_P(addr) \
+ (((((void *) (addr)) >= ((void *) (p0_heap_start))) \
+ && (((void *) (addr)) < ((void *) (constant_start)))) \
+ || ((((void *) (addr)) >= ((void *) (shared_heap_start))) \
+ && (((void *) (addr)) < ((void *) (shared_heap_free)))))
+#endif
+
#define TOSPACE_TO_NEWSPACE(p) (((p) - tospace_start) + newspace_start)
#define NEWSPACE_TO_TOSPACE(p) (((p) - newspace_start) + tospace_start)
static SCHEME_OBJECT ephemeron_queue = SHARP_F;
static bool scanning_ephemerons_p = false;
-extern SCHEME_OBJECT ephemeron_array;
-extern unsigned long ephemeron_count;
-
static void queue_ephemerons_for_key (SCHEME_OBJECT *);
static SCHEME_OBJECT gc_transport_weak_pair (SCHEME_OBJECT);
static SCHEME_OBJECT gc_transport_ephemeron (SCHEME_OBJECT);
\f
void
initialize_gc (unsigned long n_words,
+#ifndef ENABLE_SMP
SCHEME_OBJECT ** pf_start,
SCHEME_OBJECT ** pf_end,
+#endif
gc_tospace_allocator_t * allocator,
gc_abort_handler_t * abort_handler NORETURN)
{
+#ifndef ENABLE_SMP
p_fromspace_start = pf_start;
p_fromspace_end = pf_end;
+#endif
gc_tospace_allocator = allocator;
gc_abort_handler = abort_handler;
CLOSE_TOSPACE ();
static unsigned long saved_heap_size;
static unsigned long saved_constant_size;
-static unsigned long saved_stack_size;
+unsigned long saved_stack_size;
+#ifdef ENABLE_SMP
+int saved_processor_count;
+unsigned long saved_processor_heap_size;
+#endif
static gc_tospace_allocator_t allocate_tospace;
static gc_abort_handler_t abort_gc NORETURN;
static unsigned long compute_ephemeron_array_length (unsigned long);
-/* Memory Allocation, sequential processor:
+#ifndef ENABLE_SMP
+
+/* Memory Allocation, uni-processor:
oo
------------------------------------------
initialize_gc (heap_size, (&heap_start), (&Free), allocate_tospace, abort_gc);
}
+#else /* ENABLE_SMP */
+
+/* Memory Allocation, multi-processor:
+
+ Machine variables, both thread-local & shared:
+oo ---------------- ---------------------------------
+ -------------------------
+ | To Space |
+ | |
+ ------------------------- <- to_space, malloced
+ . .
+ . .
+ . .
+ ------------------------- <- memory_block_end
+ | (Shared) Heap | & shared_heap_end
+ | |
+ | --------------------- | <- shared_heap_free
+ | /\ |
+ | || |
+ ------------------------- <- shared_heap_start
+ | Constant (& Pure) | & constant_end, moved by purify
+ | |
+ | -------------------- | <- constant_alloc_next
+ | /\ |
+ | || |
+ | || |
+ ------------------------- <- heap_end constant_start & pN->heap_end
+ | --------------------- | <- heap_alloc_limit
+ | |
+ | --------------------- | <- Free pN->heap_free
+ | /\ |
+ | ProcessorN Heap || |
+ ------------------------- <- heap_start pN->heap_start
+ ...
+ ------------------------- <- heap_end p0->heap_end & p1->heap_start
+ | --------------------- | <- heap_alloc_limit
+ | |
+ | --------------------- | <- Free p0->free_pointer
+ | /\ |
+ | Processor0 Heap || |
+ ------------------------- <- heap_start p0->heap_start & p0_heap_start
+ | ProcessorN Stack || | & pN->stack_end
+ | \/ |
+ | --------------------- | <- stack_pointer pN->stack_pointer
+ | |
+ | --------------------- | <- stack_guard
+ ------------------------- <- stack_start pN->stack_start
+ ...
+ ------------------------- <- p0->stack_end
+ | Processor0 Stack || |
+ | \/ |
+ | ----------------------| <- stack_pointer p0->stack_pointer
+ | |
+ | ----------------------| <- stack_guard
+ ------------------------- <- stack_start p0->stack_start
+0 & memory_block_start
+
+ Each area has a pointer to the next free word. For the stack it is
+ a pointer to the last word in use. */
+
+void
+setup_memory (unsigned long heap_size,
+ unsigned long constant_size,
+ int processor_count,
+ unsigned long processor_heap_size,
+ unsigned long stack_size)
+{
+ unsigned long total_size;
+ ALLOCATE_REGISTERS ();
+
+ if (heap_size == 0)
+ {
+ outf_fatal ("Invalid (zero) heap size.\n");
+ outf_flush_fatal ();
+ exit (1);
+ }
+ if (stack_size == 0)
+ {
+ outf_fatal ("Invalid (zero) stack size.\n");
+ outf_flush_fatal ();
+ exit (1);
+ }
+  if (processor_count <= 0)
+    {
+      outf_fatal ("Invalid (zero or negative) processor count.\n");
+ outf_flush_fatal ();
+ exit (1);
+ }
+
+ if (processor_heap_size == 0)
+ {
+ processor_heap_size = heap_size; /* / 2 / processor_count; ??? */
+ if (processor_heap_size < SIZEOF_SCHEME_OBJECT*1024*1024)
+ processor_heap_size = SIZEOF_SCHEME_OBJECT*1024*1024;
+ }
+ total_size = (heap_size + constant_size
+ + (processor_count * (stack_size + processor_heap_size)));
+
+ if (total_size >= DATUM_MASK)
+ {
+ outf_fatal ("Not enough heap addresses for this configuration.\n");
+ outf_flush_fatal ();
+ exit (1);
+ }
+
+ /* Allocate */
+ ALLOCATE_HEAP_SPACE (total_size, memory_block_start, memory_block_end);
+
+ if (memory_block_start == 0)
+ {
+ outf_fatal ("Not enough memory for this configuration.\n");
+ outf_flush_fatal ();
+ reset_memory ();
+ exit (1);
+ }
+
+ if ((ADDRESS_TO_DATUM (memory_block_end)) > DATUM_MASK)
+ {
+ outf_fatal ("Requested allocation is too large.\n");
+ outf_fatal ("Try again with a smaller argument to '--heap'.\n");
+ outf_flush_fatal ();
+ reset_memory ();
+ exit (1);
+ }
+
+ saved_heap_size = heap_size;
+ saved_stack_size = stack_size;
+ saved_constant_size = constant_size;
+ saved_processor_count = processor_count;
+ saved_processor_heap_size = processor_heap_size;
+ reset_allocator_parameters (0, 0);
+ initialize_gc (saved_heap_size, allocate_tospace, abort_gc);
+}
+#endif /* ENABLE_SMP */
+
void
reset_memory (void)
{
unsigned long n_reserved)
{
return
- ((memory_block_start
- + saved_stack_size
+ ((constant_start
+ n_constant + CONSTANT_SPACE_FUDGE
+ n_heap + ((n_reserved == 0) ? DEFAULT_HEAP_RESERVED : n_reserved))
< memory_block_end);
}
+#ifndef ENABLE_SMP
+
void
reset_allocator_parameters (unsigned long n_constant, unsigned long reserved)
{
STACK_RESET ();
}
+#else /* ENABLE_SMP */
+
+void
+reset_allocator_parameters (unsigned long n_constant, unsigned long reserved)
+{
+ heap_reserved = ((reserved == 0) ? DEFAULT_HEAP_RESERVED : reserved);
+ p0_heap_start = (memory_block_start
+ + saved_processor_count * saved_stack_size);
+ constant_start = (memory_block_start
+ + saved_processor_count * (saved_stack_size
+ + saved_processor_heap_size));
+ constant_alloc_next = constant_start;
+ constant_end = (constant_alloc_next + n_constant + CONSTANT_SPACE_FUDGE);
+ shared_heap_start = constant_end;
+ shared_heap_free = shared_heap_start;
+ shared_heap_end = memory_block_end;
+}
+
+void
+reset_processor_allocator (processor_t *p)
+{
+ /* All of these variables should be thread-local. */
+ SET_STACK_LIMITS (p->stack_start, saved_stack_size);
+ heap_start = p->heap_start;
+ heap_end = p->heap_end;
+ Free = heap_start;
+
+ RESET_HEAP_ALLOC_LIMIT ();
+ INITIALIZE_STACK ();
+ STACK_RESET ();
+}
+
+#define FOR_EACH_PROCESSOR(stmt) do { \
+ processor_t *P = processors; \
+ while (P != NULL) \
+ { \
+ stmt; \
+ P = P->next; \
+ } \
+ } while (0)
+
+#endif
+
static void
allocate_tospace (unsigned long n_words,
SCHEME_OBJECT ** start_r, SCHEME_OBJECT ** end_r)
object_in_heap_p (SCHEME_OBJECT object)
{
SCHEME_OBJECT * address = (get_object_address (object));
- return ((address != 0) && (ADDRESS_IN_HEAP_P (address)));
+ return ((address != 0) && ((ADDRESS_IN_HEAP_P (address))
+#ifdef ENABLE_SMP
+ || (ADDRESS_IN_SHARED_HEAP_P (address))
+#endif
+ ));
}
\f
DEFINE_PRIMITIVE ("GARBAGE-COLLECT", Prim_garbage_collect, 1, 1,
STACK_CHECK_FATAL ("GC");
if (Free > heap_end)
{
+#ifndef ENABLE_SMP
outf_fatal ("\nGC has been delayed too long!\n");
+#else
+ outf_fatal ("\n;%d GC has been delayed too long!\n", self->id);
+#endif
outf_fatal
("Free = %#lx; heap_alloc_limit = %#lx; heap_end = %#lx\n",
((unsigned long) Free),
if (GC_Debug == true) verify_heap ();
#endif
-#ifdef ENABLE_SMP
- assert (gc_processor == self);
-#endif
+#ifndef ENABLE_SMP
+
open_tospace (heap_start);
+
+#else
+
+ smp_gc_start ();
+ open_tospace (shared_heap_start);
+
+#endif /* ENABLE_SMP */
+
initialize_weak_chain ();
ephemeron_count = 0;
std_gc_pt1 ();
std_gc_pt2 ();
+#ifdef ENABLE_SMP
+ smp_gc_finish ();
+#endif
+
Will_Push (CONTINUATION_SIZE);
SET_RC (RC_NORMAL_GC_DONE);
SET_EXP (ULONG_TO_FIXNUM ((HEAP_AVAILABLE > gc_space_needed)
saved_to = (get_newspace_ptr ());
add_to_tospace (fixed_objects);
+#ifndef ENABLE_SMP
add_to_tospace
(MAKE_POINTER_OBJECT (UNMARKED_HISTORY_TYPE, history_register));
+#else
+ FOR_EACH_PROCESSOR
+ (add_to_tospace (MAKE_POINTER_OBJECT (UNMARKED_HISTORY_TYPE,
+ P->history_register)));
+#endif
current_gc_table = (std_gc_table ());
+#ifndef ENABLE_SMP
gc_scan_oldspace (stack_pointer, stack_end);
+#else
+ FOR_EACH_PROCESSOR
+ (gc_scan_oldspace (P->stack_pointer, P->stack_end));
+#endif
gc_scan_oldspace (constant_start, constant_alloc_next);
gc_scan_tospace (saved_to, 0);
update_weak_pointers ();
}
+#ifndef ENABLE_SMP
+
+#define EPHEMERON_GC_NEEDED(length,n_ephemerons) \
+ !HEAP_AVAILABLE_P ((VECTOR_DATA + length) + (n_ephemerons * EPHEMERON_SIZE))
+
+#define make_ephemeron_vector(length) (make_vector (length, SHARP_F, false))
+
+#else
+
+#define EPHEMERON_GC_NEEDED(length,n_ephemerons) \
+ !((HEAP_AVAILABLE_P (n_ephemerons * EPHEMERON_SIZE)) \
+ && SHARED_HEAP_AVAILABLE_P (VECTOR_DATA + length))
+
+static SCHEME_OBJECT
+make_ephemeron_vector (unsigned long length)
+{
+ SCHEME_OBJECT result;
+ result = (MAKE_POINTER_OBJECT (TC_VECTOR, shared_heap_free));
+ (*shared_heap_free++) = (MAKE_OBJECT (TC_MANIFEST_VECTOR, length));
+ while ((length--) > 0)
+ (*shared_heap_free++) = SHARP_F;
+ return (result);
+}
+#endif
+
void
std_gc_pt2 (void)
{
SCHEME_OBJECT * p = (get_newspace_ptr ());
(void) save_tospace (save_tospace_copy, 0);
+#ifndef ENABLE_SMP
Free = p;
+#else
+ shared_heap_free = p;
+ Free = heap_start;
+#endif
fixed_objects = (*saved_to++);
+#ifndef ENABLE_SMP
history_register = (OBJECT_ADDRESS (*saved_to++));
+#else
+ FOR_EACH_PROCESSOR
+ (P->history_register = (OBJECT_ADDRESS (*saved_to++)));
+ history_register = self->history_register;
+#endif
saved_to = 0;
{
unsigned long length
= (compute_ephemeron_array_length
(ephemeron_count + n_ephemerons_requested));
- if (!HEAP_AVAILABLE_P
- ((VECTOR_DATA + length) + (n_ephemerons_requested * EPHEMERON_SIZE)))
+ if (EPHEMERON_GC_NEEDED (length, n_ephemerons_requested))
{
if (ephemeron_request_hard_p)
gc_space_needed += (VECTOR_DATA + length);
/* This should never trigger, because we discard the previous
ephemeron array, which always has room for at least as many
ephemerons as are now live. */
- if (!HEAP_AVAILABLE_P (VECTOR_DATA + length))
+ if (!SHARED_HEAP_AVAILABLE_P (VECTOR_DATA + length))
std_gc_death ("No room for ephemeron array");
#endif
}
- ephemeron_array = (make_vector (length, SHARP_F, false));
+ ephemeron_array = make_ephemeron_vector (length);
n_ephemerons_requested = 0;
ephemeron_request_hard_p = false;
}
if (!ephemeron_array_big_enough_p (n))
{
unsigned long length = (compute_ephemeron_array_length (n));
- assert (HEAP_AVAILABLE_P (VECTOR_DATA + length));
- ephemeron_array = (make_vector (length, SHARP_F, false));
+ assert (SHARED_HEAP_AVAILABLE_P (VECTOR_DATA + length));
+ ephemeron_array = make_ephemeron_vector (length);
}
}
\f
static void
-gc_if_needed_for_ephemeron (unsigned long extra_space)
+gc_if_needed_for_ephemeron (unsigned long table_space)
{
- if (GC_NEEDED_P (EPHEMERON_SIZE + extra_space))
+#ifndef ENABLE_SMP
+ if (GC_NEEDED_P (EPHEMERON_SIZE + table_space))
{
n_ephemerons_requested = 1;
ephemeron_request_hard_p = true;
Primitive_GC (EPHEMERON_SIZE);
}
+#else
+ if ((GC_NEEDED_P (EPHEMERON_SIZE))
+ || ((!SHARED_HEAP_AVAILABLE_P (table_space))
+ && (GC_ENABLED_P())))
+ {
+ n_ephemerons_requested = 1;
+ ephemeron_request_hard_p = true;
+ Primitive_GC (EPHEMERON_SIZE);
+ }
+#endif
}
DEFINE_PRIMITIVE ("MAKE-EPHEMERON", Prim_make_ephemeron, 2, 2, 0)
unsigned long length
= (compute_ephemeron_array_length (ephemeron_count));
gc_if_needed_for_ephemeron (VECTOR_DATA + length);
- ephemeron_array = (make_vector (length, SHARP_F, false));
+ ephemeron_array = (make_ephemeron_vector (length));
}
{
SCHEME_OBJECT * addr = Free;
#define HEAP_AVAILABLE \
((unsigned long) ((FREE_OK_P (Free)) ? (heap_alloc_limit - Free) : 0))
+#ifndef ENABLE_SMP
+# define SHARED_HEAP_AVAILABLE_P HEAP_AVAILABLE_P
+#else
+# define SHARED_HEAP_AVAILABLE_P(n_words) \
+ ((shared_heap_free + (n_words)) < shared_heap_end)
+# define SHARED_HEAP_AVAILABLE \
+ ((unsigned long) (shared_heap_end - shared_heap_free))
+#endif
+
#define FREE_OK_P(free) \
(((free) >= heap_start) && ((free) < heap_alloc_limit))
#define ADDRESS_IN_HEAP_P(address) \
(((address) >= heap_start) && ((address) < heap_end))
+#ifdef ENABLE_SMP
+#define ADDRESS_IN_SHARED_HEAP_P(address) \
+ (((address) >= shared_heap_start) && ((address) < shared_heap_end))
+#endif
+
#define ADDRESS_IN_STACK_P(address) \
(((address) >= stack_start) && ((address) < stack_end))
extern bool allocations_ok_p (unsigned long, unsigned long, unsigned long);
extern void reset_allocator_parameters (unsigned long, unsigned long);
+#ifdef ENABLE_SMP
+extern void reset_processor_allocator (processor_t *p);
+#endif
extern bool object_in_heap_p (SCHEME_OBJECT);
extern void std_gc_pt1 (void);
extern void std_gc_pt2 (void);
#define HEAP_ADDRESS_P(address) \
(((address) >= heap_start) && ((address) < Free))
+#ifdef ENABLE_SMP
+#define SHARED_HEAP_ADDRESS_P(address) \
+ (((address) >= shared_heap_start) && ((address) < shared_heap_free))
+#endif
#ifndef FLOATING_ALIGNMENT
# define FLOATING_ALIGNMENT 0
pc_location = pc_in_heap;
block_address = (find_block_address (((void *) pc), heap_start));
}
+  else if ((((ULONG) shared_heap_start) <= pc)
+	   && (pc < ((ULONG) shared_heap_end)))
+ {
+ pc_location = pc_in_heap;
+ block_address = (find_block_address (((void *) pc), shared_heap_start));
+ }
else if ((((ULONG) constant_start) <= pc) && (pc < ((ULONG) constant_end)))
{
pc_location = pc_in_heap;
char id;
processor_state_t state;
pthread_t pthread;
+ SCHEME_OBJECT * stack_end;
+ SCHEME_OBJECT * stack_start;
+ SCHEME_OBJECT * heap_end;
+ SCHEME_OBJECT * heap_start;
+ SCHEME_OBJECT * stack_pointer;
+ SCHEME_OBJECT * free_pointer;
+ SCHEME_OBJECT * history_register;
};
extern processor_t *processors;
extern void setup_processors (int count);
+extern void smp_gc_start (void);
+extern void smp_gc_finish (void);
+
extern void smp_kill_gc (processor_t *);
#ifdef ENABLE_DEBUGGING_TOOLS
#ifdef ENABLE_SMP
-#include "option.h"
+#include "history.h"
#include <errno.h>
/* The chain of processors, starting with processor0 -- main()'s thread: */
/* The current pthread's processor. */
__thread processor_t *self;
+extern int saved_processor_count;
+extern unsigned long saved_processor_heap_size;
+extern unsigned long saved_stack_size;
+
#ifdef ENABLE_DEBUGGING_TOOLS
bool smp_trace_p = false;
voutf_fatal (format, args);
va_end (args);
outf_flush_fatal ();
+ self->free_pointer = Free;
+ self->stack_pointer = stack_pointer;
+ self->history_register = history_register;
self->state = PROCESSOR_DEAD;
pthread_exit ((void*)self);
/* NOTREACHED */
void
setup_processors (int count)
{
- trace ("; processor count: %d", option_processor_count);
- trace ("; local heap size: %d", option_processor_heap_size);
- trace ("; stack size: %d", option_stack_size);
-
- make_processors (0);
+ make_processors (count-1);
self = processors;
assert (self->id == 0);
self->pthread = pthread_self ();
+ /* assert (C_Stack_Pointer == NULL); */
+ reset_processor_allocator (self);
+ RESET_HISTORY ();
}
static void
make_processors (int id)
{
+ SCHEME_OBJECT *stack_start, *heap_start;
processor_t *new;
trace (";%d Setup.", id);
outf_flush_fatal ();
Microcode_Termination (TERM_NO_SPACE);
}
+ stack_start = memory_block_start + id * saved_stack_size;
+ heap_start = memory_block_start + (saved_processor_count * saved_stack_size
+ + id * saved_processor_heap_size);
new->next = processors;
new->id = id;
new->state = PROCESSOR_NEW;
+ new->stack_start = stack_start;
+ new->stack_end = stack_start + saved_stack_size;
+ new->heap_start = heap_start;
+ new->heap_end = heap_start + saved_processor_heap_size;
processors = new;
+ trace (";%d heap: 0x%0lx-0x%0lx", id,
+ (ulong)new->heap_start, (ulong)new->heap_end);
+ trace (";%d stack: 0x%0lx-0x%0lx", id,
+ (ulong)new->stack_start, (ulong)new->stack_end);
+
if (id > 0)
make_processors (id - 1);
}
}
return (all);
}
+\f
+static void
+export_state (void)
+{
+ self->free_pointer = Free;
+ self->stack_pointer = stack_pointer;
+ self->history_register = history_register;
+}
+
+static void
+import_state (void)
+{
+ Free = self->free_pointer;
+ history_register = self->history_register;
+ heap_alloc_limit = heap_end - heap_reserved;
+}
static void
gc_wait (void)
smp_gc_wait (void)
{
mutex_lock (&state_mutex);
- CLEAR_INTERRUPT (INT_Global_GC);
+ export_state ();
gc_wait ();
+ import_state ();
+ CLEAR_INTERRUPT (INT_Global_GC);
mutex_unlock (&state_mutex);
}
+
+void
+smp_gc_start (void)
+{
+ assert (gc_processor == self);
+ export_state ();
+ trace (";%d smp_gc_start exported", self->id);
+}
+
+void
+smp_gc_finish (void)
+{
+ assert (gc_processor == self);
+ for (processor_t *p = processors; p != NULL; p = p->next)
+ if (p != self)
+ p->free_pointer = p->heap_start;
+ trace (";%d smp_gc_finish reset", self->id);
+}
#endif /* ENABLE_SMP */
\f
DEFINE_PRIMITIVE ("SMP-COUNT", Prim_smp_count, 0, 0, "(SMP-COUNT)\n\
{
PRIMITIVE_HEADER (0);
#ifdef ENABLE_SMP
- PRIMITIVE_RETURN (LONG_TO_FIXNUM (1));
+ PRIMITIVE_RETURN (LONG_TO_FIXNUM (saved_processor_count));
#else
PRIMITIVE_RETURN (SHARP_F);
#endif
#include "prims.h"
#include "gccode.h"
+#ifndef ENABLE_SMP
+#define HEAP_START heap_start
+#define HEAP_END heap_end
+#define SMP_GC_START()
+#define SMP_GC_FINISH()
+#else
+#define HEAP_START shared_heap_start
+#define HEAP_END shared_heap_end
+#define SMP_GC_START smp_gc_start
+#define SMP_GC_FINISH smp_gc_finish
+#endif
+
static void purify (SCHEME_OBJECT);
\f
/* Purify increases the size of constant space at the expense of the
POP_PRIMITIVE_FRAME (3);
ENTER_CRITICAL_SECTION ("purify");
-#ifdef ENABLE_SMP
- assert (gc_processor == self);
-#endif
+ SMP_GC_START ();
heap_reserved = safety_margin;
purify (object);
+ SMP_GC_FINISH ();
Will_Push (CONTINUATION_SIZE);
SET_RC (RC_NORMAL_GC_DONE);
open_tospace (constant_alloc_next);
initialize_weak_chain ();
+ ephemeron_count = 0;
start_copy = (get_newspace_ptr ());
add_to_tospace (object);
constant_alloc_next = new_constant_alloc_next;
constant_end = heap_copy_start;
- heap_start = constant_end;
+ HEAP_START = constant_end;
std_gc_pt2 ();
- resize_tospace (heap_end - heap_start);
+ resize_tospace (HEAP_END - HEAP_START);
}
/* strict limit for Free */
SCHEME_OBJECT * heap_alloc_limit;
-/* limits of active heap */
+#ifdef ENABLE_SMP
+/* the start of From space */
+SCHEME_OBJECT * p0_heap_start;
+
+/* limits of shared heap */
+SCHEME_OBJECT * shared_heap_start;
+SCHEME_OBJECT * shared_heap_end;
+
+/* next free word in the shared heap */
+SCHEME_OBJECT * shared_heap_free;
+#endif
+
+/* limits of local heap */
SCHEME_OBJECT * heap_start;
SCHEME_OBJECT * heap_end;
bool trapping;
-unsigned long n_heap_blocks;
-unsigned long n_constant_blocks;
-unsigned long n_stack_blocks;
SCHEME_OBJECT * memory_block_start;
SCHEME_OBJECT * memory_block_end;
{
PRIMITIVE_HEADER (0);
{
+#ifdef ENABLE_SMP
+ SCHEME_OBJECT v = (make_vector (15, SHARP_F, true));
+#else
SCHEME_OBJECT v = (make_vector (12, SHARP_F, true));
+#endif
VECTOR_SET (v, 0, (ULONG_TO_FIXNUM (sizeof (SCHEME_OBJECT))));
VECTOR_SET (v, 1, (CONVERT_ADDRESS (constant_start)));
VECTOR_SET (v, 2, (CONVERT_ADDRESS (constant_alloc_next)));
VECTOR_SET (v, 9, (CONVERT_ADDRESS (stack_pointer)));
VECTOR_SET (v, 10, (CONVERT_ADDRESS (stack_guard)));
VECTOR_SET (v, 11, (CONVERT_ADDRESS (stack_end)));
+#ifdef ENABLE_SMP
+ VECTOR_SET (v, 12, (CONVERT_ADDRESS (shared_heap_start)));
+ VECTOR_SET (v, 13, (CONVERT_ADDRESS (shared_heap_free)));
+ VECTOR_SET (v, 14, (CONVERT_ADDRESS (shared_heap_end)));
+#endif
PRIMITIVE_RETURN (v);
}
}
extern const char * find_signal_name (int);
extern void UX_dump_core (void);
-extern void * initial_C_stack_pointer;
\f
struct ux_sig_code_desc
{
enum pc_location
{
pcl_heap,
+#ifdef ENABLE_SMP
+ pcl_shared_heap,
+#endif
pcl_constant,
pcl_builtin,
pcl_utility,
#ifdef CC_SUPPORT_P
static SCHEME_OBJECT * find_heap_address (unsigned long);
+#ifdef ENABLE_SMP
+ static SCHEME_OBJECT * find_shared_heap_address (unsigned long);
+#endif
static SCHEME_OBJECT * find_constant_address (unsigned long);
# ifdef ENABLE_TRAP_RECOVERY
static SCHEME_OBJECT * find_block_address (unsigned long, SCHEME_OBJECT *);
{
unsigned long pc = (SIGCONTEXT_PC (scp));
SCHEME_OBJECT primitive = GET_PRIMITIVE;
- SCHEME_OBJECT * block_addr;
- unsigned int index;
+ SCHEME_OBJECT * block_addr = NULL;
+ unsigned int index = 0;
SCHEME_OBJECT * new_sp = 0;
struct trap_recovery_info recovery_info;
break;
case pcl_heap:
+#ifdef ENABLE_SMP
+ case pcl_shared_heap:
+#endif
case pcl_constant:
#ifdef CC_SUPPORT_P
return (find_block_address (pc, heap_start));
}
+#ifdef ENABLE_SMP
+static SCHEME_OBJECT *
+find_shared_heap_address (unsigned long pc)
+{
+ return (find_block_address (pc, shared_heap_start));
+}
+#endif
+
static SCHEME_OBJECT *
find_constant_address (unsigned long pc)
{
return (0);
}
+#ifdef ENABLE_SMP
+static SCHEME_OBJECT *
+find_shared_heap_address (unsigned long pc)
+{
+ return (0);
+}
+#endif
+
static SCHEME_OBJECT *
find_constant_address (unsigned long pc)
{
unsigned int * r_index)
{
#ifdef CC_SUPPORT_P
+ enum pc_location type = pcl_unknown;
+ SCHEME_OBJECT * block_addr = NULL;
+
if (PC_ALIGNED_P (pc))
{
if (HEAP_ADDRESS_P ((SCHEME_OBJECT *) pc))
{
- SCHEME_OBJECT * block_addr = (find_heap_address (pc));
- if (block_addr == 0)
- return (pcl_unknown);
- if (r_block_addr != 0)
- (*r_block_addr) = block_addr;
- return (pcl_heap);
+ block_addr = (find_heap_address (pc));
+ type = pcl_heap;
}
- if (ADDRESS_IN_CONSTANT_P ((SCHEME_OBJECT *) pc))
+#ifdef ENABLE_SMP
+ else if (SHARED_HEAP_ADDRESS_P ((SCHEME_OBJECT *) pc))
{
- SCHEME_OBJECT * block_addr = (find_constant_address (pc));
- if (block_addr == 0)
- return (pcl_unknown);
- if (r_block_addr != 0)
- (*r_block_addr) = block_addr;
- return (pcl_constant);
+ block_addr = (find_shared_heap_address (pc));
+ type = pcl_shared_heap;
}
- if (ADDRESS_UCODE_P (pc))
+#endif
+ else if (ADDRESS_IN_CONSTANT_P ((SCHEME_OBJECT *) pc))
+ {
+ block_addr = (find_constant_address (pc));
+ type = pcl_constant;
+ }
+ else if (ADDRESS_UCODE_P (pc))
{
int index = (pc_to_builtin_index (pc));
if (index >= 0)
if ((OBJECT_TYPE (GET_PRIMITIVE)) == TC_PRIMITIVE)
return (pcl_primitive);
}
+
+ if (r_block_addr != 0)
+ (*r_block_addr) = block_addr;
+ return (type);
}
#else
if ((ADDRESS_UCODE_P (pc))
n += Find_In_Area("Constant Space",
constant_start, constant_alloc_next, Obj,
Find_Mode, print_p, store_p);
+#ifndef ENABLE_SMP
n += Find_In_Area("the Heap",
heap_start, Saved_Free, Obj,
Find_Mode, print_p, store_p);
+#else
+ n += Find_In_Area("the local Heap",
+ heap_start, Saved_Free, Obj,
+ Find_Mode, print_p, store_p);
+ n += Find_In_Area("the shared Heap",
+ shared_heap_start, shared_heap_free, Obj,
+ Find_Mode, print_p, store_p);
+#endif
n += Find_In_Area("the Stack",
stack_pointer, stack_end, Obj,
Find_Mode, print_p, store_p);
(define-package (runtime save/restore)
(files "savres")
(parent (runtime))
+ (import (runtime thread)
+ enable-smp?
+ with-heap-lock)
(export ()
disk-restore
disk-save
(let ((filename (->namestring (merge-pathnames filename)))
(id (if (default-object? id) world-id id))
(time (local-decoded-time)))
+ (if (and enable-smp? (other-running-threads?))
+ (error "Cannot disk-save multiple running threads."))
(gc-clean)
((without-interrupts
(lambda ()
(call-with-current-continuation
(lambda (continuation)
- ;; GC cannot be allowed before the fixed-objects-vector
- ;; is reset after restoring.
- (with-absolutely-no-interrupts
- (lambda ()
- (let ((fixed-objects (get-fixed-objects-vector)))
- ((ucode-primitive call-with-current-continuation)
- (lambda (restart)
- (with-interrupt-mask interrupt-mask/gc-ok
- (lambda (interrupt-mask)
- interrupt-mask
- (gc-flip)
- (do ()
- (((ucode-primitive dump-band) restart filename))
- (with-simple-restart 'RETRY "Try again."
- (lambda ()
- (error "Disk save failed:" filename))))
- (continuation
- (lambda ()
- (set! time-world-saved time)
- (if (string? id) unspecific #f)))))))
- ((ucode-primitive set-fixed-objects-vector!) fixed-objects))))
+ (if enable-smp?
+ (smp-drop-band continuation filename id time)
+ (drop-band continuation filename id time))
(read-microcode-tables!)
(lambda ()
(set! time-world-saved time)
(else
(event-distributor/invoke! event:after-restart)
#t))))))))))
+
+(define (drop-band continuation filename id time)
+ ;; GC cannot be allowed before the fixed-objects-vector
+ ;; is reset after restoring.
+ (with-absolutely-no-interrupts
+ (lambda ()
+ (let ((fixed-objects (get-fixed-objects-vector)))
+ ((ucode-primitive call-with-current-continuation)
+ (lambda (restart)
+ (with-interrupt-mask interrupt-mask/gc-ok
+ (lambda (interrupt-mask)
+ interrupt-mask
+ (gc-flip)
+ (do ()
+ (((ucode-primitive dump-band) restart filename))
+ (with-simple-restart 'RETRY "Try again."
+ (lambda ()
+ (error "Disk save failed:" filename))))
+ (continuation
+ (lambda ()
+ (set! time-world-saved time)
+ (if (string? id) unspecific #f)))))))
+ ((ucode-primitive set-fixed-objects-vector!) fixed-objects)))))
+
+(define (smp-drop-band continuation filename id time)
+ ;; Just like drop-band, except dump-band starts the flip and RESTART
+ ;; will finish it as well as restore the fixed-objects.
+ (with-absolutely-no-interrupts
+ (lambda ()
+ (let ((fixed-objects (get-fixed-objects-vector)))
+ (define-integrable (trigger-primitive-gc-daemons!)
+ ((vector-ref fixed-objects #x0B)))
+ ((ucode-primitive call-with-current-continuation)
+ (lambda (restart)
+ (do ()
+ ((with-heap-lock
+ (lambda ()
+ ((ucode-primitive dump-band) restart filename))))
+ (trigger-primitive-gc-daemons!)
+ (with-interrupt-mask interrupt-mask/all
+ (lambda (interrupt-mask)
+ interrupt-mask
+ (with-simple-restart 'RETRY "Try again."
+ (lambda ()
+ (error "Disk save failed:" filename))))))
+ (continuation
+ (lambda ()
+ (trigger-primitive-gc-daemons!)
+ (set! time-world-saved time)
+ (if (string? id) unspecific #f)))))
+ ((ucode-primitive set-fixed-objects-vector!) fixed-objects)
+ (trigger-primitive-gc-daemons!)
+ ((ucode-primitive smp-gc-unlock 0))))))
\f
(define (disk-restore #!optional filename)
;; Force order of events -- no need to run event:before-exit if