/[cmucl]/src/lisp/gencgc.c
Diff of /src/lisp/gencgc.c

revision 1.74 by rtoy, Fri Jul 21 17:36:10 2006 UTC -> revision 1.75 by rtoy, Fri Aug 4 16:41:18 2006 UTC
# Line 2341  scavenge_interrupt_context(os_context_t
  2341  #ifdef reg_CTR
  2342      ctr_code_offset = SC_REG(context, reg_CTR) - SC_REG(context, reg_CODE);
  2343  #endif
  2344
  2345      /* Scavenge all boxed registers in the context. */
  2346      for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) {
  2347          int index;
# Line 3608  struct hash_table {
  3608
  3609  /* List of weak hash tables chained through their WEAK-P slot.  Set to
  3610     NIL at the start of a collection.
  3611
  3612     This is not optimal because, when a table is tenured, it won't be
  3613     processed automatically; only the youngest generation is GC'd by
  3614     default.  On the other hand, all applications will need an
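
The comment above describes an intrusive list: each weak hash table is threaded onto a global chain through its own WEAK-P slot, and the chain is dropped back to NIL once the tables have been processed. Below is a minimal, self-contained sketch of that idiom, not part of the diff; the types, constants, and names (hash_table_sketch, note_weak_table, scan_weak_tables_sketch) are simplified stand-ins rather than CMUCL's actual definitions.

    /* Sketch of chaining weak tables through their own weak_p slot. */
    #include <stdio.h>
    #include <stdint.h>

    typedef uintptr_t lispobj;             /* stand-in for CMUCL's lispobj */
    #define NIL ((lispobj) 0)              /* stand-in list terminator */
    #define T   ((lispobj) 1)              /* stand-in "true" marker */

    struct hash_table_sketch {
        lispobj weak_p;                    /* T, NIL, or the next table in the chain */
        const char *name;                  /* only for demonstration output */
    };

    static lispobj weak_tables = NIL;      /* chain head, reset to NIL each GC */

    /* Link a weak table onto the chain exactly once: only a table whose
       weak_p is still T is added, so a second visit is a no-op. */
    static void note_weak_table(struct hash_table_sketch *table)
    {
        if (table->weak_p == T) {
            table->weak_p = weak_tables;
            weak_tables = (lispobj) table;
        }
    }

    /* Walk the chain, restore each weak_p slot to T, and reset the head. */
    static void scan_weak_tables_sketch(void)
    {
        lispobj table_obj = weak_tables;

        while (table_obj != NIL) {
            struct hash_table_sketch *table = (struct hash_table_sketch *) table_obj;
            lispobj next = table->weak_p;

            printf("processing weak table %s\n", table->name);
            table->weak_p = T;             /* weak again for the next collection */
            table_obj = next;
        }
        weak_tables = NIL;
    }

    int main(void)
    {
        struct hash_table_sketch a = { T, "a" }, b = { T, "b" };

        note_weak_table(&a);
        note_weak_table(&b);
        note_weak_table(&a);               /* ignored: a's weak_p is no longer T */
        scan_weak_tables_sketch();
        return 0;
    }

Reusing the WEAK-P slot as the link avoids any allocation during GC, and the T/non-T test is what keeps a table from being chained twice, which the scav_hash_vector hunk below relies on.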
# Line 3682  free_hash_entry(struct hash_table *hash_
  3682          lispobj* kv_vector = (lispobj *) PTR(hash_table->table);
  3683          lispobj empty_symbol;
  3684
-
  3685          gc_assert(count > 0);
  3686          hash_table->number_entries = make_fixnum(count - 1);
  3687          next_vector[kv_index] = fixnum_value(hash_table->next_free_kv);
# Line 3778 (v.1.74) / Line 3777 (v.1.75)  scav_hash_entries(struct hash_table *has
  3777              free_hash_entry(hash_table, old_index, i);
  3778          else {
  3779              /* If the key is EQ-hashed and moves, schedule it for rehashing. */
+ 3780  #if 0
+ 3781              if ((i < 4) && (hash_table >= (void*)0x40000000)) {
+ 3782                  fprintf(stderr, "scav_hash_entries: %p: %d\n", hash_table, i);
+ 3783                  fprintf(stderr, "  key = %p\n", kv_vector[2*i]);
+ 3784                  fprintf(stderr, "  val = %p\n", kv_vector[2*i+1]);
+ 3785              }
+ 3786  #endif
  3787              scavenge(&kv_vector[2 * i], 2);
  3788              new_key = kv_vector[2 * i];
  3789              new_index = EQ_HASH(new_key) % length;
+ 3790  #if 0
+ 3791              if ((i < 4) && (hash_table >= (void*)0x40000000)) {
+ 3792                  fprintf(stderr, "  new key = %p\n", kv_vector[2*i]);
+ 3793                  fprintf(stderr, "  new val = %p\n", kv_vector[2*i+1]);
+ 3794              }
+ 3795  #endif
+ 3796
  3797              if (old_index != new_index
  3798                  && index_vector[old_index] != 0
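
The rescheduling above is needed because EQ hash tables hash keys by object identity, which for heap objects boils down to an address-derived value, and a copying collector changes addresses. The following standalone sketch, not part of the diff, shows the effect; EQ_HASH_SKETCH is a hypothetical address-based hash used only for illustration, not CMUCL's actual EQ_HASH macro.

    /* Sketch: an address-derived hash changes when an object is copied. */
    #include <stdio.h>
    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    #define EQ_HASH_SKETCH(obj) ((uintptr_t) (obj) >> 3)   /* illustrative only */

    int main(void)
    {
        size_t length = 97;               /* number of buckets in the index vector */
        int *key  = malloc(sizeof *key);  /* pretend this is a heap-allocated key */
        int *copy = malloc(sizeof *key);  /* where a copying GC relocates it */

        *key = 42;
        memcpy(copy, key, sizeof *key);   /* "transport" the object */

        size_t old_index = EQ_HASH_SKETCH(key)  % length;
        size_t new_index = EQ_HASH_SKETCH(copy) % length;

        /* The bucket almost always changes, so the table entry can no longer
           be found from the key's new address without rehashing. */
        printf("old_index = %zu, new_index = %zu\n", old_index, new_index);

        free(key);
        free(copy);
        return 0;
    }

That mismatch is exactly the old_index != new_index case the code above checks for before scheduling the key for rehashing.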
# Line 3824 (v.1.74) / Line 3836 (v.1.75)  scav_weak_entries(struct hash_table *has
  3836              && index_vector[old_index] != 0
  3837              && (hash_vector == 0 || hash_vector[old_index] == 0x80000000)
  3838              && !survives_gc(kv_vector[2 * i + 1])) {
+ 3839  #if 0
+ 3840              lispobj old_val;
+ 3841              old_val = kv_vector[2*i+1];
+ 3842  #endif
  3843              scavenge(&kv_vector[2 * i + 1], 1);
+ 3844  #if 0
+ 3845              fprintf(stderr, "scav_weak_entries:  kv_vector[entry = %d] from %p to %p\n",
+ 3846                      i, old_val, kv_vector[2*i+1]);
+ 3847  #endif
  3848              scavenged = 1;
  3849          }
  3850      }
# Line 3927 (v.1.74) / Line 3947 (v.1.75)  scav_hash_vector(lispobj * where, lispob
  3947
  3948      /* Scavenging the hash table fixes the positions of the other
  3949         needed objects.  */
+ 3950  #if 0
+ 3951      if (hash_table >= (void*) 0x40000000) {
+ 3952          fprintf(stderr, "scav_hash_vector: scavenge table %p\n", hash_table);
+ 3953      }
+ 3954  #endif
+ 3955
  3956      scavenge((lispobj *) hash_table, HASH_TABLE_SIZE);
  3957
  3958      /* Testing for T here instead of NIL automatically makes sure we
  3959         don't add the same table twice to the list of weak tables, should
  3960         this function ever be called twice for the same object.  */
  3961
  3962      if (hash_table->weak_p == T) {
  3963          hash_table->weak_p = weak_hash_tables;
+ 3964  #if 0
+ 3965          fprintf(stderr, "  adding %p to weak_hash_tables\n", hash_table);
+ 3966  #endif
  3967          weak_hash_tables = hash_table_obj;
  3968      } else
  3969          scav_hash_entries(hash_table, 0);
# Line 4450 (v.1.74) / Line 4480 (v.1.75)  scav_weak_pointer(lispobj * where, lispo
  4480  static lispobj
  4481  trans_weak_pointer(lispobj object)
  4482  {
+ 4483      lispobj copy;
+ 4484
  4485      gc_assert(Pointerp(object));
-           return copy_object(object, WEAK_POINTER_NWORDS);
+ 4486      copy = copy_object(object, WEAK_POINTER_NWORDS);
+ 4487  #if 0
+ 4488      fprintf(stderr, "Transport weak pointer %p to %p\n", object, copy);
+ 4489  #endif
+ 4490      return copy;
  4491  }
  4492
  4493  static int
# Line 5900 (v.1.74) / Line 5936 (v.1.75)  scavenge_newspace_generation_one_scan(in
  5936              i = last_page;
  5937          }
  5938      }
+ 5939  #if 0
+ 5940      fprintf(stderr, "Finished one full scan of newspace generation %d\n",
+ 5941              generation);
+ 5942  #endif
+ 5943  }
+ 5944
+ 5945  /* Scan all weak objects and reset weak object lists */
+ 5946  static void
+ 5947  scan_weak_objects()
+ 5948  {
+ 5949      scan_weak_pointers();
+ 5950      scan_weak_tables();
+ 5951
+ 5952      /* Re-initialise the weak pointer and weak tables lists. */
+ 5953      weak_pointers = NULL;
+ 5954      weak_hash_tables = NIL;
  5955  }
5956    
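scan_weak_objects, added above, simply bundles the two existing weak-object passes and then clears the list heads so the chains can be rebuilt on the next scan. For readers who have not seen the weak-pointer pass, here is a minimal sketch of what such a sweep typically does; the struct layout and survives_gc_sketch are simplified illustrations, not CMUCL's actual weak_pointer definition.

    /* Sketch of a weak-pointer sweep: break pointers whose referent died. */
    #include <stddef.h>
    #include <stdbool.h>

    struct weak_pointer_sketch {
        void *value;                         /* the weakly held referent */
        bool broken;                         /* set once the referent has died */
        struct weak_pointer_sketch *next;    /* chain built up while scavenging */
    };

    /* Stand-in liveness test; the real GC inspects spaces and forwarding
       pointers rather than checking for NULL. */
    static bool survives_gc_sketch(const void *obj)
    {
        return obj != NULL;
    }

    static void scan_weak_pointers_sketch(struct weak_pointer_sketch *list)
    {
        for (struct weak_pointer_sketch *wp = list; wp != NULL; wp = wp->next) {
            if (!survives_gc_sketch(wp->value)) {
                wp->value = NULL;            /* drop the dead referent */
                wp->broken = true;           /* remember the pointer was broken */
            }
        }
    }

Resetting weak_pointers and weak_hash_tables afterwards, as scan_weak_objects does, guarantees the chains start empty the next time objects are scavenged.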
  5957  /* Do a complete scavenge of the newspace generation */
# Line 5916 (v.1.74) / Line 5968 (v.1.75)  scavenge_newspace_generation(int generat
  5968      struct new_area (*previous_new_areas)[] = NULL;
  5969      int previous_new_areas_index;
  5970
+ 5971  #if 0
+ 5972      fprintf(stderr, "Start scavenge_newspace_generation %d\n", generation);
+ 5973  #endif
+ 5974
  5975  #define SC_NS_GEN_CK 0
  5976  #if SC_NS_GEN_CK
  5977      /* Clear the write_protected_cleared flags on all pages */
# Line 5953 (v.1.74) / Line 6009 (v.1.75)  scavenge_newspace_generation(int generat
  6009  #if 0
  6010      fprintf(stderr, "First scan finished; current_new_areas_index=%d\n",
  6011              current_new_areas_index);
+ 6012      if (current_new_areas_index > 0) {
+ 6013          fprintf(stderr, "Start rescans\n");
+ 6014      }
  6015  #endif
  6016
  6017      while (current_new_areas_index > 0) {
# Line 5994 (v.1.74) / Line 6053 (v.1.75)  scavenge_newspace_generat(int generat
  6053               */
  6054              record_new_objects = 1;
  6055
+ 6056  #if 0
+ 6057              fprintf(stderr, " Rescan generation %d\n", generation);
+ 6058  #endif
  6059              scavenge_newspace_generation_one_scan(generation);
  6060
+ 6061              /*
+ 6062               * Not sure this call is needed, but I (rtoy) am putting
+ 6063               * this here anyway on the assumption that since we do it
+ 6064               * below after scavenging some stuff, we should do it here
+ 6065               * also because scavenge_newspace_generation_one_scan
+ 6066               * scavenges stuff too.
+ 6067               */
+ 6068
+ 6069              scan_weak_objects();
+ 6070
  6071              /* Record all new areas now. */
  6072              record_new_objects = 2;
  6073
# Line 6014 (v.1.74) / Line 6086 (v.1.75)  scavenge_newspace_generat(int generat
  6086  #if 0
  6087                  fprintf(stderr, "*S page %d offset %d size %d\n", page, offset,
  6088                          size * sizeof(lispobj));
+ 6089                  fprintf(stderr, "  scavenge(%p, %d)\n", page_address(page) + offset, size);
  6090  #endif
  6091                  scavenge(page_address(page) + offset, size);
  6092              }
  6093
+ 6094              /*
+ 6095               * I (rtoy) am not sure this is 100% correct.  But if we
+ 6096               * don't scan the weak pointers and tables here (or
+ 6097               * somewhere near here, perhaps), we get problems like
+ 6098               * live weak pointers that haven't been transported out of
+ 6099               * oldspace.  Then anything referring to this pointer
+ 6100               * causes a crash when GC happens later on.
+ 6101               *
+ 6102               * This fixes a bug with weak hash tables, reported by
+ 6103               * Lynn Quam, cmucl-imp, 2006-07-04.
+ 6104               */
+ 6105              scan_weak_objects();
+ 6106
  6107              /* Flush the current regions updating the tables. */
  6108              gc_alloc_update_page_tables(0, &boxed_region);
  6109              gc_alloc_update_page_tables(1, &unboxed_region);
# Line 6032 (v.1.74) / Line 6118 (v.1.75)  scavenge_newspace_generat(int generat
  6118  #endif
  6119      }
  6120
+ 6121  #if 0
+ 6122      fprintf(stderr, "All rescans finished\n");
+ 6123  #endif
+ 6124
  6125      /* Turn off recording of areas allocated by gc_alloc */
  6126      record_new_objects = 0;
  6127
# Line 6049 (v.1.74) / Line 6139 (v.1.75)  scavenge_newspace_generat(int generat
  6139                      "*** scav.new.gen. %d: write protected page %d written to? dont_move=%d\n",
  6140                      generation, i, PAGE_DONT_MOVE(i));
  6141  #endif
+ 6142  #if 0
+ 6143      fprintf(stderr, "Finished scavenge_newspace_generation %d\n", generation);
+ 6144  #endif
  6145  }
6146    
6147    
# Line 6817 (v.1.74) / Line 6910 (v.1.75)  garbage_collect_generation(int generatio
  6910       */
  6911      scavenge_newspace_generation(new_space);
  6912
+ 6913      /* I think we should do this *before* the rescan check */
+ 6914      scan_weak_objects();
+ 6915
  6916  #define RESCAN_CHECK 0
  6917  #if RESCAN_CHECK
  6918      /*
# Line 6845 (v.1.74) / Line 6941 (v.1.75)  garbage_collect_generation(int generatio
  6941      }
  6942  #endif
  6943
-           scan_weak_pointers();
-           scan_weak_tables();
-
  6944      /* Flush the current regions, updating the tables. */
  6945      gc_alloc_update_page_tables(0, &boxed_region);
  6946      gc_alloc_update_page_tables(1, &unboxed_region);

Legend: lines prefixed with "+" were added in v.1.75; lines prefixed with "-" were removed from v.1.74; unprefixed lines are unchanged between the two revisions.
