OpenJDK 17 JVM Heap Space Allocation

## Heap Space Allocation Source Code

Heap setup starts in `Universe::initialize_heap()`: the GC configuration creates the `CollectedHeap` implementation for the selected collector and then initializes it:
```cpp
jint Universe::initialize_heap() {
  assert(_collectedHeap == NULL, "Heap already created");
  _collectedHeap = GCConfig::arguments()->create_heap();

  log_info(gc)("Using %s", _collectedHeap->name());
  return _collectedHeap->initialize();
}
```
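`GCConfig::arguments()->create_heap()` returns whichever `CollectedHeap` subclass matches the selected collector, and the `log_info` call is what prints the familiar `Using G1` line in `-Xlog:gc` output. Below is a minimal sketch of this factory shape (not HotSpot code; `SelectedGC`, `make_heap`, and the stub heap classes are invented for illustration):

```cpp
#include <cstdio>
#include <memory>

// Stand-in for HotSpot's CollectedHeap interface.
struct CollectedHeap {
  virtual ~CollectedHeap() = default;
  virtual const char* name() const = 0;
  virtual int initialize() = 0;  // returns 0 (JNI_OK) on success
};

struct G1Heap : CollectedHeap {
  const char* name() const override { return "G1"; }
  int initialize() override { return 0; }
};

struct SerialHeap : CollectedHeap {
  const char* name() const override { return "Serial"; }
  int initialize() override { return 0; }
};

enum class SelectedGC { Serial, G1 };

// Each collector's "arguments" object knows how to create its heap type.
std::unique_ptr<CollectedHeap> make_heap(SelectedGC gc) {
  switch (gc) {
    case SelectedGC::G1:     return std::make_unique<G1Heap>();
    case SelectedGC::Serial: return std::make_unique<SerialHeap>();
  }
  return nullptr;
}

int main() {
  auto heap = make_heap(SelectedGC::G1);
  std::printf("Using %s\n", heap->name());  // mirrors log_info(gc)("Using %s", ...)
  return heap->initialize();
}
```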
## G1CollectedHeap::initialize

With G1, the default collector, `create_heap()` returns a `G1CollectedHeap`, so the `initialize()` call above lands here. The method reserves the full heap address range, sets up the card table and barrier set, maps the auxiliary GC data structures, and finally commits the initial heap:
```cpp
jint G1CollectedHeap::initialize() {

  // Necessary to satisfy locking discipline assertions.
  MutexLocker x(Heap_lock);

  // While there are no constraints in the GC code that HeapWordSize
  // be any particular value, there are multiple other areas in the
  // system which believe this to be true (e.g. oop->object_size in some
  // cases incorrectly returns the size in wordSize units rather than
  // HeapWordSize).
  guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");

  size_t init_byte_size = InitialHeapSize;
  size_t reserved_byte_size = G1Arguments::heap_reserved_size_bytes();

  // Ensure that the sizes are properly aligned.
  Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
  Universe::check_alignment(reserved_byte_size, HeapRegion::GrainBytes, "g1 heap");
  Universe::check_alignment(reserved_byte_size, HeapAlignment, "g1 heap");

  // Reserve the maximum.

  // When compressed oops are enabled, the preferred heap base
  // is calculated by subtracting the requested size from the
  // 32Gb boundary and using the result as the base address for
  // heap reservation. If the requested size is not aligned to
  // HeapRegion::GrainBytes (i.e. the alignment that is passed
  // into the ReservedHeapSpace constructor) then the actual
  // base of the reserved heap may end up differing from the
  // address that was requested (i.e. the preferred heap base).
  // If this happens then we could end up using a non-optimal
  // compressed oops mode.

  ReservedHeapSpace heap_rs = Universe::reserve_heap(reserved_byte_size,
                                                     HeapAlignment);

  initialize_reserved_region(heap_rs);

  // Create the barrier set for the entire reserved region.
  G1CardTable* ct = new G1CardTable(heap_rs.region());
  ct->initialize();
  G1BarrierSet* bs = new G1BarrierSet(ct);
  bs->initialize();
  assert(bs->is_a(BarrierSet::G1BarrierSet), "sanity");
  BarrierSet::set_barrier_set(bs);
  _card_table = ct;
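
  // The card table divides the heap into (by default) 512-byte cards;
  // G1's post-write barrier dirties the card covering an updated field
  // so that concurrent refinement can later update remembered sets.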
  {
    G1SATBMarkQueueSet& satbqs = bs->satb_mark_queue_set();
    satbqs.set_process_completed_buffers_threshold(G1SATBProcessCompletedThreshold);
    satbqs.set_buffer_enqueue_threshold_percentage(G1SATBBufferEnqueueingThresholdPercent);
  }
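  // The SATB (snapshot-at-the-beginning) queues record the old values
  // overwritten by reference stores while concurrent marking runs; the
  // two thresholds control when filled buffers are handed off for
  // processing.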

  // Create the hot card cache.
  _hot_card_cache = new G1HotCardCache(this);

  // Create space mappers.
  size_t page_size = heap_rs.page_size();
  G1RegionToSpaceMapper* heap_storage =
    G1RegionToSpaceMapper::create_mapper(heap_rs,
                                         heap_rs.size(),
                                         page_size,
                                         HeapRegion::GrainBytes,
                                         1,
                                         mtJavaHeap);
  if (heap_storage == NULL) {
    vm_shutdown_during_initialization("Could not initialize G1 heap");
    return JNI_ERR;
  }

  os::trace_page_sizes("Heap",
                       MinHeapSize,
                       reserved_byte_size,
                       page_size,
                       heap_rs.base(),
                       heap_rs.size());
  heap_storage->set_mapping_changed_listener(&_listener);

  // Create storage for the BOT, card table, card counts table (hot card cache) and the bitmaps.
  G1RegionToSpaceMapper* bot_storage =
    create_aux_memory_mapper("Block Offset Table",
                             G1BlockOffsetTable::compute_size(heap_rs.size() / HeapWordSize),
                             G1BlockOffsetTable::heap_map_factor());

  G1RegionToSpaceMapper* cardtable_storage =
    create_aux_memory_mapper("Card Table",
                             G1CardTable::compute_size(heap_rs.size() / HeapWordSize),
                             G1CardTable::heap_map_factor());

  G1RegionToSpaceMapper* card_counts_storage =
    create_aux_memory_mapper("Card Counts Table",
                             G1CardCounts::compute_size(heap_rs.size() / HeapWordSize),
                             G1CardCounts::heap_map_factor());

  size_t bitmap_size = G1CMBitMap::compute_size(heap_rs.size());
  G1RegionToSpaceMapper* prev_bitmap_storage =
    create_aux_memory_mapper("Prev Bitmap", bitmap_size, G1CMBitMap::heap_map_factor());
  G1RegionToSpaceMapper* next_bitmap_storage =
    create_aux_memory_mapper("Next Bitmap", bitmap_size, G1CMBitMap::heap_map_factor());

  _hrm.initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
  _card_table->initialize(cardtable_storage);

  // Do later initialization work for concurrent refinement.
  _hot_card_cache->initialize(card_counts_storage);

  // 6843694 - ensure that the maximum region index can fit
  // in the remembered set structures.
  const uint max_region_idx = (1U << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
  guarantee((max_reserved_regions() - 1) <= max_region_idx, "too many regions");
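  // RegionIdx_t is a 16-bit short in this version, so max_region_idx is
  // (1 << 15) - 1 = 32767 and at most 32768 regions are allowed; at the
  // maximum 32 MB region size that caps the reservable heap at 1 TB.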

  // The G1FromCardCache reserves card with value 0 as "invalid", so the heap must not
  // start within the first card.
  guarantee(heap_rs.base() >= (char*)G1CardTable::card_size, "Java heap must not start within the first card.");
  G1FromCardCache::initialize(max_reserved_regions());
  // Also create a G1 rem set.
  _rem_set = new G1RemSet(this, _card_table, _hot_card_cache);
  _rem_set->initialize(max_reserved_regions());

  size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
  guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
  guarantee(HeapRegion::CardsPerRegion < max_cards_per_region,
            "too many cards per region");

  FreeRegionList::set_unrealistically_long_length(max_regions() + 1);

  _bot = new G1BlockOffsetTable(reserved(), bot_storage);

  {
    size_t granularity = HeapRegion::GrainBytes;

    _region_attr.initialize(reserved(), granularity);
    _humongous_reclaim_candidates.initialize(reserved(), granularity);
  }

  _workers = new WorkGang("GC Thread", ParallelGCThreads,
                          true /* are_GC_task_threads */,
                          false /* are_ConcurrentGC_threads */);
  if (_workers == NULL) {
    return JNI_ENOMEM;
  }
  _workers->initialize_workers();

  _numa->set_region_info(HeapRegion::GrainBytes, page_size);

  // Create the G1ConcurrentMark data structure and thread.
  // (Must do this late, so that "max_[reserved_]regions" is defined.)
  _cm = new G1ConcurrentMark(this, prev_bitmap_storage, next_bitmap_storage);
  _cm_thread = _cm->cm_thread();

  // Now expand into the initial heap size.
  if (!expand(init_byte_size, _workers)) {
    vm_shutdown_during_initialization("Failed to allocate initial heap.");
    return JNI_ENOMEM;
  }
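  // Note that reserve_heap() above only reserved virtual address space;
  // this expand() call (shown in full below) is what actually commits
  // memory, region by region, up to InitialHeapSize (-Xms).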

  // Perform any initialization actions delegated to the policy.
  policy()->init(this, &_collection_set);

  jint ecode = initialize_concurrent_refinement();
  if (ecode != JNI_OK) {
    return ecode;
  }

  ecode = initialize_service_thread();
  if (ecode != JNI_OK) {
    return ecode;
  }

  // Initialize and schedule sampling task on service thread.
  _rem_set->initialize_sampling_task(service_thread());

  // Create and schedule the periodic gc task on the service thread.
  _periodic_gc_task = new G1PeriodicGCTask("Periodic GC Task");
  _service_thread->register_task(_periodic_gc_task);

  {
    G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
    dcqs.set_process_cards_threshold(concurrent_refine()->yellow_zone());
    dcqs.set_max_cards(concurrent_refine()->red_zone());
  }

  // Here we allocate the dummy HeapRegion that is required by the
  // G1AllocRegion class.
  HeapRegion* dummy_region = _hrm.get_dummy_region();

  // We'll re-use the same region whether the alloc region will
  // require BOT updates or not and, if it doesn't, then a non-young
  // region will complain that it cannot support allocations without
  // BOT updates. So we'll tag the dummy region as eden to avoid that.
  dummy_region->set_eden();

  // Make sure it's full.
  dummy_region->set_top(dummy_region->end());
  G1AllocRegion::setup(this, dummy_region);

  _allocator->init_mutator_alloc_regions();

  // Do create of the monitoring and management support so that
  // values in the heap have been properly initialized.
  _g1mm = new G1MonitoringSupport(this);

  _preserved_marks_set.init(ParallelGCThreads);

  _collection_set.initialize(max_reserved_regions());

  _regions_failed_evacuation = NEW_C_HEAP_ARRAY(volatile bool, max_regions(), mtGC);

  G1InitLogger::print();

  return JNI_OK;
}
```
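Worth calling out is where `HeapRegion::GrainBytes` comes from when `-XX:G1HeapRegionSize` is not set explicitly. The sketch below assumes the ergonomic rule used by recent JDKs: target roughly 2048 regions based on the maximum heap size, clamp to [1 MB, 32 MB], and round down to a power of two. All constants and helper names here are illustrative, not HotSpot's:

```cpp
#include <algorithm>
#include <cstddef>
#include <cstdio>

// Round v down to the nearest power of two (v must be >= 1).
static size_t round_down_pow2(size_t v) {
  size_t p = 1;
  while (p * 2 <= v) p *= 2;
  return p;
}

// Ergonomic region size: ~2048 regions, clamped to [1 MB, 32 MB].
static size_t region_size_for(size_t max_heap_bytes) {
  const size_t TARGET_REGIONS = 2048;
  const size_t MIN_REGION = size_t{1} << 20;   // 1 MB
  const size_t MAX_REGION = size_t{32} << 20;  // 32 MB
  size_t sz = std::clamp(max_heap_bytes / TARGET_REGIONS, MIN_REGION, MAX_REGION);
  return round_down_pow2(sz);
}

int main() {
  size_t xmx = size_t{8} << 30;    // -Xmx8g
  size_t xms = size_t{512} << 20;  // -Xms512m
  size_t grain = region_size_for(xmx);
  std::printf("region size: %zu MB\n", grain >> 20);               // 4 MB
  std::printf("reserved regions: %zu\n", xmx / grain);             // 2048
  std::printf("initially committed regions: %zu\n", xms / grain);  // 128
  return 0;
}
```

With `-Xmx8g -Xms512m` this yields 4 MB regions, 2048 reserved regions, and 128 regions committed by the `expand(init_byte_size, ...)` call at startup.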
## Universe::reserve_heap

The reservation itself happens in `Universe::reserve_heap()`, called near the top of `G1CollectedHeap::initialize()`. It chooses the page size, reserves the address range, and initializes compressed oops:
```cpp
ReservedHeapSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {
  assert(alignment <= Arguments::conservative_max_heap_alignment(),
         "actual alignment " SIZE_FORMAT " must be within maximum heap alignment " SIZE_FORMAT,
         alignment, Arguments::conservative_max_heap_alignment());

  size_t total_reserved = align_up(heap_size, alignment);
  assert(!UseCompressedOops || (total_reserved <= (OopEncodingHeapMax - os::vm_page_size())),
         "heap size is too big for compressed oops");

  size_t page_size = os::vm_page_size();
  if (UseLargePages && is_aligned(alignment, os::large_page_size())) {
    page_size = os::large_page_size();
  } else {
    // Parallel is the only collector that might opt out of using large pages
    // for the heap.
    assert(!UseLargePages || UseParallelGC , "Wrong alignment to use large pages");
  }

  // Now create the space.
  ReservedHeapSpace total_rs(total_reserved, alignment, page_size, AllocateHeapAt);

  if (total_rs.is_reserved()) {
    assert((total_reserved == total_rs.size()) && ((uintptr_t)total_rs.base() % alignment == 0),
           "must be exactly of required size and alignment");

    // We are good.

    if (AllocateHeapAt != NULL) {
      log_info(gc,heap)("Successfully allocated Java heap at location %s", AllocateHeapAt);
    }

    if (UseCompressedOops) {
      CompressedOops::initialize(total_rs);
    }

    Universe::calculate_verify_data((HeapWord*)total_rs.base(), (HeapWord*)total_rs.end());

    return total_rs;
  }

  vm_exit_during_initialization(
    err_msg("Could not reserve enough space for " SIZE_FORMAT "KB object heap",
            total_reserved/K));

  // satisfy compiler
  ShouldNotReachHere();
  return ReservedHeapSpace(0, 0, os::vm_page_size());
}
```
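The `OopEncodingHeapMax` bound in the assert is the 32 GB boundary mentioned in the comment in `G1CollectedHeap::initialize()`: a 32-bit compressed oop, scaled by the default 8-byte object alignment, can address at most 4 G * 8 = 32 GB. A small sketch of that arithmetic and of the preferred-base calculation described in that comment (heap size and names are illustrative):

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  const int log_min_obj_alignment = 3;  // default 8-byte object alignment
  const uint64_t narrow_oop_max = uint64_t(UINT32_MAX) + 1;                // 4 GB: max heap for unscaled oops
  const uint64_t encoding_max   = narrow_oop_max << log_min_obj_alignment; // 32 GB: ~OopEncodingHeapMax

  uint64_t heap_size = uint64_t{20} << 30;  // say, -Xmx20g
  // Preferred base: place the heap so it ends exactly at the 32 GB boundary.
  uint64_t preferred_base = encoding_max - heap_size;

  std::printf("zero-based compressed-oops limit: %llu GB\n",
              (unsigned long long)(encoding_max >> 30));
  std::printf("preferred base for a 20 GB heap: %#llx\n",  // 0x300000000
              (unsigned long long)preferred_base);
  return 0;
}
```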
## G1CollectedHeap::expand

Finally, `G1CollectedHeap::expand()` is what commits regions into the reserved range; during startup it is called with `init_byte_size` to make the initial heap usable:
```cpp
bool G1CollectedHeap::expand(size_t expand_bytes, WorkGang* pretouch_workers, double* expand_time_ms) {
  size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
  aligned_expand_bytes = align_up(aligned_expand_bytes,
                                  HeapRegion::GrainBytes);

  log_debug(gc, ergo, heap)("Expand the heap. requested expansion amount: " SIZE_FORMAT "B expansion amount: " SIZE_FORMAT "B",
                            expand_bytes, aligned_expand_bytes);

  if (is_maximal_no_gc()) {
    log_debug(gc, ergo, heap)("Did not expand the heap (heap already fully expanded)");
    return false;
  }

  double expand_heap_start_time_sec = os::elapsedTime();
  uint regions_to_expand = (uint)(aligned_expand_bytes / HeapRegion::GrainBytes);
  assert(regions_to_expand > 0, "Must expand by at least one region");

  uint expanded_by = _hrm.expand_by(regions_to_expand, pretouch_workers);
  if (expand_time_ms != NULL) {
    *expand_time_ms = (os::elapsedTime() - expand_heap_start_time_sec) * MILLIUNITS;
  }

  if (expanded_by > 0) {
    size_t actual_expand_bytes = expanded_by * HeapRegion::GrainBytes;
    assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition");
    policy()->record_new_heap_size(num_regions());
  } else {
    log_debug(gc, ergo, heap)("Did not expand the heap (heap expansion operation failed)");

    // The expansion of the virtual storage space was unsuccessful.
    // Let's see if it was because we ran out of swap.
    if (G1ExitOnExpansionFailure &&
        _hrm.available() >= regions_to_expand) {
      // We had head room...
      vm_exit_out_of_memory(aligned_expand_bytes, OOM_MMAP_ERROR, "G1 heap expansion");
    }
  }
  return regions_to_expand > 0;
}
```
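The two-step alignment at the top of `expand()` first rounds the request up to the OS page size and then up to a whole number of regions; the region count is then a simple division. A worked sketch with illustrative sizes (4 KB pages, 4 MB regions):

```cpp
#include <cstddef>
#include <cstdio>

// The usual power-of-two round-up, equivalent in effect to HotSpot's align_up.
static size_t align_up_pow2(size_t v, size_t alignment) {
  return (v + alignment - 1) & ~(alignment - 1);
}

int main() {
  const size_t page  = 4 * 1024;         // illustrative os page size
  const size_t grain = 4 * 1024 * 1024;  // illustrative HeapRegion::GrainBytes

  size_t expand_bytes = 5 * 1024 * 1024 + 123;  // a ~5 MB expansion request
  size_t aligned = align_up_pow2(align_up_pow2(expand_bytes, page), grain);

  std::printf("aligned expansion: %zu MB\n", aligned >> 20);  // 8 MB
  std::printf("regions to expand: %zu\n", aligned / grain);   // 2
  return 0;
}
```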
Original article: https://blog.csdn.net/u014200244/article/details/144166582