--- a/src/share/vm/adlc/Doc/Syntax.doc Fri Sep 18 10:46:35 2015 -0700+++ b/src/share/vm/adlc/Doc/Syntax.doc Fri Sep 18 14:21:46 2015 -0700@@ -1,5 +1,5 @@ #-# Copyright (c) 1997, 1998, Oracle and/or its affiliates. All rights reserved.+# Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it@@ -33,7 +33,7 @@ the architecture of a processor, and is the input to the ADL Compiler. The ADL Compiler compiles an ADL file into code which is incorporated into the Optimizing Just In Time Compiler (OJIT) to generate efficient and correct code-for the target architecture. The ADL describes three bassic different types+for the target architecture. The ADL describes three basic different types of architectural features. It describes the instruction set (and associated operands) of the target architecture. It describes the register set of the target architecture along with relevant information for the register allocator.

--- a/src/share/vm/gc/cms/concurrentMarkSweepGeneration.inline.hpp Fri Sep 18 10:46:35 2015 -0700+++ b/src/share/vm/gc/cms/concurrentMarkSweepGeneration.inline.hpp Fri Sep 18 14:21:46 2015 -0700@@ -295,7 +295,7 @@ promoted_bytes = _cms_used_at_gc0_end - _cms_used_at_gc0_begin; }- // If the younger gen collections were skipped, then the+ // If the young gen collection was skipped, then the // number of promoted bytes will be 0 and adding it to the // average will incorrectly lessen the average. It is, however, // also possible that no promotion was needed.

--- a/src/share/vm/gc/cms/parNewGeneration.hpp Fri Sep 18 10:46:35 2015 -0700+++ b/src/share/vm/gc/cms/parNewGeneration.hpp Fri Sep 18 14:21:46 2015 -0700@@ -71,11 +71,7 @@ ParScanWithoutBarrierClosure _to_space_closure; // scan_without_gc_barrier ParScanWithBarrierClosure _old_gen_closure; // scan_with_gc_barrier ParRootScanWithoutBarrierClosure _to_space_root_closure; // scan_root_without_gc_barrier- // One of these two will be passed to process_roots, which will- // set its generation. The first is for two-gen configs where the- // old gen collects the perm gen; the second is for arbitrary configs.- // The second isn't used right now (it used to be used for the train, an- // incremental collector) but the declaration has been left as a reminder.+ // Will be passed to process_roots to set its generation. ParRootScanWithBarrierTwoGensClosure _older_gen_closure; // This closure will always be bound to the old gen; it will be used // in evacuate_followers.@@ -85,7 +81,6 @@ ParScanWeakRefClosure _scan_weak_ref_closure; ParKeepAliveClosure _keep_alive_closure;- Space* _to_space; Space* to_space() { return _to_space; }

--- a/src/share/vm/gc/g1/g1CollectedHeap.hpp Fri Sep 18 10:46:35 2015 -0700+++ b/src/share/vm/gc/g1/g1CollectedHeap.hpp Fri Sep 18 14:21:46 2015 -0700@@ -757,6 +757,12 @@ // alloc_archive_regions, and after class loading has occurred. void fill_archive_regions(MemRegion* range, size_t count);+ // For each of the specified MemRegions, uncommit the containing G1 regions+ // which had been allocated by alloc_archive_regions. This should be called+ // rather than fill_archive_regions at JVM init time if the archive file+ // mapping failed, with the same non-overlapping and sorted MemRegion array.+ void dealloc_archive_regions(MemRegion* range, size_t count);+ protected: // Shrink the garbage-first heap by at most the given size (in bytes!).

--- a/src/share/vm/gc/g1/g1EvacStats.cpp Fri Sep 18 10:46:35 2015 -0700+++ b/src/share/vm/gc/g1/g1EvacStats.cpp Fri Sep 18 14:21:46 2015 -0700@@ -54,17 +54,46 @@ _allocated, _wasted, _region_end_waste, _unused, used())); _allocated = 1; }- // We account region end waste fully to PLAB allocation. This is not completely fair,- // but is a conservative assumption because PLABs may be sized flexibly while we- // cannot adjust direct allocations.- // In some cases, wasted_frac may become > 1 but that just reflects the problem- // with region_end_waste.- double wasted_frac = (double)(_unused + _wasted + _region_end_waste) / (double)_allocated;- size_t target_refills = (size_t)((wasted_frac * TargetSurvivorRatio) / TargetPLABWastePct);- if (target_refills == 0) {- target_refills = 1;- }- size_t cur_plab_sz = used() / target_refills;+ // The size of the PLAB caps the amount of space that can be wasted at the+ // end of the collection. In the worst case the last PLAB could be completely+ // empty.+ // This allows us to calculate the new PLAB size to achieve the+ // TargetPLABWastePct given the latest memory usage and that the last buffer+ // will be G1LastPLABAverageOccupancy full.+ //+ // E.g. assume that if in the current GC 100 words were allocated and a+ // TargetPLABWastePct of 10 had been set.+ //+ // So we could waste up to 10 words to meet that percentage. Given that we+ // also assume that that buffer is typically half-full, the new desired PLAB+ // size is set to 20 words.+ //+ // The amount of allocation performed should be independent of the number of+ // threads, so should the maximum waste we can spend in total. So if+ // we used n threads to allocate, each of them can spend maximum waste/n words in+ // a first rough approximation. 
The number of threads only comes into play later+ // when actually retrieving the actual desired PLAB size.+ //+ // After calculating this optimal PLAB size the algorithm applies the usual+ // exponential decaying average over this value to guess the next PLAB size.+ //+ // We account region end waste fully to PLAB allocation (in the calculation of+ // what we consider as "used_for_waste_calculation" below). This is not+ // completely fair, but is a conservative assumption because PLABs may be sized+ // flexibly while we cannot adjust inline allocations.+ // Allocation during GC will try to minimize region end waste so this impact+ // should be minimal.+ //+ // We need to cover overflow when calculating the amount of space actually used+ // by objects in PLABs when subtracting the region end waste.+ // Region end waste may be higher than actual allocation. This may occur if many+ // threads do not allocate anything but a few rather large objects. In this+ // degenerate case the PLAB size would simply quickly tend to minimum PLAB size,+ // which is an okay reaction.+ size_t const used_for_waste_calculation = used() > _region_end_waste ? used() - _region_end_waste : 0;++ size_t const total_waste_allowed = used_for_waste_calculation * TargetPLABWastePct;+ size_t const cur_plab_sz = (size_t)((double)total_waste_allowed / G1LastPLABAverageOccupancy); // Take historical weighted average _filter.sample(cur_plab_sz); // Clip from above and below, and align to object boundary

--- a/src/share/vm/gc/parallel/psParallelCompact.cpp Fri Sep 18 10:46:35 2015 -0700+++ b/src/share/vm/gc/parallel/psParallelCompact.cpp Fri Sep 18 14:21:46 2015 -0700@@ -958,7 +958,7 @@ { // Update the from & to space pointers in space_info, since they are swapped // at each young gen gc. Do the update unconditionally (even though a- // promotion failure does not swap spaces) because an unknown number of minor+ // promotion failure does not swap spaces) because an unknown number of young // collections will have swapped the spaces an unknown number of times. GCTraceTime tm("pre compact", print_phases(), true, &_gc_timer, _gc_tracer.gc_id()); ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

--- a/src/share/vm/gc/parallel/psScavenge.cpp Fri Sep 18 10:46:35 2015 -0700+++ b/src/share/vm/gc/parallel/psScavenge.cpp Fri Sep 18 14:21:46 2015 -0700@@ -597,9 +597,9 @@ // to allow resizes that may have been inhibited by the // relative location of the "to" and "from" spaces.- // Resizing the old gen at minor collects can cause increases+ // Resizing the old gen at young collections can cause increases // that don't feed back to the generation sizing policy until- // a major collection. Don't resize the old gen here.+ // a full collection. Don't resize the old gen here. heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(), size_policy->calculated_survivor_size_in_bytes());

--- a/src/share/vm/gc/serial/defNewGeneration.inline.hpp Fri Sep 18 10:46:35 2015 -0700+++ b/src/share/vm/gc/serial/defNewGeneration.inline.hpp Fri Sep 18 14:21:46 2015 -0700@@ -57,8 +57,8 @@ // each generation, allowing them in turn to examine the modified // field. //- // We could check that p is also in an older generation, but- // dirty cards in the youngest gen are never scanned, so the+ // We could check that p is also in the old generation, but+ // dirty cards in the young gen are never scanned, so the // extra check probably isn't worthwhile. if (GenCollectedHeap::heap()->is_in_reserved(p)) { oop obj = oopDesc::load_decode_heap_oop_not_null(p);

--- a/src/share/vm/gc/serial/tenuredGeneration.cpp Fri Sep 18 10:46:35 2015 -0700+++ b/src/share/vm/gc/serial/tenuredGeneration.cpp Fri Sep 18 14:21:46 2015 -0700@@ -108,7 +108,7 @@ free()); } }- // If we had to expand to accommodate promotions from younger generations+ // If we had to expand to accommodate promotions from the young generation if (!result && _capacity_at_prologue < capacity()) { result = true; if (PrintGC && Verbose) {@@ -140,11 +140,11 @@ // that are of interest at this point. bool current_is_young = GenCollectedHeap::heap()->is_young_gen(current_generation); if (!full && current_is_young) {- // Calculate size of data promoted from the younger generations+ // Calculate size of data promoted from the young generation // before doing the collection. size_t used_before_gc = used();- // If the younger gen collections were skipped, then the+ // If the young gen collection was skipped, then the // number of promoted bytes will be 0 and adding it to the // average will incorrectly lessen the average. It is, however, // also possible that no promotion was needed.

--- a/src/share/vm/gc/shared/genRemSet.hpp Fri Sep 18 10:46:35 2015 -0700+++ b/src/share/vm/gc/shared/genRemSet.hpp Fri Sep 18 14:21:46 2015 -0700@@ -110,13 +110,11 @@ virtual void print() {} // Informs the RS that the given memregion contains no references to- // younger generations.+ // the young generation. virtual void clear(MemRegion mr) = 0;- // Informs the RS that there are no references to generations- // younger than gen from generations gen and older.- // The parameter clear_perm indicates if the perm_gen's- // remembered set should also be processed/cleared.+ // Informs the RS that there are no references to the young generation+ // from old_gen. virtual void clear_into_younger(Generation* old_gen) = 0; // Informs the RS that refs in the given "mr" may have changed

--- a/src/share/vm/gc/shared/generation.hpp Fri Sep 18 10:46:35 2015 -0700+++ b/src/share/vm/gc/shared/generation.hpp Fri Sep 18 14:21:46 2015 -0700@@ -80,7 +80,6 @@ // first two fields are word-sized.) };- class Generation: public CHeapObj<mtGC> { friend class VMStructs; private:@@ -299,8 +298,7 @@ // word of "obj" may have been overwritten with a forwarding pointer, and // also taking care to copy the klass pointer *last*. Returns the new // object if successful, or else NULL.- virtual oop par_promote(int thread_num,- oop obj, markOop m, size_t word_sz);+ virtual oop par_promote(int thread_num, oop obj, markOop m, size_t word_sz); // Informs the current generation that all par_promote_alloc's in the // collection have been completed; any supporting data structures can be@@ -315,7 +313,7 @@ // This generation will collect all younger generations // during a full collection.- virtual bool full_collects_younger_generations() const { return false; }+ virtual bool full_collects_young_generation() const { return false; } // This generation does in-place marking, meaning that mark words // are mutated during the marking phase and presumably reinitialized@@ -370,18 +368,18 @@ // Some generations may require some cleanup or preparation actions before // allowing a collection. The default is to do nothing.- virtual void gc_prologue(bool full) {};+ virtual void gc_prologue(bool full) {} // Some generations may require some cleanup actions after a collection. // The default is to do nothing.- virtual void gc_epilogue(bool full) {};+ virtual void gc_epilogue(bool full) {} // Save the high water marks for the used space in a generation.- virtual void record_spaces_top() {};+ virtual void record_spaces_top() {} // Some generations may need to be "fixed-up" after some allocation // activity to make them parsable again. 
The default is to do nothing.- virtual void ensure_parsability() {};+ virtual void ensure_parsability() {} // Time (in ms) when we were last collected or now if a collection is // in progress.@@ -417,7 +415,7 @@ virtual void adjust_pointers(); // Mark sweep support phase4 virtual void compact();- virtual void post_compact() {ShouldNotReachHere();}+ virtual void post_compact() { ShouldNotReachHere(); } // Support for CMS's rescan. In this general form we return a pointer // to an abstract object that can be used, based on specific previously@@ -432,7 +430,7 @@ // Some generations may require some cleanup actions before allowing // a verification.- virtual void prepare_for_verify() {};+ virtual void prepare_for_verify() {} // Accessing "marks".@@ -483,7 +481,7 @@ // Give each generation an opportunity to do clean up for any // contributed scratch.- virtual void reset_scratch() {};+ virtual void reset_scratch() {} // When an older generation has been collected, and perhaps resized, // this method will be invoked on all younger generations (from older to

--- /dev/null Thu Jan 01 00:00:00 1970 +0000+++ b/src/share/vm/gc/shared/memset_with_concurrent_readers.hpp Fri Sep 18 14:21:46 2015 -0700@@ -0,0 +1,54 @@+/*+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.+ *+ * This code is free software; you can redistribute it and/or modify it+ * under the terms of the GNU General Public License version 2 only, as+ * published by the Free Software Foundation.+ *+ * This code is distributed in the hope that it will be useful, but WITHOUT+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License+ * version 2 for more details (a copy is included in the LICENSE file that+ * accompanied this code).+ *+ * You should have received a copy of the GNU General Public License version+ * 2 along with this work; if not, write to the Free Software Foundation,+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.+ *+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA+ * or visit www.oracle.com if you need additional information or have any+ * questions.+ *+ */++#ifndef SRC_SHARE_VM_GC_SHARED_MEMSETWITHCONCURRENTREADERS_HPP+#define SRC_SHARE_VM_GC_SHARED_MEMSETWITHCONCURRENTREADERS_HPP++#include <stddef.h>+#include <string.h>+#include "utilities/macros.hpp"++// Only used by concurrent collectors.+#if INCLUDE_ALL_GCS++// Fill a block of memory with value, like memset, but with the+// understanding that there may be concurrent readers of that memory.+void memset_with_concurrent_readers(void* to, int value, size_t size);++#ifdef TARGET_ARCH_sparc++// SPARC requires special handling. 
See SPARC-specific definition.++#else+// All others just use memset.++inline void memset_with_concurrent_readers(void* to, int value, size_t size) {+ ::memset(to, value, size);+}++#endif // End of target dispatch.++#endif // INCLUDE_ALL_GCS++#endif // include guard