From christian.thalinger at oracle.com Fri Apr 1 01:01:01 2016 From: christian.thalinger at oracle.com (Christian Thalinger) Date: Thu, 31 Mar 2016 15:01:01 -1000 Subject: JMH and JDK9 In-Reply-To: <56FBFCCD.1000704@oracle.com> References: <56FBFA22.2060204@redhat.com> <56FBFB05.9050706@oracle.com> <56FBFCCD.1000704@oracle.com> Message-ID: <428721B0-47A4-453E-89C2-1D6CA09DEC75@oracle.com> Maybe totally unrelated but I'm seeing these exceptions with Eclipse 4.6M6: !SESSION 2016-03-31 14:59:16.071 ----------------------------------------------- eclipse.buildId=4.6.0.I20160317-0200 java.version=9-internal java.vendor=Oracle Corporation BootLoader constants: OS=macosx, ARCH=x86_64, WS=cocoa, NL=en_US Command-line arguments: -debug !ENTRY org.eclipse.osgi 4 0 2016-03-31 14:59:20.297 !MESSAGE Application error !STACK 1 org.eclipse.e4.core.di.InjectionException: java.lang.NoClassDefFoundError: javax/annotation/PostConstruct at org.eclipse.e4.core.internal.di.InjectorImpl.internalMake(InjectorImpl.java:386) at org.eclipse.e4.core.internal.di.InjectorImpl.make(InjectorImpl.java:294) at org.eclipse.e4.core.contexts.ContextInjectionFactory.make(ContextInjectionFactory.java:162) at org.eclipse.e4.ui.internal.workbench.swt.E4Application.createDefaultHeadlessContext(E4Application.java:490) at org.eclipse.e4.ui.internal.workbench.swt.E4Application.createDefaultContext(E4Application.java:504) at org.eclipse.e4.ui.internal.workbench.swt.E4Application.createE4Workbench(E4Application.java:203) at org.eclipse.ui.internal.Workbench$5.run(Workbench.java:627) at org.eclipse.core.databinding.observable.Realm.runWithDefault(Realm.java:336) at org.eclipse.ui.internal.Workbench.createAndRunWorkbench(Workbench.java:605) at org.eclipse.ui.PlatformUI.createAndRunWorkbench(PlatformUI.java:148) at org.eclipse.ui.internal.ide.application.IDEApplication.start(IDEApplication.java:138) at org.eclipse.equinox.internal.app.EclipseAppHandle.run(EclipseAppHandle.java:196) at org.eclipse.core.runtime.internal.adaptor.EclipseAppLauncher.runApplication(EclipseAppLauncher.java:134) at org.eclipse.core.runtime.internal.adaptor.EclipseAppLauncher.start(EclipseAppLauncher.java:104) at org.eclipse.core.runtime.adaptor.EclipseStarter.run(EclipseStarter.java:388) at org.eclipse.core.runtime.adaptor.EclipseStarter.run(EclipseStarter.java:243) at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:520) at org.eclipse.equinox.launcher.Main.invokeFramework(Main.java:670) at org.eclipse.equinox.launcher.Main.basicRun(Main.java:609) at org.eclipse.equinox.launcher.Main.run(Main.java:1516) at org.eclipse.equinox.launcher.Main.main(Main.java:1489) Caused by: java.lang.NoClassDefFoundError: javax/annotation/PostConstruct at org.eclipse.e4.core.internal.di.InjectorImpl.inject(InjectorImpl.java:151) at org.eclipse.e4.core.internal.di.InjectorImpl.internalMake(InjectorImpl.java:375) ...
23 more Caused by: java.lang.ClassNotFoundException: javax.annotation.PostConstruct cannot be found by org.eclipse.e4.core.di_1.6.0.v20160211-1614 at org.eclipse.osgi.internal.loader.BundleLoader.findClassInternal(BundleLoader.java:394) at org.eclipse.osgi.internal.loader.BundleLoader.findClass(BundleLoader.java:357) at org.eclipse.osgi.internal.loader.BundleLoader.findClass(BundleLoader.java:349) at org.eclipse.osgi.internal.loader.ModuleClassLoader.loadClass(ModuleClassLoader.java:161) at java.lang.ClassLoader.loadClass(ClassLoader.java:358) ... 25 more !ENTRY org.eclipse.e4.ui.workbench 4 0 2016-03-31 14:59:20.304 !MESSAGE FrameworkEvent ERROR !STACK 0 java.lang.NoClassDefFoundError: javax/annotation/PreDestroy at org.eclipse.e4.core.internal.di.InjectorImpl.disposed(InjectorImpl.java:426) at org.eclipse.e4.core.internal.di.Requestor.disposed(Requestor.java:154) at org.eclipse.e4.core.internal.contexts.ContextObjectSupplier$ContextInjectionListener.update(ContextObjectSupplier.java:78) at org.eclipse.e4.core.internal.contexts.TrackableComputationExt.update(TrackableComputationExt.java:111) at org.eclipse.e4.core.internal.contexts.TrackableComputationExt.handleInvalid(TrackableComputationExt.java:74) at org.eclipse.e4.core.internal.contexts.EclipseContext.dispose(EclipseContext.java:175) at org.eclipse.e4.core.internal.contexts.osgi.EclipseContextOSGi.dispose(EclipseContextOSGi.java:106) at org.eclipse.e4.core.internal.contexts.osgi.EclipseContextOSGi.bundleChanged(EclipseContextOSGi.java:139) at org.eclipse.osgi.internal.framework.BundleContextImpl.dispatchEvent(BundleContextImpl.java:903) at org.eclipse.osgi.framework.eventmgr.EventManager.dispatchEvent(EventManager.java:230) at org.eclipse.osgi.framework.eventmgr.ListenerQueue.dispatchEventSynchronous(ListenerQueue.java:148) at org.eclipse.osgi.internal.framework.EquinoxEventPublisher.publishBundleEventPrivileged(EquinoxEventPublisher.java:213) at org.eclipse.osgi.internal.framework.EquinoxEventPublisher.publishBundleEvent(EquinoxEventPublisher.java:120) at org.eclipse.osgi.internal.framework.EquinoxEventPublisher.publishBundleEvent(EquinoxEventPublisher.java:112) at org.eclipse.osgi.internal.framework.EquinoxContainerAdaptor.publishModuleEvent(EquinoxContainerAdaptor.java:156) at org.eclipse.osgi.container.Module.publishEvent(Module.java:476) at org.eclipse.osgi.container.Module.doStop(Module.java:634) at org.eclipse.osgi.container.Module.stop(Module.java:498) at org.eclipse.osgi.container.SystemModule.stop(SystemModule.java:191) at org.eclipse.osgi.internal.framework.EquinoxBundle$SystemBundle$EquinoxSystemModule$1.run(EquinoxBundle.java:165) at java.lang.Thread.run(Thread.java:804) Caused by: java.lang.ClassNotFoundException: javax.annotation.PreDestroy cannot be found by org.eclipse.e4.core.di_1.6.0.v20160211-1614 at org.eclipse.osgi.internal.loader.BundleLoader.findClassInternal(BundleLoader.java:394) at org.eclipse.osgi.internal.loader.BundleLoader.findClass(BundleLoader.java:357) at org.eclipse.osgi.internal.loader.BundleLoader.findClass(BundleLoader.java:349) at org.eclipse.osgi.internal.loader.ModuleClassLoader.loadClass(ModuleClassLoader.java:161) at java.lang.ClassLoader.loadClass(ClassLoader.java:358) ... 21 more > On Mar 30, 2016, at 6:20 AM, Aleksey Shipilev wrote: > > On 03/30/2016 07:12 PM, Alan Bateman wrote: >> On 30/03/2016 17:09, Andrew Haley wrote: >>> I'm totally stuck trying to use jmh to test my JDK9 code. I can't >>> build JMH itself with JDK9 because I get a weird error in some Eclipse >>> classes in Maven. 
So, I've tried to run Maven with JDK8 but use a >>> >>> error: Annotation generator had thrown the exception. >>> java.lang.NoClassDefFoundError: javax/annotation/Generated >>> >>> My guess is that this has something to do with modules, but I don't >>> know that for sure. I need to compile my test code against JDK9 >>> because the methods I'm testing do not exist in JDK8. >>> >>> Is there a recipe for using JMH with JDK9 classes? What do people do? >>> >> There is discussion on this in this bug: >> https://bugs.openjdk.java.net/browse/JDK-8152842 >> >> but in the mean-time it looks like JMH no longer uses @Generated. > > Yeah, hold on, Andrew! Jigsaw-enabled JMH is coming this week. > > Meanwhile, there are workarounds: > a) Compile with JDK 9 EA b110; (not b111) > b) Checkout and build JMH 1.12-SNAPSHOT (see instructions on JMH page); > > -Aleksey > > From tobias.hartmann at oracle.com Fri Apr 1 06:30:45 2016 From: tobias.hartmann at oracle.com (Tobias Hartmann) Date: Fri, 1 Apr 2016 08:30:45 +0200 Subject: [9] RFR(S): 8074553: Crash with assert(!is_unloaded()) failed: should not call follow on unloaded nmethod In-Reply-To: <56FD4720.90800@oracle.com> References: <56FD24A3.1090504@oracle.com> <56FD4720.90800@oracle.com> Message-ID: <56FE1595.20603@oracle.com> Hi Vladimir, On 31.03.2016 17:49, Vladimir Kozlov wrote: > On 3/31/16 6:22 AM, Tobias Hartmann wrote: >> Hi, >> >> please review the following patch: >> >> https://bugs.openjdk.java.net/browse/JDK-8074553 >> http://cr.openjdk.java.net/~thartmann/8074553/webrev.00/ >> >> While the code cache sweeper processes a nmethod in NMethodSweeper::process_nmethod(), safepoints may happen and the GC may unload the currently processed nmethod. To prevent this, the sweeper uses a NMethodMarker which saves the nmethod in CodeCacheSweeperThread::_scanned_nmethod. The nmethod is then passed to the GC through a CodeBlobClosure in CodeCacheSweeperThread::oops_do() to keep it alive when the GC iterates over all threads. >> >> The problem is that G1 calls nmethods_do() on all threads in the remark phase (see G1RemarkThreadsClosure::do_thread()) which is not overwritten by the sweeper thread. Since the currently processed nmethod is not passed through nmethods_do() by any thread, it is unloaded and we later hit the assert when encountering the nmethod through oops_do(). >> >> Mikael Gerdin and Stefan Karlsson (thanks again!) suggested to overwrite nmethods_do() as well in CodeCacheSweeperThread and pass _scanned_nmethod to the closure. I also modified Threads::nmethods_do() to ignore the sweeper thread because we want to avoid marking the _scanned_nmethod as seen on the stack when scanning stacks from the sweeper (the nmethod may already be zombie and only referenced by the sweeper). > > I did not get this. If you exclude CodeCacheSweeperThread in Threads::nmethods_do() then CodeCacheSweeperThread::nmethods_do() will not be called. What is the point? The point is that the GC code calls JavaThread::nmethods_do() (which I modified to include _scanned_nmethod) and not Threads::nmethods_do(). The latter one is only used by the CodeCacheSweeperThread to mark nmethods active on the Java stack and should therefore *exclude* _scanned_nmethod. This is because _scanned_nmethod should only be prevented from being unloaded by the GC but the hotness value or stack marking should not be affected (it may very well a zombie already). Please also note that there is Thread*s*::nmethods_do() and Thread::nmethods_do() which is a bit confusing. 
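To make the two paths concrete, here is a rough sketch of the idea (simplified, not the actual webrev; the real declarations in thread.hpp and sweeper.hpp look different):

// Sketch: the GC-facing override keeps the in-flight nmethod alive.
class CodeCacheSweeperThread : public JavaThread {
  nmethod* _scanned_nmethod;  // nmethod currently being processed by the sweeper
 public:
  // Reached by the per-thread GC iteration (e.g. G1RemarkThreadsClosure);
  // needs to be virtual in JavaThread so the GC actually ends up here.
  virtual void nmethods_do(CodeBlobClosure* cf) {
    JavaThread::nmethods_do(cf);           // the sweeper has no Java frames, but be safe
    if (_scanned_nmethod != NULL) {
      cf->do_code_blob(_scanned_nmethod);  // prevent unloading while the sweeper holds it
    }
  }
};

// Sketch: Threads::nmethods_do() is only used to mark nmethods active on Java
// stacks, so the sweeper thread is skipped; _scanned_nmethod may already be a
// zombie that only the sweeper references and must not look "on stack".
void Threads::nmethods_do(CodeBlobClosure* cf) {
  ALL_JAVA_THREADS(p) {
    if (!p->is_Code_cache_sweeper_thread()) {
      p->nmethods_do(cf);
    }
  }
}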
However, Mikael is right that nmethods_do() should be virtual (like oops_do() is) to allow the GC code to call the CodeCacheSweeperThread::nmethods_do() version. Thanks, Tobias > > Thanks, > Vladimir > >> >> Unfortunately, this bug is extremely hard to reproduce (it showed up 18 times since early 2015). I was able to reproduce it only once after thousands of runs and therefore not often enough to verify the fix. However, I'm very confident that this solves the problem. >> >> Tested with JPRT and RBT (running). >> >> Thanks, >> Tobias >> From tobias.hartmann at oracle.com Fri Apr 1 07:28:06 2016 From: tobias.hartmann at oracle.com (Tobias Hartmann) Date: Fri, 1 Apr 2016 09:28:06 +0200 Subject: [9] RFR(S): 8074553: Crash with assert(!is_unloaded()) failed: should not call follow on unloaded nmethod In-Reply-To: <56FD4C8D.7050201@oracle.com> References: <56FD24A3.1090504@oracle.com> <56FD4720.90800@oracle.com> <56FD4C8D.7050201@oracle.com> Message-ID: <56FE2306.5080705@oracle.com> Hi Mikael, On 31.03.2016 18:13, Mikael Gerdin wrote: > Hi, > > On 2016-03-31 17:49, Vladimir Kozlov wrote: >> On 3/31/16 6:22 AM, Tobias Hartmann wrote: >>> Hi, >>> >>> please review the following patch: >>> >>> https://bugs.openjdk.java.net/browse/JDK-8074553 >>> http://cr.openjdk.java.net/~thartmann/8074553/webrev.00/ >>> >>> While the code cache sweeper processes a nmethod in >>> NMethodSweeper::process_nmethod(), safepoints may happen and the GC >>> may unload the currently processed nmethod. To prevent this, the >>> sweeper uses a NMethodMarker which saves the nmethod in >>> CodeCacheSweeperThread::_scanned_nmethod. The nmethod is then passed >>> to the GC through a CodeBlobClosure in >>> CodeCacheSweeperThread::oops_do() to keep it alive when the GC >>> iterates over all threads. >>> >>> The problem is that G1 calls nmethods_do() on all threads in the >>> remark phase (see G1RemarkThreadsClosure::do_thread()) which is not >>> overwritten by the sweeper thread. Since the currently processed >>> nmethod is not passed through nmethods_do() by any thread, it is >>> unloaded and we later hit the assert when encountering the nmethod >>> through oops_do(). >>> >>> Mikael Gerdin and Stefan Karlsson (thanks again!) suggested to >>> overwrite nmethods_do() as well in CodeCacheSweeperThread and pass >>> _scanned_nmethod to the closure. I also modified >>> Threads::nmethods_do() to ignore the sweeper thread because we want to >>> avoid marking the _scanned_nmethod as seen on the stack when scanning >>> stacks from the sweeper (the nmethod may already be zombie and only >>> referenced by the sweeper). >> >> I did not get this. If you exclude CodeCacheSweeperThread in >> Threads::nmethods_do() then CodeCacheSweeperThread::nmethods_do() will >> not be called. What is the point? > > The GC code in question iterates over the threads and calls nmethods_do on each JavaThread and the VMThread after claiming them with atomic operations to achieve parallelism. > > There is still something a bit fishy here though. > Thread::nmethods_do is not virtual, so one must be careful to downcast one's Thread* to JavaThread before calling it. And since Tobias' change does not make that or JavaThread::nmethods_do (which actually shadows and does not override the methods) virtual we can't reach the new code unless the GC code is changed to downcast to CodeCacheSweeperThread before calling nmethods_do. Thanks for taking a look. Of course, you are right. I just assumed that Thread::nmethods_do() is virtual and missed that it's not. 
> I still believe that what Tobias is attempting to do is a necessary fix but this only shows how hard this is to reproduce. > > Perhaps Thread::nmethods_do should simply be removed (along with any calls to it) and JavaThread::nmethods_do should then be made virtual. Yes, I agree. Here is the new webrev: http://cr.openjdk.java.net/~thartmann/8074553/webrev.01/ I removed the empty Thread::nmethods_do() and all the (static) calls to it. Thanks, Tobias > > /Mikael > >> >> Thanks, >> Vladimir >> >>> >>> Unfortunately, this bug is extremely hard to reproduce (it showed up >>> 18 times since early 2015). I was able to reproduce it only once after >>> thousands of runs and therefore not often enough to verify the fix. >>> However, I'm very confident that this solves the problem. >>> >>> Tested with JPRT and RBT (running). >>> >>> Thanks, >>> Tobias >>> From aph at redhat.com Fri Apr 1 07:43:23 2016 From: aph at redhat.com (Andrew Haley) Date: Fri, 1 Apr 2016 08:43:23 +0100 Subject: JMH and JDK9 In-Reply-To: <428721B0-47A4-453E-89C2-1D6CA09DEC75@oracle.com> References: <56FBFA22.2060204@redhat.com> <56FBFB05.9050706@oracle.com> <56FBFCCD.1000704@oracle.com> <428721B0-47A4-453E-89C2-1D6CA09DEC75@oracle.com> Message-ID: <56FE269B.3030901@redhat.com> On 01/04/16 02:01, Christian Thalinger wrote: > Maybe totally unrelated but I'm seeing these exceptions with Eclipse 4.6M6: I doubt very much that it's unrelated. I don't quite understand what is going on here: why have annotations stopped working? I know they're in a separate module, but it's supposed to be part of the platform. Andrew. From Alan.Bateman at oracle.com Fri Apr 1 07:57:34 2016 From: Alan.Bateman at oracle.com (Alan Bateman) Date: Fri, 1 Apr 2016 08:57:34 +0100 Subject: JMH and JDK9 In-Reply-To: <428721B0-47A4-453E-89C2-1D6CA09DEC75@oracle.com> References: <56FBFA22.2060204@redhat.com> <56FBFB05.9050706@oracle.com> <56FBFCCD.1000704@oracle.com> <428721B0-47A4-453E-89C2-1D6CA09DEC75@oracle.com> Message-ID: <56FE29EE.1090801@oracle.com> On 01/04/2016 02:01, Christian Thalinger wrote: > Maybe totally unrelated but I'm seeing these exceptions with Eclipse > 4.6M6: > > I don't recognize this but there has been at least one issue with Eclipse [1] where it needed a configuration change due to the ongoing effort to move non-core classes out of the boot loader. In this case then the types are in module java.annotations.common. We moved those types to the extension class loader (now called the "platform class loader" btw) last year. In JDK 8 and older then the types in this module were defined to the boot loader. If changing the defining class loader has caused this then I would have expected we would have heard about it before now but perhaps there aren't too many people running Eclipse on the latest JDK 9 builds. One other thing about these so-called "Common Annotations" is that Java SE only defines a small subset whereas Java EE defines all the annotations that JSR-250 defined. If they have been putting the EE version on the class path then it's not going to work now because we can't split packages between the class path and modules. The way to upgrade is to deploy the EE version as a module on the "upgrade module path". I have no idea if they are using it of course but mentioning it in case it might be relevant. Are you going to submit a bug to Eclipse on this?
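Just to illustrate the upgrade module path idea, purely as a sketch (the flag spelling has been changing between EA builds, the EE annotations would first need to be packaged as a modular jar, and the path below is made up):

java -upgrademodulepath /path/to/ee-annotations -cp app.jar com.example.Main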
-Alan [1] https://bugs.eclipse.org/bugs/show_bug.cgi?id=466683 From mikael.gerdin at oracle.com Fri Apr 1 08:24:16 2016 From: mikael.gerdin at oracle.com (Mikael Gerdin) Date: Fri, 1 Apr 2016 10:24:16 +0200 Subject: [9] RFR(S): 8074553: Crash with assert(!is_unloaded()) failed: should not call follow on unloaded nmethod In-Reply-To: <56FE2306.5080705@oracle.com> References: <56FD24A3.1090504@oracle.com> <56FD4720.90800@oracle.com> <56FD4C8D.7050201@oracle.com> <56FE2306.5080705@oracle.com> Message-ID: <56FE3030.6070601@oracle.com> Hi Tobias, On 2016-04-01 09:28, Tobias Hartmann wrote: > Hi Mikael, > > On 31.03.2016 18:13, Mikael Gerdin wrote: >> Hi, >> >> On 2016-03-31 17:49, Vladimir Kozlov wrote: >>> On 3/31/16 6:22 AM, Tobias Hartmann wrote: >>>> Hi, >>>> >>>> please review the following patch: >>>> >>>> https://bugs.openjdk.java.net/browse/JDK-8074553 >>>> http://cr.openjdk.java.net/~thartmann/8074553/webrev.00/ >>>> >>>> While the code cache sweeper processes a nmethod in >>>> NMethodSweeper::process_nmethod(), safepoints may happen and the GC >>>> may unload the currently processed nmethod. To prevent this, the >>>> sweeper uses a NMethodMarker which saves the nmethod in >>>> CodeCacheSweeperThread::_scanned_nmethod. The nmethod is then passed >>>> to the GC through a CodeBlobClosure in >>>> CodeCacheSweeperThread::oops_do() to keep it alive when the GC >>>> iterates over all threads. >>>> >>>> The problem is that G1 calls nmethods_do() on all threads in the >>>> remark phase (see G1RemarkThreadsClosure::do_thread()) which is not >>>> overwritten by the sweeper thread. Since the currently processed >>>> nmethod is not passed through nmethods_do() by any thread, it is >>>> unloaded and we later hit the assert when encountering the nmethod >>>> through oops_do(). >>>> >>>> Mikael Gerdin and Stefan Karlsson (thanks again!) suggested to >>>> overwrite nmethods_do() as well in CodeCacheSweeperThread and pass >>>> _scanned_nmethod to the closure. I also modified >>>> Threads::nmethods_do() to ignore the sweeper thread because we want to >>>> avoid marking the _scanned_nmethod as seen on the stack when scanning >>>> stacks from the sweeper (the nmethod may already be zombie and only >>>> referenced by the sweeper). >>> >>> I did not get this. If you exclude CodeCacheSweeperThread in >>> Threads::nmethods_do() then CodeCacheSweeperThread::nmethods_do() will >>> not be called. What is the point? >> >> The GC code in question iterates over the threads and calls nmethods_do on each JavaThread and the VMThread after claiming them with atomic operations to achieve parallelism. >> >> There is still something a bit fishy here though. >> Thread::nmethods_do is not virtual, so one must be careful to downcast one's Thread* to JavaThread before calling it. And since Tobias' change does not make that or JavaThread::nmethods_do (which actually shadows and does not override the methods) virtual we can't reach the new code unless the GC code is changed to downcast to CodeCacheSweeperThread before calling nmethods_do. > > Thanks for taking a look. Of course, you are right. I just assumed that Thread::nmethods_do() is virtual and missed that it's not. > >> I still believe that what Tobias is attempting to do is a necessary fix but this only shows how hard this is to reproduce. >> >> Perhaps Thread::nmethods_do should simply be removed (along with any calls to it) and JavaThread::nmethods_do should then be made virtual. > > Yes, I agree. 
Here is the new webrev: > http://cr.openjdk.java.net/~thartmann/8074553/webrev.01/ > > I removed the empty Thread::nmethods_do() and all the (static) calls to it. I kind of agree with Vladimir that it's unfortunate to have to check for the sweeper thread in Threads::nmethods_do() but I don't have a good suggestion for an easy fix. From my point of view this fix is ready to go in but perhaps it would be a good idea to also think about if this could be cleaned up somehow. For me - as a GC person - it's hard to see why the sweeper thread should be a JavaThread at all, it seems like more of a VM-only thread to me. /Mikael > > Thanks, > Tobias > >> >> /Mikael >> >>> >>> Thanks, >>> Vladimir >>> >>>> >>>> Unfortunately, this bug is extremely hard to reproduce (it showed up >>>> 18 times since early 2015). I was able to reproduce it only once after >>>> thousands of runs and therefore not often enough to verify the fix. >>>> However, I'm very confident that this solves the problem. >>>> >>>> Tested with JPRT and RBT (running). >>>> >>>> Thanks, >>>> Tobias >>>> From robbin.ehn at oracle.com Fri Apr 1 08:53:31 2016 From: robbin.ehn at oracle.com (Robbin Ehn) Date: Fri, 1 Apr 2016 10:53:31 +0200 Subject: RFR: 8153254: Delegate (v)write from Log to LogTagSet Message-ID: <56FE370B.3020204@oracle.com> Hi all, Please review this patch. This moves log writes methods to LogTagSet. Bug: https://bugs.openjdk.java.net/browse/JDK-8153254 Webrev: http://cr.openjdk.java.net/~rehn/8153254/webrev/ Tested with internal vm test and jprt. Thanks! /Robbin From tobias.hartmann at oracle.com Fri Apr 1 09:35:28 2016 From: tobias.hartmann at oracle.com (Tobias Hartmann) Date: Fri, 1 Apr 2016 11:35:28 +0200 Subject: [9] RFR(S): 8074553: Crash with assert(!is_unloaded()) failed: should not call follow on unloaded nmethod In-Reply-To: <56FE3030.6070601@oracle.com> References: <56FD24A3.1090504@oracle.com> <56FD4720.90800@oracle.com> <56FD4C8D.7050201@oracle.com> <56FE2306.5080705@oracle.com> <56FE3030.6070601@oracle.com> Message-ID: <56FE40E0.9060307@oracle.com> Hi Mikael, On 01.04.2016 10:24, Mikael Gerdin wrote: > Hi Tobias, > > On 2016-04-01 09:28, Tobias Hartmann wrote: >> Hi Mikael, >> >> On 31.03.2016 18:13, Mikael Gerdin wrote: >>> Hi, >>> >>> On 2016-03-31 17:49, Vladimir Kozlov wrote: >>>> On 3/31/16 6:22 AM, Tobias Hartmann wrote: >>>>> Hi, >>>>> >>>>> please review the following patch: >>>>> >>>>> https://bugs.openjdk.java.net/browse/JDK-8074553 >>>>> http://cr.openjdk.java.net/~thartmann/8074553/webrev.00/ >>>>> >>>>> While the code cache sweeper processes a nmethod in >>>>> NMethodSweeper::process_nmethod(), safepoints may happen and the GC >>>>> may unload the currently processed nmethod. To prevent this, the >>>>> sweeper uses a NMethodMarker which saves the nmethod in >>>>> CodeCacheSweeperThread::_scanned_nmethod. The nmethod is then passed >>>>> to the GC through a CodeBlobClosure in >>>>> CodeCacheSweeperThread::oops_do() to keep it alive when the GC >>>>> iterates over all threads. >>>>> >>>>> The problem is that G1 calls nmethods_do() on all threads in the >>>>> remark phase (see G1RemarkThreadsClosure::do_thread()) which is not >>>>> overwritten by the sweeper thread. Since the currently processed >>>>> nmethod is not passed through nmethods_do() by any thread, it is >>>>> unloaded and we later hit the assert when encountering the nmethod >>>>> through oops_do(). >>>>> >>>>> Mikael Gerdin and Stefan Karlsson (thanks again!) 
suggested to >>>>> overwrite nmethods_do() as well in CodeCacheSweeperThread and pass >>>>> _scanned_nmethod to the closure. I also modified >>>>> Threads::nmethods_do() to ignore the sweeper thread because we want to >>>>> avoid marking the _scanned_nmethod as seen on the stack when scanning >>>>> stacks from the sweeper (the nmethod may already be zombie and only >>>>> referenced by the sweeper). >>>> >>>> I did not get this. If you exclude CodeCacheSweeperThread in >>>> Threads::nmethods_do() then CodeCacheSweeperThread::nmethods_do() will >>>> not be called. What is the point? >>> >>> The GC code in question iterates over the threads and calls nmethods_do on each JavaThread and the VMThread after claiming them with atomic operations to achieve parallelism. >>> >>> There is still something a bit fishy here though. >>> Thread::nmethods_do is not virtual, so one must be careful to downcast one's Thread* to JavaThread before calling it. And since Tobias' change does not make that or JavaThread::nmethods_do (which actually shadows and does not override the methods) virtual we can't reach the new code unless the GC code is changed to downcast to CodeCacheSweeperThread before calling nmethods_do. >> >> Thanks for taking a look. Of course, you are right. I just assumed that Thread::nmethods_do() is virtual and missed that it's not. >> >>> I still believe that what Tobias is attempting to do is a necessary fix but this only shows how hard this is to reproduce. >>> >>> Perhaps Thread::nmethods_do should simply be removed (along with any calls to it) and JavaThread::nmethods_do should then be made virtual. >> >> Yes, I agree. Here is the new webrev: >> http://cr.openjdk.java.net/~thartmann/8074553/webrev.01/ >> >> I removed the empty Thread::nmethods_do() and all the (static) calls to it. > > I kind of agree with Vladimir that it's unfortunate to have to check for the sweeper thread in Threads::nmethods_do() but I don't have a good suggestion for an easy fix. > > From my point of view this fix is ready to go in but perhaps it would be a good idea to also think about if this could be cleaned up somehow. > > For me - as a GC person - it's hard to see why the sweeper thread should be a JavaThread at all, it seems like more of a VM-only thread to me. Yes, I agree that it is kind of counterintuitive that the sweeper thread is a JavaThread. I think this is because before JDK-8046809 [1] sweeping was done by the CompilerThreads which are JavaThreads as well. We could make the CodeCacheSweeperThread a subclass of NamedThread similar to ConcurrentGCThread but the problem is that the GC calls oops_do() only on the JavaThreads and the VMThread (see Threads::oops_do() or ThreadRootsMarkingTask::do_it()). That code would have to be modified to include the sweeper thread as well which seems a bit hacky to me. What do you think? Thanks, Tobias [1] https://bugs.openjdk.java.net/browse/JDK-8046809 > > /Mikael > >> >> Thanks, >> Tobias >> >>> >>> /Mikael >>> >>>> >>>> Thanks, >>>> Vladimir >>>> >>>>> >>>>> Unfortunately, this bug is extremely hard to reproduce (it showed up >>>>> 18 times since early 2015). I was able to reproduce it only once after >>>>> thousands of runs and therefore not often enough to verify the fix. >>>>> However, I'm very confident that this solves the problem. >>>>> >>>>> Tested with JPRT and RBT (running). 
>>>>> >>>>> Thanks, >>>>> Tobias >>>>> From mikael.gerdin at oracle.com Fri Apr 1 10:59:30 2016 From: mikael.gerdin at oracle.com (Mikael Gerdin) Date: Fri, 1 Apr 2016 12:59:30 +0200 Subject: [9] RFR(S): 8074553: Crash with assert(!is_unloaded()) failed: should not call follow on unloaded nmethod In-Reply-To: <56FE40E0.9060307@oracle.com> References: <56FD24A3.1090504@oracle.com> <56FD4720.90800@oracle.com> <56FD4C8D.7050201@oracle.com> <56FE2306.5080705@oracle.com> <56FE3030.6070601@oracle.com> <56FE40E0.9060307@oracle.com> Message-ID: <56FE5492.1080707@oracle.com> Tobias, On 2016-04-01 11:35, Tobias Hartmann wrote: > Hi Mikael, > > On 01.04.2016 10:24, Mikael Gerdin wrote: >> Hi Tobias, >> >> On 2016-04-01 09:28, Tobias Hartmann wrote: >>> Hi Mikael, >>> >>> On 31.03.2016 18:13, Mikael Gerdin wrote: >>>> Hi, >>>> >>>> On 2016-03-31 17:49, Vladimir Kozlov wrote: >>>>> On 3/31/16 6:22 AM, Tobias Hartmann wrote: >>>>>> Hi, >>>>>> >>>>>> please review the following patch: >>>>>> >>>>>> https://bugs.openjdk.java.net/browse/JDK-8074553 >>>>>> http://cr.openjdk.java.net/~thartmann/8074553/webrev.00/ >>>>>> >>>>>> While the code cache sweeper processes a nmethod in >>>>>> NMethodSweeper::process_nmethod(), safepoints may happen >>>>>> and the GC may unload the currently processed nmethod. To >>>>>> prevent this, the sweeper uses a NMethodMarker which saves >>>>>> the nmethod in CodeCacheSweeperThread::_scanned_nmethod. >>>>>> The nmethod is then passed to the GC through a >>>>>> CodeBlobClosure in CodeCacheSweeperThread::oops_do() to >>>>>> keep it alive when the GC iterates over all threads. >>>>>> >>>>>> The problem is that G1 calls nmethods_do() on all threads >>>>>> in the remark phase (see >>>>>> G1RemarkThreadsClosure::do_thread()) which is not >>>>>> overwritten by the sweeper thread. Since the currently >>>>>> processed nmethod is not passed through nmethods_do() by >>>>>> any thread, it is unloaded and we later hit the assert when >>>>>> encountering the nmethod through oops_do(). >>>>>> >>>>>> Mikael Gerdin and Stefan Karlsson (thanks again!) suggested >>>>>> to overwrite nmethods_do() as well in >>>>>> CodeCacheSweeperThread and pass _scanned_nmethod to the >>>>>> closure. I also modified Threads::nmethods_do() to ignore >>>>>> the sweeper thread because we want to avoid marking the >>>>>> _scanned_nmethod as seen on the stack when scanning stacks >>>>>> from the sweeper (the nmethod may already be zombie and >>>>>> only referenced by the sweeper). >>>>> >>>>> I did not get this. If you exclude CodeCacheSweeperThread in >>>>> Threads::nmethods_do() then >>>>> CodeCacheSweeperThread::nmethods_do() will not be called. >>>>> What is the point? >>>> >>>> The GC code in question iterates over the threads and calls >>>> nmethods_do on each JavaThread and the VMThread after claiming >>>> them with atomic operations to achieve parallelism. >>>> >>>> There is still something a bit fishy here though. >>>> Thread::nmethods_do is not virtual, so one must be careful to >>>> downcast one's Thread* to JavaThread before calling it. And >>>> since Tobias' change does not make that or >>>> JavaThread::nmethods_do (which actually shadows and does not >>>> override the methods) virtual we can't reach the new code >>>> unless the GC code is changed to downcast to >>>> CodeCacheSweeperThread before calling nmethods_do. >>> >>> Thanks for taking a look. Of course, you are right. I just >>> assumed that Thread::nmethods_do() is virtual and missed that >>> it's not. 
>>> >>>> I still believe that what Tobias is attempting to do is a >>>> necessary fix but this only shows how hard this is to >>>> reproduce. >>>> >>>> Perhaps Thread::nmethods_do should simply be removed (along >>>> with any calls to it) and JavaThread::nmethods_do should then >>>> be made virtual. >>> >>> Yes, I agree. Here is the new webrev: >>> http://cr.openjdk.java.net/~thartmann/8074553/webrev.01/ >>> >>> I removed the empty Thread::nmethods_do() and all the (static) >>> calls to it. >> >> I kind of agree with Vladimir that it's unfortunate to have to >> check for the sweeper thread in Threads::nmethods_do() but I don't >> have a good suggestion for an easy fix. >> >> From my point of view this fix is ready to go in but perhaps it >> would be a good idea to also think about if this could be cleaned >> up somehow. >> >> For me - as a GC person - it's hard to see why the sweeper thread >> should be a JavaThread at all, it seems like more of a VM-only >> thread to me. > > Yes, I agree that it is kind of counterintuitive that the sweeper > thread is a JavaThread. I think this is because before JDK-8046809 > [1] sweeping was done by the CompilerThreads which are JavaThreads as > well. > > We could make the CodeCacheSweeperThread a subclass of NamedThread > similar to ConcurrentGCThread but the problem is that the GC calls > oops_do() only on the JavaThreads and the VMThread (see > Threads::oops_do() or ThreadRootsMarkingTask::do_it()). That code > would have to be modified to include the sweeper thread as well which > seems a bit hacky to me. In a way I consider it less hacky to make it explicit that the sweeper thread is a special kind of thread and that due to its interaction with the code cache the GC may have to interact with it. The more I look at this code the more strange stuff I seem to find. It appears, for example, that the VM thread and JavaThreads are the only threads who have their handle areas examined by the GC, even though other threads do have handle areas (and JNI handle areas) these are not seen by GC since it's only interested in JavaThreads and the VMThread. > > What do you think? Thinking out loud a bit it sounds like there is in fact some concept of threads which need to interact with GC, have handle areas, and so on and perhaps these threads should be based on a subclass of Thread which provides this interface, support for nmethod iteration and the "claim parity" hack which the GC uses to parallelize scanning of the threads. Anyway, as I said I think your fix is good to go as-is but in general I think this code needs to be cleaned up :) /Mikael > > Thanks, Tobias > > [1] https://bugs.openjdk.java.net/browse/JDK-8046809 > >> >> /Mikael >> >>> >>> Thanks, Tobias >>> >>>> >>>> /Mikael >>>> >>>>> >>>>> Thanks, Vladimir >>>>> >>>>>> >>>>>> Unfortunately, this bug is extremely hard to reproduce (it >>>>>> showed up 18 times since early 2015). I was able to >>>>>> reproduce it only once after thousands of runs and >>>>>> therefore not often enough to verify the fix. However, I'm >>>>>> very confident that this solves the problem. >>>>>> >>>>>> Tested with JPRT and RBT (running). 
>>>>>> >>>>>> Thanks, Tobias >>>>>> From dmitry.fazunenko at oracle.com Fri Apr 1 11:50:24 2016 From: dmitry.fazunenko at oracle.com (Dmitry Fazunenko) Date: Fri, 1 Apr 2016 14:50:24 +0300 Subject: RFR(S): 8150899 - [TESTBUG] Split hotspot_all job into smaller jobs In-Reply-To: <065a01d18b81$0dcee540$296cafc0$@oracle.com> References: <194101d189a0$d08736b0$7195a410$@oracle.com> <56FD55DF.4070509@oracle.com> <065a01d18b81$0dcee540$296cafc0$@oracle.com> Message-ID: <56FE6080.1040006@oracle.com> Christian, Moving tests (renaming) is very bad practice, because we lose the history associated with the tests in JIRA and in the execution system. Fortunately the story of stress/gc is not so long and we can sacrifice it. So, I'm okay with moving stress/gc to gc/stress. Thanks, Dima On 31.03.2016 22:10, Christian Tornqvist wrote: > Hi Dima, > > I merged with your change for 8152422. The tests in stress/gc should be > moved to gc/stress instead, I'll file a bug for this. > > Thanks, > Christian > > -----Original Message----- > From: Dmitry Fazunenko [mailto:dmitry.fazunenko at oracle.com] > Sent: Thursday, March 31, 2016 12:53 PM > To: Christian Tornqvist ; > hotspot-dev at openjdk.java.net > Subject: Re: RFR(S): 8150899 - [TESTBUG] Split hotspot_all job into smaller > jobs > > Hi Christian, > > Please note that hotspot_gc has been split up recently: > https://bugs.openjdk.java.net/browse/JDK-8152422 > so you need to adjust your change a little. > > GC tests besides gc/ also comprise stress/gc/. Please update the hotspot_gc > group to include stress tests as well. > > Other changes look good to me. > > Thanks, > Dima > > > > > On 29.03.2016 12:53, Christian Tornqvist wrote: >> Hi everyone, >> >> >> >> Please review this small change that splits the hotspot_all group used >> in our testing into smaller batches. Since the >> hotspot_compiler/gc/etc.. names were already used by JPRT but didn't >> reflect a complete set of tests for that component area, these groups >> have been renamed to hotspot_fast_compiler etc to reflect the intent >> of these being a faster subset of the subcomponent tests. >> >> >> >> Webrevs: >> >> http://cr.openjdk.java.net/~ctornqvi/webrev/8150899/webrev.hotspot.00/ >> >> http://cr.openjdk.java.net/~ctornqvi/webrev/8150899/webrev.root.00/ >> >> >> >> Bug is unfortunately not visible externally.
>> >> >> >> Thanks, >> >> Christian >> >> >> > From tobias.hartmann at oracle.com Fri Apr 1 13:23:31 2016 From: tobias.hartmann at oracle.com (Tobias Hartmann) Date: Fri, 1 Apr 2016 15:23:31 +0200 Subject: [9] RFR(S): 8074553: Crash with assert(!is_unloaded()) failed: should not call follow on unloaded nmethod In-Reply-To: <56FE5492.1080707@oracle.com> References: <56FD24A3.1090504@oracle.com> <56FD4720.90800@oracle.com> <56FD4C8D.7050201@oracle.com> <56FE2306.5080705@oracle.com> <56FE3030.6070601@oracle.com> <56FE40E0.9060307@oracle.com> <56FE5492.1080707@oracle.com> Message-ID: <56FE7653.7090604@oracle.com> Hi Mikael, On 01.04.2016 12:59, Mikael Gerdin wrote: > Tobias, > > On 2016-04-01 11:35, Tobias Hartmann wrote: >> Hi Mikael, >> >> On 01.04.2016 10:24, Mikael Gerdin wrote: >>> Hi Tobias, >>> >>> On 2016-04-01 09:28, Tobias Hartmann wrote: >>>> Hi Mikael, >>>> >>>> On 31.03.2016 18:13, Mikael Gerdin wrote: >>>>> Hi, >>>>> >>>>> On 2016-03-31 17:49, Vladimir Kozlov wrote: >>>>>> On 3/31/16 6:22 AM, Tobias Hartmann wrote: >>>>>>> Hi, >>>>>>> >>>>>>> please review the following patch: >>>>>>> >>>>>>> https://bugs.openjdk.java.net/browse/JDK-8074553 >>>>>>> http://cr.openjdk.java.net/~thartmann/8074553/webrev.00/ >>>>>>> >>>>>>> While the code cache sweeper processes a nmethod in >>>>>>> NMethodSweeper::process_nmethod(), safepoints may happen >>>>>>> and the GC may unload the currently processed nmethod. To >>>>>>> prevent this, the sweeper uses a NMethodMarker which saves >>>>>>> the nmethod in CodeCacheSweeperThread::_scanned_nmethod. >>>>>>> The nmethod is then passed to the GC through a >>>>>>> CodeBlobClosure in CodeCacheSweeperThread::oops_do() to >>>>>>> keep it alive when the GC iterates over all threads. >>>>>>> >>>>>>> The problem is that G1 calls nmethods_do() on all threads >>>>>>> in the remark phase (see >>>>>>> G1RemarkThreadsClosure::do_thread()) which is not >>>>>>> overwritten by the sweeper thread. Since the currently >>>>>>> processed nmethod is not passed through nmethods_do() by >>>>>>> any thread, it is unloaded and we later hit the assert when >>>>>>> encountering the nmethod through oops_do(). >>>>>>> >>>>>>> Mikael Gerdin and Stefan Karlsson (thanks again!) suggested >>>>>>> to overwrite nmethods_do() as well in >>>>>>> CodeCacheSweeperThread and pass _scanned_nmethod to the >>>>>>> closure. I also modified Threads::nmethods_do() to ignore >>>>>>> the sweeper thread because we want to avoid marking the >>>>>>> _scanned_nmethod as seen on the stack when scanning stacks >>>>>>> from the sweeper (the nmethod may already be zombie and >>>>>>> only referenced by the sweeper). >>>>>> >>>>>> I did not get this. If you exclude CodeCacheSweeperThread in >>>>>> Threads::nmethods_do() then >>>>>> CodeCacheSweeperThread::nmethods_do() will not be called. >>>>>> What is the point? >>>>> >>>>> The GC code in question iterates over the threads and calls >>>>> nmethods_do on each JavaThread and the VMThread after claiming >>>>> them with atomic operations to achieve parallelism. >>>>> >>>>> There is still something a bit fishy here though. >>>>> Thread::nmethods_do is not virtual, so one must be careful to >>>>> downcast one's Thread* to JavaThread before calling it. And >>>>> since Tobias' change does not make that or >>>>> JavaThread::nmethods_do (which actually shadows and does not >>>>> override the methods) virtual we can't reach the new code >>>>> unless the GC code is changed to downcast to >>>>> CodeCacheSweeperThread before calling nmethods_do. 
>>>> >>>> Thanks for taking a look. Of course, you are right. I just >>>> assumed that Thread::nmethods_do() is virtual and missed that >>>> it's not. >>>> >>>>> I still believe that what Tobias is attempting to do is a >>>>> necessary fix but this only shows how hard this is to >>>>> reproduce. >>>>> >>>>> Perhaps Thread::nmethods_do should simply be removed (along >>>>> with any calls to it) and JavaThread::nmethods_do should then >>>>> be made virtual. >>>> >>>> Yes, I agree. Here is the new webrev: >>>> http://cr.openjdk.java.net/~thartmann/8074553/webrev.01/ >>>> >>>> I removed the empty Thread::nmethods_do() and all the (static) >>>> calls to it. >>> >>> I kind of agree with Vladimir that it's unfortunate to have to >>> check for the sweeper thread in Threads::nmethods_do() but I don't >>> have a good suggestion for an easy fix. >>> >>> From my point of view this fix is ready to go in but perhaps it >>> would be a good idea to also think about if this could be cleaned >>> up somehow. >>> >>> For me - as a GC person - it's hard to see why the sweeper thread >>> should be a JavaThread at all, it seems like more of a VM-only >>> thread to me. >> >> Yes, I agree that it is kind of counterintuitive that the sweeper >> thread is a JavaThread. I think this is because before JDK-8046809 >> [1] sweeping was done by the CompilerThreads which are JavaThreads as >> well. >> >> We could make the CodeCacheSweeperThread a subclass of NamedThread >> similar to ConcurrentGCThread but the problem is that the GC calls >> oops_do() only on the JavaThreads and the VMThread (see >> Threads::oops_do() or ThreadRootsMarkingTask::do_it()). That code >> would have to be modified to include the sweeper thread as well which >> seems a bit hacky to me. > > In a way I consider it less hacky to make it explicit that the sweeper thread is a special kind of thread and that due to its interaction with the code cache the GC may have to interact with it. > > The more I look at this code the more strange stuff I seem to find. > It appears, for example, that the VM thread and JavaThreads are the only threads who have their handle areas examined by the GC, even though other threads do have handle areas (and JNI handle areas) these are not seen by GC since it's only interested in JavaThreads and the VMThread. Yes, this is indeed strange. Unfortunately, I'm not too familiar with this code. I quickly tried to change the CodeCacheSweeperThread to be a NamedThread but unfortunately this affects many other components that rely on the sweeper thread being a JavaThread (for example, SA, GC and CompilerBroker). My prototype also triggers asserts like "GC active during NoGCVerifier" in the sweeper code. I would therefore prefer to not change the sweeper thread type with this fix. I filed JDK-8153271 to keep track of this. >> What do you think? > > Thinking out loud a bit it sounds like there is in fact some concept of threads which need to interact with GC, have handle areas, and so on and perhaps these threads should be based on a subclass of Thread which provides this interface, support for nmethod iteration and the "claim parity" hack which the GC uses to parallelize scanning of the threads. > > Anyway, as I said I think your fix is good to go as-is but in general I think this code needs to be cleaned up :) Thanks! I added a reference to this discussion to JDK-8153271. 
Best regards, Tobias > > /Mikael > >> >> Thanks, Tobias >> >> [1] https://bugs.openjdk.java.net/browse/JDK-8046809 >> >>> >>> /Mikael >>> >>>> >>>> Thanks, Tobias >>>> >>>>> >>>>> /Mikael >>>>> >>>>>> >>>>>> Thanks, Vladimir >>>>>> >>>>>>> >>>>>>> Unfortunately, this bug is extremely hard to reproduce (it >>>>>>> showed up 18 times since early 2015). I was able to >>>>>>> reproduce it only once after thousands of runs and >>>>>>> therefore not often enough to verify the fix. However, I'm >>>>>>> very confident that this solves the problem. >>>>>>> >>>>>>> Tested with JPRT and RBT (running). >>>>>>> >>>>>>> Thanks, Tobias >>>>>>> From marcus.larsson at oracle.com Fri Apr 1 13:33:39 2016 From: marcus.larsson at oracle.com (Marcus Larsson) Date: Fri, 1 Apr 2016 15:33:39 +0200 Subject: RFR: 8145934: Make ttyLocker equivalent for Unified Logging framework In-Reply-To: <56FD3CC1.4050502@oracle.com> References: <56BB3FD0.5000104@oracle.com> <3910DA9B-43C9-4C1A-8FD0-993A54225550@oracle.com> <56BCA8C9.102@oracle.com> <56C34F0E.4090803@oracle.com> <90DC33E3-F597-40E4-A317-6C92F4969575@oracle.com> <56EC03A4.1030705@oracle.com> <56FCE56C.6070606@oracle.com> <56FD1481.3090707@oracle.com> <56FD3CC1.4050502@oracle.com> Message-ID: <56FE78B3.2060802@oracle.com> Hi again, Updated webrev with removed decoration buffers. Decorations are now written directly to the streams with the help of flockfile/funlockfile as you suggested. Webrev: http://cr.openjdk.java.net/~mlarsson/8145934/webrev.03/ Incremental: http://cr.openjdk.java.net/~mlarsson/8145934/webrev.02-03/ Thanks, Marcus On 03/31/2016 05:05 PM, Marcus Larsson wrote: > > On 03/31/2016 03:40 PM, Thomas Stüfe wrote: >> Hi Marcus, >> >> On Thu, Mar 31, 2016 at 2:13 PM, Marcus Larsson >> > wrote: >> >> Hi Thomas, >> >> >> On 03/31/2016 12:48 PM, Thomas Stüfe wrote: >>> Hi Marcus, >>> >>> nice to see progress on that issue! >> >> Thanks for taking time to look at it. >> >>> >>> Unfortunately your patch did not apply to my freshly synced hs-rt >>> repository. So I took a "dry look" at your code, and here some >>> feedback (by no means complete, and also I am not a (R)eviewer): >> >> I'll rebase it and update the webrev. >> >>> >>> - thank you for taking my input and avoiding resource area for >>> memory. I am still apprehensive about UL using NEW_C_HEAP_ARRAY >>> instead of raw malloc() here, but I see it has pros and cons. >> >> It might be worth investigating, but if so it should probably be a >> separate RFE. >> >> >> Ok. Easy enough to fix should e.g. NMT ever want to use UL. >> >>> >>> - I am not sure about flockfile(): I really do not like file >>> locks, this always bites in customer scenarios. Also, by using >>> this lock, are we not just reintroducing the ttyLocker at a >>> deeper level? >> >> The fprintfs locks the FILE* internally even if we don't. This is >> AFAIU how fprintf guarantees the writes to be atomic. With the >> explicit flock calls we're just ensuring nothing can be printed >> in-between our fprintf calls, it shouldn't add any cost. >> >> >> Ah, I see. If we really feel safe about flockfile(), we might just as >> well use it in LogFileStreamOutput::write() too. There, we assemble >> the decorators in a stack local buffer to fprintf them out to the >> FILE* in a separate step - I guess to prevent tearing? But if >> flockfile comes without cost, we could save the stack local buffer >> and do: >> >> flockfile() >> fputs(decorators) >> fputs(message) >> funlockfile() > > Good idea.
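For illustration, the write path then looks roughly like this (just a sketch, not the actual code in webrev.03; the signature is simplified and print_each_decoration() merely stands for the per-decorator fputs() loop):

void LogFileStreamOutput::write(const LogDecorations& decorations, const char* msg) {
  flockfile(_stream);                           // take the FILE* lock once for the whole line
  print_each_decoration(decorations, _stream);  // fputs() each decoration directly, no stack buffer
  fputs(msg, _stream);
  fputc('\n', _stream);
  funlockfile(_stream);                         // the complete line becomes visible atomically
}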
> >> >>> Instead, how about assembling the total message in memory - like >>> it would appear in the file - and print it in one go using >>> ::write()? That usually is atomic. This way you would have to >>> write out the decorators for each line in memory as they are >>> added, but you could get rid of the _lines[] array and all its >>> surrounding code. So, no lock, less complicated code, at the cost >>> of a bit more memory usage. >> >> As the message might go to different outputs, configured for >> different levels, we can't really get rid of the _lines[] array. >> We could assemble each applicable message as a long string for >> each of the outputs, but given how fprintf seems to work we won't >> really have gained anything for that extra work and memory usage. >> >> >> Oh, I see. I did not understand the complexity of the whole thing. >> Why is it needed to write lines to a message with different log >> levels? I may be slow, but I find that not easy to understand. The >> fact that different lines in my message may go to different outputs >> is a bit surprising. I would have thought a message is just a text >> blob I assemble offline and send to the logging framework in one go, >> like a glorified String, and that I would hand it down to UL "send >> this for this level/tagset combination". And that the message itself >> would not even need to know anything about log levels and tagsets. > > The use case I want to support with multi-part messages on different > levels is when you have an event you want to log, on for example info > level, but where part of that event might include data that is too > verbose to fit the info level. So then you could split the event into > two parts, one line with the basic information on info level and the > other line (or multiple lines) on debug or trace level. The framework > then makes sure these lines are delivered together non-interleaved. > >> >>> - If I understand this correctly, there is no way to print part >>> of a line to the message object? So, if I would want to assemble >>> a line from various inputs, I would still have to assemble it on >>> the stack and feed it to say ScopedLogMessage::debug() in one go? >>> Would it be posssible to get an outputStream* from the >>> ScopedLogMessage to write into? >> >> Yes, that's right. I wanted to avoid streams for multi-line >> messages because I thought the API would become a bit messy with >> that functionality. The logStreams we have today are line >> buffered, and will send completed lines to the log outputs when >> they see a terminating newline character. This means that it won't >> be obvious how lines from different streams or writes to the >> message will be ordered in the output. Perhaps it's not that bad, >> but I figured that we could use stringStreams or similar for when >> we need to build up lines for the message. This has the nice side >> effect that it will be very obvious when, and in what order, each >> line is written to the outputs. Perhaps it's worth a follow up RFE >> if we find ourselves writing one too many log cases with >> stringStreams? >> >> >> Sorry, I think I was not clear enough. What I meant was simpler. We >> have now ScopedLogMessage::debug() which does >> LogMessageBuffer::write() which writes a line and terminates the >> line. Line outputStream::print_cr(). I would like to have an option >> to just write but not terminate the current line, like >> outputStream::print(). That way one could assemble a line piece by >> piece, maybe in a loop (e.g. 
for table row values) without needing >> another temporary buffer. > > Ok, so say we add the debug_no_cr() family of functions that writes > into the log message buffer without newlines. Then, what does it mean > if someone does debug_no_cr(s1); trace_no_cr(s2); info(s3); ? > > It would be simpler if it wasn't for the support for different levels > on different parts of the message. Maybe some well defined rules for > how it should work would solve this, but I intended to avoid the whole > use case for now. It can be done manually with stringStreams, so I > don't think it's that serious. > >> >>> >>> - I like how you implemented os::log_vsnprintf(), using >>> _vscprintf() on windows. Would it be worthwhile to merge this >>> with jio_vsnprintf(), which does the same but returns -1 on >>> truncation? >> >> The patch for JDK-8138916 [0] added the log_vsnprintf. You mean to >> change jio_vsnprintf to not return -1 on truncation, and instead >> work like vsnprintf on POSIX? I think that would be useful, and it >> allows us to remove log_vsnprintf. >> >> >> That is exactly what I meant. I think that would be a separate RFE >> though, one would have to check on all callers of jio_snprintf. > > Yeah. > > Regards, > Marcus > >> >> Thanks, >> Marcus >> >> >> Thank you! >> >> ..Thomas >> >> [0] https://bugs.openjdk.java.net/browse/JDK-8138916 >> >> >>> >>> Kind Regards, Thomas >>> >>> >>> On Thu, Mar 31, 2016 at 10:53 AM, Marcus Larsson >>> > >>> wrote: >>> >>> Any further feedback on this? >>> >>> >>> >>> On 03/18/2016 02:33 PM, Marcus Larsson wrote: >>> >>> Hi again, >>> >>> New webrev: >>> http://cr.openjdk.java.net/~mlarsson/8145934/webrev.02/ >>> >>> >>> Incremental: >>> http://cr.openjdk.java.net/~mlarsson/8145934/webrev.alt-02/ >>> >>> >>> Made all allocations regular C heap allocations because >>> of the problems with resource allocations that Thomas >>> brought up. We can do a follow up change for resource >>> allocation support if we really need it. >>> Also added some more tests for scoped messages. >>> >>> >>> On 02/17/2016 12:19 AM, John Rose wrote: >>> >>> On Feb 16, 2016, at 8:32 AM, Marcus Larsson >>> >> >>> >> >> wrote: >>> >>> >>> Alternative version where a LogMessage >>> automatically writes its messages when it goes >>> out of scope: >>> http://cr.openjdk.java.net/~mlarsson/8145934/webrev.alt/ >>> >>> >>> >>> >>> >>> I like this, with the LogMessageBuffer that does the >>> heavy work, and the [Scoped]LogMessage which is the >>> simplest way to use it. >>> >>> The LogMessageBuffer should have a neutral >>> unallocated state, for use through the LogMessage >>> macro. I.e., is_c_allocated should be a three-state >>> flag, including 'not allocated at all'. That way, if >>> you create the thing only to ask 'is_debug' and get a >>> false answer, you won't have done more than a few >>> cycles of work. Probably the set_prefix operation >>> should be lazy in the same way. >>> >>> >>> Fixed. Since I removed the resource allocation completely >>> I could keep it as a boolean. >>> >>> >>> I think the destructor should call a user-callable >>> flush function, something like this: >>> >>> ~ScopedLogMessage() { flush(); } >>> // in LogMessageBuffer: >>> void flush() { >>> if (_line_count > 0) { >>> _log.write(*this); >>> reset(); >>> } >>> } >>> void reset() { >>> _line_count = 0; >>> _message_buffer_size = 0; >>> } >>> >>> It will be rare for user code to want to either flush >>> early or cancel pending output, but when you need it, >>> it should be there. >>> >>> >>> Fixed. 
>>> >>> >>> I still prefer the first patch though, where >>> messages are neither tied to a particular log, >>> nor automatically written when they go out of >>> scope. Like I've said, the explicit write line >>> makes it easier to read the code. >>> >>> >>> There's a tradeoff here: It's easier to read the >>> *logging* code if all the *logging* operations are >>> explicit. But the point of logging code is to add >>> logging to code that is busy doing *other* operations >>> besides logging. That's why (I assume) people have >>> been noting that some uses of logging are >>> "intrusive": The logging logic calls too much >>> attention to itself, and with attention being a >>> limited resource, it takes away attention from the >>> actual algorithm that's being logged about. >>> >>> The scoped (RAII) log buffer, with automatic write, >>> is the best way I know to reduce the intrusiveness of >>> this auxiliary mechanism. >>> >>> >>> Fair point. I'm going with the automatic write on out of >>> scope. >>> >>> >>> Of course, I'm interested in finding out what your >>> everyday customers think about it. (Rachel, Coleen, >>> David, Dan?) >>> >>> For comparison I've updated the first suggestion >>> with the guarantee for unwritten messages, as >>> well as cleaning it up a bit by moving the >>> implementation to the .cpp rather than the .hpp. >>> Full >>> webrev:http://cr.openjdk.java.net/~mlarsson/8145934/webrev.01/ >>> >>> >>> Incremental:http://cr.openjdk.java.net/~mlarsson/8145934/webrev.00-01/ >>> >>> >>> >>> Let me know what you think. >>> >>> >>> That option is more intrusive than the RAII buffered >>> log alias. >>> >>> Separately, the review thread on JDK-8149383 shows a >>> use for LogMessageBuffer to collect a complex log >>> message. The log message can then be sent down one >>> of two log streams. Something like: >>> >>> if (need_to_log) { >>> ResourceMark rm; >>> LogMessageBuffer buf; >>> buf.write("Revoking bias of object " >>> INTPTR_FORMAT " , mark " >>> INTPTR_FORMAT " , type %s , prototype header " >>> INTPTR_FORMAT >>> " , allow rebias %d , >>> requesting thread " INTPTR_FORMAT, >>> p2i((void *)obj), >>> (intptr_t) mark, >>> obj->klass()->external_name(), >>> (intptr_t) obj->klass()->prototype_header(), >>> (allow_rebias ? 1 : 0), >>> (intptr_t) requesting_thread); >>> if (!is_bulk) >>> log_info(biasedlocking).write(buf); >>> else >>> log_trace(biasedlocking).write(buf); >>> } >>> >>> It is important here (like you pointed out) that the >>> LogMessageBuffer is decoupled from log levels and >>> streams, so that it can be used as a flexible >>> component of logic like this. >>> >>> But the commonest usage should (IMO) be supported by >>> a scoped auto-writing log alias. >>> >>> >>> Yeah, I agree. 
>>> >>> Thanks, >>> Marcus >>> >>> >>> >> >> > From mikael.gerdin at oracle.com Fri Apr 1 13:49:15 2016 From: mikael.gerdin at oracle.com (Mikael Gerdin) Date: Fri, 1 Apr 2016 15:49:15 +0200 Subject: [9] RFR(S): 8074553: Crash with assert(!is_unloaded()) failed: should not call follow on unloaded nmethod In-Reply-To: <56FE7653.7090604@oracle.com> References: <56FD24A3.1090504@oracle.com> <56FD4720.90800@oracle.com> <56FD4C8D.7050201@oracle.com> <56FE2306.5080705@oracle.com> <56FE3030.6070601@oracle.com> <56FE40E0.9060307@oracle.com> <56FE5492.1080707@oracle.com> <56FE7653.7090604@oracle.com> Message-ID: <56FE7C5B.1060607@oracle.com> Hi, On 2016-04-01 15:23, Tobias Hartmann wrote: > Hi Mikael, > > On 01.04.2016 12:59, Mikael Gerdin wrote: >> Tobias, >> >> On 2016-04-01 11:35, Tobias Hartmann wrote: >>> Hi Mikael, >>> >>> On 01.04.2016 10:24, Mikael Gerdin wrote: >>>> Hi Tobias, >>>> >>>> On 2016-04-01 09:28, Tobias Hartmann wrote: >>>>> Hi Mikael, >>>>> >>>>> On 31.03.2016 18:13, Mikael Gerdin wrote: >>>>>> Hi, >>>>>> >>>>>> On 2016-03-31 17:49, Vladimir Kozlov wrote: >>>>>>> On 3/31/16 6:22 AM, Tobias Hartmann wrote: >>>>>>>> Hi, >>>>>>>> >>>>>>>> please review the following patch: >>>>>>>> >>>>>>>> https://bugs.openjdk.java.net/browse/JDK-8074553 >>>>>>>> http://cr.openjdk.java.net/~thartmann/8074553/webrev.00/ >>>>>>>> >>>>>>>> >>>>>>>> While the code cache sweeper processes a nmethod in >>>>>>>> NMethodSweeper::process_nmethod(), safepoints may >>>>>>>> happen and the GC may unload the currently processed >>>>>>>> nmethod. To prevent this, the sweeper uses a >>>>>>>> NMethodMarker which saves the nmethod in >>>>>>>> CodeCacheSweeperThread::_scanned_nmethod. The nmethod >>>>>>>> is then passed to the GC through a CodeBlobClosure in >>>>>>>> CodeCacheSweeperThread::oops_do() to keep it alive when >>>>>>>> the GC iterates over all threads. >>>>>>>> >>>>>>>> The problem is that G1 calls nmethods_do() on all >>>>>>>> threads in the remark phase (see >>>>>>>> G1RemarkThreadsClosure::do_thread()) which is not >>>>>>>> overwritten by the sweeper thread. Since the currently >>>>>>>> processed nmethod is not passed through nmethods_do() >>>>>>>> by any thread, it is unloaded and we later hit the >>>>>>>> assert when encountering the nmethod through >>>>>>>> oops_do(). >>>>>>>> >>>>>>>> Mikael Gerdin and Stefan Karlsson (thanks again!) >>>>>>>> suggested to overwrite nmethods_do() as well in >>>>>>>> CodeCacheSweeperThread and pass _scanned_nmethod to >>>>>>>> the closure. I also modified Threads::nmethods_do() to >>>>>>>> ignore the sweeper thread because we want to avoid >>>>>>>> marking the _scanned_nmethod as seen on the stack when >>>>>>>> scanning stacks from the sweeper (the nmethod may >>>>>>>> already be zombie and only referenced by the sweeper). >>>>>>> >>>>>>> I did not get this. If you exclude CodeCacheSweeperThread >>>>>>> in Threads::nmethods_do() then >>>>>>> CodeCacheSweeperThread::nmethods_do() will not be >>>>>>> called. What is the point? >>>>>> >>>>>> The GC code in question iterates over the threads and >>>>>> calls nmethods_do on each JavaThread and the VMThread after >>>>>> claiming them with atomic operations to achieve >>>>>> parallelism. >>>>>> >>>>>> There is still something a bit fishy here though. >>>>>> Thread::nmethods_do is not virtual, so one must be careful >>>>>> to downcast one's Thread* to JavaThread before calling it. 
>>>>>> And since Tobias' change does not make that or >>>>>> JavaThread::nmethods_do (which actually shadows and does >>>>>> not override the methods) virtual we can't reach the new >>>>>> code unless the GC code is changed to downcast to >>>>>> CodeCacheSweeperThread before calling nmethods_do. >>>>> >>>>> Thanks for taking a look. Of course, you are right. I just >>>>> assumed that Thread::nmethods_do() is virtual and missed >>>>> that it's not. >>>>> >>>>>> I still believe that what Tobias is attempting to do is a >>>>>> necessary fix but this only shows how hard this is to >>>>>> reproduce. >>>>>> >>>>>> Perhaps Thread::nmethods_do should simply be removed >>>>>> (along with any calls to it) and JavaThread::nmethods_do >>>>>> should then be made virtual. >>>>> >>>>> Yes, I agree. Here is the new webrev: >>>>> http://cr.openjdk.java.net/~thartmann/8074553/webrev.01/ >>>>> >>>>> I removed the empty Thread::nmethods_do() and all the >>>>> (static) calls to it. >>>> >>>> I kind of agree with Vladimir that it's unfortunate to have to >>>> check for the sweeper thread in Threads::nmethods_do() but I >>>> don't have a good suggestion for an easy fix. >>>> >>>> From my point of view this fix is ready to go in but perhaps >>>> it would be a good idea to also think about if this could be >>>> cleaned up somehow. >>>> >>>> For me - as a GC person - it's hard to see why the sweeper >>>> thread should be a JavaThread at all, it seems like more of a >>>> VM-only thread to me. >>> >>> Yes, I agree that it is kind of counterintuitive that the >>> sweeper thread is a JavaThread. I think this is because before >>> JDK-8046809 [1] sweeping was done by the CompilerThreads which >>> are JavaThreads as well. >>> >>> We could make the CodeCacheSweeperThread a subclass of >>> NamedThread similar to ConcurrentGCThread but the problem is that >>> the GC calls oops_do() only on the JavaThreads and the VMThread >>> (see Threads::oops_do() or ThreadRootsMarkingTask::do_it()). That >>> code would have to be modified to include the sweeper thread as >>> well which seems a bit hacky to me. >> >> In a way I consider it less hacky to make it explicit that the >> sweeper thread is a special kind of thread and that due to its >> interaction with the code cache the GC may have to interact with >> it. >> >> The more I look at this code the more strange stuff I seem to >> find. It appears, for example, that the VM thread and JavaThreads >> are the only threads who have their handle areas examined by the >> GC, even though other threads do have handle areas (and JNI handle >> areas) these are not seen by GC since it's only interested in >> JavaThreads and the VMThread. > > Yes, this is indeed strange. Unfortunately, I'm not too familiar with > this code. > > I quickly tried to change the CodeCacheSweeperThread to be a > NamedThread but unfortunately this affects many other components that > rely on the sweeper thread being a JavaThread (for example, SA, GC > and CompilerBroker). My prototype also triggers asserts like "GC > active during NoGCVerifier" in the sweeper code. I would therefore > prefer to not change the sweeper thread type with this fix. I filed > JDK-8153271 to keep track of this. Right, I didn't expect it to be an easy change. :) Anyway, thanks for trying it out to see if it was a simple change. > >>> What do you think? 
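To make the nmethods_do dispatch issue discussed above easier to follow, here is a self-contained miniature of the problem and the proposed shape of the fix. The types below are simplified stand-ins invented for the example, not the HotSpot classes and not the code in the webrev.

  #include <stdio.h>

  struct nmethod { const char* name; };

  struct CodeBlobClosure {
    virtual void do_code_blob(nmethod* nm) { printf("keeping %s alive\n", nm->name); }
    virtual ~CodeBlobClosure() {}
  };

  struct JavaThread {
    // Must be virtual: the GC iterates threads through JavaThread*, so a
    // non-virtual (shadowing) method on the sweeper thread is never reached.
    virtual void nmethods_do(CodeBlobClosure* cf) { (void)cf; /* walk nmethods on the stack */ }
    virtual ~JavaThread() {}
  };

  struct CodeCacheSweeperThread : public JavaThread {
    nmethod* _scanned_nmethod;  // nmethod currently being processed by the sweeper

    CodeCacheSweeperThread() : _scanned_nmethod(NULL) {}

    virtual void nmethods_do(CodeBlobClosure* cf) {
      JavaThread::nmethods_do(cf);
      if (_scanned_nmethod != NULL) {
        // Report the nmethod being swept so a marking pass treats it as live
        // and does not unload it while the sweeper is stopped at a safepoint.
        cf->do_code_blob(_scanned_nmethod);
      }
    }
  };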
>> >> Thinking out loud a bit it sounds like there is in fact some >> concept of threads which need to interact with GC, have handle >> areas, and so on and perhaps these threads should be based on a >> subclass of Thread which provides this interface, support for >> nmethod iteration and the "claim parity" hack which the GC uses to >> parallelize scanning of the threads. >> >> Anyway, as I said I think your fix is good to go as-is but in >> general I think this code needs to be cleaned up :) > > Thanks! I added a reference to this discussion to JDK-8153271. Excellent. /Mikael > > Best regards, Tobias > >> >> /Mikael >> >>> >>> Thanks, Tobias >>> >>> [1] https://bugs.openjdk.java.net/browse/JDK-8046809 >>> >>>> >>>> /Mikael >>>> >>>>> >>>>> Thanks, Tobias >>>>> >>>>>> >>>>>> /Mikael >>>>>> >>>>>>> >>>>>>> Thanks, Vladimir >>>>>>> >>>>>>>> >>>>>>>> Unfortunately, this bug is extremely hard to reproduce >>>>>>>> (it showed up 18 times since early 2015). I was able >>>>>>>> to reproduce it only once after thousands of runs and >>>>>>>> therefore not often enough to verify the fix. However, >>>>>>>> I'm very confident that this solves the problem. >>>>>>>> >>>>>>>> Tested with JPRT and RBT (running). >>>>>>>> >>>>>>>> Thanks, Tobias >>>>>>>> From filipp.zhinkin at gmail.com Fri Apr 1 14:27:59 2016 From: filipp.zhinkin at gmail.com (Filipp Zhinkin) Date: Fri, 1 Apr 2016 17:27:59 +0300 Subject: RFR (L): 8149374: Replace C1-specific collection classes with universal collection classes In-Reply-To: <56FD4F63.8020103@oracle.com> References: <56FAFF3E.3020507@oracle.com> <56FD3ECF.1090800@oracle.com> <56FD4F63.8020103@oracle.com> Message-ID: Hi Mikael, On Thu, Mar 31, 2016 at 7:25 PM, Mikael Gerdin wrote: > Hi, > > I like the cleanup, can't we also remove CHeapArray in arrays.hpp? Sure! I've missed that it is not used at all. > > As for the CMS change, I would prefer this instead (untested!): > http://cr.openjdk.java.net/~mgerdin/pss-array/webrev/ Thanks, your implementation looks much better. If you don't mind I'll incorporate it into my change. Also, it seems like in ParNewGeneration::collect we have to create ResourceMark before ParScanThreadStateSet, right? Thanks, Filipp. > > /Mikael > > > On 2016-03-31 17:14, Vladimir Kozlov wrote: >> >> Hi Filipp, >> >> Yes, this looks better. CCing to hotspot-dev for Runtime and GC groups >> to look on. >> >> Thanks, >> Vladimir >> >> On 3/31/16 8:08 AM, Filipp Zhinkin wrote: >>> >>> Hi Vladimir, >>> >>> thank you for looking at this change. >>> >>> On Wed, Mar 30, 2016 at 1:18 AM, Vladimir Kozlov >>> wrote: >>>> >>>> Nice clean up but I don't see any source code removed. What benefits >>>> we have >>>> then? >>>> I understand that we don't generate subclasses for ResourceArray and use >>>> GrowableArray. But it will not save space I think. >>>> What prevents us to remove ResourceArray at all? >>> >>> >>> CMS's ParScanThreadStateSet is inherited from ResourceArray, >>> so it should be updated before removing ResourceArray: >>> >>> http://cr.openjdk.java.net/~fzhinkin/8149374/webrev.01/ >>> >>>> >>>> On 3/11/16 3:42 AM, Filipp Zhinkin wrote: >>>>> >>>>> >>>>> Hi all, >>>>> >>>>> please review a fix for JDK-8149374: >>>>> >>>>> Webrev: http://cr.openjdk.java.net/~fzhinkin/8149374/webrev.00/ >>>>> Bug: https://bugs.openjdk.java.net/browse/JDK-8149374 >>>>> Testing done: hotspot_all tests + CTW >>>>> >>>>> I've replaced all usages of collections defined via define_array and >>>>> define_stack macros with GrowableArray. 
>>>>> >>>>> There are good and bad news regarding performance impact of that >>>>> change. >>>>> Unfortunately, C1 compilation time for CTW-scenario w/ release bits >>>>> increased from 51.07±0.28s to 52.99±0.23s (it's about 3.5%). >>>> >>>> >>>> It is acceptable regression I think. I don't think we should optimize >>>> and >>>> make more complex GrowableArray just to save 0.5% of performance for C2. >>> >>> >>> As long as GrowableArray is used in different HotSpot subsystems it >>> may be beneficial to optimize it, >>> but I've executed SPECjvm2008's startup.* benchmarks and there was no >>> significant difference. >>> >>> If ~3% regression is OK for C1 then I'm fine with leaving >>> GrowableArray's initialization >>> in its current state unless there will be other reasons to speed it up. >>> >>> Thanks, >>> Filipp. >>> >>>> >>>> Thanks, >>>> Vladimir >>>> >>>> >>>>> >>>>> Such a difference is caused by the eager initialization of GrowableArray's >>>>> backing array elements [1]. I can imagine cases when we actually need to >>>>> force >>>>> initialization and de-initialization during the array's >>>>> growing/destruction, but for some types like C++ primitive types or >>>>> pointers such initialization does not make much sense, because >>>>> GrowableArray does not allow access to an element which was not >>>>> explicitly placed inside of it. And as long as GrowableArray is most >>>>> widely used to store pointers we're simply wasting time on >>>>> initialization. >>>>> >>>>> I've measured CTW time with the following workaround which implements >>>>> initialization for numeric types and pointers as a no-op, and C1 >>>>> compilation time returned to the values that were measured before the >>>>> original change (51.06±0.24s): >>>>> >>>>> >>>>> http://cr.openjdk.java.net/~fzhinkin/growableArrayInitialization/webrev/ >>>>> >>>>> >>>>> I've also measured C2 compilation time and it dropped down by a few >>>>> seconds too: 1138±9s w/o GrowableArray's change and 1132±5s w/ it. >>>>> >>>>> Summing up: I guess we should avoid GrowableArray's backing array >>>>> initialization for some types, don't we? >>>>> >>>>> Best regards, >>>>> Filipp >>>>> >>>>> [1] >>>>> >>>>> http://hg.openjdk.java.net/jdk9/hs-comp/hotspot/file/323b8370b0f6/src/share/vm/utilities/growableArray.hpp#l165 >>>>> >>>>> >>>> > From mikael.gerdin at oracle.com Fri Apr 1 14:34:57 2016 From: mikael.gerdin at oracle.com (Mikael Gerdin) Date: Fri, 1 Apr 2016 16:34:57 +0200 Subject: RFR (L): 8149374: Replace C1-specific collection classes with universal collection classes In-Reply-To: References: <56FAFF3E.3020507@oracle.com> <56FD3ECF.1090800@oracle.com> <56FD4F63.8020103@oracle.com> Message-ID: <56FE8711.3080703@oracle.com> Hi Filipp On 2016-04-01 16:27, Filipp Zhinkin wrote: > Hi Mikael, > > On Thu, Mar 31, 2016 at 7:25 PM, Mikael Gerdin wrote: >> Hi, >> >> I like the cleanup, can't we also remove CHeapArray in arrays.hpp? > > Sure! I've missed that it is not used at all. Great! > >> >> As for the CMS change, I would prefer this instead (untested!): >> http://cr.openjdk.java.net/~mgerdin/pss-array/webrev/ > > Thanks, your implementation looks much better. > If you don't mind I'll incorporate it into my change. Go ahead, that was my intention. > > Also, it seems like in ParNewGeneration::collect we have to create > ResourceMark before ParScanThreadStateSet, right? There is a ResourceMark in the caller so I don't think it's needed.
The old version of the code used resource allocation as well and was fine so I don't think there is a need to introduce another ResourceMark. /Mikael > > Thanks, > Filipp. > >> >> /Mikael >> >> >> On 2016-03-31 17:14, Vladimir Kozlov wrote: >>> >>> Hi Filipp, >>> >>> Yes, this looks better. CCing to hotspot-dev for Runtime and GC groups >>> to look on. >>> >>> Thanks, >>> Vladimir >>> >>> On 3/31/16 8:08 AM, Filipp Zhinkin wrote: >>>> >>>> Hi Vladimir, >>>> >>>> thank you for looking at this change. >>>> >>>> On Wed, Mar 30, 2016 at 1:18 AM, Vladimir Kozlov >>>> wrote: >>>>> >>>>> Nice clean up but I don't see any source code removed. What benefits >>>>> we have >>>>> then? >>>>> I understand that we don't generate subclasses for ResourceArray and use >>>>> GrowableArray. But it will not save space I think. >>>>> What prevents us to remove ResourceArray at all? >>>> >>>> >>>> CMS's ParScanThreadStateSet is inherited from ResourceArray, >>>> so it should be updated before removing ResourceArray: >>>> >>>> http://cr.openjdk.java.net/~fzhinkin/8149374/webrev.01/ >>>> >>>>> >>>>> On 3/11/16 3:42 AM, Filipp Zhinkin wrote: >>>>>> >>>>>> >>>>>> Hi all, >>>>>> >>>>>> please review a fix for JDK-8149374: >>>>>> >>>>>> Webrev: http://cr.openjdk.java.net/~fzhinkin/8149374/webrev.00/ >>>>>> Bug: https://bugs.openjdk.java.net/browse/JDK-8149374 >>>>>> Testing done: hotspot_all tests + CTW >>>>>> >>>>>> I've replaced all usages of collections defined via define_array and >>>>>> define_stack macros with GrowableArray. >>>>>> >>>>>> There are good and bad news regarding performance impact of that >>>>>> change. >>>>>> Unfortunately, C1 compilation time for CTW-scenario w/ release bits >>>>>> increased from 51.07?0.28s to 52.99?0.23s (it's about 3.5%). >>>>> >>>>> >>>>> >>>>> It is acceptable regression I think. I don't think we should optimize >>>>> and >>>>> make more complex GrowableArray just to save 0.5% of performance for C2. >>>> >>>> >>>> As long as GrowableArray is used in different Hotspot's subsystems it >>>> may be beneficial to optimize it, >>>> but I've executed SPECjvm2008's startup.* benchmarks and there were no >>>> significant difference. >>>> >>>> If ~3% regression is OK for C1 then I'm fine with leaving >>>> GrowableArray's initialization >>>> in its current state unless there will be other reasons to speed it up. >>>> >>>> Thanks, >>>> Filipp. >>>> >>>>> >>>>> Thanks, >>>>> Vladimir >>>>> >>>>> >>>>>> >>>>>> Such difference caused by eager initialization of GrowableArray's >>>>>> backing array elements [1]. I can imagine when we actually need to >>>>>> force >>>>>> initialization and de-initialization during array's >>>>>> growing/destruction, but for some types like c++ primitive types or >>>>>> pointers such initialization does not make much sense, because >>>>>> GrowableArray is not allowing to access an element which was not >>>>>> explicitly placed inside of it. And as long as GrowableArray most >>>>>> widely used to store pointers we're simply wasting the time with >>>>>> initialization. 
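As an aside for readers following the initialization discussion, here is an illustrative sketch (not the linked workaround webrev) of how backing-array initialization could be made a no-op for pointer and primitive element types via a small trait; all names below are invented for the example.

  #include <stddef.h>
  #include <new>

  // Default: element slots are default-constructed when the backing array grows.
  template <typename E> struct NeedsElementInit { static const bool value = true; };

  // Pointers and selected primitives skip initialization: slots past the
  // logical length are never handed out, so the garbage values are unobservable.
  template <typename E> struct NeedsElementInit<E*> { static const bool value = false; };
  template <> struct NeedsElementInit<int>  { static const bool value = false; };
  template <> struct NeedsElementInit<bool> { static const bool value = false; };

  template <typename E>
  void initialize_tail(E* data, size_t from, size_t to) {
    if (!NeedsElementInit<E>::value) {
      return;  // no-op for pointer/primitive arrays, skipping the loop entirely
    }
    for (size_t i = from; i < to; i++) {
      ::new ((void*)&data[i]) E();  // placement-new default construction
    }
  }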
>>>>>> >>>>>> I've measured CTW time with following workaround which implements >>>>>> initialization for numeric types and pointers as no-op and C1 >>>>>> compilation time returned back to values that were measured before >>>>>> original change (51.06?0.24s): >>>>>> >>>>>> >>>>>> http://cr.openjdk.java.net/~fzhinkin/growableArrayInitialization/webrev/ >>>>>> >>>>>> >>>>>> I've also measured C2 compilation time and it dropped down by a few >>>>>> seconds too: 1138?9s w/o GrowableArray's change and 1132?5s w/ it. >>>>>> >>>>>> Summing up: I guess we should avoid GrowableArray's backing array >>>>>> initialization for some types, don't we? >>>>>> >>>>>> Best regards, >>>>>> Filipp >>>>>> >>>>>> [1] >>>>>> >>>>>> http://hg.openjdk.java.net/jdk9/hs-comp/hotspot/file/323b8370b0f6/src/share/vm/utilities/growableArray.hpp#l165 >>>>>> >>>>>> >>>>> >> From ioi.lam at oracle.com Fri Apr 1 16:45:16 2016 From: ioi.lam at oracle.com (Ioi Lam) Date: Fri, 01 Apr 2016 09:45:16 -0700 Subject: RFR [XS] 8153297 - [TESTBUG] Enhance test/testlibrary/ClassFileInstaller.java to support JAR files Message-ID: <56FEA59C.9020805@oracle.com> Please review a very small fix: http://cr.openjdk.java.net/~iklam/jdk9/8153300-enhance-classfileinstaller.v01/ Bug: [TESTBUG] Enhance test/testlibrary/ClassFileInstaller.java to support JAR files https://bugs.openjdk.java.net/browse/JDK-8153300 Summary of fix: Many hotspot tests need to create JAR files. The current method is messy: [1] Because JTREG compiles .class files into various directories, we need to use ClassFileInstaller to find the .class file and copy it into the current directory. [2] Then, there are various ad-hoc calls to sun.tools.jar.Main to create the JAR file. This is not desirable because sun.tools.jar is an internal package and javac gives warnings about it. I have improved ClassFileInstaller so that JAR files can be easily created using JTREG tags: * @build ClassFileInstaller sun.hotspot.WhiteBox * @run main ClassFileInstaller -jar whitebox.jar sun.hotspot.WhiteBox To handle more complex use cases, new APIs are also added to ClassFileInstaller for programmatically creating JAR files inside the test source code. I converted two CDS test cases to use the new functionality. FYI, I am also working on new tests for a closed issue (JDK-8153297) that use the new functionality. Thanks - Ioi From ioi.lam at oracle.com Fri Apr 1 16:54:58 2016 From: ioi.lam at oracle.com (Ioi Lam) Date: Fri, 01 Apr 2016 09:54:58 -0700 Subject: RFR [XS] 8153300 - [TESTBUG] Enhance test/testlibrary/ClassFileInstaller.java to support JAR files In-Reply-To: <56FEA59C.9020805@oracle.com> References: <56FEA59C.9020805@oracle.com> Message-ID: <56FEA7E2.50305@oracle.com> Sorry, the bug ID is 8153300. I have fixed the e-mail subject. - Ioi On 4/1/16 9:45 AM, Ioi Lam wrote: > Please review a very small fix: > > http://cr.openjdk.java.net/~iklam/jdk9/8153300-enhance-classfileinstaller.v01/ > > > Bug: [TESTBUG] Enhance test/testlibrary/ClassFileInstaller.java to > support JAR files > > https://bugs.openjdk.java.net/browse/JDK-8153300 > > Summary of fix: > > Many hotspot tests need to create JAR files. The current method is > messy: > > [1] Because JTREG compiles .class files into various directories, > we need > to use ClassFileInstaller to find the .class file and copy it > into the > current directory. > [2] Then, there are various ad-hoc calls to sun.tools.jar.Main to > create the > JAR file. 
This is not desirable because sun.tools.jar is an > internal > package and javac gives warnings about it. > > I have improved ClassFileInstaller so that JAR files can be easily > created using JTREG tags: > > * @build ClassFileInstaller sun.hotspot.WhiteBox > * @run main ClassFileInstaller -jar whitebox.jar sun.hotspot.WhiteBox > > To handle more complex use cases, new APIs are also added to > ClassFileInstaller > for programmatically creating JAR files inside the test source code. > > I converted two CDS test cases to use the new functionality. > > FYI, I am also working on new tests for a closed issue > (JDK-8153297) that use > the new functionality. > > Thanks > - Ioi > > > From aph at redhat.com Fri Apr 1 17:30:45 2016 From: aph at redhat.com (Andrew Haley) Date: Fri, 1 Apr 2016 18:30:45 +0100 Subject: RFR 8153310: AArch64: JEP 254: Implement byte_array_inflate and char_array_compress Message-ID: <56FEB045.8000905@redhat.com> We need this for CompactStrings. http://cr.openjdk.java.net/~aph/8153310/ https://bugs.openjdk.java.net/browse/JDK-8153310 Note: this depends on http://cr.openjdk.java.net/~aph/8152554/ which is still pending review. Thanks, Andrew. From aleksey.shipilev at oracle.com Fri Apr 1 19:14:12 2016 From: aleksey.shipilev at oracle.com (Aleksey Shipilev) Date: Fri, 1 Apr 2016 22:14:12 +0300 Subject: JMH and JDK9 In-Reply-To: <56FBFCCD.1000704@oracle.com> References: <56FBFA22.2060204@redhat.com> <56FBFB05.9050706@oracle.com> <56FBFCCD.1000704@oracle.com> Message-ID: <56FEC884.9090403@oracle.com> On 03/30/2016 07:20 PM, Aleksey Shipilev wrote: > On 03/30/2016 07:12 PM, Alan Bateman wrote: >> On 30/03/2016 17:09, Andrew Haley wrote: >>> Is there a recipe for using JMH with JDK9 classes? What do people do? >>> >> There is discussion on this in this bug: >> https://bugs.openjdk.java.net/browse/JDK-8152842 >> >> but in the mean-time it looks like JMH no longer uses @Generated. > > Yeah, hold on, Andrew! Jigsaw-enabled JMH is coming this week. Bump to 1.12: http://mail.openjdk.java.net/pipermail/jmh-dev/2016-April/002156.html -Aleksey From gromero at linux.vnet.ibm.com Fri Apr 1 20:36:03 2016 From: gromero at linux.vnet.ibm.com (Gustavo Romero) Date: Fri, 1 Apr 2016 17:36:03 -0300 Subject: PPC64 VSX load/store instructions in stubs Message-ID: <56FEDBB3.5030106@linux.vnet.ibm.com> Hi Martin, Hi Volker Currently VSX load/store instructions are not being used in PPC64 stubs, particularly in arraycopy stubs inside generate_arraycopy_stubs() like, but not limited to, generate_disjoint_{byte,short,int,long}_copy. We can speed up mass copy using VSX (Vector-Scalar Extension) load/store instruction in processors >= POWER8, the same way it's already done for libc memcpy(). This is an initial patch just for jshort_disjoint_arraycopy() VSX vector load/store: http://81.de.7a9f.ip4.static.sl-reverse.com/202539/webrev What are your thoughts on that? Is there any impediment to use VSX instructions in OpenJDK at the moment? Thank you. 
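The posted webrev emits the vector loads/stores inside the generated stub; purely as an illustration of the idea, here is a C-level sketch using GCC's VSX built-ins. It assumes a POWER8 target built with -mvsx, assumes the arrays are disjoint (non-overlapping), and the function name and tail loop are invented for the example.

  #include <altivec.h>
  #include <stddef.h>

  // Copy 'count' 16-bit elements, 16 bytes per iteration, using VSX vector
  // load/store built-ins; the scalar loop handles the remaining tail.
  static void vsx_copy_jshorts(const short* from, short* to, size_t count) {
    size_t i = 0;
    for (; i + 8 <= count; i += 8) {   // 8 x 16 bits = one 128-bit VSX register
      __vector unsigned char v = vec_vsx_ld(0, (const unsigned char*)(from + i));
      vec_vsx_st(v, 0, (unsigned char*)(to + i));
    }
    for (; i < count; i++) {
      to[i] = from[i];
    }
  }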
Best regards, Gustavo From filipp.zhinkin at gmail.com Sat Apr 2 11:32:21 2016 From: filipp.zhinkin at gmail.com (Filipp Zhinkin) Date: Sat, 2 Apr 2016 14:32:21 +0300 Subject: RFR (L): 8149374: Replace C1-specific collection classes with universal collection classes In-Reply-To: <56FE8711.3080703@oracle.com> References: <56FAFF3E.3020507@oracle.com> <56FD3ECF.1090800@oracle.com> <56FD4F63.8020103@oracle.com> <56FE8711.3080703@oracle.com> Message-ID: Here is an webrev updated according Mikael's comments: http://cr.openjdk.java.net/~fzhinkin/8149374/webrev.02/ Tested using hotspot_all tests w/ CMS turned on. Thanks, Filipp. On Fri, Apr 1, 2016 at 5:34 PM, Mikael Gerdin wrote: > Hi Filipp > > On 2016-04-01 16:27, Filipp Zhinkin wrote: >> >> Hi Mikael, >> >> On Thu, Mar 31, 2016 at 7:25 PM, Mikael Gerdin >> wrote: >>> >>> Hi, >>> >>> I like the cleanup, can't we also remove CHeapArray in arrays.hpp? >> >> >> Sure! I've missed that it is not used at all. > > > Great! > >> >>> >>> As for the CMS change, I would prefer this instead (untested!): >>> http://cr.openjdk.java.net/~mgerdin/pss-array/webrev/ >> >> >> Thanks, your implementation looks much better. >> If you don't mind I'll incorporate it into my change. > > > Go ahead, that was my intention. > >> >> Also, it seems like in ParNewGeneration::collect we have to create >> ResourceMark before ParScanThreadStateSet, right? > > > There is a ResourceMark in the caller so I don't think it's needed. > The old version of the code used resource allocation as well and was fine so > I don't think there is a need to introduce another ResourceMark. > > > /Mikael > > >> >> Thanks, >> Filipp. >> >>> >>> /Mikael >>> >>> >>> On 2016-03-31 17:14, Vladimir Kozlov wrote: >>>> >>>> >>>> Hi Filipp, >>>> >>>> Yes, this looks better. CCing to hotspot-dev for Runtime and GC groups >>>> to look on. >>>> >>>> Thanks, >>>> Vladimir >>>> >>>> On 3/31/16 8:08 AM, Filipp Zhinkin wrote: >>>>> >>>>> >>>>> Hi Vladimir, >>>>> >>>>> thank you for looking at this change. >>>>> >>>>> On Wed, Mar 30, 2016 at 1:18 AM, Vladimir Kozlov >>>>> wrote: >>>>>> >>>>>> >>>>>> Nice clean up but I don't see any source code removed. What benefits >>>>>> we have >>>>>> then? >>>>>> I understand that we don't generate subclasses for ResourceArray and >>>>>> use >>>>>> GrowableArray. But it will not save space I think. >>>>>> What prevents us to remove ResourceArray at all? >>>>> >>>>> >>>>> >>>>> CMS's ParScanThreadStateSet is inherited from ResourceArray, >>>>> so it should be updated before removing ResourceArray: >>>>> >>>>> http://cr.openjdk.java.net/~fzhinkin/8149374/webrev.01/ >>>>> >>>>>> >>>>>> On 3/11/16 3:42 AM, Filipp Zhinkin wrote: >>>>>>> >>>>>>> >>>>>>> >>>>>>> Hi all, >>>>>>> >>>>>>> please review a fix for JDK-8149374: >>>>>>> >>>>>>> Webrev: http://cr.openjdk.java.net/~fzhinkin/8149374/webrev.00/ >>>>>>> Bug: https://bugs.openjdk.java.net/browse/JDK-8149374 >>>>>>> Testing done: hotspot_all tests + CTW >>>>>>> >>>>>>> I've replaced all usages of collections defined via define_array and >>>>>>> define_stack macros with GrowableArray. >>>>>>> >>>>>>> There are good and bad news regarding performance impact of that >>>>>>> change. >>>>>>> Unfortunately, C1 compilation time for CTW-scenario w/ release bits >>>>>>> increased from 51.07?0.28s to 52.99?0.23s (it's about 3.5%). >>>>>> >>>>>> >>>>>> >>>>>> >>>>>> It is acceptable regression I think. I don't think we should optimize >>>>>> and >>>>>> make more complex GrowableArray just to save 0.5% of performance for >>>>>> C2. 
>>>>> >>>>> >>>>> >>>>> As long as GrowableArray is used in different Hotspot's subsystems it >>>>> may be beneficial to optimize it, >>>>> but I've executed SPECjvm2008's startup.* benchmarks and there were no >>>>> significant difference. >>>>> >>>>> If ~3% regression is OK for C1 then I'm fine with leaving >>>>> GrowableArray's initialization >>>>> in its current state unless there will be other reasons to speed it up. >>>>> >>>>> Thanks, >>>>> Filipp. >>>>> >>>>>> >>>>>> Thanks, >>>>>> Vladimir >>>>>> >>>>>> >>>>>>> >>>>>>> Such difference caused by eager initialization of GrowableArray's >>>>>>> backing array elements [1]. I can imagine when we actually need to >>>>>>> force >>>>>>> initialization and de-initialization during array's >>>>>>> growing/destruction, but for some types like c++ primitive types or >>>>>>> pointers such initialization does not make much sense, because >>>>>>> GrowableArray is not allowing to access an element which was not >>>>>>> explicitly placed inside of it. And as long as GrowableArray most >>>>>>> widely used to store pointers we're simply wasting the time with >>>>>>> initialization. >>>>>>> >>>>>>> I've measured CTW time with following workaround which implements >>>>>>> initialization for numeric types and pointers as no-op and C1 >>>>>>> compilation time returned back to values that were measured before >>>>>>> original change (51.06?0.24s): >>>>>>> >>>>>>> >>>>>>> >>>>>>> http://cr.openjdk.java.net/~fzhinkin/growableArrayInitialization/webrev/ >>>>>>> >>>>>>> >>>>>>> I've also measured C2 compilation time and it dropped down by a few >>>>>>> seconds too: 1138?9s w/o GrowableArray's change and 1132?5s w/ it. >>>>>>> >>>>>>> Summing up: I guess we should avoid GrowableArray's backing array >>>>>>> initialization for some types, don't we? >>>>>>> >>>>>>> Best regards, >>>>>>> Filipp >>>>>>> >>>>>>> [1] >>>>>>> >>>>>>> >>>>>>> http://hg.openjdk.java.net/jdk9/hs-comp/hotspot/file/323b8370b0f6/src/share/vm/utilities/growableArray.hpp#l165 >>>>>>> >>>>>>> >>>>>> >>> > From stefan.karlsson at oracle.com Mon Apr 4 07:34:08 2016 From: stefan.karlsson at oracle.com (Stefan Karlsson) Date: Mon, 4 Apr 2016 09:34:08 +0200 Subject: RFR: 8153254: Delegate (v)write from Log to LogTagSet In-Reply-To: <56FE370B.3020204@oracle.com> References: <56FE370B.3020204@oracle.com> Message-ID: <570218F0.1080902@oracle.com> Hi Robbin, On 2016-04-01 10:53, Robbin Ehn wrote: > Hi all, > > Please review this patch. > > This moves log writes methods to LogTagSet. > > Bug: https://bugs.openjdk.java.net/browse/JDK-8153254 > Webrev: http://cr.openjdk.java.net/~rehn/8153254/webrev/ This looks good to me. The indention is wrong in the parameter initialization list and the includes are incorrectly sorted (These issues come from the original patch :). Thanks for changing this, StefanK > > Tested with internal vm test and jprt. > > Thanks! > > /Robbin > From robbin.ehn at oracle.com Mon Apr 4 07:43:51 2016 From: robbin.ehn at oracle.com (Robbin Ehn) Date: Mon, 4 Apr 2016 09:43:51 +0200 Subject: RFR: 8153254: Delegate (v)write from Log to LogTagSet In-Reply-To: <570218F0.1080902@oracle.com> References: <56FE370B.3020204@oracle.com> <570218F0.1080902@oracle.com> Message-ID: <57021B37.4070209@oracle.com> Hi Stefan On 04/04/2016 09:34 AM, Stefan Karlsson wrote: > Hi Robbin, > > On 2016-04-01 10:53, Robbin Ehn wrote: >> Hi all, >> >> Please review this patch. >> >> This moves log writes methods to LogTagSet. 
>> >> Bug: https://bugs.openjdk.java.net/browse/JDK-8153254 >> Webrev: http://cr.openjdk.java.net/~rehn/8153254/webrev/ > > This looks good to me. > > The indention is wrong in the parameter initialization list and the > includes are incorrectly sorted (These issues come from the original > patch :). :) Fixed (no webrev). Thanks Stefan! /Robbin > > Thanks for changing this, > StefanK > >> >> Tested with internal vm test and jprt. >> >> Thanks! >> >> /Robbin >> > From kim.barrett at oracle.com Mon Apr 4 08:24:03 2016 From: kim.barrett at oracle.com (Kim Barrett) Date: Mon, 4 Apr 2016 04:24:03 -0400 Subject: RFR: 8153254: Delegate (v)write from Log to LogTagSet In-Reply-To: <56FE370B.3020204@oracle.com> References: <56FE370B.3020204@oracle.com> Message-ID: <76B81C9B-7E5E-465B-94D9-3ADB19ADAD54@oracle.com> > On Apr 1, 2016, at 4:53 AM, Robbin Ehn wrote: > > Hi all, > > Please review this patch. > > This moves log writes methods to LogTagSet. > > Bug: https://bugs.openjdk.java.net/browse/JDK-8153254 > Webrev: http://cr.openjdk.java.net/~rehn/8153254/webrev/ > > Tested with internal vm test and jprt. > > Thanks! > > /Robbin I think LogTagSetMapping might need to have a GuardTag template parameter added, similar to that of Log, so that things like LogTagSetMapping< LOG_TAGS(__VAR_ARGS__) > can be used in some of Stefan's planned followups to this change. Otherwise, looks good. From stefan.karlsson at oracle.com Mon Apr 4 08:39:00 2016 From: stefan.karlsson at oracle.com (Stefan Karlsson) Date: Mon, 4 Apr 2016 10:39:00 +0200 Subject: RFR: 8152635: Create a UL class to represent a Log + Level combination In-Reply-To: <56FA8744.8050007@oracle.com> References: <56FA8744.8050007@oracle.com> Message-ID: <57022824.4040205@oracle.com> Hi all, Here's an updated webrev with fixes for the issues found by Bengt and Marcus: http://cr.openjdk.java.net/~stefank/8152635/webrev.02.delta/ http://cr.openjdk.java.net/~stefank/8152635/webrev.02/ Thanks, StefanK On 2016-03-29 15:46, Stefan Karlsson wrote: > Hi all, > > Please review this patch to introduce a Unified Logging class to > describe a combination of a tagset and a log level. > > http://cr.openjdk.java.net/~stefank/8152635/webrev.01 > https://bugs.openjdk.java.net/browse/JDK-8152635 > > The patch is applied on top of the patch in: > http://mail.openjdk.java.net/pipermail/hotspot-dev/2016-March/022302.html > > With this class we could write code as: > LogTarget(Debug, gc, heap) log; > if (log.is_enabled()) { > log.print(...); > log.print(...); > ... = log.stream(); > } > > instead of the current model: > Log(gc, heap) log; > if (log.is_debug()) { > log.debug(...) > log.debug(...) > ... = log.debug_stream(): > } > > The LogTarget class/macro ensures that we only mention the log level > once, so that we don't accidentally log to the wrong level. The > previous version forces the coder to repeat 'debug' four times. > > One example where the wrong log level has been used: > https://bugs.openjdk.java.net/browse/JDK-8152962 > > I've converted one function in compactibleFreeListSpace.cpp to show > how to use the new class. 
> > Test: new internal vm test, jprt > > Thanks, > StefanK From stefan.karlsson at oracle.com Mon Apr 4 08:47:37 2016 From: stefan.karlsson at oracle.com (Stefan Karlsson) Date: Mon, 4 Apr 2016 10:47:37 +0200 Subject: RFR: 8152635: Create a UL class to represent a Log + Level combination In-Reply-To: <56FAA54D.9050103@oracle.com> References: <56FA8744.8050007@oracle.com> <56FAA54D.9050103@oracle.com> Message-ID: <57022A29.2050203@oracle.com> Hi Robbin, On 2016-03-29 17:54, Robbin Ehn wrote: > Hi Stefan, looks good to me. Thanks. > > (we can now also remove the ResourceMark from the other two test, right?) Maybe. I'll prefer if that could be handled by a separate RFE. Thanks, StefanK > > /Robbin > > On 03/29/2016 03:46 PM, Stefan Karlsson wrote: >> Hi all, >> >> Please review this patch to introduce a Unified Logging class to >> describe a combination of a tagset and a log level. >> >> http://cr.openjdk.java.net/~stefank/8152635/webrev.01 >> https://bugs.openjdk.java.net/browse/JDK-8152635 >> >> The patch is applied on top of the patch in: >> http://mail.openjdk.java.net/pipermail/hotspot-dev/2016-March/022302.html >> >> >> With this class we could write code as: >> LogTarget(Debug, gc, heap) log; >> if (log.is_enabled()) { >> log.print(...); >> log.print(...); >> ... = log.stream(); >> } >> >> instead of the current model: >> Log(gc, heap) log; >> if (log.is_debug()) { >> log.debug(...) >> log.debug(...) >> ... = log.debug_stream(): >> } >> >> The LogTarget class/macro ensures that we only mention the log level >> once, so that we don't accidentally log to the wrong level. The previous >> version forces the coder to repeat 'debug' four times. >> >> One example where the wrong log level has been used: >> https://bugs.openjdk.java.net/browse/JDK-8152962 >> >> I've converted one function in compactibleFreeListSpace.cpp to show how >> to use the new class. >> >> Test: new internal vm test, jprt >> >> Thanks, >> StefanK From bengt.rutisson at oracle.com Mon Apr 4 08:49:25 2016 From: bengt.rutisson at oracle.com (Bengt Rutisson) Date: Mon, 4 Apr 2016 10:49:25 +0200 Subject: RFR: 8152635: Create a UL class to represent a Log + Level combination In-Reply-To: <57022824.4040205@oracle.com> References: <56FA8744.8050007@oracle.com> <57022824.4040205@oracle.com> Message-ID: <57022A95.3060406@oracle.com> Hi StefanK, On 2016-04-04 10:39, Stefan Karlsson wrote: > Hi all, > > Here's an updated webrev with fixes for the issues found by Bengt and > Marcus: > http://cr.openjdk.java.net/~stefank/8152635/webrev.02.delta/ > http://cr.openjdk.java.net/~stefank/8152635/webrev.02/ Looks good. Thanks, Bengt > > Thanks, > StefanK > > On 2016-03-29 15:46, Stefan Karlsson wrote: >> Hi all, >> >> Please review this patch to introduce a Unified Logging class to >> describe a combination of a tagset and a log level. >> >> http://cr.openjdk.java.net/~stefank/8152635/webrev.01 >> https://bugs.openjdk.java.net/browse/JDK-8152635 >> >> The patch is applied on top of the patch in: >> http://mail.openjdk.java.net/pipermail/hotspot-dev/2016-March/022302.html >> >> >> With this class we could write code as: >> LogTarget(Debug, gc, heap) log; >> if (log.is_enabled()) { >> log.print(...); >> log.print(...); >> ... = log.stream(); >> } >> >> instead of the current model: >> Log(gc, heap) log; >> if (log.is_debug()) { >> log.debug(...) >> log.debug(...) >> ... = log.debug_stream(): >> } >> >> The LogTarget class/macro ensures that we only mention the log level >> once, so that we don't accidentally log to the wrong level. 
The >> previous version forces the coder to repeat 'debug' four times. >> >> One example where the wrong log level has been used: >> https://bugs.openjdk.java.net/browse/JDK-8152962 >> >> I've converted one function in compactibleFreeListSpace.cpp to show >> how to use the new class. >> >> Test: new internal vm test, jprt >> >> Thanks, >> StefanK > From stefan.karlsson at oracle.com Mon Apr 4 09:01:14 2016 From: stefan.karlsson at oracle.com (Stefan Karlsson) Date: Mon, 4 Apr 2016 11:01:14 +0200 Subject: RFR: 8152635: Create a UL class to represent a Log + Level combination In-Reply-To: <57022A95.3060406@oracle.com> References: <56FA8744.8050007@oracle.com> <57022824.4040205@oracle.com> <57022A95.3060406@oracle.com> Message-ID: <57022D5A.1040404@oracle.com> Thanks, Bengt. StefanK On 2016-04-04 10:49, Bengt Rutisson wrote: > > Hi StefanK, > > On 2016-04-04 10:39, Stefan Karlsson wrote: >> Hi all, >> >> Here's an updated webrev with fixes for the issues found by Bengt and >> Marcus: >> http://cr.openjdk.java.net/~stefank/8152635/webrev.02.delta/ >> http://cr.openjdk.java.net/~stefank/8152635/webrev.02/ > > Looks good. > > Thanks, > Bengt > >> >> Thanks, >> StefanK >> >> On 2016-03-29 15:46, Stefan Karlsson wrote: >>> Hi all, >>> >>> Please review this patch to introduce a Unified Logging class to >>> describe a combination of a tagset and a log level. >>> >>> http://cr.openjdk.java.net/~stefank/8152635/webrev.01 >>> https://bugs.openjdk.java.net/browse/JDK-8152635 >>> >>> The patch is applied on top of the patch in: >>> http://mail.openjdk.java.net/pipermail/hotspot-dev/2016-March/022302.html >>> >>> >>> With this class we could write code as: >>> LogTarget(Debug, gc, heap) log; >>> if (log.is_enabled()) { >>> log.print(...); >>> log.print(...); >>> ... = log.stream(); >>> } >>> >>> instead of the current model: >>> Log(gc, heap) log; >>> if (log.is_debug()) { >>> log.debug(...) >>> log.debug(...) >>> ... = log.debug_stream(): >>> } >>> >>> The LogTarget class/macro ensures that we only mention the log level >>> once, so that we don't accidentally log to the wrong level. The >>> previous version forces the coder to repeat 'debug' four times. >>> >>> One example where the wrong log level has been used: >>> https://bugs.openjdk.java.net/browse/JDK-8152962 >>> >>> I've converted one function in compactibleFreeListSpace.cpp to show >>> how to use the new class. >>> >>> Test: new internal vm test, jprt >>> >>> Thanks, >>> StefanK >> > From stefan.karlsson at oracle.com Mon Apr 4 09:11:04 2016 From: stefan.karlsson at oracle.com (Stefan Karlsson) Date: Mon, 4 Apr 2016 11:11:04 +0200 Subject: RFR: 8153254: Delegate (v)write from Log to LogTagSet In-Reply-To: <76B81C9B-7E5E-465B-94D9-3ADB19ADAD54@oracle.com> References: <56FE370B.3020204@oracle.com> <76B81C9B-7E5E-465B-94D9-3ADB19ADAD54@oracle.com> Message-ID: <57022FA8.4080206@oracle.com> Hi Kim, On 2016-04-04 10:24, Kim Barrett wrote: >> On Apr 1, 2016, at 4:53 AM, Robbin Ehn wrote: >> >> Hi all, >> >> Please review this patch. >> >> This moves log writes methods to LogTagSet. >> >> Bug: https://bugs.openjdk.java.net/browse/JDK-8153254 >> Webrev: http://cr.openjdk.java.net/~rehn/8153254/webrev/ >> >> Tested with internal vm test and jprt. >> >> Thanks! >> >> /Robbin > I think LogTagSetMapping might need to have a GuardTag template > parameter added, similar to that of Log, so that things like > > LogTagSetMapping< LOG_TAGS(__VAR_ARGS__) > > > can be used in some of Stefan's planned followups to this change. 
Actually, I don't have a need for the GuardTag template parameter. All my usages of LogTagSetMapping are encapsulated within other classes and I don't expose them to code taking the tags as varargs macros. I'm OK with leaving out the GuardTag until we need it. StefanK > > Otherwise, looks good. > From marcus.larsson at oracle.com Mon Apr 4 10:21:30 2016 From: marcus.larsson at oracle.com (Marcus Larsson) Date: Mon, 4 Apr 2016 12:21:30 +0200 Subject: RFR: 8152635: Create a UL class to represent a Log + Level combination In-Reply-To: <57022824.4040205@oracle.com> References: <56FA8744.8050007@oracle.com> <57022824.4040205@oracle.com> Message-ID: <5702402A.10901@oracle.com> On 04/04/2016 10:39 AM, Stefan Karlsson wrote: > Hi all, > > Here's an updated webrev with fixes for the issues found by Bengt and > Marcus: > http://cr.openjdk.java.net/~stefank/8152635/webrev.02.delta/ > http://cr.openjdk.java.net/~stefank/8152635/webrev.02/ Looks good! Thanks, Marcus > > Thanks, > StefanK > > On 2016-03-29 15:46, Stefan Karlsson wrote: >> Hi all, >> >> Please review this patch to introduce a Unified Logging class to >> describe a combination of a tagset and a log level. >> >> http://cr.openjdk.java.net/~stefank/8152635/webrev.01 >> https://bugs.openjdk.java.net/browse/JDK-8152635 >> >> The patch is applied on top of the patch in: >> http://mail.openjdk.java.net/pipermail/hotspot-dev/2016-March/022302.html >> >> >> With this class we could write code as: >> LogTarget(Debug, gc, heap) log; >> if (log.is_enabled()) { >> log.print(...); >> log.print(...); >> ... = log.stream(); >> } >> >> instead of the current model: >> Log(gc, heap) log; >> if (log.is_debug()) { >> log.debug(...) >> log.debug(...) >> ... = log.debug_stream(): >> } >> >> The LogTarget class/macro ensures that we only mention the log level >> once, so that we don't accidentally log to the wrong level. The >> previous version forces the coder to repeat 'debug' four times. >> >> One example where the wrong log level has been used: >> https://bugs.openjdk.java.net/browse/JDK-8152962 >> >> I've converted one function in compactibleFreeListSpace.cpp to show >> how to use the new class. >> >> Test: new internal vm test, jprt >> >> Thanks, >> StefanK > From stefan.karlsson at oracle.com Mon Apr 4 10:33:00 2016 From: stefan.karlsson at oracle.com (Stefan Karlsson) Date: Mon, 4 Apr 2016 12:33:00 +0200 Subject: RFR: 8152635: Create a UL class to represent a Log + Level combination In-Reply-To: <5702402A.10901@oracle.com> References: <56FA8744.8050007@oracle.com> <57022824.4040205@oracle.com> <5702402A.10901@oracle.com> Message-ID: <570242DC.8060002@oracle.com> Thanks, Marcus! StefanK On 2016-04-04 12:21, Marcus Larsson wrote: > > On 04/04/2016 10:39 AM, Stefan Karlsson wrote: >> Hi all, >> >> Here's an updated webrev with fixes for the issues found by Bengt and >> Marcus: >> http://cr.openjdk.java.net/~stefank/8152635/webrev.02.delta/ >> http://cr.openjdk.java.net/~stefank/8152635/webrev.02/ > > Looks good! > > Thanks, > Marcus > >> >> Thanks, >> StefanK >> >> On 2016-03-29 15:46, Stefan Karlsson wrote: >>> Hi all, >>> >>> Please review this patch to introduce a Unified Logging class to >>> describe a combination of a tagset and a log level. 
>>> >>> http://cr.openjdk.java.net/~stefank/8152635/webrev.01 >>> https://bugs.openjdk.java.net/browse/JDK-8152635 >>> >>> The patch is applied on top of the patch in: >>> http://mail.openjdk.java.net/pipermail/hotspot-dev/2016-March/022302.html >>> >>> >>> With this class we could write code as: >>> LogTarget(Debug, gc, heap) log; >>> if (log.is_enabled()) { >>> log.print(...); >>> log.print(...); >>> ... = log.stream(); >>> } >>> >>> instead of the current model: >>> Log(gc, heap) log; >>> if (log.is_debug()) { >>> log.debug(...) >>> log.debug(...) >>> ... = log.debug_stream(): >>> } >>> >>> The LogTarget class/macro ensures that we only mention the log level >>> once, so that we don't accidentally log to the wrong level. The >>> previous version forces the coder to repeat 'debug' four times. >>> >>> One example where the wrong log level has been used: >>> https://bugs.openjdk.java.net/browse/JDK-8152962 >>> >>> I've converted one function in compactibleFreeListSpace.cpp to show >>> how to use the new class. >>> >>> Test: new internal vm test, jprt >>> >>> Thanks, >>> StefanK >> > From stefan.karlsson at oracle.com Mon Apr 4 12:05:18 2016 From: stefan.karlsson at oracle.com (Stefan Karlsson) Date: Mon, 4 Apr 2016 14:05:18 +0200 Subject: RFR: 8152637: Create a stack allocatable LogStream class In-Reply-To: <56FA877B.9030601@oracle.com> References: <56FA877B.9030601@oracle.com> Message-ID: <5702587E.5090003@oracle.com> Hi all, I've updated the patch to use LogTagSets instead of function pointers: http://cr.openjdk.java.net/~stefank/8152637/webrev.02.delta http://cr.openjdk.java.net/~stefank/8152637/webrev.02 The patch is rebased against the LogTagSet enhancements in: http://mail.openjdk.java.net/pipermail/hotspot-dev/2016-April/022368.html I've also updated the unit tests after feedback on earlier UL patches. Thanks, StefanK On 2016-03-29 15:47, Stefan Karlsson wrote: > Hi all, > > Please review this patch to create a stack allocatable LogStream class > that embeds the ResourceMark. > > http://cr.openjdk.java.net/~stefank/8152637/webrev.01 > https://bugs.openjdk.java.net/browse/JDK-815263 > > The patch is applied on top of the patch in: > http://mail.openjdk.java.net/pipermail/hotspot-dev/2016-March/022303.html > > I've converted the code in binaryTreeDictionary.cpp to show how to use > the class. > > Test: new internal vm test, jprt > > Thanks, > StefanK From stefan.karlsson at oracle.com Mon Apr 4 12:20:43 2016 From: stefan.karlsson at oracle.com (Stefan Karlsson) Date: Mon, 4 Apr 2016 14:20:43 +0200 Subject: RFR: 8152711: Create a non-template Log wrapper class In-Reply-To: <56FB8BC9.8070006@oracle.com> References: <56FA87AC.6000402@oracle.com> <67C1AA71-7C36-43BE-BE33-7091C5AC5F1A@oracle.com> <56FB8BC9.8070006@oracle.com> Message-ID: <57025C1B.6080707@oracle.com> Hi all, I've created a new patch to use LogTagSets instead of function pointers: http://cr.openjdk.java.net/~stefank/8152711/webrev.02.delta/ http://cr.openjdk.java.net/~stefank/8152711/webrev.02/ The patch is rebased against the LogTagSet enhancements in: http://mail.openjdk.java.net/pipermail/hotspot-dev/2016-April/022368.html I've also updated the unit tests after feedback on earlier UL patches. Thanks, StefanK On 2016-03-30 10:18, Stefan Karlsson wrote: > On 2016-03-29 19:10, Kim Barrett wrote: >>> On Mar 29, 2016, at 9:48 AM, Stefan Karlsson >>> wrote: >>> >>> Hi all, >>> >>> Please review this patch to introduce type-erased Log and LogTarget >>> wrapper classes. 
These classes can be used where we don't want or >>> can't use the template parameters associated with the Log and >>> LogTarget classes. >>> >>> http://cr.openjdk.java.net/~stefank/8152711/webrev.01 >>> https://bugs.openjdk.java.net/browse/JDK-8152711 >>> >>> The patch is applied on top of the patch in: >>> http://mail.openjdk.java.net/pipermail/hotspot-dev/2016-March/022304.html >>> >>> >>> I've received internal feedback that we probably want to reuse the >>> LogTagSet instances. I'd prefer if that could be prototyped and >>> implemented as a separate RFE. >> I don't think the change being proposed by this RFE should be made. >> Instead, I think the approach of using existing LogTagSet should be >> taken. Introducing the new classes here and starting to use them will >> just make it harder to to adopt the LogTagSet approach, which I feel >> is superior, and is not hard. I already provided a prototype last >> week. > > I've talked to Kim about this offline. > > Kim envisioned the direct usage of the LogTagSet when logging in our > code. But I see a value in having a smaller, simpler Log[Target]Handle > interface instead of exposing the rather big LogTagSet interface to > the rest of our code. I think Kim now agrees on this. > > What this then boils down to is the difference of using function > pointers vs LogTagSet and LogLevels, to implement the > Log[Target]Handle wrapper classes. IMHO, that's not enough of a reason > to block my patch. We could easily have changed the Log[Target]Handle > implementation to use LogTagSets without changing code outside the UL > framework. And that was what I had asked for, both in this RFR and in > offline communications. > > I worked with Kim yesterday to provide a prototype to use LogTagSets > instead of function pointers: > http://cr.openjdk.java.net/~stefank/8152711/webrev.logHandleWithLogTagSetsImpl/ > > > I'm pretty happy with this change. > > The plan to get this pushed is to: > 1) Kim, or someone else, will send out an RFR regarding the > infrastructural changes to LogTagSet > 2) I'll update my LogStream patch to use LogTagSets > 3) I'll update my LogHandle patch to use LogTagSets > > So, until (1) is done, this RFR will be put on hold. > > StefanK > >> >>> I've changed the implementation of GCTraceTime to show how >>> LogHandles can be used to lower the amount of template parameters >>> used throughout the implementation. >>> >>> Test: new internal vm test, jprt >>> >>> Thanks, >>> StefanK >> > From robbin.ehn at oracle.com Mon Apr 4 13:14:10 2016 From: robbin.ehn at oracle.com (Robbin Ehn) Date: Mon, 4 Apr 2016 15:14:10 +0200 Subject: RFR: 8153254: Delegate (v)write from Log to LogTagSet In-Reply-To: <76B81C9B-7E5E-465B-94D9-3ADB19ADAD54@oracle.com> References: <56FE370B.3020204@oracle.com> <76B81C9B-7E5E-465B-94D9-3ADB19ADAD54@oracle.com> Message-ID: <570268A2.4040801@oracle.com> Hi Kim On 04/04/2016 10:24 AM, Kim Barrett wrote: >> On Apr 1, 2016, at 4:53 AM, Robbin Ehn wrote: >> >> Hi all, >> >> Please review this patch. >> >> This moves log writes methods to LogTagSet. >> >> Bug: https://bugs.openjdk.java.net/browse/JDK-8153254 >> Webrev: http://cr.openjdk.java.net/~rehn/8153254/webrev/ >> >> Tested with internal vm test and jprt. >> >> Thanks! >> >> /Robbin > > I think LogTagSetMapping might need to have a GuardTag template > parameter added, similar to that of Log, so that things like > > LogTagSetMapping< LOG_TAGS(__VAR_ARGS__) > > > can be used in some of Stefan's planned followups to this change. 
I also found a other need to for the guards. (we use LogTagSetMapping directly in some internal code) So I added them back. > > Otherwise, looks good. > Thanks! (pushing without new webrev) /Robbin From robbin.ehn at oracle.com Mon Apr 4 13:17:22 2016 From: robbin.ehn at oracle.com (Robbin Ehn) Date: Mon, 4 Apr 2016 15:17:22 +0200 Subject: RFR: 8153254: Delegate (v)write from Log to LogTagSet In-Reply-To: <57022FA8.4080206@oracle.com> References: <56FE370B.3020204@oracle.com> <76B81C9B-7E5E-465B-94D9-3ADB19ADAD54@oracle.com> <57022FA8.4080206@oracle.com> Message-ID: <57026962.6030607@oracle.com> Hi, On 04/04/2016 11:11 AM, Stefan Karlsson wrote: > Hi Kim, > > On 2016-04-04 10:24, Kim Barrett wrote: >>> On Apr 1, 2016, at 4:53 AM, Robbin Ehn wrote: >>> >>> Hi all, >>> >>> Please review this patch. >>> >>> This moves log writes methods to LogTagSet. >>> >>> Bug: https://bugs.openjdk.java.net/browse/JDK-8153254 >>> Webrev: http://cr.openjdk.java.net/~rehn/8153254/webrev/ >>> >>> Tested with internal vm test and jprt. >>> >>> Thanks! >>> >>> /Robbin >> I think LogTagSetMapping might need to have a GuardTag template >> parameter added, similar to that of Log, so that things like >> >> LogTagSetMapping< LOG_TAGS(__VAR_ARGS__) > >> >> can be used in some of Stefan's planned followups to this change. > > Actually, I don't have a need for the GuardTag template parameter. All > my usages of LogTagSetMapping are encapsulated within other classes and > I don't expose them to code taking the tags as varargs macros. I'm OK > with leaving out the GuardTag until we need it. As I said to Kim, I found a place where they matters, so I added them back. (as from IRL you are ok with this, hence no webrev) Thanks, Robbin > > StefanK > >> >> Otherwise, looks good. >> > From stefan.karlsson at oracle.com Mon Apr 4 14:21:40 2016 From: stefan.karlsson at oracle.com (Stefan Karlsson) Date: Mon, 4 Apr 2016 16:21:40 +0200 Subject: RFR: 8153254: Delegate (v)write from Log to LogTagSet In-Reply-To: <57026962.6030607@oracle.com> References: <56FE370B.3020204@oracle.com> <76B81C9B-7E5E-465B-94D9-3ADB19ADAD54@oracle.com> <57022FA8.4080206@oracle.com> <57026962.6030607@oracle.com> Message-ID: <57027874.5040201@oracle.com> On 2016-04-04 15:17, Robbin Ehn wrote: > Hi, > > On 04/04/2016 11:11 AM, Stefan Karlsson wrote: >> Hi Kim, >> >> On 2016-04-04 10:24, Kim Barrett wrote: >>>> On Apr 1, 2016, at 4:53 AM, Robbin Ehn wrote: >>>> >>>> Hi all, >>>> >>>> Please review this patch. >>>> >>>> This moves log writes methods to LogTagSet. >>>> >>>> Bug: https://bugs.openjdk.java.net/browse/JDK-8153254 >>>> Webrev: http://cr.openjdk.java.net/~rehn/8153254/webrev/ >>>> >>>> Tested with internal vm test and jprt. >>>> >>>> Thanks! >>>> >>>> /Robbin >>> I think LogTagSetMapping might need to have a GuardTag template >>> parameter added, similar to that of Log, so that things like >>> >>> LogTagSetMapping< LOG_TAGS(__VAR_ARGS__) > >>> >>> can be used in some of Stefan's planned followups to this change. >> >> Actually, I don't have a need for the GuardTag template parameter. All >> my usages of LogTagSetMapping are encapsulated within other classes and >> I don't expose them to code taking the tags as varargs macros. I'm OK >> with leaving out the GuardTag until we need it. > > As I said to Kim, I found a place where they matters, so I added them > back. > > (as from IRL you are ok with this, hence no webrev) Sounds good to me. Thanks, StefanK > > Thanks, Robbin > >> >> StefanK >> >>> >>> Otherwise, looks good. 
>>> >> From robbin.ehn at oracle.com Mon Apr 4 15:15:47 2016 From: robbin.ehn at oracle.com (Robbin Ehn) Date: Mon, 4 Apr 2016 17:15:47 +0200 Subject: RFR: 8152637: Create a stack allocatable LogStream class In-Reply-To: <5702587E.5090003@oracle.com> References: <56FA877B.9030601@oracle.com> <5702587E.5090003@oracle.com> Message-ID: <57028523.4040600@oracle.com> Hi Stefan, Looks even better, thanks! /Robbin On 04/04/2016 02:05 PM, Stefan Karlsson wrote: > Hi all, > > I've updated the patch to use LogTagSets instead of function pointers: > > http://cr.openjdk.java.net/~stefank/8152637/webrev.02.delta > http://cr.openjdk.java.net/~stefank/8152637/webrev.02 > > The patch is rebased against the LogTagSet enhancements in: > > http://mail.openjdk.java.net/pipermail/hotspot-dev/2016-April/022368.html > > I've also updated the unit tests after feedback on earlier UL patches. > > Thanks, > StefanK > > On 2016-03-29 15:47, Stefan Karlsson wrote: >> Hi all, >> >> Please review this patch to create a stack allocatable LogStream class >> that embeds the ResourceMark. >> >> http://cr.openjdk.java.net/~stefank/8152637/webrev.01 >> https://bugs.openjdk.java.net/browse/JDK-815263 >> >> The patch is applied on top of the patch in: >> http://mail.openjdk.java.net/pipermail/hotspot-dev/2016-March/022303.html >> >> I've converted the code in binaryTreeDictionary.cpp to show how to use >> the class. >> >> Test: new internal vm test, jprt >> >> Thanks, >> StefanK > From mikael.gerdin at oracle.com Mon Apr 4 15:16:30 2016 From: mikael.gerdin at oracle.com (Mikael Gerdin) Date: Mon, 4 Apr 2016 17:16:30 +0200 Subject: RFR (L): 8149374: Replace C1-specific collection classes with universal collection classes In-Reply-To: References: <56FAFF3E.3020507@oracle.com> <56FD3ECF.1090800@oracle.com> <56FD4F63.8020103@oracle.com> <56FE8711.3080703@oracle.com> Message-ID: <5702854E.7010307@oracle.com> Hi Filipp, On 2016-04-02 13:32, Filipp Zhinkin wrote: > Here is an webrev updated according Mikael's comments: > > http://cr.openjdk.java.net/~fzhinkin/8149374/webrev.02/ I noticed that you changed this compared to my suggestion. From my understanding the explicit cast to void is unnecessary, all pointer types implicitly convert to void*. I've been forced to accept that people prefer "&foo[i]" over "foo + i" in the past so I'm ok with that change. - new ((ParScanThreadState*)_data + i) + new((void*) &_per_thread_states[i]) I suspect that you can remove 33 // correct linkage required to compile w/o warnings 34 // (must be on file level - cannot be local) 35 extern "C" { typedef int (*ftype)(const void*, const void*); } 36 as well, it appears to only have been used by the sort() methods of the removed array classes. I haven't checked all the other uses but from a GC perspective I think this is good to go. /Mikael > > Tested using hotspot_all tests w/ CMS turned on. > > Thanks, > Filipp. > > On Fri, Apr 1, 2016 at 5:34 PM, Mikael Gerdin wrote: >> Hi Filipp >> >> On 2016-04-01 16:27, Filipp Zhinkin wrote: >>> >>> Hi Mikael, >>> >>> On Thu, Mar 31, 2016 at 7:25 PM, Mikael Gerdin >>> wrote: >>>> >>>> Hi, >>>> >>>> I like the cleanup, can't we also remove CHeapArray in arrays.hpp? >>> >>> >>> Sure! I've missed that it is not used at all. >> >> >> Great! >> >>> >>>> >>>> As for the CMS change, I would prefer this instead (untested!): >>>> http://cr.openjdk.java.net/~mgerdin/pss-array/webrev/ >>> >>> >>> Thanks, your implementation looks much better. >>> If you don't mind I'll incorporate it into my change. 
>> >> >> Go ahead, that was my intention. >> >>> >>> Also, it seems like in ParNewGeneration::collect we have to create >>> ResourceMark before ParScanThreadStateSet, right? >> >> >> There is a ResourceMark in the caller so I don't think it's needed. >> The old version of the code used resource allocation as well and was fine so >> I don't think there is a need to introduce another ResourceMark. >> >> >> /Mikael >> >> >>> >>> Thanks, >>> Filipp. >>> >>>> >>>> /Mikael >>>> >>>> >>>> On 2016-03-31 17:14, Vladimir Kozlov wrote: >>>>> >>>>> >>>>> Hi Filipp, >>>>> >>>>> Yes, this looks better. CCing to hotspot-dev for Runtime and GC groups >>>>> to look on. >>>>> >>>>> Thanks, >>>>> Vladimir >>>>> >>>>> On 3/31/16 8:08 AM, Filipp Zhinkin wrote: >>>>>> >>>>>> >>>>>> Hi Vladimir, >>>>>> >>>>>> thank you for looking at this change. >>>>>> >>>>>> On Wed, Mar 30, 2016 at 1:18 AM, Vladimir Kozlov >>>>>> wrote: >>>>>>> >>>>>>> >>>>>>> Nice clean up but I don't see any source code removed. What benefits >>>>>>> we have >>>>>>> then? >>>>>>> I understand that we don't generate subclasses for ResourceArray and >>>>>>> use >>>>>>> GrowableArray. But it will not save space I think. >>>>>>> What prevents us to remove ResourceArray at all? >>>>>> >>>>>> >>>>>> >>>>>> CMS's ParScanThreadStateSet is inherited from ResourceArray, >>>>>> so it should be updated before removing ResourceArray: >>>>>> >>>>>> http://cr.openjdk.java.net/~fzhinkin/8149374/webrev.01/ >>>>>> >>>>>>> >>>>>>> On 3/11/16 3:42 AM, Filipp Zhinkin wrote: >>>>>>>> >>>>>>>> >>>>>>>> >>>>>>>> Hi all, >>>>>>>> >>>>>>>> please review a fix for JDK-8149374: >>>>>>>> >>>>>>>> Webrev: http://cr.openjdk.java.net/~fzhinkin/8149374/webrev.00/ >>>>>>>> Bug: https://bugs.openjdk.java.net/browse/JDK-8149374 >>>>>>>> Testing done: hotspot_all tests + CTW >>>>>>>> >>>>>>>> I've replaced all usages of collections defined via define_array and >>>>>>>> define_stack macros with GrowableArray. >>>>>>>> >>>>>>>> There are good and bad news regarding performance impact of that >>>>>>>> change. >>>>>>>> Unfortunately, C1 compilation time for CTW-scenario w/ release bits >>>>>>>> increased from 51.07?0.28s to 52.99?0.23s (it's about 3.5%). >>>>>>> >>>>>>> >>>>>>> >>>>>>> >>>>>>> It is acceptable regression I think. I don't think we should optimize >>>>>>> and >>>>>>> make more complex GrowableArray just to save 0.5% of performance for >>>>>>> C2. >>>>>> >>>>>> >>>>>> >>>>>> As long as GrowableArray is used in different Hotspot's subsystems it >>>>>> may be beneficial to optimize it, >>>>>> but I've executed SPECjvm2008's startup.* benchmarks and there were no >>>>>> significant difference. >>>>>> >>>>>> If ~3% regression is OK for C1 then I'm fine with leaving >>>>>> GrowableArray's initialization >>>>>> in its current state unless there will be other reasons to speed it up. >>>>>> >>>>>> Thanks, >>>>>> Filipp. >>>>>> >>>>>>> >>>>>>> Thanks, >>>>>>> Vladimir >>>>>>> >>>>>>> >>>>>>>> >>>>>>>> Such difference caused by eager initialization of GrowableArray's >>>>>>>> backing array elements [1]. I can imagine when we actually need to >>>>>>>> force >>>>>>>> initialization and de-initialization during array's >>>>>>>> growing/destruction, but for some types like c++ primitive types or >>>>>>>> pointers such initialization does not make much sense, because >>>>>>>> GrowableArray is not allowing to access an element which was not >>>>>>>> explicitly placed inside of it. 
And as long as GrowableArray most >>>>>>>> widely used to store pointers we're simply wasting the time with >>>>>>>> initialization. >>>>>>>> >>>>>>>> I've measured CTW time with following workaround which implements >>>>>>>> initialization for numeric types and pointers as no-op and C1 >>>>>>>> compilation time returned back to values that were measured before >>>>>>>> original change (51.06?0.24s): >>>>>>>> >>>>>>>> >>>>>>>> >>>>>>>> http://cr.openjdk.java.net/~fzhinkin/growableArrayInitialization/webrev/ >>>>>>>> >>>>>>>> >>>>>>>> I've also measured C2 compilation time and it dropped down by a few >>>>>>>> seconds too: 1138?9s w/o GrowableArray's change and 1132?5s w/ it. >>>>>>>> >>>>>>>> Summing up: I guess we should avoid GrowableArray's backing array >>>>>>>> initialization for some types, don't we? >>>>>>>> >>>>>>>> Best regards, >>>>>>>> Filipp >>>>>>>> >>>>>>>> [1] >>>>>>>> >>>>>>>> >>>>>>>> http://hg.openjdk.java.net/jdk9/hs-comp/hotspot/file/323b8370b0f6/src/share/vm/utilities/growableArray.hpp#l165 >>>>>>>> >>>>>>>> >>>>>>> >>>> >> From robbin.ehn at oracle.com Mon Apr 4 15:17:33 2016 From: robbin.ehn at oracle.com (Robbin Ehn) Date: Mon, 4 Apr 2016 17:17:33 +0200 Subject: RFR: 8152711: Create a non-template Log wrapper class In-Reply-To: <57025C1B.6080707@oracle.com> References: <56FA87AC.6000402@oracle.com> <67C1AA71-7C36-43BE-BE33-7091C5AC5F1A@oracle.com> <56FB8BC9.8070006@oracle.com> <57025C1B.6080707@oracle.com> Message-ID: <5702858D.4000708@oracle.com> Hi Stefan, This also looks better than before, thanks! /Robbin On 04/04/2016 02:20 PM, Stefan Karlsson wrote: > Hi all, > > I've created a new patch to use LogTagSets instead of function pointers: > > http://cr.openjdk.java.net/~stefank/8152711/webrev.02.delta/ > http://cr.openjdk.java.net/~stefank/8152711/webrev.02/ > > The patch is rebased against the LogTagSet enhancements in: > > http://mail.openjdk.java.net/pipermail/hotspot-dev/2016-April/022368.html > > I've also updated the unit tests after feedback on earlier UL patches. > > Thanks, > StefanK > > On 2016-03-30 10:18, Stefan Karlsson wrote: >> On 2016-03-29 19:10, Kim Barrett wrote: >>>> On Mar 29, 2016, at 9:48 AM, Stefan Karlsson >>>> wrote: >>>> >>>> Hi all, >>>> >>>> Please review this patch to introduce type-erased Log and LogTarget >>>> wrapper classes. These classes can be used where we don't want or >>>> can't use the template parameters associated with the Log and >>>> LogTarget classes. >>>> >>>> http://cr.openjdk.java.net/~stefank/8152711/webrev.01 >>>> https://bugs.openjdk.java.net/browse/JDK-8152711 >>>> >>>> The patch is applied on top of the patch in: >>>> http://mail.openjdk.java.net/pipermail/hotspot-dev/2016-March/022304.html >>>> >>>> >>>> I've received internal feedback that we probably want to reuse the >>>> LogTagSet instances. I'd prefer if that could be prototyped and >>>> implemented as a separate RFE. >>> I don't think the change being proposed by this RFE should be made. >>> Instead, I think the approach of using existing LogTagSet should be >>> taken. Introducing the new classes here and starting to use them will >>> just make it harder to to adopt the LogTagSet approach, which I feel >>> is superior, and is not hard. I already provided a prototype last >>> week. >> >> I've talked to Kim about this offline. >> >> Kim envisioned the direct usage of the LogTagSet when logging in our >> code. 
But I see a value in having a smaller, simpler Log[Target]Handle >> interface instead of exposing the rather big LogTagSet interface to >> the rest of our code. I think Kim now agrees on this. >> >> What this then boils down to is the difference of using function >> pointers vs LogTagSet and LogLevels, to implement the >> Log[Target]Handle wrapper classes. IMHO, that's not enough of a reason >> to block my patch. We could easily have changed the Log[Target]Handle >> implementation to use LogTagSets without changing code outside the UL >> framework. And that was what I had asked for, both in this RFR and in >> offline communications. >> >> I worked with Kim yesterday to provide a prototype to use LogTagSets >> instead of function pointers: >> http://cr.openjdk.java.net/~stefank/8152711/webrev.logHandleWithLogTagSetsImpl/ >> >> >> I'm pretty happy with this change. >> >> The plan to get this pushed is to: >> 1) Kim, or someone else, will send out an RFR regarding the >> infrastructural changes to LogTagSet >> 2) I'll update my LogStream patch to use LogTagSets >> 3) I'll update my LogHandle patch to use LogTagSets >> >> So, until (1) is done, this RFR will be put on hold. >> >> StefanK >> >>> >>>> I've changed the implementation of GCTraceTime to show how >>>> LogHandles can be used to lower the amount of template parameters >>>> used throughout the implementation. >>>> >>>> Test: new internal vm test, jprt >>>> >>>> Thanks, >>>> StefanK >>> >> > From stefan.karlsson at oracle.com Mon Apr 4 15:26:55 2016 From: stefan.karlsson at oracle.com (Stefan Karlsson) Date: Mon, 4 Apr 2016 17:26:55 +0200 Subject: RFR: 8152637: Create a stack allocatable LogStream class In-Reply-To: <57028523.4040600@oracle.com> References: <56FA877B.9030601@oracle.com> <5702587E.5090003@oracle.com> <57028523.4040600@oracle.com> Message-ID: <570287BF.2010807@oracle.com> Thanks, Robbin. StefanK On 2016-04-04 17:15, Robbin Ehn wrote: > Hi Stefan, > > Looks even better, thanks! > > /Robbin > > On 04/04/2016 02:05 PM, Stefan Karlsson wrote: >> Hi all, >> >> I've updated the patch to use LogTagSets instead of function pointers: >> >> http://cr.openjdk.java.net/~stefank/8152637/webrev.02.delta >> http://cr.openjdk.java.net/~stefank/8152637/webrev.02 >> >> The patch is rebased against the LogTagSet enhancements in: >> >> http://mail.openjdk.java.net/pipermail/hotspot-dev/2016-April/022368.html >> >> I've also updated the unit tests after feedback on earlier UL patches. >> >> Thanks, >> StefanK >> >> On 2016-03-29 15:47, Stefan Karlsson wrote: >>> Hi all, >>> >>> Please review this patch to create a stack allocatable LogStream class >>> that embeds the ResourceMark. >>> >>> http://cr.openjdk.java.net/~stefank/8152637/webrev.01 >>> https://bugs.openjdk.java.net/browse/JDK-815263 >>> >>> The patch is applied on top of the patch in: >>> http://mail.openjdk.java.net/pipermail/hotspot-dev/2016-March/022303.html >>> >>> >>> I've converted the code in binaryTreeDictionary.cpp to show how to use >>> the class. 
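Since the quoted 8152637 patch is about letting callers put a LogStream on the stack without writing a separate ResourceMark, here is a rough, self-contained sketch of that shape. The names below (ScopedMark, StackLogStream, print_tree_stats) are placeholders, not the classes in the linked webrev; the only point is that embedding the mark ties its lifetime to the stream.

    #include <cstdio>
    #include <cstring>

    class ScopedMark {                 // stand-in for ResourceMark
    public:
      ScopedMark()  { /* remember the resource-area watermark */ }
      ~ScopedMark() { /* release everything allocated past the watermark */ }
    };

    class StackLogStream {
      ScopedMark _mark;                // embedded: call sites need no separate mark
      char       _buf[256];
    public:
      StackLogStream() { _buf[0] = '\0'; }
      void print_cr(const char* text) {
        // buffer one line and flush it; the real class forwards to the log target
        size_t len = strlen(text);
        if (len >= sizeof(_buf)) len = sizeof(_buf) - 1;
        memcpy(_buf, text, len);
        _buf[len] = '\0';
        printf("%s\n", _buf);
      }
    };

    void print_tree_stats() {
      StackLogStream st;               // stack allocated
      st.print_cr("binary tree dictionary statistics ...");
    }                                  // stream destroyed here, mark released with it
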
>>> >>> Test: new internal vm test, jprt >>> >>> Thanks, >>> StefanK >> From stefan.karlsson at oracle.com Mon Apr 4 15:27:15 2016 From: stefan.karlsson at oracle.com (Stefan Karlsson) Date: Mon, 4 Apr 2016 17:27:15 +0200 Subject: RFR: 8152711: Create a non-template Log wrapper class In-Reply-To: <5702858D.4000708@oracle.com> References: <56FA87AC.6000402@oracle.com> <67C1AA71-7C36-43BE-BE33-7091C5AC5F1A@oracle.com> <56FB8BC9.8070006@oracle.com> <57025C1B.6080707@oracle.com> <5702858D.4000708@oracle.com> Message-ID: <570287D3.7070002@oracle.com> Thanks, Robbin. StefanK On 2016-04-04 17:17, Robbin Ehn wrote: > Hi Stefan, > > This also looks better than before, thanks! > > /Robbin > > On 04/04/2016 02:20 PM, Stefan Karlsson wrote: >> Hi all, >> >> I've created a new patch to use LogTagSets instead of function pointers: >> >> http://cr.openjdk.java.net/~stefank/8152711/webrev.02.delta/ >> http://cr.openjdk.java.net/~stefank/8152711/webrev.02/ >> >> The patch is rebased against the LogTagSet enhancements in: >> >> http://mail.openjdk.java.net/pipermail/hotspot-dev/2016-April/022368.html >> >> I've also updated the unit tests after feedback on earlier UL patches. >> >> Thanks, >> StefanK >> >> On 2016-03-30 10:18, Stefan Karlsson wrote: >>> On 2016-03-29 19:10, Kim Barrett wrote: >>>>> On Mar 29, 2016, at 9:48 AM, Stefan Karlsson >>>>> wrote: >>>>> >>>>> Hi all, >>>>> >>>>> Please review this patch to introduce type-erased Log and LogTarget >>>>> wrapper classes. These classes can be used where we don't want or >>>>> can't use the template parameters associated with the Log and >>>>> LogTarget classes. >>>>> >>>>> http://cr.openjdk.java.net/~stefank/8152711/webrev.01 >>>>> https://bugs.openjdk.java.net/browse/JDK-8152711 >>>>> >>>>> The patch is applied on top of the patch in: >>>>> http://mail.openjdk.java.net/pipermail/hotspot-dev/2016-March/022304.html >>>>> >>>>> >>>>> >>>>> I've received internal feedback that we probably want to reuse the >>>>> LogTagSet instances. I'd prefer if that could be prototyped and >>>>> implemented as a separate RFE. >>>> I don't think the change being proposed by this RFE should be made. >>>> Instead, I think the approach of using existing LogTagSet should be >>>> taken. Introducing the new classes here and starting to use them will >>>> just make it harder to to adopt the LogTagSet approach, which I feel >>>> is superior, and is not hard. I already provided a prototype last >>>> week. >>> >>> I've talked to Kim about this offline. >>> >>> Kim envisioned the direct usage of the LogTagSet when logging in our >>> code. But I see a value in having a smaller, simpler Log[Target]Handle >>> interface instead of exposing the rather big LogTagSet interface to >>> the rest of our code. I think Kim now agrees on this. >>> >>> What this then boils down to is the difference of using function >>> pointers vs LogTagSet and LogLevels, to implement the >>> Log[Target]Handle wrapper classes. IMHO, that's not enough of a reason >>> to block my patch. We could easily have changed the Log[Target]Handle >>> implementation to use LogTagSets without changing code outside the UL >>> framework. And that was what I had asked for, both in this RFR and in >>> offline communications. >>> >>> I worked with Kim yesterday to provide a prototype to use LogTagSets >>> instead of function pointers: >>> http://cr.openjdk.java.net/~stefank/8152711/webrev.logHandleWithLogTagSetsImpl/ >>> >>> >>> >>> I'm pretty happy with this change. 
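For readers not following the UL patches closely, the idea being reviewed is a non-template handle that captures "which tag set, which level" as plain data, so a class can hold a logger without carrying LogTag template parameters in its own declaration. A very rough sketch with stand-in types follows; TagSetRef is not the real LogTagSet interface and the method names are invented for illustration.

    #include <cstdarg>
    #include <cstdio>

    enum LogLevelType { Trace, Debug, Info, Warning, Error, Off };

    struct TagSetRef {                      // stand-in for LogTagSet
      LogLevelType threshold;
      bool is_level(LogLevelType level) const { return threshold != Off && level >= threshold; }
      void vwrite(LogLevelType, const char* fmt, va_list ap) const { vprintf(fmt, ap); printf("\n"); }
    };

    class LogHandleSketch {                 // type-erased: no template parameters on the class
      TagSetRef*   _tagset;
      LogLevelType _level;
    public:
      LogHandleSketch(TagSetRef* ts, LogLevelType level) : _tagset(ts), _level(level) {}
      bool is_enabled() const { return _tagset->is_level(_level); }
      void write(const char* fmt, ...) const {
        if (!is_enabled()) return;
        va_list ap;
        va_start(ap, fmt);
        _tagset->vwrite(_level, fmt, ap);
        va_end(ap);
      }
    };

    // A class like GCTraceTime can then keep the handle as an ordinary member:
    class TimerSketch {
      LogHandleSketch _out;
    public:
      explicit TimerSketch(const LogHandleSketch& out) : _out(out) { _out.write("start"); }
      ~TimerSketch() { _out.write("done"); }
    };
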
>>> >>> The plan to get this pushed is to: >>> 1) Kim, or someone else, will send out an RFR regarding the >>> infrastructural changes to LogTagSet >>> 2) I'll update my LogStream patch to use LogTagSets >>> 3) I'll update my LogHandle patch to use LogTagSets >>> >>> So, until (1) is done, this RFR will be put on hold. >>> >>> StefanK >>> >>>> >>>>> I've changed the implementation of GCTraceTime to show how >>>>> LogHandles can be used to lower the amount of template parameters >>>>> used throughout the implementation. >>>>> >>>>> Test: new internal vm test, jprt >>>>> >>>>> Thanks, >>>>> StefanK >>>> >>> >> From edward.nevill at gmail.com Mon Apr 4 17:20:32 2016 From: edward.nevill at gmail.com (Edward Nevill) Date: Mon, 04 Apr 2016 18:20:32 +0100 Subject: RFR 8153310: AArch64: JEP 254: Implement byte_array_inflate and char_array_compress In-Reply-To: <56FEB045.8000905@redhat.com> References: <56FEB045.8000905@redhat.com> Message-ID: <1459790432.3233.7.camel@mint> Hi, On Fri, 2016-04-01 at 18:30 +0100, Andrew Haley wrote: > We need this for CompactStrings. > > http://cr.openjdk.java.net/~aph/8153310/ Looks fine to me. A few minor points. + // Short string: less than 8 bytes. + { + Label loop, around, tiny; Label 'aound' is declared and bound, but never used. + // Unpack the bytes 8 at a time. + bind(big); + andw(len, len, 7); + + { + Label loop, around; And also here + bind(loop); + ld1(vtmp2, T8B, post(src, 8)); + sub(rscratch1, rscratch1, 1); + zip1(vtmp3, T16B, vtmp2, vtmp1); + st1(vtmp3, T8H, post(dst, 16)); Would it be better to use ldrd and strq rather than ld1 and st1. It seems more natural to me and will give better performance. All the best, Ed. From sgehwolf at redhat.com Mon Apr 4 17:44:55 2016 From: sgehwolf at redhat.com (Severin Gehwolf) Date: Mon, 04 Apr 2016 19:44:55 +0200 Subject: RFR(XS): 8153275: Zero JVM fails to initialize after JDK-8152440 Message-ID: <1459791895.3762.14.camel@redhat.com> Hi, Could somebody please sponsor and review the following Zero-only fix? The fix for JDK-8152440 was incorrect in that it set the value for?InitArrayShortSize to an illegal value (-1) failing constraint validation. Albeit not being used it must still pass constraint validation. Otherwise, the JVM fails to initialize and all bets are off. Thoughts? Bug:?https://bugs.openjdk.java.net/browse/JDK-8153275 webrev:?http://cr.openjdk.java.net/~sgehwolf/webrevs/JDK-8153275/webrev.01/ Thanks, Severin From jesper.wilhelmsson at oracle.com Mon Apr 4 17:54:38 2016 From: jesper.wilhelmsson at oracle.com (Jesper Wilhelmsson) Date: Mon, 4 Apr 2016 19:54:38 +0200 Subject: RFR(XS): JDK-8153410 - Disable tests until JDK-8151460 gets to main Message-ID: <5702AA5E.9010003@oracle.com> Hi, Could I have a quick review for this fix to quarantine two tests. The tests are broken and are failing in PIT. The fix for the tests was pushed to hs-rt so we need to quarantine the tests in main until the testfix can be integrated to main. Bug: https://bugs.openjdk.java.net/browse/JDK-8153410 Webrev: http://cr.openjdk.java.net/~jwilhelm/8153410/webrev.00 Thanks, /Jesper From george.triantafillou at oracle.com Mon Apr 4 18:01:37 2016 From: george.triantafillou at oracle.com (George Triantafillou) Date: Mon, 4 Apr 2016 14:01:37 -0400 Subject: RFR(XS): JDK-8153410 - Disable tests until JDK-8151460 gets to main In-Reply-To: <5702AA5E.9010003@oracle.com> References: <5702AA5E.9010003@oracle.com> Message-ID: <5702AC01.1080100@oracle.com> Jesper, Looks good. 
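The detail worth calling out in Severin's Zero fix above is that a flag's range/constraint check runs during VM initialization whether or not the flag is ever read afterwards, so even an "unused" default has to be a legal value. A stand-in sketch of that kind of check follows; it is not the actual flag-constraint code, and the exact constraint on InitArrayShortSize is assumed here to be simple non-negativity.

    #include <cstdio>

    typedef long intx;                                  // HotSpot-style flag integer

    enum FlagCheck { CHECK_OK, CHECK_VIOLATES_CONSTRAINT };

    // Runs at VM init for the built-in default as well as for -XX: overrides.
    static FlagCheck init_array_short_size_check(intx value, bool verbose) {
      if (value < 0) {                                  // assumed shape of the constraint
        if (verbose) {
          fprintf(stderr, "InitArrayShortSize (%ld) must be >= 0\n", (long) value);
        }
        return CHECK_VIOLATES_CONSTRAINT;               // the VM refuses to initialize
      }
      return CHECK_OK;
    }
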
-George On 4/4/2016 1:54 PM, Jesper Wilhelmsson wrote: > Hi, > > Could I have a quick review for this fix to quarantine two tests. > > The tests are broken and are failing in PIT. The fix for the tests was > pushed to hs-rt so we need to quarantine the tests in main until the > testfix can be integrated to main. > > Bug: https://bugs.openjdk.java.net/browse/JDK-8153410 > Webrev: http://cr.openjdk.java.net/~jwilhelm/8153410/webrev.00 > > Thanks, > /Jesper From vladimir.kozlov at oracle.com Mon Apr 4 18:04:28 2016 From: vladimir.kozlov at oracle.com (Vladimir Kozlov) Date: Mon, 4 Apr 2016 11:04:28 -0700 Subject: RFR (L): 8149374: Replace C1-specific collection classes with universal collection classes In-Reply-To: <5702854E.7010307@oracle.com> References: <56FAFF3E.3020507@oracle.com> <56FD3ECF.1090800@oracle.com> <56FD4F63.8020103@oracle.com> <56FE8711.3080703@oracle.com> <5702854E.7010307@oracle.com> Message-ID: <5702ACAC.5040301@oracle.com> On 4/4/16 8:16 AM, Mikael Gerdin wrote: > Hi Filipp, > > On 2016-04-02 13:32, Filipp Zhinkin wrote: >> Here is an webrev updated according Mikael's comments: >> >> http://cr.openjdk.java.net/~fzhinkin/8149374/webrev.02/ > > I noticed that you changed this compared to my suggestion. > From my understanding the explicit cast to void is unnecessary, all > pointer types implicitly convert to void*. > I've been forced to accept that people prefer "&foo[i]" over "foo + i" > in the past so I'm ok with that change. > > - new ((ParScanThreadState*)_data + i) > + new((void*) &_per_thread_states[i]) > > > I suspect that you can remove > 33 // correct linkage required to compile w/o warnings > 34 // (must be on file level - cannot be local) > 35 extern "C" { typedef int (*ftype)(const void*, const void*); } > 36 > as well, it appears to only have been used by the sort() methods of the > removed array classes. > > I haven't checked all the other uses but from a GC perspective I think > this is good to go. C1 changes are good too. Thanks, Vladimir > > /Mikael > >> >> Tested using hotspot_all tests w/ CMS turned on. >> >> Thanks, >> Filipp. >> >> On Fri, Apr 1, 2016 at 5:34 PM, Mikael Gerdin >> wrote: >>> Hi Filipp >>> >>> On 2016-04-01 16:27, Filipp Zhinkin wrote: >>>> >>>> Hi Mikael, >>>> >>>> On Thu, Mar 31, 2016 at 7:25 PM, Mikael Gerdin >>>> >>>> wrote: >>>>> >>>>> Hi, >>>>> >>>>> I like the cleanup, can't we also remove CHeapArray in arrays.hpp? >>>> >>>> >>>> Sure! I've missed that it is not used at all. >>> >>> >>> Great! >>> >>>> >>>>> >>>>> As for the CMS change, I would prefer this instead (untested!): >>>>> http://cr.openjdk.java.net/~mgerdin/pss-array/webrev/ >>>> >>>> >>>> Thanks, your implementation looks much better. >>>> If you don't mind I'll incorporate it into my change. >>> >>> >>> Go ahead, that was my intention. >>> >>>> >>>> Also, it seems like in ParNewGeneration::collect we have to create >>>> ResourceMark before ParScanThreadStateSet, right? >>> >>> >>> There is a ResourceMark in the caller so I don't think it's needed. >>> The old version of the code used resource allocation as well and was >>> fine so >>> I don't think there is a need to introduce another ResourceMark. >>> >>> >>> /Mikael >>> >>> >>>> >>>> Thanks, >>>> Filipp. >>>> >>>>> >>>>> /Mikael >>>>> >>>>> >>>>> On 2016-03-31 17:14, Vladimir Kozlov wrote: >>>>>> >>>>>> >>>>>> Hi Filipp, >>>>>> >>>>>> Yes, this looks better. CCing to hotspot-dev for Runtime and GC >>>>>> groups >>>>>> to look on. 
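On the extern "C" ftype typedef that Mikael suggests deleting in the quoted mail: it only existed so the old define_array sort() could hand a C-linkage comparator to qsort(), and a template container can take a typed comparator directly. A compact, generic illustration of the contrast (not the HotSpot sources themselves):

    #include <cstdlib>   // qsort

    struct Entry { int key; };

    // Old style: qsort() needs a C-linkage comparator working on void*.
    extern "C" int entry_cmp_c(const void* a, const void* b) {
      return ((const Entry*) a)->key - ((const Entry*) b)->key;
    }

    // GrowableArray-style: the sort routine is a template, so the comparator keeps
    // its element type and no extern "C" typedef is required anymore.
    template <typename E>
    void sort_typed(E* data, int len, int (*cmp)(E*, E*)) {
      for (int i = 1; i < len; i++) {              // insertion sort keeps the sketch self-contained
        E tmp = data[i];
        int j = i - 1;
        for (; j >= 0 && cmp(&data[j], &tmp) > 0; j--) {
          data[j + 1] = data[j];
        }
        data[j + 1] = tmp;
      }
    }

    static int entry_cmp(Entry* a, Entry* b) { return a->key - b->key; }

    void sort_both_ways(Entry* entries, int n) {
      qsort(entries, n, sizeof(Entry), entry_cmp_c);   // needs the void* comparator
      sort_typed(entries, n, entry_cmp);               // needs no extern "C" glue
    }
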
>>>>>> >>>>>> Thanks, >>>>>> Vladimir >>>>>> >>>>>> On 3/31/16 8:08 AM, Filipp Zhinkin wrote: >>>>>>> >>>>>>> >>>>>>> Hi Vladimir, >>>>>>> >>>>>>> thank you for looking at this change. >>>>>>> >>>>>>> On Wed, Mar 30, 2016 at 1:18 AM, Vladimir Kozlov >>>>>>> wrote: >>>>>>>> >>>>>>>> >>>>>>>> Nice clean up but I don't see any source code removed. What >>>>>>>> benefits >>>>>>>> we have >>>>>>>> then? >>>>>>>> I understand that we don't generate subclasses for ResourceArray >>>>>>>> and >>>>>>>> use >>>>>>>> GrowableArray. But it will not save space I think. >>>>>>>> What prevents us to remove ResourceArray at all? >>>>>>> >>>>>>> >>>>>>> >>>>>>> CMS's ParScanThreadStateSet is inherited from ResourceArray, >>>>>>> so it should be updated before removing ResourceArray: >>>>>>> >>>>>>> http://cr.openjdk.java.net/~fzhinkin/8149374/webrev.01/ >>>>>>> >>>>>>>> >>>>>>>> On 3/11/16 3:42 AM, Filipp Zhinkin wrote: >>>>>>>>> >>>>>>>>> >>>>>>>>> >>>>>>>>> Hi all, >>>>>>>>> >>>>>>>>> please review a fix for JDK-8149374: >>>>>>>>> >>>>>>>>> Webrev: http://cr.openjdk.java.net/~fzhinkin/8149374/webrev.00/ >>>>>>>>> Bug: https://bugs.openjdk.java.net/browse/JDK-8149374 >>>>>>>>> Testing done: hotspot_all tests + CTW >>>>>>>>> >>>>>>>>> I've replaced all usages of collections defined via >>>>>>>>> define_array and >>>>>>>>> define_stack macros with GrowableArray. >>>>>>>>> >>>>>>>>> There are good and bad news regarding performance impact of that >>>>>>>>> change. >>>>>>>>> Unfortunately, C1 compilation time for CTW-scenario w/ release >>>>>>>>> bits >>>>>>>>> increased from 51.07?0.28s to 52.99?0.23s (it's about 3.5%). >>>>>>>> >>>>>>>> >>>>>>>> >>>>>>>> >>>>>>>> It is acceptable regression I think. I don't think we should >>>>>>>> optimize >>>>>>>> and >>>>>>>> make more complex GrowableArray just to save 0.5% of performance >>>>>>>> for >>>>>>>> C2. >>>>>>> >>>>>>> >>>>>>> >>>>>>> As long as GrowableArray is used in different Hotspot's >>>>>>> subsystems it >>>>>>> may be beneficial to optimize it, >>>>>>> but I've executed SPECjvm2008's startup.* benchmarks and there >>>>>>> were no >>>>>>> significant difference. >>>>>>> >>>>>>> If ~3% regression is OK for C1 then I'm fine with leaving >>>>>>> GrowableArray's initialization >>>>>>> in its current state unless there will be other reasons to speed >>>>>>> it up. >>>>>>> >>>>>>> Thanks, >>>>>>> Filipp. >>>>>>> >>>>>>>> >>>>>>>> Thanks, >>>>>>>> Vladimir >>>>>>>> >>>>>>>> >>>>>>>>> >>>>>>>>> Such difference caused by eager initialization of GrowableArray's >>>>>>>>> backing array elements [1]. I can imagine when we actually need to >>>>>>>>> force >>>>>>>>> initialization and de-initialization during array's >>>>>>>>> growing/destruction, but for some types like c++ primitive >>>>>>>>> types or >>>>>>>>> pointers such initialization does not make much sense, because >>>>>>>>> GrowableArray is not allowing to access an element which was not >>>>>>>>> explicitly placed inside of it. And as long as GrowableArray most >>>>>>>>> widely used to store pointers we're simply wasting the time with >>>>>>>>> initialization. 
>>>>>>>>> >>>>>>>>> I've measured CTW time with following workaround which implements >>>>>>>>> initialization for numeric types and pointers as no-op and C1 >>>>>>>>> compilation time returned back to values that were measured before >>>>>>>>> original change (51.06?0.24s): >>>>>>>>> >>>>>>>>> >>>>>>>>> >>>>>>>>> http://cr.openjdk.java.net/~fzhinkin/growableArrayInitialization/webrev/ >>>>>>>>> >>>>>>>>> >>>>>>>>> >>>>>>>>> I've also measured C2 compilation time and it dropped down by a >>>>>>>>> few >>>>>>>>> seconds too: 1138?9s w/o GrowableArray's change and 1132?5s w/ it. >>>>>>>>> >>>>>>>>> Summing up: I guess we should avoid GrowableArray's backing array >>>>>>>>> initialization for some types, don't we? >>>>>>>>> >>>>>>>>> Best regards, >>>>>>>>> Filipp >>>>>>>>> >>>>>>>>> [1] >>>>>>>>> >>>>>>>>> >>>>>>>>> http://hg.openjdk.java.net/jdk9/hs-comp/hotspot/file/323b8370b0f6/src/share/vm/utilities/growableArray.hpp#l165 >>>>>>>>> >>>>>>>>> >>>>>>>>> >>>>>>>> >>>>> >>> From daniel.daugherty at oracle.com Mon Apr 4 18:36:45 2016 From: daniel.daugherty at oracle.com (Daniel D. Daugherty) Date: Mon, 4 Apr 2016 12:36:45 -0600 Subject: RFR(XS): JDK-8153410 - Disable tests until JDK-8151460 gets to main In-Reply-To: <5702AA5E.9010003@oracle.com> References: <5702AA5E.9010003@oracle.com> Message-ID: <5702B43D.4090107@oracle.com> On 4/4/16 11:54 AM, Jesper Wilhelmsson wrote: > Hi, > > Could I have a quick review for this fix to quarantine two tests. > > The tests are broken and are failing in PIT. The fix for the tests was > pushed to hs-rt so we need to quarantine the tests in main until the > testfix can be integrated to main. > > Bug: https://bugs.openjdk.java.net/browse/JDK-8153410 > Webrev: http://cr.openjdk.java.net/~jwilhelm/8153410/webrev.00 test/gc/metaspace/TestMetaspacePerfCounters.java test/gc/metaspace/TestPerfCountersAndMemoryPools.java No comments on either. Thumbs up. Dan > > Thanks, > /Jesper > From jesper.wilhelmsson at oracle.com Mon Apr 4 18:45:52 2016 From: jesper.wilhelmsson at oracle.com (Jesper Wilhelmsson) Date: Mon, 4 Apr 2016 20:45:52 +0200 Subject: RFR(XS): JDK-8153410 - Disable tests until JDK-8151460 gets to main In-Reply-To: <5702AC01.1080100@oracle.com> References: <5702AA5E.9010003@oracle.com> <5702AC01.1080100@oracle.com> Message-ID: <5702B660.2000300@oracle.com> Thanks George! /Jesper Den 4/4/16 kl. 20:01, skrev George Triantafillou: > Jesper, > > Looks good. > > -George > > On 4/4/2016 1:54 PM, Jesper Wilhelmsson wrote: >> Hi, >> >> Could I have a quick review for this fix to quarantine two tests. >> >> The tests are broken and are failing in PIT. The fix for the tests was pushed >> to hs-rt so we need to quarantine the tests in main until the testfix can be >> integrated to main. >> >> Bug: https://bugs.openjdk.java.net/browse/JDK-8153410 >> Webrev: http://cr.openjdk.java.net/~jwilhelm/8153410/webrev.00 >> >> Thanks, >> /Jesper > From jesper.wilhelmsson at oracle.com Mon Apr 4 18:46:13 2016 From: jesper.wilhelmsson at oracle.com (Jesper Wilhelmsson) Date: Mon, 4 Apr 2016 20:46:13 +0200 Subject: RFR(XS): JDK-8153410 - Disable tests until JDK-8151460 gets to main In-Reply-To: <5702B43D.4090107@oracle.com> References: <5702AA5E.9010003@oracle.com> <5702B43D.4090107@oracle.com> Message-ID: <5702B675.3090900@oracle.com> Thanks Dan! /Jesper Den 4/4/16 kl. 20:36, skrev Daniel D. Daugherty: > On 4/4/16 11:54 AM, Jesper Wilhelmsson wrote: >> Hi, >> >> Could I have a quick review for this fix to quarantine two tests. >> >> The tests are broken and are failing in PIT. 
The fix for the tests was pushed >> to hs-rt so we need to quarantine the tests in main until the testfix can be >> integrated to main. >> >> Bug: https://bugs.openjdk.java.net/browse/JDK-8153410 >> Webrev: http://cr.openjdk.java.net/~jwilhelm/8153410/webrev.00 > > test/gc/metaspace/TestMetaspacePerfCounters.java > test/gc/metaspace/TestPerfCountersAndMemoryPools.java > No comments on either. > > Thumbs up. > > Dan > > >> >> Thanks, >> /Jesper >> > From coleen.phillimore at oracle.com Mon Apr 4 20:21:04 2016 From: coleen.phillimore at oracle.com (Coleen Phillimore) Date: Mon, 4 Apr 2016 16:21:04 -0400 Subject: RFR (L): 8149374: Replace C1-specific collection classes with universal collection classes In-Reply-To: <56FD3ECF.1090800@oracle.com> References: <56FAFF3E.3020507@oracle.com> <56FD3ECF.1090800@oracle.com> Message-ID: <5702CCB0.9050701@oracle.com> Thank you for CCing hotspot-dev. This change is great! I reviewed the runtime files. http://cr.openjdk.java.net/~fzhinkin/8149374/webrev.01/src/share/vm/prims/jvmtiRedefineClasses.cpp.udiff.html Why does this have three parameters? - _index_map_p = new intArray(scratch_cp->length(), -1); + _index_map_p = new intArray(scratch_cp->length(), scratch_cp->length(), -1); Why not just change it to: _index_map_p = new GrowableArray(scratch_cp->length()); I don't see the three argument constructor to GrowableArray that takes -1 (??) Is it possible to completely eliminate intArray, intStack, boolArray and boolStack, and the CHeapArray ? If so array.hpp should really go in directory oops since the only Array<> left is for metaspace. Maybe this can be a further cleanup? Wow, thanks! Coleen On 3/31/16 11:14 AM, Vladimir Kozlov wrote: > Hi Filipp, > > Yes, this looks better. CCing to hotspot-dev for Runtime and GC groups > to look on. > > Thanks, > Vladimir > > On 3/31/16 8:08 AM, Filipp Zhinkin wrote: >> Hi Vladimir, >> >> thank you for looking at this change. >> >> On Wed, Mar 30, 2016 at 1:18 AM, Vladimir Kozlov >> wrote: >>> Nice clean up but I don't see any source code removed. What benefits >>> we have >>> then? >>> I understand that we don't generate subclasses for ResourceArray and >>> use >>> GrowableArray. But it will not save space I think. >>> What prevents us to remove ResourceArray at all? >> >> CMS's ParScanThreadStateSet is inherited from ResourceArray, >> so it should be updated before removing ResourceArray: >> >> http://cr.openjdk.java.net/~fzhinkin/8149374/webrev.01/ >> >>> >>> On 3/11/16 3:42 AM, Filipp Zhinkin wrote: >>>> >>>> Hi all, >>>> >>>> please review a fix for JDK-8149374: >>>> >>>> Webrev: http://cr.openjdk.java.net/~fzhinkin/8149374/webrev.00/ >>>> Bug: https://bugs.openjdk.java.net/browse/JDK-8149374 >>>> Testing done: hotspot_all tests + CTW >>>> >>>> I've replaced all usages of collections defined via define_array and >>>> define_stack macros with GrowableArray. >>>> >>>> There are good and bad news regarding performance impact of that >>>> change. >>>> Unfortunately, C1 compilation time for CTW-scenario w/ release bits >>>> increased from 51.07?0.28s to 52.99?0.23s (it's about 3.5%). >>> >>> >>> It is acceptable regression I think. I don't think we should >>> optimize and >>> make more complex GrowableArray just to save 0.5% of performance for >>> C2. >> >> As long as GrowableArray is used in different Hotspot's subsystems it >> may be beneficial to optimize it, >> but I've executed SPECjvm2008's startup.* benchmarks and there were no >> significant difference. 
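On Coleen's question above about the three-parameter call: the likely reading of the quoted diff is capacity, initial length, and a fill value, i.e. the new array starts out holding scratch_cp->length() elements all equal to -1, which a plain GrowableArray<int>(capacity) would not do (it starts empty). The exact constructor should be confirmed in growableArray.hpp; the stand-in below only shows why the two forms are not interchangeable.

    #include <new>
    #include <cstdlib>

    template <typename E>
    class MiniGrowable {                    // illustrative stand-in, not GrowableArray itself
      E*  _data;
      int _len;                             // number of valid elements
      int _max;                             // capacity
    public:
      explicit MiniGrowable(int capacity)   // reserve only: length starts at 0
        : _data((E*) ::malloc(sizeof(E) * capacity)), _len(0), _max(capacity) {}

      MiniGrowable(int capacity, int initial_len, const E& filler)   // reserve and pre-fill
        : _data((E*) ::malloc(sizeof(E) * capacity)), _len(initial_len), _max(capacity) {
        for (int i = 0; i < initial_len; i++) {
          new (&_data[i]) E(filler);        // every slot readable at once, all set to 'filler'
        }
      }

      int length() const { return _len; }
      E& at(int i)       { return _data[i]; }   // only valid for i < _len
      ~MiniGrowable()    { ::free(_data); }
    };

    // new intArray(n, n, -1)    ~ MiniGrowable<int>(n, n, -1): n slots, each -1, indexable immediately.
    // new GrowableArray<int>(n) ~ MiniGrowable<int>(n): capacity n, length 0, nothing indexable yet.
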
>> >> If ~3% regression is OK for C1 then I'm fine with leaving >> GrowableArray's initialization >> in its current state unless there will be other reasons to speed it up. >> >> Thanks, >> Filipp. >> >>> >>> Thanks, >>> Vladimir >>> >>> >>>> >>>> Such difference caused by eager initialization of GrowableArray's >>>> backing array elements [1]. I can imagine when we actually need to >>>> force >>>> initialization and de-initialization during array's >>>> growing/destruction, but for some types like c++ primitive types or >>>> pointers such initialization does not make much sense, because >>>> GrowableArray is not allowing to access an element which was not >>>> explicitly placed inside of it. And as long as GrowableArray most >>>> widely used to store pointers we're simply wasting the time with >>>> initialization. >>>> >>>> I've measured CTW time with following workaround which implements >>>> initialization for numeric types and pointers as no-op and C1 >>>> compilation time returned back to values that were measured before >>>> original change (51.06?0.24s): >>>> >>>> http://cr.openjdk.java.net/~fzhinkin/growableArrayInitialization/webrev/ >>>> >>>> >>>> I've also measured C2 compilation time and it dropped down by a few >>>> seconds too: 1138?9s w/o GrowableArray's change and 1132?5s w/ it. >>>> >>>> Summing up: I guess we should avoid GrowableArray's backing array >>>> initialization for some types, don't we? >>>> >>>> Best regards, >>>> Filipp >>>> >>>> [1] >>>> http://hg.openjdk.java.net/jdk9/hs-comp/hotspot/file/323b8370b0f6/src/share/vm/utilities/growableArray.hpp#l165 >>>> >>>> >>> From christian.thalinger at oracle.com Mon Apr 4 21:15:02 2016 From: christian.thalinger at oracle.com (Christian Thalinger) Date: Mon, 4 Apr 2016 11:15:02 -1000 Subject: JMH and JDK9 In-Reply-To: <56FE29EE.1090801@oracle.com> References: <56FBFA22.2060204@redhat.com> <56FBFB05.9050706@oracle.com> <56FBFCCD.1000704@oracle.com> <428721B0-47A4-453E-89C2-1D6CA09DEC75@oracle.com> <56FE29EE.1090801@oracle.com> Message-ID: <0C6C1D21-1592-426A-B9DE-33458FC90CF9@oracle.com> > On Mar 31, 2016, at 9:57 PM, Alan Bateman wrote: > > > On 01/04/2016 02:01, Christian Thalinger wrote: >> Maybe totally unrelated but I?m seeing these exceptions with Eclipse 4.6M6: >> >> > I don't recognize this but there has been at least one issue with Eclipse [1] where it needed a configuration change due to the ongoing effort to move non-core classes out of the boot loader. > > In this case then the types are in module java.annotations.common. We moved those types to the extension class loader (now called the "platform class loader" btw) last year. In JDK 8 and older then the types in this module were defined to the boot loader. If changing the defining class loader is caused this then I would have expected we would have heard about it before now but perhaps there aren't too many people running Eclipse on the latest JDK 9 builds. > > One other thing about these so-called "Common Annotations" is that Java SE only defines a small subset whereas Java EE defines all the annotations that JSR-250 defined. If they have been putting the EE version on the class path then it's not going to work now because we can't split packages between the class path and modules. The way to upgrade is to deploy the EE version as a module on the "upgrade module path". I have no idea if they are using it of course but mentioning it in case it might be relevant. > > Are you going to submit a bug to Eclipse on this? 
My lack of understanding of the problem tells me no. Maybe you should :-) Btw. Eclipse 4.6M5 works fine. > > -Alan > > [1] https://bugs.eclipse.org/bugs/show_bug.cgi?id=466683 From dmitry.dmitriev at oracle.com Mon Apr 4 21:27:18 2016 From: dmitry.dmitriev at oracle.com (Dmitry Dmitriev) Date: Tue, 5 Apr 2016 00:27:18 +0300 Subject: RFR(XS): 8153437: Temporary exclude AllocatePrefetchDistance from testing Message-ID: <5702DC36.8080200@oracle.com> Hello, Please, review small fix which temporary exclude AllocatePrefetchDistance option from testing in TestOptionsWithRanges until JDK-8153340 is fixed. Thanks! JBS: https://bugs.openjdk.java.net/browse/JDK-8153437 webrev.00: http://cr.openjdk.java.net/~ddmitriev/8153437/webrev.00/ Dmitry From daniel.daugherty at oracle.com Mon Apr 4 21:34:29 2016 From: daniel.daugherty at oracle.com (Daniel D. Daugherty) Date: Mon, 4 Apr 2016 15:34:29 -0600 Subject: RFR(XS): 8153437: Temporary exclude AllocatePrefetchDistance from testing In-Reply-To: <5702DC36.8080200@oracle.com> References: <5702DC36.8080200@oracle.com> Message-ID: <5702DDE5.3000701@oracle.com> On 4/4/16 3:27 PM, Dmitry Dmitriev wrote: > Hello, > > Please, review small fix which temporary exclude > AllocatePrefetchDistance option from testing in TestOptionsWithRanges > until JDK-8153340 is fixed. Thanks! > > JBS: https://bugs.openjdk.java.net/browse/JDK-8153437 > webrev.00: http://cr.openjdk.java.net/~ddmitriev/8153437/webrev.00/ > test/runtime/CommandLine/OptionsValidation/TestOptionsWithRanges.java No comments. Thumbs up! I think the HotSpot trivial code review rule applies here. Dan > > Dmitry From dmitry.dmitriev at oracle.com Mon Apr 4 21:34:52 2016 From: dmitry.dmitriev at oracle.com (Dmitry Dmitriev) Date: Tue, 5 Apr 2016 00:34:52 +0300 Subject: RFR(XS): 8153437: Temporary exclude AllocatePrefetchDistance from testing In-Reply-To: <5702DDE5.3000701@oracle.com> References: <5702DC36.8080200@oracle.com> <5702DDE5.3000701@oracle.com> Message-ID: <5702DDFC.40102@oracle.com> Dan, thank you for the quick review! Yes, I think that this change is trivial and will start a push job soon. Dmitry On 05.04.2016 0:34, Daniel D. Daugherty wrote: > On 4/4/16 3:27 PM, Dmitry Dmitriev wrote: >> Hello, >> >> Please, review small fix which temporary exclude >> AllocatePrefetchDistance option from testing in TestOptionsWithRanges >> until JDK-8153340 is fixed. Thanks! >> >> JBS: https://bugs.openjdk.java.net/browse/JDK-8153437 >> webrev.00: http://cr.openjdk.java.net/~ddmitriev/8153437/webrev.00/ >> > > test/runtime/CommandLine/OptionsValidation/TestOptionsWithRanges.java > No comments. > > Thumbs up! > > I think the HotSpot trivial code review rule applies here. > > Dan > > >> >> Dmitry > From vladimir.kozlov at oracle.com Mon Apr 4 23:09:07 2016 From: vladimir.kozlov at oracle.com (Vladimir Kozlov) Date: Mon, 4 Apr 2016 16:09:07 -0700 Subject: [9] RFR(S): 8074553: Crash with assert(!is_unloaded()) failed: should not call follow on unloaded nmethod In-Reply-To: <56FE1595.20603@oracle.com> References: <56FD24A3.1090504@oracle.com> <56FD4720.90800@oracle.com> <56FE1595.20603@oracle.com> Message-ID: <5702F413.1080903@oracle.com> Thank you for explaining. webrev.01 looks good. 
Thanks, Vladimir On 3/31/16 11:30 PM, Tobias Hartmann wrote: > Hi Vladimir, > > On 31.03.2016 17:49, Vladimir Kozlov wrote: >> On 3/31/16 6:22 AM, Tobias Hartmann wrote: >>> Hi, >>> >>> please review the following patch: >>> >>> https://bugs.openjdk.java.net/browse/JDK-8074553 >>> http://cr.openjdk.java.net/~thartmann/8074553/webrev.00/ >>> >>> While the code cache sweeper processes a nmethod in NMethodSweeper::process_nmethod(), safepoints may happen and the GC may unload the currently processed nmethod. To prevent this, the sweeper uses a NMethodMarker which saves the nmethod in CodeCacheSweeperThread::_scanned_nmethod. The nmethod is then passed to the GC through a CodeBlobClosure in CodeCacheSweeperThread::oops_do() to keep it alive when the GC iterates over all threads. >>> >>> The problem is that G1 calls nmethods_do() on all threads in the remark phase (see G1RemarkThreadsClosure::do_thread()) which is not overwritten by the sweeper thread. Since the currently processed nmethod is not passed through nmethods_do() by any thread, it is unloaded and we later hit the assert when encountering the nmethod through oops_do(). >>> >>> Mikael Gerdin and Stefan Karlsson (thanks again!) suggested to overwrite nmethods_do() as well in CodeCacheSweeperThread and pass _scanned_nmethod to the closure. I also modified Threads::nmethods_do() to ignore the sweeper thread because we want to avoid marking the _scanned_nmethod as seen on the stack when scanning stacks from the sweeper (the nmethod may already be zombie and only referenced by the sweeper). >> >> I did not get this. If you exclude CodeCacheSweeperThread in Threads::nmethods_do() then CodeCacheSweeperThread::nmethods_do() will not be called. What is the point? > > The point is that the GC code calls JavaThread::nmethods_do() (which I modified to include _scanned_nmethod) and not Threads::nmethods_do(). The latter one is only used by the CodeCacheSweeperThread to mark nmethods active on the Java stack and should therefore *exclude* _scanned_nmethod. This is because _scanned_nmethod should only be prevented from being unloaded by the GC but the hotness value or stack marking should not be affected (it may very well a zombie already). > > Please also note that there is Thread*s*::nmethods_do() and Thread::nmethods_do() which is a bit confusing. > > However, Mikael is right that nmethods_do() should be virtual (like oops_do() is) to allow the GC code to call the CodeCacheSweeperThread::nmethods_do() version. > > Thanks, > Tobias > >> >> Thanks, >> Vladimir >> >>> >>> Unfortunately, this bug is extremely hard to reproduce (it showed up 18 times since early 2015). I was able to reproduce it only once after thousands of runs and therefore not often enough to verify the fix. However, I'm very confident that this solves the problem. >>> >>> Tested with JPRT and RBT (running). 
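To make the quoted fix easier to picture, here is a stripped-down sketch of the override being described. The types are stand-ins (CodeBlobVisitor, SweeperThreadSketch and do_blob are not the HotSpot names); the essential point is only that the sweeper thread reports its in-flight nmethod when asked for nmethods, in addition to whatever sits on a Java stack.

    #include <cstddef>

    class CodeBlobVisitor {                   // stand-in for CodeBlobClosure
    public:
      virtual ~CodeBlobVisitor() {}
      virtual void do_blob(void* nm) = 0;
    };

    class ThreadSketch {
    public:
      virtual ~ThreadSketch() {}
      virtual void nmethods_do(CodeBlobVisitor* cf) {
        // walk the frames of the Java stack and report each nmethod (omitted)
      }
    };

    class SweeperThreadSketch : public ThreadSketch {
      void* _scanned_nmethod;                 // nmethod currently being swept; may be NULL
    public:
      SweeperThreadSketch() : _scanned_nmethod(NULL) {}
      virtual void nmethods_do(CodeBlobVisitor* cf) {
        ThreadSketch::nmethods_do(cf);        // usual walk (a sweeper thread has no Java frames)
        if (cf != NULL && _scanned_nmethod != NULL) {
          cf->do_blob(_scanned_nmethod);      // keep the in-flight nmethod visible to the GC's remark walk
        }
      }
    };
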
>>> >>> Thanks, >>> Tobias >>> From kim.barrett at oracle.com Tue Apr 5 00:07:18 2016 From: kim.barrett at oracle.com (Kim Barrett) Date: Mon, 4 Apr 2016 20:07:18 -0400 Subject: RFR: 8152711: Create a non-template Log wrapper class In-Reply-To: <57025C1B.6080707@oracle.com> References: <56FA87AC.6000402@oracle.com> <67C1AA71-7C36-43BE-BE33-7091C5AC5F1A@oracle.com> <56FB8BC9.8070006@oracle.com> <57025C1B.6080707@oracle.com> Message-ID: <03162CE0-0437-4AD8-9079-117656D46358@oracle.com> > On Apr 4, 2016, at 8:20 AM, Stefan Karlsson wrote: > > Hi all, > > I've created a new patch to use LogTagSets instead of function pointers: > > http://cr.openjdk.java.net/~stefank/8152711/webrev.02.delta/ > http://cr.openjdk.java.net/~stefank/8152711/webrev.02/ > > The patch is rebased against the LogTagSet enhancements in: > > http://mail.openjdk.java.net/pipermail/hotspot-dev/2016-April/022368.html > > I've also updated the unit tests after feedback on earlier UL patches. A couple of minor comments. I don't need a new webrev for these. Otherwise, looks good. ------------------------------------------------------------------------------ src/share/vm/logging/logStream.inline.hpp 81 // LogTargt(Debug, gc) log; LogTargt => LogTarget ------------------------------------------------------------------------------ src/share/vm/gc/shared/gcTraceTime.inline.hpp 137 #define INJECT_START_TAG(T1, T2, T3, T4) \ This macro is injected into all including code. I'd prefer a name that is tied to GCTraceTime, like GC_TRACE_TIME_INJECT_START_TAG. Being paranoid about macro name capture is generally a good thing. ------------------------------------------------------------------------------ From mikhailo.seledtsov at oracle.com Tue Apr 5 00:13:32 2016 From: mikhailo.seledtsov at oracle.com (Mikhailo Seledtsov) Date: Mon, 4 Apr 2016 17:13:32 -0700 Subject: RFR [XS] 8153300 - [TESTBUG] Enhance test/testlibrary/ClassFileInstaller.java to support JAR files Message-ID: <5703032C.4060208@oracle.com> Hi Ioi, I support the idea of incorporating jar file support into test utilities. A number of tests use jars instead of simple class files; this will simplify test development and maintenance. Overall this change looks good. I have one comment: ClassFileInstaller.java:75 public static boolean DEBUG = Boolean.getBoolean("ClassFileInstaller.debug"); I believe this should be: public static boolean DEBUG = Boolean.getBoolean(System.getProperty("ClassFileInstaller.debug", false)); The rest looks good to me. Thank you, Misha >Sorry, the bug ID is 8153300. I have fixed the e-mail subject. > >- Ioi > >On 4/1/16 9:45 AM, Ioi Lam wrote: >/Please review a very small fix: />//>/http://cr.openjdk.java.net/~iklam/jdk9/8153300-enhance-classfileinstaller.v01/ />//>//>/Bug: [TESTBUG] Enhance test/testlibrary/ClassFileInstaller.java to />/support JAR files />//>/https://bugs.openjdk.java.net/browse/JDK-8153300 />//>/Summary of fix: />//>/Many hotspot tests need to create JAR files. The current method is />/messy: />//>/[1] Because JTREG compiles .class files into various directories, />/we need />/to use ClassFileInstaller to find the .class file and copy it />/into the />/current directory. />/[2] Then, there are various ad-hoc calls to sun.tools.jar.Main to />/create the />/JAR file. This is not desirable because sun.tools.jar is an />/internal />/package and javac gives warnings about it. 
/>//>/I have improved ClassFileInstaller so that JAR files can be easily />/created using JTREG tags: />//>/* @build ClassFileInstaller sun.hotspot.WhiteBox />/* @run main ClassFileInstaller -jar whitebox.jar sun.hotspot.WhiteBox />//>/To handle more complex use cases, new APIs are also added to />/ClassFileInstaller />/for programmatically creating JAR files inside the test source code. />//>/I converted two CDS test cases to use the new functionality. />//>/FYI, I am also working on new tests for a closed issue />/(JDK-8153297) that use />/the new functionality. />//>/Thanks />/- Ioi />//>//>// From ioi.lam at oracle.com Tue Apr 5 01:00:12 2016 From: ioi.lam at oracle.com (Ioi Lam) Date: Mon, 04 Apr 2016 18:00:12 -0700 Subject: RFR [XS] 8153300 - [TESTBUG] Enhance test/testlibrary/ClassFileInstaller.java to support JAR files In-Reply-To: <5703032C.4060208@oracle.com> References: <5703032C.4060208@oracle.com> Message-ID: <57030E1C.9060105@oracle.com> Hi Misha, Thanks for the review. The Boolean.getBoolean(String) API actually will read the named system property. So this is a common pattern used by the JDK source code. E.g., jdk/src/jdk.jcmd/share/classes/sun/tools/jstat/Parser.java: private static boolean pdebug = Boolean.getBoolean("jstat.parser.debug"); Thanks - Ioi On 4/4/16 5:13 PM, Mikhailo Seledtsov wrote: > Hi Ioi, > > I support the idea of incorporating jar file support into test utilities. A number of tests use jars instead of simple class files; this will simplify test development and maintenance. > > Overall this change looks good. I have one comment: > ClassFileInstaller.java:75 > public static boolean DEBUG = Boolean.getBoolean("ClassFileInstaller.debug"); > I believe this should be: > public static boolean DEBUG = Boolean.getBoolean(System.getProperty("ClassFileInstaller.debug", false)); > > The rest looks good to me. > > Thank you, > Misha > > >Sorry, the bug ID is 8153300. I have fixed the e-mail subject. > > > >- Ioi > > > >On 4/1/16 9:45 AM, Ioi Lam wrote: > >/ Please review a very small fix: > />/ > />/ http://cr.openjdk.java.net/~iklam/jdk9/8153300-enhance-classfileinstaller.v01/ > />/ > />/ > />/ Bug: [TESTBUG] Enhance test/testlibrary/ClassFileInstaller.java to > />/ support JAR files > />/ > />/ https://bugs.openjdk.java.net/browse/JDK-8153300 > />/ > />/ Summary of fix: > />/ > />/ Many hotspot tests need to create JAR files. The current method is > />/ messy: > />/ > />/ [1] Because JTREG compiles .class files into various directories, > />/ we need > />/ to use ClassFileInstaller to find the .class file and copy it > />/ into the > />/ current directory. > />/ [2] Then, there are various ad-hoc calls to sun.tools.jar.Main to > />/ create the > />/ JAR file. This is not desirable because sun.tools.jar is an > />/ internal > />/ package and javac gives warnings about it. > />/ > />/ I have improved ClassFileInstaller so that JAR files can be easily > />/ created using JTREG tags: > />/ > />/ * @build ClassFileInstaller sun.hotspot.WhiteBox > />/ * @run main ClassFileInstaller -jar whitebox.jar sun.hotspot.WhiteBox > />/ > />/ To handle more complex use cases, new APIs are also added to > />/ ClassFileInstaller > />/ for programmatically creating JAR files inside the test source code. > />/ > />/ I converted two CDS test cases to use the new functionality. > />/ > />/ FYI, I am also working on new tests for a closed issue > />/ (JDK-8153297) that use > />/ the new functionality. 
> />/ > />/ Thanks > />/ - Ioi > />/ > />/ > />/ > / From mikhailo.seledtsov at oracle.com Tue Apr 5 01:14:27 2016 From: mikhailo.seledtsov at oracle.com (Mikhailo Seledtsov) Date: Mon, 4 Apr 2016 18:14:27 -0700 Subject: RFR [XS] 8153300 - [TESTBUG] Enhance test/testlibrary/ClassFileInstaller.java to support JAR files In-Reply-To: <57030E1C.9060105@oracle.com> References: <5703032C.4060208@oracle.com> <57030E1C.9060105@oracle.com> Message-ID: <57031173.3010206@oracle.com> On 4/4/2016 6:00 PM, Ioi Lam wrote: > Hi Misha, > > Thanks for the review. > > The Boolean.getBoolean(String) API actually will read the named system > property. So this is a common pattern used by the JDK source code. > E.g., jdk/src/jdk.jcmd/share/classes/sun/tools/jstat/Parser.java: Good - I've learned something new today. Thanks, Misha > > private static boolean pdebug = > Boolean.getBoolean("jstat.parser.debug"); > > Thanks > - Ioi > > On 4/4/16 5:13 PM, Mikhailo Seledtsov wrote: >> Hi Ioi, >> >> I support the idea of incorporating jar file support into test utilities. A number of tests use jars instead of simple class files; this will simplify test development and maintenance. >> >> Overall this change looks good. I have one comment: >> ClassFileInstaller.java:75 >> public static boolean DEBUG = Boolean.getBoolean("ClassFileInstaller.debug"); >> I believe this should be: >> public static boolean DEBUG = Boolean.getBoolean(System.getProperty("ClassFileInstaller.debug", false)); >> >> The rest looks good to me. >> >> Thank you, >> Misha >> >> >Sorry, the bug ID is 8153300. I have fixed the e-mail subject. >> > >> >- Ioi >> > >> >On 4/1/16 9:45 AM, Ioi Lam wrote: >> >/Please review a very small fix: />//>/http://cr.openjdk.java.net/~iklam/jdk9/8153300-enhance-classfileinstaller.v01/ >> >> />//>//>/Bug: [TESTBUG] Enhance test/testlibrary/ClassFileInstaller.java to />/support JAR files />//>/https://bugs.openjdk.java.net/browse/JDK-8153300 />//>/Summary of fix: />//>/Many hotspot tests need to create JAR files. The current method is />/messy: />//>/[1] Because JTREG compiles .class files into various directories, />/we need />/to use ClassFileInstaller to find the .class file and copy it />/into the />/current directory. />/[2] Then, there are various ad-hoc calls to sun.tools.jar.Main to />/create the />/JAR file. This is not desirable because sun.tools.jar is an />/internal />/package and javac gives warnings about it. />//>/I have improved ClassFileInstaller so that JAR files can be easily />/created using JTREG tags: />//>/* @build ClassFileInstaller sun.hotspot.WhiteBox />/* @run main ClassFileInstaller -jar whitebox.jar sun.hotspot.WhiteBox />//>/To handle more complex use cases, new APIs are also added to />/ClassFileInstaller />/for programmatically creating JAR files inside the test source code. />//>/I converted two CDS test cases to use the new functionality. />//>/FYI, I am also working on new tests for a closed issue />/(JDK-8153297) that use />/the new functionality. />//>/Thanks />/- Ioi />//>//>// > From mandy.chung at oracle.com Tue Apr 5 01:29:53 2016 From: mandy.chung at oracle.com (Mandy Chung) Date: Mon, 4 Apr 2016 18:29:53 -0700 Subject: RFR 8153123 : Streamline StackWalker code In-Reply-To: <5702FC9B.7020600@oracle.com> References: <5702FC9B.7020600@oracle.com> Message-ID: > On Apr 4, 2016, at 4:45 PM, Brent Christian wrote: > > Hi, > > I'd like to check in some footprint and code reduction changes to the java.lang.StackWalker implementation. 
> > Webrev: > http://cr.openjdk.java.net/~bchristi/8153123/webrev.00/ > Bug: > https://bugs.openjdk.java.net/browse/JDK-8153123 > This looks good to me. One thing to mention is that this patch is a follow-up work from the investigation on what it takes to enable Throwable to use StackWalker (JDK-8141239). The current built-in VM backtrace is very compact and performant. We have identified and prototypes the performance improvements if Throwable backtrace is generated using stack walker. There are some performance gaps that we agree to defer JDK-8141239 to a future release and improve the footprint performance and GC throughput concerns when MemberNames are stored in the throwable backtrace. Mandy From tobias.hartmann at oracle.com Tue Apr 5 05:09:02 2016 From: tobias.hartmann at oracle.com (Tobias Hartmann) Date: Tue, 5 Apr 2016 07:09:02 +0200 Subject: [9] RFR(S): 8074553: Crash with assert(!is_unloaded()) failed: should not call follow on unloaded nmethod In-Reply-To: <5702F413.1080903@oracle.com> References: <56FD24A3.1090504@oracle.com> <56FD4720.90800@oracle.com> <56FE1595.20603@oracle.com> <5702F413.1080903@oracle.com> Message-ID: <5703486E.5000206@oracle.com> Thanks, Vladimir! Best regards, Tobias On 05.04.2016 01:09, Vladimir Kozlov wrote: > Thank you for explaining. webrev.01 looks good. > > Thanks, > Vladimir > > On 3/31/16 11:30 PM, Tobias Hartmann wrote: >> Hi Vladimir, >> >> On 31.03.2016 17:49, Vladimir Kozlov wrote: >>> On 3/31/16 6:22 AM, Tobias Hartmann wrote: >>>> Hi, >>>> >>>> please review the following patch: >>>> >>>> https://bugs.openjdk.java.net/browse/JDK-8074553 >>>> http://cr.openjdk.java.net/~thartmann/8074553/webrev.00/ >>>> >>>> While the code cache sweeper processes a nmethod in NMethodSweeper::process_nmethod(), safepoints may happen and the GC may unload the currently processed nmethod. To prevent this, the sweeper uses a NMethodMarker which saves the nmethod in CodeCacheSweeperThread::_scanned_nmethod. The nmethod is then passed to the GC through a CodeBlobClosure in CodeCacheSweeperThread::oops_do() to keep it alive when the GC iterates over all threads. >>>> >>>> The problem is that G1 calls nmethods_do() on all threads in the remark phase (see G1RemarkThreadsClosure::do_thread()) which is not overwritten by the sweeper thread. Since the currently processed nmethod is not passed through nmethods_do() by any thread, it is unloaded and we later hit the assert when encountering the nmethod through oops_do(). >>>> >>>> Mikael Gerdin and Stefan Karlsson (thanks again!) suggested to overwrite nmethods_do() as well in CodeCacheSweeperThread and pass _scanned_nmethod to the closure. I also modified Threads::nmethods_do() to ignore the sweeper thread because we want to avoid marking the _scanned_nmethod as seen on the stack when scanning stacks from the sweeper (the nmethod may already be zombie and only referenced by the sweeper). >>> >>> I did not get this. If you exclude CodeCacheSweeperThread in Threads::nmethods_do() then CodeCacheSweeperThread::nmethods_do() will not be called. What is the point? >> >> The point is that the GC code calls JavaThread::nmethods_do() (which I modified to include _scanned_nmethod) and not Threads::nmethods_do(). The latter one is only used by the CodeCacheSweeperThread to mark nmethods active on the Java stack and should therefore *exclude* _scanned_nmethod. 
This is because _scanned_nmethod should only be prevented from being unloaded by the GC but the hotness value or stack marking should not be affected (it may very well a zombie already). >> >> Please also note that there is Thread*s*::nmethods_do() and Thread::nmethods_do() which is a bit confusing. >> >> However, Mikael is right that nmethods_do() should be virtual (like oops_do() is) to allow the GC code to call the CodeCacheSweeperThread::nmethods_do() version. >> >> Thanks, >> Tobias >> >>> >>> Thanks, >>> Vladimir >>> >>>> >>>> Unfortunately, this bug is extremely hard to reproduce (it showed up 18 times since early 2015). I was able to reproduce it only once after thousands of runs and therefore not often enough to verify the fix. However, I'm very confident that this solves the problem. >>>> >>>> Tested with JPRT and RBT (running). >>>> >>>> Thanks, >>>> Tobias >>>> From stefan.karlsson at oracle.com Tue Apr 5 05:31:38 2016 From: stefan.karlsson at oracle.com (Stefan Karlsson) Date: Tue, 5 Apr 2016 07:31:38 +0200 Subject: RFR: 8152711: Create a non-template Log wrapper class In-Reply-To: <03162CE0-0437-4AD8-9079-117656D46358@oracle.com> References: <56FA87AC.6000402@oracle.com> <67C1AA71-7C36-43BE-BE33-7091C5AC5F1A@oracle.com> <56FB8BC9.8070006@oracle.com> <57025C1B.6080707@oracle.com> <03162CE0-0437-4AD8-9079-117656D46358@oracle.com> Message-ID: <57034DBA.4010903@oracle.com> Hi Kim, On 2016-04-05 02:07, Kim Barrett wrote: >> On Apr 4, 2016, at 8:20 AM, Stefan Karlsson wrote: >> >> Hi all, >> >> I've created a new patch to use LogTagSets instead of function pointers: >> >> http://cr.openjdk.java.net/~stefank/8152711/webrev.02.delta/ >> http://cr.openjdk.java.net/~stefank/8152711/webrev.02/ >> >> The patch is rebased against the LogTagSet enhancements in: >> >> http://mail.openjdk.java.net/pipermail/hotspot-dev/2016-April/022368.html >> >> I've also updated the unit tests after feedback on earlier UL patches. > A couple of minor comments. I don't need a new webrev for these. > > Otherwise, looks good. > > ------------------------------------------------------------------------------ > src/share/vm/logging/logStream.inline.hpp > 81 // LogTargt(Debug, gc) log; > > LogTargt => LogTarget OK. > > ------------------------------------------------------------------------------ > src/share/vm/gc/shared/gcTraceTime.inline.hpp > 137 #define INJECT_START_TAG(T1, T2, T3, T4) \ > > This macro is injected into all including code. I'd prefer a name > that is tied to GCTraceTime, like GC_TRACE_TIME_INJECT_START_TAG. > Being paranoid about macro name capture is generally a good thing. I prefer the shorter name. I'll undef the macro after its usage instead. I understand that leaking macros in header files that are shipped to third-parties, is a problem. However, this is used internally within HotSpot and we can easily fix any (unlikely) conflict. Or are we afraid that the macro name would accidentally end up in, say, jni.h? Is there another reason to not leak the macro names? I was considering promoting this macro into the UL headers, and rename it to LOG_INJECT_TAG_START, since other places might want to use it. That would also leak the macro name, but to many more places. Thanks for the review! 
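A short illustration of the "#undef after use" convention Stefan settles on above; the expansion is elided, since only the scoping of the name matters for the hygiene point.

    #define INJECT_START_TAG(T1, T2, T3, T4) /* expansion elided in this sketch */

    // ... the handful of uses inside the header go here ...

    #undef INJECT_START_TAG   // undefined again before the end of the header,
                              // so files that include it never see the short name
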
StefanK > > ------------------------------------------------------------------------------ > From filipp.zhinkin at gmail.com Tue Apr 5 06:49:28 2016 From: filipp.zhinkin at gmail.com (Filipp Zhinkin) Date: Tue, 5 Apr 2016 09:49:28 +0300 Subject: RFR (L): 8149374: Replace C1-specific collection classes with universal collection classes In-Reply-To: <5702854E.7010307@oracle.com> References: <56FAFF3E.3020507@oracle.com> <56FD3ECF.1090800@oracle.com> <56FD4F63.8020103@oracle.com> <56FE8711.3080703@oracle.com> <5702854E.7010307@oracle.com> Message-ID: Hi Mikael, On Mon, Apr 4, 2016 at 6:16 PM, Mikael Gerdin wrote: > Hi Filipp, > > On 2016-04-02 13:32, Filipp Zhinkin wrote: >> >> Here is an webrev updated according Mikael's comments: >> >> http://cr.openjdk.java.net/~fzhinkin/8149374/webrev.02/ > > > I noticed that you changed this compared to my suggestion. > From my understanding the explicit cast to void is unnecessary, all pointer > types implicitly convert to void*. Yes, it's unnecessary. I'll change. > I've been forced to accept that people prefer "&foo[i]" over "foo + i" in > the past so I'm ok with that change. > > - new ((ParScanThreadState*)_data + i) > + new((void*) &_per_thread_states[i]) > > > I suspect that you can remove > 33 // correct linkage required to compile w/o warnings > 34 // (must be on file level - cannot be local) > 35 extern "C" { typedef int (*ftype)(const void*, const void*); } > 36 > as well, it appears to only have been used by the sort() methods of the > removed array classes. I'm too inattentive. :( > > I haven't checked all the other uses but from a GC perspective I think this > is good to go. Thanks! Filipp. > > /Mikael > > >> >> Tested using hotspot_all tests w/ CMS turned on. >> >> Thanks, >> Filipp. >> >> On Fri, Apr 1, 2016 at 5:34 PM, Mikael Gerdin >> wrote: >>> >>> Hi Filipp >>> >>> On 2016-04-01 16:27, Filipp Zhinkin wrote: >>>> >>>> >>>> Hi Mikael, >>>> >>>> On Thu, Mar 31, 2016 at 7:25 PM, Mikael Gerdin >>>> >>>> wrote: >>>>> >>>>> >>>>> Hi, >>>>> >>>>> I like the cleanup, can't we also remove CHeapArray in arrays.hpp? >>>> >>>> >>>> >>>> Sure! I've missed that it is not used at all. >>> >>> >>> >>> Great! >>> >>>> >>>>> >>>>> As for the CMS change, I would prefer this instead (untested!): >>>>> http://cr.openjdk.java.net/~mgerdin/pss-array/webrev/ >>>> >>>> >>>> >>>> Thanks, your implementation looks much better. >>>> If you don't mind I'll incorporate it into my change. >>> >>> >>> >>> Go ahead, that was my intention. >>> >>>> >>>> Also, it seems like in ParNewGeneration::collect we have to create >>>> ResourceMark before ParScanThreadStateSet, right? >>> >>> >>> >>> There is a ResourceMark in the caller so I don't think it's needed. >>> The old version of the code used resource allocation as well and was fine >>> so >>> I don't think there is a need to introduce another ResourceMark. >>> >>> >>> /Mikael >>> >>> >>>> >>>> Thanks, >>>> Filipp. >>>> >>>>> >>>>> /Mikael >>>>> >>>>> >>>>> On 2016-03-31 17:14, Vladimir Kozlov wrote: >>>>>> >>>>>> >>>>>> >>>>>> Hi Filipp, >>>>>> >>>>>> Yes, this looks better. CCing to hotspot-dev for Runtime and GC groups >>>>>> to look on. >>>>>> >>>>>> Thanks, >>>>>> Vladimir >>>>>> >>>>>> On 3/31/16 8:08 AM, Filipp Zhinkin wrote: >>>>>>> >>>>>>> >>>>>>> >>>>>>> Hi Vladimir, >>>>>>> >>>>>>> thank you for looking at this change. >>>>>>> >>>>>>> On Wed, Mar 30, 2016 at 1:18 AM, Vladimir Kozlov >>>>>>> wrote: >>>>>>>> >>>>>>>> >>>>>>>> >>>>>>>> Nice clean up but I don't see any source code removed. 
What benefits >>>>>>>> we have >>>>>>>> then? >>>>>>>> I understand that we don't generate subclasses for ResourceArray and >>>>>>>> use >>>>>>>> GrowableArray. But it will not save space I think. >>>>>>>> What prevents us to remove ResourceArray at all? >>>>>>> >>>>>>> >>>>>>> >>>>>>> >>>>>>> CMS's ParScanThreadStateSet is inherited from ResourceArray, >>>>>>> so it should be updated before removing ResourceArray: >>>>>>> >>>>>>> http://cr.openjdk.java.net/~fzhinkin/8149374/webrev.01/ >>>>>>> >>>>>>>> >>>>>>>> On 3/11/16 3:42 AM, Filipp Zhinkin wrote: >>>>>>>>> >>>>>>>>> >>>>>>>>> >>>>>>>>> >>>>>>>>> Hi all, >>>>>>>>> >>>>>>>>> please review a fix for JDK-8149374: >>>>>>>>> >>>>>>>>> Webrev: http://cr.openjdk.java.net/~fzhinkin/8149374/webrev.00/ >>>>>>>>> Bug: https://bugs.openjdk.java.net/browse/JDK-8149374 >>>>>>>>> Testing done: hotspot_all tests + CTW >>>>>>>>> >>>>>>>>> I've replaced all usages of collections defined via define_array >>>>>>>>> and >>>>>>>>> define_stack macros with GrowableArray. >>>>>>>>> >>>>>>>>> There are good and bad news regarding performance impact of that >>>>>>>>> change. >>>>>>>>> Unfortunately, C1 compilation time for CTW-scenario w/ release bits >>>>>>>>> increased from 51.07?0.28s to 52.99?0.23s (it's about 3.5%). >>>>>>>> >>>>>>>> >>>>>>>> >>>>>>>> >>>>>>>> >>>>>>>> It is acceptable regression I think. I don't think we should >>>>>>>> optimize >>>>>>>> and >>>>>>>> make more complex GrowableArray just to save 0.5% of performance for >>>>>>>> C2. >>>>>>> >>>>>>> >>>>>>> >>>>>>> >>>>>>> As long as GrowableArray is used in different Hotspot's subsystems it >>>>>>> may be beneficial to optimize it, >>>>>>> but I've executed SPECjvm2008's startup.* benchmarks and there were >>>>>>> no >>>>>>> significant difference. >>>>>>> >>>>>>> If ~3% regression is OK for C1 then I'm fine with leaving >>>>>>> GrowableArray's initialization >>>>>>> in its current state unless there will be other reasons to speed it >>>>>>> up. >>>>>>> >>>>>>> Thanks, >>>>>>> Filipp. >>>>>>> >>>>>>>> >>>>>>>> Thanks, >>>>>>>> Vladimir >>>>>>>> >>>>>>>> >>>>>>>>> >>>>>>>>> Such difference caused by eager initialization of GrowableArray's >>>>>>>>> backing array elements [1]. I can imagine when we actually need to >>>>>>>>> force >>>>>>>>> initialization and de-initialization during array's >>>>>>>>> growing/destruction, but for some types like c++ primitive types or >>>>>>>>> pointers such initialization does not make much sense, because >>>>>>>>> GrowableArray is not allowing to access an element which was not >>>>>>>>> explicitly placed inside of it. And as long as GrowableArray most >>>>>>>>> widely used to store pointers we're simply wasting the time with >>>>>>>>> initialization. >>>>>>>>> >>>>>>>>> I've measured CTW time with following workaround which implements >>>>>>>>> initialization for numeric types and pointers as no-op and C1 >>>>>>>>> compilation time returned back to values that were measured before >>>>>>>>> original change (51.06?0.24s): >>>>>>>>> >>>>>>>>> >>>>>>>>> >>>>>>>>> >>>>>>>>> http://cr.openjdk.java.net/~fzhinkin/growableArrayInitialization/webrev/ >>>>>>>>> >>>>>>>>> >>>>>>>>> I've also measured C2 compilation time and it dropped down by a few >>>>>>>>> seconds too: 1138?9s w/o GrowableArray's change and 1132?5s w/ it. >>>>>>>>> >>>>>>>>> Summing up: I guess we should avoid GrowableArray's backing array >>>>>>>>> initialization for some types, don't we? 
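(The idea sketched above, making backing-array initialization a no-op for primitives and pointers, boils down in modern C++ terms to something like the following standalone sketch. It only illustrates the concept and is not the code in the linked webrev; the real GrowableArray is older C++ and would spell the type check differently.)

    #include <cstdlib>
    #include <new>
    #include <type_traits>

    // Allocate a backing array of 'capacity' elements, but only run default
    // constructors when the element type actually needs them. For ints and
    // pointers the loop disappears, which is the cost the CTW numbers point at.
    template <typename E>
    static E* allocate_backing_array(int capacity) {
      E* data = static_cast<E*>(::malloc(sizeof(E) * capacity));
      if (!std::is_trivially_default_constructible<E>::value) {
        for (int i = 0; i < capacity; i++) {
          ::new (static_cast<void*>(data + i)) E();
        }
      }
      return data;
    }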
>>>>>>>>> >>>>>>>>> Best regards, >>>>>>>>> Filipp >>>>>>>>> >>>>>>>>> [1] >>>>>>>>> >>>>>>>>> >>>>>>>>> >>>>>>>>> http://hg.openjdk.java.net/jdk9/hs-comp/hotspot/file/323b8370b0f6/src/share/vm/utilities/growableArray.hpp#l165 >>>>>>>>> >>>>>>>>> >>>>>>>> >>>>> >>> > From filipp.zhinkin at gmail.com Tue Apr 5 06:52:07 2016 From: filipp.zhinkin at gmail.com (Filipp Zhinkin) Date: Tue, 5 Apr 2016 09:52:07 +0300 Subject: RFR (L): 8149374: Replace C1-specific collection classes with universal collection classes In-Reply-To: <5702ACAC.5040301@oracle.com> References: <56FAFF3E.3020507@oracle.com> <56FD3ECF.1090800@oracle.com> <56FD4F63.8020103@oracle.com> <56FE8711.3080703@oracle.com> <5702854E.7010307@oracle.com> <5702ACAC.5040301@oracle.com> Message-ID: Thanks you, Vladimir. Regards, Filipp. On Mon, Apr 4, 2016 at 9:04 PM, Vladimir Kozlov wrote: > On 4/4/16 8:16 AM, Mikael Gerdin wrote: >> >> Hi Filipp, >> >> On 2016-04-02 13:32, Filipp Zhinkin wrote: >>> >>> Here is an webrev updated according Mikael's comments: >>> >>> http://cr.openjdk.java.net/~fzhinkin/8149374/webrev.02/ >> >> >> I noticed that you changed this compared to my suggestion. >> From my understanding the explicit cast to void is unnecessary, all >> pointer types implicitly convert to void*. >> I've been forced to accept that people prefer "&foo[i]" over "foo + i" >> in the past so I'm ok with that change. >> >> - new ((ParScanThreadState*)_data + i) >> + new((void*) &_per_thread_states[i]) >> >> >> I suspect that you can remove >> 33 // correct linkage required to compile w/o warnings >> 34 // (must be on file level - cannot be local) >> 35 extern "C" { typedef int (*ftype)(const void*, const void*); } >> 36 >> as well, it appears to only have been used by the sort() methods of the >> removed array classes. >> >> I haven't checked all the other uses but from a GC perspective I think >> this is good to go. > > > C1 changes are good too. > > Thanks, > Vladimir > > >> >> /Mikael >> >>> >>> Tested using hotspot_all tests w/ CMS turned on. >>> >>> Thanks, >>> Filipp. >>> >>> On Fri, Apr 1, 2016 at 5:34 PM, Mikael Gerdin >>> wrote: >>>> >>>> Hi Filipp >>>> >>>> On 2016-04-01 16:27, Filipp Zhinkin wrote: >>>>> >>>>> >>>>> Hi Mikael, >>>>> >>>>> On Thu, Mar 31, 2016 at 7:25 PM, Mikael Gerdin >>>>> >>>>> wrote: >>>>>> >>>>>> >>>>>> Hi, >>>>>> >>>>>> I like the cleanup, can't we also remove CHeapArray in arrays.hpp? >>>>> >>>>> >>>>> >>>>> Sure! I've missed that it is not used at all. >>>> >>>> >>>> >>>> Great! >>>> >>>>> >>>>>> >>>>>> As for the CMS change, I would prefer this instead (untested!): >>>>>> http://cr.openjdk.java.net/~mgerdin/pss-array/webrev/ >>>>> >>>>> >>>>> >>>>> Thanks, your implementation looks much better. >>>>> If you don't mind I'll incorporate it into my change. >>>> >>>> >>>> >>>> Go ahead, that was my intention. >>>> >>>>> >>>>> Also, it seems like in ParNewGeneration::collect we have to create >>>>> ResourceMark before ParScanThreadStateSet, right? >>>> >>>> >>>> >>>> There is a ResourceMark in the caller so I don't think it's needed. >>>> The old version of the code used resource allocation as well and was >>>> fine so >>>> I don't think there is a need to introduce another ResourceMark. >>>> >>>> >>>> /Mikael >>>> >>>> >>>>> >>>>> Thanks, >>>>> Filipp. >>>>> >>>>>> >>>>>> /Mikael >>>>>> >>>>>> >>>>>> On 2016-03-31 17:14, Vladimir Kozlov wrote: >>>>>>> >>>>>>> >>>>>>> >>>>>>> Hi Filipp, >>>>>>> >>>>>>> Yes, this looks better. CCing to hotspot-dev for Runtime and GC >>>>>>> groups >>>>>>> to look on. 
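(A side note on the placement-new diff quoted earlier in this message: the explicit (void*) cast is indeed redundant, because any object pointer converts to void* implicitly. A minimal standalone illustration, with a made-up type name rather than the real ParScanThreadState:)

    #include <cstdlib>
    #include <new>

    struct WorkerState {
      int id;
      explicit WorkerState(int i) : id(i) {}
    };

    int main() {
      const int n = 4;
      void* raw = ::malloc(sizeof(WorkerState) * n);
      WorkerState* states = static_cast<WorkerState*>(raw);
      for (int i = 0; i < n; i++) {
        ::new (&states[i]) WorkerState(i);   // &states[i] is WorkerState*, converts to void*
      }
      for (int i = 0; i < n; i++) {
        states[i].~WorkerState();            // destroy explicitly before freeing raw memory
      }
      ::free(raw);
      return 0;
    }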
>>>>>>> >>>>>>> Thanks, >>>>>>> Vladimir >>>>>>> >>>>>>> On 3/31/16 8:08 AM, Filipp Zhinkin wrote: >>>>>>>> >>>>>>>> >>>>>>>> >>>>>>>> Hi Vladimir, >>>>>>>> >>>>>>>> thank you for looking at this change. >>>>>>>> >>>>>>>> On Wed, Mar 30, 2016 at 1:18 AM, Vladimir Kozlov >>>>>>>> wrote: >>>>>>>>> >>>>>>>>> >>>>>>>>> >>>>>>>>> Nice clean up but I don't see any source code removed. What >>>>>>>>> benefits >>>>>>>>> we have >>>>>>>>> then? >>>>>>>>> I understand that we don't generate subclasses for ResourceArray >>>>>>>>> and >>>>>>>>> use >>>>>>>>> GrowableArray. But it will not save space I think. >>>>>>>>> What prevents us to remove ResourceArray at all? >>>>>>>> >>>>>>>> >>>>>>>> >>>>>>>> >>>>>>>> CMS's ParScanThreadStateSet is inherited from ResourceArray, >>>>>>>> so it should be updated before removing ResourceArray: >>>>>>>> >>>>>>>> http://cr.openjdk.java.net/~fzhinkin/8149374/webrev.01/ >>>>>>>> >>>>>>>>> >>>>>>>>> On 3/11/16 3:42 AM, Filipp Zhinkin wrote: >>>>>>>>>> >>>>>>>>>> >>>>>>>>>> >>>>>>>>>> >>>>>>>>>> Hi all, >>>>>>>>>> >>>>>>>>>> please review a fix for JDK-8149374: >>>>>>>>>> >>>>>>>>>> Webrev: http://cr.openjdk.java.net/~fzhinkin/8149374/webrev.00/ >>>>>>>>>> Bug: https://bugs.openjdk.java.net/browse/JDK-8149374 >>>>>>>>>> Testing done: hotspot_all tests + CTW >>>>>>>>>> >>>>>>>>>> I've replaced all usages of collections defined via >>>>>>>>>> define_array and >>>>>>>>>> define_stack macros with GrowableArray. >>>>>>>>>> >>>>>>>>>> There are good and bad news regarding performance impact of that >>>>>>>>>> change. >>>>>>>>>> Unfortunately, C1 compilation time for CTW-scenario w/ release >>>>>>>>>> bits >>>>>>>>>> increased from 51.07?0.28s to 52.99?0.23s (it's about 3.5%). >>>>>>>>> >>>>>>>>> >>>>>>>>> >>>>>>>>> >>>>>>>>> >>>>>>>>> It is acceptable regression I think. I don't think we should >>>>>>>>> optimize >>>>>>>>> and >>>>>>>>> make more complex GrowableArray just to save 0.5% of performance >>>>>>>>> for >>>>>>>>> C2. >>>>>>>> >>>>>>>> >>>>>>>> >>>>>>>> >>>>>>>> As long as GrowableArray is used in different Hotspot's >>>>>>>> subsystems it >>>>>>>> may be beneficial to optimize it, >>>>>>>> but I've executed SPECjvm2008's startup.* benchmarks and there >>>>>>>> were no >>>>>>>> significant difference. >>>>>>>> >>>>>>>> If ~3% regression is OK for C1 then I'm fine with leaving >>>>>>>> GrowableArray's initialization >>>>>>>> in its current state unless there will be other reasons to speed >>>>>>>> it up. >>>>>>>> >>>>>>>> Thanks, >>>>>>>> Filipp. >>>>>>>> >>>>>>>>> >>>>>>>>> Thanks, >>>>>>>>> Vladimir >>>>>>>>> >>>>>>>>> >>>>>>>>>> >>>>>>>>>> Such difference caused by eager initialization of GrowableArray's >>>>>>>>>> backing array elements [1]. I can imagine when we actually need to >>>>>>>>>> force >>>>>>>>>> initialization and de-initialization during array's >>>>>>>>>> growing/destruction, but for some types like c++ primitive >>>>>>>>>> types or >>>>>>>>>> pointers such initialization does not make much sense, because >>>>>>>>>> GrowableArray is not allowing to access an element which was not >>>>>>>>>> explicitly placed inside of it. And as long as GrowableArray most >>>>>>>>>> widely used to store pointers we're simply wasting the time with >>>>>>>>>> initialization. 
>>>>>>>>>> >>>>>>>>>> I've measured CTW time with following workaround which implements >>>>>>>>>> initialization for numeric types and pointers as no-op and C1 >>>>>>>>>> compilation time returned back to values that were measured before >>>>>>>>>> original change (51.06?0.24s): >>>>>>>>>> >>>>>>>>>> >>>>>>>>>> >>>>>>>>>> >>>>>>>>>> http://cr.openjdk.java.net/~fzhinkin/growableArrayInitialization/webrev/ >>>>>>>>>> >>>>>>>>>> >>>>>>>>>> >>>>>>>>>> I've also measured C2 compilation time and it dropped down by a >>>>>>>>>> few >>>>>>>>>> seconds too: 1138?9s w/o GrowableArray's change and 1132?5s w/ it. >>>>>>>>>> >>>>>>>>>> Summing up: I guess we should avoid GrowableArray's backing array >>>>>>>>>> initialization for some types, don't we? >>>>>>>>>> >>>>>>>>>> Best regards, >>>>>>>>>> Filipp >>>>>>>>>> >>>>>>>>>> [1] >>>>>>>>>> >>>>>>>>>> >>>>>>>>>> >>>>>>>>>> http://hg.openjdk.java.net/jdk9/hs-comp/hotspot/file/323b8370b0f6/src/share/vm/utilities/growableArray.hpp#l165 >>>>>>>>>> >>>>>>>>>> >>>>>>>>>> >>>>>>>>> >>>>>> >>>> > From mikael.gerdin at oracle.com Tue Apr 5 06:55:48 2016 From: mikael.gerdin at oracle.com (Mikael Gerdin) Date: Tue, 5 Apr 2016 08:55:48 +0200 Subject: RFR (L): 8149374: Replace C1-specific collection classes with universal collection classes In-Reply-To: References: <56FAFF3E.3020507@oracle.com> <56FD3ECF.1090800@oracle.com> <56FD4F63.8020103@oracle.com> <56FE8711.3080703@oracle.com> <5702854E.7010307@oracle.com> Message-ID: <57036174.8090504@oracle.com> Filipp, On 2016-04-05 08:49, Filipp Zhinkin wrote: > Hi Mikael, > > On Mon, Apr 4, 2016 at 6:16 PM, Mikael Gerdin wrote: >> Hi Filipp, >> >> On 2016-04-02 13:32, Filipp Zhinkin wrote: >>> >>> Here is an webrev updated according Mikael's comments: >>> >>> http://cr.openjdk.java.net/~fzhinkin/8149374/webrev.02/ >> >> >> I noticed that you changed this compared to my suggestion. >> From my understanding the explicit cast to void is unnecessary, all pointer >> types implicitly convert to void*. > > Yes, it's unnecessary. I'll change. > >> I've been forced to accept that people prefer "&foo[i]" over "foo + i" in >> the past so I'm ok with that change. >> >> - new ((ParScanThreadState*)_data + i) >> + new((void*) &_per_thread_states[i]) >> >> >> I suspect that you can remove >> 33 // correct linkage required to compile w/o warnings >> 34 // (must be on file level - cannot be local) >> 35 extern "C" { typedef int (*ftype)(const void*, const void*); } >> 36 >> as well, it appears to only have been used by the sort() methods of the >> removed array classes. > > I'm too inattentive. :( It's easy to miss things like that when doing cleanups. This is a great change so don't feel disheartened. /Mikael > >> >> I haven't checked all the other uses but from a GC perspective I think this >> is good to go. > > Thanks! > > Filipp. > >> >> /Mikael >> >> >>> >>> Tested using hotspot_all tests w/ CMS turned on. >>> >>> Thanks, >>> Filipp. >>> >>> On Fri, Apr 1, 2016 at 5:34 PM, Mikael Gerdin >>> wrote: >>>> >>>> Hi Filipp >>>> >>>> On 2016-04-01 16:27, Filipp Zhinkin wrote: >>>>> >>>>> >>>>> Hi Mikael, >>>>> >>>>> On Thu, Mar 31, 2016 at 7:25 PM, Mikael Gerdin >>>>> >>>>> wrote: >>>>>> >>>>>> >>>>>> Hi, >>>>>> >>>>>> I like the cleanup, can't we also remove CHeapArray in arrays.hpp? >>>>> >>>>> >>>>> >>>>> Sure! I've missed that it is not used at all. >>>> >>>> >>>> >>>> Great! 
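(Background on the extern "C" typedef flagged as removable above: it existed because qsort-style sort routines take a comparator pointer with C language linkage, and declaring the pointer type inside an extern "C" block keeps compilers from warning about the linkage mismatch. A self-contained example of the same pattern, not the HotSpot code itself:)

    #include <cstdio>
    #include <cstdlib>

    // Function-pointer type with C language linkage, as expected by qsort().
    extern "C" { typedef int (*ftype)(const void*, const void*); }

    extern "C" int compare_ints(const void* a, const void* b) {
      int x = *static_cast<const int*>(a);
      int y = *static_cast<const int*>(b);
      return (x > y) - (x < y);
    }

    int main() {
      int values[] = { 3, 1, 2 };
      ftype cmp = compare_ints;
      std::qsort(values, 3, sizeof(int), cmp);
      std::printf("%d %d %d\n", values[0], values[1], values[2]);
      return 0;
    }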
>>>> >>>>> >>>>>> >>>>>> As for the CMS change, I would prefer this instead (untested!): >>>>>> http://cr.openjdk.java.net/~mgerdin/pss-array/webrev/ >>>>> >>>>> >>>>> >>>>> Thanks, your implementation looks much better. >>>>> If you don't mind I'll incorporate it into my change. >>>> >>>> >>>> >>>> Go ahead, that was my intention. >>>> >>>>> >>>>> Also, it seems like in ParNewGeneration::collect we have to create >>>>> ResourceMark before ParScanThreadStateSet, right? >>>> >>>> >>>> >>>> There is a ResourceMark in the caller so I don't think it's needed. >>>> The old version of the code used resource allocation as well and was fine >>>> so >>>> I don't think there is a need to introduce another ResourceMark. >>>> >>>> >>>> /Mikael >>>> >>>> >>>>> >>>>> Thanks, >>>>> Filipp. >>>>> >>>>>> >>>>>> /Mikael >>>>>> >>>>>> >>>>>> On 2016-03-31 17:14, Vladimir Kozlov wrote: >>>>>>> >>>>>>> >>>>>>> >>>>>>> Hi Filipp, >>>>>>> >>>>>>> Yes, this looks better. CCing to hotspot-dev for Runtime and GC groups >>>>>>> to look on. >>>>>>> >>>>>>> Thanks, >>>>>>> Vladimir >>>>>>> >>>>>>> On 3/31/16 8:08 AM, Filipp Zhinkin wrote: >>>>>>>> >>>>>>>> >>>>>>>> >>>>>>>> Hi Vladimir, >>>>>>>> >>>>>>>> thank you for looking at this change. >>>>>>>> >>>>>>>> On Wed, Mar 30, 2016 at 1:18 AM, Vladimir Kozlov >>>>>>>> wrote: >>>>>>>>> >>>>>>>>> >>>>>>>>> >>>>>>>>> Nice clean up but I don't see any source code removed. What benefits >>>>>>>>> we have >>>>>>>>> then? >>>>>>>>> I understand that we don't generate subclasses for ResourceArray and >>>>>>>>> use >>>>>>>>> GrowableArray. But it will not save space I think. >>>>>>>>> What prevents us to remove ResourceArray at all? >>>>>>>> >>>>>>>> >>>>>>>> >>>>>>>> >>>>>>>> CMS's ParScanThreadStateSet is inherited from ResourceArray, >>>>>>>> so it should be updated before removing ResourceArray: >>>>>>>> >>>>>>>> http://cr.openjdk.java.net/~fzhinkin/8149374/webrev.01/ >>>>>>>> >>>>>>>>> >>>>>>>>> On 3/11/16 3:42 AM, Filipp Zhinkin wrote: >>>>>>>>>> >>>>>>>>>> >>>>>>>>>> >>>>>>>>>> >>>>>>>>>> Hi all, >>>>>>>>>> >>>>>>>>>> please review a fix for JDK-8149374: >>>>>>>>>> >>>>>>>>>> Webrev: http://cr.openjdk.java.net/~fzhinkin/8149374/webrev.00/ >>>>>>>>>> Bug: https://bugs.openjdk.java.net/browse/JDK-8149374 >>>>>>>>>> Testing done: hotspot_all tests + CTW >>>>>>>>>> >>>>>>>>>> I've replaced all usages of collections defined via define_array >>>>>>>>>> and >>>>>>>>>> define_stack macros with GrowableArray. >>>>>>>>>> >>>>>>>>>> There are good and bad news regarding performance impact of that >>>>>>>>>> change. >>>>>>>>>> Unfortunately, C1 compilation time for CTW-scenario w/ release bits >>>>>>>>>> increased from 51.07?0.28s to 52.99?0.23s (it's about 3.5%). >>>>>>>>> >>>>>>>>> >>>>>>>>> >>>>>>>>> >>>>>>>>> >>>>>>>>> It is acceptable regression I think. I don't think we should >>>>>>>>> optimize >>>>>>>>> and >>>>>>>>> make more complex GrowableArray just to save 0.5% of performance for >>>>>>>>> C2. >>>>>>>> >>>>>>>> >>>>>>>> >>>>>>>> >>>>>>>> As long as GrowableArray is used in different Hotspot's subsystems it >>>>>>>> may be beneficial to optimize it, >>>>>>>> but I've executed SPECjvm2008's startup.* benchmarks and there were >>>>>>>> no >>>>>>>> significant difference. >>>>>>>> >>>>>>>> If ~3% regression is OK for C1 then I'm fine with leaving >>>>>>>> GrowableArray's initialization >>>>>>>> in its current state unless there will be other reasons to speed it >>>>>>>> up. >>>>>>>> >>>>>>>> Thanks, >>>>>>>> Filipp. 
>>>>>>>> >>>>>>>>> >>>>>>>>> Thanks, >>>>>>>>> Vladimir >>>>>>>>> >>>>>>>>> >>>>>>>>>> >>>>>>>>>> Such difference caused by eager initialization of GrowableArray's >>>>>>>>>> backing array elements [1]. I can imagine when we actually need to >>>>>>>>>> force >>>>>>>>>> initialization and de-initialization during array's >>>>>>>>>> growing/destruction, but for some types like c++ primitive types or >>>>>>>>>> pointers such initialization does not make much sense, because >>>>>>>>>> GrowableArray is not allowing to access an element which was not >>>>>>>>>> explicitly placed inside of it. And as long as GrowableArray most >>>>>>>>>> widely used to store pointers we're simply wasting the time with >>>>>>>>>> initialization. >>>>>>>>>> >>>>>>>>>> I've measured CTW time with following workaround which implements >>>>>>>>>> initialization for numeric types and pointers as no-op and C1 >>>>>>>>>> compilation time returned back to values that were measured before >>>>>>>>>> original change (51.06?0.24s): >>>>>>>>>> >>>>>>>>>> >>>>>>>>>> >>>>>>>>>> >>>>>>>>>> http://cr.openjdk.java.net/~fzhinkin/growableArrayInitialization/webrev/ >>>>>>>>>> >>>>>>>>>> >>>>>>>>>> I've also measured C2 compilation time and it dropped down by a few >>>>>>>>>> seconds too: 1138?9s w/o GrowableArray's change and 1132?5s w/ it. >>>>>>>>>> >>>>>>>>>> Summing up: I guess we should avoid GrowableArray's backing array >>>>>>>>>> initialization for some types, don't we? >>>>>>>>>> >>>>>>>>>> Best regards, >>>>>>>>>> Filipp >>>>>>>>>> >>>>>>>>>> [1] >>>>>>>>>> >>>>>>>>>> >>>>>>>>>> >>>>>>>>>> http://hg.openjdk.java.net/jdk9/hs-comp/hotspot/file/323b8370b0f6/src/share/vm/utilities/growableArray.hpp#l165 >>>>>>>>>> >>>>>>>>>> >>>>>>>>> >>>>>> >>>> >> From filipp.zhinkin at gmail.com Tue Apr 5 07:23:49 2016 From: filipp.zhinkin at gmail.com (Filipp Zhinkin) Date: Tue, 5 Apr 2016 10:23:49 +0300 Subject: RFR (L): 8149374: Replace C1-specific collection classes with universal collection classes In-Reply-To: <5702CCB0.9050701@oracle.com> References: <56FAFF3E.3020507@oracle.com> <56FD3ECF.1090800@oracle.com> <5702CCB0.9050701@oracle.com> Message-ID: Hi Coleen, thanks for taking a look at it. On Mon, Apr 4, 2016 at 11:21 PM, Coleen Phillimore wrote: > > Thank you for CCing hotspot-dev. This change is great! I reviewed the > runtime files. > > http://cr.openjdk.java.net/~fzhinkin/8149374/webrev.01/src/share/vm/prims/jvmtiRedefineClasses.cpp.udiff.html > > Why does this have three parameters? > > - _index_map_p = new intArray(scratch_cp->length(), -1); > + _index_map_p = new intArray(scratch_cp->length(), scratch_cp->length(), > -1); GrowableArray won't initialize elements in backing array until you ask it to. And it also won't allow to access elements that were not initialized. So we have to pass three parameters there to allocate backing array and fill it with -1. > > Why not just change it to: > > _index_map_p = new GrowableArray(scratch_cp->length()); We use -1 there for CP entries that were not mapped during constant pools merging. > > I don't see the three argument constructor to GrowableArray that takes -1 > (??) It's the one that take init size, length, filler and few other implicit parameters: http://hg.openjdk.java.net/jdk9/hs-comp/hotspot/annotate/tip/src/share/vm/utilities/growableArray.hpp#l178 > > Is it possible to completely eliminate intArray, intStack, boolArray and > boolStack, and the CHeapArray ? If so array.hpp should really go in > directory oops since the only Array<> left is for metaspace. 
Maybe this > can be a further cleanup? I've already eliminated CHeapArray in the latest webrev [*], so only typedefs are preventing array.hpp movement. I'd prefer to eliminate typedefs and move array.hpp to oops directory in separate CR just to avoid webrev's growing and simplify reviewing. But if it's ok, then I can do it within this CR. Thanks, Filipp. [*] http://cr.openjdk.java.net/~fzhinkin/8149374/webrev.02/ > > Wow, thanks! > > Coleen > > > On 3/31/16 11:14 AM, Vladimir Kozlov wrote: >> >> Hi Filipp, >> >> Yes, this looks better. CCing to hotspot-dev for Runtime and GC groups to >> look on. >> >> Thanks, >> Vladimir >> >> On 3/31/16 8:08 AM, Filipp Zhinkin wrote: >>> >>> Hi Vladimir, >>> >>> thank you for looking at this change. >>> >>> On Wed, Mar 30, 2016 at 1:18 AM, Vladimir Kozlov >>> wrote: >>>> >>>> Nice clean up but I don't see any source code removed. What benefits we >>>> have >>>> then? >>>> I understand that we don't generate subclasses for ResourceArray and use >>>> GrowableArray. But it will not save space I think. >>>> What prevents us to remove ResourceArray at all? >>> >>> >>> CMS's ParScanThreadStateSet is inherited from ResourceArray, >>> so it should be updated before removing ResourceArray: >>> >>> http://cr.openjdk.java.net/~fzhinkin/8149374/webrev.01/ >>> >>>> >>>> On 3/11/16 3:42 AM, Filipp Zhinkin wrote: >>>>> >>>>> >>>>> Hi all, >>>>> >>>>> please review a fix for JDK-8149374: >>>>> >>>>> Webrev: http://cr.openjdk.java.net/~fzhinkin/8149374/webrev.00/ >>>>> Bug: https://bugs.openjdk.java.net/browse/JDK-8149374 >>>>> Testing done: hotspot_all tests + CTW >>>>> >>>>> I've replaced all usages of collections defined via define_array and >>>>> define_stack macros with GrowableArray. >>>>> >>>>> There are good and bad news regarding performance impact of that >>>>> change. >>>>> Unfortunately, C1 compilation time for CTW-scenario w/ release bits >>>>> increased from 51.07?0.28s to 52.99?0.23s (it's about 3.5%). >>>> >>>> >>>> >>>> It is acceptable regression I think. I don't think we should optimize >>>> and >>>> make more complex GrowableArray just to save 0.5% of performance for C2. >>> >>> >>> As long as GrowableArray is used in different Hotspot's subsystems it >>> may be beneficial to optimize it, >>> but I've executed SPECjvm2008's startup.* benchmarks and there were no >>> significant difference. >>> >>> If ~3% regression is OK for C1 then I'm fine with leaving >>> GrowableArray's initialization >>> in its current state unless there will be other reasons to speed it up. >>> >>> Thanks, >>> Filipp. >>> >>>> >>>> Thanks, >>>> Vladimir >>>> >>>> >>>>> >>>>> Such difference caused by eager initialization of GrowableArray's >>>>> backing array elements [1]. I can imagine when we actually need to >>>>> force >>>>> initialization and de-initialization during array's >>>>> growing/destruction, but for some types like c++ primitive types or >>>>> pointers such initialization does not make much sense, because >>>>> GrowableArray is not allowing to access an element which was not >>>>> explicitly placed inside of it. And as long as GrowableArray most >>>>> widely used to store pointers we're simply wasting the time with >>>>> initialization. 
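(To make the jvmtiRedefineClasses constructor question above concrete: the difference between the one-argument and three-argument constructions is roughly the following, using a deliberately simplified stand-in for plain value types such as int, not HotSpot's real GrowableArray template.)

    #include <cstdlib>

    template <typename E>
    class SimpleGrowableArray {
      E*  _data;
      int _len;
      int _max;
     public:
      // Capacity only: room for 'capacity' elements, length stays 0, and
      // nothing may be read until elements are appended.
      explicit SimpleGrowableArray(int capacity)
        : _data(static_cast<E*>(::malloc(sizeof(E) * capacity))), _len(0), _max(capacity) {}

      // Capacity + initial length + filler: every slot up to 'length' is valid
      // immediately and holds 'filler' (e.g. -1 for "not mapped yet").
      SimpleGrowableArray(int capacity, int length, const E& filler)
        : _data(static_cast<E*>(::malloc(sizeof(E) * capacity))), _len(length), _max(capacity) {
        for (int i = 0; i < length; i++) _data[i] = filler;
      }

      int  length() const { return _len; }
      E    at(int i) const { return _data[i]; }            // only valid for i < _len
      void at_put(int i, const E& e) { _data[i] = e; }     // only valid for i < _len
      ~SimpleGrowableArray() { ::free(_data); }
    };

Under that stand-in, new SimpleGrowableArray<int>(n) reserves capacity only, while new SimpleGrowableArray<int>(n, n, -1) mirrors the intArray(scratch_cp->length(), scratch_cp->length(), -1) call, with -1 standing for constant-pool entries that have not been mapped yet.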
>>>>> >>>>> I've measured CTW time with following workaround which implements >>>>> initialization for numeric types and pointers as no-op and C1 >>>>> compilation time returned back to values that were measured before >>>>> original change (51.06?0.24s): >>>>> >>>>> >>>>> http://cr.openjdk.java.net/~fzhinkin/growableArrayInitialization/webrev/ >>>>> >>>>> I've also measured C2 compilation time and it dropped down by a few >>>>> seconds too: 1138?9s w/o GrowableArray's change and 1132?5s w/ it. >>>>> >>>>> Summing up: I guess we should avoid GrowableArray's backing array >>>>> initialization for some types, don't we? >>>>> >>>>> Best regards, >>>>> Filipp >>>>> >>>>> [1] >>>>> >>>>> http://hg.openjdk.java.net/jdk9/hs-comp/hotspot/file/323b8370b0f6/src/share/vm/utilities/growableArray.hpp#l165 >>>>> >>>> > From stefan.karlsson at oracle.com Tue Apr 5 08:22:00 2016 From: stefan.karlsson at oracle.com (Stefan Karlsson) Date: Tue, 5 Apr 2016 10:22:00 +0200 Subject: RFR: 8152637: Create a stack allocatable LogStream class In-Reply-To: <56FA877B.9030601@oracle.com> References: <56FA877B.9030601@oracle.com> Message-ID: <570375A8.6040308@oracle.com> Hi all, I did a code walkthrough with Bengt and have updated the webrev: http://cr.openjdk.java.net/~stefank/8152637/webrev.03.delta http://cr.openjdk.java.net/~stefank/8152637/webrev.03 Changes: 1) Code style in test 2) Reverted the unneeded log.hpp functions 3) Removed the LogStreamNoResourceMark::print indirection 4) Renamed the local variable in the usage example in binaryTreeDictionary StefanK On 2016-03-29 15:47, Stefan Karlsson wrote: > Hi all, > > Please review this patch to create a stack allocatable LogStream class > that embeds the ResourceMark. > > http://cr.openjdk.java.net/~stefank/8152637/webrev.01 > https://bugs.openjdk.java.net/browse/JDK-815263 > > The patch is applied on top of the patch in: > http://mail.openjdk.java.net/pipermail/hotspot-dev/2016-March/022303.html > > I've converted the code in binaryTreeDictionary.cpp to show how to use > the class. > > Test: new internal vm test, jprt > > Thanks, > StefanK From bengt.rutisson at oracle.com Tue Apr 5 08:23:36 2016 From: bengt.rutisson at oracle.com (Bengt Rutisson) Date: Tue, 5 Apr 2016 10:23:36 +0200 Subject: RFR: 8152637: Create a stack allocatable LogStream class In-Reply-To: <570375A8.6040308@oracle.com> References: <56FA877B.9030601@oracle.com> <570375A8.6040308@oracle.com> Message-ID: <57037608.8030808@oracle.com> Hi StefanK, On 2016-04-05 10:22, Stefan Karlsson wrote: > Hi all, > > I did a code walkthrough with Bengt and have updated the webrev: > > http://cr.openjdk.java.net/~stefank/8152637/webrev.03.delta > > http://cr.openjdk.java.net/~stefank/8152637/webrev.03 > > > Changes: > 1) Code style in test > 2) Reverted the unneeded log.hpp functions > 3) Removed the LogStreamNoResourceMark::print indirection > 4) Renamed the local variable in the usage example in > binaryTreeDictionary Thanks for fixing this! Looks good. Bengt > > StefanK > > On 2016-03-29 15:47, Stefan Karlsson wrote: >> Hi all, >> >> Please review this patch to create a stack allocatable LogStream >> class that embeds the ResourceMark. >> >> http://cr.openjdk.java.net/~stefank/8152637/webrev.01 >> https://bugs.openjdk.java.net/browse/JDK-815263 >> >> The patch is applied on top of the patch in: >> http://mail.openjdk.java.net/pipermail/hotspot-dev/2016-March/022303.html >> >> >> I've converted the code in binaryTreeDictionary.cpp to show how to >> use the class. 
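(For readers without the webrev open, the general shape under review, a stream object that embeds its own mark so a call site needs a single stack local, can be illustrated with stand-in types; none of the names below are the real HotSpot classes.)

    #include <cstdio>

    struct ScratchMark {                 // stand-in for a scope-bound allocation mark
      ScratchMark()  {}
      ~ScratchMark() {}                  // releases scratch memory when the stream dies
    };

    class ScopedLogStream {
      ScratchMark _mark;                 // embedded, so callers cannot forget it
      FILE*       _out;
     public:
      explicit ScopedLogStream(FILE* out) : _out(out) {}
      void print_cr(const char* s) { std::fprintf(_out, "%s\n", s); }
    };

    int main() {
      ScopedLogStream st(stdout);        // one local replaces the "mark + stream" pair
      st.print_cr("one local, mark included");
      return 0;
    }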
>> >> Test: new internal vm test, jprt >> >> Thanks, >> StefanK > From stefan.karlsson at oracle.com Tue Apr 5 08:42:35 2016 From: stefan.karlsson at oracle.com (Stefan Karlsson) Date: Tue, 5 Apr 2016 10:42:35 +0200 Subject: RFR: 8152637: Create a stack allocatable LogStream class In-Reply-To: <57037608.8030808@oracle.com> References: <56FA877B.9030601@oracle.com> <570375A8.6040308@oracle.com> <57037608.8030808@oracle.com> Message-ID: <57037A7B.5080405@oracle.com> Thanks, Bengt. StefanK On 2016-04-05 10:23, Bengt Rutisson wrote: > > Hi StefanK, > > On 2016-04-05 10:22, Stefan Karlsson wrote: >> Hi all, >> >> I did a code walkthrough with Bengt and have updated the webrev: >> >> http://cr.openjdk.java.net/~stefank/8152637/webrev.03.delta >> >> http://cr.openjdk.java.net/~stefank/8152637/webrev.03 >> >> >> Changes: >> 1) Code style in test >> 2) Reverted the unneeded log.hpp functions >> 3) Removed the LogStreamNoResourceMark::print indirection >> 4) Renamed the local variable in the usage example in >> binaryTreeDictionary > > Thanks for fixing this! > > Looks good. > > Bengt > >> >> StefanK >> >> On 2016-03-29 15:47, Stefan Karlsson wrote: >>> Hi all, >>> >>> Please review this patch to create a stack allocatable LogStream >>> class that embeds the ResourceMark. >>> >>> http://cr.openjdk.java.net/~stefank/8152637/webrev.01 >>> https://bugs.openjdk.java.net/browse/JDK-815263 >>> >>> The patch is applied on top of the patch in: >>> http://mail.openjdk.java.net/pipermail/hotspot-dev/2016-March/022303.html >>> >>> >>> I've converted the code in binaryTreeDictionary.cpp to show how to >>> use the class. >>> >>> Test: new internal vm test, jprt >>> >>> Thanks, >>> StefanK >> > From aph at redhat.com Tue Apr 5 08:44:50 2016 From: aph at redhat.com (Andrew Haley) Date: Tue, 5 Apr 2016 09:44:50 +0100 Subject: RFR 8153310: AArch64: JEP 254: Implement byte_array_inflate and char_array_compress In-Reply-To: <1459790432.3233.7.camel@mint> References: <56FEB045.8000905@redhat.com> <1459790432.3233.7.camel@mint> Message-ID: <57037B02.6030208@redhat.com> On 04/04/16 18:20, Edward Nevill wrote: > On Fri, 2016-04-01 at 18:30 +0100, Andrew Haley wrote: >> We need this for CompactStrings. >> >> http://cr.openjdk.java.net/~aph/8153310/ > > Looks fine to me. A few minor points. > > + // Short string: less than 8 bytes. > + { > + Label loop, around, tiny; > > Label 'aound' is declared and bound, but never used. OK. > + // Unpack the bytes 8 at a time. > + bind(big); > + andw(len, len, 7); > + > + { > + Label loop, around; > > And also here > > + bind(loop); > + ld1(vtmp2, T8B, post(src, 8)); > + sub(rscratch1, rscratch1, 1); > + zip1(vtmp3, T16B, vtmp2, vtmp1); > + st1(vtmp3, T8H, post(dst, 16)); > > Would it be better to use ldrd and strq rather than ld1 and st1. It > seems more natural to me and will give better performance. Better performance? Why? I see a latency of 5 cycles. Andrew. From robbin.ehn at oracle.com Tue Apr 5 08:45:09 2016 From: robbin.ehn at oracle.com (Robbin Ehn) Date: Tue, 5 Apr 2016 10:45:09 +0200 Subject: RFR: 8152637: Create a stack allocatable LogStream class In-Reply-To: <570375A8.6040308@oracle.com> References: <56FA877B.9030601@oracle.com> <570375A8.6040308@oracle.com> Message-ID: <57037B15.3060601@oracle.com> Hi Stefan, Looks good, thanks! 
/Robbin On 04/05/2016 10:22 AM, Stefan Karlsson wrote: > Hi all, > > I did a code walkthrough with Bengt and have updated the webrev: > > http://cr.openjdk.java.net/~stefank/8152637/webrev.03.delta > > http://cr.openjdk.java.net/~stefank/8152637/webrev.03 > > > Changes: > 1) Code style in test > 2) Reverted the unneeded log.hpp functions > 3) Removed the LogStreamNoResourceMark::print indirection > 4) Renamed the local variable in the usage example in binaryTreeDictionary > > StefanK > > On 2016-03-29 15:47, Stefan Karlsson wrote: >> Hi all, >> >> Please review this patch to create a stack allocatable LogStream class >> that embeds the ResourceMark. >> >> http://cr.openjdk.java.net/~stefank/8152637/webrev.01 >> https://bugs.openjdk.java.net/browse/JDK-815263 >> >> The patch is applied on top of the patch in: >> http://mail.openjdk.java.net/pipermail/hotspot-dev/2016-March/022303.html >> >> I've converted the code in binaryTreeDictionary.cpp to show how to use >> the class. >> >> Test: new internal vm test, jprt >> >> Thanks, >> StefanK > From stefan.karlsson at oracle.com Tue Apr 5 08:45:27 2016 From: stefan.karlsson at oracle.com (Stefan Karlsson) Date: Tue, 5 Apr 2016 10:45:27 +0200 Subject: RFR: 8152637: Create a stack allocatable LogStream class In-Reply-To: <57037B15.3060601@oracle.com> References: <56FA877B.9030601@oracle.com> <570375A8.6040308@oracle.com> <57037B15.3060601@oracle.com> Message-ID: <57037B27.7010504@oracle.com> Thanks, Robbin! StefanK On 2016-04-05 10:45, Robbin Ehn wrote: > Hi Stefan, > > Looks good, thanks! > > /Robbin > > On 04/05/2016 10:22 AM, Stefan Karlsson wrote: >> Hi all, >> >> I did a code walkthrough with Bengt and have updated the webrev: >> >> http://cr.openjdk.java.net/~stefank/8152637/webrev.03.delta >> >> http://cr.openjdk.java.net/~stefank/8152637/webrev.03 >> >> >> Changes: >> 1) Code style in test >> 2) Reverted the unneeded log.hpp functions >> 3) Removed the LogStreamNoResourceMark::print indirection >> 4) Renamed the local variable in the usage example in >> binaryTreeDictionary >> >> StefanK >> >> On 2016-03-29 15:47, Stefan Karlsson wrote: >>> Hi all, >>> >>> Please review this patch to create a stack allocatable LogStream class >>> that embeds the ResourceMark. >>> >>> http://cr.openjdk.java.net/~stefank/8152637/webrev.01 >>> https://bugs.openjdk.java.net/browse/JDK-815263 >>> >>> The patch is applied on top of the patch in: >>> http://mail.openjdk.java.net/pipermail/hotspot-dev/2016-March/022303.html >>> >>> >>> I've converted the code in binaryTreeDictionary.cpp to show how to use >>> the class. >>> >>> Test: new internal vm test, jprt >>> >>> Thanks, >>> StefanK >> From aph at redhat.com Tue Apr 5 08:48:59 2016 From: aph at redhat.com (Andrew Haley) Date: Tue, 5 Apr 2016 09:48:59 +0100 Subject: RFR(XS): 8153275: Zero JVM fails to initialize after JDK-8152440 In-Reply-To: <1459791895.3762.14.camel@redhat.com> References: <1459791895.3762.14.camel@redhat.com> Message-ID: <57037BFB.1060606@redhat.com> On 04/04/16 18:44, Severin Gehwolf wrote: > Hi, > > Could somebody please sponsor and review the following Zero-only fix? > The fix for JDK-8152440 was incorrect in that it set the value > for InitArrayShortSize to an illegal value (-1) failing constraint > validation. Albeit not being used it must still pass constraint > validation. Otherwise, the JVM fails to initialize and all bets are > off. Thoughts? 
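(A generic illustration of why -1 failed, not HotSpot's actual flag machinery: range constraint checking rejects any value outside the declared bounds, so even a flag the platform never uses needs an in-range default such as 0.)

    #include <stdint.h>
    #include <stdio.h>

    struct IntFlag {
      const char* name;
      int64_t     value;
      int64_t     min;
      int64_t     max;
      bool in_range() const { return value >= min && value <= max; }
    };

    int main() {
      // -1 as a "this platform does not use it" marker fails the declared range ...
      IntFlag bad  = { "InitArrayShortSize", -1, 0, INT64_MAX };
      // ... while any in-range default passes, even if the value is never read.
      IntFlag good = { "InitArrayShortSize",  0, 0, INT64_MAX };
      printf("%s = -1 passes? %d; = 0 passes? %d\n",
             bad.name, (int)bad.in_range(), (int)good.in_range());
      return 0;
    }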
> > Bug: https://bugs.openjdk.java.net/browse/JDK-8153275 > webrev: http://cr.openjdk.java.net/~sgehwolf/webrevs/JDK-8153275/webrev.01/ OK, but please make the comment clearer. I didn't understand it. "the allowed range [ 0 ... 9223372036854775807 ]" is much clearer. Andrew. From adinn at redhat.com Tue Apr 5 09:25:41 2016 From: adinn at redhat.com (Andrew Dinn) Date: Tue, 5 Apr 2016 10:25:41 +0100 Subject: JMH and JDK9 In-Reply-To: <0C6C1D21-1592-426A-B9DE-33458FC90CF9@oracle.com> References: <56FBFA22.2060204@redhat.com> <56FBFB05.9050706@oracle.com> <56FBFCCD.1000704@oracle.com> <428721B0-47A4-453E-89C2-1D6CA09DEC75@oracle.com> <56FE29EE.1090801@oracle.com> <0C6C1D21-1592-426A-B9DE-33458FC90CF9@oracle.com> Message-ID: <57038495.40603@redhat.com> On 04/04/16 22:15, Christian Thalinger wrote: > >> On Mar 31, 2016, at 9:57 PM, Alan Bateman wrote: >> Are you going to submit a bug to Eclipse on this? > . . . > My lack of understanding of the problem tells me no. Maybe you should :-) This is actually one example of a more general problem where both Java EE components and Java EE applications need to employ classes which extend the class set of a package currently bundled in a Jigsaw module. For example, as well as the common annotations classes there is also package javax.transaction into which package the JDK inserts the 3 TX Exception (RollbackException etc) classes but omits various other TX classes such as Synchronization left for EE components to define -- for more details see this dialogue between Sanne Grinovero and Alan http://mail.openjdk.java.net/pipermail/jigsaw-dev/2016-March/007156.html http://mail.openjdk.java.net/pipermail/jigsaw-dev/2016-March/007157.html Alan's advice is for EE containers to use -upgrademodule (or -Xpatch) to override the classes provided in the JDK with the EE implementation version. This is indeed a feasible workaround (EE container implementors should probably be expected to understand the problem and solution). However, this neglects the fact that there are also standalone uses of these EE components e.g. for unit testing or for applications which embed, say, TX behaviour without employing a full EE container. The test case issue is the most significant one here (although I am sure that EE component devs will be unhappy about both) since requiring the general user to understand and fix this problem is a different order of burden. I have asked Sanne to post a note about this to jpms-spec-comments at openjdk.java.net for inclusion in the list of open jigsaw issues to be considered by the implementors and expert group. I think it is important to consider whether there are other ways we can approach this problem. For example, would it be possible to provide an escape clause in Jigsaw which allows stub modules such as this to be tagged so that they can be automatically extended or replaced from the classpath without needing any intervention on the command line? n.b. I don't want an answer here but I want to be sure the problem is recorded and addressed in the proper place. regards, Andrew Dinn ----------- Senior Principal Software Engineer Red Hat UK Ltd Registered in UK and Wales under Company Registration No. 
3798903 Directors: Michael Cunningham (US), Michael O'Neill (Ireland), Paul Argiry (US) From sgehwolf at redhat.com Tue Apr 5 09:30:14 2016 From: sgehwolf at redhat.com (Severin Gehwolf) Date: Tue, 05 Apr 2016 11:30:14 +0200 Subject: RFR(XS): 8153275: Zero JVM fails to initialize after JDK-8152440 In-Reply-To: <57037BFB.1060606@redhat.com> References: <1459791895.3762.14.camel@redhat.com> <57037BFB.1060606@redhat.com> Message-ID: <1459848614.4486.13.camel@redhat.com> On Tue, 2016-04-05 at 09:48 +0100, Andrew Haley wrote: > On 04/04/16 18:44, Severin Gehwolf wrote: > > > > Hi, > > > > Could somebody please sponsor and review the following Zero-only fix? > > The fix for JDK-8152440 was incorrect in that it set the value > > for InitArrayShortSize to an illegal value (-1) failing constraint > > validation. Albeit not being used it must still pass constraint > > validation. Otherwise, the JVM fails to initialize and all bets are > > off. Thoughts? > > > > Bug: https://bugs.openjdk.java.net/browse/JDK-8153275 > > webrev: http://cr.openjdk.java.net/~sgehwolf/webrevs/JDK-8153275/webrev.01/ > OK, but please make the comment clearer.??I didn't understand it. > > ?"the allowed range [ 0 ... 9223372036854775807 ]" > > is much clearer. Thanks?for the review! Updated webrev: http://cr.openjdk.java.net/~sgehwolf/webrevs/JDK-8153275/webrev.02/ Cheers, Severin From Alan.Bateman at oracle.com Tue Apr 5 10:17:18 2016 From: Alan.Bateman at oracle.com (Alan Bateman) Date: Tue, 5 Apr 2016 11:17:18 +0100 Subject: JMH and JDK9 In-Reply-To: <57038495.40603@redhat.com> References: <56FBFA22.2060204@redhat.com> <56FBFB05.9050706@oracle.com> <56FBFCCD.1000704@oracle.com> <428721B0-47A4-453E-89C2-1D6CA09DEC75@oracle.com> <56FE29EE.1090801@oracle.com> <0C6C1D21-1592-426A-B9DE-33458FC90CF9@oracle.com> <57038495.40603@redhat.com> Message-ID: <570390AE.6050406@oracle.com> On 05/04/2016 10:25, Andrew Dinn wrote: > : > This is actually one example of a more general problem where both Java > EE components and Java EE applications need to employ classes which > extend the class set of a package currently bundled in a Jigsaw module. > > For example, as well as the common annotations classes there is also > package javax.transaction into which package the JDK inserts the 3 TX > Exception (RollbackException etc) classes but omits various other TX > classes such as Synchronization left for EE components to define -- for > more details see this dialogue between Sanne Grinovero and Alan > > http://mail.openjdk.java.net/pipermail/jigsaw-dev/2016-March/007156.html > > http://mail.openjdk.java.net/pipermail/jigsaw-dev/2016-March/007157.html > > Alan's advice is for EE containers to use -upgrademodule (or -Xpatch) > to override the classes provided in the JDK with the EE implementation > version. > > This is indeed a feasible workaround (EE container implementors should > probably be expected to understand the problem and solution). However, > this neglects the fact that there are also standalone uses of these EE > components The same applies to standalone usages of these EE components (no different to the app server case). There is more on this topic in JEP 261. > e.g. for unit testing or for applications which embed, say, > TX behaviour without employing a full EE container. The test case issue > is the most significant one here (although I am sure that EE component > devs will be unhappy about both) since requiring the general user to > understand and fix this problem is a different order of burden. 
> > I have asked Sanne to post a note about this to > jpms-spec-comments at openjdk.java.net for inclusion in the list of open > jigsaw issues to be considered by the implementors and expert group. I > think it is important to consider whether there are other ways we can > approach this problem. For example, would it be possible to provide an > escape clause in Jigsaw which allows stub modules such as this to be > tagged so that they can be automatically extended or replaced from the > classpath without needing any intervention on the command line? n.b. I > don't want an answer here but I want to be sure the problem is recorded > and addressed in the proper place. We recently updated JEP 261 proposing that "java.se" be the only java.* root module that is resolved when compiling code in the unnamed module or at runtime when the main class is loaded from the class path [1]. On the surface then it will "appear" to developers that the JDK does not have the EE components but from everything we've seen so far, then those EE components are usually on the class path anyway. -Alan. [1] http://mail.openjdk.java.net/pipermail/jigsaw-dev/2016-April/007209.html From coleen.phillimore at oracle.com Tue Apr 5 12:40:27 2016 From: coleen.phillimore at oracle.com (Coleen Phillimore) Date: Tue, 5 Apr 2016 08:40:27 -0400 Subject: RFR (L): 8149374: Replace C1-specific collection classes with universal collection classes In-Reply-To: References: <56FAFF3E.3020507@oracle.com> <56FD3ECF.1090800@oracle.com> <5702CCB0.9050701@oracle.com> Message-ID: <5703B23B.2030008@oracle.com> Filip, Thank you for your answers. This change looks really good!! Coleen On 4/5/16 3:23 AM, Filipp Zhinkin wrote: > Hi Coleen, > > thanks for taking a look at it. > > On Mon, Apr 4, 2016 at 11:21 PM, Coleen Phillimore > wrote: >> Thank you for CCing hotspot-dev. This change is great! I reviewed the >> runtime files. >> >> http://cr.openjdk.java.net/~fzhinkin/8149374/webrev.01/src/share/vm/prims/jvmtiRedefineClasses.cpp.udiff.html >> >> Why does this have three parameters? >> >> - _index_map_p = new intArray(scratch_cp->length(), -1); >> + _index_map_p = new intArray(scratch_cp->length(), scratch_cp->length(), >> -1); > GrowableArray won't initialize elements in backing array until you ask it to. > And it also won't allow to access elements that were not initialized. > > So we have to pass three parameters there to allocate backing array > and fill it with -1. > >> Why not just change it to: >> >> _index_map_p = new GrowableArray(scratch_cp->length()); > We use -1 there for CP entries that were not mapped during constant > pools merging. > >> I don't see the three argument constructor to GrowableArray that takes -1 >> (??) > It's the one that take init size, length, filler and few other > implicit parameters: > http://hg.openjdk.java.net/jdk9/hs-comp/hotspot/annotate/tip/src/share/vm/utilities/growableArray.hpp#l178 > >> Is it possible to completely eliminate intArray, intStack, boolArray and >> boolStack, and the CHeapArray ? If so array.hpp should really go in >> directory oops since the only Array<> left is for metaspace. Maybe this >> can be a further cleanup? > I've already eliminated CHeapArray in the latest webrev [*], > so only typedefs are preventing array.hpp movement. > > I'd prefer to eliminate typedefs and move array.hpp to oops directory > in separate CR just > to avoid webrev's growing and simplify reviewing. > But if it's ok, then I can do it within this CR. > > Thanks, > Filipp. 
> > [*] http://cr.openjdk.java.net/~fzhinkin/8149374/webrev.02/ > >> Wow, thanks! >> >> Coleen >> >> >> On 3/31/16 11:14 AM, Vladimir Kozlov wrote: >>> Hi Filipp, >>> >>> Yes, this looks better. CCing to hotspot-dev for Runtime and GC groups to >>> look on. >>> >>> Thanks, >>> Vladimir >>> >>> On 3/31/16 8:08 AM, Filipp Zhinkin wrote: >>>> Hi Vladimir, >>>> >>>> thank you for looking at this change. >>>> >>>> On Wed, Mar 30, 2016 at 1:18 AM, Vladimir Kozlov >>>> wrote: >>>>> Nice clean up but I don't see any source code removed. What benefits we >>>>> have >>>>> then? >>>>> I understand that we don't generate subclasses for ResourceArray and use >>>>> GrowableArray. But it will not save space I think. >>>>> What prevents us to remove ResourceArray at all? >>>> >>>> CMS's ParScanThreadStateSet is inherited from ResourceArray, >>>> so it should be updated before removing ResourceArray: >>>> >>>> http://cr.openjdk.java.net/~fzhinkin/8149374/webrev.01/ >>>> >>>>> On 3/11/16 3:42 AM, Filipp Zhinkin wrote: >>>>>> >>>>>> Hi all, >>>>>> >>>>>> please review a fix for JDK-8149374: >>>>>> >>>>>> Webrev: http://cr.openjdk.java.net/~fzhinkin/8149374/webrev.00/ >>>>>> Bug: https://bugs.openjdk.java.net/browse/JDK-8149374 >>>>>> Testing done: hotspot_all tests + CTW >>>>>> >>>>>> I've replaced all usages of collections defined via define_array and >>>>>> define_stack macros with GrowableArray. >>>>>> >>>>>> There are good and bad news regarding performance impact of that >>>>>> change. >>>>>> Unfortunately, C1 compilation time for CTW-scenario w/ release bits >>>>>> increased from 51.07?0.28s to 52.99?0.23s (it's about 3.5%). >>>>> >>>>> >>>>> It is acceptable regression I think. I don't think we should optimize >>>>> and >>>>> make more complex GrowableArray just to save 0.5% of performance for C2. >>>> >>>> As long as GrowableArray is used in different Hotspot's subsystems it >>>> may be beneficial to optimize it, >>>> but I've executed SPECjvm2008's startup.* benchmarks and there were no >>>> significant difference. >>>> >>>> If ~3% regression is OK for C1 then I'm fine with leaving >>>> GrowableArray's initialization >>>> in its current state unless there will be other reasons to speed it up. >>>> >>>> Thanks, >>>> Filipp. >>>> >>>>> Thanks, >>>>> Vladimir >>>>> >>>>> >>>>>> Such difference caused by eager initialization of GrowableArray's >>>>>> backing array elements [1]. I can imagine when we actually need to >>>>>> force >>>>>> initialization and de-initialization during array's >>>>>> growing/destruction, but for some types like c++ primitive types or >>>>>> pointers such initialization does not make much sense, because >>>>>> GrowableArray is not allowing to access an element which was not >>>>>> explicitly placed inside of it. And as long as GrowableArray most >>>>>> widely used to store pointers we're simply wasting the time with >>>>>> initialization. >>>>>> >>>>>> I've measured CTW time with following workaround which implements >>>>>> initialization for numeric types and pointers as no-op and C1 >>>>>> compilation time returned back to values that were measured before >>>>>> original change (51.06?0.24s): >>>>>> >>>>>> >>>>>> http://cr.openjdk.java.net/~fzhinkin/growableArrayInitialization/webrev/ >>>>>> >>>>>> I've also measured C2 compilation time and it dropped down by a few >>>>>> seconds too: 1138?9s w/o GrowableArray's change and 1132?5s w/ it. >>>>>> >>>>>> Summing up: I guess we should avoid GrowableArray's backing array >>>>>> initialization for some types, don't we? 
>>>>>> >>>>>> Best regards, >>>>>> Filipp >>>>>> >>>>>> [1] >>>>>> >>>>>> http://hg.openjdk.java.net/jdk9/hs-comp/hotspot/file/323b8370b0f6/src/share/vm/utilities/growableArray.hpp#l165 >>>>>> From lois.foltan at oracle.com Tue Apr 5 14:08:23 2016 From: lois.foltan at oracle.com (Lois Foltan) Date: Tue, 05 Apr 2016 10:08:23 -0400 Subject: RFR [XS] 8153297 - [TESTBUG] Enhance test/testlibrary/ClassFileInstaller.java to support JAR files In-Reply-To: <56FEA59C.9020805@oracle.com> References: <56FEA59C.9020805@oracle.com> Message-ID: <5703C6D7.6010002@oracle.com> Hi Ioi, This change looks good, thanks by the way since I am working on supporting an updated -Xpatch format and have to write tests to create JAR files as well. I had copied BasicJarBuilder.java from the test/runtime/SharedArchiveFile directory. Can you remove BasicJarBuilder.java as well as part of this change? Thanks, Lois On 4/1/2016 12:45 PM, Ioi Lam wrote: > Please review a very small fix: > > http://cr.openjdk.java.net/~iklam/jdk9/8153300-enhance-classfileinstaller.v01/ > > > Bug: [TESTBUG] Enhance test/testlibrary/ClassFileInstaller.java to > support JAR files > > https://bugs.openjdk.java.net/browse/JDK-8153300 > > Summary of fix: > > Many hotspot tests need to create JAR files. The current method is > messy: > > [1] Because JTREG compiles .class files into various directories, > we need > to use ClassFileInstaller to find the .class file and copy it > into the > current directory. > [2] Then, there are various ad-hoc calls to sun.tools.jar.Main to > create the > JAR file. This is not desirable because sun.tools.jar is an > internal > package and javac gives warnings about it. > > I have improved ClassFileInstaller so that JAR files can be easily > created using JTREG tags: > > * @build ClassFileInstaller sun.hotspot.WhiteBox > * @run main ClassFileInstaller -jar whitebox.jar sun.hotspot.WhiteBox > > To handle more complex use cases, new APIs are also added to > ClassFileInstaller > for programmatically creating JAR files inside the test source code. > > I converted two CDS test cases to use the new functionality. > > FYI, I am also working on new tests for a closed issue > (JDK-8153297) that use > the new functionality. > > Thanks > - Ioi > > > From martin.doerr at sap.com Tue Apr 5 14:13:49 2016 From: martin.doerr at sap.com (Doerr, Martin) Date: Tue, 5 Apr 2016 14:13:49 +0000 Subject: PPC64 VSX load/store instructions in stubs In-Reply-To: <56FEDBB3.5030106@linux.vnet.ibm.com> References: <56FEDBB3.5030106@linux.vnet.ibm.com> Message-ID: Hi Gustavo, I think such changes are appreciated if they improve performance. I think VSX instructions can be used as long as we don't violate the ABI (only use volatile registers). If you add tests for availability of instructions to vm_version_ppc, please also add them to the feature-string in VM_Version::initialize() (add a "%s" and the name of the instruction). We can assist in getting such changes pushed into hs-comp. Thanks for working on it. Best regards, Martin -----Original Message----- From: Gustavo Romero [mailto:gromero at linux.vnet.ibm.com] Sent: Freitag, 1. 
April 2016 22:36 To: Doerr, Martin ; Simonis, Volker ; ppc-aix-port-dev at openjdk.java.net; hotspot-dev at openjdk.java.net Cc: brenohl at br.ibm.com Subject: PPC64 VSX load/store instructions in stubs Hi Martin, Hi Volker Currently VSX load/store instructions are not being used in PPC64 stubs, particularly in arraycopy stubs inside generate_arraycopy_stubs() like, but not limited to, generate_disjoint_{byte,short,int,long}_copy. We can speed up mass copy using VSX (Vector-Scalar Extension) load/store instruction in processors >= POWER8, the same way it's already done for libc memcpy(). This is an initial patch just for jshort_disjoint_arraycopy() VSX vector load/store: http://81.de.7a9f.ip4.static.sl-reverse.com/202539/webrev What are your thoughts on that? Is there any impediment to use VSX instructions in OpenJDK at the moment? Thank you. Best regards, Gustavo From marcus.larsson at oracle.com Tue Apr 5 14:14:59 2016 From: marcus.larsson at oracle.com (Marcus Larsson) Date: Tue, 5 Apr 2016 16:14:59 +0200 Subject: RFR: 8145934: Make ttyLocker equivalent for Unified Logging framework In-Reply-To: <56FE78B3.2060802@oracle.com> References: <56BB3FD0.5000104@oracle.com> <3910DA9B-43C9-4C1A-8FD0-993A54225550@oracle.com> <56BCA8C9.102@oracle.com> <56C34F0E.4090803@oracle.com> <90DC33E3-F597-40E4-A317-6C92F4969575@oracle.com> <56EC03A4.1030705@oracle.com> <56FCE56C.6070606@oracle.com> <56FD1481.3090707@oracle.com> <56FD3CC1.4050502@oracle.com> <56FE78B3.2060802@oracle.com> Message-ID: <5703C863.4080403@oracle.com> Hi, Rebased and aligned the patch with the latest changes to the UL API. Webrevs updated in place. Are we ready to wrap this up? Thanks, Marcus On 04/01/2016 03:33 PM, Marcus Larsson wrote: > Hi again, > > Updated webrev with removed decoration buffers. Decorations are now > written directly to the streams with the help of flockfile/funlockfile > as you suggested. > > Webrev: > http://cr.openjdk.java.net/~mlarsson/8145934/webrev.03/ > > Incremental: > http://cr.openjdk.java.net/~mlarsson/8145934/webrev.02-03/ > > Thanks, > Marcus > > On 03/31/2016 05:05 PM, Marcus Larsson wrote: >> >> On 03/31/2016 03:40 PM, Thomas St?fe wrote: >>> Hi Marcus, >>> >>> On Thu, Mar 31, 2016 at 2:13 PM, Marcus Larsson >>> > wrote: >>> >>> Hi Thomas, >>> >>> >>> On 03/31/2016 12:48 PM, Thomas St?fe wrote: >>>> Hi Marcus, >>>> >>>> nice to see progress on that issue! >>> >>> Thanks for taking time to look at it. >>> >>>> >>>> Unfortunately your patch did not apply to my freshly synced hs-rt >>>> repository. So I took a "dry look" at your code, and here some >>>> feedback (by no means complete, and also I am not a (R)eviewer): >>> >>> I'll rebase it and update the webrev. >>> >>>> >>>> - thank you for taking my input and avoiding resource area for >>>> memory. I am still apprehensive about UL using NEW_C_HEAP_ARRAY >>>> instead of raw malloc() here, but I see it has pros and cons. >>> >>> It might be worth investigating, but if so it should probably be a >>> separate RFE. >>> >>> >>> Ok. Easy enough to fix should e.g. NMT ever want to use UL. >>> >>>> >>>> - I am not sure about flockfile(): I really do not like file >>>> locks, this always bites in customer scenarios. Also, by using >>>> this lock, are we not just reintroducing the ttyLocker at a >>>> deeper level? >>> >>> The fprintfs locks the FILE* internally even if we don't. This is >>> AFAIU how fprintf guarantees the writes to be atomic. 
With the >>> explicit flock calls we're just ensuring nothing can be printed >>> in-between our fprintf calls, it shouldn't add any cost. >>> >>> >>> Ah, I see. If we really feel safe about flockfile(), we might just >>> as well use it in LogFileStreamOutput::write() too. There, we >>> assemble the decorators in a stack local buffer to fprintf them out >>> to the FILE* in a separate step - I guess to prevent tearing? But if >>> flockfile comes without cost, we could save the stack local buffer >>> and do: >>> >>> flockfile() >>> fputs(decorators) >>> fputs(message) >>> funlockfile() >> >> Good idea. >> >>> >>>> Instead, how about assembling the total message in memory - like >>>> it would appear in the file - and print it in one go using >>>> ::write()? That usually is atomic. This way you would have to >>>> write out the decorators for each line in memory as they are >>>> added, but you could get rid of the _lines[] array and all its >>>> surrounding code. So, no lock, less complicated code, at the cost >>>> of a bit more memory usage. >>> >>> As the message might go to different outputs, configured for >>> different levels, we can't really get rid of the _lines[] array. >>> We could assemble each applicable message as a long string for >>> each of the outputs, but given how fprintf seems to work we won't >>> really have gained anything for that extra work and memory usage. >>> >>> >>> Oh, I see. I did not understand the complexity of the whole thing. >>> Why is it needed to write lines to a message with different log >>> levels? I may be slow, but I find that not easy to understand. The >>> fact that different lines in my message may go to different outputs >>> is a bit surprising. I would have thought a message is just a text >>> blob I assemble offline and send to the logging framework in one go, >>> like a glorified String, and that I would hand it down to UL "send >>> this for this level/tagset combination". And that the message itself >>> would not even need to know anything about log levels and tagsets. >> >> The use case I want to support with multi-part messages on different >> levels is when you have an event you want to log, on for example info >> level, but where part of that event might include data that is too >> verbose to fit the info level. So then you could split the event into >> two parts, one line with the basic information on info level and the >> other line (or multiple lines) on debug or trace level. The framework >> then makes sure these lines are delivered together non-interleaved. >> >>> >>>> - If I understand this correctly, there is no way to print part >>>> of a line to the message object? So, if I would want to assemble >>>> a line from various inputs, I would still have to assemble it on >>>> the stack and feed it to say ScopedLogMessage::debug() in one go? >>>> Would it be posssible to get an outputStream* from the >>>> ScopedLogMessage to write into? >>> >>> Yes, that's right. I wanted to avoid streams for multi-line >>> messages because I thought the API would become a bit messy with >>> that functionality. The logStreams we have today are line >>> buffered, and will send completed lines to the log outputs when >>> they see a terminating newline character. This means that it won't >>> be obvious how lines from different streams or writes to the >>> message will be ordered in the output. Perhaps it's not that bad, >>> but I figured that we could use stringStreams or similar for when >>> we need to build up lines for the message. 
This has the nice side >>> effect that it will be very obvious when, and in what order, each >>> line is written to the outputs. Perhaps it's worth a follow up RFE >>> if we find ourselves writing one too many log cases with >>> stringStreams? >>> >>> >>> Sorry, I think I was not clear enough. What I meant was simpler. We >>> have now ScopedLogMessage::debug() which does >>> LogMessageBuffer::write() which writes a line and terminates the >>> line. Line outputStream::print_cr(). I would like to have an option >>> to just write but not terminate the current line, like >>> outputStream::print(). That way one could assemble a line piece by >>> piece, maybe in a loop (e.g. for table row values) without needing >>> another temporary buffer. >> >> Ok, so say we add the debug_no_cr() family of functions that writes >> into the log message buffer without newlines. Then, what does it mean >> if someone does debug_no_cr(s1); trace_no_cr(s2); info(s3); ? >> >> It would be simpler if it wasn't for the support for different levels >> on different parts of the message. Maybe some well defined rules for >> how it should work would solve this, but I intended to avoid the >> whole use case for now. It can be done manually with stringStreams, >> so I don't think it's that serious. >> >>> >>>> >>>> - I like how you implemented os::log_vsnprintf(), using >>>> _vscprintf() on windows. Would it be worthwhile to merge this >>>> with jio_vsnprintf(), which does the same but returns -1 on >>>> truncation? >>> >>> The patch for JDK-8138916 [0] added the log_vsnprintf. You mean to >>> change jio_vsnprintf to not return -1 on truncation, and instead >>> work like vsnprintf on POSIX? I think that would be useful, and it >>> allows us to remove log_vsnprintf. >>> >>> >>> That is exactly what I meant. I think that would be a separate RFE >>> though, one would have to check on all callers of jio_snprintf. >> >> Yeah. >> >> Regards, >> Marcus >> >>> >>> Thanks, >>> Marcus >>> >>> >>> Thank you! >>> >>> ..Thomas >>> >>> [0] https://bugs.openjdk.java.net/browse/JDK-8138916 >>> >>> >>>> >>>> Kind Regards, Thomas >>>> >>>> >>>> On Thu, Mar 31, 2016 at 10:53 AM, Marcus Larsson >>>> > >>>> wrote: >>>> >>>> Any further feedback on this? >>>> >>>> >>>> >>>> On 03/18/2016 02:33 PM, Marcus Larsson wrote: >>>> >>>> Hi again, >>>> >>>> New webrev: >>>> http://cr.openjdk.java.net/~mlarsson/8145934/webrev.02/ >>>> >>>> >>>> Incremental: >>>> http://cr.openjdk.java.net/~mlarsson/8145934/webrev.alt-02/ >>>> >>>> >>>> Made all allocations regular C heap allocations because >>>> of the problems with resource allocations that Thomas >>>> brought up. We can do a follow up change for resource >>>> allocation support if we really need it. >>>> Also added some more tests for scoped messages. >>>> >>>> >>>> On 02/17/2016 12:19 AM, John Rose wrote: >>>> >>>> On Feb 16, 2016, at 8:32 AM, Marcus Larsson >>>> >>> >>>> >>> >> wrote: >>>> >>>> >>>> Alternative version where a LogMessage >>>> automatically writes its messages when it goes >>>> out of scope: >>>> http://cr.openjdk.java.net/~mlarsson/8145934/webrev.alt/ >>>> >>>> >>>> >>>> >>>> >>>> I like this, with the LogMessageBuffer that does the >>>> heavy work, and the [Scoped]LogMessage which is the >>>> simplest way to use it. >>>> >>>> The LogMessageBuffer should have a neutral >>>> unallocated state, for use through the LogMessage >>>> macro. I.e., is_c_allocated should be a three-state >>>> flag, including 'not allocated at all'. 
That way, if >>>> you create the thing only to ask 'is_debug' and get a >>>> false answer, you won't have done more than a few >>>> cycles of work. Probably the set_prefix operation >>>> should be lazy in the same way. >>>> >>>> >>>> Fixed. Since I removed the resource allocation completely >>>> I could keep it as a boolean. >>>> >>>> >>>> I think the destructor should call a user-callable >>>> flush function, something like this: >>>> >>>> ~ScopedLogMessage() { flush(); } >>>> // in LogMessageBuffer: >>>> void flush() { >>>> if (_line_count > 0) { >>>> _log.write(*this); >>>> reset(); >>>> } >>>> } >>>> void reset() { >>>> _line_count = 0; >>>> _message_buffer_size = 0; >>>> } >>>> >>>> It will be rare for user code to want to either flush >>>> early or cancel pending output, but when you need it, >>>> it should be there. >>>> >>>> >>>> Fixed. >>>> >>>> >>>> I still prefer the first patch though, where >>>> messages are neither tied to a particular log, >>>> nor automatically written when they go out of >>>> scope. Like I've said, the explicit write line >>>> makes it easier to read the code. >>>> >>>> >>>> There's a tradeoff here: It's easier to read the >>>> *logging* code if all the *logging* operations are >>>> explicit. But the point of logging code is to add >>>> logging to code that is busy doing *other* operations >>>> besides logging. That's why (I assume) people have >>>> been noting that some uses of logging are >>>> "intrusive": The logging logic calls too much >>>> attention to itself, and with attention being a >>>> limited resource, it takes away attention from the >>>> actual algorithm that's being logged about. >>>> >>>> The scoped (RAII) log buffer, with automatic write, >>>> is the best way I know to reduce the intrusiveness of >>>> this auxiliary mechanism. >>>> >>>> >>>> Fair point. I'm going with the automatic write on out of >>>> scope. >>>> >>>> >>>> Of course, I'm interested in finding out what your >>>> everyday customers think about it. (Rachel, Coleen, >>>> David, Dan?) >>>> >>>> For comparison I've updated the first suggestion >>>> with the guarantee for unwritten messages, as >>>> well as cleaning it up a bit by moving the >>>> implementation to the .cpp rather than the .hpp. >>>> Full >>>> webrev:http://cr.openjdk.java.net/~mlarsson/8145934/webrev.01/ >>>> >>>> >>>> Incremental:http://cr.openjdk.java.net/~mlarsson/8145934/webrev.00-01/ >>>> >>>> >>>> >>>> Let me know what you think. >>>> >>>> >>>> That option is more intrusive than the RAII buffered >>>> log alias. >>>> >>>> Separately, the review thread on JDK-8149383 shows a >>>> use for LogMessageBuffer to collect a complex log >>>> message. The log message can then be sent down one >>>> of two log streams. Something like: >>>> >>>> if (need_to_log) { >>>> ResourceMark rm; >>>> LogMessageBuffer buf; >>>> buf.write("Revoking bias of object " >>>> INTPTR_FORMAT " , mark " >>>> INTPTR_FORMAT " , type %s , prototype header " >>>> INTPTR_FORMAT >>>> " , allow rebias %d , >>>> requesting thread " INTPTR_FORMAT, >>>> p2i((void *)obj), >>>> (intptr_t) mark, >>>> obj->klass()->external_name(), >>>> (intptr_t) obj->klass()->prototype_header(), >>>> (allow_rebias ? 
1 : 0), >>>> (intptr_t) requesting_thread); >>>> if (!is_bulk) >>>> log_info(biasedlocking).write(buf); >>>> else >>>> log_trace(biasedlocking).write(buf); >>>> } >>>> >>>> It is important here (like you pointed out) that the >>>> LogMessageBuffer is decoupled from log levels and >>>> streams, so that it can be used as a flexible >>>> component of logic like this. >>>> >>>> But the commonest usage should (IMO) be supported by >>>> a scoped auto-writing log alias. >>>> >>>> >>>> Yeah, I agree. >>>> >>>> Thanks, >>>> Marcus >>>> >>>> >>>> >>> >>> >> > From adinn at redhat.com Tue Apr 5 14:16:17 2016 From: adinn at redhat.com (Andrew Dinn) Date: Tue, 5 Apr 2016 15:16:17 +0100 Subject: JMH and JDK9 In-Reply-To: <570390AE.6050406@oracle.com> References: <56FBFA22.2060204@redhat.com> <56FBFB05.9050706@oracle.com> <56FBFCCD.1000704@oracle.com> <428721B0-47A4-453E-89C2-1D6CA09DEC75@oracle.com> <56FE29EE.1090801@oracle.com> <0C6C1D21-1592-426A-B9DE-33458FC90CF9@oracle.com> <57038495.40603@redhat.com> <570390AE.6050406@oracle.com> Message-ID: <5703C8B1.5080501@redhat.com> On 05/04/16 11:17, Alan Bateman wrote: . . . > We recently updated JEP 261 proposing that "java.se" be the only java.* > root module that is resolved when compiling code in the unnamed module > or at runtime when the main class is loaded from the class path [1]. On > the surface then it will "appear" to developers that the JDK does not > have the EE components but from everything we've seen so far, then those > EE components are usually on the class path anyway. Ah ok, so this means that the problem has been punted to the other foot i.e. the only apps affected will be those which i) don't have an EE jar on their classpath and ii) require the (partial) stub implementations provided by Java SE. That sounds much better since such a configuration is of almost no use to anyone and hence is very unlikely to arise. regards, Andrew Dinn ----------- Senior Principal Software Engineer Red Hat UK Ltd Registered in UK and Wales under Company Registration No. 3798903 Directors: Michael Cunningham (US), Michael O'Neill (Ireland), Paul Argiry (US) From dmitry.fazunenko at oracle.com Tue Apr 5 14:45:54 2016 From: dmitry.fazunenko at oracle.com (Dmitry Fazunenko) Date: Tue, 5 Apr 2016 17:45:54 +0300 Subject: RFR (S) 8152432: Implement setting jtreg @requires properties vm.flavor, vm.bits, vm.compMode Message-ID: <5703CFA2.4050403@oracle.com> Hello, Would you please review a relatively simple fix which starts using new jtreg functionality: ability to define custom properties for use with the @requires tag. https://bugs.openjdk.java.net/browse/JDK-8152432 http://cr.openjdk.java.net/~dfazunen/8152432/webrev.00/ As the first experience of using this functionality I just fixed setting of properties which set by jtreg, but set incorrectly relying only on specified vm flags. In the near future we are going to introduce new properties. Tested locally. Thanks, Dima From volker.simonis at gmail.com Tue Apr 5 17:23:54 2016 From: volker.simonis at gmail.com (Volker Simonis) Date: Tue, 5 Apr 2016 19:23:54 +0200 Subject: PPC64 VSX load/store instructions in stubs In-Reply-To: <56FEDBB3.5030106@linux.vnet.ibm.com> References: <56FEDBB3.5030106@linux.vnet.ibm.com> Message-ID: Hi Gustavo, thanks a lot for your contribution. Can you please describe if you've run benchmarks and which performance improvements you saw? With your change if we're running on Power 8, we will only use the fast path for arrays with at least 32 elements. 
For smaller arrays, we will fall back to copying only 2 elements at a time which will be slower than the initial version which copied 4 at a time in that case. Did you verify your changes on both little and big endian? And what about unaligned memory accesses? As far as I read, lxvd2x/stxvd2x still work, but may be slower. I saw there also exist instructions for aligned load/stores. Would it make sense (performance-wise) to use them for the cases where we can be sure that we have aligned memory accesses? Thank you and best regards, Volker On Fri, Apr 1, 2016 at 10:36 PM, Gustavo Romero wrote: > Hi Martin, Hi Volker > > Currently VSX load/store instructions are not being used in PPC64 stubs, > particularly in arraycopy stubs inside generate_arraycopy_stubs() like, > but not limited to, generate_disjoint_{byte,short,int,long}_copy. > > We can speed up mass copy using VSX (Vector-Scalar Extension) load/store > instruction in processors >= POWER8, the same way it's already done for > libc memcpy(). > > This is an initial patch just for jshort_disjoint_arraycopy() VSX vector > load/store: > > http://81.de.7a9f.ip4.static.sl-reverse.com/202539/webrev > > What are your thoughts on that? Is there any impediment to use VSX > instructions in OpenJDK at the moment? > > Thank you. > > Best regards, > Gustavo > From coleen.phillimore at oracle.com Tue Apr 5 17:27:21 2016 From: coleen.phillimore at oracle.com (Coleen Phillimore) Date: Tue, 5 Apr 2016 13:27:21 -0400 Subject: RFR 8153123 : Streamline StackWalker code In-Reply-To: References: <5702FC9B.7020600@oracle.com> Message-ID: <5703F579.8050702@oracle.com> Hi, I've reviewed the hotspot changes and some of the jdk changes. This looks really good. One comment about the jvm function names: I think FillInStackTraceElement is too close of a name to Throwable::fill_in_stack_trace(). -JVM_ENTRY(void, JVM_SetMethodInfo(JNIEnv *env, jobject frame)) +JVM_ENTRY(void, JVM_FillInStackTraceElement(JNIEnv *env, jobject frame, jobject stack)) JVMWrapper("JVM_SetMethodInfo"); - Handle stackFrame(THREAD, JNIHandles::resolve(frame)); - java_lang_StackFrameInfo::fill_methodInfo(stackFrame, THREAD); + Handle stack_frame_info(THREAD, JNIHandles::resolve(frame)); + Handle stack_trace_element(THREAD, JNIHandles::resolve(stack)); + java_lang_StackFrameInfo::fill_methodInfo(stack_frame_info, stack_trace_element, THREAD); JVM_END And the function is called fill_methodInfo in the javaClasses function. I think the JVM and the java_lang_StackFrameInfo function names should be closer. I wonder if the name JVM_ToStackFrameElement() and java_lang_StackFrameInfo::to_stack_frame_element() would be better and then it'd match the Java name. Thanks! Coleen On 4/4/16 9:29 PM, Mandy Chung wrote: >> On Apr 4, 2016, at 4:45 PM, Brent Christian wrote: >> >> Hi, >> >> I'd like to check in some footprint and code reduction changes to the java.lang.StackWalker implementation. >> >> Webrev: >> http://cr.openjdk.java.net/~bchristi/8153123/webrev.00/ >> Bug: >> https://bugs.openjdk.java.net/browse/JDK-8153123 >> > This looks good to me. > > One thing to mention is that this patch is a follow-up work from the investigation on what it takes to enable Throwable to use StackWalker (JDK-8141239). The current built-in VM backtrace is very compact and performant. We have identified and prototyped the performance improvements if Throwable backtrace is generated using stack walker.
There are some performance gaps that we agree to defer JDK-8141239 to a future release and improve the footprint performance and GC throughput concerns when MemberNames are stored in the throwable backtrace. > > Mandy > From coleen.phillimore at oracle.com Tue Apr 5 17:29:11 2016 From: coleen.phillimore at oracle.com (Coleen Phillimore) Date: Tue, 5 Apr 2016 13:29:11 -0400 Subject: RFR 8153123 : Streamline StackWalker code In-Reply-To: <5703F579.8050702@oracle.com> References: <5702FC9B.7020600@oracle.com> <5703F579.8050702@oracle.com> Message-ID: <5703F5E7.4060404@oracle.com> Also meant to include core-libs-dev in the email. Thanks, Coleen On 4/5/16 1:27 PM, Coleen Phillimore wrote: > > Hi, I've reviewed the hotspot changes and some of the jdk changes. > This looks really good. > > One comment about the jvm function names: > > I think FillInStackTraceElement is too close of a name to > Throwable::fill_in_stack_trace(). > > -JVM_ENTRY(void, JVM_SetMethodInfo(JNIEnv *env, jobject frame)) > +JVM_ENTRY(void, JVM_FillInStackTraceElement(JNIEnv *env, jobject > frame, jobject stack)) > JVMWrapper("JVM_SetMethodInfo"); > - Handle stackFrame(THREAD, JNIHandles::resolve(frame)); > - java_lang_StackFrameInfo::fill_methodInfo(stackFrame, THREAD); > + Handle stack_frame_info(THREAD, JNIHandles::resolve(frame)); > + Handle stack_trace_element(THREAD, JNIHandles::resolve(stack)); > + java_lang_StackFrameInfo::fill_methodInfo(stack_frame_info, > stack_trace_element, THREAD); JVM_END > > > And the function is called fill_methodInfo in the javaClasses function. > > I think the JVM and the java_lang_StackFrameInfo function names should > be closer. > > I wonder if the name JVM_ToStackFrameElement() and > java_lang_StackFrameInfo::to_stack_frame_element() would be better and > then it'd match the Java name. > > Thanks! > Coleen > > On 4/4/16 9:29 PM, Mandy Chung wrote: >>> On Apr 4, 2016, at 4:45 PM, Brent Christian >>> wrote: >>> >>> Hi, >>> >>> I'd like to check in some footprint and code reduction changes to >>> the java.lang.StackWalker implementation. >>> >>> Webrev: >>> http://cr.openjdk.java.net/~bchristi/8153123/webrev.00/ >>> Bug: >>> https://bugs.openjdk.java.net/browse/JDK-8153123 >>> >> This looks good to me. >> >> One thing to mention is that this patch is a follow-up work from the >> investigation on what it takes to enable Throwable to use StackWalker >> (JDK-8141239). The current built-in VM backtrace is very compact and >> performant. We have identified and prototypes the performance >> improvements if Throwable backtrace is generated using stack walker. >> There are some performance gaps that we agree to defer JDK-8141239 to >> a future release and improve the footprint performance and GC >> throughput concerns when MemberNames are stored in the throwable >> backtrace. >> >> Mandy >> > From vladimir.x.ivanov at oracle.com Tue Apr 5 17:44:39 2016 From: vladimir.x.ivanov at oracle.com (Vladimir Ivanov) Date: Tue, 5 Apr 2016 20:44:39 +0300 Subject: RFR (L): 8149374: Replace C1-specific collection classes with universal collection classes In-Reply-To: <5703B23B.2030008@oracle.com> References: <56FAFF3E.3020507@oracle.com> <56FD3ECF.1090800@oracle.com> <5702CCB0.9050701@oracle.com> <5703B23B.2030008@oracle.com> Message-ID: <5703F987.1000701@oracle.com> Filipp, I'll sponsor the change. Thanks for taking care of it. Best regards, Vladimir Ivanov On 4/5/16 3:40 PM, Coleen Phillimore wrote: > > Filip, Thank you for your answers. This change looks really good!! 
> Coleen > > On 4/5/16 3:23 AM, Filipp Zhinkin wrote: >> Hi Coleen, >> >> thanks for taking a look at it. >> >> On Mon, Apr 4, 2016 at 11:21 PM, Coleen Phillimore >> wrote: >>> Thank you for CCing hotspot-dev. This change is great! I reviewed >>> the >>> runtime files. >>> >>> http://cr.openjdk.java.net/~fzhinkin/8149374/webrev.01/src/share/vm/prims/jvmtiRedefineClasses.cpp.udiff.html >>> >>> >>> Why does this have three parameters? >>> >>> - _index_map_p = new intArray(scratch_cp->length(), -1); >>> + _index_map_p = new intArray(scratch_cp->length(), >>> scratch_cp->length(), >>> -1); >> GrowableArray won't initialize elements in backing array until you ask >> it to. >> And it also won't allow to access elements that were not initialized. >> >> So we have to pass three parameters there to allocate backing array >> and fill it with -1. >> >>> Why not just change it to: >>> >>> _index_map_p = new GrowableArray(scratch_cp->length()); >> We use -1 there for CP entries that were not mapped during constant >> pools merging. >> >>> I don't see the three argument constructor to GrowableArray that >>> takes -1 >>> (??) >> It's the one that take init size, length, filler and few other >> implicit parameters: >> http://hg.openjdk.java.net/jdk9/hs-comp/hotspot/annotate/tip/src/share/vm/utilities/growableArray.hpp#l178 >> >> >>> Is it possible to completely eliminate intArray, intStack, boolArray and >>> boolStack, and the CHeapArray ? If so array.hpp should really go in >>> directory oops since the only Array<> left is for metaspace. Maybe >>> this >>> can be a further cleanup? >> I've already eliminated CHeapArray in the latest webrev [*], >> so only typedefs are preventing array.hpp movement. >> >> I'd prefer to eliminate typedefs and move array.hpp to oops directory >> in separate CR just >> to avoid webrev's growing and simplify reviewing. >> But if it's ok, then I can do it within this CR. >> >> Thanks, >> Filipp. >> >> [*] http://cr.openjdk.java.net/~fzhinkin/8149374/webrev.02/ >> >>> Wow, thanks! >>> >>> Coleen >>> >>> >>> On 3/31/16 11:14 AM, Vladimir Kozlov wrote: >>>> Hi Filipp, >>>> >>>> Yes, this looks better. CCing to hotspot-dev for Runtime and GC >>>> groups to >>>> look on. >>>> >>>> Thanks, >>>> Vladimir >>>> >>>> On 3/31/16 8:08 AM, Filipp Zhinkin wrote: >>>>> Hi Vladimir, >>>>> >>>>> thank you for looking at this change. >>>>> >>>>> On Wed, Mar 30, 2016 at 1:18 AM, Vladimir Kozlov >>>>> wrote: >>>>>> Nice clean up but I don't see any source code removed. What >>>>>> benefits we >>>>>> have >>>>>> then? >>>>>> I understand that we don't generate subclasses for ResourceArray >>>>>> and use >>>>>> GrowableArray. But it will not save space I think. >>>>>> What prevents us to remove ResourceArray at all? >>>>> >>>>> CMS's ParScanThreadStateSet is inherited from ResourceArray, >>>>> so it should be updated before removing ResourceArray: >>>>> >>>>> http://cr.openjdk.java.net/~fzhinkin/8149374/webrev.01/ >>>>> >>>>>> On 3/11/16 3:42 AM, Filipp Zhinkin wrote: >>>>>>> >>>>>>> Hi all, >>>>>>> >>>>>>> please review a fix for JDK-8149374: >>>>>>> >>>>>>> Webrev: http://cr.openjdk.java.net/~fzhinkin/8149374/webrev.00/ >>>>>>> Bug: https://bugs.openjdk.java.net/browse/JDK-8149374 >>>>>>> Testing done: hotspot_all tests + CTW >>>>>>> >>>>>>> I've replaced all usages of collections defined via define_array and >>>>>>> define_stack macros with GrowableArray. >>>>>>> >>>>>>> There are good and bad news regarding performance impact of that >>>>>>> change. 
>>>>>>> Unfortunately, C1 compilation time for CTW-scenario w/ release bits >>>>>>> increased from 51.07±0.28s to 52.99±0.23s (it's about 3.5%). >>>>>> >>>>>> It is an acceptable regression, I think. I don't think we should optimize >>>>>> and >>>>>> make more complex GrowableArray just to save 0.5% of performance >>>>>> for C2. >>>>> >>>>> As long as GrowableArray is used in different Hotspot's subsystems it >>>>> may be beneficial to optimize it, >>>>> but I've executed SPECjvm2008's startup.* benchmarks and there was no >>>>> significant difference. >>>>> >>>>> If ~3% regression is OK for C1 then I'm fine with leaving >>>>> GrowableArray's initialization >>>>> in its current state unless there will be other reasons to speed it >>>>> up. >>>>> >>>>> Thanks, >>>>> Filipp. >>>>> >>>>>> Thanks, >>>>>> Vladimir >>>>>> >>>>>> >>>>>>> Such difference is caused by eager initialization of GrowableArray's >>>>>>> backing array elements [1]. I can imagine when we actually need to >>>>>>> force >>>>>>> initialization and de-initialization during array's >>>>>>> growing/destruction, but for some types like c++ primitive types or >>>>>>> pointers such initialization does not make much sense, because >>>>>>> GrowableArray is not allowing to access an element which was not >>>>>>> explicitly placed inside of it. And as long as GrowableArray most >>>>>>> widely used to store pointers we're simply wasting the time with >>>>>>> initialization. >>>>>>> >>>>>>> I've measured CTW time with the following workaround which implements >>>>>>> initialization for numeric types and pointers as no-op and C1 >>>>>>> compilation time returned back to values that were measured before >>>>>>> original change (51.06±0.24s): >>>>>>> >>>>>>> >>>>>>> http://cr.openjdk.java.net/~fzhinkin/growableArrayInitialization/webrev/ >>>>>>> >>>>>>> >>>>>>> I've also measured C2 compilation time and it dropped down by a few >>>>>>> seconds too: 1138±9s w/o GrowableArray's change and 1132±5s w/ it. >>>>>>> >>>>>>> Summing up: I guess we should avoid GrowableArray's backing array >>>>>>> initialization for some types, don't we? >>>>>>> >>>>>>> Best regards, >>>>>>> Filipp >>>>>>> >>>>>>> [1] >>>>>>> >>>>>>> http://hg.openjdk.java.net/jdk9/hs-comp/hotspot/file/323b8370b0f6/src/share/vm/utilities/growableArray.hpp#l165 >>>>>>> >>>>>>> > From daniel.daugherty at oracle.com Tue Apr 5 18:10:04 2016 From: daniel.daugherty at oracle.com (Daniel D. Daugherty) Date: Tue, 5 Apr 2016 12:10:04 -0600 Subject: RFR: JDK-8152666: The new Hotspot Build System In-Reply-To: <56F4F0EE.1040508@oracle.com> References: <56F4F0EE.1040508@oracle.com> Message-ID: <5703FF7C.6010309@oracle.com> On 3/25/16 2:03 AM, Erik Joelsson wrote: > Hello, > > Here is the initial review for the new Hotspot Build System, as > described in " JEP 284: New HotSpot Build System". This patch adds the > new build system alongside the old and makes the new system the > default. The old build system will remain for a (hopefully) short > while until we feel confident it is no longer needed. This enables us > to iron out any details that we might have missed with minimal > disruption for the users. The goal is to remove the old system after > one week of the new going in. During that time, both build systems > will have to be kept in sync. For that to be possible, all changes > touching anything in the make directory need to be reviewed by me. > > In this patch, the makefiles for the new build system are located in > hotspot/makefiles.
When we apply the second phase, where we remove the > old build system, the new will move into the proper hotspot/make > directory. > > To activate the old build system after this patch has been applied, > use the configure arg "--disable-new-hotspot-build". > > For more information about how the new build works and how to interact > with it, Magnus wrote a document that is still relevant: > http://hg.openjdk.java.net/build-infra/jdk9/file/tip/support/new-hotspot-build.md > > The hotspot build differs from all other libraries in the JDK in that the > library is (potentially) built multiple times with different conditions (-D > flags, for instance). The most common combination is building both the 'client' > and 'server' variant, but other combinations are possible. While this state of > affairs is not universally appreciated :-), it still is a use case that we need > to support, and it affects the entire build system for hotspot. Thanks for the humor and for retaining this important difference in the way HotSpot builds versus more sane/simple systems. > The new build supports the following variants: > > * server (C1+C2) The above "server" variant is the "tiered server". Does the new build system support the "C2 server" variant? What about the 32-bit server and 64-bit server build variants? For example, on Linux you can have: * C1/Client, 32-bit * C2/Server, 32-bit * Tiered (C1 & C2), 32-bit * C2/Server, 64-bit * Tiered (C1 + C2), 64-bit The above wide range of variants is also true for Win*. > The main method of verification for this patch has been running the > compare.sh script to verify that the output is equivalent to the old > build in as many cases as possible. In most configurations we have > reached a high level of confidence that we produce equivalent > binaries, but there are some exceptions that should be mentioned: > > * Solaris sparcv9 slowdebug produces differences when comparing > disassembly output from libjvm.so. I have not been able to find any > meaningful differences in compiler or linker flags to explain this. > * Windows server jvm.dll ends up with some functions in different > order in the disassembly output. From what I can tell, the bits are > otherwise equivalent. > > We have also run the runtime nightlies with no notable failures. > > This is a pretty big patch and I expect it to take some time to get > properly reviewed. It contains contributions from Magnus Ihse Bursie, > Erik Joelsson and Ingemar ?berg. > > Bug: https://bugs.openjdk.java.net/browse/JDK-8152666 > Webrev: http://cr.openjdk.java.net/~erikj/8152666/webrev.01/index.html General Please make sure all the copyrights are updated. common/autoconf/basics.m4 No comments. common/autoconf/build-performance.m4 No comments. common/autoconf/buildjdk-spec.gmk.in No comments. common/autoconf/compare.sh.in No comments. common/autoconf/configure No comments. common/autoconf/configure.ac No comments. common/autoconf/flags.m4 L274: SHARED_LIBRARY_FLAGS="-dynamiclib -compatibility_version 1.0.0 -current_version 1.0.0 $PICFLAG" L275: JVM_CFLAGS="$JVM_CFLAGS -fPIC" L275 is new, but seeing it next to L274 makes me wonder if $PICFLAG should be used instead of the literal '-fPIC'? L303: JVM_CFLAGS="$JVM_CFLAGS -fPIC" Same question about literal '-fPIC'. For most of the changes to flags.m4, I can't see how any of it relates to the new HotSpot build. Update: Now I'm wondering if this is one of those files that we typically don't review because it is auto generated. Sorry, don't remember for sure. 
common/autoconf/generated-configure.sh 2642 lines changed... I think this is one of those files you're supposed to skip in build-dev review... :-| common/autoconf/help.m4 L179: $PRINTF "Which are valid to use depends on the target platform.\n " L180: $PRINTF "%s " $VALID_JVM_FEATURES Why are there blanks after the last '\n' on L179 instead of at the beginning of L180? common/autoconf/hotspot-spec.gmk.in No comments. common/autoconf/hotspot.m4 L46: # Check if the specified JVM features are explicitely enabled. To be used in Typo: 'explicitely' -> 'explicitly' L59: # server: normal interpreter, and a tiered C1/C2 compiler So no support for a C2-only server config? L77: # Have the user listed more than one variant? Typo: 'Have' -> 'Has' common/autoconf/jdk-options.m4 No comments other than to say thanks for keeping support for 'optimized' builds. common/autoconf/jdk-version.m4 No comments. common/autoconf/lib-std.m4 No comments. common/autoconf/libraries.m4 No comments. common/autoconf/platform.m4 No comments, but mind numbing amount of diffs. common/autoconf/spec.gmk.in No comments. common/autoconf/toolchain.m4 No comments. common/autoconf/version-numbers No comments. common/bin/compare.sh No comments. common/bin/compare_exceptions.sh.incl No comments. make/Jprt.gmk No comments. make/Main.gmk No comments other than the 'hotspot-ide-project' target looks interesting... make/common/MakeBase.gmk No comments. make/common/NativeCompilation.gmk L649: else ifeq (LOW, $$($1_OPTIMIZATION)) L650: $1_OPT_CFLAGS := $(C_O_FLAG_NORM) L651: $1_OPT_CXXFLAGS := $(CXX_O_FLAG_NORM) Instead of "_NORM", I was expecting "_LOW". L652: else ifeq (HIGH, $$($1_OPTIMIZATION)) L653: $1_OPT_CFLAGS := $(C_O_FLAG_HI) L654: $1_OPT_CXXFLAGS := $(CXX_O_FLAG_HI) Instead of "_HI" I was expecting "_HIGH". make/jprt.properties L136: # Don't disable precompiled headers on windows. It's simply too slow. This is a surprise. Not the slowness part, but not being able to do a non-PCH JPRT build on Win*. IMHO, it's a little too much motherhood... jdk/make/Import.gmk No comments. jdk/make/copy/Copy-java.base.gmk No comments. jdk/make/lib/CoreLibraries.gmk No comments. hotspot/makefiles/BuildHotspot.gmk No comments. hotspot/makefiles/Dist.gmk L52: define macosx_universalize I thought MacOS X universal support was going away? Update: OK, I see the mention of 8069540 ahead... L120: # these files are identical, and just pick one arbitrarily to use as souce. Typo: 'souce' -> 'source' L139: # This might have been defined in a custom extenstion Typo: 'extenstion' -> 'extension' L168: # NOTE: In the old build, this file was not copied on Windows. L169: ifneq ($(OPENJDK_TARGET_OS), windows) L170: $(eval $(call SetupCopyFiles, COPY_JVMTI_HTML, \ I'm not quite sure why the jvmti.html work is done for more than a single platform. Update: Thinking about this more... I vaguely remember that JVM/TI tracing used to be disabled in Client VMs. Don't know if that's still the case. hotspot/makefiles/HotspotCommon.gmk No comments. hotspot/makefiles/gensrc/GenerateSources.gmk No comments. hotspot/makefiles/gensrc/GensrcAdlc.gmk L98: # NOTE: Windows adlc flags was different in the old build. Is this really L99: # correct? John Rose may know the answer to this historical question. hotspot/makefiles/gensrc/GensrcDtrace.gmk No comments. hotspot/makefiles/gensrc/GensrcJvmti.gmk No comments. hotspot/makefiles/ide/CreateVSProject.gmk No comments. hotspot/makefiles/lib/CompileDtracePostJvm.gmk No comments. hotspot/makefiles/lib/CompileDtracePreJvm.gmk No comments. 
hotspot/makefiles/lib/CompileJvm.gmk No comments. hotspot/makefiles/lib/CompileLibjsig.gmk No comments. hotspot/makefiles/lib/CompileLibraries.gmk No comments. hotspot/makefiles/lib/JvmFeatures.gmk No comments. hotspot/makefiles/lib/JvmMapfile.gmk No comments. hotspot/makefiles/lib/JvmOverrideFiles.gmk No comments. hotspot/makefiles/mapfiles/libjsig/mapfile-vers-solaris hotspot/makefiles/mapfiles/libjvm_db/mapfile-vers hotspot/makefiles/mapfiles/libjvm_dtrace/mapfile-vers No comments on the mapfiles. hotspot/makefiles/symbols/symbols-aix hotspot/makefiles/symbols/symbols-aix-debug hotspot/makefiles/symbols/symbols-linux hotspot/makefiles/symbols/symbols-macosx hotspot/makefiles/symbols/symbols-shared hotspot/makefiles/symbols/symbols-solaris hotspot/makefiles/symbols/symbols-solaris-dtrace-compiler1 hotspot/makefiles/symbols/symbols-solaris-dtrace-compiler2 hotspot/makefiles/symbols/symbols-unix No comments on the symbol files. Thumbs up on this fix; I don't think that anything I noted above is a show stopper for this changeset. Dan > > /Erik From claes.redestad at oracle.com Tue Apr 5 18:10:56 2016 From: claes.redestad at oracle.com (Claes Redestad) Date: Tue, 5 Apr 2016 20:10:56 +0200 Subject: RFR 8153123 : Streamline StackWalker code In-Reply-To: <5702FC9B.7020600@oracle.com> References: <5702FC9B.7020600@oracle.com> Message-ID: <5703FFB0.5090303@oracle.com> Hi, On 04/05/2016 01:45 AM, Brent Christian wrote: > Hi, > > I'd like to check in some footprint and code reduction changes to the > java.lang.StackWalker implementation. > > Webrev: > http://cr.openjdk.java.net/~bchristi/8153123/webrev.00/ this looks really good to me. It seems the new implementation of StackFrameInfo::toStackTraceElement reads the volatile field ste twice on the fast path, though, so perhaps consider something like this: + StackTraceElement s = ste; + if (s == null) { + synchronized(this) { + s = ste; + if (s == null) { + s = new StackTraceElement(); + toStackTraceElement0(s); + ste = s; + } + } + } + return s; Thanks! /Claes > Bug: > https://bugs.openjdk.java.net/browse/JDK-8153123 > > A summary of the changes: > > * remove the "stackwalk.newThrowable" system property and > "MemberNameInStackFrame" VM flag, originally left in to aid benchmarking > > * Streamline StackFrameInfo fields > > * Refactor/streamline StackStreamFactory (no more separate > classes[]/StackFrame[] arrays, remove unneeded (for now) > StackStreamFactory.StackTrace class) > > > Given the hotspot changes, I plan to push this through hs-rt. > > Thanks, > -Brent > From coleen.phillimore at oracle.com Tue Apr 5 18:25:19 2016 From: coleen.phillimore at oracle.com (Coleen Phillimore) Date: Tue, 5 Apr 2016 14:25:19 -0400 Subject: RFR 8153123 : Streamline StackWalker code In-Reply-To: <5703F5E7.4060404@oracle.com> References: <5702FC9B.7020600@oracle.com> <5703F579.8050702@oracle.com> <5703F5E7.4060404@oracle.com> Message-ID: <5704030F.80906@oracle.com> A correction below. On 4/5/16 1:29 PM, Coleen Phillimore wrote: > > Also meant to include core-libs-dev in the email. > Thanks, > Coleen > > On 4/5/16 1:27 PM, Coleen Phillimore wrote: >> >> Hi, I've reviewed the hotspot changes and some of the jdk changes. >> This looks really good. >> >> One comment about the jvm function names: >> >> I think FillInStackTraceElement is too close of a name to >> Throwable::fill_in_stack_trace(). 
>> >> -JVM_ENTRY(void, JVM_SetMethodInfo(JNIEnv *env, jobject frame)) >> +JVM_ENTRY(void, JVM_FillInStackTraceElement(JNIEnv *env, jobject >> frame, jobject stack)) >> JVMWrapper("JVM_SetMethodInfo"); >> - Handle stackFrame(THREAD, JNIHandles::resolve(frame)); >> - java_lang_StackFrameInfo::fill_methodInfo(stackFrame, THREAD); >> + Handle stack_frame_info(THREAD, JNIHandles::resolve(frame)); >> + Handle stack_trace_element(THREAD, JNIHandles::resolve(stack)); >> + java_lang_StackFrameInfo::fill_methodInfo(stack_frame_info, >> stack_trace_element, THREAD); JVM_END >> >> >> And the function is called fill_methodInfo in the javaClasses function. >> >> I think the JVM and the java_lang_StackFrameInfo function names >> should be closer. >> >> I wonder if the name JVM_ToStackFrameElement() and >> java_lang_StackFrameInfo::to_stack_frame_element() would be better >> and then it'd match the Java name. >> I meant JVM_ToStackTraceElement() and java_lang_StackFrameInfo::to_stack_trace_element(), since it's producing a StackTraceElement. thanks, Coleen >> Thanks! >> Coleen >> >> On 4/4/16 9:29 PM, Mandy Chung wrote: >>>> On Apr 4, 2016, at 4:45 PM, Brent Christian >>>> wrote: >>>> >>>> Hi, >>>> >>>> I'd like to check in some footprint and code reduction changes to >>>> the java.lang.StackWalker implementation. >>>> >>>> Webrev: >>>> http://cr.openjdk.java.net/~bchristi/8153123/webrev.00/ >>>> Bug: >>>> https://bugs.openjdk.java.net/browse/JDK-8153123 >>>> >>> This looks good to me. >>> >>> One thing to mention is that this patch is a follow-up work from the >>> investigation on what it takes to enable Throwable to use >>> StackWalker (JDK-8141239). The current built-in VM backtrace is very >>> compact and performant. We have identified and prototypes the >>> performance improvements if Throwable backtrace is generated using >>> stack walker. There are some performance gaps that we agree to >>> defer JDK-8141239 to a future release and improve the footprint >>> performance and GC throughput concerns when MemberNames are stored >>> in the throwable backtrace. >>> >>> Mandy >>> >> > From ioi.lam at oracle.com Tue Apr 5 20:58:07 2016 From: ioi.lam at oracle.com (Ioi Lam) Date: Tue, 05 Apr 2016 13:58:07 -0700 Subject: RFR [XS] 8153297 - [TESTBUG] Enhance test/testlibrary/ClassFileInstaller.java to support JAR files In-Reply-To: <5703C6D7.6010002@oracle.com> References: <56FEA59C.9020805@oracle.com> <5703C6D7.6010002@oracle.com> Message-ID: <570426DF.40607@oracle.com> HI Lois, Thanks for the review. I've talked to Misha about the removal of BasicJarBuilder, and he said it's better to do in in a follow on bug, which he will own. So I'll push the change as is and let Misha handle the removal of BasicJarBuilder. Thanks - Ioi On 4/5/16 7:08 AM, Lois Foltan wrote: > Hi Ioi, > > This change looks good, thanks by the way since I am working on > supporting an updated -Xpatch format and have to write tests to create > JAR files as well. I had copied BasicJarBuilder.java from the > test/runtime/SharedArchiveFile directory. Can you remove > BasicJarBuilder.java as well as part of this change? > > Thanks, > Lois > > On 4/1/2016 12:45 PM, Ioi Lam wrote: >> Please review a very small fix: >> >> http://cr.openjdk.java.net/~iklam/jdk9/8153300-enhance-classfileinstaller.v01/ >> >> >> Bug: [TESTBUG] Enhance test/testlibrary/ClassFileInstaller.java to >> support JAR files >> >> https://bugs.openjdk.java.net/browse/JDK-8153300 >> >> Summary of fix: >> >> Many hotspot tests need to create JAR files. 
The current method >> is messy: >> >> [1] Because JTREG compiles .class files into various directories, >> we need >> to use ClassFileInstaller to find the .class file and copy it >> into the >> current directory. >> [2] Then, there are various ad-hoc calls to sun.tools.jar.Main to >> create the >> JAR file. This is not desirable because sun.tools.jar is an >> internal >> package and javac gives warnings about it. >> >> I have improved ClassFileInstaller so that JAR files can be easily >> created using JTREG tags: >> >> * @build ClassFileInstaller sun.hotspot.WhiteBox >> * @run main ClassFileInstaller -jar whitebox.jar >> sun.hotspot.WhiteBox >> >> To handle more complex use cases, new APIs are also added to >> ClassFileInstaller >> for programmatically creating JAR files inside the test source code. >> >> I converted two CDS test cases to use the new functionality. >> >> FYI, I am also working on new tests for a closed issue >> (JDK-8153297) that use >> the new functionality. >> >> Thanks >> - Ioi >> >> >> > From lois.foltan at oracle.com Tue Apr 5 22:37:49 2016 From: lois.foltan at oracle.com (Lois Foltan) Date: Tue, 05 Apr 2016 18:37:49 -0400 Subject: RFR [XS] 8153297 - [TESTBUG] Enhance test/testlibrary/ClassFileInstaller.java to support JAR files In-Reply-To: <570426DF.40607@oracle.com> References: <56FEA59C.9020805@oracle.com> <5703C6D7.6010002@oracle.com> <570426DF.40607@oracle.com> Message-ID: <57043E3D.8010001@oracle.com> On 4/5/2016 4:58 PM, Ioi Lam wrote: > HI Lois, > > Thanks for the review. I've talked to Misha about the removal of > BasicJarBuilder, and he said it's better to do in in a follow on bug, > which he will own. > > So I'll push the change as is and let Misha handle the removal of > BasicJarBuilder. Sounds good. Lois > > Thanks > - Ioi > > On 4/5/16 7:08 AM, Lois Foltan wrote: >> Hi Ioi, >> >> This change looks good, thanks by the way since I am working on >> supporting an updated -Xpatch format and have to write tests to >> create JAR files as well. I had copied BasicJarBuilder.java from the >> test/runtime/SharedArchiveFile directory. Can you remove >> BasicJarBuilder.java as well as part of this change? >> >> Thanks, >> Lois >> >> On 4/1/2016 12:45 PM, Ioi Lam wrote: >>> Please review a very small fix: >>> >>> http://cr.openjdk.java.net/~iklam/jdk9/8153300-enhance-classfileinstaller.v01/ >>> >>> >>> Bug: [TESTBUG] Enhance test/testlibrary/ClassFileInstaller.java to >>> support JAR files >>> >>> https://bugs.openjdk.java.net/browse/JDK-8153300 >>> >>> Summary of fix: >>> >>> Many hotspot tests need to create JAR files. The current method >>> is messy: >>> >>> [1] Because JTREG compiles .class files into various >>> directories, we need >>> to use ClassFileInstaller to find the .class file and copy >>> it into the >>> current directory. >>> [2] Then, there are various ad-hoc calls to sun.tools.jar.Main >>> to create the >>> JAR file. This is not desirable because sun.tools.jar is an >>> internal >>> package and javac gives warnings about it. >>> >>> I have improved ClassFileInstaller so that JAR files can be easily >>> created using JTREG tags: >>> >>> * @build ClassFileInstaller sun.hotspot.WhiteBox >>> * @run main ClassFileInstaller -jar whitebox.jar >>> sun.hotspot.WhiteBox >>> >>> To handle more complex use cases, new APIs are also added to >>> ClassFileInstaller >>> for programmatically creating JAR files inside the test source >>> code. >>> >>> I converted two CDS test cases to use the new functionality. 
>>> >>> FYI, I am also working on new tests for a closed issue >>> (JDK-8153297) that use >>> the new functionality. >>> >>> Thanks >>> - Ioi >>> >>> >>> >> > From coleen.phillimore at oracle.com Tue Apr 5 23:37:48 2016 From: coleen.phillimore at oracle.com (Coleen Phillimore) Date: Tue, 5 Apr 2016 19:37:48 -0400 Subject: RFR 8151939: VM_Version_init() print buffer is too small Message-ID: <57044C4C.9080601@oracle.com> Summary: Increase buffer size, use logging to print out version and os information This replaces several -XX:+PrintMiscellaneous -XX:+Verbose to -Xlog:os or -Xlog:os+cpu. Most use info level logging because it's only printed once at the beginning, except where printing is in the signal handler, which uses debug level. Also, errors in setup use info level (not warning) since they never printed the warnings before without PrintMiscellaneous and Verbose. busaa027% java -Xlog:os -version [0.008s][info][os] SafePoint Polling address: 0x00007fde1d37f000 [0.008s][info][os] Memory Serialize Page address: 0x00007fde1d37d000 [0.009s][info][os] HotSpot is running with glibc 2.12, NPTL 2.12 java version "9-internal" Java(TM) SE Runtime Environment (fastdebug build 9-internal+0-2016-04-05-170806.cphillim.jdk9.vm-version) Java HotSpot(TM) 64-Bit Server VM (fastdebug build 9-internal+0-2016-04-05-170806.cphillim.jdk9.vm-version-coleen, mixed mode) busaa027% java -Xlog:os,os+cpu -version [0.008s][info][os] SafePoint Polling address: 0x00007f49c021f000 [0.008s][info][os] Memory Serialize Page address: 0x00007f49c021d000 [0.009s][info][os] HotSpot is running with glibc 2.12, NPTL 2.12 [0.011s][info][os,cpu] Logical CPUs per core: 2 [0.011s][info][os,cpu] L1 data cache line size: 64 [0.011s][info][os,cpu] UseSSE=4 UseAVX=2 UseAES=1 MaxVectorSize=64Allocation prefetching: PREFETCHNTA at distance 192, 4 lines of 64 bytes [0.011s][info][os,cpu] PrefetchCopyIntervalInBytes 576 [0.011s][info][os,cpu] PrefetchScanIntervalInBytes 576 [0.011s][info][os,cpu] PrefetchFieldsAhead 1 [0.011s][info][os,cpu] ContendedPaddingWidth 128 [0.011s][info][os,cpu] CPU:total 72 (18 cores per cpu, 2 threads per core) family 6 model 63 stepping 2, cmov, cx8, fxsr, mmx, sse, sse2, sse3, ssse3, sse4.1, sse4.2, popcnt, avx, avx2, aes, clmul, erms, lzcnt, ht, tsc, tscinvbit, bmi1, bmi2 [0.011s][info][os,cpu] CPU Model and flags from /proc/cpuinfo: [0.011s][info][os,cpu] model name : Intel(R) Xeon(R) CPU E5-2699 v3 @ 2.30GHz [0.011s][info][os,cpu] flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf eagerfpu pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 fma cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm ida arat epb xsaveopt pln pts dtherm tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid java version "9-internal" Java(TM) SE Runtime Environment (fastdebug build 9-internal+0-2016-04-05-170806.cphillim.jdk9.vm-version) Java HotSpot(TM) 64-Bit Server VM (fastdebug build 9-internal+0-2016-04-05-170806.cphillim.jdk9.vm-version-coleen, mixed mode) open webrev at http://cr.openjdk.java.net/~coleenp/8151939.01/webrev bug link https://bugs.openjdk.java.net/browse/JDK-8151939 Tested in rbt and jprt. 
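For reference, the shape of the conversion is roughly the following (an illustrative sketch, not an exact hunk from the webrev; the polling-page line and the variable name are just examples):

    // Before: only printed with -XX:+PrintMiscellaneous -XX:+Verbose
    if (PrintMiscellaneous && Verbose) {
      tty->print_cr("SafePoint Polling address: " INTPTR_FORMAT, p2i(polling_page));
    }

    // After: a Unified Logging call (needs logging/log.hpp), enabled with -Xlog:os
    log_info(os)("SafePoint Polling address: " INTPTR_FORMAT, p2i(polling_page));

The log_debug(os)(...) form is used for the signal handler cases mentioned above, and log_info(os, cpu)(...) for the lines that show up under -Xlog:os+cpu.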
Thanks, Coleen From christian.thalinger at oracle.com Wed Apr 6 01:23:17 2016 From: christian.thalinger at oracle.com (Christian Thalinger) Date: Tue, 5 Apr 2016 15:23:17 -1000 Subject: RFR: JDK-8152666: The new Hotspot Build System In-Reply-To: <5703FF7C.6010309@oracle.com> References: <56F4F0EE.1040508@oracle.com> <5703FF7C.6010309@oracle.com> Message-ID: <1FE98FBA-A48E-4C1A-A622-74331E7BAB30@oracle.com> > On Apr 5, 2016, at 8:10 AM, Daniel D. Daugherty wrote: ? > make/Main.gmk > No comments other than the 'hotspot-ide-project' target > looks interesting... Btw. there is already support to generate IDE configurations today via mx: https://wiki.openjdk.java.net/display/Graal/Instructions integrated with: https://bugs.openjdk.java.net/browse/JDK-8139921 One main advantage, as I pointed out in the review, is that it also includes generated files so there are no unresolved includes or methods anymore: http://mail.openjdk.java.net/pipermail/hotspot-dev/2015-November/020626.html I?m using this every day. From alejandro.murillo at oracle.com Wed Apr 6 05:14:00 2016 From: alejandro.murillo at oracle.com (Alejandro Murillo) Date: Tue, 5 Apr 2016 23:14:00 -0600 Subject: [9] RFR JDK-8153564: Add java/nio/Buffer/BasicByte.java to exclude list until JDK-8153563 is fixed Message-ID: <57049B18.5050307@oracle.com> I'd like to push the changeset below to exclude java/nio/Buffer/BasicByte.java It started failing after the hotspot snapshot was pushed to jdk9/dev tonight. https://bugs.openjdk.java.net/browse/JDK-8153563 has been filed for that failure. $ hg -R jdk9.dev/jdk tip -pv changeset: 14082:5c98c9ad8ff2 tag: tip user: amurillo date: Tue Apr 05 22:06:15 2016 -0700 files: test/ProblemList.txt description: 8153564: Add java/nio/Buffer/BasicByte.java to exclude list until JDK-8153563 is fixed Reviewed-by: tbd diff -r 04f56d4ca167 -r 5c98c9ad8ff2 test/ProblemList.txt --- a/test/ProblemList.txt Tue Apr 05 20:02:21 2016 -0700 +++ b/test/ProblemList.txt Tue Apr 05 22:06:15 2016 -0700 @@ -185,6 +185,8 @@ java/nio/charset/coders/BashStreams.java 8149712 generic-all +java/nio/Buffer/BasicByte.java 8153563 generic-all + ############################################################################ # jdk_rmi -- Alejandro From mandy.chung at oracle.com Wed Apr 6 05:36:23 2016 From: mandy.chung at oracle.com (Mandy Chung) Date: Tue, 5 Apr 2016 22:36:23 -0700 Subject: RFR 8153123 : Streamline StackWalker code In-Reply-To: <57044EC8.7050602@oracle.com> References: <5702FC9B.7020600@oracle.com> <5703F579.8050702@oracle.com> <5703F5E7.4060404@oracle.com> <5704030F.80906@oracle.com> <57044EC8.7050602@oracle.com> Message-ID: > On Apr 5, 2016, at 4:48 PM, Brent Christian wrote: > > Thanks, Coleen. Coordinating method/function names on "to stack trace element" is a fine thing. I've done so in the updated webrev, and also implemented Claes's suggestion. > > http://cr.openjdk.java.net/~bchristi/8153123/webrev.01/index.html > Looks good. Nit: can you add a space after ?synchronized? 
in StackFrameInfo.java line 109: 109 synchronized(this) { Mandy From alejandro.murillo at oracle.com Wed Apr 6 06:22:29 2016 From: alejandro.murillo at oracle.com (Alejandro Murillo) Date: Wed, 6 Apr 2016 00:22:29 -0600 Subject: [9] RFR JDK-8153564: Add java/nio/Buffer/BasicByte.java to exclude list until JDK-8153563 is fixed In-Reply-To: <5704A5CF.2090804@oracle.com> References: <57049B18.5050307@oracle.com> <5704A5CF.2090804@oracle.com> Message-ID: <5704AB25.903@oracle.com> I didn't see that one failing on my jprt sanity job I went to check the code CopyDirectMemory.java, and it doesn't use jdk.internal.misc.Unsafe (at least directly), so doesn't look like it's the same issue. where did you see the failure ? Alejandro On 4/5/2016 11:59 PM, Amy Lu wrote: > java/nio/Buffer/CopyDirectMemory.java > run into the same issue, maybe it could be problem listed together in > this patch? > > Thanks, > Amy > > On 4/6/16 1:14 PM, Alejandro Murillo wrote: >> >> I'd like to push the changeset below to exclude >> java/nio/Buffer/BasicByte.java >> It started failing after the hotspot snapshot was pushed to jdk9/dev >> tonight. >> https://bugs.openjdk.java.net/browse/JDK-8153563 has been filed for >> that failure. >> >> $ hg -R jdk9.dev/jdk tip -pv >> >> changeset: 14082:5c98c9ad8ff2 >> tag: tip >> user: amurillo >> date: Tue Apr 05 22:06:15 2016 -0700 >> files: test/ProblemList.txt >> description: >> 8153564: Add java/nio/Buffer/BasicByte.java to exclude list until >> JDK-8153563 is fixed >> Reviewed-by: tbd >> >> >> diff -r 04f56d4ca167 -r 5c98c9ad8ff2 test/ProblemList.txt >> --- a/test/ProblemList.txt Tue Apr 05 20:02:21 2016 -0700 >> +++ b/test/ProblemList.txt Tue Apr 05 22:06:15 2016 -0700 >> @@ -185,6 +185,8 @@ >> >> java/nio/charset/coders/BashStreams.java 8149712 generic-all >> >> +java/nio/Buffer/BasicByte.java 8153563 generic-all >> + >> ############################################################################ >> >> >> # jdk_rmi >> >> > -- Alejandro From Alan.Bateman at oracle.com Wed Apr 6 07:36:54 2016 From: Alan.Bateman at oracle.com (Alan Bateman) Date: Wed, 6 Apr 2016 08:36:54 +0100 Subject: [9] RFR JDK-8153564: Add java/nio/Buffer/BasicByte.java to exclude list until JDK-8153563 is fixed In-Reply-To: <57049B18.5050307@oracle.com> References: <57049B18.5050307@oracle.com> Message-ID: <5704BC96.1060303@oracle.com> On 06/04/2016 06:14, Alejandro Murillo wrote: > > I'd like to push the changeset below to exclude > java/nio/Buffer/BasicByte.java > It started failing after the hotspot snapshot was pushed to jdk9/dev > tonight. > https://bugs.openjdk.java.net/browse/JDK-8153563 has been filed for > that failure. Would it be possible to add @modules java.base/jdk.internal.misc, as below, and not exclude these tests? -Alan $ hg diff -g . diff --git a/test/java/nio/Buffer/Basic.java b/test/java/nio/Buffer/Basic.java --- a/test/java/nio/Buffer/Basic.java +++ b/test/java/nio/Buffer/Basic.java @@ -22,6 +22,7 @@ */ /* @test + * @modules java.base/jdk.internal.misc * @summary Unit test for buffers * @bug 4413135 4414911 4416536 4416562 4418782 4471053 4472779 4490253 4523725 * 4526177 4463011 4660660 4661219 4663521 4782970 4804304 4938424 6231529 diff --git a/test/java/nio/Buffer/CopyDirectMemory.java b/test/java/nio/Buffer/CopyDirectMemory.java --- a/test/java/nio/Buffer/CopyDirectMemory.java +++ b/test/java/nio/Buffer/CopyDirectMemory.java @@ -25,6 +25,7 @@ * @summary Test view buffer bulk operations for large buffers. 
* @bug 4463011 * + * @modules java.base/jdk.internal.misc * @build Basic * @run main CopyDirectMemory */ From sgehwolf at redhat.com Wed Apr 6 08:38:08 2016 From: sgehwolf at redhat.com (Severin Gehwolf) Date: Wed, 06 Apr 2016 10:38:08 +0200 Subject: RFR(XS): 8153275: Zero JVM fails to initialize after JDK-8152440 In-Reply-To: <1459848614.4486.13.camel@redhat.com> References: <1459791895.3762.14.camel@redhat.com> <57037BFB.1060606@redhat.com> <1459848614.4486.13.camel@redhat.com> Message-ID: <1459931888.3613.10.camel@redhat.com> On Tue, 2016-04-05 at 11:30 +0200, Severin Gehwolf wrote: > On Tue, 2016-04-05 at 09:48 +0100, Andrew Haley wrote: > > > > On 04/04/16 18:44, Severin Gehwolf wrote: > > > > > > > > > Hi, > > > > > > Could somebody please sponsor and review the following Zero-only > > > fix? > > > The fix for JDK-8152440 was incorrect in that it set the value > > > for InitArrayShortSize to an illegal value (-1) failing > > > constraint > > > validation. Albeit not being used it must still pass constraint > > > validation. Otherwise, the JVM fails to initialize and all bets > > > are > > > off. Thoughts? > > > > > > Bug: https://bugs.openjdk.java.net/browse/JDK-8153275 > > > webrev: http://cr.openjdk.java.net/~sgehwolf/webrevs/JDK-8153275/ > > > webrev.01/ > > OK, but please make the comment clearer.??I didn't understand it. > > > > ?"the allowed range [ 0 ... 9223372036854775807 ]" > > > > is much clearer. > Thanks?for the review! > > Updated webrev: > http://cr.openjdk.java.net/~sgehwolf/webrevs/JDK-8153275/webrev.02/ Could somebody sponsor this please? Thanks, Severin From erik.joelsson at oracle.com Wed Apr 6 09:10:38 2016 From: erik.joelsson at oracle.com (Erik Joelsson) Date: Wed, 6 Apr 2016 11:10:38 +0200 Subject: RFR: JDK-8152666: The new Hotspot Build System In-Reply-To: <5703FF7C.6010309@oracle.com> References: <56F4F0EE.1040508@oracle.com> <5703FF7C.6010309@oracle.com> Message-ID: <5704D28E.1060304@oracle.com> Hello Dan and thank you for the review! I know it's a lot to chew through. I have incorporated your changes and published a new webrev: http://cr.openjdk.java.net/~erikj/8152666/webrev.02/ On 2016-04-05 20:10, Daniel D. Daugherty wrote: > > > > The new build supports the following variants: > > > > * server (C1+C2) > > The above "server" variant is the "tiered server". Does the new > build system support the "C2 server" variant? What about the > 32-bit server and 64-bit server build variants? For example, > on Linux you can have: > > * C1/Client, 32-bit > * C2/Server, 32-bit > * Tiered (C1 & C2), 32-bit > * C2/Server, 64-bit > * Tiered (C1 + C2), 64-bit > > The above wide range of variants is also true for Win*. > There is a way to achieve this even if it's not as straight forward. It's controlled through the new "jvm-feature" setting. To build a completely custom set of features for a jvm, you set the --with-jvm-variants=custom and then define the full feature set using --with-jvm-features=compiler2,... For "server, client, core, minimal, zero and zeroshark" there is a predefined set of features while the custom variant has no features by default. > > General > Please make sure all the copyrights are updated. > Done > > common/autoconf/basics.m4 > No comments. > > common/autoconf/build-performance.m4 > No comments. > > common/autoconf/buildjdk-spec.gmk.in > No comments. > > common/autoconf/compare.sh.in > No comments. > > common/autoconf/configure > No comments. > > common/autoconf/configure.ac > No comments. 
> > common/autoconf/flags.m4 > L274: SHARED_LIBRARY_FLAGS="-dynamiclib > -compatibility_version 1.0.0 -current_version 1.0.0 $PICFLAG" > L275: JVM_CFLAGS="$JVM_CFLAGS -fPIC" > > L275 is new, but seeing it next to L274 makes me wonder if > $PICFLAG should be used instead of the literal '-fPIC'? Fixed > > L303: JVM_CFLAGS="$JVM_CFLAGS -fPIC" > Same question about literal '-fPIC'. > Not sure, leaving for now. It seems we leave the PICFLAG empty for the JDK build and only add it to the hotspot build. This should be addressed in a followup where we try to align flag usage more between the different libraries. > For most of the changes to flags.m4, I can't see how any of it > relates to the new HotSpot build. > > Update: Now I'm wondering if this is one of those files that > we typically don't review because it is auto generated. > Sorry, don't remember for sure. It's a file that should be reviewed, only generated-configure.sh can be ignored. The majority of the changes in here are related to cross compiling in the modular world. When cross compiling now, we need to also build a jvm for the build platform in order to run jlink and jmod when building images. With the old hotspot build, that was simpler, just invoke the hotspot build with some ARCH and compiler related variables set. For the rest of the JDK build, an approximation of flags used was enough so the problem was never fully solved. In the new build, we derive all the compiler options in configure so I had to introduce a more proper solution. I did this by parameterizing some macros in flags.m4 and platform.m4 so that we can run them twice, once for the "target" toolchain" and one for the "build" toolchain. These are the majority of the changes you are seeing. I also removed the old hard coded "build" versions of certain flag and platform variables. > common/autoconf/generated-configure.sh > 2642 lines changed... I think this is one of those files > you're supposed to skip in build-dev review... :-| Yes, please do. > > common/autoconf/help.m4 > L179: $PRINTF "Which are valid to use depends on the target > platform.\n " > L180: $PRINTF "%s " $VALID_JVM_FEATURES > Why are there blanks after the last '\n' on L179 instead of > at the beginning of L180? > If you do $PRINTF " %s " $VALID_JVM_FEATURES, it adds those spaces between every element in VALID_JVM_FEATURES. > common/autoconf/hotspot-spec.gmk.in > No comments. > > common/autoconf/hotspot.m4 > L46: # Check if the specified JVM features are explicitely > enabled. To be used in > Typo: 'explicitely' -> 'explicitly' > > L59: # server: normal interpreter, and a tiered C1/C2 compiler > So no support for a C2-only server config? > > L77: # Have the user listed more than one variant? > Typo: 'Have' -> 'Has' > fixed > common/autoconf/jdk-options.m4 > No comments other than to say thanks for keeping support > for 'optimized' builds. > > common/autoconf/jdk-version.m4 > No comments. > > common/autoconf/lib-std.m4 > No comments. > > common/autoconf/libraries.m4 > No comments. > > common/autoconf/platform.m4 > No comments, but mind numbing amount of diffs. > Same explanation as for flags.m4 > common/autoconf/spec.gmk.in > No comments. > > common/autoconf/toolchain.m4 > No comments. > > common/autoconf/version-numbers > No comments. > > common/bin/compare.sh > No comments. > > common/bin/compare_exceptions.sh.incl > No comments. > > make/Jprt.gmk > No comments. > > make/Main.gmk > No comments other than the 'hotspot-ide-project' target > looks interesting... 
> This is the replacement for the visual studio project generator. We currently only support VS here. > make/common/MakeBase.gmk > No comments. > > make/common/NativeCompilation.gmk > L649: else ifeq (LOW, $$($1_OPTIMIZATION)) > L650: $1_OPT_CFLAGS := $(C_O_FLAG_NORM) > L651: $1_OPT_CXXFLAGS := $(CXX_O_FLAG_NORM) > Instead of "_NORM", I was expecting "_LOW". > > L652: else ifeq (HIGH, $$($1_OPTIMIZATION)) > L653: $1_OPT_CFLAGS := $(C_O_FLAG_HI) > L654: $1_OPT_CXXFLAGS := $(CXX_O_FLAG_HI) > Instead of "_HI" I was expecting "_HIGH". > The names here were defined way back when we did build infra for the JDK build. I wouldn't mind better alignment in naming the optimization levels. > make/jprt.properties > L136: # Don't disable precompiled headers on windows. It's simply > too slow. > This is a surprise. Not the slowness part, but not being > able to do a non-PCH JPRT build on Win*. IMHO, it's a > little too much motherhood... > Actually, the old hotspot build does not allow disabling of PCH for windows at all. The flag is simply ignored. In the new build, we treat the flag the same on all platforms, so disabling precompiled headers works on Windows. In the current JPRT config, we disable precompiled headers on all fastdebug builds as a way of making sure we aren't breaking that build configuration. We noticed a major build time regression on Windows fastdebug builds in JPRT until we figured out it was caused by this. Since we aren't currently disabling precompiled header on Windows, I see no reason to start now. The build time regression for just building hotspot is around 2m->12m. > jdk/make/Import.gmk > No comments. > > jdk/make/copy/Copy-java.base.gmk > No comments. > > jdk/make/lib/CoreLibraries.gmk > No comments. > > hotspot/makefiles/BuildHotspot.gmk > No comments. > > hotspot/makefiles/Dist.gmk > L52: define macosx_universalize > I thought MacOS X universal support was going away? > > Update: OK, I see the mention of 8069540 ahead... > Yeah, we need to be binary the same as the old build for now. Hopefully we can get rid of the universal stuff soon. > L120: # these files are identical, and just pick one arbitrarily > to use as souce. > Typo: 'souce' -> 'source' > > L139: # This might have been defined in a custom extenstion > Typo: 'extenstion' -> 'extension' > fixed > L168: # NOTE: In the old build, this file was not copied on Windows. > L169: ifneq ($(OPENJDK_TARGET_OS), windows) > L170: $(eval $(call SetupCopyFiles, COPY_JVMTI_HTML, \ > I'm not quite sure why the jvmti.html work is done for > more than a single platform. > > Update: Thinking about this more... I vaguely remember that > JVM/TI tracing used to be disabled in Client VMs. Don't know > if that's still the case. The jvmti.html file is just copied into the docs bundle later. IMO, the docs bundle should be the same regardless of platform. In practice we only publish the bundle from one build platform anyway. /Erik > > hotspot/makefiles/HotspotCommon.gmk > No comments. > > hotspot/makefiles/gensrc/GenerateSources.gmk > No comments. > > hotspot/makefiles/gensrc/GensrcAdlc.gmk > L98: # NOTE: Windows adlc flags was different in the old > build. Is this really > L99: # correct? > John Rose may know the answer to this historical question. > > hotspot/makefiles/gensrc/GensrcDtrace.gmk > No comments. > > hotspot/makefiles/gensrc/GensrcJvmti.gmk > No comments. > > hotspot/makefiles/ide/CreateVSProject.gmk > No comments. > > hotspot/makefiles/lib/CompileDtracePostJvm.gmk > No comments. 
> > hotspot/makefiles/lib/CompileDtracePreJvm.gmk > No comments. > > hotspot/makefiles/lib/CompileJvm.gmk > No comments. > > hotspot/makefiles/lib/CompileLibjsig.gmk > No comments. > > hotspot/makefiles/lib/CompileLibraries.gmk > No comments. > > hotspot/makefiles/lib/JvmFeatures.gmk > No comments. > > hotspot/makefiles/lib/JvmMapfile.gmk > No comments. > > hotspot/makefiles/lib/JvmOverrideFiles.gmk > No comments. > > hotspot/makefiles/mapfiles/libjsig/mapfile-vers-solaris > hotspot/makefiles/mapfiles/libjvm_db/mapfile-vers > hotspot/makefiles/mapfiles/libjvm_dtrace/mapfile-vers > No comments on the mapfiles. > > hotspot/makefiles/symbols/symbols-aix > hotspot/makefiles/symbols/symbols-aix-debug > hotspot/makefiles/symbols/symbols-linux > hotspot/makefiles/symbols/symbols-macosx > hotspot/makefiles/symbols/symbols-shared > hotspot/makefiles/symbols/symbols-solaris > hotspot/makefiles/symbols/symbols-solaris-dtrace-compiler1 > hotspot/makefiles/symbols/symbols-solaris-dtrace-compiler2 > hotspot/makefiles/symbols/symbols-unix > No comments on the symbol files. > > > Thumbs up on this fix; I don't think that anything I noted > above is a show stopper for this changeset. > > Dan > > >> >> /Erik > From erik.joelsson at oracle.com Wed Apr 6 09:14:38 2016 From: erik.joelsson at oracle.com (Erik Joelsson) Date: Wed, 6 Apr 2016 11:14:38 +0200 Subject: RFR: JDK-8152666: The new Hotspot Build System In-Reply-To: <1FE98FBA-A48E-4C1A-A622-74331E7BAB30@oracle.com> References: <56F4F0EE.1040508@oracle.com> <5703FF7C.6010309@oracle.com> <1FE98FBA-A48E-4C1A-A622-74331E7BAB30@oracle.com> Message-ID: <5704D37E.8040208@oracle.com> Hello, I assume the mx projects are for Java code or do they also generate projects for native code? The new top level target is only meant to replace the old Visual Studio project generator, at least for now. /Erik On 2016-04-06 03:23, Christian Thalinger wrote: > >> On Apr 5, 2016, at 8:10 AM, Daniel D. Daugherty >> > wrote: > > ... > >> make/Main.gmk >> No comments other than the 'hotspot-ide-project' target >> looks interesting... > > Btw. there is already support to generate IDE configurations today via mx: > > https://wiki.openjdk.java.net/display/Graal/Instructions > > integrated with: > > https://bugs.openjdk.java.net/browse/JDK-8139921 > > One main advantage, as I pointed out in the review, is that it also > includes generated files so there are no unresolved includes or > methods anymore: > > http://mail.openjdk.java.net/pipermail/hotspot-dev/2015-November/020626.html > > I'm using this every day. From stefan.karlsson at oracle.com Wed Apr 6 09:30:32 2016 From: stefan.karlsson at oracle.com (Stefan Karlsson) Date: Wed, 6 Apr 2016 11:30:32 +0200 Subject: RFR: 8153583: Make OutputAnalyzer.reportDiagnosticSummary public Message-ID: <5704D738.4010205@oracle.com> Hi all, Please review this patch to make one of the testlibrary functions publicly accessible for tests. http://cr.openjdk.java.net/~stefank/8153583/webrev.01 https://bugs.openjdk.java.net/browse/JDK-8153583 The reason why I need it to be public is that I want to write more complex verification of the output streams, and still be able to call reportDiagnosticSummary when it fails.
For example: long size = parseMemoryString(pageSizeStr); if (size != expectedSize) { output.reportDiagnosticSummary(); throw new RuntimeException("Match from '" + pattern + "' got " + size + " expected: " + expectedSize); } Thanks, StefanK From bengt.rutisson at oracle.com Wed Apr 6 09:33:51 2016 From: bengt.rutisson at oracle.com (Bengt Rutisson) Date: Wed, 6 Apr 2016 11:33:51 +0200 Subject: RFR: 8153583: Make OutputAnalyzer.reportDiagnosticSummary public In-Reply-To: <5704D738.4010205@oracle.com> References: <5704D738.4010205@oracle.com> Message-ID: <5704D7FF.3080007@oracle.com> Hi StefanK, On 2016-04-06 11:30, Stefan Karlsson wrote: > Hi all, > > Please review this patch to make one of the testlibrary functions > publicly accessible for tests. > > http://cr.openjdk.java.net/~stefank/8153583/webrev.01 > https://bugs.openjdk.java.net/browse/JDK-8153583 Looks good. Bengt > > The reason why I need it to be public is that I want to write more > complex verification of the the output streams, and still be able to > call reportDiagnosticSummary when it fails. For example: > > long size = parseMemoryString(pageSizeStr); > if (size != expectedSize) { > output.reportDiagnosticSummary(); > throw new RuntimeException("Match from '" + pattern + "' got " + > size + " expected: " + expectedSize); > } > > Thanks, > StefanK From thomas.stuefe at gmail.com Wed Apr 6 09:38:54 2016 From: thomas.stuefe at gmail.com (=?UTF-8?Q?Thomas_St=C3=BCfe?=) Date: Wed, 6 Apr 2016 11:38:54 +0200 Subject: RFR: 8145934: Make ttyLocker equivalent for Unified Logging framework In-Reply-To: <5703C863.4080403@oracle.com> References: <56BB3FD0.5000104@oracle.com> <3910DA9B-43C9-4C1A-8FD0-993A54225550@oracle.com> <56BCA8C9.102@oracle.com> <56C34F0E.4090803@oracle.com> <90DC33E3-F597-40E4-A317-6C92F4969575@oracle.com> <56EC03A4.1030705@oracle.com> <56FCE56C.6070606@oracle.com> <56FD1481.3090707@oracle.com> <56FD3CC1.4050502@oracle.com> <56FE78B3.2060802@oracle.com> <5703C863.4080403@oracle.com> Message-ID: Hi Marcus, still no luck applying your patch to hs-rt. On a freshly cloned repo I get: hg qpush -v .... cannot patch src/share/vm/logging/logMessage.hpp: file is not tracked .... ---- I still feel that the benefit of different levels per log message is not worth the added complexity, especially since it prevents one from using the log message like a string stream (as you explained, using different log levels means a write must always be a complete line). I understand your motivation, but what you describe could just as well be done as (pseudocode): LogMessage(logging) msg; if (level >= debug) { msg.print("debug message"); if (level >= trace) { msg.print("additional trace information"); } } easier to understand, too. At the added costs of additional comparisons in the caller code. That way LogMessage does not have to know anything about log levels, and hence does not need to keep meta infos about lines, and could have a print() and print_cr() method. But that is just my opinion. .... Other than that, code looks fine. Small remarks: http://cr.openjdk.java.net/~mlarsson/8145934/webrev.03/src/share/vm/logging/log.cpp.udiff.html file_contains_substrings_in_order: Potential truncation if line length > 1024. 
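To illustrate what I mean (a hypothetical sketch only, not the code from the webrev - the helper name and buffer handling are made up): with a fixed-size line buffer an over-long line comes back in pieces, so a substring that happens to span the split point can never match:

    #include <stdio.h>
    #include <string.h>

    // minimal sketch of a line-based matcher using a fixed 1024-byte buffer
    static bool file_contains_substring(FILE* fp, const char* substr) {
      char buf[1024];
      while (fgets(buf, sizeof(buf), fp) != NULL) {
        // a line longer than 1023 characters comes back from fgets() in
        // several pieces, so a match spanning two pieces is silently missed
        if (strstr(buf, substr) != NULL) {
          return true;
        }
      }
      return false;
    }

Reading up to the newline before matching (or just sizing the buffer generously) avoids that.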
-- http://cr.openjdk.java.net/~mlarsson/8145934/webrev.03/src/share/vm/logging/logFileStreamOutput.cpp.udiff.html Both LogFileStreamOutput::write(const LogDecorations& decorations, const char* msg) and LogFileStreamOutput::write(LogMessageBuffer::Iterator msg_iterator) can be made a tiny bit smaller by unifying the else branches, eg: + int written = 0; + os::flockfile(_stream); + for (; !msg_iterator.is_at_end(); msg_iterator++) { + if (use_decorations) { + written += write_decorations(msg_iterator.decorations()); + } + written += jio_fprintf(_stream, "%s\n", msg_iterator.message()); + } + fflush(_stream); + os::funlockfile(_stream); --- http://cr.openjdk.java.net/~mlarsson/8145934/webrev.03/src/share/vm/logging/logPrefix.hpp.udiff.html DEBUG_ONLY(buf[0] = '\0';) \ I would get either rid of this or do this for product too. Doing this only for debug just hides "append-assuming-empty-string" errors in debug case. Otherwise it looks fine to me. Still not a reviewer though :) so others should look at this too. Kind Regards, Thomas On Tue, Apr 5, 2016 at 4:14 PM, Marcus Larsson wrote: > Hi, > > Rebased and aligned the patch with the latest changes to the UL API. > Webrevs updated in place. > > Are we ready to wrap this up? > > Thanks, > Marcus > > > On 04/01/2016 03:33 PM, Marcus Larsson wrote: > >> Hi again, >> >> Updated webrev with removed decoration buffers. Decorations are now >> written directly to the streams with the help of flockfile/funlockfile as >> you suggested. >> >> Webrev: >> http://cr.openjdk.java.net/~mlarsson/8145934/webrev.03/ >> >> Incremental: >> http://cr.openjdk.java.net/~mlarsson/8145934/webrev.02-03/ >> >> Thanks, >> Marcus >> >> On 03/31/2016 05:05 PM, Marcus Larsson wrote: >> >>> >>> On 03/31/2016 03:40 PM, Thomas St?fe wrote: >>> >>>> Hi Marcus, >>>> >>>> On Thu, Mar 31, 2016 at 2:13 PM, Marcus Larsson < >>>> marcus.larsson at oracle.com > wrote: >>>> >>>> Hi Thomas, >>>> >>>> >>>> On 03/31/2016 12:48 PM, Thomas St?fe wrote: >>>> >>>>> Hi Marcus, >>>>> >>>>> nice to see progress on that issue! >>>>> >>>> >>>> Thanks for taking time to look at it. >>>> >>>> >>>>> Unfortunately your patch did not apply to my freshly synced hs-rt >>>>> repository. So I took a "dry look" at your code, and here some >>>>> feedback (by no means complete, and also I am not a (R)eviewer): >>>>> >>>> >>>> I'll rebase it and update the webrev. >>>> >>>> >>>>> - thank you for taking my input and avoiding resource area for >>>>> memory. I am still apprehensive about UL using NEW_C_HEAP_ARRAY >>>>> instead of raw malloc() here, but I see it has pros and cons. >>>>> >>>> >>>> It might be worth investigating, but if so it should probably be a >>>> separate RFE. >>>> >>>> >>>> Ok. Easy enough to fix should e.g. NMT ever want to use UL. >>>> >>>> >>>>> - I am not sure about flockfile(): I really do not like file >>>>> locks, this always bites in customer scenarios. Also, by using >>>>> this lock, are we not just reintroducing the ttyLocker at a >>>>> deeper level? >>>>> >>>> >>>> The fprintfs locks the FILE* internally even if we don't. This is >>>> AFAIU how fprintf guarantees the writes to be atomic. With the >>>> explicit flock calls we're just ensuring nothing can be printed >>>> in-between our fprintf calls, it shouldn't add any cost. >>>> >>>> >>>> Ah, I see. If we really feel safe about flockfile(), we might just as >>>> well use it in LogFileStreamOutput::write() too. 
There, we assemble the >>>> decorators in a stack local buffer to fprintf them out to the FILE* in a >>>> separate step - I guess to prevent tearing? But if flockfile comes without >>>> cost, we could save the stack local buffer and do: >>>> >>>> flockfile() >>>> fputs(decorators) >>>> fputs(message) >>>> funlockfile() >>>> >>> >>> Good idea. >>> >>> >>>> Instead, how about assembling the total message in memory - like >>>>> it would appear in the file - and print it in one go using >>>>> ::write()? That usually is atomic. This way you would have to >>>>> write out the decorators for each line in memory as they are >>>>> added, but you could get rid of the _lines[] array and all its >>>>> surrounding code. So, no lock, less complicated code, at the cost >>>>> of a bit more memory usage. >>>>> >>>> >>>> As the message might go to different outputs, configured for >>>> different levels, we can't really get rid of the _lines[] array. >>>> We could assemble each applicable message as a long string for >>>> each of the outputs, but given how fprintf seems to work we won't >>>> really have gained anything for that extra work and memory usage. >>>> >>>> >>>> Oh, I see. I did not understand the complexity of the whole thing. Why >>>> is it needed to write lines to a message with different log levels? I may >>>> be slow, but I find that not easy to understand. The fact that different >>>> lines in my message may go to different outputs is a bit surprising. I >>>> would have thought a message is just a text blob I assemble offline and >>>> send to the logging framework in one go, like a glorified String, and that >>>> I would hand it down to UL "send this for this level/tagset combination". >>>> And that the message itself would not even need to know anything about log >>>> levels and tagsets. >>>> >>> >>> The use case I want to support with multi-part messages on different >>> levels is when you have an event you want to log, on for example info >>> level, but where part of that event might include data that is too verbose >>> to fit the info level. So then you could split the event into two parts, >>> one line with the basic information on info level and the other line (or >>> multiple lines) on debug or trace level. The framework then makes sure >>> these lines are delivered together non-interleaved. >>> >>> >>>> - If I understand this correctly, there is no way to print part >>>>> of a line to the message object? So, if I would want to assemble >>>>> a line from various inputs, I would still have to assemble it on >>>>> the stack and feed it to say ScopedLogMessage::debug() in one go? >>>>> Would it be posssible to get an outputStream* from the >>>>> ScopedLogMessage to write into? >>>>> >>>> >>>> Yes, that's right. I wanted to avoid streams for multi-line >>>> messages because I thought the API would become a bit messy with >>>> that functionality. The logStreams we have today are line >>>> buffered, and will send completed lines to the log outputs when >>>> they see a terminating newline character. This means that it won't >>>> be obvious how lines from different streams or writes to the >>>> message will be ordered in the output. Perhaps it's not that bad, >>>> but I figured that we could use stringStreams or similar for when >>>> we need to build up lines for the message. This has the nice side >>>> effect that it will be very obvious when, and in what order, each >>>> line is written to the outputs. 
Perhaps it's worth a follow up RFE >>>> if we find ourselves writing one too many log cases with >>>> stringStreams? >>>> >>>> >>>> Sorry, I think I was not clear enough. What I meant was simpler. We >>>> have now ScopedLogMessage::debug() which does LogMessageBuffer::write() >>>> which writes a line and terminates the line. Line outputStream::print_cr(). >>>> I would like to have an option to just write but not terminate the current >>>> line, like outputStream::print(). That way one could assemble a line piece >>>> by piece, maybe in a loop (e.g. for table row values) without needing >>>> another temporary buffer. >>>> >>> >>> Ok, so say we add the debug_no_cr() family of functions that writes into >>> the log message buffer without newlines. Then, what does it mean if someone >>> does debug_no_cr(s1); trace_no_cr(s2); info(s3); ? >>> >>> It would be simpler if it wasn't for the support for different levels on >>> different parts of the message. Maybe some well defined rules for how it >>> should work would solve this, but I intended to avoid the whole use case >>> for now. It can be done manually with stringStreams, so I don't think it's >>> that serious. >>> >>> >>>> >>>>> - I like how you implemented os::log_vsnprintf(), using >>>>> _vscprintf() on windows. Would it be worthwhile to merge this >>>>> with jio_vsnprintf(), which does the same but returns -1 on >>>>> truncation? >>>>> >>>> >>>> The patch for JDK-8138916 [0] added the log_vsnprintf. You mean to >>>> change jio_vsnprintf to not return -1 on truncation, and instead >>>> work like vsnprintf on POSIX? I think that would be useful, and it >>>> allows us to remove log_vsnprintf. >>>> >>>> >>>> That is exactly what I meant. I think that would be a separate RFE >>>> though, one would have to check on all callers of jio_snprintf. >>>> >>> >>> Yeah. >>> >>> Regards, >>> Marcus >>> >>> >>>> Thanks, >>>> Marcus >>>> >>>> >>>> Thank you! >>>> >>>> ..Thomas >>>> >>>> [0] https://bugs.openjdk.java.net/browse/JDK-8138916 >>>> >>>> >>>> >>>>> Kind Regards, Thomas >>>>> >>>>> >>>>> On Thu, Mar 31, 2016 at 10:53 AM, Marcus Larsson >>>>> > >>>>> wrote: >>>>> >>>>> Any further feedback on this? >>>>> >>>>> >>>>> >>>>> On 03/18/2016 02:33 PM, Marcus Larsson wrote: >>>>> >>>>> Hi again, >>>>> >>>>> New webrev: >>>>> http://cr.openjdk.java.net/~mlarsson/8145934/webrev.02/ >>>>> >>>>> >>>>> Incremental: >>>>> http://cr.openjdk.java.net/~mlarsson/8145934/webrev.alt-02/ >>>>> >>>>> >>>>> Made all allocations regular C heap allocations because >>>>> of the problems with resource allocations that Thomas >>>>> brought up. We can do a follow up change for resource >>>>> allocation support if we really need it. >>>>> Also added some more tests for scoped messages. >>>>> >>>>> >>>>> On 02/17/2016 12:19 AM, John Rose wrote: >>>>> >>>>> On Feb 16, 2016, at 8:32 AM, Marcus Larsson >>>>> >>>> >>>>> >>>> >> wrote: >>>>> >>>>> >>>>> Alternative version where a LogMessage >>>>> automatically writes its messages when it goes >>>>> out of scope: >>>>> http://cr.openjdk.java.net/~mlarsson/8145934/webrev.alt/ >>>>> >>>>> >>>>> >>>>> >>>>> >>>>> I like this, with the LogMessageBuffer that does the >>>>> heavy work, and the [Scoped]LogMessage which is the >>>>> simplest way to use it. >>>>> >>>>> The LogMessageBuffer should have a neutral >>>>> unallocated state, for use through the LogMessage >>>>> macro. I.e., is_c_allocated should be a three-state >>>>> flag, including 'not allocated at all'. 
That way, if >>>>> you create the thing only to ask 'is_debug' and get a >>>>> false answer, you won't have done more than a few >>>>> cycles of work. Probably the set_prefix operation >>>>> should be lazy in the same way. >>>>> >>>>> >>>>> Fixed. Since I removed the resource allocation completely >>>>> I could keep it as a boolean. >>>>> >>>>> >>>>> I think the destructor should call a user-callable >>>>> flush function, something like this: >>>>> >>>>> ~ScopedLogMessage() { flush(); } >>>>> // in LogMessageBuffer: >>>>> void flush() { >>>>> if (_line_count > 0) { >>>>> _log.write(*this); >>>>> reset(); >>>>> } >>>>> } >>>>> void reset() { >>>>> _line_count = 0; >>>>> _message_buffer_size = 0; >>>>> } >>>>> >>>>> It will be rare for user code to want to either flush >>>>> early or cancel pending output, but when you need it, >>>>> it should be there. >>>>> >>>>> >>>>> Fixed. >>>>> >>>>> >>>>> I still prefer the first patch though, where >>>>> messages are neither tied to a particular log, >>>>> nor automatically written when they go out of >>>>> scope. Like I've said, the explicit write line >>>>> makes it easier to read the code. >>>>> >>>>> >>>>> There's a tradeoff here: It's easier to read the >>>>> *logging* code if all the *logging* operations are >>>>> explicit. But the point of logging code is to add >>>>> logging to code that is busy doing *other* operations >>>>> besides logging. That's why (I assume) people have >>>>> been noting that some uses of logging are >>>>> "intrusive": The logging logic calls too much >>>>> attention to itself, and with attention being a >>>>> limited resource, it takes away attention from the >>>>> actual algorithm that's being logged about. >>>>> >>>>> The scoped (RAII) log buffer, with automatic write, >>>>> is the best way I know to reduce the intrusiveness of >>>>> this auxiliary mechanism. >>>>> >>>>> >>>>> Fair point. I'm going with the automatic write on out of >>>>> scope. >>>>> >>>>> >>>>> Of course, I'm interested in finding out what your >>>>> everyday customers think about it. (Rachel, Coleen, >>>>> David, Dan?) >>>>> >>>>> For comparison I've updated the first suggestion >>>>> with the guarantee for unwritten messages, as >>>>> well as cleaning it up a bit by moving the >>>>> implementation to the .cpp rather than the .hpp. >>>>> Full >>>>> webrev:http://cr.openjdk.java.net/~mlarsson/8145934/webrev.01/ >>>>> >>>>> >>>>> Incremental:http://cr.openjdk.java.net/~mlarsson/8145934/webrev.00-01/ >>>>> >>>>> >>>>> >>>>> Let me know what you think. >>>>> >>>>> >>>>> That option is more intrusive than the RAII buffered >>>>> log alias. >>>>> >>>>> Separately, the review thread on JDK-8149383 shows a >>>>> use for LogMessageBuffer to collect a complex log >>>>> message. The log message can then be sent down one >>>>> of two log streams. Something like: >>>>> >>>>> if (need_to_log) { >>>>> ResourceMark rm; >>>>> LogMessageBuffer buf; >>>>> buf.write("Revoking bias of object " >>>>> INTPTR_FORMAT " , mark " >>>>> INTPTR_FORMAT " , type %s , prototype header " >>>>> INTPTR_FORMAT >>>>> " , allow rebias %d , >>>>> requesting thread " INTPTR_FORMAT, >>>>> p2i((void *)obj), >>>>> (intptr_t) mark, >>>>> obj->klass()->external_name(), >>>>> (intptr_t) obj->klass()->prototype_header(), >>>>> (allow_rebias ? 
1 : 0), >>>>> (intptr_t) requesting_thread); >>>>> if (!is_bulk) >>>>> log_info(biasedlocking).write(buf); >>>>> else >>>>> log_trace(biasedlocking).write(buf); >>>>> } >>>>> >>>>> It is important here (like you pointed out) that the >>>>> LogMessageBuffer is decoupled from log levels and >>>>> streams, so that it can be used as a flexible >>>>> component of logic like this. >>>>> >>>>> But the commonest usage should (IMO) be supported by >>>>> a scoped auto-writing log alias. >>>>> >>>>> >>>>> Yeah, I agree. >>>>> >>>>> Thanks, >>>>> Marcus >>>>> >>>>> >>>>> >>>>> >>>> >>>> >>> >> > From stefan.johansson at oracle.com Wed Apr 6 10:58:38 2016 From: stefan.johansson at oracle.com (Stefan Johansson) Date: Wed, 6 Apr 2016 12:58:38 +0200 Subject: RFR: 8153583: Make OutputAnalyzer.reportDiagnosticSummary public In-Reply-To: <5704D738.4010205@oracle.com> References: <5704D738.4010205@oracle.com> Message-ID: <5704EBDE.6040006@oracle.com> Looks good, StefanJ On 2016-04-06 11:30, Stefan Karlsson wrote: > Hi all, > > Please review this patch to make one of the testlibrary functions > publicly accessible for tests. > > http://cr.openjdk.java.net/~stefank/8153583/webrev.01 > https://bugs.openjdk.java.net/browse/JDK-8153583 > > The reason why I need it to be public is that I want to write more > complex verification of the the output streams, and still be able to > call reportDiagnosticSummary when it fails. For example: > > long size = parseMemoryString(pageSizeStr); > if (size != expectedSize) { > output.reportDiagnosticSummary(); > throw new RuntimeException("Match from '" + pattern + "' got " + > size + " expected: " + expectedSize); > } > > Thanks, > StefanK From stefan.karlsson at oracle.com Wed Apr 6 11:11:04 2016 From: stefan.karlsson at oracle.com (Stefan Karlsson) Date: Wed, 6 Apr 2016 13:11:04 +0200 Subject: RFR: 8153583: Make OutputAnalyzer.reportDiagnosticSummary public In-Reply-To: <5704D7FF.3080007@oracle.com> References: <5704D738.4010205@oracle.com> <5704D7FF.3080007@oracle.com> Message-ID: <5704EEC8.5000507@oracle.com> Thanks, Bengt. StefanK On 2016-04-06 11:33, Bengt Rutisson wrote: > > Hi StefanK, > > On 2016-04-06 11:30, Stefan Karlsson wrote: >> Hi all, >> >> Please review this patch to make one of the testlibrary functions >> publicly accessible for tests. >> >> http://cr.openjdk.java.net/~stefank/8153583/webrev.01 >> https://bugs.openjdk.java.net/browse/JDK-8153583 > > Looks good. > > Bengt > >> >> The reason why I need it to be public is that I want to write more >> complex verification of the the output streams, and still be able to >> call reportDiagnosticSummary when it fails. For example: >> >> long size = parseMemoryString(pageSizeStr); >> if (size != expectedSize) { >> output.reportDiagnosticSummary(); >> throw new RuntimeException("Match from '" + pattern + "' got " + >> size + " expected: " + expectedSize); >> } >> >> Thanks, >> StefanK > From stefan.karlsson at oracle.com Wed Apr 6 11:11:19 2016 From: stefan.karlsson at oracle.com (Stefan Karlsson) Date: Wed, 6 Apr 2016 13:11:19 +0200 Subject: RFR: 8153583: Make OutputAnalyzer.reportDiagnosticSummary public In-Reply-To: <5704EBDE.6040006@oracle.com> References: <5704D738.4010205@oracle.com> <5704EBDE.6040006@oracle.com> Message-ID: <5704EED7.4080609@oracle.com> Thanks, Stefan. StefanK On 2016-04-06 12:58, Stefan Johansson wrote: > Looks good, > StefanJ > > On 2016-04-06 11:30, Stefan Karlsson wrote: >> Hi all, >> >> Please review this patch to make one of the testlibrary functions >> publicly accessible for tests. 
>> >> http://cr.openjdk.java.net/~stefank/8153583/webrev.01 >> https://bugs.openjdk.java.net/browse/JDK-8153583 >> >> The reason why I need it to be public is that I want to write more >> complex verification of the the output streams, and still be able to >> call reportDiagnosticSummary when it fails. For example: >> >> long size = parseMemoryString(pageSizeStr); >> if (size != expectedSize) { >> output.reportDiagnosticSummary(); >> throw new RuntimeException("Match from '" + pattern + "' got " + >> size + " expected: " + expectedSize); >> } >> >> Thanks, >> StefanK > From daniel.daugherty at oracle.com Wed Apr 6 13:26:17 2016 From: daniel.daugherty at oracle.com (Daniel D. Daugherty) Date: Wed, 6 Apr 2016 07:26:17 -0600 Subject: RFR: JDK-8152666: The new Hotspot Build System In-Reply-To: <5704D28E.1060304@oracle.com> References: <56F4F0EE.1040508@oracle.com> <5703FF7C.6010309@oracle.com> <5704D28E.1060304@oracle.com> Message-ID: <57050E79.4000509@oracle.com> I'm good with all of your replies/resolutions. I did forget to say thanks for sticking with the rework of the HotSpot build system! There have been a few attempts in the past to rework the HotSpot build system, but no one was able to make much progress let alone finish it. Kudos!! Dan On 4/6/16 3:10 AM, Erik Joelsson wrote: > Hello Dan and thank you for the review! I know it's a lot to chew through. > > I have incorporated your changes and published a new webrev: > http://cr.openjdk.java.net/~erikj/8152666/webrev.02/ > > On 2016-04-05 20:10, Daniel D. Daugherty wrote: >> >> >> > The new build supports the following variants: >> > >> > * server (C1+C2) >> >> The above "server" variant is the "tiered server". Does the new >> build system support the "C2 server" variant? What about the >> 32-bit server and 64-bit server build variants? For example, >> on Linux you can have: >> >> * C1/Client, 32-bit >> * C2/Server, 32-bit >> * Tiered (C1 & C2), 32-bit >> * C2/Server, 64-bit >> * Tiered (C1 + C2), 64-bit >> >> The above wide range of variants is also true for Win*. >> > There is a way to achieve this even if it's not as straight forward. > It's controlled through the new "jvm-feature" setting. To build a > completely custom set of features for a jvm, you set the > --with-jvm-variants=custom and then define the full feature set using > --with-jvm-features=compiler2,... For "server, client, core, minimal, > zero and zeroshark" there is a predefined set of features while the > custom variant has no features by default. >> >> General >> Please make sure all the copyrights are updated. >> > Done >> >> common/autoconf/basics.m4 >> No comments. >> >> common/autoconf/build-performance.m4 >> No comments. >> >> common/autoconf/buildjdk-spec.gmk.in >> No comments. >> >> common/autoconf/compare.sh.in >> No comments. >> >> common/autoconf/configure >> No comments. >> >> common/autoconf/configure.ac >> No comments. >> >> common/autoconf/flags.m4 >> L274: SHARED_LIBRARY_FLAGS="-dynamiclib >> -compatibility_version 1.0.0 -current_version 1.0.0 $PICFLAG" >> L275: JVM_CFLAGS="$JVM_CFLAGS -fPIC" >> >> L275 is new, but seeing it next to L274 makes me wonder if >> $PICFLAG should be used instead of the literal '-fPIC'? > Fixed >> >> L303: JVM_CFLAGS="$JVM_CFLAGS -fPIC" >> Same question about literal '-fPIC'. >> > Not sure, leaving for now. It seems we leave the PICFLAG empty for the > JDK build and only add it to the hotspot build. This should be > addressed in a followup where we try to align flag usage more between > the different libraries. 
>> For most of the changes to flags.m4, I can't see how any of it >> relates to the new HotSpot build. >> >> Update: Now I'm wondering if this is one of those files that >> we typically don't review because it is auto generated. >> Sorry, don't remember for sure. > It's a file that should be reviewed, only generated-configure.sh can > be ignored. The majority of the changes in here are related to cross > compiling in the modular world. When cross compiling now, we need to > also build a jvm for the build platform in order to run jlink and jmod > when building images. With the old hotspot build, that was simpler, > just invoke the hotspot build with some ARCH and compiler related > variables set. For the rest of the JDK build, an approximation of > flags used was enough so the problem was never fully solved. > > In the new build, we derive all the compiler options in configure so I > had to introduce a more proper solution. I did this by parameterizing > some macros in flags.m4 and platform.m4 so that we can run them twice, > once for the "target" toolchain" and one for the "build" toolchain. > These are the majority of the changes you are seeing. I also removed > the old hard coded "build" versions of certain flag and platform > variables. >> common/autoconf/generated-configure.sh >> 2642 lines changed... I think this is one of those files >> you're supposed to skip in build-dev review... :-| > Yes, please do. >> >> common/autoconf/help.m4 >> L179: $PRINTF "Which are valid to use depends on the target >> platform.\n " >> L180: $PRINTF "%s " $VALID_JVM_FEATURES >> Why are there blanks after the last '\n' on L179 instead of >> at the beginning of L180? >> > If you do $PRINTF " %s " $VALID_JVM_FEATURES, it adds those spaces > between every element in VALID_JVM_FEATURES. >> common/autoconf/hotspot-spec.gmk.in >> No comments. >> >> common/autoconf/hotspot.m4 >> L46: # Check if the specified JVM features are explicitely >> enabled. To be used in >> Typo: 'explicitely' -> 'explicitly' >> >> L59: # server: normal interpreter, and a tiered C1/C2 compiler >> So no support for a C2-only server config? >> >> L77: # Have the user listed more than one variant? >> Typo: 'Have' -> 'Has' >> > fixed >> common/autoconf/jdk-options.m4 >> No comments other than to say thanks for keeping support >> for 'optimized' builds. >> >> common/autoconf/jdk-version.m4 >> No comments. >> >> common/autoconf/lib-std.m4 >> No comments. >> >> common/autoconf/libraries.m4 >> No comments. >> >> common/autoconf/platform.m4 >> No comments, but mind numbing amount of diffs. >> > Same explanation as for flags.m4 >> common/autoconf/spec.gmk.in >> No comments. >> >> common/autoconf/toolchain.m4 >> No comments. >> >> common/autoconf/version-numbers >> No comments. >> >> common/bin/compare.sh >> No comments. >> >> common/bin/compare_exceptions.sh.incl >> No comments. >> >> make/Jprt.gmk >> No comments. >> >> make/Main.gmk >> No comments other than the 'hotspot-ide-project' target >> looks interesting... >> > This is the replacement for the visual studio project generator. We > currently only support VS here. >> make/common/MakeBase.gmk >> No comments. >> >> make/common/NativeCompilation.gmk >> L649: else ifeq (LOW, $$($1_OPTIMIZATION)) >> L650: $1_OPT_CFLAGS := $(C_O_FLAG_NORM) >> L651: $1_OPT_CXXFLAGS := $(CXX_O_FLAG_NORM) >> Instead of "_NORM", I was expecting "_LOW". 
>> >> L652: else ifeq (HIGH, $$($1_OPTIMIZATION)) >> L653: $1_OPT_CFLAGS := $(C_O_FLAG_HI) >> L654: $1_OPT_CXXFLAGS := $(CXX_O_FLAG_HI) >> Instead of "_HI" I was expecting "_HIGH". >> > The names here were defined way back when we did build infra for the > JDK build. I wouldn't mind better alignment in naming the optimization > levels. >> make/jprt.properties >> L136: # Don't disable precompiled headers on windows. It's simply >> too slow. >> This is a surprise. Not the slowness part, but not being >> able to do a non-PCH JPRT build on Win*. IMHO, it's a >> little too much motherhood... >> > Actually, the old hotspot build does not allow disabling of PCH for > windows at all. The flag is simply ignored. In the new build, we treat > the flag the same on all platforms, so disabling precompiled headers > works on Windows. In the current JPRT config, we disable precompiled > headers on all fastdebug builds as a way of making sure we aren't > breaking that build configuration. We noticed a major build time > regression on Windows fastdebug builds in JPRT until we figured out it > was caused by this. Since we aren't currently disabling precompiled > header on Windows, I see no reason to start now. The build time > regression for just building hotspot is around 2m->12m. >> jdk/make/Import.gmk >> No comments. >> >> jdk/make/copy/Copy-java.base.gmk >> No comments. >> >> jdk/make/lib/CoreLibraries.gmk >> No comments. >> >> hotspot/makefiles/BuildHotspot.gmk >> No comments. >> >> hotspot/makefiles/Dist.gmk >> L52: define macosx_universalize >> I thought MacOS X universal support was going away? >> >> Update: OK, I see the mention of 8069540 ahead... >> > Yeah, we need to be binary the same as the old build for now. > Hopefully we can get rid of the universal stuff soon. >> L120: # these files are identical, and just pick one arbitrarily >> to use as souce. >> Typo: 'souce' -> 'source' >> >> L139: # This might have been defined in a custom extenstion >> Typo: 'extenstion' -> 'extension' >> > fixed >> L168: # NOTE: In the old build, this file was not copied on Windows. >> L169: ifneq ($(OPENJDK_TARGET_OS), windows) >> L170: $(eval $(call SetupCopyFiles, COPY_JVMTI_HTML, \ >> I'm not quite sure why the jvmti.html work is done for >> more than a single platform. >> >> Update: Thinking about this more... I vaguely remember that >> JVM/TI tracing used to be disabled in Client VMs. Don't know >> if that's still the case. > The jvmti.html file is just copied into the docs bundle later. IMO, > the docs bundle should be the same regardless of platform. In practice > we only publish the bundle from one build platform anyway. > > /Erik >> >> hotspot/makefiles/HotspotCommon.gmk >> No comments. >> >> hotspot/makefiles/gensrc/GenerateSources.gmk >> No comments. >> >> hotspot/makefiles/gensrc/GensrcAdlc.gmk >> L98: # NOTE: Windows adlc flags was different in the old >> build. Is this really >> L99: # correct? >> John Rose may know the answer to this historical question. >> >> hotspot/makefiles/gensrc/GensrcDtrace.gmk >> No comments. >> >> hotspot/makefiles/gensrc/GensrcJvmti.gmk >> No comments. >> >> hotspot/makefiles/ide/CreateVSProject.gmk >> No comments. >> >> hotspot/makefiles/lib/CompileDtracePostJvm.gmk >> No comments. >> >> hotspot/makefiles/lib/CompileDtracePreJvm.gmk >> No comments. >> >> hotspot/makefiles/lib/CompileJvm.gmk >> No comments. >> >> hotspot/makefiles/lib/CompileLibjsig.gmk >> No comments. >> >> hotspot/makefiles/lib/CompileLibraries.gmk >> No comments. 
>> >> hotspot/makefiles/lib/JvmFeatures.gmk >> No comments. >> >> hotspot/makefiles/lib/JvmMapfile.gmk >> No comments. >> >> hotspot/makefiles/lib/JvmOverrideFiles.gmk >> No comments. >> >> hotspot/makefiles/mapfiles/libjsig/mapfile-vers-solaris >> hotspot/makefiles/mapfiles/libjvm_db/mapfile-vers >> hotspot/makefiles/mapfiles/libjvm_dtrace/mapfile-vers >> No comments on the mapfiles. >> >> hotspot/makefiles/symbols/symbols-aix >> hotspot/makefiles/symbols/symbols-aix-debug >> hotspot/makefiles/symbols/symbols-linux >> hotspot/makefiles/symbols/symbols-macosx >> hotspot/makefiles/symbols/symbols-shared >> hotspot/makefiles/symbols/symbols-solaris >> hotspot/makefiles/symbols/symbols-solaris-dtrace-compiler1 >> hotspot/makefiles/symbols/symbols-solaris-dtrace-compiler2 >> hotspot/makefiles/symbols/symbols-unix >> No comments on the symbol files. >> >> >> Thumbs up on this fix; I don't think that anything I noted >> above is a show stopper for this changeset. >> >> Dan >> >> >>> >>> /Erik >> > From marcus.larsson at oracle.com Wed Apr 6 13:45:35 2016 From: marcus.larsson at oracle.com (Marcus Larsson) Date: Wed, 6 Apr 2016 15:45:35 +0200 Subject: RFR: 8145934: Make ttyLocker equivalent for Unified Logging framework In-Reply-To: References: <56BB3FD0.5000104@oracle.com> <3910DA9B-43C9-4C1A-8FD0-993A54225550@oracle.com> <56BCA8C9.102@oracle.com> <56C34F0E.4090803@oracle.com> <90DC33E3-F597-40E4-A317-6C92F4969575@oracle.com> <56EC03A4.1030705@oracle.com> <56FCE56C.6070606@oracle.com> <56FD1481.3090707@oracle.com> <56FD3CC1.4050502@oracle.com> <56FE78B3.2060802@oracle.com> <5703C863.4080403@oracle.com> Message-ID: <570512FF.9070908@oracle.com> Hi, On 2016-04-06 11:38, Thomas St?fe wrote: > Hi Marcus, > > still no luck applying your patch to hs-rt. On a freshly cloned repo I > get: > > hg qpush -v > > .... > cannot patch src/share/vm/logging/logMessage.hpp: file is not tracked > .... > Weird. Seems like webrev doesn't like my patch queue. I've regenerated the webrev with a single patch, updated in place. > ---- > > I still feel that the benefit of different levels per log message is > not worth the added complexity, especially since it prevents one from > using the log message like a string stream (as you explained, using > different log levels means a write must always be a complete line). > > I understand your motivation, but what you describe could just as well > be done as (pseudocode): > > LogMessage(logging) msg; > if (level >= debug) { > msg.print("debug message"); > if (level >= trace) { > msg.print("additional trace information"); > } > } > > easier to understand, too. At the added costs of additional > comparisons in the caller code. That way LogMessage does not have to > know anything about log levels, and hence does not need to keep meta > infos about lines, and could have a print() and print_cr() method. Assuming that message would then be written on debug level, you would get trace messages included in the debug output depending on whether or not you have trace enabled. It makes it all very confusing for the consumers instead. > > But that is just my opinion. > > .... > > Other than that, code looks fine. Small remarks: > > http://cr.openjdk.java.net/~mlarsson/8145934/webrev.03/src/share/vm/logging/log.cpp.udiff.html > > > file_contains_substrings_in_order: > > Potential truncation if line length > 1024. > Will fix. 
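Coming back to the level-per-line question further up, the kind of call site I have in mind looks roughly like this (just a sketch - the tag set and the variable names are invented, but the shape follows the API in this patch):

    LogMessage(gc, heap) msg;
    msg.info("GC pause took %.3fms", pause_ms);
    msg.debug("eden: " SIZE_FORMAT "K -> " SIZE_FORMAT "K", eden_before_kb, eden_after_kb);
    // both lines are buffered and written together, non-interleaved, when
    // msg goes out of scope; the debug line is simply dropped on outputs
    // that are only configured for info level

If the caller had to do the level checks itself, as in the pseudocode above, the extra detail would end up on the same level as the first line, which is exactly what I want to avoid.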
> -- > > http://cr.openjdk.java.net/~mlarsson/8145934/webrev.03/src/share/vm/logging/logFileStreamOutput.cpp.udiff.html > > > Both LogFileStreamOutput::write(const LogDecorations& decorations, > const char* msg) and > LogFileStreamOutput::write(LogMessageBuffer::Iterator msg_iterator) > can be made a tiny bit smaller by unifying the else branches, eg: > > + int written = 0; > + os::flockfile(_stream); > + for (; !msg_iterator.is_at_end(); msg_iterator++) { > + if (use_decorations) { > + written += write_decorations(msg_iterator.decorations()); > + } > + written += jio_fprintf(_stream, "%s\n", msg_iterator.message()); > + } > + fflush(_stream); > + os::funlockfile(_stream); I will have to include a jio_fprintf in the if-case (or in write_decorations) for the separating space between decorations and message. It saves the else case though, so I'll update it. > --- > > http://cr.openjdk.java.net/~mlarsson/8145934/webrev.03/src/share/vm/logging/logPrefix.hpp.udiff.html > > > DEBUG_ONLY(buf[0] = '\0';) \ > > I would get either rid of this or do this for product too. Doing this > only for debug just hides "append-assuming-empty-string" errors in > debug case. I'll remove it and change the assert to: assert(ret == 0 || ret == strlen(buf), ... > > Otherwise it looks fine to me. Still not a reviewer though :) so > others should look at this too. > > Kind Regards, Thomas Thanks! Marcus > > > > On Tue, Apr 5, 2016 at 4:14 PM, Marcus Larsson > > wrote: > > Hi, > > Rebased and aligned the patch with the latest changes to the UL > API. Webrevs updated in place. > > Are we ready to wrap this up? > > Thanks, > Marcus > > > On 04/01/2016 03:33 PM, Marcus Larsson wrote: > > Hi again, > > Updated webrev with removed decoration buffers. Decorations > are now written directly to the streams with the help of > flockfile/funlockfile as you suggested. > > Webrev: > http://cr.openjdk.java.net/~mlarsson/8145934/webrev.03/ > > > Incremental: > http://cr.openjdk.java.net/~mlarsson/8145934/webrev.02-03/ > > > Thanks, > Marcus > > On 03/31/2016 05:05 PM, Marcus Larsson wrote: > > > On 03/31/2016 03:40 PM, Thomas St?fe wrote: > > Hi Marcus, > > On Thu, Mar 31, 2016 at 2:13 PM, Marcus Larsson > > >> wrote: > > Hi Thomas, > > > On 03/31/2016 12:48 PM, Thomas St?fe wrote: > > Hi Marcus, > > nice to see progress on that issue! > > > Thanks for taking time to look at it. > > > Unfortunately your patch did not apply to my > freshly synced hs-rt > repository. So I took a "dry look" at your > code, and here some > feedback (by no means complete, and also I am > not a (R)eviewer): > > > I'll rebase it and update the webrev. > > > - thank you for taking my input and avoiding > resource area for > memory. I am still apprehensive about UL using > NEW_C_HEAP_ARRAY > instead of raw malloc() here, but I see it has > pros and cons. > > > It might be worth investigating, but if so it > should probably be a > separate RFE. > > > Ok. Easy enough to fix should e.g. NMT ever want to > use UL. > > > - I am not sure about flockfile(): I really do > not like file > locks, this always bites in customer > scenarios. Also, by using > this lock, are we not just reintroducing the > ttyLocker at a > deeper level? > > > The fprintfs locks the FILE* internally even if we > don't. This is > AFAIU how fprintf guarantees the writes to be > atomic. With the > explicit flock calls we're just ensuring nothing > can be printed > in-between our fprintf calls, it shouldn't add any > cost. > > > Ah, I see. 
If we really feel safe about flockfile(), > we might just as well use it in > LogFileStreamOutput::write() too. There, we assemble > the decorators in a stack local buffer to fprintf them > out to the FILE* in a separate step - I guess to > prevent tearing? But if flockfile comes without cost, > we could save the stack local buffer and do: > > flockfile() > fputs(decorators) > fputs(message) > funlockfile() > > > Good idea. > > > Instead, how about assembling the total > message in memory - like > it would appear in the file - and print it in > one go using > ::write()? That usually is atomic. This way > you would have to > write out the decorators for each line in > memory as they are > added, but you could get rid of the _lines[] > array and all its > surrounding code. So, no lock, less > complicated code, at the cost > of a bit more memory usage. > > > As the message might go to different outputs, > configured for > different levels, we can't really get rid of the > _lines[] array. > We could assemble each applicable message as a > long string for > each of the outputs, but given how fprintf seems > to work we won't > really have gained anything for that extra work > and memory usage. > > > Oh, I see. I did not understand the complexity of the > whole thing. Why is it needed to write lines to a > message with different log levels? I may be slow, but > I find that not easy to understand. The fact that > different lines in my message may go to different > outputs is a bit surprising. I would have thought a > message is just a text blob I assemble offline and > send to the logging framework in one go, like a > glorified String, and that I would hand it down to UL > "send this for this level/tagset combination". And > that the message itself would not even need to know > anything about log levels and tagsets. > > > The use case I want to support with multi-part messages on > different levels is when you have an event you want to > log, on for example info level, but where part of that > event might include data that is too verbose to fit the > info level. So then you could split the event into two > parts, one line with the basic information on info level > and the other line (or multiple lines) on debug or trace > level. The framework then makes sure these lines are > delivered together non-interleaved. > > > - If I understand this correctly, there is no > way to print part > of a line to the message object? So, if I > would want to assemble > a line from various inputs, I would still have > to assemble it on > the stack and feed it to say > ScopedLogMessage::debug() in one go? > Would it be posssible to get an outputStream* > from the > ScopedLogMessage to write into? > > > Yes, that's right. I wanted to avoid streams for > multi-line > messages because I thought the API would become a > bit messy with > that functionality. The logStreams we have today > are line > buffered, and will send completed lines to the log > outputs when > they see a terminating newline character. This > means that it won't > be obvious how lines from different streams or > writes to the > message will be ordered in the output. Perhaps > it's not that bad, > but I figured that we could use stringStreams or > similar for when > we need to build up lines for the message. This > has the nice side > effect that it will be very obvious when, and in > what order, each > line is written to the outputs. Perhaps it's worth > a follow up RFE > if we find ourselves writing one too many log > cases with > stringStreams? 
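(For reference, the stringStream combination mentioned here would look something like this - a sketch only, identifiers invented:

    LogMessage(compilation) msg;
    msg.info("compiled %s", method_name);
    stringStream ss;
    for (int i = 0; i < ninlined; i++) {
      ss.print(" %s", inlined_names[i]);
    }
    msg.debug("inlined:%s", ss.as_string());

so a single line can still be assembled piece by piece before it is handed to the message.)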
> > > Sorry, I think I was not clear enough. What I meant > was simpler. We have now ScopedLogMessage::debug() > which does LogMessageBuffer::write() which writes a > line and terminates the line. Line > outputStream::print_cr(). I would like to have an > option to just write but not terminate the current > line, like outputStream::print(). That way one could > assemble a line piece by piece, maybe in a loop (e.g. > for table row values) without needing another > temporary buffer. > > > Ok, so say we add the debug_no_cr() family of functions > that writes into the log message buffer without newlines. > Then, what does it mean if someone does debug_no_cr(s1); > trace_no_cr(s2); info(s3); ? > > It would be simpler if it wasn't for the support for > different levels on different parts of the message. Maybe > some well defined rules for how it should work would solve > this, but I intended to avoid the whole use case for now. > It can be done manually with stringStreams, so I don't > think it's that serious. > > > > - I like how you implemented > os::log_vsnprintf(), using > _vscprintf() on windows. Would it be > worthwhile to merge this > with jio_vsnprintf(), which does the same but > returns -1 on > truncation? > > > The patch for JDK-8138916 [0] added the > log_vsnprintf. You mean to > change jio_vsnprintf to not return -1 on > truncation, and instead > work like vsnprintf on POSIX? I think that would > be useful, and it > allows us to remove log_vsnprintf. > > > That is exactly what I meant. I think that would be a > separate RFE though, one would have to check on all > callers of jio_snprintf. > > > Yeah. > > Regards, > Marcus > > > Thanks, > Marcus > > > Thank you! > > ..Thomas > > [0] https://bugs.openjdk.java.net/browse/JDK-8138916 > > > > Kind Regards, Thomas > > > On Thu, Mar 31, 2016 at 10:53 AM, Marcus Larsson > > >> wrote: > > Any further feedback on this? > > > > On 03/18/2016 02:33 PM, Marcus Larsson wrote: > > Hi again, > > New webrev: > http://cr.openjdk.java.net/~mlarsson/8145934/webrev.02/ > > > > Incremental: > http://cr.openjdk.java.net/~mlarsson/8145934/webrev.alt-02/ > > > > Made all allocations regular C heap > allocations because > of the problems with resource > allocations that Thomas > brought up. We can do a follow up > change for resource > allocation support if we really need it. > Also added some more tests for scoped > messages. > > > On 02/17/2016 12:19 AM, John Rose wrote: > > On Feb 16, 2016, at 8:32 AM, > Marcus Larsson > > > > > >>> wrote: > > > Alternative version where a > LogMessage > automatically writes its > messages when it goes > out of scope: > http://cr.openjdk.java.net/~mlarsson/8145934/webrev.alt/ > > > > > > > I like this, with the > LogMessageBuffer that does the > heavy work, and the > [Scoped]LogMessage which is the > simplest way to use it. > > The LogMessageBuffer should have a > neutral > unallocated state, for use through > the LogMessage > macro. I.e., is_c_allocated > should be a three-state > flag, including 'not allocated at > all'. That way, if > you create the thing only to ask > 'is_debug' and get a > false answer, you won't have done > more than a few > cycles of work. Probably the > set_prefix operation > should be lazy in the same way. > > > Fixed. Since I removed the resource > allocation completely > I could keep it as a boolean. 
> > > I think the destructor should call > a user-callable > flush function, something like this: > > ~ScopedLogMessage() { flush(); } > // in LogMessageBuffer: > void flush() { > if (_line_count > 0) { > _log.write(*this); > reset(); > } > } > void reset() { > _line_count = 0; > _message_buffer_size = 0; > } > > It will be rare for user code to > want to either flush > early or cancel pending output, > but when you need it, > it should be there. > > > Fixed. > > > I still prefer the first patch > though, where > messages are neither tied to a > particular log, > nor automatically written when > they go out of > scope. Like I've said, the > explicit write line > makes it easier to read the code. > > > There's a tradeoff here: It's > easier to read the > *logging* code if all the > *logging* operations are > explicit. But the point of > logging code is to add > logging to code that is busy doing > *other* operations > besides logging. That's why (I > assume) people have > been noting that some uses of > logging are > "intrusive": The logging logic > calls too much > attention to itself, and with > attention being a > limited resource, it takes away > attention from the > actual algorithm that's being > logged about. > > The scoped (RAII) log buffer, with > automatic write, > is the best way I know to reduce > the intrusiveness of > this auxiliary mechanism. > > > Fair point. I'm going with the > automatic write on out of > scope. > > > Of course, I'm interested in > finding out what your > everyday customers think about > it. (Rachel, Coleen, > David, Dan?) > > For comparison I've updated > the first suggestion > with the guarantee for > unwritten messages, as > well as cleaning it up a bit > by moving the > implementation to the .cpp > rather than the .hpp. > Full > webrev:http://cr.openjdk.java.net/~mlarsson/8145934/webrev.01/ > > > > Incremental:http://cr.openjdk.java.net/~mlarsson/8145934/webrev.00-01/ > > > > > Let me know what you think. > > > That option is more intrusive than > the RAII buffered > log alias. > > Separately, the review thread on > JDK-8149383 shows a > use for LogMessageBuffer to > collect a complex log > message. The log message can then > be sent down one > of two log streams. Something like: > > if (need_to_log) { > ResourceMark rm; > LogMessageBuffer buf; > buf.write("Revoking bias of > object " > INTPTR_FORMAT " , mark " > INTPTR_FORMAT " , type %s , > prototype header " > INTPTR_FORMAT > " , > allow rebias %d , > requesting thread " INTPTR_FORMAT, > p2i((void *)obj), > (intptr_t) mark, > obj->klass()->external_name(), > (intptr_t) > obj->klass()->prototype_header(), > (allow_rebias ? 1 : 0), > (intptr_t) requesting_thread); > if (!is_bulk) > log_info(biasedlocking).write(buf); > else > log_trace(biasedlocking).write(buf); > } > > It is important here (like you > pointed out) that the > LogMessageBuffer is decoupled from > log levels and > streams, so that it can be used as > a flexible > component of logic like this. > > But the commonest usage should > (IMO) be supported by > a scoped auto-writing log alias. > > > Yeah, I agree. 
> > Thanks, > Marcus > > > > > > > > > From filipp.zhinkin at gmail.com Wed Apr 6 16:02:38 2016 From: filipp.zhinkin at gmail.com (Filipp Zhinkin) Date: Wed, 6 Apr 2016 19:02:38 +0300 Subject: RFR (L): 8149374: Replace C1-specific collection classes with universal collection classes In-Reply-To: <5703B23B.2030008@oracle.com> References: <56FAFF3E.3020507@oracle.com> <56FD3ECF.1090800@oracle.com> <5702CCB0.9050701@oracle.com> <5703B23B.2030008@oracle.com> Message-ID: Thank you, Coleen. On Tue, Apr 5, 2016 at 3:40 PM, Coleen Phillimore wrote: > > Filip, Thank you for your answers. This change looks really good!! > Coleen > > > On 4/5/16 3:23 AM, Filipp Zhinkin wrote: >> >> Hi Coleen, >> >> thanks for taking a look at it. >> >> On Mon, Apr 4, 2016 at 11:21 PM, Coleen Phillimore >> wrote: >>> >>> Thank you for CCing hotspot-dev. This change is great! I reviewed the >>> runtime files. >>> >>> >>> http://cr.openjdk.java.net/~fzhinkin/8149374/webrev.01/src/share/vm/prims/jvmtiRedefineClasses.cpp.udiff.html >>> >>> Why does this have three parameters? >>> >>> - _index_map_p = new intArray(scratch_cp->length(), -1); >>> + _index_map_p = new intArray(scratch_cp->length(), scratch_cp->length(), >>> -1); >> >> GrowableArray won't initialize elements in backing array until you ask it >> to. >> And it also won't allow to access elements that were not initialized. >> >> So we have to pass three parameters there to allocate backing array >> and fill it with -1. >> >>> Why not just change it to: >>> >>> _index_map_p = new GrowableArray(scratch_cp->length()); >> >> We use -1 there for CP entries that were not mapped during constant >> pools merging. >> >>> I don't see the three argument constructor to GrowableArray that takes -1 >>> (??) >> >> It's the one that take init size, length, filler and few other >> implicit parameters: >> >> http://hg.openjdk.java.net/jdk9/hs-comp/hotspot/annotate/tip/src/share/vm/utilities/growableArray.hpp#l178 >> >>> Is it possible to completely eliminate intArray, intStack, boolArray and >>> boolStack, and the CHeapArray ? If so array.hpp should really go in >>> directory oops since the only Array<> left is for metaspace. Maybe this >>> can be a further cleanup? >> >> I've already eliminated CHeapArray in the latest webrev [*], >> so only typedefs are preventing array.hpp movement. >> >> I'd prefer to eliminate typedefs and move array.hpp to oops directory >> in separate CR just >> to avoid webrev's growing and simplify reviewing. >> But if it's ok, then I can do it within this CR. >> >> Thanks, >> Filipp. >> >> [*] http://cr.openjdk.java.net/~fzhinkin/8149374/webrev.02/ >> >>> Wow, thanks! >>> >>> Coleen >>> >>> >>> On 3/31/16 11:14 AM, Vladimir Kozlov wrote: >>>> >>>> Hi Filipp, >>>> >>>> Yes, this looks better. CCing to hotspot-dev for Runtime and GC groups >>>> to >>>> look on. >>>> >>>> Thanks, >>>> Vladimir >>>> >>>> On 3/31/16 8:08 AM, Filipp Zhinkin wrote: >>>>> >>>>> Hi Vladimir, >>>>> >>>>> thank you for looking at this change. >>>>> >>>>> On Wed, Mar 30, 2016 at 1:18 AM, Vladimir Kozlov >>>>> wrote: >>>>>> >>>>>> Nice clean up but I don't see any source code removed. What benefits >>>>>> we >>>>>> have >>>>>> then? >>>>>> I understand that we don't generate subclasses for ResourceArray and >>>>>> use >>>>>> GrowableArray. But it will not save space I think. >>>>>> What prevents us to remove ResourceArray at all? 
>>>>> >>>>> >>>>> CMS's ParScanThreadStateSet is inherited from ResourceArray, >>>>> so it should be updated before removing ResourceArray: >>>>> >>>>> http://cr.openjdk.java.net/~fzhinkin/8149374/webrev.01/ >>>>> >>>>>> On 3/11/16 3:42 AM, Filipp Zhinkin wrote: >>>>>>> >>>>>>> >>>>>>> Hi all, >>>>>>> >>>>>>> please review a fix for JDK-8149374: >>>>>>> >>>>>>> Webrev: http://cr.openjdk.java.net/~fzhinkin/8149374/webrev.00/ >>>>>>> Bug: https://bugs.openjdk.java.net/browse/JDK-8149374 >>>>>>> Testing done: hotspot_all tests + CTW >>>>>>> >>>>>>> I've replaced all usages of collections defined via define_array and >>>>>>> define_stack macros with GrowableArray. >>>>>>> >>>>>>> There are good and bad news regarding performance impact of that >>>>>>> change. >>>>>>> Unfortunately, C1 compilation time for CTW-scenario w/ release bits >>>>>>> increased from 51.07?0.28s to 52.99?0.23s (it's about 3.5%). >>>>>> >>>>>> >>>>>> >>>>>> It is acceptable regression I think. I don't think we should optimize >>>>>> and >>>>>> make more complex GrowableArray just to save 0.5% of performance for >>>>>> C2. >>>>> >>>>> >>>>> As long as GrowableArray is used in different Hotspot's subsystems it >>>>> may be beneficial to optimize it, >>>>> but I've executed SPECjvm2008's startup.* benchmarks and there were no >>>>> significant difference. >>>>> >>>>> If ~3% regression is OK for C1 then I'm fine with leaving >>>>> GrowableArray's initialization >>>>> in its current state unless there will be other reasons to speed it up. >>>>> >>>>> Thanks, >>>>> Filipp. >>>>> >>>>>> Thanks, >>>>>> Vladimir >>>>>> >>>>>> >>>>>>> Such difference caused by eager initialization of GrowableArray's >>>>>>> backing array elements [1]. I can imagine when we actually need to >>>>>>> force >>>>>>> initialization and de-initialization during array's >>>>>>> growing/destruction, but for some types like c++ primitive types or >>>>>>> pointers such initialization does not make much sense, because >>>>>>> GrowableArray is not allowing to access an element which was not >>>>>>> explicitly placed inside of it. And as long as GrowableArray most >>>>>>> widely used to store pointers we're simply wasting the time with >>>>>>> initialization. >>>>>>> >>>>>>> I've measured CTW time with following workaround which implements >>>>>>> initialization for numeric types and pointers as no-op and C1 >>>>>>> compilation time returned back to values that were measured before >>>>>>> original change (51.06?0.24s): >>>>>>> >>>>>>> >>>>>>> >>>>>>> http://cr.openjdk.java.net/~fzhinkin/growableArrayInitialization/webrev/ >>>>>>> >>>>>>> I've also measured C2 compilation time and it dropped down by a few >>>>>>> seconds too: 1138?9s w/o GrowableArray's change and 1132?5s w/ it. >>>>>>> >>>>>>> Summing up: I guess we should avoid GrowableArray's backing array >>>>>>> initialization for some types, don't we? 
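As a side note for readers, the two construction modes discussed above can be sketched roughly as follows. This is not the code from the webrevs; the helper names are made up and the real GrowableArray has more machinery (growth, bounds checks, allocation policy). The point is only that a capacity-only construction can skip touching the backing slots for primitives and pointers, while the "length plus filler" form (as in intArray(len, len, -1)) has to write every slot.

  #include <new>
  #include <type_traits>

  // Mode 1: capacity-only construction. Only run default construction when
  // the element type actually needs it; for ints and pointers this is a
  // no-op, which is the optimization the thread is talking about. Elements
  // may only be read after they have been explicitly appended or assigned.
  template <typename E>
  void init_backing_storage(E* data, int count) {
    if (!std::is_trivially_default_constructible<E>::value) {
      for (int i = 0; i < count; i++) {
        ::new (static_cast<void*>(&data[i])) E();
      }
    }
  }

  // Mode 2: the caller asked for 'count' live elements, all set to 'filler'
  // (the -1 in the jvmtiRedefineClasses example), so the fill cannot be
  // skipped regardless of the element type. Assumes a trivially copyable E,
  // which holds for the intArray case in the thread.
  template <typename E>
  void fill_backing_storage(E* data, int count, const E& filler) {
    for (int i = 0; i < count; i++) {
      data[i] = filler;
    }
  }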
>>>>>>> >>>>>>> Best regards, >>>>>>> Filipp >>>>>>> >>>>>>> [1] >>>>>>> >>>>>>> >>>>>>> http://hg.openjdk.java.net/jdk9/hs-comp/hotspot/file/323b8370b0f6/src/share/vm/utilities/growableArray.hpp#l165 >>>>>>> > From filipp.zhinkin at gmail.com Wed Apr 6 16:06:05 2016 From: filipp.zhinkin at gmail.com (Filipp Zhinkin) Date: Wed, 6 Apr 2016 19:06:05 +0300 Subject: RFR (L): 8149374: Replace C1-specific collection classes with universal collection classes In-Reply-To: <5703F987.1000701@oracle.com> References: <56FAFF3E.3020507@oracle.com> <56FD3ECF.1090800@oracle.com> <5702CCB0.9050701@oracle.com> <5703B23B.2030008@oracle.com> <5703F987.1000701@oracle.com> Message-ID: Thank you, Vladimir. Here is the latest webrev (I've update array.hpp & parNewGeneration.cpp according to Mikael's comments): http://cr.openjdk.java.net/~fzhinkin/8149374/webrev.03/ Regards, Filipp. On Tue, Apr 5, 2016 at 8:44 PM, Vladimir Ivanov wrote: > Filipp, I'll sponsor the change. > > Thanks for taking care of it. > > Best regards, > Vladimir Ivanov > > > On 4/5/16 3:40 PM, Coleen Phillimore wrote: >> >> >> Filip, Thank you for your answers. This change looks really good!! >> Coleen >> >> On 4/5/16 3:23 AM, Filipp Zhinkin wrote: >>> >>> Hi Coleen, >>> >>> thanks for taking a look at it. >>> >>> On Mon, Apr 4, 2016 at 11:21 PM, Coleen Phillimore >>> wrote: >>>> >>>> Thank you for CCing hotspot-dev. This change is great! I reviewed >>>> the >>>> runtime files. >>>> >>>> >>>> http://cr.openjdk.java.net/~fzhinkin/8149374/webrev.01/src/share/vm/prims/jvmtiRedefineClasses.cpp.udiff.html >>>> >>>> >>>> Why does this have three parameters? >>>> >>>> - _index_map_p = new intArray(scratch_cp->length(), -1); >>>> + _index_map_p = new intArray(scratch_cp->length(), >>>> scratch_cp->length(), >>>> -1); >>> >>> GrowableArray won't initialize elements in backing array until you ask >>> it to. >>> And it also won't allow to access elements that were not initialized. >>> >>> So we have to pass three parameters there to allocate backing array >>> and fill it with -1. >>> >>>> Why not just change it to: >>>> >>>> _index_map_p = new GrowableArray(scratch_cp->length()); >>> >>> We use -1 there for CP entries that were not mapped during constant >>> pools merging. >>> >>>> I don't see the three argument constructor to GrowableArray that >>>> takes -1 >>>> (??) >>> >>> It's the one that take init size, length, filler and few other >>> implicit parameters: >>> >>> http://hg.openjdk.java.net/jdk9/hs-comp/hotspot/annotate/tip/src/share/vm/utilities/growableArray.hpp#l178 >>> >>> >>>> Is it possible to completely eliminate intArray, intStack, boolArray and >>>> boolStack, and the CHeapArray ? If so array.hpp should really go in >>>> directory oops since the only Array<> left is for metaspace. Maybe >>>> this >>>> can be a further cleanup? >>> >>> I've already eliminated CHeapArray in the latest webrev [*], >>> so only typedefs are preventing array.hpp movement. >>> >>> I'd prefer to eliminate typedefs and move array.hpp to oops directory >>> in separate CR just >>> to avoid webrev's growing and simplify reviewing. >>> But if it's ok, then I can do it within this CR. >>> >>> Thanks, >>> Filipp. >>> >>> [*] http://cr.openjdk.java.net/~fzhinkin/8149374/webrev.02/ >>> >>>> Wow, thanks! >>>> >>>> Coleen >>>> >>>> >>>> On 3/31/16 11:14 AM, Vladimir Kozlov wrote: >>>>> >>>>> Hi Filipp, >>>>> >>>>> Yes, this looks better. CCing to hotspot-dev for Runtime and GC >>>>> groups to >>>>> look on. 
>>>>> >>>>> Thanks, >>>>> Vladimir >>>>> >>>>> On 3/31/16 8:08 AM, Filipp Zhinkin wrote: >>>>>> >>>>>> Hi Vladimir, >>>>>> >>>>>> thank you for looking at this change. >>>>>> >>>>>> On Wed, Mar 30, 2016 at 1:18 AM, Vladimir Kozlov >>>>>> wrote: >>>>>>> >>>>>>> Nice clean up but I don't see any source code removed. What >>>>>>> benefits we >>>>>>> have >>>>>>> then? >>>>>>> I understand that we don't generate subclasses for ResourceArray >>>>>>> and use >>>>>>> GrowableArray. But it will not save space I think. >>>>>>> What prevents us to remove ResourceArray at all? >>>>>> >>>>>> >>>>>> CMS's ParScanThreadStateSet is inherited from ResourceArray, >>>>>> so it should be updated before removing ResourceArray: >>>>>> >>>>>> http://cr.openjdk.java.net/~fzhinkin/8149374/webrev.01/ >>>>>> >>>>>>> On 3/11/16 3:42 AM, Filipp Zhinkin wrote: >>>>>>>> >>>>>>>> >>>>>>>> Hi all, >>>>>>>> >>>>>>>> please review a fix for JDK-8149374: >>>>>>>> >>>>>>>> Webrev: http://cr.openjdk.java.net/~fzhinkin/8149374/webrev.00/ >>>>>>>> Bug: https://bugs.openjdk.java.net/browse/JDK-8149374 >>>>>>>> Testing done: hotspot_all tests + CTW >>>>>>>> >>>>>>>> I've replaced all usages of collections defined via define_array and >>>>>>>> define_stack macros with GrowableArray. >>>>>>>> >>>>>>>> There are good and bad news regarding performance impact of that >>>>>>>> change. >>>>>>>> Unfortunately, C1 compilation time for CTW-scenario w/ release bits >>>>>>>> increased from 51.07?0.28s to 52.99?0.23s (it's about 3.5%). >>>>>>> >>>>>>> >>>>>>> >>>>>>> It is acceptable regression I think. I don't think we should optimize >>>>>>> and >>>>>>> make more complex GrowableArray just to save 0.5% of performance >>>>>>> for C2. >>>>>> >>>>>> >>>>>> As long as GrowableArray is used in different Hotspot's subsystems it >>>>>> may be beneficial to optimize it, >>>>>> but I've executed SPECjvm2008's startup.* benchmarks and there were no >>>>>> significant difference. >>>>>> >>>>>> If ~3% regression is OK for C1 then I'm fine with leaving >>>>>> GrowableArray's initialization >>>>>> in its current state unless there will be other reasons to speed it >>>>>> up. >>>>>> >>>>>> Thanks, >>>>>> Filipp. >>>>>> >>>>>>> Thanks, >>>>>>> Vladimir >>>>>>> >>>>>>> >>>>>>>> Such difference caused by eager initialization of GrowableArray's >>>>>>>> backing array elements [1]. I can imagine when we actually need to >>>>>>>> force >>>>>>>> initialization and de-initialization during array's >>>>>>>> growing/destruction, but for some types like c++ primitive types or >>>>>>>> pointers such initialization does not make much sense, because >>>>>>>> GrowableArray is not allowing to access an element which was not >>>>>>>> explicitly placed inside of it. And as long as GrowableArray most >>>>>>>> widely used to store pointers we're simply wasting the time with >>>>>>>> initialization. >>>>>>>> >>>>>>>> I've measured CTW time with following workaround which implements >>>>>>>> initialization for numeric types and pointers as no-op and C1 >>>>>>>> compilation time returned back to values that were measured before >>>>>>>> original change (51.06?0.24s): >>>>>>>> >>>>>>>> >>>>>>>> >>>>>>>> http://cr.openjdk.java.net/~fzhinkin/growableArrayInitialization/webrev/ >>>>>>>> >>>>>>>> >>>>>>>> I've also measured C2 compilation time and it dropped down by a few >>>>>>>> seconds too: 1138?9s w/o GrowableArray's change and 1132?5s w/ it. >>>>>>>> >>>>>>>> Summing up: I guess we should avoid GrowableArray's backing array >>>>>>>> initialization for some types, don't we? 
>>>>>>>> >>>>>>>> Best regards, >>>>>>>> Filipp >>>>>>>> >>>>>>>> [1] >>>>>>>> >>>>>>>> >>>>>>>> http://hg.openjdk.java.net/jdk9/hs-comp/hotspot/file/323b8370b0f6/src/share/vm/utilities/growableArray.hpp#l165 >>>>>>>> >>>>>>>> >> > From dmitry.fazunenko at oracle.com Wed Apr 6 16:09:26 2016 From: dmitry.fazunenko at oracle.com (Dmitry Fazunenko) Date: Wed, 6 Apr 2016 19:09:26 +0300 Subject: RFR (S) 8152432: Implement setting jtreg @requires properties vm.flavor, vm.bits, vm.compMode In-Reply-To: <5703CFA2.4050403@oracle.com> References: <5703CFA2.4050403@oracle.com> Message-ID: <570534B6.4090401@oracle.com> I got offline comments from Igor Ignatiev. New version: https://bugs.openjdk.java.net/browse/JDK-8152432 http://cr.openjdk.java.net/~dfazunen/8152432/webrev.01/ changes: http://cr.openjdk.java.net/~dfazunen/8152432/webrev.00vs01/ Thanks, Dima On 05.04.2016 17:45, Dmitry Fazunenko wrote: > Hello, > > Would you please review a relatively simple fix which starts using new > jtreg functionality: > ability to define custom properties for use with the @requires tag. > > https://bugs.openjdk.java.net/browse/JDK-8152432 > http://cr.openjdk.java.net/~dfazunen/8152432/webrev.00/ > > As the first experience of using this functionality I just fixed > setting of properties which are set by jtreg, > but set incorrectly, relying only on specified vm flags. > In the near future we are going to introduce new properties. > > Tested locally. > > Thanks, > Dima > > > > From gerard.ziemski at oracle.com Wed Apr 6 16:35:47 2016 From: gerard.ziemski at oracle.com (Gerard Ziemski) Date: Wed, 6 Apr 2016 11:35:47 -0500 Subject: RFR (S): rev2 8152856: Xcode 7.3 -Wshift-negative-value compile failure on Mac OS X In-Reply-To: References: Message-ID: <185B4145-0862-44AF-93F5-C75F81056195@oracle.com> hi all, I have implemented and tested out the new proposed fix using constants instead of casts. Please review this new fix: bug: https://bugs.openjdk.java.net/browse/JDK-8152856 webrev: http://cr.openjdk.java.net/~gziemski/8152856_rev2 Passes JPRT hotspot and RBT hotspot/test/:hotspot_runtime cheers > On Mar 31, 2016, at 9:08 AM, Gerard Ziemski wrote: > > hi all, > > Since no-one has reviewed this yet and in the meantime we changed https://wiki.openjdk.java.net/display/HotSpot/StyleGuide to discourage using enums in hotspot (see JDK-8153116 for more details) I'm going to withdraw this proposed fix and try doing it by replacing enums with constant literals instead of using casts on enums. > > I therefore withdraw this fix for now. > > > cheers > >> On Mar 29, 2016, at 4:30 PM, Gerard Ziemski wrote: >> >> hi all, >> >> Please review this small fix that enables Xcode 7.3 support for building hotspot, which involved 2 things: >> >> - modify gcc.make to add support for 7.3 version of clang compiler >> - modify src/cpu/x86/vm/stubGenerator_x86_64.cpp, src/share/vm/oops/klass.hpp and src/share/vm/opto/library_call.cpp to cast 4 instances of signed int to unsigned when using shift operator to avoid "-Wshift-negative-value" warning. 
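For context, the class of warning being fixed here can be illustrated with a standalone snippet like the one below. The enum, names and values are invented; none of this is the actual HotSpot code from the webrevs, it only shows why newer clang complains and what the two styles of fix (rev1 casts versus rev2 constants) look like.

  #include <cstdint>

  enum SomeMarkBits { kNegativeSentinel = -1 };   // made-up example enum

  int64_t shifted_v1() {
    // Left-shifting a negative value is undefined behaviour in C++, and the
    // clang shipped with Xcode 7.3 reports it as -Wshift-negative-value:
    //   return kNegativeSentinel << 8;            // warning / UB
    // rev1-style fix: do the shift on an unsigned value, then convert back.
    return (int64_t)((uint64_t)kNegativeSentinel << 8);
  }

  int64_t shifted_v2() {
    // rev2-style fix: avoid shifting a negative enum value entirely and
    // spell out the intended bit pattern as a constant.
    const int64_t kShiftedSentinel = ~(int64_t)0xFF;  // same bits as -1 << 8
    return kShiftedSentinel;
  }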
>> >> https://bugs.openjdk.java.net/browse/JDK-8152856 >> http://cr.openjdk.java.net/~gziemski/8152856_rev1 >> >> Passes JPRT hotspot on all platforms >> > From alejandro.murillo at oracle.com Wed Apr 6 16:48:32 2016 From: alejandro.murillo at oracle.com (Alejandro Murillo) Date: Wed, 6 Apr 2016 10:48:32 -0600 Subject: [9] RFR JDK-8153564: Add java/nio/Buffer/BasicByte.java to exclude list until JDK-8153563 is fixed In-Reply-To: <5704BC96.1060303@oracle.com> References: <57049B18.5050307@oracle.com> <5704BC96.1060303@oracle.com> Message-ID: <57053DE0.6080001@oracle.com> It was late for me, so Amy took over and fixed it Thanks Alejandro On 4/6/2016 1:36 AM, Alan Bateman wrote: > > > On 06/04/2016 06:14, Alejandro Murillo wrote: >> >> I'd like to push the changeset below to exclude >> java/nio/Buffer/BasicByte.java >> It started failing after the hotspot snapshot was pushed to jdk9/dev >> tonight. >> https://bugs.openjdk.java.net/browse/JDK-8153563 has been filed for >> that failure. > > Would it be possible to add @modules java.base/jdk.internal.misc, as > below, and not exclude these tests? > > -Alan > > > $ hg diff -g . > diff --git a/test/java/nio/Buffer/Basic.java > b/test/java/nio/Buffer/Basic.java > --- a/test/java/nio/Buffer/Basic.java > +++ b/test/java/nio/Buffer/Basic.java > @@ -22,6 +22,7 @@ > */ > > /* @test > + * @modules java.base/jdk.internal.misc > * @summary Unit test for buffers > * @bug 4413135 4414911 4416536 4416562 4418782 4471053 4472779 > 4490253 4523725 > * 4526177 4463011 4660660 4661219 4663521 4782970 4804304 > 4938424 6231529 > diff --git a/test/java/nio/Buffer/CopyDirectMemory.java > b/test/java/nio/Buffer/CopyDirectMemory.java > --- a/test/java/nio/Buffer/CopyDirectMemory.java > +++ b/test/java/nio/Buffer/CopyDirectMemory.java > @@ -25,6 +25,7 @@ > * @summary Test view buffer bulk operations for large buffers. > * @bug 4463011 > * > + * @modules java.base/jdk.internal.misc > * @build Basic > * @run main CopyDirectMemory > */ -- Alejandro From christian.thalinger at oracle.com Wed Apr 6 17:08:25 2016 From: christian.thalinger at oracle.com (Christian Thalinger) Date: Wed, 6 Apr 2016 07:08:25 -1000 Subject: RFR: JDK-8152666: The new Hotspot Build System In-Reply-To: <5704D37E.8040208@oracle.com> References: <56F4F0EE.1040508@oracle.com> <5703FF7C.6010309@oracle.com> <1FE98FBA-A48E-4C1A-A622-74331E7BAB30@oracle.com> <5704D37E.8040208@oracle.com> Message-ID: <2CC4CE46-DDCC-4BD0-B7BC-0A1E4053B9B7@oracle.com> > On Apr 5, 2016, at 11:14 PM, Erik Joelsson wrote: > > Hello, > > I assume the mx projects are for Java code or do they also generate projects for native? Also native. Look at the screenshots I posted. Particularly this one: http://cr.openjdk.java.net/~twisti/8139921/Screen%20Shot%202015-11-10%20at%202.18.20%20PM.png > The new top level target is only meant to replace the old Visual Studio project generator, at least for now. > > /Erik > > On 2016-04-06 03:23, Christian Thalinger wrote: >> >>> On Apr 5, 2016, at 8:10 AM, Daniel D. Daugherty > wrote: >> >> ? >> >>> make/Main.gmk >>> No comments other than the 'hotspot-ide-project' target >>> looks interesting... >> >> Btw. 
there is already support to generate IDE configurations today via mx: >> >> https://wiki.openjdk.java.net/display/Graal/Instructions >> >> integrated with: >> >> https://bugs.openjdk.java.net/browse/JDK-8139921 >> >> One main advantage, as I pointed out in the review, is that it also includes generated files so there are no unresolved includes or methods anymore: >> >> http://mail.openjdk.java.net/pipermail/hotspot-dev/2015-November/020626.html >> >> I?m using this every day. > From stefan.karlsson at oracle.com Wed Apr 6 17:30:34 2016 From: stefan.karlsson at oracle.com (Stefan Karlsson) Date: Wed, 6 Apr 2016 19:30:34 +0200 Subject: RFR: 8153658: Redundant memory copy in LogStreamNoResourceMark Message-ID: <570547BA.7070003@oracle.com> Hi all, Please review this patch to remove a redundant memory copy in the UL log stream classes. http://cr.openjdk.java.net/~stefank/8153658/webrev.01 https://bugs.openjdk.java.net/browse/JDK-8153658 LogStreamNoResourceMark copies the resource allocated string buffer into a new resource allocated string buffer before copying the data to UL. Moreover, this also causes problem when implementing a log stream class using CHeap memory instead of Resource memory. Even though the first allocation is done from CHeap the second copy comes from Resource memory. Thanks, StefanK From stefan.karlsson at oracle.com Wed Apr 6 17:54:50 2016 From: stefan.karlsson at oracle.com (Stefan Karlsson) Date: Wed, 6 Apr 2016 19:54:50 +0200 Subject: RFR: 8153659: Create a CHeap backed LogStream class Message-ID: <57054D6A.8030405@oracle.com> Hi all, Please review this patch to add a LogStream class that allocates its backing buffer from CHeap memory instead of Resource memory. http://cr.openjdk.java.net/~stefank/8153659/webrev.01 https://bugs.openjdk.java.net/browse/JDK-8153659 The main motivation for this is that we can't use Resource allocated memory during initialization, until Thread::current() has been initialized. So, a CHeap backed LogStream is desirable when we execute, for example, the following code during large pages initialization: void os::trace_page_sizes(const char* str, const size_t* page_sizes, int count) { if (TracePageSizes) { tty->print("%s: ", str); for (int i = 0; i < count; ++i) { tty->print(" " SIZE_FORMAT, page_sizes[i]); } tty->cr(); } } The patch restructures the code and creates a LogStreamBase template base class, which takes the backing outputStream class as a template parameter. We then have three concrete LogStream classes: LogStream - Buffer resource allocated with an embedded ResourceMark LogStreamNoResourceMark - Buffer resource allocated without an embedded ResourceMark LogStreamCHeap - Buffer CHeap allocated I moved the LogStream class from the logStream.inline.hpp file to logStream.hpp, for consistency. If that's causing problems while reviewing this, I can move it in a separate patch. Tested with JPRT with the TracePageSizes patch ( JDK-8152491) and internal VM tests. Thanks, StefanK From coleen.phillimore at oracle.com Wed Apr 6 17:58:56 2016 From: coleen.phillimore at oracle.com (Coleen Phillimore) Date: Wed, 6 Apr 2016 13:58:56 -0400 Subject: RFR 8153123 : Streamline StackWalker code In-Reply-To: <57044EC8.7050602@oracle.com> References: <5702FC9B.7020600@oracle.com> <5703F579.8050702@oracle.com> <5703F5E7.4060404@oracle.com> <5704030F.80906@oracle.com> <57044EC8.7050602@oracle.com> Message-ID: <57054E60.6030504@oracle.com> On 4/5/16 7:48 PM, Brent Christian wrote: > Thanks, Coleen. 
Coordinating method/function names on "to stack trace > element" is a fine thing. I've done so in the updated webrev, and > also implemented Claes's suggestion. > > http://cr.openjdk.java.net/~bchristi/8153123/webrev.01/index.html Thank you for making this change. It looks good! I've reviewed this. Coleen > > -Brent > On 04/05/2016 11:25 AM, Coleen Phillimore wrote: >> >> A correction below. >> >> On 4/5/16 1:29 PM, Coleen Phillimore wrote: >>> >>> Also meant to include core-libs-dev in the email. >>> Thanks, >>> Coleen >>> >>> On 4/5/16 1:27 PM, Coleen Phillimore wrote: >>>> >>>> Hi, I've reviewed the hotspot changes and some of the jdk changes. >>>> This looks really good. >>>> >>>> One comment about the jvm function names: >>>> >>>> I think FillInStackTraceElement is too close of a name to >>>> Throwable::fill_in_stack_trace(). >>>> >>>> -JVM_ENTRY(void, JVM_SetMethodInfo(JNIEnv *env, jobject frame)) >>>> +JVM_ENTRY(void, JVM_FillInStackTraceElement(JNIEnv *env, jobject >>>> frame, jobject stack)) >>>> JVMWrapper("JVM_SetMethodInfo"); >>>> - Handle stackFrame(THREAD, JNIHandles::resolve(frame)); >>>> - java_lang_StackFrameInfo::fill_methodInfo(stackFrame, THREAD); >>>> + Handle stack_frame_info(THREAD, JNIHandles::resolve(frame)); >>>> + Handle stack_trace_element(THREAD, JNIHandles::resolve(stack)); >>>> + java_lang_StackFrameInfo::fill_methodInfo(stack_frame_info, >>>> stack_trace_element, THREAD); JVM_END >>>> >>>> >>>> And the function is called fill_methodInfo in the javaClasses >>>> function. >>>> >>>> I think the JVM and the java_lang_StackFrameInfo function names >>>> should be closer. >>>> >>>> I wonder if the name JVM_ToStackFrameElement() and >>>> java_lang_StackFrameInfo::to_stack_frame_element() would be better >>>> and then it'd match the Java name. >>>> >> >> I meant JVM_ToStackTraceElement() and >> java_lang_StackFrameInfo::to_stack_trace_element(), since it's producing >> a StackTraceElement. >> >> thanks, >> Coleen >>>> Thanks! >>>> Coleen >>>> >>>> On 4/4/16 9:29 PM, Mandy Chung wrote: >>>>>> On Apr 4, 2016, at 4:45 PM, Brent Christian >>>>>> wrote: >>>>>> >>>>>> Hi, >>>>>> >>>>>> I'd like to check in some footprint and code reduction changes to >>>>>> the java.lang.StackWalker implementation. >>>>>> >>>>>> Webrev: >>>>>> http://cr.openjdk.java.net/~bchristi/8153123/webrev.00/ >>>>>> Bug: >>>>>> https://bugs.openjdk.java.net/browse/JDK-8153123 >>>>>> >>>>> This looks good to me. >>>>> >>>>> One thing to mention is that this patch is a follow-up work from the >>>>> investigation on what it takes to enable Throwable to use >>>>> StackWalker (JDK-8141239). The current built-in VM backtrace is very >>>>> compact and performant. We have identified and prototypes the >>>>> performance improvements if Throwable backtrace is generated using >>>>> stack walker. There are some performance gaps that we agree to >>>>> defer JDK-8141239 to a future release and improve the footprint >>>>> performance and GC throughput concerns when MemberNames are stored >>>>> in the throwable backtrace. >>>>> >>>>> Mandy >>>>> >>>> >>> >> > From coleen.phillimore at oracle.com Wed Apr 6 18:39:51 2016 From: coleen.phillimore at oracle.com (Coleen Phillimore) Date: Wed, 6 Apr 2016 14:39:51 -0400 Subject: RFR 8151939: VM_Version_init() print buffer is too small In-Reply-To: <57044C4C.9080601@oracle.com> References: <57044C4C.9080601@oracle.com> Message-ID: <570557F7.5050400@oracle.com> I removed ancient logging from the signal handler and left a corrected comment instead. 
open webrev at http://cr.openjdk.java.net/~coleenp/8151939.02/webrev bug link https://bugs.openjdk.java.net/browse/JDK-8151939 Thanks, Coleen On 4/5/16 7:37 PM, Coleen Phillimore wrote: > Summary: Increase buffer size, use logging to print out version and os > information > > This replaces several -XX:+PrintMiscellaneous -XX:+Verbose to -Xlog:os > or -Xlog:os+cpu. Most use info level logging because it's only > printed once at the beginning, except where printing is in the signal > handler, which uses debug level. Also, errors in setup use info level > (not warning) since they never printed the warnings before without > PrintMiscellaneous and Verbose. > > busaa027% java -Xlog:os -version > [0.008s][info][os] SafePoint Polling address: 0x00007fde1d37f000 > [0.008s][info][os] Memory Serialize Page address: 0x00007fde1d37d000 > [0.009s][info][os] HotSpot is running with glibc 2.12, NPTL 2.12 > java version "9-internal" > Java(TM) SE Runtime Environment (fastdebug build > 9-internal+0-2016-04-05-170806.cphillim.jdk9.vm-version) > Java HotSpot(TM) 64-Bit Server VM (fastdebug build > 9-internal+0-2016-04-05-170806.cphillim.jdk9.vm-version-coleen, mixed > mode) > > busaa027% java -Xlog:os,os+cpu -version > [0.008s][info][os] SafePoint Polling address: 0x00007f49c021f000 > [0.008s][info][os] Memory Serialize Page address: 0x00007f49c021d000 > [0.009s][info][os] HotSpot is running with glibc 2.12, NPTL 2.12 > [0.011s][info][os,cpu] Logical CPUs per core: 2 > [0.011s][info][os,cpu] L1 data cache line size: 64 > [0.011s][info][os,cpu] UseSSE=4 UseAVX=2 UseAES=1 > MaxVectorSize=64Allocation prefetching: PREFETCHNTA at distance 192, 4 > lines of 64 bytes > [0.011s][info][os,cpu] PrefetchCopyIntervalInBytes 576 > [0.011s][info][os,cpu] PrefetchScanIntervalInBytes 576 > [0.011s][info][os,cpu] PrefetchFieldsAhead 1 > [0.011s][info][os,cpu] ContendedPaddingWidth 128 > [0.011s][info][os,cpu] CPU:total 72 (18 cores per cpu, 2 threads per > core) family 6 model 63 stepping 2, cmov, cx8, fxsr, mmx, sse, sse2, > sse3, ssse3, sse4.1, sse4.2, popcnt, avx, avx2, aes, clmul, erms, > lzcnt, ht, tsc, tscinvbit, bmi1, bmi2 > [0.011s][info][os,cpu] CPU Model and flags from /proc/cpuinfo: > [0.011s][info][os,cpu] model name : Intel(R) Xeon(R) CPU E5-2699 v3 > @ 2.30GHz > [0.011s][info][os,cpu] flags : fpu vme de pse tsc msr pae mce > cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse > sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc > arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf > eagerfpu pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 fma > cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic movbe popcnt > tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm ida arat epb > xsaveopt pln pts dtherm tpr_shadow vnmi flexpriority ept vpid fsgsbase > tsc_adjust bmi1 avx2 smep bmi2 erms invpcid > java version "9-internal" > Java(TM) SE Runtime Environment (fastdebug build > 9-internal+0-2016-04-05-170806.cphillim.jdk9.vm-version) > Java HotSpot(TM) 64-Bit Server VM (fastdebug build > 9-internal+0-2016-04-05-170806.cphillim.jdk9.vm-version-coleen, mixed > mode) > > open webrev at http://cr.openjdk.java.net/~coleenp/8151939.01/webrev > bug link https://bugs.openjdk.java.net/browse/JDK-8151939 > > Tested in rbt and jprt. 
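For readers who have not used the new Unified Logging macros yet, the conversion described above follows roughly this pattern. The snippet is a fragment, not a standalone program: it assumes HotSpot's logging and globalDefinitions headers, and the function and variable names are made up; only the log_info(os) / log_info(os, cpu) and p2i() idioms are the real API.

  #include "logging/log.hpp"                   // log_info(...)
  #include "utilities/globalDefinitions.hpp"   // INTPTR_FORMAT, p2i()

  // Before: only printed when -XX:+PrintMiscellaneous -XX:+Verbose were set.
  //   if (PrintMiscellaneous && Verbose) {
  //     tty->print_cr("SafePoint Polling address: " INTPTR_FORMAT,
  //                   (intptr_t)polling_page);
  //   }

  // After: always compiled in, enabled at runtime with -Xlog:os or -Xlog:os+cpu.
  void log_vm_init_info(address polling_page, uint l1_line_size) {
    log_info(os)("SafePoint Polling address: " INTPTR_FORMAT, p2i(polling_page));
    log_info(os, cpu)("L1 data cache line size: %u", l1_line_size);
  }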
> > Thanks, > Coleen From gnu.andrew at redhat.com Wed Apr 6 18:59:33 2016 From: gnu.andrew at redhat.com (Andrew Hughes) Date: Wed, 6 Apr 2016 14:59:33 -0400 (EDT) Subject: RFR(XS): 8153275: Zero JVM fails to initialize after JDK-8152440 In-Reply-To: <57037BFB.1060606@redhat.com> References: <1459791895.3762.14.camel@redhat.com> <57037BFB.1060606@redhat.com> Message-ID: <1716616804.19704221.1459969173210.JavaMail.zimbra@redhat.com> ----- Original Message ----- > On 04/04/16 18:44, Severin Gehwolf wrote: > > Hi, > > > > Could somebody please sponsor and review the following Zero-only fix? > > The fix for JDK-8152440 was incorrect in that it set the value > > for InitArrayShortSize to an illegal value (-1) failing constraint > > validation. Albeit not being used it must still pass constraint > > validation. Otherwise, the JVM fails to initialize and all bets are > > off. Thoughts? > > > > Bug: https://bugs.openjdk.java.net/browse/JDK-8153275 > > webrev: http://cr.openjdk.java.net/~sgehwolf/webrevs/JDK-8153275/webrev.01/ > > OK, but please make the comment clearer. I didn't understand it. > > "the allowed range [ 0 ... 9223372036854775807 ]" > > is much clearer. > > Andrew. > > Maybe using 2^63-1 for the max range would be even clearer? Or 0x7FFFFFFFFFFFFFFF, which is what is used in globalDefinitions.hpp. It's also wrong for 32-bit architectures; the range there is 0 ... 0x7FFFFFFF or 0 ... 2^31-1. >From globalDefinitions.hpp: typedef intptr_t intx; const intx min_intx = (intx)1 << (sizeof(intx)*BitsPerByte-1); const intx max_intx = (uintx)min_intx - 1; -- Andrew :) Senior Free Java Software Engineer Red Hat, Inc. (http://www.redhat.com) PGP Key: ed25519/35964222 (hkp://keys.gnupg.net) Fingerprint = 5132 579D D154 0ED2 3E04 C5A0 CFDA 0F9B 3596 4222 From vladimir.kozlov at oracle.com Wed Apr 6 20:17:50 2016 From: vladimir.kozlov at oracle.com (Vladimir Kozlov) Date: Wed, 6 Apr 2016 13:17:50 -0700 Subject: RFR 8151939: VM_Version_init() print buffer is too small In-Reply-To: <570557F7.5050400@oracle.com> References: <57044C4C.9080601@oracle.com> <570557F7.5050400@oracle.com> Message-ID: <57056EEE.2060904@oracle.com> Should we use p2i() instead of (intprt_t) for polling and mem_serialize pages? os_windows.cpp - does ~Log destruct or will generate 'new line'? I am simple asking since I don't know. os_linux_x86.cpp - do we need \n at the end of logging line?: + log_info(os)("OS version is %d.%d, which %s support SSE/SSE2\n", Why new OsCpuLoggingTest.java test has SAP Copyright? Thanks, Vladimir On 4/6/16 11:39 AM, Coleen Phillimore wrote: > > I removed ancient logging from the signal handler and left a corrected > comment instead. > > open webrev at http://cr.openjdk.java.net/~coleenp/8151939.02/webrev > bug link https://bugs.openjdk.java.net/browse/JDK-8151939 > > Thanks, > Coleen > > On 4/5/16 7:37 PM, Coleen Phillimore wrote: >> Summary: Increase buffer size, use logging to print out version and os >> information >> >> This replaces several -XX:+PrintMiscellaneous -XX:+Verbose to -Xlog:os >> or -Xlog:os+cpu. Most use info level logging because it's only >> printed once at the beginning, except where printing is in the signal >> handler, which uses debug level. Also, errors in setup use info level >> (not warning) since they never printed the warnings before without >> PrintMiscellaneous and Verbose. 
>> >> busaa027% java -Xlog:os -version >> [0.008s][info][os] SafePoint Polling address: 0x00007fde1d37f000 >> [0.008s][info][os] Memory Serialize Page address: 0x00007fde1d37d000 >> [0.009s][info][os] HotSpot is running with glibc 2.12, NPTL 2.12 >> java version "9-internal" >> Java(TM) SE Runtime Environment (fastdebug build >> 9-internal+0-2016-04-05-170806.cphillim.jdk9.vm-version) >> Java HotSpot(TM) 64-Bit Server VM (fastdebug build >> 9-internal+0-2016-04-05-170806.cphillim.jdk9.vm-version-coleen, mixed >> mode) >> >> busaa027% java -Xlog:os,os+cpu -version >> [0.008s][info][os] SafePoint Polling address: 0x00007f49c021f000 >> [0.008s][info][os] Memory Serialize Page address: 0x00007f49c021d000 >> [0.009s][info][os] HotSpot is running with glibc 2.12, NPTL 2.12 >> [0.011s][info][os,cpu] Logical CPUs per core: 2 >> [0.011s][info][os,cpu] L1 data cache line size: 64 >> [0.011s][info][os,cpu] UseSSE=4 UseAVX=2 UseAES=1 >> MaxVectorSize=64Allocation prefetching: PREFETCHNTA at distance 192, 4 >> lines of 64 bytes >> [0.011s][info][os,cpu] PrefetchCopyIntervalInBytes 576 >> [0.011s][info][os,cpu] PrefetchScanIntervalInBytes 576 >> [0.011s][info][os,cpu] PrefetchFieldsAhead 1 >> [0.011s][info][os,cpu] ContendedPaddingWidth 128 >> [0.011s][info][os,cpu] CPU:total 72 (18 cores per cpu, 2 threads per >> core) family 6 model 63 stepping 2, cmov, cx8, fxsr, mmx, sse, sse2, >> sse3, ssse3, sse4.1, sse4.2, popcnt, avx, avx2, aes, clmul, erms, >> lzcnt, ht, tsc, tscinvbit, bmi1, bmi2 >> [0.011s][info][os,cpu] CPU Model and flags from /proc/cpuinfo: >> [0.011s][info][os,cpu] model name : Intel(R) Xeon(R) CPU E5-2699 v3 >> @ 2.30GHz >> [0.011s][info][os,cpu] flags : fpu vme de pse tsc msr pae mce >> cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse >> sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc >> arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf >> eagerfpu pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 fma >> cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic movbe popcnt >> tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm ida arat epb >> xsaveopt pln pts dtherm tpr_shadow vnmi flexpriority ept vpid fsgsbase >> tsc_adjust bmi1 avx2 smep bmi2 erms invpcid >> java version "9-internal" >> Java(TM) SE Runtime Environment (fastdebug build >> 9-internal+0-2016-04-05-170806.cphillim.jdk9.vm-version) >> Java HotSpot(TM) 64-Bit Server VM (fastdebug build >> 9-internal+0-2016-04-05-170806.cphillim.jdk9.vm-version-coleen, mixed >> mode) >> >> open webrev at http://cr.openjdk.java.net/~coleenp/8151939.01/webrev >> bug link https://bugs.openjdk.java.net/browse/JDK-8151939 >> >> Tested in rbt and jprt. >> >> Thanks, >> Coleen > From jesper.wilhelmsson at oracle.com Wed Apr 6 21:07:56 2016 From: jesper.wilhelmsson at oracle.com (Jesper Wilhelmsson) Date: Wed, 6 Apr 2016 23:07:56 +0200 Subject: RFR(xs): JDK-8153671 - Quarantine serviceability/tmtools/jstack/JstackThreadTest.java until JDK-8153319 is fixed Message-ID: <57057AAC.50303@oracle.com> Hi, Can I have a quick Review of this change to quarantine a broken test? 
Bug: https://bugs.openjdk.java.net/browse/JDK-8153671 Webrev: http://cr.openjdk.java.net/~jwilhelm/8153671/webrev.00/ Thanks, /Jesper From joseph.provino at oracle.com Wed Apr 6 21:11:05 2016 From: joseph.provino at oracle.com (Joseph Provino) Date: Wed, 6 Apr 2016 17:11:05 -0400 Subject: RFR(xs): JDK-8153671 - Quarantine serviceability/tmtools/jstack/JstackThreadTest.java until JDK-8153319 is fixed In-Reply-To: <57057AAC.50303@oracle.com> References: <57057AAC.50303@oracle.com> Message-ID: <57057B69.10409@oracle.com> Looks good. joe On 4/6/2016 5:07 PM, Jesper Wilhelmsson wrote: > Hi, > > Can I have a quick Review of this change to quarantine a broken test? > > Bug: https://bugs.openjdk.java.net/browse/JDK-8153671 > Webrev: http://cr.openjdk.java.net/~jwilhelm/8153671/webrev.00/ > > Thanks, > /Jesper From jesper.wilhelmsson at oracle.com Wed Apr 6 21:12:00 2016 From: jesper.wilhelmsson at oracle.com (Jesper Wilhelmsson) Date: Wed, 6 Apr 2016 23:12:00 +0200 Subject: RFR(xs): JDK-8153671 - Quarantine serviceability/tmtools/jstack/JstackThreadTest.java until JDK-8153319 is fixed In-Reply-To: <57057B69.10409@oracle.com> References: <57057AAC.50303@oracle.com> <57057B69.10409@oracle.com> Message-ID: <57057BA0.6000500@oracle.com> Thanks Joe! /Jesper Den 6/4/16 kl. 23:11, skrev Joseph Provino: > > Looks good. > > joe > > On 4/6/2016 5:07 PM, Jesper Wilhelmsson wrote: >> Hi, >> >> Can I have a quick Review of this change to quarantine a broken test? >> >> Bug: https://bugs.openjdk.java.net/browse/JDK-8153671 >> Webrev: http://cr.openjdk.java.net/~jwilhelm/8153671/webrev.00/ >> >> Thanks, >> /Jesper > From amy.lu at oracle.com Wed Apr 6 05:59:43 2016 From: amy.lu at oracle.com (Amy Lu) Date: Wed, 6 Apr 2016 13:59:43 +0800 Subject: [9] RFR JDK-8153564: Add java/nio/Buffer/BasicByte.java to exclude list until JDK-8153563 is fixed In-Reply-To: <57049B18.5050307@oracle.com> References: <57049B18.5050307@oracle.com> Message-ID: <5704A5CF.2090804@oracle.com> java/nio/Buffer/CopyDirectMemory.java run into the same issue, maybe it could be problem listed together in this patch? Thanks, Amy On 4/6/16 1:14 PM, Alejandro Murillo wrote: > > I'd like to push the changeset below to exclude > java/nio/Buffer/BasicByte.java > It started failing after the hotspot snapshot was pushed to jdk9/dev > tonight. > https://bugs.openjdk.java.net/browse/JDK-8153563 has been filed for > that failure. > > $ hg -R jdk9.dev/jdk tip -pv > > changeset: 14082:5c98c9ad8ff2 > tag: tip > user: amurillo > date: Tue Apr 05 22:06:15 2016 -0700 > files: test/ProblemList.txt > description: > 8153564: Add java/nio/Buffer/BasicByte.java to exclude list until > JDK-8153563 is fixed > Reviewed-by: tbd > > > diff -r 04f56d4ca167 -r 5c98c9ad8ff2 test/ProblemList.txt > --- a/test/ProblemList.txt Tue Apr 05 20:02:21 2016 -0700 > +++ b/test/ProblemList.txt Tue Apr 05 22:06:15 2016 -0700 > @@ -185,6 +185,8 @@ > > java/nio/charset/coders/BashStreams.java 8149712 generic-all > > +java/nio/Buffer/BasicByte.java 8153563 generic-all > + > ############################################################################ > > > # jdk_rmi > > From daniel.daugherty at oracle.com Wed Apr 6 21:40:56 2016 From: daniel.daugherty at oracle.com (Daniel D. Daugherty) Date: Wed, 6 Apr 2016 15:40:56 -0600 Subject: RFR(xs): JDK-8153671 - Quarantine serviceability/tmtools/jstack/JstackThreadTest.java until JDK-8153319 is fixed In-Reply-To: <57057AAC.50303@oracle.com> References: <57057AAC.50303@oracle.com> Message-ID: <57058268.8010802@oracle.com> Thumbs up. 
Dan On 4/6/16 3:07 PM, Jesper Wilhelmsson wrote: > Hi, > > Can I have a quick Review of this change to quarantine a broken test? > > Bug: https://bugs.openjdk.java.net/browse/JDK-8153671 > Webrev: http://cr.openjdk.java.net/~jwilhelm/8153671/webrev.00/ > > Thanks, > /Jesper From jesper.wilhelmsson at oracle.com Wed Apr 6 21:41:02 2016 From: jesper.wilhelmsson at oracle.com (Jesper Wilhelmsson) Date: Wed, 6 Apr 2016 23:41:02 +0200 Subject: RFR(xs): JDK-8153671 - Quarantine serviceability/tmtools/jstack/JstackThreadTest.java until JDK-8153319 is fixed In-Reply-To: <57058268.8010802@oracle.com> References: <57057AAC.50303@oracle.com> <57058268.8010802@oracle.com> Message-ID: <5705826E.7010205@oracle.com> Thanks Dan! /Jesper Den 6/4/16 kl. 23:40, skrev Daniel D. Daugherty: > Thumbs up. > > Dan > > > On 4/6/16 3:07 PM, Jesper Wilhelmsson wrote: >> Hi, >> >> Can I have a quick Review of this change to quarantine a broken test? >> >> Bug: https://bugs.openjdk.java.net/browse/JDK-8153671 >> Webrev: http://cr.openjdk.java.net/~jwilhelm/8153671/webrev.00/ >> >> Thanks, >> /Jesper > From mikael.vidstedt at oracle.com Wed Apr 6 22:19:02 2016 From: mikael.vidstedt at oracle.com (Mikael Vidstedt) Date: Wed, 6 Apr 2016 15:19:02 -0700 Subject: Merging jdk9/hs-rt with jdk9/hs In-Reply-To: <56F08ACA.6030705@oracle.com> References: <56F08ACA.6030705@oracle.com> Message-ID: <57058B56.7060605@oracle.com> Having heard no feedback[1], we're going to go ahead with this experiment and the plan is to do the switch next week, *Friday April 15th*. Again, please note that any outstanding work based on jdk9/hs-rt will have to be rebased on jdk9/hs once the switch is made. More information as we get closer to the actual switchover. Let us know if you have any concerns with the date, and/or any feedback on how it's working out. Cheers, Mikael [1] Not even from Volker *hint* ;) On 3/21/2016 4:59 PM, Mikael Vidstedt wrote: > > All, > > The JDK 9 development of Hotspot is primarily done in two different > mercurial forests: jdk9/hs-rt[1], and jdk9/hs-comp[2]. In June of last > year we moved[3] all the GC development from jdk9/hs-gc[4] to > jdk9/hs-rt, and the experience so far has been a good one. Change > propagation (from jdk9/hs-rt to jdk9/hs-gc and vice verse) is now a > non-issue, we get testing faster on the union of the changes where > previously it could take weeks to catch a GC related bug in RT > testing, etc. > > However, both jdk9/hs-rt and jdk9/hs-comp still integrate through a > third forest - jdk9/hs[5], aka. hs "main" - before the changes are > integrated to jdk9/dev[6]. In line with the previous simplification, > we would like to suggest a further simplification of the forest > structure. Specifically, we suggest that the work currently done on > the jdk9/hs-rt forest moves directly to the jdk9/hs forest. In > addition to making the forest structure easier to understand, this > would have the benefit of removing one set of integrations (jdk9/hs > <-> jdk9/hs-rt), which further reduces cost and propagation time. It > is also paving the way for eventually integrating up to jdk9/dev more > often (but that is a separate discussion). > > We suggest that the experiment starts on April 15th, and goes on for > at least two weeks (giving us some time to adapt in case of issues). > Monitoring and evaluation of the new structure will take place > continuously, with an option to revert back if things do not work out. 
> The experiment would keep going for at least a few months, after which > we will evaluate it and depending on the results consider making it > the new standard. If so, the jdk9/hs-rt forest will eventually be > retired, with an option of looking at further reduction of forests > going forward. At least for now, we suggest that jdk9/hs-comp remains > a separate forest and that it integrates through jdk9/hs just like it > does today. > > Much like when we merged the jdk9/hs-gc and jdk9/hs-rt forests we > would leave the jdk9/hs-rt forest around until we see if the > experiment works out. We would also lock it down so that no accidental > integrations are made to it. Once the jdk9/hs-rt forest is locked > down, any work in flight based on it would have to be rebased on jdk9/hs. > > Please let us know if you have any feedback or questions! > > Cheers, > Mikael > > [1]http://hg.openjdk.java.net/jdk9/hs-rt > [2]http://hg.openjdk.java.net/jdk9/hs-comp > [3]http://mail.openjdk.java.net/pipermail/hotspot-dev/2015-May/thread.html > > [4]http://hg.openjdk.java.net/jdk9/hs-gc > [5]http://hg.openjdk.java.net/jdk9/hs > [6]http://hg.openjdk.java.net/jdk9/dev > From kim.barrett at oracle.com Thu Apr 7 05:17:51 2016 From: kim.barrett at oracle.com (Kim Barrett) Date: Thu, 7 Apr 2016 01:17:51 -0400 Subject: RFR: 8153658: Redundant memory copy in LogStreamNoResourceMark In-Reply-To: <570547BA.7070003@oracle.com> References: <570547BA.7070003@oracle.com> Message-ID: > On Apr 6, 2016, at 1:30 PM, Stefan Karlsson wrote: > > Hi all, > > Please review this patch to remove a redundant memory copy in the UL log stream classes. > > http://cr.openjdk.java.net/~stefank/8153658/webrev.01 > https://bugs.openjdk.java.net/browse/JDK-8153658 > > LogStreamNoResourceMark copies the resource allocated string buffer into a new resource allocated string buffer before copying the data to UL. > > Moreover, this also causes problem when implementing a log stream class using CHeap memory instead of Resource memory. Even though the first allocation is done from CHeap the second copy comes from Resource memory. ------------------------------------------------------------------------------ src/share/vm/logging/logStream.inline.hpp 36 _current_line.write("\0", 1); I think this write isn't needed. stringStream purportedly ensures the internal buffer is NUL terminated, according to a comment in stringStream::write. And the code seems to do that. Hm, except maybe at construction time? That seems like a bug. And I think bad things might happen if a stringStream is constructed with initial_size or fixed_buffer_size of zero, which would also be a bug. ------------------------------------------------------------------------------ From kim.barrett at oracle.com Thu Apr 7 05:23:52 2016 From: kim.barrett at oracle.com (Kim Barrett) Date: Thu, 7 Apr 2016 01:23:52 -0400 Subject: RFR: 8153658: Redundant memory copy in LogStreamNoResourceMark In-Reply-To: References: <570547BA.7070003@oracle.com> Message-ID: > On Apr 7, 2016, at 1:17 AM, Kim Barrett wrote: > >> On Apr 6, 2016, at 1:30 PM, Stefan Karlsson wrote: >> >> Hi all, >> >> Please review this patch to remove a redundant memory copy in the UL log stream classes. >> >> http://cr.openjdk.java.net/~stefank/8153658/webrev.01 >> https://bugs.openjdk.java.net/browse/JDK-8153658 >> >> LogStreamNoResourceMark copies the resource allocated string buffer into a new resource allocated string buffer before copying the data to UL. 
>> >> Moreover, this also causes problem when implementing a log stream class using CHeap memory instead of Resource memory. Even though the first allocation is done from CHeap the second copy comes from Resource memory. > > ------------------------------------------------------------------------------ > src/share/vm/logging/logStream.inline.hpp > 36 _current_line.write("\0", 1); > > I think this write isn't needed. stringStream purportedly ensures the > internal buffer is NUL terminated, according to a comment in > stringStream::write. And the code seems to do that. Hm, except maybe > at construction time? That seems like a bug. And I think bad things > might happen if a stringStream is constructed with initial_size or > fixed_buffer_size of zero, which would also be a bug. > > ------------------------------------------------------------------------------ I forgot to say, the rest of the change looks fine, subject to possible bugs in stringStream. From stefan.karlsson at oracle.com Thu Apr 7 06:54:39 2016 From: stefan.karlsson at oracle.com (Stefan Karlsson) Date: Thu, 7 Apr 2016 08:54:39 +0200 Subject: RFR: 8153658: Redundant memory copy in LogStreamNoResourceMark In-Reply-To: References: <570547BA.7070003@oracle.com> Message-ID: <5706042F.5010507@oracle.com> Hi Kim, On 2016-04-07 07:17, Kim Barrett wrote: >> On Apr 6, 2016, at 1:30 PM, Stefan Karlsson wrote: >> >> Hi all, >> >> Please review this patch to remove a redundant memory copy in the UL log stream classes. >> >> http://cr.openjdk.java.net/~stefank/8153658/webrev.01 >> https://bugs.openjdk.java.net/browse/JDK-8153658 >> >> LogStreamNoResourceMark copies the resource allocated string buffer into a new resource allocated string buffer before copying the data to UL. >> >> Moreover, this also causes problem when implementing a log stream class using CHeap memory instead of Resource memory. Even though the first allocation is done from CHeap the second copy comes from Resource memory. > ------------------------------------------------------------------------------ > src/share/vm/logging/logStream.inline.hpp > 36 _current_line.write("\0", 1); > > I think this write isn't needed. stringStream purportedly ensures the > internal buffer is NUL terminated, according to a comment in > stringStream::write. And the code seems to do that. You're right. My intention is to be able to use this code together with a bufferedStream, which doesn't NULL terminate. See: http://mail.openjdk.java.net/pipermail/hotspot-dev/2016-April/022477.html So, this is the easiest way for me to be able to use the same code for both stringStream and bufferedStream. > Hm, except maybe > at construction time? That seems like a bug. And I think bad things > might happen if a stringStream is constructed with initial_size or > fixed_buffer_size of zero, which would also be a bug. stringStream::as_string() also NULL terminate, so I'm not sure this is a real bug. It is a weird inconsistency, though. 
Thanks for reviewing, StefanK > > ------------------------------------------------------------------------------ > From volker.simonis at gmail.com Thu Apr 7 07:28:50 2016 From: volker.simonis at gmail.com (Volker Simonis) Date: Thu, 7 Apr 2016 09:28:50 +0200 Subject: Merging jdk9/hs-rt with jdk9/hs In-Reply-To: <57058B56.7060605@oracle.com> References: <56F08ACA.6030705@oracle.com> <57058B56.7060605@oracle.com> Message-ID: +1 :) On Thu, Apr 7, 2016 at 12:19 AM, Mikael Vidstedt wrote: > > Having heard no feedback[1], we're going to go ahead with this experiment > and the plan is to do the switch next week, *Friday April 15th*. Again, > please note that any outstanding work based on jdk9/hs-rt will have to be > rebased on jdk9/hs once the switch is made. More information as we get > closer to the actual switchover. > > Let us know if you have any concerns with the date, and/or any feedback on > how it's working out. > > Cheers, > Mikael > > [1] Not even from Volker *hint* ;) > > > On 3/21/2016 4:59 PM, Mikael Vidstedt wrote: >> >> >> All, >> >> The JDK 9 development of Hotspot is primarily done in two different >> mercurial forests: jdk9/hs-rt[1], and jdk9/hs-comp[2]. In June of last year >> we moved[3] all the GC development from jdk9/hs-gc[4] to jdk9/hs-rt, and the >> experience so far has been a good one. Change propagation (from jdk9/hs-rt >> to jdk9/hs-gc and vice verse) is now a non-issue, we get testing faster on >> the union of the changes where previously it could take weeks to catch a GC >> related bug in RT testing, etc. >> >> However, both jdk9/hs-rt and jdk9/hs-comp still integrate through a third >> forest - jdk9/hs[5], aka. hs "main" - before the changes are integrated to >> jdk9/dev[6]. In line with the previous simplification, we would like to >> suggest a further simplification of the forest structure. Specifically, we >> suggest that the work currently done on the jdk9/hs-rt forest moves directly >> to the jdk9/hs forest. In addition to making the forest structure easier to >> understand, this would have the benefit of removing one set of integrations >> (jdk9/hs <-> jdk9/hs-rt), which further reduces cost and propagation time. >> It is also paving the way for eventually integrating up to jdk9/dev more >> often (but that is a separate discussion). >> >> We suggest that the experiment starts on April 15th, and goes on for at >> least two weeks (giving us some time to adapt in case of issues). Monitoring >> and evaluation of the new structure will take place continuously, with an >> option to revert back if things do not work out. The experiment would keep >> going for at least a few months, after which we will evaluate it and >> depending on the results consider making it the new standard. If so, the >> jdk9/hs-rt forest will eventually be retired, with an option of looking at >> further reduction of forests going forward. At least for now, we suggest >> that jdk9/hs-comp remains a separate forest and that it integrates through >> jdk9/hs just like it does today. >> >> Much like when we merged the jdk9/hs-gc and jdk9/hs-rt forests we would >> leave the jdk9/hs-rt forest around until we see if the experiment works out. >> We would also lock it down so that no accidental integrations are made to >> it. Once the jdk9/hs-rt forest is locked down, any work in flight based on >> it would have to be rebased on jdk9/hs. >> >> Please let us know if you have any feedback or questions! 
>> >> Cheers, >> Mikael >> >> [1]http://hg.openjdk.java.net/jdk9/hs-rt >> [2]http://hg.openjdk.java.net/jdk9/hs-comp >> [3]http://mail.openjdk.java.net/pipermail/hotspot-dev/2015-May/thread.html >> [4]http://hg.openjdk.java.net/jdk9/hs-gc >> [5]http://hg.openjdk.java.net/jdk9/hs >> [6]http://hg.openjdk.java.net/jdk9/dev >> > From magnus.ihse.bursie at oracle.com Thu Apr 7 09:57:21 2016 From: magnus.ihse.bursie at oracle.com (Magnus Ihse Bursie) Date: Thu, 7 Apr 2016 11:57:21 +0200 Subject: RFR: JDK-8152666: The new Hotspot Build System In-Reply-To: <5704D28E.1060304@oracle.com> References: <56F4F0EE.1040508@oracle.com> <5703FF7C.6010309@oracle.com> <5704D28E.1060304@oracle.com> Message-ID: <57062F01.70701@oracle.com> On 2016-04-06 11:10, Erik Joelsson wrote: > Hello Dan and thank you for the review! I know it's a lot to chew > through. > > I have incorporated your changes and published a new webrev: > http://cr.openjdk.java.net/~erikj/8152666/webrev.02/ I'm not sure if I'm formally allowed to be a reviewer, since I've wrote the absolute majority of the code myself. Nevertheless, I've looked through the webrev carefully, including the latest changes by you, and it looks good to me. Ship it! :-) Just a few minor comments: In compare.sh.in: Why the added export of DEBUG_LEVEL? I can't find any reference to it in the changes in compare.sh. Was it references in some earlier change and we missed to export it? In flags.m4/platform.m4: It is unfortunate that we needed to make the build/target duplication in this change. It makes the messy addition of the JVM_CFLAGS even messier. :( But then again, we've always planned a follow-up restructuring of the flag handling after the integration of the new Hotspot build system. It just got a bit more urgent. /Magnus > > On 2016-04-05 20:10, Daniel D. Daugherty wrote: >> >> >> > The new build supports the following variants: >> > >> > * server (C1+C2) >> >> The above "server" variant is the "tiered server". Does the new >> build system support the "C2 server" variant? What about the >> 32-bit server and 64-bit server build variants? For example, >> on Linux you can have: >> >> * C1/Client, 32-bit >> * C2/Server, 32-bit >> * Tiered (C1 & C2), 32-bit >> * C2/Server, 64-bit >> * Tiered (C1 + C2), 64-bit >> >> The above wide range of variants is also true for Win*. >> > There is a way to achieve this even if it's not as straight forward. > It's controlled through the new "jvm-feature" setting. To build a > completely custom set of features for a jvm, you set the > --with-jvm-variants=custom and then define the full feature set using > --with-jvm-features=compiler2,... For "server, client, core, minimal, > zero and zeroshark" there is a predefined set of features while the > custom variant has no features by default. >> >> General >> Please make sure all the copyrights are updated. >> > Done >> >> common/autoconf/basics.m4 >> No comments. >> >> common/autoconf/build-performance.m4 >> No comments. >> >> common/autoconf/buildjdk-spec.gmk.in >> No comments. >> >> common/autoconf/compare.sh.in >> No comments. >> >> common/autoconf/configure >> No comments. >> >> common/autoconf/configure.ac >> No comments. >> >> common/autoconf/flags.m4 >> L274: SHARED_LIBRARY_FLAGS="-dynamiclib >> -compatibility_version 1.0.0 -current_version 1.0.0 $PICFLAG" >> L275: JVM_CFLAGS="$JVM_CFLAGS -fPIC" >> >> L275 is new, but seeing it next to L274 makes me wonder if >> $PICFLAG should be used instead of the literal '-fPIC'? 
> Fixed >> >> L303: JVM_CFLAGS="$JVM_CFLAGS -fPIC" >> Same question about literal '-fPIC'. >> > Not sure, leaving for now. It seems we leave the PICFLAG empty for the > JDK build and only add it to the hotspot build. This should be > addressed in a followup where we try to align flag usage more between > the different libraries. >> For most of the changes to flags.m4, I can't see how any of it >> relates to the new HotSpot build. >> >> Update: Now I'm wondering if this is one of those files that >> we typically don't review because it is auto generated. >> Sorry, don't remember for sure. > It's a file that should be reviewed, only generated-configure.sh can > be ignored. The majority of the changes in here are related to cross > compiling in the modular world. When cross compiling now, we need to > also build a jvm for the build platform in order to run jlink and jmod > when building images. With the old hotspot build, that was simpler, > just invoke the hotspot build with some ARCH and compiler related > variables set. For the rest of the JDK build, an approximation of > flags used was enough so the problem was never fully solved. > > In the new build, we derive all the compiler options in configure so I > had to introduce a more proper solution. I did this by parameterizing > some macros in flags.m4 and platform.m4 so that we can run them twice, > once for the "target" toolchain" and one for the "build" toolchain. > These are the majority of the changes you are seeing. I also removed > the old hard coded "build" versions of certain flag and platform > variables. >> common/autoconf/generated-configure.sh >> 2642 lines changed... I think this is one of those files >> you're supposed to skip in build-dev review... :-| > Yes, please do. >> >> common/autoconf/help.m4 >> L179: $PRINTF "Which are valid to use depends on the target >> platform.\n " >> L180: $PRINTF "%s " $VALID_JVM_FEATURES >> Why are there blanks after the last '\n' on L179 instead of >> at the beginning of L180? >> > If you do $PRINTF " %s " $VALID_JVM_FEATURES, it adds those spaces > between every element in VALID_JVM_FEATURES. >> common/autoconf/hotspot-spec.gmk.in >> No comments. >> >> common/autoconf/hotspot.m4 >> L46: # Check if the specified JVM features are explicitely >> enabled. To be used in >> Typo: 'explicitely' -> 'explicitly' >> >> L59: # server: normal interpreter, and a tiered C1/C2 compiler >> So no support for a C2-only server config? >> >> L77: # Have the user listed more than one variant? >> Typo: 'Have' -> 'Has' >> > fixed >> common/autoconf/jdk-options.m4 >> No comments other than to say thanks for keeping support >> for 'optimized' builds. >> >> common/autoconf/jdk-version.m4 >> No comments. >> >> common/autoconf/lib-std.m4 >> No comments. >> >> common/autoconf/libraries.m4 >> No comments. >> >> common/autoconf/platform.m4 >> No comments, but mind numbing amount of diffs. >> > Same explanation as for flags.m4 >> common/autoconf/spec.gmk.in >> No comments. >> >> common/autoconf/toolchain.m4 >> No comments. >> >> common/autoconf/version-numbers >> No comments. >> >> common/bin/compare.sh >> No comments. >> >> common/bin/compare_exceptions.sh.incl >> No comments. >> >> make/Jprt.gmk >> No comments. >> >> make/Main.gmk >> No comments other than the 'hotspot-ide-project' target >> looks interesting... >> > This is the replacement for the visual studio project generator. We > currently only support VS here. >> make/common/MakeBase.gmk >> No comments. 
>> >> make/common/NativeCompilation.gmk >> L649: else ifeq (LOW, $$($1_OPTIMIZATION)) >> L650: $1_OPT_CFLAGS := $(C_O_FLAG_NORM) >> L651: $1_OPT_CXXFLAGS := $(CXX_O_FLAG_NORM) >> Instead of "_NORM", I was expecting "_LOW". >> >> L652: else ifeq (HIGH, $$($1_OPTIMIZATION)) >> L653: $1_OPT_CFLAGS := $(C_O_FLAG_HI) >> L654: $1_OPT_CXXFLAGS := $(CXX_O_FLAG_HI) >> Instead of "_HI" I was expecting "_HIGH". >> > The names here were defined way back when we did build infra for the > JDK build. I wouldn't mind better alignment in naming the optimization > levels. >> make/jprt.properties >> L136: # Don't disable precompiled headers on windows. It's simply >> too slow. >> This is a surprise. Not the slowness part, but not being >> able to do a non-PCH JPRT build on Win*. IMHO, it's a >> little too much motherhood... >> > Actually, the old hotspot build does not allow disabling of PCH for > windows at all. The flag is simply ignored. In the new build, we treat > the flag the same on all platforms, so disabling precompiled headers > works on Windows. In the current JPRT config, we disable precompiled > headers on all fastdebug builds as a way of making sure we aren't > breaking that build configuration. We noticed a major build time > regression on Windows fastdebug builds in JPRT until we figured out it > was caused by this. Since we aren't currently disabling precompiled > header on Windows, I see no reason to start now. The build time > regression for just building hotspot is around 2m->12m. >> jdk/make/Import.gmk >> No comments. >> >> jdk/make/copy/Copy-java.base.gmk >> No comments. >> >> jdk/make/lib/CoreLibraries.gmk >> No comments. >> >> hotspot/makefiles/BuildHotspot.gmk >> No comments. >> >> hotspot/makefiles/Dist.gmk >> L52: define macosx_universalize >> I thought MacOS X universal support was going away? >> >> Update: OK, I see the mention of 8069540 ahead... >> > Yeah, we need to be binary the same as the old build for now. > Hopefully we can get rid of the universal stuff soon. >> L120: # these files are identical, and just pick one arbitrarily >> to use as souce. >> Typo: 'souce' -> 'source' >> >> L139: # This might have been defined in a custom extenstion >> Typo: 'extenstion' -> 'extension' >> > fixed >> L168: # NOTE: In the old build, this file was not copied on Windows. >> L169: ifneq ($(OPENJDK_TARGET_OS), windows) >> L170: $(eval $(call SetupCopyFiles, COPY_JVMTI_HTML, \ >> I'm not quite sure why the jvmti.html work is done for >> more than a single platform. >> >> Update: Thinking about this more... I vaguely remember that >> JVM/TI tracing used to be disabled in Client VMs. Don't know >> if that's still the case. > The jvmti.html file is just copied into the docs bundle later. IMO, > the docs bundle should be the same regardless of platform. In practice > we only publish the bundle from one build platform anyway. > > /Erik >> >> hotspot/makefiles/HotspotCommon.gmk >> No comments. >> >> hotspot/makefiles/gensrc/GenerateSources.gmk >> No comments. >> >> hotspot/makefiles/gensrc/GensrcAdlc.gmk >> L98: # NOTE: Windows adlc flags was different in the old >> build. Is this really >> L99: # correct? >> John Rose may know the answer to this historical question. >> >> hotspot/makefiles/gensrc/GensrcDtrace.gmk >> No comments. >> >> hotspot/makefiles/gensrc/GensrcJvmti.gmk >> No comments. >> >> hotspot/makefiles/ide/CreateVSProject.gmk >> No comments. >> >> hotspot/makefiles/lib/CompileDtracePostJvm.gmk >> No comments. >> >> hotspot/makefiles/lib/CompileDtracePreJvm.gmk >> No comments. 
>> >> hotspot/makefiles/lib/CompileJvm.gmk >> No comments. >> >> hotspot/makefiles/lib/CompileLibjsig.gmk >> No comments. >> >> hotspot/makefiles/lib/CompileLibraries.gmk >> No comments. >> >> hotspot/makefiles/lib/JvmFeatures.gmk >> No comments. >> >> hotspot/makefiles/lib/JvmMapfile.gmk >> No comments. >> >> hotspot/makefiles/lib/JvmOverrideFiles.gmk >> No comments. >> >> hotspot/makefiles/mapfiles/libjsig/mapfile-vers-solaris >> hotspot/makefiles/mapfiles/libjvm_db/mapfile-vers >> hotspot/makefiles/mapfiles/libjvm_dtrace/mapfile-vers >> No comments on the mapfiles. >> >> hotspot/makefiles/symbols/symbols-aix >> hotspot/makefiles/symbols/symbols-aix-debug >> hotspot/makefiles/symbols/symbols-linux >> hotspot/makefiles/symbols/symbols-macosx >> hotspot/makefiles/symbols/symbols-shared >> hotspot/makefiles/symbols/symbols-solaris >> hotspot/makefiles/symbols/symbols-solaris-dtrace-compiler1 >> hotspot/makefiles/symbols/symbols-solaris-dtrace-compiler2 >> hotspot/makefiles/symbols/symbols-unix >> No comments on the symbol files. >> >> >> Thumbs up on this fix; I don't think that anything I noted >> above is a show stopper for this changeset. >> >> Dan >> >> >>> >>> /Erik >> > From erik.joelsson at oracle.com Thu Apr 7 10:47:28 2016 From: erik.joelsson at oracle.com (Erik Joelsson) Date: Thu, 7 Apr 2016 12:47:28 +0200 Subject: RFR: JDK-8152666: The new Hotspot Build System In-Reply-To: <57062F01.70701@oracle.com> References: <56F4F0EE.1040508@oracle.com> <5703FF7C.6010309@oracle.com> <5704D28E.1060304@oracle.com> <57062F01.70701@oracle.com> Message-ID: <57063AC0.2000306@oracle.com> On 2016-04-07 11:57, Magnus Ihse Bursie wrote: > On 2016-04-06 11:10, Erik Joelsson wrote: >> Hello Dan and thank you for the review! I know it's a lot to chew >> through. >> >> I have incorporated your changes and published a new webrev: >> http://cr.openjdk.java.net/~erikj/8152666/webrev.02/ > > I'm not sure if I'm formally allowed to be a reviewer, since I've > wrote the absolute majority of the code myself. > > Nevertheless, I've looked through the webrev carefully, including the > latest changes by you, and it looks good to me. Ship it! :-) > Thanks! With you me and Dan we have ample support for pushing this. Now I'm just waiting for hs-rt to get updated from hs-main to avoid having others do the merges with b112. > Just a few minor comments: > > In compare.sh.in: > Why the added export of DEBUG_LEVEL? I can't find any reference to it > in the changes in compare.sh. Was it references in some earlier change > and we missed to export it? > It's in compare_exceptions. I had to do some special exceptions for slowdebug in some cases. /Erik > In flags.m4/platform.m4: > It is unfortunate that we needed to make the build/target duplication > in this change. It makes the messy addition of the JVM_CFLAGS even > messier. :( But then again, we've always planned a follow-up > restructuring of the flag handling after the integration of the new > Hotspot build system. It just got a bit more urgent. > > /Magnus > > >> >> On 2016-04-05 20:10, Daniel D. Daugherty wrote: >>> >>> >>> > The new build supports the following variants: >>> > >>> > * server (C1+C2) >>> >>> The above "server" variant is the "tiered server". Does the new >>> build system support the "C2 server" variant? What about the >>> 32-bit server and 64-bit server build variants? 
For example, >>> on Linux you can have: >>> >>> * C1/Client, 32-bit >>> * C2/Server, 32-bit >>> * Tiered (C1 & C2), 32-bit >>> * C2/Server, 64-bit >>> * Tiered (C1 + C2), 64-bit >>> >>> The above wide range of variants is also true for Win*. >>> >> There is a way to achieve this even if it's not as straight forward. >> It's controlled through the new "jvm-feature" setting. To build a >> completely custom set of features for a jvm, you set the >> --with-jvm-variants=custom and then define the full feature set using >> --with-jvm-features=compiler2,... For "server, client, core, minimal, >> zero and zeroshark" there is a predefined set of features while the >> custom variant has no features by default. >>> >>> General >>> Please make sure all the copyrights are updated. >>> >> Done >>> >>> common/autoconf/basics.m4 >>> No comments. >>> >>> common/autoconf/build-performance.m4 >>> No comments. >>> >>> common/autoconf/buildjdk-spec.gmk.in >>> No comments. >>> >>> common/autoconf/compare.sh.in >>> No comments. >>> >>> common/autoconf/configure >>> No comments. >>> >>> common/autoconf/configure.ac >>> No comments. >>> >>> common/autoconf/flags.m4 >>> L274: SHARED_LIBRARY_FLAGS="-dynamiclib >>> -compatibility_version 1.0.0 -current_version 1.0.0 $PICFLAG" >>> L275: JVM_CFLAGS="$JVM_CFLAGS -fPIC" >>> >>> L275 is new, but seeing it next to L274 makes me wonder if >>> $PICFLAG should be used instead of the literal '-fPIC'? >> Fixed >>> >>> L303: JVM_CFLAGS="$JVM_CFLAGS -fPIC" >>> Same question about literal '-fPIC'. >>> >> Not sure, leaving for now. It seems we leave the PICFLAG empty for >> the JDK build and only add it to the hotspot build. This should be >> addressed in a followup where we try to align flag usage more between >> the different libraries. >>> For most of the changes to flags.m4, I can't see how any of it >>> relates to the new HotSpot build. >>> >>> Update: Now I'm wondering if this is one of those files that >>> we typically don't review because it is auto generated. >>> Sorry, don't remember for sure. >> It's a file that should be reviewed, only generated-configure.sh can >> be ignored. The majority of the changes in here are related to cross >> compiling in the modular world. When cross compiling now, we need to >> also build a jvm for the build platform in order to run jlink and >> jmod when building images. With the old hotspot build, that was >> simpler, just invoke the hotspot build with some ARCH and compiler >> related variables set. For the rest of the JDK build, an >> approximation of flags used was enough so the problem was never fully >> solved. >> >> In the new build, we derive all the compiler options in configure so >> I had to introduce a more proper solution. I did this by >> parameterizing some macros in flags.m4 and platform.m4 so that we can >> run them twice, once for the "target" toolchain" and one for the >> "build" toolchain. These are the majority of the changes you are >> seeing. I also removed the old hard coded "build" versions of certain >> flag and platform variables. >>> common/autoconf/generated-configure.sh >>> 2642 lines changed... I think this is one of those files >>> you're supposed to skip in build-dev review... :-| >> Yes, please do. >>> >>> common/autoconf/help.m4 >>> L179: $PRINTF "Which are valid to use depends on the target >>> platform.\n " >>> L180: $PRINTF "%s " $VALID_JVM_FEATURES >>> Why are there blanks after the last '\n' on L179 instead of >>> at the beginning of L180? 
>>> >> If you do $PRINTF " %s " $VALID_JVM_FEATURES, it adds those spaces >> between every element in VALID_JVM_FEATURES. >>> common/autoconf/hotspot-spec.gmk.in >>> No comments. >>> >>> common/autoconf/hotspot.m4 >>> L46: # Check if the specified JVM features are explicitely >>> enabled. To be used in >>> Typo: 'explicitely' -> 'explicitly' >>> >>> L59: # server: normal interpreter, and a tiered C1/C2 compiler >>> So no support for a C2-only server config? >>> >>> L77: # Have the user listed more than one variant? >>> Typo: 'Have' -> 'Has' >>> >> fixed >>> common/autoconf/jdk-options.m4 >>> No comments other than to say thanks for keeping support >>> for 'optimized' builds. >>> >>> common/autoconf/jdk-version.m4 >>> No comments. >>> >>> common/autoconf/lib-std.m4 >>> No comments. >>> >>> common/autoconf/libraries.m4 >>> No comments. >>> >>> common/autoconf/platform.m4 >>> No comments, but mind numbing amount of diffs. >>> >> Same explanation as for flags.m4 >>> common/autoconf/spec.gmk.in >>> No comments. >>> >>> common/autoconf/toolchain.m4 >>> No comments. >>> >>> common/autoconf/version-numbers >>> No comments. >>> >>> common/bin/compare.sh >>> No comments. >>> >>> common/bin/compare_exceptions.sh.incl >>> No comments. >>> >>> make/Jprt.gmk >>> No comments. >>> >>> make/Main.gmk >>> No comments other than the 'hotspot-ide-project' target >>> looks interesting... >>> >> This is the replacement for the visual studio project generator. We >> currently only support VS here. >>> make/common/MakeBase.gmk >>> No comments. >>> >>> make/common/NativeCompilation.gmk >>> L649: else ifeq (LOW, $$($1_OPTIMIZATION)) >>> L650: $1_OPT_CFLAGS := $(C_O_FLAG_NORM) >>> L651: $1_OPT_CXXFLAGS := $(CXX_O_FLAG_NORM) >>> Instead of "_NORM", I was expecting "_LOW". >>> >>> L652: else ifeq (HIGH, $$($1_OPTIMIZATION)) >>> L653: $1_OPT_CFLAGS := $(C_O_FLAG_HI) >>> L654: $1_OPT_CXXFLAGS := $(CXX_O_FLAG_HI) >>> Instead of "_HI" I was expecting "_HIGH". >>> >> The names here were defined way back when we did build infra for the >> JDK build. I wouldn't mind better alignment in naming the >> optimization levels. >>> make/jprt.properties >>> L136: # Don't disable precompiled headers on windows. It's >>> simply too slow. >>> This is a surprise. Not the slowness part, but not being >>> able to do a non-PCH JPRT build on Win*. IMHO, it's a >>> little too much motherhood... >>> >> Actually, the old hotspot build does not allow disabling of PCH for >> windows at all. The flag is simply ignored. In the new build, we >> treat the flag the same on all platforms, so disabling precompiled >> headers works on Windows. In the current JPRT config, we disable >> precompiled headers on all fastdebug builds as a way of making sure >> we aren't breaking that build configuration. We noticed a major build >> time regression on Windows fastdebug builds in JPRT until we figured >> out it was caused by this. Since we aren't currently disabling >> precompiled header on Windows, I see no reason to start now. The >> build time regression for just building hotspot is around 2m->12m. >>> jdk/make/Import.gmk >>> No comments. >>> >>> jdk/make/copy/Copy-java.base.gmk >>> No comments. >>> >>> jdk/make/lib/CoreLibraries.gmk >>> No comments. >>> >>> hotspot/makefiles/BuildHotspot.gmk >>> No comments. >>> >>> hotspot/makefiles/Dist.gmk >>> L52: define macosx_universalize >>> I thought MacOS X universal support was going away? >>> >>> Update: OK, I see the mention of 8069540 ahead... >>> >> Yeah, we need to be binary the same as the old build for now. 
>> Hopefully we can get rid of the universal stuff soon. >>> L120: # these files are identical, and just pick one arbitrarily >>> to use as souce. >>> Typo: 'souce' -> 'source' >>> >>> L139: # This might have been defined in a custom extenstion >>> Typo: 'extenstion' -> 'extension' >>> >> fixed >>> L168: # NOTE: In the old build, this file was not copied on >>> Windows. >>> L169: ifneq ($(OPENJDK_TARGET_OS), windows) >>> L170: $(eval $(call SetupCopyFiles, COPY_JVMTI_HTML, \ >>> I'm not quite sure why the jvmti.html work is done for >>> more than a single platform. >>> >>> Update: Thinking about this more... I vaguely remember that >>> JVM/TI tracing used to be disabled in Client VMs. Don't know >>> if that's still the case. >> The jvmti.html file is just copied into the docs bundle later. IMO, >> the docs bundle should be the same regardless of platform. In >> practice we only publish the bundle from one build platform anyway. >> >> /Erik >>> >>> hotspot/makefiles/HotspotCommon.gmk >>> No comments. >>> >>> hotspot/makefiles/gensrc/GenerateSources.gmk >>> No comments. >>> >>> hotspot/makefiles/gensrc/GensrcAdlc.gmk >>> L98: # NOTE: Windows adlc flags was different in the old >>> build. Is this really >>> L99: # correct? >>> John Rose may know the answer to this historical question. >>> >>> hotspot/makefiles/gensrc/GensrcDtrace.gmk >>> No comments. >>> >>> hotspot/makefiles/gensrc/GensrcJvmti.gmk >>> No comments. >>> >>> hotspot/makefiles/ide/CreateVSProject.gmk >>> No comments. >>> >>> hotspot/makefiles/lib/CompileDtracePostJvm.gmk >>> No comments. >>> >>> hotspot/makefiles/lib/CompileDtracePreJvm.gmk >>> No comments. >>> >>> hotspot/makefiles/lib/CompileJvm.gmk >>> No comments. >>> >>> hotspot/makefiles/lib/CompileLibjsig.gmk >>> No comments. >>> >>> hotspot/makefiles/lib/CompileLibraries.gmk >>> No comments. >>> >>> hotspot/makefiles/lib/JvmFeatures.gmk >>> No comments. >>> >>> hotspot/makefiles/lib/JvmMapfile.gmk >>> No comments. >>> >>> hotspot/makefiles/lib/JvmOverrideFiles.gmk >>> No comments. >>> >>> hotspot/makefiles/mapfiles/libjsig/mapfile-vers-solaris >>> hotspot/makefiles/mapfiles/libjvm_db/mapfile-vers >>> hotspot/makefiles/mapfiles/libjvm_dtrace/mapfile-vers >>> No comments on the mapfiles. >>> >>> hotspot/makefiles/symbols/symbols-aix >>> hotspot/makefiles/symbols/symbols-aix-debug >>> hotspot/makefiles/symbols/symbols-linux >>> hotspot/makefiles/symbols/symbols-macosx >>> hotspot/makefiles/symbols/symbols-shared >>> hotspot/makefiles/symbols/symbols-solaris >>> hotspot/makefiles/symbols/symbols-solaris-dtrace-compiler1 >>> hotspot/makefiles/symbols/symbols-solaris-dtrace-compiler2 >>> hotspot/makefiles/symbols/symbols-unix >>> No comments on the symbol files. >>> >>> >>> Thumbs up on this fix; I don't think that anything I noted >>> above is a show stopper for this changeset. >>> >>> Dan >>> >>> >>>> >>>> /Erik >>> >> > From maurizio.cimadamore at oracle.com Thu Apr 7 11:20:23 2016 From: maurizio.cimadamore at oracle.com (Maurizio Cimadamore) Date: Thu, 7 Apr 2016 12:20:23 +0100 Subject: RFR: JDK-8152666: The new Hotspot Build System In-Reply-To: <5704D37E.8040208@oracle.com> References: <56F4F0EE.1040508@oracle.com> <5703FF7C.6010309@oracle.com> <1FE98FBA-A48E-4C1A-A622-74331E7BAB30@oracle.com> <5704D37E.8040208@oracle.com> Message-ID: <57064277.4030009@oracle.com> On 06/04/16 10:14, Erik Joelsson wrote: > Hello, > > I assume the mx projects are for Java code or do they also generate > projects for native? 
The new top level target is only meant to replace > the old Visual Studio project generator, at least for now. +1 for having the build system a more central role when it comes to IDE project generation. While we can all resort to workarounds, I think that having IDE generation in a single place would be greatly beneficial in the long run. Maurizio > > /Erik > > On 2016-04-06 03:23, Christian Thalinger wrote: >> >>> On Apr 5, 2016, at 8:10 AM, Daniel D. Daugherty >>> > >>> wrote: >> >> ? >> >>> make/Main.gmk >>> No comments other than the 'hotspot-ide-project' target >>> looks interesting... >> >> Btw. there is already support to generate IDE configurations today >> via mx: >> >> https://wiki.openjdk.java.net/display/Graal/Instructions >> >> integrated with: >> >> https://bugs.openjdk.java.net/browse/JDK-8139921 >> >> One main advantage, as I pointed out in the review, is that it also >> includes generated files so there are no unresolved includes or >> methods anymore: >> >> http://mail.openjdk.java.net/pipermail/hotspot-dev/2015-November/020626.html >> >> >> I?m using this every day. > From daniel.fuchs at oracle.com Thu Apr 7 11:24:02 2016 From: daniel.fuchs at oracle.com (Daniel Fuchs) Date: Thu, 7 Apr 2016 13:24:02 +0200 Subject: RFR 8153123 : Streamline StackWalker code In-Reply-To: <5702FC9B.7020600@oracle.com> References: <5702FC9B.7020600@oracle.com> Message-ID: <57064352.5010800@oracle.com> Hi Brent, This looks good! Thanks for taking care of this one. In http://cr.openjdk.java.net/~bchristi/8153123/webrev.00/hotspot/src/share/vm/prims/jvm.cpp.frames.html 548 objArrayOop fa = objArrayOop(JNIHandles::resolve_non_null(frames)); 549 objArrayHandle frames_array_h(THREAD, fa); 550 551 int limit = start_index + frame_count; 552 if (frames_array_h.is_null()) { 553 THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(), "parameters and mode mismatch", NULL); 554 } Can frames_array_h.is_null() ever be true, given that we used JNIHandles::resolve_non_null(frames) at line 548? I wonder if lines 552-554 are a remnant of the previous implementation and could be removed now... 589 Handle stack_frame_info(THREAD, JNIHandles::resolve(frame)); 590 Handle stack_trace_element(THREAD, JNIHandles::resolve(stack)); Should these call JNIHandles::resolve_non_null instead? http://cr.openjdk.java.net/~bchristi/8153123/webrev.00/jdk/src/java.base/share/classes/java/lang/StackFrameInfo.java.frames.html I'd be very tempted to make 'ste' private volatile. That's all for me :-) best regards, -- daniel On 05/04/16 01:45, Brent Christian wrote: > Hi, > > I'd like to check in some footprint and code reduction changes to the > java.lang.StackWalker implementation. > > Webrev: > http://cr.openjdk.java.net/~bchristi/8153123/webrev.00/ > Bug: > https://bugs.openjdk.java.net/browse/JDK-8153123 > > A summary of the changes: > > * remove the "stackwalk.newThrowable" system property and > "MemberNameInStackFrame" VM flag, originally left in to aid benchmarking > > * Streamline StackFrameInfo fields > > * Refactor/streamline StackStreamFactory (no more separate > classes[]/StackFrame[] arrays, remove unneeded (for now) > StackStreamFactory.StackTrace class) > > > Given the hotspot changes, I plan to push this through hs-rt. 
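As a concrete reading of Daniel's question about lines 548-554 above, here is a minimal sketch (not the code from the webrev): JNIHandles::resolve_non_null() is only meant for handles that are already known to be non-null, so if that precondition holds, the handle constructed from its result cannot be null and the later is_null() check is unreachable:

    objArrayOop fa = objArrayOop(JNIHandles::resolve_non_null(frames));
    objArrayHandle frames_array_h(THREAD, fa);  // wraps a non-null oop under that precondition
    int limit = start_index + frame_count;
    // a subsequent 'if (frames_array_h.is_null()) { THROW_MSG_(...); }' could then never fire

Whether the precondition actually holds for every caller is exactly what the review comment asks.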
> > Thanks, > -Brent > From stefan.karlsson at oracle.com Thu Apr 7 11:30:37 2016 From: stefan.karlsson at oracle.com (Stefan Karlsson) Date: Thu, 7 Apr 2016 13:30:37 +0200 Subject: RFR: 8153659: Create a CHeap backed LogStream class In-Reply-To: <57054D6A.8030405@oracle.com> References: <57054D6A.8030405@oracle.com> Message-ID: <570644DD.3070304@oracle.com> Hi all, I've updated the patch: http://cr.openjdk.java.net/~stefank/8153659/webrev.02 The previous patch created the embedded ResourceMark after the stringStream instance was created. I discussed the layout of the classes with Bengt, and have decided to restructure this patch. I've changed the code so that the ResourceMark is embedded in a new stringStreamWithResourceMark class. This allows me to use the same LogStreamBase class, but different stringClass template parameters, for all three classes. I've put the stringStreamWithResourceMark class in logStream.hpp instead of ostream.hpp, to prevent the include of resourceArea.hpp to propagate through the ostream.hpp header. The resourceArea.hpp file is problematic, since it includes and uses thread.inline.hpp. The alternative would be to move the implementation of resourceArea.hpp into a resource.inline.hpp file, so that header files could create ResourceMark instances, without having to include thread.inline.hpp. I'm leaving that exercise for another RFE. Thanks, StefanK On 2016-04-06 19:54, Stefan Karlsson wrote: > Hi all, > > Please review this patch to add a LogStream class that allocates its > backing buffer from CHeap memory instead of Resource memory. > > http://cr.openjdk.java.net/~stefank/8153659/webrev.01 > https://bugs.openjdk.java.net/browse/JDK-8153659 > > The main motivation for this is that we can't use Resource allocated > memory during initialization, until Thread::current() has been > initialized. So, a CHeap backed LogStream is desirable when we > execute, for example, the following code during large pages > initialization: > > void os::trace_page_sizes(const char* str, const size_t* page_sizes, > int count) > { > if (TracePageSizes) { > tty->print("%s: ", str); > for (int i = 0; i < count; ++i) { > tty->print(" " SIZE_FORMAT, page_sizes[i]); > } > tty->cr(); > } > } > > The patch restructures the code and creates a LogStreamBase template > base class, which takes the backing outputStream class as a template > parameter. We then have three concrete LogStream classes: > > LogStream - Buffer resource allocated with an embedded ResourceMark > LogStreamNoResourceMark - Buffer resource allocated without an > embedded ResourceMark > LogStreamCHeap - Buffer CHeap allocated > > I moved the LogStream class from the logStream.inline.hpp file to > logStream.hpp, for consistency. If that's causing problems while > reviewing this, I can move it in a separate patch. > > Tested with JPRT with the TracePageSizes patch ( JDK-8152491) and > internal VM tests. > > Thanks, > StefanK From bengt.rutisson at oracle.com Thu Apr 7 11:40:01 2016 From: bengt.rutisson at oracle.com (Bengt Rutisson) Date: Thu, 7 Apr 2016 13:40:01 +0200 Subject: RFR: 8153659: Create a CHeap backed LogStream class In-Reply-To: <570644DD.3070304@oracle.com> References: <57054D6A.8030405@oracle.com> <570644DD.3070304@oracle.com> Message-ID: <57064711.2090206@oracle.com> Hi StefanK, On 2016-04-07 13:30, Stefan Karlsson wrote: > Hi all, > > I've updated the patch: > http://cr.openjdk.java.net/~stefank/8153659/webrev.02 Looks good! Nice fix! 
Bengt > > The previous patch created the embedded ResourceMark after the > stringStream instance was created. I discussed the layout of the > classes with Bengt, and have decided to restructure this patch. I've > changed the code so that the ResourceMark is embedded in a new > stringStreamWithResourceMark class. This allows me to use the same > LogStreamBase class, but different stringClass template parameters, > for all three classes. > > I've put the stringStreamWithResourceMark class in logStream.hpp > instead of ostream.hpp, to prevent the include of resourceArea.hpp to > propagate through the ostream.hpp header. The resourceArea.hpp file is > problematic, since it includes and uses thread.inline.hpp. The > alternative would be to move the implementation of resourceArea.hpp > into a resource.inline.hpp file, so that header files could create > ResourceMark instances, without having to include thread.inline.hpp. > I'm leaving that exercise for another RFE. > > Thanks, > StefanK > > On 2016-04-06 19:54, Stefan Karlsson wrote: >> Hi all, >> >> Please review this patch to add a LogStream class that allocates its >> backing buffer from CHeap memory instead of Resource memory. >> >> http://cr.openjdk.java.net/~stefank/8153659/webrev.01 >> https://bugs.openjdk.java.net/browse/JDK-8153659 >> >> The main motivation for this is that we can't use Resource allocated >> memory during initialization, until Thread::current() has been >> initialized. So, a CHeap backed LogStream is desirable when we >> execute, for example, the following code during large pages >> initialization: >> >> void os::trace_page_sizes(const char* str, const size_t* page_sizes, >> int count) >> { >> if (TracePageSizes) { >> tty->print("%s: ", str); >> for (int i = 0; i < count; ++i) { >> tty->print(" " SIZE_FORMAT, page_sizes[i]); >> } >> tty->cr(); >> } >> } >> >> The patch restructures the code and creates a LogStreamBase template >> base class, which takes the backing outputStream class as a template >> parameter. We then have three concrete LogStream classes: >> >> LogStream - Buffer resource allocated with an embedded ResourceMark >> LogStreamNoResourceMark - Buffer resource allocated without an >> embedded ResourceMark >> LogStreamCHeap - Buffer CHeap allocated >> >> I moved the LogStream class from the logStream.inline.hpp file to >> logStream.hpp, for consistency. If that's causing problems while >> reviewing this, I can move it in a separate patch. >> >> Tested with JPRT with the TracePageSizes patch ( JDK-8152491) and >> internal VM tests. >> >> Thanks, >> StefanK > From stefan.karlsson at oracle.com Thu Apr 7 11:49:02 2016 From: stefan.karlsson at oracle.com (Stefan Karlsson) Date: Thu, 7 Apr 2016 13:49:02 +0200 Subject: RFR: 8153659: Create a CHeap backed LogStream class In-Reply-To: <57064711.2090206@oracle.com> References: <57054D6A.8030405@oracle.com> <570644DD.3070304@oracle.com> <57064711.2090206@oracle.com> Message-ID: <5706492E.5070906@oracle.com> Thanks, Bengt. StefanK On 2016-04-07 13:40, Bengt Rutisson wrote: > > Hi StefanK, > > On 2016-04-07 13:30, Stefan Karlsson wrote: >> Hi all, >> >> I've updated the patch: >> http://cr.openjdk.java.net/~stefank/8153659/webrev.02 > > Looks good! Nice fix! > > Bengt > >> >> The previous patch created the embedded ResourceMark after the >> stringStream instance was created. I discussed the layout of the >> classes with Bengt, and have decided to restructure this patch. 
I've >> changed the code so that the ResourceMark is embedded in a new >> stringStreamWithResourceMark class. This allows me to use the same >> LogStreamBase class, but different stringClass template parameters, >> for all three classes. >> >> I've put the stringStreamWithResourceMark class in logStream.hpp >> instead of ostream.hpp, to prevent the include of resourceArea.hpp to >> propagate through the ostream.hpp header. The resourceArea.hpp file >> is problematic, since it includes and uses thread.inline.hpp. The >> alternative would be to move the implementation of resourceArea.hpp >> into a resource.inline.hpp file, so that header files could create >> ResourceMark instances, without having to include thread.inline.hpp. >> I'm leaving that exercise for another RFE. >> >> Thanks, >> StefanK >> >> On 2016-04-06 19:54, Stefan Karlsson wrote: >>> Hi all, >>> >>> Please review this patch to add a LogStream class that allocates its >>> backing buffer from CHeap memory instead of Resource memory. >>> >>> http://cr.openjdk.java.net/~stefank/8153659/webrev.01 >>> https://bugs.openjdk.java.net/browse/JDK-8153659 >>> >>> The main motivation for this is that we can't use Resource allocated >>> memory during initialization, until Thread::current() has been >>> initialized. So, a CHeap backed LogStream is desirable when we >>> execute, for example, the following code during large pages >>> initialization: >>> >>> void os::trace_page_sizes(const char* str, const size_t* page_sizes, >>> int count) >>> { >>> if (TracePageSizes) { >>> tty->print("%s: ", str); >>> for (int i = 0; i < count; ++i) { >>> tty->print(" " SIZE_FORMAT, page_sizes[i]); >>> } >>> tty->cr(); >>> } >>> } >>> >>> The patch restructures the code and creates a LogStreamBase template >>> base class, which takes the backing outputStream class as a template >>> parameter. We then have three concrete LogStream classes: >>> >>> LogStream - Buffer resource allocated with an embedded ResourceMark >>> LogStreamNoResourceMark - Buffer resource allocated without an >>> embedded ResourceMark >>> LogStreamCHeap - Buffer CHeap allocated >>> >>> I moved the LogStream class from the logStream.inline.hpp file to >>> logStream.hpp, for consistency. If that's causing problems while >>> reviewing this, I can move it in a separate patch. >>> >>> Tested with JPRT with the TracePageSizes patch ( JDK-8152491) and >>> internal VM tests. >>> >>> Thanks, >>> StefanK >> >
From rickard.backman at oracle.com Thu Apr 7 12:12:21 2016 From: rickard.backman at oracle.com (Rickard =?iso-8859-1?Q?B=E4ckman?=) Date: Thu, 7 Apr 2016 14:12:21 +0200 Subject: RFR (XL): 8152664 - Support non-continuous CodeBlobs in HotSpot Message-ID: <20160407121221.GQ9504@rbackman> Hi, can I please have a review for this patch? So far CodeBlobs have required all the data (metadata, oops, code, etc.) to be in one continuous blob. With this patch we are looking to change that. It's been done by changing offsets in CodeBlob to addresses, making some methods virtual to allow different behavior, and also creating a couple of new classes. CompiledMethod now sits in between CodeBlob and nmethod.
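To make the described shape concrete, here is a rough sketch based only on the paragraph above and not on the actual code in the webrev ('address' stands for HotSpot's unsigned char* typedef; the members shown are illustrative):

    typedef unsigned char* address;

    // Section bounds become addresses instead of int offsets, and some
    // queries become virtual so subclasses can answer them differently.
    class CodeBlob {
     protected:
      address _code_begin;   // previously an int offset from the blob header
      address _code_end;
      address _data_end;
     public:
      address code_begin() const { return _code_begin; }
      address code_end()   const { return _code_end; }
      virtual bool is_compiled() const { return false; }
    };

    // New intermediate layer: anything that is a compiled Java method,
    // whether or not all of its sections live in one contiguous allocation.
    class CompiledMethod : public CodeBlob {
     public:
      virtual bool is_compiled() const { return true; }
    };

    // JIT-compiled Java method, as before, now one level further down.
    class nmethod : public CompiledMethod {
    };

Code that only needs "some compiled Java method" can then be written against CompiledMethod rather than nmethod, which appears to be what the CompiledMethodIterator and the RelocIterator(CompiledMethod*, ...) constructor mentioned later in the thread are for.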
CR: https://bugs.openjdk.java.net/browse/JDK-8152664 Webrev: http://cr.openjdk.java.net/~rbackman/8152664/ Thanks /R
From volker.simonis at gmail.com Thu Apr 7 12:22:04 2016 From: volker.simonis at gmail.com (Volker Simonis) Date: Thu, 7 Apr 2016 14:22:04 +0200 Subject: RFR (S): 8139921: add mx configuration files to support HotSpot IDE configuration generation In-Reply-To: <31F6B9E0-D7C2-4B73-8114-8616F82AE5B5@oracle.com> References: <31F6B9E0-D7C2-4B73-8114-8616F82AE5B5@oracle.com> Message-ID: Hi Christian, I'd like to try this but I couldn't figure out how it works. I don't have any experience with Graal/Truffle/JVMCI, but as far as I understood, the creation of an Eclipse project should work equally well for a vanilla hotspot repository, right? The first question is where to get mx from (there's different information in the Wiki and in this email thread, for example): https://bitbucket.org/allr/mx or https://github.com/graalvm/mx.git Now let's say I cloned the right mx version. How do I use it? Let's say I have a vanilla jdk9 forest under /share/OpenJDK/jdk9. I create an output directory under /share/OpenJDK/output-jdk9-dbg and from there I call configure and build the images: cd /share/OpenJDK/output-jdk9-dbg bash /share/OpenJDK/jdk9/configure --with-boot-jdk=.. --with-debug-level=slowdebug make images Taking this scenario, from which directory am I supposed to call 'mx ideinit', where will the Eclipse project be created, and how is mx supposed to find my configuration (i.e. platform) and generated files? Thanks a lot and best regards, Volker On Wed, Nov 11, 2015 at 1:42 AM, Christian Thalinger wrote: > [This is kind of a long email but contains pictures :-)] > > https://bugs.openjdk.java.net/browse/JDK-8139921 > http://cr.openjdk.java.net/~twisti/8139921/webrev/ > > In order to make the IDE experience more pleasant now that JEP 243 is integrated we would like to use mx (https://bitbucket.org/allr/mx) for IDE configuration generation. For this we have to integrate a few mx support files into the hotspot repository. > > The mx support files will be under a dot-directory: > > $ hg st --all .mx.jvmci/ > C .mx.jvmci/.project > C .mx.jvmci/.pydevproject > C .mx.jvmci/eclipse-settings/org.eclipse.jdt.core.prefs > C .mx.jvmci/hotspot/templates/eclipse/cproject > C .mx.jvmci/hotspot/templates/eclipse/settings/org.eclipse.cdt.core.prefs > C .mx.jvmci/hotspot/templates/eclipse/settings/org.eclipse.cdt.ui.prefs > C .mx.jvmci/hotspot/templates/eclipse/settings/org.eclipse.core.runtime.prefs > C .mx.jvmci/mx_jvmci.py > C .mx.jvmci/suite.py > > mx itself is and will stay an external tool. Some documentation on how to use it can be found here: > > https://wiki.openjdk.java.net/display/Graal/Instructions > https://wiki.openjdk.java.net/display/Graal/Eclipse > > It basically boils down to: > > $ mx ideinit > > and importing the configuration into your favorite IDE. > > This would give every developer the same view of the source code and we can also enforce code-style guidelines. > > Here is how the imported projects look in Eclipse: > > > > This is most helpful for Compiler engineers who work on the JVMCI but there is value for others too. > > Notice the 'hotspot:*' projects at the top? These are projects for different HotSpot configurations. The main advantage here is that these include the generated files directory (if the configuration exists and the files are built). I only configured and built 'release'
so these can been seen, fastdebug is empty: > > > > This makes it possible for Eclipse to find generated source code. Very helpful. For example, JVMTI. First, jvmtiUtils.hpp from the fastdebug configuration: > > > > and here is the release one: > > > > mx has lots of other commands but most of them are not really useful for us. The only ones worth mentioning besides ideinit are findbugs and checkstyle. > > findbugs runs FindBugs (doh!) on all Java projects that mx knows about: > > cthaling at macbook:~/ws/8139921/hotspot$ mx findbugs > Scanning archives (15 / 30) > 2 analysis passes to perform > Pass 1: Analyzing classes (524 / 524) - 100% complete > Pass 2: Analyzing classes (305 / 305) - 100% complete > Done with analysis > Calculating exit code... > Exit code set to: 0 > > checkstyle checks the Java projects against some predefined style. This is particularly helpful for people who don?t use an IDE or to make sure everything matches the style after applying an external patch: > > cthaling at macbook:~/ws/8139921/hotspot$ mx checkstyle > Running Checkstyle on /Users/cthaling/ws/8139921/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.runtime/src using /Users/cthaling/ws/8139921/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.service/.checkstyle_checks.xml... > /Users/cthaling/ws/8139921/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.runtime/src/jdk/vm/ci/runtime/JVMCI.java:33: 'static' modifier out of order with the JLS suggestions. > > or: > > cthaling at macbook:~/ws/8139921/hotspot$ mx checkstyle > Running Checkstyle on /Users/cthaling/ws/8139921/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.runtime/src using /Users/cthaling/ws/8139921/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.service/.checkstyle_checks.xml... > /Users/cthaling/ws/8139921/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.runtime/src/jdk/vm/ci/runtime/JVMCI.java:43: Name 'FOO' must match pattern '^[a-z][a-zA-Z0-9]*$'. > > That?s all, folks! From adinn at redhat.com Thu Apr 7 12:25:27 2016 From: adinn at redhat.com (Andrew Dinn) Date: Thu, 7 Apr 2016 13:25:27 +0100 Subject: RFR (XL): 8152664 - Support non-continuous CodeBlobs in HotSpot In-Reply-To: <20160407121221.GQ9504@rbackman> References: <20160407121221.GQ9504@rbackman> Message-ID: <570651B7.9020004@redhat.com> On 07/04/16 13:12, Rickard B?ckman wrote: > can I please have review for this patch please? > > So far CodeBlobs have required all the data (metadata, oops, code, etc) > to be in one continuous blob With this patch we are looking to change > that. It's been done by changing offsets in CodeBlob to addresses, > making some methods virtual to allow different behavior and also > creating a couple of new classes. CompiledMethod now sits inbetween > CodeBlob and nmethod. > > CR: https://bugs.openjdk.java.net/browse/JDK-8152664 > Webrev: http://cr.openjdk.java.net/~rbackman/8152664/ Why do 'we want to be able to have pieces of compiled code and metadata that are not located in one continuous piece of memory'? Is there a motive for making this change? 
regards, Andrew Dinn ----------- From igor.ignatyev at oracle.com Thu Apr 7 13:41:49 2016 From: igor.ignatyev at oracle.com (Igor Ignatyev) Date: Thu, 7 Apr 2016 16:41:49 +0300 Subject: RFR (S) 8152432: Implement setting jtreg @requires properties vm.flavor, vm.bits, vm.compMode In-Reply-To: <570534B6.4090401@oracle.com> References: <5703CFA2.4050403@oracle.com> <570534B6.4090401@oracle.com> Message-ID: <45371B87-68A2-43BB-A917-FFF9D0F82124@oracle.com> Dima, one minor comment : s/Ignatiev/Ignatyev/ ) otherwise looks good to me, thanks for addressing my comments. ? Igor > On Apr 6, 2016, at 7:09 PM, Dmitry Fazunenko wrote: > > I got offline comments from Igor Ignatiev. > New version: > https://bugs.openjdk.java.net/browse/JDK-8152432 > http://cr.openjdk.java.net/~dfazunen/8152432/webrev.01/ > changes: http://cr.openjdk.java.net/~dfazunen/8152432/webrev.00vs01/ > > Thanks, > Dima > > > On 05.04.2016 17:45, Dmitry Fazunenko wrote: >> Hello, >> >> Would you please review a relatively simple fix which starts using new jtreg functionality: >> ability to define custom properties for use with the @requires tag. >> >> https://bugs.openjdk.java.net/browse/JDK-8152432 >> http://cr.openjdk.java.net/~dfazunen/8152432/webrev.00/ >> >> As the first experience of using this functionality I just fixed setting of properties which set by jtreg, >> but set incorrectly relying only on specified vm flags. >> In the near future we are going to introduce new properties. >> >> Tested locally. >> >> Thanks, >> Dima >> >> >> >> > From dmitry.fazunenko at oracle.com Thu Apr 7 13:47:28 2016 From: dmitry.fazunenko at oracle.com (Dmitry Fazunenko) Date: Thu, 7 Apr 2016 16:47:28 +0300 Subject: RFR (S) 8152432: Implement setting jtreg @requires properties vm.flavor, vm.bits, vm.compMode In-Reply-To: <45371B87-68A2-43BB-A917-FFF9D0F82124@oracle.com> References: <5703CFA2.4050403@oracle.com> <570534B6.4090401@oracle.com> <45371B87-68A2-43BB-A917-FFF9D0F82124@oracle.com> Message-ID: <570664F0.1030702@oracle.com> Thank you so much, Igor. On 07.04.2016 16:41, Igor Ignatyev wrote: > Dima, > > one minor comment : s/Ignatiev/Ignatyev/ ) > otherwise looks good to me, thanks for addressing my comments. > > ? Igor > >> On Apr 6, 2016, at 7:09 PM, Dmitry Fazunenko wrote: >> >> I got offline comments from Igor Ignatiev. >> New version: >> https://bugs.openjdk.java.net/browse/JDK-8152432 >> http://cr.openjdk.java.net/~dfazunen/8152432/webrev.01/ >> changes: http://cr.openjdk.java.net/~dfazunen/8152432/webrev.00vs01/ >> >> Thanks, >> Dima >> >> >> On 05.04.2016 17:45, Dmitry Fazunenko wrote: >>> Hello, >>> >>> Would you please review a relatively simple fix which starts using new jtreg functionality: >>> ability to define custom properties for use with the @requires tag. >>> >>> https://bugs.openjdk.java.net/browse/JDK-8152432 >>> http://cr.openjdk.java.net/~dfazunen/8152432/webrev.00/ >>> >>> As the first experience of using this functionality I just fixed setting of properties which set by jtreg, >>> but set incorrectly relying only on specified vm flags. >>> In the near future we are going to introduce new properties. >>> >>> Tested locally. >>> >>> Thanks, >>> Dima >>> >>> >>> >>> From daniel.daugherty at oracle.com Thu Apr 7 13:54:33 2016 From: daniel.daugherty at oracle.com (Daniel D. 
Daugherty) Date: Thu, 7 Apr 2016 07:54:33 -0600 Subject: RFR: JDK-8152666: The new Hotspot Build System In-Reply-To: <57062F01.70701@oracle.com> References: <56F4F0EE.1040508@oracle.com> <5703FF7C.6010309@oracle.com> <5704D28E.1060304@oracle.com> <57062F01.70701@oracle.com> Message-ID: <57066699.7030307@oracle.com> > I'm not sure if I'm formally allowed to be a reviewer, since I've > wrote the absolute majority of the code myself. The way I've done this in the past is a "Contributed-by:" line listing all of the folks that contributed and a "Reviewed-by:" line listing all the reviewers. Magnus, you reviewed Erik's changes and vice versa... Dan On 4/7/16 3:57 AM, Magnus Ihse Bursie wrote: > On 2016-04-06 11:10, Erik Joelsson wrote: >> Hello Dan and thank you for the review! I know it's a lot to chew >> through. >> >> I have incorporated your changes and published a new webrev: >> http://cr.openjdk.java.net/~erikj/8152666/webrev.02/ > > I'm not sure if I'm formally allowed to be a reviewer, since I've > wrote the absolute majority of the code myself. > > Nevertheless, I've looked through the webrev carefully, including the > latest changes by you, and it looks good to me. Ship it! :-) > > Just a few minor comments: > > In compare.sh.in: > Why the added export of DEBUG_LEVEL? I can't find any reference to it > in the changes in compare.sh. Was it references in some earlier change > and we missed to export it? > > In flags.m4/platform.m4: > It is unfortunate that we needed to make the build/target duplication > in this change. It makes the messy addition of the JVM_CFLAGS even > messier. :( But then again, we've always planned a follow-up > restructuring of the flag handling after the integration of the new > Hotspot build system. It just got a bit more urgent. > > /Magnus > > >> >> On 2016-04-05 20:10, Daniel D. Daugherty wrote: >>> >>> >>> > The new build supports the following variants: >>> > >>> > * server (C1+C2) >>> >>> The above "server" variant is the "tiered server". Does the new >>> build system support the "C2 server" variant? What about the >>> 32-bit server and 64-bit server build variants? For example, >>> on Linux you can have: >>> >>> * C1/Client, 32-bit >>> * C2/Server, 32-bit >>> * Tiered (C1 & C2), 32-bit >>> * C2/Server, 64-bit >>> * Tiered (C1 + C2), 64-bit >>> >>> The above wide range of variants is also true for Win*. >>> >> There is a way to achieve this even if it's not as straight forward. >> It's controlled through the new "jvm-feature" setting. To build a >> completely custom set of features for a jvm, you set the >> --with-jvm-variants=custom and then define the full feature set using >> --with-jvm-features=compiler2,... For "server, client, core, minimal, >> zero and zeroshark" there is a predefined set of features while the >> custom variant has no features by default. >>> >>> General >>> Please make sure all the copyrights are updated. >>> >> Done >>> >>> common/autoconf/basics.m4 >>> No comments. >>> >>> common/autoconf/build-performance.m4 >>> No comments. >>> >>> common/autoconf/buildjdk-spec.gmk.in >>> No comments. >>> >>> common/autoconf/compare.sh.in >>> No comments. >>> >>> common/autoconf/configure >>> No comments. >>> >>> common/autoconf/configure.ac >>> No comments. 
>>> >>> common/autoconf/flags.m4 >>> L274: SHARED_LIBRARY_FLAGS="-dynamiclib >>> -compatibility_version 1.0.0 -current_version 1.0.0 $PICFLAG" >>> L275: JVM_CFLAGS="$JVM_CFLAGS -fPIC" >>> >>> L275 is new, but seeing it next to L274 makes me wonder if >>> $PICFLAG should be used instead of the literal '-fPIC'? >> Fixed >>> >>> L303: JVM_CFLAGS="$JVM_CFLAGS -fPIC" >>> Same question about literal '-fPIC'. >>> >> Not sure, leaving for now. It seems we leave the PICFLAG empty for >> the JDK build and only add it to the hotspot build. This should be >> addressed in a followup where we try to align flag usage more between >> the different libraries. >>> For most of the changes to flags.m4, I can't see how any of it >>> relates to the new HotSpot build. >>> >>> Update: Now I'm wondering if this is one of those files that >>> we typically don't review because it is auto generated. >>> Sorry, don't remember for sure. >> It's a file that should be reviewed, only generated-configure.sh can >> be ignored. The majority of the changes in here are related to cross >> compiling in the modular world. When cross compiling now, we need to >> also build a jvm for the build platform in order to run jlink and >> jmod when building images. With the old hotspot build, that was >> simpler, just invoke the hotspot build with some ARCH and compiler >> related variables set. For the rest of the JDK build, an >> approximation of flags used was enough so the problem was never fully >> solved. >> >> In the new build, we derive all the compiler options in configure so >> I had to introduce a more proper solution. I did this by >> parameterizing some macros in flags.m4 and platform.m4 so that we can >> run them twice, once for the "target" toolchain" and one for the >> "build" toolchain. These are the majority of the changes you are >> seeing. I also removed the old hard coded "build" versions of certain >> flag and platform variables. >>> common/autoconf/generated-configure.sh >>> 2642 lines changed... I think this is one of those files >>> you're supposed to skip in build-dev review... :-| >> Yes, please do. >>> >>> common/autoconf/help.m4 >>> L179: $PRINTF "Which are valid to use depends on the target >>> platform.\n " >>> L180: $PRINTF "%s " $VALID_JVM_FEATURES >>> Why are there blanks after the last '\n' on L179 instead of >>> at the beginning of L180? >>> >> If you do $PRINTF " %s " $VALID_JVM_FEATURES, it adds those spaces >> between every element in VALID_JVM_FEATURES. >>> common/autoconf/hotspot-spec.gmk.in >>> No comments. >>> >>> common/autoconf/hotspot.m4 >>> L46: # Check if the specified JVM features are explicitely >>> enabled. To be used in >>> Typo: 'explicitely' -> 'explicitly' >>> >>> L59: # server: normal interpreter, and a tiered C1/C2 compiler >>> So no support for a C2-only server config? >>> >>> L77: # Have the user listed more than one variant? >>> Typo: 'Have' -> 'Has' >>> >> fixed >>> common/autoconf/jdk-options.m4 >>> No comments other than to say thanks for keeping support >>> for 'optimized' builds. >>> >>> common/autoconf/jdk-version.m4 >>> No comments. >>> >>> common/autoconf/lib-std.m4 >>> No comments. >>> >>> common/autoconf/libraries.m4 >>> No comments. >>> >>> common/autoconf/platform.m4 >>> No comments, but mind numbing amount of diffs. >>> >> Same explanation as for flags.m4 >>> common/autoconf/spec.gmk.in >>> No comments. >>> >>> common/autoconf/toolchain.m4 >>> No comments. >>> >>> common/autoconf/version-numbers >>> No comments. >>> >>> common/bin/compare.sh >>> No comments. 
>>> >>> common/bin/compare_exceptions.sh.incl >>> No comments. >>> >>> make/Jprt.gmk >>> No comments. >>> >>> make/Main.gmk >>> No comments other than the 'hotspot-ide-project' target >>> looks interesting... >>> >> This is the replacement for the visual studio project generator. We >> currently only support VS here. >>> make/common/MakeBase.gmk >>> No comments. >>> >>> make/common/NativeCompilation.gmk >>> L649: else ifeq (LOW, $$($1_OPTIMIZATION)) >>> L650: $1_OPT_CFLAGS := $(C_O_FLAG_NORM) >>> L651: $1_OPT_CXXFLAGS := $(CXX_O_FLAG_NORM) >>> Instead of "_NORM", I was expecting "_LOW". >>> >>> L652: else ifeq (HIGH, $$($1_OPTIMIZATION)) >>> L653: $1_OPT_CFLAGS := $(C_O_FLAG_HI) >>> L654: $1_OPT_CXXFLAGS := $(CXX_O_FLAG_HI) >>> Instead of "_HI" I was expecting "_HIGH". >>> >> The names here were defined way back when we did build infra for the >> JDK build. I wouldn't mind better alignment in naming the >> optimization levels. >>> make/jprt.properties >>> L136: # Don't disable precompiled headers on windows. It's >>> simply too slow. >>> This is a surprise. Not the slowness part, but not being >>> able to do a non-PCH JPRT build on Win*. IMHO, it's a >>> little too much motherhood... >>> >> Actually, the old hotspot build does not allow disabling of PCH for >> windows at all. The flag is simply ignored. In the new build, we >> treat the flag the same on all platforms, so disabling precompiled >> headers works on Windows. In the current JPRT config, we disable >> precompiled headers on all fastdebug builds as a way of making sure >> we aren't breaking that build configuration. We noticed a major build >> time regression on Windows fastdebug builds in JPRT until we figured >> out it was caused by this. Since we aren't currently disabling >> precompiled header on Windows, I see no reason to start now. The >> build time regression for just building hotspot is around 2m->12m. >>> jdk/make/Import.gmk >>> No comments. >>> >>> jdk/make/copy/Copy-java.base.gmk >>> No comments. >>> >>> jdk/make/lib/CoreLibraries.gmk >>> No comments. >>> >>> hotspot/makefiles/BuildHotspot.gmk >>> No comments. >>> >>> hotspot/makefiles/Dist.gmk >>> L52: define macosx_universalize >>> I thought MacOS X universal support was going away? >>> >>> Update: OK, I see the mention of 8069540 ahead... >>> >> Yeah, we need to be binary the same as the old build for now. >> Hopefully we can get rid of the universal stuff soon. >>> L120: # these files are identical, and just pick one arbitrarily >>> to use as souce. >>> Typo: 'souce' -> 'source' >>> >>> L139: # This might have been defined in a custom extenstion >>> Typo: 'extenstion' -> 'extension' >>> >> fixed >>> L168: # NOTE: In the old build, this file was not copied on >>> Windows. >>> L169: ifneq ($(OPENJDK_TARGET_OS), windows) >>> L170: $(eval $(call SetupCopyFiles, COPY_JVMTI_HTML, \ >>> I'm not quite sure why the jvmti.html work is done for >>> more than a single platform. >>> >>> Update: Thinking about this more... I vaguely remember that >>> JVM/TI tracing used to be disabled in Client VMs. Don't know >>> if that's still the case. >> The jvmti.html file is just copied into the docs bundle later. IMO, >> the docs bundle should be the same regardless of platform. In >> practice we only publish the bundle from one build platform anyway. >> >> /Erik >>> >>> hotspot/makefiles/HotspotCommon.gmk >>> No comments. >>> >>> hotspot/makefiles/gensrc/GenerateSources.gmk >>> No comments. 
>>> >>> hotspot/makefiles/gensrc/GensrcAdlc.gmk >>> L98: # NOTE: Windows adlc flags was different in the old >>> build. Is this really >>> L99: # correct? >>> John Rose may know the answer to this historical question. >>> >>> hotspot/makefiles/gensrc/GensrcDtrace.gmk >>> No comments. >>> >>> hotspot/makefiles/gensrc/GensrcJvmti.gmk >>> No comments. >>> >>> hotspot/makefiles/ide/CreateVSProject.gmk >>> No comments. >>> >>> hotspot/makefiles/lib/CompileDtracePostJvm.gmk >>> No comments. >>> >>> hotspot/makefiles/lib/CompileDtracePreJvm.gmk >>> No comments. >>> >>> hotspot/makefiles/lib/CompileJvm.gmk >>> No comments. >>> >>> hotspot/makefiles/lib/CompileLibjsig.gmk >>> No comments. >>> >>> hotspot/makefiles/lib/CompileLibraries.gmk >>> No comments. >>> >>> hotspot/makefiles/lib/JvmFeatures.gmk >>> No comments. >>> >>> hotspot/makefiles/lib/JvmMapfile.gmk >>> No comments. >>> >>> hotspot/makefiles/lib/JvmOverrideFiles.gmk >>> No comments. >>> >>> hotspot/makefiles/mapfiles/libjsig/mapfile-vers-solaris >>> hotspot/makefiles/mapfiles/libjvm_db/mapfile-vers >>> hotspot/makefiles/mapfiles/libjvm_dtrace/mapfile-vers >>> No comments on the mapfiles. >>> >>> hotspot/makefiles/symbols/symbols-aix >>> hotspot/makefiles/symbols/symbols-aix-debug >>> hotspot/makefiles/symbols/symbols-linux >>> hotspot/makefiles/symbols/symbols-macosx >>> hotspot/makefiles/symbols/symbols-shared >>> hotspot/makefiles/symbols/symbols-solaris >>> hotspot/makefiles/symbols/symbols-solaris-dtrace-compiler1 >>> hotspot/makefiles/symbols/symbols-solaris-dtrace-compiler2 >>> hotspot/makefiles/symbols/symbols-unix >>> No comments on the symbol files. >>> >>> >>> Thumbs up on this fix; I don't think that anything I noted >>> above is a show stopper for this changeset. >>> >>> Dan >>> >>> >>>> >>>> /Erik >>> >> > From aph at redhat.com Thu Apr 7 14:25:26 2016 From: aph at redhat.com (Andrew Haley) Date: Thu, 7 Apr 2016 15:25:26 +0100 Subject: [aarch64-port-dev ] RFR: aarch64: Add Arrays.fill stub code In-Reply-To: References: <570532CF.1050903@redhat.com> <57054CCA.1080900@redhat.com> Message-ID: <57066DD6.9030508@redhat.com> On 04/07/2016 02:53 PM, Long Chen wrote: > Thanks Adnrew! > > It doesn't make sense for "disjoint fill", comment fixed in http://people.linaro.org/~long.chen/ArrayFill/ArrayFill_v2.patch OK. Andrew. From tobias.hartmann at oracle.com Thu Apr 7 14:50:56 2016 From: tobias.hartmann at oracle.com (Tobias Hartmann) Date: Thu, 7 Apr 2016 16:50:56 +0200 Subject: RFR (XL): 8152664 - Support non-continuous CodeBlobs in HotSpot In-Reply-To: <20160407121221.GQ9504@rbackman> References: <20160407121221.GQ9504@rbackman> Message-ID: <570673D0.20108@oracle.com> Hi Rickard, I had a look at some parts of the changes. Here are my comments: codeCache.cpp -> If we still need NMethodIterator it should be merged with CompiledMethodIterator using C++ templates to avoid code duplication. sweeper.cpp -> NMethodMarker is not used and should be removed -> CompiledMethodMarker differs from NMethodMarker (it should be merged with latest changes) -> the comment in line 432 is confusing. Shouldn't it be something like "Only flushing nmethod so size..."? sweeper.hpp -> the comment describing the sweeper cycle should be updated -> line 69: "Current nmethod" should be changed to "Current compiled method" -> is_sweeping(nmethod* which) is not used and can be removed thread.hpp -> set_scanned_nmethod() name and comment should be fixed. I also wonder if it's necessary to track/lock CompiledMethod. Shouldn't it be sufficient to lock nmethod? 
I also noticed some minor style issues: codeCache.cpp -> "CompiledMethod *nm" vs. "CompiledMethod* nm" codeBlob.hpp -> typo: "deoptimizatation" nmethod.hpp -> wrong indentation in line 265 (whitespace was removed) vmStructs.cpp -> unnecessary newline in line 916 -> wrong indentation of "\" at line ends (multiple times) Best regards, Tobias On 07.04.2016 14:12, Rickard B?ckman wrote: > Hi, > > can I please have review for this patch please? > > So far CodeBlobs have required all the data (metadata, oops, code, etc) > to be in one continuous blob With this patch we are looking to change > that. It's been done by changing offsets in CodeBlob to addresses, > making some methods virtual to allow different behavior and also > creating a couple of new classes. CompiledMethod now sits inbetween > CodeBlob and nmethod. > > CR: https://bugs.openjdk.java.net/browse/JDK-8152664 > Webrev: http://cr.openjdk.java.net/~rbackman/8152664/ > > Thanks > /R > From coleen.phillimore at oracle.com Thu Apr 7 15:07:21 2016 From: coleen.phillimore at oracle.com (Coleen Phillimore) Date: Thu, 7 Apr 2016 11:07:21 -0400 Subject: RFR 8151939: VM_Version_init() print buffer is too small In-Reply-To: <57056EEE.2060904@oracle.com> References: <57044C4C.9080601@oracle.com> <570557F7.5050400@oracle.com> <57056EEE.2060904@oracle.com> Message-ID: <570677A9.3060901@oracle.com> On 4/6/16 4:17 PM, Vladimir Kozlov wrote: > Should we use p2i() instead of (intprt_t) for polling and > mem_serialize pages? Yes, this is better. I fixed these and recompiled. > > os_windows.cpp - does ~Log destruct or will generate 'new line'? I am > simple asking since I don't know. No the log.debug() lines each have a new line (which makes the output come out on multiple lines). The only way I know to make the output come out in all one line is to use a logStream() which requires a ResourceMark which is inconvenient in a lot of places in the os layer code (may not have current thread). > > os_linux_x86.cpp - do we need \n at the end of logging line?: > + log_info(os)("OS version is %d.%d, which %s support SSE/SSE2\n", The \n is unneeded, I missed this. Thanks. > > Why new OsCpuLoggingTest.java test has SAP Copyright? Oops, I copied this from the SAP test. Thank you for noticing this! Coleen > > Thanks, > Vladimir > > On 4/6/16 11:39 AM, Coleen Phillimore wrote: >> >> I removed ancient logging from the signal handler and left a corrected >> comment instead. >> >> open webrev at http://cr.openjdk.java.net/~coleenp/8151939.02/webrev >> bug link https://bugs.openjdk.java.net/browse/JDK-8151939 >> >> Thanks, >> Coleen >> >> On 4/5/16 7:37 PM, Coleen Phillimore wrote: >>> Summary: Increase buffer size, use logging to print out version and os >>> information >>> >>> This replaces several -XX:+PrintMiscellaneous -XX:+Verbose to -Xlog:os >>> or -Xlog:os+cpu. Most use info level logging because it's only >>> printed once at the beginning, except where printing is in the signal >>> handler, which uses debug level. Also, errors in setup use info level >>> (not warning) since they never printed the warnings before without >>> PrintMiscellaneous and Verbose. 
>>> >>> busaa027% java -Xlog:os -version >>> [0.008s][info][os] SafePoint Polling address: 0x00007fde1d37f000 >>> [0.008s][info][os] Memory Serialize Page address: 0x00007fde1d37d000 >>> [0.009s][info][os] HotSpot is running with glibc 2.12, NPTL 2.12 >>> java version "9-internal" >>> Java(TM) SE Runtime Environment (fastdebug build >>> 9-internal+0-2016-04-05-170806.cphillim.jdk9.vm-version) >>> Java HotSpot(TM) 64-Bit Server VM (fastdebug build >>> 9-internal+0-2016-04-05-170806.cphillim.jdk9.vm-version-coleen, mixed >>> mode) >>> >>> busaa027% java -Xlog:os,os+cpu -version >>> [0.008s][info][os] SafePoint Polling address: 0x00007f49c021f000 >>> [0.008s][info][os] Memory Serialize Page address: 0x00007f49c021d000 >>> [0.009s][info][os] HotSpot is running with glibc 2.12, NPTL 2.12 >>> [0.011s][info][os,cpu] Logical CPUs per core: 2 >>> [0.011s][info][os,cpu] L1 data cache line size: 64 >>> [0.011s][info][os,cpu] UseSSE=4 UseAVX=2 UseAES=1 >>> MaxVectorSize=64Allocation prefetching: PREFETCHNTA at distance 192, 4 >>> lines of 64 bytes >>> [0.011s][info][os,cpu] PrefetchCopyIntervalInBytes 576 >>> [0.011s][info][os,cpu] PrefetchScanIntervalInBytes 576 >>> [0.011s][info][os,cpu] PrefetchFieldsAhead 1 >>> [0.011s][info][os,cpu] ContendedPaddingWidth 128 >>> [0.011s][info][os,cpu] CPU:total 72 (18 cores per cpu, 2 threads per >>> core) family 6 model 63 stepping 2, cmov, cx8, fxsr, mmx, sse, sse2, >>> sse3, ssse3, sse4.1, sse4.2, popcnt, avx, avx2, aes, clmul, erms, >>> lzcnt, ht, tsc, tscinvbit, bmi1, bmi2 >>> [0.011s][info][os,cpu] CPU Model and flags from /proc/cpuinfo: >>> [0.011s][info][os,cpu] model name : Intel(R) Xeon(R) CPU E5-2699 v3 >>> @ 2.30GHz >>> [0.011s][info][os,cpu] flags : fpu vme de pse tsc msr pae mce >>> cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse >>> sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc >>> arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf >>> eagerfpu pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 fma >>> cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic movbe popcnt >>> tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm ida arat epb >>> xsaveopt pln pts dtherm tpr_shadow vnmi flexpriority ept vpid fsgsbase >>> tsc_adjust bmi1 avx2 smep bmi2 erms invpcid >>> java version "9-internal" >>> Java(TM) SE Runtime Environment (fastdebug build >>> 9-internal+0-2016-04-05-170806.cphillim.jdk9.vm-version) >>> Java HotSpot(TM) 64-Bit Server VM (fastdebug build >>> 9-internal+0-2016-04-05-170806.cphillim.jdk9.vm-version-coleen, mixed >>> mode) >>> >>> open webrev at http://cr.openjdk.java.net/~coleenp/8151939.01/webrev >>> bug link https://bugs.openjdk.java.net/browse/JDK-8151939 >>> >>> Tested in rbt and jprt. >>> >>> Thanks, >>> Coleen >> From aph at redhat.com Thu Apr 7 15:33:39 2016 From: aph at redhat.com (Andrew Haley) Date: Thu, 7 Apr 2016 16:33:39 +0100 Subject: [aarch64-port-dev ] RFR: aarch64: Add Arrays.fill stub code In-Reply-To: <57066DD6.9030508@redhat.com> References: <570532CF.1050903@redhat.com> <57054CCA.1080900@redhat.com> <57066DD6.9030508@redhat.com> Message-ID: <57067DD3.1090503@redhat.com> On 04/07/2016 03:25 PM, Andrew Haley wrote: > On 04/07/2016 02:53 PM, Long Chen wrote: >> Thanks Adnrew! >> >> It doesn't make sense for "disjoint fill", comment fixed in http://people.linaro.org/~long.chen/ArrayFill/ArrayFill_v2.patch > > OK. By the way, patches which are anywhere other than cr.openjdk.java.net will not be accepted. Please create a webrev and put it there. Andrew. 
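The 8151939 logging conversion quoted above boils down to a pattern like the following sketch ('polling_page' is a placeholder variable name, not something taken from the patch; log_info comes from logging/log.hpp):

    // Old style, printed only with -XX:+PrintMiscellaneous -XX:+Verbose:
    //   if (PrintMiscellaneous && Verbose) {
    //     tty->print_cr("SafePoint Polling address: " INTPTR_FORMAT,
    //                   (intptr_t)polling_page);
    //   }
    // New style, enabled with -Xlog:os, using p2i() as Vladimir suggested:
    log_info(os)("SafePoint Polling address: " INTPTR_FORMAT, p2i(polling_page));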
From volker.simonis at gmail.com Thu Apr 7 15:50:24 2016 From: volker.simonis at gmail.com (Volker Simonis) Date: Thu, 7 Apr 2016 17:50:24 +0200 Subject: RFR (XL): 8152664 - Support non-continuous CodeBlobs in HotSpot In-Reply-To: <20160407121221.GQ9504@rbackman> References: <20160407121221.GQ9504@rbackman> Message-ID: Hi Rickard, I'd also like to know what's the rational behind this quite large change. Do you expect some performance or memory consumption improvements or is this a prerequisite for another change which is still to come? The change itself currently doesn't work on ppc64 (neither on Linux nor on AIX). I get the following crash during the build when the newly built Hotspot is JIT-compiling java.lang.String::charAt on C1 : # # A fatal error has been detected by the Java Runtime Environment: # # SIGSEGV (0xb) at pc=0x00001000012a44d0, pid=35331, tid=35404 # # JRE version: OpenJDK Runtime Environment (9.0) (slowdebug build 9-internal+0-2016-04-07-162501.d046063.jdk9-hs-comp) # Java VM: OpenJDK 64-Bit Server VM (slowdebug 9-internal+0-2016-04-07-162501.d046063.jdk9-hs-comp, mixed mode, tiered, compressed oo ps, serial gc, linux-ppc64le) # Problematic frame: # V [libjvm.so+0xf744d0] outputStream::do_vsnprintf_and_write(char const*, char*, bool)+0x40 # # No core dump will be written. Core dumps have been disabled. To enable core dumping, try "ulimit -c unlimited" before starting Java again # # If you would like to submit a bug report, please visit: # http://bugreport.java.com/bugreport/crash.jsp # --------------- S U M M A R Y ------------ Command Line: -Dapplication.home=/sapmnt/ld9510/a/d046063/output-jdk9-hs-comp-dbg/jdk -Xms8m -XX:+UseSerialGC -Xms32M -Xmx512M -Djdk. module.main=jdk.jlink jdk.jlink/jdk.tools.jmod.Main create --module-version 9-internal --os-name Linux --os-arch ppc64le --os-version 2.6 --modulepath /priv/d046063/output-jdk9-hs-comp-dbg/images/jmods --hash-dependencies .* --exclude **_the.* --libs /priv/d046063/output-jdk9-hs-comp-dbg/support/modules_libs-stripped/java.base --cmds /priv/d046063/output-jdk9-hs-comp-dbg/support/modules_cmds-stripped/java.base --config /priv/d046063/output-jdk9-hs-comp-dbg/support/modules_conf/java.base --class-path /priv/d046063/output-jdk9-hs-comp-dbg/jdk/modules/java.base /priv/d046063/output-jdk9-hs-comp-dbg/support/jmods/java.base.jmod Host: ld9510, POWER8E (raw), altivec supported, 48 cores, 61G, # Please check /etc/os-release for details about this release. 
Time: Thu Apr 7 16:28:55 2016 CEST elapsed time: 0 seconds (0d 0h 0m 0s) --------------- T H R E A D --------------- Current thread (0x000010000429c800): JavaThread "C1 CompilerThread10" daemon [_thread_in_vm, id=35404, stack(0x000010006a800000,0x000010006ac00000)] Current CompileTask: C1: 761 3 3 java.lang.String::charAt (25 bytes) Stack: [0x000010006a800000,0x000010006ac00000], sp=0x000010006abfc6c0, free space=4081k Native frames: (J=compiled Java code, j=interpreted, Vv=VM code, C=native code) V [libjvm.so+0xf744d0] outputStream::do_vsnprintf_and_write(char const*, char*, bool)+0x40 V [libjvm.so+0xf74668] outputStream::print_cr(char const*, ...)+0x68 V [libjvm.so+0x72189c] CodeBlob::print_on(outputStream*) const+0x50 V [libjvm.so+0x723bdc] RuntimeBlob::print_on(outputStream*) const+0x40 V [libjvm.so+0x721eb0] SingletonBlob::print_on(outputStream*) const+0x4c V [libjvm.so+0x106d51c] RelocIterator::initialize(CompiledMethod*, unsigned char*, unsigned char*)+0x170 V [libjvm.so+0x5ae56c] RelocIterator::RelocIterator(CompiledMethod*, unsigned char*, unsigned char*)+0x78 V [libjvm.so+0x10719dc] trampoline_stub_Relocation::get_trampoline_for(unsigned char*, nmethod*)+0x78 V [libjvm.so+0xefb80c] NativeCall::get_trampoline()+0x110 V [libjvm.so+0x1076914] Relocation::pd_call_destination(unsigned char*)+0x150 V [libjvm.so+0x106f5fc] CallRelocation::fix_relocation_after_move(CodeBuffer const*, CodeBuffer*)+0x74 V [libjvm.so+0x728898] CodeBuffer::relocate_code_to(CodeBuffer*) const+0x390 V [libjvm.so+0x728404] CodeBuffer::copy_code_to(CodeBlob*)+0x134 V [libjvm.so+0x722670] CodeBuffer::copy_code_and_locs_to(CodeBlob*)+0x84 V [libjvm.so+0x71f834] CodeBlob::CodeBlob(char const*, CodeBlobLayout const&, CodeBuffer*, int, int, OopMapSet*, bool, int)+0x320 V [libjvm.so+0x7c52c8] CompiledMethod::CompiledMethod(Method*, char const*, int, int, CodeBuffer*, int, int, OopMapSet*, bool)+0xd8 V [libjvm.so+0xf01f58] nmethod::nmethod(Method*, int, int, int, CodeOffsets*, int, DebugInformationRecorder*, Dependencies*, CodeBuffer*, int, OopMapSet*, ExceptionHandlerTable*, ImplicitExceptionTable*, AbstractCompiler*, int)+0xe0 V [libjvm.so+0xf01610] nmethod::new_nmethod(methodHandle const&, int, int, CodeOffsets*, int, DebugInformationRecorder*, Dependencies*, CodeBuffer*, int, OopMapSet*, ExceptionHandlerTable*, ImplicitExceptionTable*, AbstractCompiler*, int)+0x2c4 V [libjvm.so+0x632970] ciEnv::register_method(ciMethod*, int, CodeOffsets*, int, CodeBuffer*, int, OopMapSet*, ExceptionHandlerTable*, ImplicitExceptionTable*, AbstractCompiler*, bool, bool, RTMState)+0x560 V [libjvm.so+0x48ee00] Compilation::install_code(int)+0x264 V [libjvm.so+0x48eff8] Compilation::compile_method()+0x184 V [libjvm.so+0x48f7a8] Compilation::Compilation(AbstractCompiler*, ciEnv*, ciMethod*, int, BufferBlob*, DirectiveSet*)+0x288 V [libjvm.so+0x4980d0] Compiler::compile_method(ciEnv*, ciMethod*, int, DirectiveSet*)+0xc8 V [libjvm.so+0x7b188c] CompileBroker::invoke_compiler_on_method(CompileTask*)+0x590 V [libjvm.so+0x7b07bc] CompileBroker::compiler_thread_loop()+0x310 V [libjvm.so+0x11a614c] compiler_thread_entry(JavaThread*, Thread*)+0xa0 V [libjvm.so+0x119f3a8] JavaThread::thread_main_inner()+0x1b4 V [libjvm.so+0x119f1a4] JavaThread::run()+0x1b8 V [libjvm.so+0xf53d90] java_start(Thread*)+0x204 C [libpthread.so.0+0x8a64] start_thread+0xf4 C [libc.so.6+0x1032a0] clone+0x98 I haven't identified the exact cause (will analyze it tomorrow) but the stack trace indicates that it is indeed related to your changes. 
Besides that I have some comments: codeBuffer.hpp: 472 CodeSection* insts() { return &_insts; } 475 const CodeSection* insts() const { return &_insts; } - do we really need both versions? codeBlob.hpp: 135 nmethod* as_nmethod_or_null() const { return is_nmethod() ? (nmethod*) this : NULL; } 136 nmethod* as_nmethod() const { assert(is_nmethod(), "must be nmethod"); return (nmethod*) this; } 137 CompiledMethod* as_compiled_method_or_null() const { return is_compiled() ? (CompiledMethod*) this : NULL; } 138 CompiledMethod* as_compiled_method() const { assert(is_compiled(), "must be compiled"); return (CompiledMethod*) this; } 139 CodeBlob* as_codeblob_or_null() const { return (CodeBlob*) this; } - I don't like this code. You make the getters 'const' which implicitly makes 'this' a "pointer to const" but then the returned pointer is a normal pointer to a non-const object and therefore you have to statically cast away the "pointer to const" (that's why you need the cast even in the case where you return a CodeBlob*). So either remove the const qualifier from the method declarations or make them return "pointers to const". And by the way, as_codeblob_or_null() doesn't seem to be used anywhere in the code, why do we need it at all? - Why do we need the non-virtual methods is_nmethod() and is_compiled() to manually simulate virtual behavior? Why can't we simply make them virtual and implement them accordingly in nmethod and CompiledMethod? Regards, Volker On Thu, Apr 7, 2016 at 2:12 PM, Rickard Bäckman wrote: > Hi, > > can I please have review for this patch please? > > So far CodeBlobs have required all the data (metadata, oops, code, etc) > to be in one continuous blob. With this patch we are looking to change > that. It's been done by changing offsets in CodeBlob to addresses, > making some methods virtual to allow different behavior and also > creating a couple of new classes. CompiledMethod now sits in between > CodeBlob and nmethod. > > CR: https://bugs.openjdk.java.net/browse/JDK-8152664 > Webrev: http://cr.openjdk.java.net/~rbackman/8152664/ > > Thanks > /R From stefan.karlsson at oracle.com Thu Apr 7 16:41:11 2016 From: stefan.karlsson at oracle.com (Stefan Karlsson) Date: Thu, 7 Apr 2016 18:41:11 +0200 Subject: RFR: 8153742: Remove unnecessary thread.inline.hpp includes Message-ID: <57068DA7.9080506@oracle.com> Hi all, Please review this tiny patch to remove includes of thread.inline.hpp from some .hpp files. http://cr.openjdk.java.net/~stefank/8153742/webrev.01/ https://bugs.openjdk.java.net/browse/JDK-8153742 I'm especially interested in getting rid of the include inside resourceArea.hpp, since that will allow me to setup a ResourceMark in ostream.hpp without leaking inline.hpp files. I've verified that none of the changed files use any of the functions in thread.inline.hpp. Thanks, StefanK From stefan.karlsson at oracle.com Thu Apr 7 16:49:11 2016 From: stefan.karlsson at oracle.com (Stefan Karlsson) Date: Thu, 7 Apr 2016 18:49:11 +0200 Subject: RFR: 8153659: Create a CHeap backed LogStream class In-Reply-To: <570644DD.3070304@oracle.com> References: <57054D6A.8030405@oracle.com> <570644DD.3070304@oracle.com> Message-ID: <57068F87.5050607@oracle.com> Hi again, I decided to fix the resourceArea.hpp problem, so that I could move the stringStreamWithResourceMark class into ostream.hpp. 
http://cr.openjdk.java.net/~stefank/8153659/webrev.03.delta http://cr.openjdk.java.net/~stefank/8153659/webrev.03 The patch is applied on top of the thread.inline.hpp patch in: http://mail.openjdk.java.net/pipermail/hotspot-dev/2016-April/022511.html Thanks StefanK On 2016-04-07 13:30, Stefan Karlsson wrote: > Hi all, > > I've updated the patch: > http://cr.openjdk.java.net/~stefank/8153659/webrev.02 > > The previous patch created the embedded ResourceMark after the > stringStream instance was created. I discussed the layout of the > classes with Bengt, and have decided to restructure this patch. I've > changed the code so that the ResourceMark is embedded in a new > stringStreamWithResourceMark class. This allows me to use the same > LogStreamBase class, but different stringClass template parameters, > for all three classes. > > I've put the stringStreamWithResourceMark class in logStream.hpp > instead of ostream.hpp, to prevent the include of resourceArea.hpp to > propagate through the ostream.hpp header. The resourceArea.hpp file is > problematic, since it includes and uses thread.inline.hpp. The > alternative would be to move the implementation of resourceArea.hpp > into a resource.inline.hpp file, so that header files could create > ResourceMark instances, without having to include thread.inline.hpp. > I'm leaving that exercise for another RFE. > > Thanks, > StefanK > > On 2016-04-06 19:54, Stefan Karlsson wrote: >> Hi all, >> >> Please review this patch to add a LogStream class that allocates its >> backing buffer from CHeap memory instead of Resource memory. >> >> http://cr.openjdk.java.net/~stefank/8153659/webrev.01 >> https://bugs.openjdk.java.net/browse/JDK-8153659 >> >> The main motivation for this is that we can't use Resource allocated >> memory during initialization, until Thread::current() has been >> initialized. So, a CHeap backed LogStream is desirable when we >> execute, for example, the following code during large pages >> initialization: >> >> void os::trace_page_sizes(const char* str, const size_t* page_sizes, >> int count) >> { >> if (TracePageSizes) { >> tty->print("%s: ", str); >> for (int i = 0; i < count; ++i) { >> tty->print(" " SIZE_FORMAT, page_sizes[i]); >> } >> tty->cr(); >> } >> } >> >> The patch restructures the code and creates a LogStreamBase template >> base class, which takes the backing outputStream class as a template >> parameter. We then have three concrete LogStream classes: >> >> LogStream - Buffer resource allocated with an embedded ResourceMark >> LogStreamNoResourceMark - Buffer resource allocated without an >> embedded ResourceMark >> LogStreamCHeap - Buffer CHeap allocated >> >> I moved the LogStream class from the logStream.inline.hpp file to >> logStream.hpp, for consistency. If that's causing problems while >> reviewing this, I can move it in a separate patch. >> >> Tested with JPRT with the TracePageSizes patch ( JDK-8152491) and >> internal VM tests. >> >> Thanks, >> StefanK > From christian.thalinger at oracle.com Thu Apr 7 16:59:41 2016 From: christian.thalinger at oracle.com (Christian Thalinger) Date: Thu, 7 Apr 2016 06:59:41 -1000 Subject: RFR (S): 8139921: add mx configuration files to support HotSpot IDE configuration generation In-Reply-To: References: <31F6B9E0-D7C2-4B73-8114-8616F82AE5B5@oracle.com> Message-ID: <4E949060-D14C-4986-A21D-69EFE55E095D@oracle.com> > On Apr 7, 2016, at 2:22 AM, Volker Simonis wrote: > > Hi Christian, > > I'd like to try this but I couldn't figure out how it works. 
> I don't have any experience with Graal/Truffel/JVMCI but as far as I > understood the creation of an Eclipse project should work equally well > for a vanilla hospot repository, right? Correct. > > The first question is where to get mx from (there's different > information in the Wiki and this email thread for example) ? > > https://bitbucket.org/allr/mx > oder > https://github.com/graalvm/mx.git Since the review the mx repository got moved to github. That?s the one you want to use. > > Now let's say I cloned the right mx version. How do I use it? > Let's say I have a vanilla jdk9 forest under /share/OpenJDK/jdk9 > I create an output directory under /share/OpenJDK/output-jdk9-dbg and > from there I call configure and build the images: > > cd /share/OpenJDK/output-jdk9-dbg > bash /share/OpenJDK/jdk9/configure --with-boot-jdk=.. > --with-debug-level=slowdebug > make images > > Taking this scenario, from which directory am I supposed to call 'mx > ideinit', where will the Eclipse project be created at and how is mx > supposed to find my configuration (i.e. platform) and generated files? I have never tried to have the output directory not in the source directory and mx might not support this. But it?s not really necessary because everything goes into build/ by default anyway. If you really want to have a separate output directory I suppose we could add an option to mx. You run mx in the hotspot source directory. Since I?m using Eclipse I usually run the eclipseinit command directly: cthaling at macbook:~/ws/jdk9/hs-comp/hotspot$ mx eclipseinit created /Users/cthaling/ws/jdk9/hs-comp/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.aarch64/.classpath created /Users/cthaling/ws/jdk9/hs-comp/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.aarch64/.checkstyle created /Users/cthaling/ws/jdk9/hs-comp/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.aarch64/.project created /Users/cthaling/ws/jdk9/hs-comp/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.aarch64/.settings/org.eclipse.jdt.core.prefs created /Users/cthaling/ws/jdk9/hs-comp/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.aarch64/.settings/org.eclipse.jdt.ui.prefs created /Users/cthaling/ws/jdk9/hs-comp/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.aarch64/.settings/org.eclipse.core.resources.prefs ? You can see it generates all the necessary files into the various source directories. The hotspot files are here: cthaling at macbook:~/ws/jdk9/hs-comp/hotspot$ ls .mx.jvmci/hotspot/eclipse/ server-fastdebug/ server-release/ server-slowdebug/ So, you have to enable ?Search for nested projects? when importing into Eclipse. Then everything should magically show up. > > Thanks a lot and best regards, > Volker > > > On Wed, Nov 11, 2015 at 1:42 AM, Christian Thalinger > wrote: >> [This is kind of a long email but contains pictures :-)] >> >> https://bugs.openjdk.java.net/browse/JDK-8139921 >> http://cr.openjdk.java.net/~twisti/8139921/webrev/ >> >> In order to make the IDE experience more pleasant now that JEP 243 is integrated we would like to use mx (https://bitbucket.org/allr/mx) for IDE configuration generation. For this we have to integrate a few mx support files into the hotspot repository. 
>> >> The mx support files will be under a dot-directory: >> >> $ hg st --all .mx.jvmci/ >> C .mx.jvmci/.project >> C .mx.jvmci/.pydevproject >> C .mx.jvmci/eclipse-settings/org.eclipse.jdt.core.prefs >> C .mx.jvmci/hotspot/templates/eclipse/cproject >> C .mx.jvmci/hotspot/templates/eclipse/settings/org.eclipse.cdt.core.prefs >> C .mx.jvmci/hotspot/templates/eclipse/settings/org.eclipse.cdt.ui.prefs >> C .mx.jvmci/hotspot/templates/eclipse/settings/org.eclipse.core.runtime.prefs >> C .mx.jvmci/mx_jvmci.py >> C .mx.jvmci/suite.py >> >> mx itself is and will stay an external tool. Some documentation on how to use it can be found here: >> >> https://wiki.openjdk.java.net/display/Graal/Instructions >> https://wiki.openjdk.java.net/display/Graal/Eclipse >> >> It basically boils down to: >> >> $ mx ideinit >> >> and importing the configuration into your favorite IDE. >> >> This would give every developer the same view of the source code and we can also enforce code-style guidelines. >> >> Here is how the imported projects look like in Eclipse: >> >> >> >> This is most helpful for Compiler engineers who work on the JVMCI but there is value for others too. >> >> Notice the ?hotspot:*? projects at the top? These are projects for different HotSpot configurations. The main advantage here is that these include the generated files directory (if the configuration exists and the files are built). I only configured and built ?release? so these can been seen, fastdebug is empty: >> >> >> >> This makes it possible for Eclipse to find generated source code. Very helpful. For example, JVMTI. First, jvmtiUtils.hpp from the fastdebug configuration: >> >> >> >> and here is the release one: >> >> >> >> mx has lots of other commands but most of them are not really useful for us. The only ones worth mentioning besides ideinit are findbugs and checkstyle. >> >> findbugs runs FindBugs (doh!) on all Java projects that mx knows about: >> >> cthaling at macbook:~/ws/8139921/hotspot$ mx findbugs >> Scanning archives (15 / 30) >> 2 analysis passes to perform >> Pass 1: Analyzing classes (524 / 524) - 100% complete >> Pass 2: Analyzing classes (305 / 305) - 100% complete >> Done with analysis >> Calculating exit code... >> Exit code set to: 0 >> >> checkstyle checks the Java projects against some predefined style. This is particularly helpful for people who don?t use an IDE or to make sure everything matches the style after applying an external patch: >> >> cthaling at macbook:~/ws/8139921/hotspot$ mx checkstyle >> Running Checkstyle on /Users/cthaling/ws/8139921/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.runtime/src using /Users/cthaling/ws/8139921/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.service/.checkstyle_checks.xml... >> /Users/cthaling/ws/8139921/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.runtime/src/jdk/vm/ci/runtime/JVMCI.java:33: 'static' modifier out of order with the JLS suggestions. >> >> or: >> >> cthaling at macbook:~/ws/8139921/hotspot$ mx checkstyle >> Running Checkstyle on /Users/cthaling/ws/8139921/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.runtime/src using /Users/cthaling/ws/8139921/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.service/.checkstyle_checks.xml... >> /Users/cthaling/ws/8139921/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.runtime/src/jdk/vm/ci/runtime/JVMCI.java:43: Name 'FOO' must match pattern '^[a-z][a-zA-Z0-9]*$'. >> >> That?s all, folks! 
From coleen.phillimore at oracle.com Thu Apr 7 17:08:16 2016 From: coleen.phillimore at oracle.com (Coleen Phillimore) Date: Thu, 7 Apr 2016 13:08:16 -0400 Subject: RFR: 8153742: Remove unnecessary thread.inline.hpp includes In-Reply-To: <57068DA7.9080506@oracle.com> References: <57068DA7.9080506@oracle.com> Message-ID: <57069400.8040100@oracle.com> Looks good. Thanks for doing this! Coleen On 4/7/16 12:41 PM, Stefan Karlsson wrote: > Hi all, > > Please review this tiny patch to remove includes of thread.inline.hpp > from some .hpp files. > > http://cr.openjdk.java.net/~stefank/8153742/webrev.01/ > https://bugs.openjdk.java.net/browse/JDK-8153742 > > I'm especially interested in getting rid of the include inside > resourceArea.hpp, since that will allow me to setup a ResourceMark in > ostream.hpp without leaking inline.hpp files. > > I've verified that none of the changed files use any of the functions > in thread.inline.hpp. > > Thanks, > StefanK From stefan.karlsson at oracle.com Thu Apr 7 17:11:26 2016 From: stefan.karlsson at oracle.com (Stefan Karlsson) Date: Thu, 7 Apr 2016 19:11:26 +0200 Subject: RFR: 8153742: Remove unnecessary thread.inline.hpp includes In-Reply-To: <57069400.8040100@oracle.com> References: <57068DA7.9080506@oracle.com> <57069400.8040100@oracle.com> Message-ID: <570694BE.2050203@oracle.com> Thanks, Coleen! StefanK On 2016-04-07 19:08, Coleen Phillimore wrote: > > Looks good. Thanks for doing this! > Coleen > > On 4/7/16 12:41 PM, Stefan Karlsson wrote: >> Hi all, >> >> Please review this tiny patch to remove includes of thread.inline.hpp >> from some .hpp files. >> >> http://cr.openjdk.java.net/~stefank/8153742/webrev.01/ >> https://bugs.openjdk.java.net/browse/JDK-8153742 >> >> I'm especially interested in getting rid of the include inside >> resourceArea.hpp, since that will allow me to setup a ResourceMark in >> ostream.hpp without leaking inline.hpp files. >> >> I've verified that none of the changed files use any of the functions >> in thread.inline.hpp. >> >> Thanks, >> StefanK > From christian.thalinger at oracle.com Thu Apr 7 17:28:08 2016 From: christian.thalinger at oracle.com (Christian Thalinger) Date: Thu, 7 Apr 2016 07:28:08 -1000 Subject: RFR (XL): 8152664 - Support non-continuous CodeBlobs in HotSpot In-Reply-To: <570651B7.9020004@redhat.com> References: <20160407121221.GQ9504@rbackman> <570651B7.9020004@redhat.com> Message-ID: > On Apr 7, 2016, at 2:25 AM, Andrew Dinn wrote: > > On 07/04/16 13:12, Rickard B?ckman wrote: >> can I please have review for this patch please? >> >> So far CodeBlobs have required all the data (metadata, oops, code, etc) >> to be in one continuous blob With this patch we are looking to change >> that. It's been done by changing offsets in CodeBlob to addresses, >> making some methods virtual to allow different behavior and also >> creating a couple of new classes. CompiledMethod now sits inbetween >> CodeBlob and nmethod. >> >> CR: https://bugs.openjdk.java.net/browse/JDK-8152664 >> Webrev: http://cr.openjdk.java.net/~rbackman/8152664/ > > Why do 'we want to be able to have pieces of compiled code and metadata > that are not located in one continuous piece of memory'? Is there a > motive for making this change? 
Yes, there are two reasons: 1) We would like to separate metadata from code in the CodeCache so that we (potentially) get better CPU cache utilization: https://bugs.openjdk.java.net/browse/JDK-7072317 2) As described in my last year?s JVMLS talk our AOT solution uses shared libraries as a container format and as you know code and data need to be separate. If you haven?t been at JVMLS here is the recording of my talk and the section about the container format: https://youtu.be/Xybzyv8qbOc?t=10m41s > > regards, > > > Andrew Dinn > ----------- > From jon.masamitsu at oracle.com Thu Apr 7 17:41:22 2016 From: jon.masamitsu at oracle.com (Jon Masamitsu) Date: Thu, 7 Apr 2016 10:41:22 -0700 Subject: RFR: 8153658: Redundant memory copy in LogStreamNoResourceMark In-Reply-To: <570547BA.7070003@oracle.com> References: <570547BA.7070003@oracle.com> Message-ID: <57069BC2.7080302@oracle.com> Stefan, Change looks good. Jon On 04/06/2016 10:30 AM, Stefan Karlsson wrote: > Hi all, > > Please review this patch to remove a redundant memory copy in the UL > log stream classes. > > http://cr.openjdk.java.net/~stefank/8153658/webrev.01 > https://bugs.openjdk.java.net/browse/JDK-8153658 > > LogStreamNoResourceMark copies the resource allocated string buffer > into a new resource allocated string buffer before copying the data to > UL. > > Moreover, this also causes problem when implementing a log stream > class using CHeap memory instead of Resource memory. Even though the > first allocation is done from CHeap the second copy comes from > Resource memory. > > Thanks, > StefanK From stefan.karlsson at oracle.com Thu Apr 7 17:57:15 2016 From: stefan.karlsson at oracle.com (Stefan Karlsson) Date: Thu, 7 Apr 2016 19:57:15 +0200 Subject: RFR: 8153658: Redundant memory copy in LogStreamNoResourceMark In-Reply-To: <57069BC2.7080302@oracle.com> References: <570547BA.7070003@oracle.com> <57069BC2.7080302@oracle.com> Message-ID: <57069F7B.5000003@oracle.com> Thanks, Jon. StefanK On 2016-04-07 19:41, Jon Masamitsu wrote: > Stefan, > > Change looks good. > > Jon > > On 04/06/2016 10:30 AM, Stefan Karlsson wrote: >> Hi all, >> >> Please review this patch to remove a redundant memory copy in the UL >> log stream classes. >> >> http://cr.openjdk.java.net/~stefank/8153658/webrev.01 >> https://bugs.openjdk.java.net/browse/JDK-8153658 >> >> LogStreamNoResourceMark copies the resource allocated string buffer >> into a new resource allocated string buffer before copying the data >> to UL. >> >> Moreover, this also causes problem when implementing a log stream >> class using CHeap memory instead of Resource memory. Even though the >> first allocation is done from CHeap the second copy comes from >> Resource memory. >> >> Thanks, >> StefanK > From jon.masamitsu at oracle.com Thu Apr 7 17:55:24 2016 From: jon.masamitsu at oracle.com (Jon Masamitsu) Date: Thu, 7 Apr 2016 10:55:24 -0700 Subject: RFR: 8153742: Remove unnecessary thread.inline.hpp includes In-Reply-To: <57068DA7.9080506@oracle.com> References: <57068DA7.9080506@oracle.com> Message-ID: <57069F0C.1080101@oracle.com> Looks good. Jon On 04/07/2016 09:41 AM, Stefan Karlsson wrote: > Hi all, > > Please review this tiny patch to remove includes of thread.inline.hpp > from some .hpp files. 
> > http://cr.openjdk.java.net/~stefank/8153742/webrev.01/ > https://bugs.openjdk.java.net/browse/JDK-8153742 > > I'm especially interested in getting rid of the include inside > resourceArea.hpp, since that will allow me to setup a ResourceMark in > ostream.hpp without leaking inline.hpp files. > > I've verified that none of the changed files use any of the functions > in thread.inline.hpp. > > Thanks, > StefanK From vladimir.kozlov at oracle.com Thu Apr 7 18:09:58 2016 From: vladimir.kozlov at oracle.com (Vladimir Kozlov) Date: Thu, 7 Apr 2016 11:09:58 -0700 Subject: RFR (XL): 8152664 - Support non-continuous CodeBlobs in HotSpot In-Reply-To: <20160407121221.GQ9504@rbackman> References: <20160407121221.GQ9504@rbackman> Message-ID: <5706A276.50703@oracle.com> Rickard, Undo noise changes in os_windows.cpp And where are closed changes? Thanks, Vladimir On 4/7/16 5:12 AM, Rickard B?ckman wrote: > Hi, > > can I please have review for this patch please? > > So far CodeBlobs have required all the data (metadata, oops, code, etc) > to be in one continuous blob With this patch we are looking to change > that. It's been done by changing offsets in CodeBlob to addresses, > making some methods virtual to allow different behavior and also > creating a couple of new classes. CompiledMethod now sits inbetween > CodeBlob and nmethod. > > CR: https://bugs.openjdk.java.net/browse/JDK-8152664 > Webrev: http://cr.openjdk.java.net/~rbackman/8152664/ > > Thanks > /R > From rachel.protacio at oracle.com Thu Apr 7 18:23:01 2016 From: rachel.protacio at oracle.com (Rachel Protacio) Date: Thu, 7 Apr 2016 14:23:01 -0400 Subject: RFR 8151939: VM_Version_init() print buffer is too small In-Reply-To: <570557F7.5050400@oracle.com> References: <57044C4C.9080601@oracle.com> <570557F7.5050400@oracle.com> Message-ID: <5706A585.6000007@oracle.com> Hi, Coleen, In src/cpu/x86/vm/vm_version_x86.cpp, - tty->cr(); - tty->print("Allocation"); + log->print("Allocation"); Had you meant to delete the cr() here? In src/os_cpu/linux_x86/vm/os_linux_x86.cpp, - if (PrintMiscellaneous && Verbose) { - tty->print("OS version is %d.%d, which %s support SSE/SSE2\n", + log_info(os)("OS version is %d.%d, which %s support SSE/SSE2\n", I think you need to remove the newline at the end. Otherwise looks good! Rachel On 4/6/2016 2:39 PM, Coleen Phillimore wrote: > > I removed ancient logging from the signal handler and left a corrected > comment instead. > > open webrev at http://cr.openjdk.java.net/~coleenp/8151939.02/webrev > bug link https://bugs.openjdk.java.net/browse/JDK-8151939 > > Thanks, > Coleen > > On 4/5/16 7:37 PM, Coleen Phillimore wrote: >> Summary: Increase buffer size, use logging to print out version and >> os information >> >> This replaces several -XX:+PrintMiscellaneous -XX:+Verbose to >> -Xlog:os or -Xlog:os+cpu. Most use info level logging because it's >> only printed once at the beginning, except where printing is in the >> signal handler, which uses debug level. Also, errors in setup use >> info level (not warning) since they never printed the warnings before >> without PrintMiscellaneous and Verbose. 
>> >> busaa027% java -Xlog:os -version >> [0.008s][info][os] SafePoint Polling address: 0x00007fde1d37f000 >> [0.008s][info][os] Memory Serialize Page address: 0x00007fde1d37d000 >> [0.009s][info][os] HotSpot is running with glibc 2.12, NPTL 2.12 >> java version "9-internal" >> Java(TM) SE Runtime Environment (fastdebug build >> 9-internal+0-2016-04-05-170806.cphillim.jdk9.vm-version) >> Java HotSpot(TM) 64-Bit Server VM (fastdebug build >> 9-internal+0-2016-04-05-170806.cphillim.jdk9.vm-version-coleen, mixed >> mode) >> >> busaa027% java -Xlog:os,os+cpu -version >> [0.008s][info][os] SafePoint Polling address: 0x00007f49c021f000 >> [0.008s][info][os] Memory Serialize Page address: 0x00007f49c021d000 >> [0.009s][info][os] HotSpot is running with glibc 2.12, NPTL 2.12 >> [0.011s][info][os,cpu] Logical CPUs per core: 2 >> [0.011s][info][os,cpu] L1 data cache line size: 64 >> [0.011s][info][os,cpu] UseSSE=4 UseAVX=2 UseAES=1 >> MaxVectorSize=64Allocation prefetching: PREFETCHNTA at distance 192, >> 4 lines of 64 bytes >> [0.011s][info][os,cpu] PrefetchCopyIntervalInBytes 576 >> [0.011s][info][os,cpu] PrefetchScanIntervalInBytes 576 >> [0.011s][info][os,cpu] PrefetchFieldsAhead 1 >> [0.011s][info][os,cpu] ContendedPaddingWidth 128 >> [0.011s][info][os,cpu] CPU:total 72 (18 cores per cpu, 2 threads per >> core) family 6 model 63 stepping 2, cmov, cx8, fxsr, mmx, sse, sse2, >> sse3, ssse3, sse4.1, sse4.2, popcnt, avx, avx2, aes, clmul, erms, >> lzcnt, ht, tsc, tscinvbit, bmi1, bmi2 >> [0.011s][info][os,cpu] CPU Model and flags from /proc/cpuinfo: >> [0.011s][info][os,cpu] model name : Intel(R) Xeon(R) CPU E5-2699 >> v3 @ 2.30GHz >> [0.011s][info][os,cpu] flags : fpu vme de pse tsc msr pae mce >> cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr >> sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc >> arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf >> eagerfpu pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 >> fma cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic movbe popcnt >> tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm ida arat epb >> xsaveopt pln pts dtherm tpr_shadow vnmi flexpriority ept vpid >> fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid >> java version "9-internal" >> Java(TM) SE Runtime Environment (fastdebug build >> 9-internal+0-2016-04-05-170806.cphillim.jdk9.vm-version) >> Java HotSpot(TM) 64-Bit Server VM (fastdebug build >> 9-internal+0-2016-04-05-170806.cphillim.jdk9.vm-version-coleen, mixed >> mode) >> >> open webrev at http://cr.openjdk.java.net/~coleenp/8151939.01/webrev >> bug link https://bugs.openjdk.java.net/browse/JDK-8151939 >> >> Tested in rbt and jprt. >> >> Thanks, >> Coleen > From coleen.phillimore at oracle.com Thu Apr 7 19:11:50 2016 From: coleen.phillimore at oracle.com (Coleen Phillimore) Date: Thu, 7 Apr 2016 15:11:50 -0400 Subject: RFR 8151939: VM_Version_init() print buffer is too small In-Reply-To: <5706A585.6000007@oracle.com> References: <57044C4C.9080601@oracle.com> <570557F7.5050400@oracle.com> <5706A585.6000007@oracle.com> Message-ID: <5706B0F6.1010707@oracle.com> On 4/7/16 2:23 PM, Rachel Protacio wrote: > Hi, Coleen, > > In src/cpu/x86/vm/vm_version_x86.cpp, > > - tty->cr(); > - tty->print("Allocation"); > + log->print("Allocation"); > > Had you meant to delete the cr() here? No, I had not meant to remove that. Thank you for catching that. I removed other \n and stray newlines in the logging but that one is needed. 
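For context, the pattern in question looks roughly like this -- an illustrative sketch, not the actual webrev contents; 'log' is assumed to be an outputStream* (for example a LogStream for the os+cpu tags), and 'polling_page' is just a placeholder name:

  // Keeping the cr() gives "Allocation prefetching" its own line instead of
  // letting it run into the preceding "MaxVectorSize=..." output, as seen in
  // the sample output quoted above.
  log->cr();
  log->print("Allocation");

  // And per an earlier comment in this thread, prefer p2i() over a raw
  // (intptr_t) cast when printing pointer values:
  log_info(os)("SafePoint Polling address: " INTPTR_FORMAT, p2i(polling_page));
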
> > In src/os_cpu/linux_x86/vm/os_linux_x86.cpp, > > - if (PrintMiscellaneous && Verbose) { > - tty->print("OS version is %d.%d, which %s support SSE/SSE2\n", > + log_info(os)("OS version is %d.%d, which %s support SSE/SSE2\n", > > > I think you need to remove the newline at the end. Yes, I fixed that. Vladimir noticed it too. > > Otherwise looks good! Thank you for reviewing! Coleen > Rachel > > On 4/6/2016 2:39 PM, Coleen Phillimore wrote: >> >> I removed ancient logging from the signal handler and left a >> corrected comment instead. >> >> open webrev at http://cr.openjdk.java.net/~coleenp/8151939.02/webrev >> bug link https://bugs.openjdk.java.net/browse/JDK-8151939 >> >> Thanks, >> Coleen >> >> On 4/5/16 7:37 PM, Coleen Phillimore wrote: >>> Summary: Increase buffer size, use logging to print out version and >>> os information >>> >>> This replaces several -XX:+PrintMiscellaneous -XX:+Verbose to >>> -Xlog:os or -Xlog:os+cpu. Most use info level logging because it's >>> only printed once at the beginning, except where printing is in the >>> signal handler, which uses debug level. Also, errors in setup use >>> info level (not warning) since they never printed the warnings >>> before without PrintMiscellaneous and Verbose. >>> >>> busaa027% java -Xlog:os -version >>> [0.008s][info][os] SafePoint Polling address: 0x00007fde1d37f000 >>> [0.008s][info][os] Memory Serialize Page address: 0x00007fde1d37d000 >>> [0.009s][info][os] HotSpot is running with glibc 2.12, NPTL 2.12 >>> java version "9-internal" >>> Java(TM) SE Runtime Environment (fastdebug build >>> 9-internal+0-2016-04-05-170806.cphillim.jdk9.vm-version) >>> Java HotSpot(TM) 64-Bit Server VM (fastdebug build >>> 9-internal+0-2016-04-05-170806.cphillim.jdk9.vm-version-coleen, >>> mixed mode) >>> >>> busaa027% java -Xlog:os,os+cpu -version >>> [0.008s][info][os] SafePoint Polling address: 0x00007f49c021f000 >>> [0.008s][info][os] Memory Serialize Page address: 0x00007f49c021d000 >>> [0.009s][info][os] HotSpot is running with glibc 2.12, NPTL 2.12 >>> [0.011s][info][os,cpu] Logical CPUs per core: 2 >>> [0.011s][info][os,cpu] L1 data cache line size: 64 >>> [0.011s][info][os,cpu] UseSSE=4 UseAVX=2 UseAES=1 >>> MaxVectorSize=64Allocation prefetching: PREFETCHNTA at distance 192, >>> 4 lines of 64 bytes >>> [0.011s][info][os,cpu] PrefetchCopyIntervalInBytes 576 >>> [0.011s][info][os,cpu] PrefetchScanIntervalInBytes 576 >>> [0.011s][info][os,cpu] PrefetchFieldsAhead 1 >>> [0.011s][info][os,cpu] ContendedPaddingWidth 128 >>> [0.011s][info][os,cpu] CPU:total 72 (18 cores per cpu, 2 threads per >>> core) family 6 model 63 stepping 2, cmov, cx8, fxsr, mmx, sse, sse2, >>> sse3, ssse3, sse4.1, sse4.2, popcnt, avx, avx2, aes, clmul, erms, >>> lzcnt, ht, tsc, tscinvbit, bmi1, bmi2 >>> [0.011s][info][os,cpu] CPU Model and flags from /proc/cpuinfo: >>> [0.011s][info][os,cpu] model name : Intel(R) Xeon(R) CPU E5-2699 >>> v3 @ 2.30GHz >>> [0.011s][info][os,cpu] flags : fpu vme de pse tsc msr pae mce >>> cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr >>> sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc >>> arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf >>> eagerfpu pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 >>> fma cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic movbe popcnt >>> tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm ida arat >>> epb xsaveopt pln pts dtherm tpr_shadow vnmi flexpriority ept vpid >>> fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid >>> java 
version "9-internal" >>> Java(TM) SE Runtime Environment (fastdebug build >>> 9-internal+0-2016-04-05-170806.cphillim.jdk9.vm-version) >>> Java HotSpot(TM) 64-Bit Server VM (fastdebug build >>> 9-internal+0-2016-04-05-170806.cphillim.jdk9.vm-version-coleen, >>> mixed mode) >>> >>> open webrev at http://cr.openjdk.java.net/~coleenp/8151939.01/webrev >>> bug link https://bugs.openjdk.java.net/browse/JDK-8151939 >>> >>> Tested in rbt and jprt. >>> >>> Thanks, >>> Coleen >> > From alejandro.murillo at oracle.com Thu Apr 7 19:32:23 2016 From: alejandro.murillo at oracle.com (Alejandro Murillo) Date: Thu, 7 Apr 2016 13:32:23 -0600 Subject: Merging jdk9/hs-rt with jdk9/hs In-Reply-To: <57058B56.7060605@oracle.com> References: <56F08ACA.6030705@oracle.com> <57058B56.7060605@oracle.com> Message-ID: <5706B5C7.1080500@oracle.com> Hi Mikael, I apologize I missed your original email. Just seeing this now. You don't mention the plans about gate-keeping, will the hs-rt gatekeeper will also also assume the synching with dev, and therefore the submission of PIT, etc? If not, since main will be constantly changing, I will need to be provided with the tips of a stable snapshot to merge with dev to run pit. And then a hint of when to push the merge back to main. Since we have to push with JPRT, there are some situations for potential problems here. Of course, that's not different to what is currently done to merge jdk9/dev to master, except that we now have mach5 to pin point stable jdk9/dev snapshots, and also, there's nothing coming back from master to dev weekly (except tags) and once in a while special integrations. And as mentioned above, with hs repos, we use JPRT to push Thanks Alejandro On 4/6/2016 4:19 PM, Mikael Vidstedt wrote: > > Having heard no feedback[1], we're going to go ahead with this > experiment and the plan is to do the switch next week, *Friday April > 15th*. Again, please note that any outstanding work based on > jdk9/hs-rt will have to be rebased on jdk9/hs once the switch is made. > More information as we get closer to the actual switchover. > > Let us know if you have any concerns with the date, and/or any > feedback on how it's working out. > > Cheers, > Mikael > > [1] Not even from Volker *hint* ;) > > On 3/21/2016 4:59 PM, Mikael Vidstedt wrote: >> >> All, >> >> The JDK 9 development of Hotspot is primarily done in two different >> mercurial forests: jdk9/hs-rt[1], and jdk9/hs-comp[2]. In June of >> last year we moved[3] all the GC development from jdk9/hs-gc[4] to >> jdk9/hs-rt, and the experience so far has been a good one. Change >> propagation (from jdk9/hs-rt to jdk9/hs-gc and vice verse) is now a >> non-issue, we get testing faster on the union of the changes where >> previously it could take weeks to catch a GC related bug in RT >> testing, etc. >> >> However, both jdk9/hs-rt and jdk9/hs-comp still integrate through a >> third forest - jdk9/hs[5], aka. hs "main" - before the changes are >> integrated to jdk9/dev[6]. In line with the previous simplification, >> we would like to suggest a further simplification of the forest >> structure. Specifically, we suggest that the work currently done on >> the jdk9/hs-rt forest moves directly to the jdk9/hs forest. In >> addition to making the forest structure easier to understand, this >> would have the benefit of removing one set of integrations (jdk9/hs >> <-> jdk9/hs-rt), which further reduces cost and propagation time. 
It >> is also paving the way for eventually integrating up to jdk9/dev more >> often (but that is a separate discussion). >> >> We suggest that the experiment starts on April 15th, and goes on for >> at least two weeks (giving us some time to adapt in case of issues). >> Monitoring and evaluation of the new structure will take place >> continuously, with an option to revert back if things do not work >> out. The experiment would keep going for at least a few months, after >> which we will evaluate it and depending on the results consider >> making it the new standard. If so, the jdk9/hs-rt forest will >> eventually be retired, with an option of looking at further reduction >> of forests going forward. At least for now, we suggest that >> jdk9/hs-comp remains a separate forest and that it integrates through >> jdk9/hs just like it does today. >> >> Much like when we merged the jdk9/hs-gc and jdk9/hs-rt forests we >> would leave the jdk9/hs-rt forest around until we see if the >> experiment works out. We would also lock it down so that no >> accidental integrations are made to it. Once the jdk9/hs-rt forest is >> locked down, any work in flight based on it would have to be rebased >> on jdk9/hs. >> >> Please let us know if you have any feedback or questions! >> >> Cheers, >> Mikael >> >> [1]http://hg.openjdk.java.net/jdk9/hs-rt >> [2]http://hg.openjdk.java.net/jdk9/hs-comp >> [3]http://mail.openjdk.java.net/pipermail/hotspot-dev/2015-May/thread.html >> >> [4]http://hg.openjdk.java.net/jdk9/hs-gc >> [5]http://hg.openjdk.java.net/jdk9/hs >> [6]http://hg.openjdk.java.net/jdk9/dev >> > -- Alejandro From max.ockner at oracle.com Thu Apr 7 20:41:16 2016 From: max.ockner at oracle.com (Max Ockner) Date: Thu, 07 Apr 2016 16:41:16 -0400 Subject: Fwd: Re: RFR 8151939: VM_Version_init() print buffer is too small In-Reply-To: <57069F48.2020107@oracle.com> References: <570677A9.3060901@oracle.com> <57069F48.2020107@oracle.com> Message-ID: <5706C5EC.3010804@oracle.com> It looks like you already have a second reviewer, but this looks good to me. I am assuming that "-version" is the appropriate program to test with in this case since the logging is closely related to the version. Thanks, Max On 4/7/2016 1:56 PM, Coleen Phillimore wrote: > > Can one of you guys review this change? It only needs a small r > reviewer because Vladimir is capital R. > > thanks, > Coleen > > > -------- Forwarded Message -------- > Subject: Re: RFR 8151939: VM_Version_init() print buffer is too small > Date: Thu, 7 Apr 2016 11:07:21 -0400 > From: Coleen Phillimore > To: hotspot-dev at openjdk.java.net > > > > On 4/6/16 4:17 PM, Vladimir Kozlov wrote: > > Should we use p2i() instead of (intprt_t) for polling and > > mem_serialize pages? > > Yes, this is better. I fixed these and recompiled. > > > > os_windows.cpp - does ~Log destruct or will generate 'new line'? I am > > simple asking since I don't know. > No the log.debug() lines each have a new line (which makes the output > come out on multiple lines). The only way I know to make the output come > out in all one line is to use a logStream() which requires a > ResourceMark which is inconvenient in a lot of places in the os layer > code (may not have current thread). > > > > os_linux_x86.cpp - do we need \n at the end of logging line?: > > + log_info(os)("OS version is %d.%d, which %s support SSE/SSE2\n", > > The \n is unneeded, I missed this. Thanks. > > > > Why new OsCpuLoggingTest.java test has SAP Copyright? > > Oops, I copied this from the SAP test. 
Thank you for noticing this! > > Coleen > > > > Thanks, > > Vladimir > > > > On 4/6/16 11:39 AM, Coleen Phillimore wrote: > >> > >> I removed ancient logging from the signal handler and left a corrected > >> comment instead. > >> > >> open webrev athttp://cr.openjdk.java.net/~coleenp/8151939.02/webrev > >> bug linkhttps://bugs.openjdk.java.net/browse/JDK-8151939 > >> > >> Thanks, > >> Coleen > >> > >> On 4/5/16 7:37 PM, Coleen Phillimore wrote: > >>> Summary: Increase buffer size, use logging to print out version and os > >>> information > >>> > >>> This replaces several -XX:+PrintMiscellaneous -XX:+Verbose to -Xlog:os > >>> or -Xlog:os+cpu. Most use info level logging because it's only > >>> printed once at the beginning, except where printing is in the signal > >>> handler, which uses debug level. Also, errors in setup use info level > >>> (not warning) since they never printed the warnings before without > >>> PrintMiscellaneous and Verbose. > >>> > >>> busaa027% java -Xlog:os -version > >>> [0.008s][info][os] SafePoint Polling address: 0x00007fde1d37f000 > >>> [0.008s][info][os] Memory Serialize Page address: 0x00007fde1d37d000 > >>> [0.009s][info][os] HotSpot is running with glibc 2.12, NPTL 2.12 > >>> java version "9-internal" > >>> Java(TM) SE Runtime Environment (fastdebug build > >>> 9-internal+0-2016-04-05-170806.cphillim.jdk9.vm-version) > >>> Java HotSpot(TM) 64-Bit Server VM (fastdebug build > >>> 9-internal+0-2016-04-05-170806.cphillim.jdk9.vm-version-coleen, mixed > >>> mode) > >>> > >>> busaa027% java -Xlog:os,os+cpu -version > >>> [0.008s][info][os] SafePoint Polling address: 0x00007f49c021f000 > >>> [0.008s][info][os] Memory Serialize Page address: 0x00007f49c021d000 > >>> [0.009s][info][os] HotSpot is running with glibc 2.12, NPTL 2.12 > >>> [0.011s][info][os,cpu] Logical CPUs per core: 2 > >>> [0.011s][info][os,cpu] L1 data cache line size: 64 > >>> [0.011s][info][os,cpu] UseSSE=4 UseAVX=2 UseAES=1 > >>> MaxVectorSize=64Allocation prefetching: PREFETCHNTA at distance 192, 4 > >>> lines of 64 bytes > >>> [0.011s][info][os,cpu] PrefetchCopyIntervalInBytes 576 > >>> [0.011s][info][os,cpu] PrefetchScanIntervalInBytes 576 > >>> [0.011s][info][os,cpu] PrefetchFieldsAhead 1 > >>> [0.011s][info][os,cpu] ContendedPaddingWidth 128 > >>> [0.011s][info][os,cpu] CPU:total 72 (18 cores per cpu, 2 threads per > >>> core) family 6 model 63 stepping 2, cmov, cx8, fxsr, mmx, sse, sse2, > >>> sse3, ssse3, sse4.1, sse4.2, popcnt, avx, avx2, aes, clmul, erms, > >>> lzcnt, ht, tsc, tscinvbit, bmi1, bmi2 > >>> [0.011s][info][os,cpu] CPU Model and flags from /proc/cpuinfo: > >>> [0.011s][info][os,cpu] model name : Intel(R) Xeon(R) CPU E5-2699 v3 > >>> @ 2.30GHz > >>> [0.011s][info][os,cpu] flags : fpu vme de pse tsc msr pae mce > >>> cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse > >>> sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc > >>> arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf > >>> eagerfpu pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 fma > >>> cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic movbe popcnt > >>> tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm ida arat epb > >>> xsaveopt pln pts dtherm tpr_shadow vnmi flexpriority ept vpid fsgsbase > >>> tsc_adjust bmi1 avx2 smep bmi2 erms invpcid > >>> java version "9-internal" > >>> Java(TM) SE Runtime Environment (fastdebug build > >>> 9-internal+0-2016-04-05-170806.cphillim.jdk9.vm-version) > >>> Java HotSpot(TM) 64-Bit Server VM (fastdebug build > >>> 
9-internal+0-2016-04-05-170806.cphillim.jdk9.vm-version-coleen, mixed > >>> mode) > >>> > >>> open webrev athttp://cr.openjdk.java.net/~coleenp/8151939.01/webrev > >>> bug linkhttps://bugs.openjdk.java.net/browse/JDK-8151939 > >>> > >>> Tested in rbt and jprt. > >>> > >>> Thanks, > >>> Coleen > >> > > > From coleen.phillimore at oracle.com Thu Apr 7 20:46:38 2016 From: coleen.phillimore at oracle.com (Coleen Phillimore) Date: Thu, 7 Apr 2016 16:46:38 -0400 Subject: Fwd: Re: RFR 8151939: VM_Version_init() print buffer is too small In-Reply-To: <5706C5EC.3010804@oracle.com> References: <570677A9.3060901@oracle.com> <57069F48.2020107@oracle.com> <5706C5EC.3010804@oracle.com> Message-ID: <5706C72E.3030603@oracle.com> Thanks Max! On 4/7/16 4:41 PM, Max Ockner wrote: > It looks like you already have a second reviewer, but this looks good > to me. > I am assuming that "-version" is the appropriate program to test with > in this case since the logging is closely related to the version. A lot of the output comes out with java -version. The other output would be unreliable to test. Coleen > Thanks, > Max > > On 4/7/2016 1:56 PM, Coleen Phillimore wrote: >> >> Can one of you guys review this change? It only needs a small r >> reviewer because Vladimir is capital R. >> >> thanks, >> Coleen >> >> >> -------- Forwarded Message -------- >> Subject: Re: RFR 8151939: VM_Version_init() print buffer is too small >> Date: Thu, 7 Apr 2016 11:07:21 -0400 >> From: Coleen Phillimore >> To: hotspot-dev at openjdk.java.net >> >> >> >> On 4/6/16 4:17 PM, Vladimir Kozlov wrote: >> > Should we use p2i() instead of (intprt_t) for polling and >> > mem_serialize pages? >> >> Yes, this is better. I fixed these and recompiled. >> > >> > os_windows.cpp - does ~Log destruct or will generate 'new line'? I am >> > simple asking since I don't know. >> No the log.debug() lines each have a new line (which makes the output >> come out on multiple lines). The only way I know to make the output come >> out in all one line is to use a logStream() which requires a >> ResourceMark which is inconvenient in a lot of places in the os layer >> code (may not have current thread). >> > >> > os_linux_x86.cpp - do we need \n at the end of logging line?: >> > + log_info(os)("OS version is %d.%d, which %s support SSE/SSE2\n", >> >> The \n is unneeded, I missed this. Thanks. >> > >> > Why new OsCpuLoggingTest.java test has SAP Copyright? >> >> Oops, I copied this from the SAP test. Thank you for noticing this! >> >> Coleen >> > >> > Thanks, >> > Vladimir >> > >> > On 4/6/16 11:39 AM, Coleen Phillimore wrote: >> >> >> >> I removed ancient logging from the signal handler and left a corrected >> >> comment instead. >> >> >> >> open webrev athttp://cr.openjdk.java.net/~coleenp/8151939.02/webrev >> >> bug linkhttps://bugs.openjdk.java.net/browse/JDK-8151939 >> >> >> >> Thanks, >> >> Coleen >> >> >> >> On 4/5/16 7:37 PM, Coleen Phillimore wrote: >> >>> Summary: Increase buffer size, use logging to print out version and os >> >>> information >> >>> >> >>> This replaces several -XX:+PrintMiscellaneous -XX:+Verbose to -Xlog:os >> >>> or -Xlog:os+cpu. Most use info level logging because it's only >> >>> printed once at the beginning, except where printing is in the signal >> >>> handler, which uses debug level. Also, errors in setup use info level >> >>> (not warning) since they never printed the warnings before without >> >>> PrintMiscellaneous and Verbose. 
>> >>> >> >>> busaa027% java -Xlog:os -version >> >>> [0.008s][info][os] SafePoint Polling address: 0x00007fde1d37f000 >> >>> [0.008s][info][os] Memory Serialize Page address: 0x00007fde1d37d000 >> >>> [0.009s][info][os] HotSpot is running with glibc 2.12, NPTL 2.12 >> >>> java version "9-internal" >> >>> Java(TM) SE Runtime Environment (fastdebug build >> >>> 9-internal+0-2016-04-05-170806.cphillim.jdk9.vm-version) >> >>> Java HotSpot(TM) 64-Bit Server VM (fastdebug build >> >>> 9-internal+0-2016-04-05-170806.cphillim.jdk9.vm-version-coleen, mixed >> >>> mode) >> >>> >> >>> busaa027% java -Xlog:os,os+cpu -version >> >>> [0.008s][info][os] SafePoint Polling address: 0x00007f49c021f000 >> >>> [0.008s][info][os] Memory Serialize Page address: 0x00007f49c021d000 >> >>> [0.009s][info][os] HotSpot is running with glibc 2.12, NPTL 2.12 >> >>> [0.011s][info][os,cpu] Logical CPUs per core: 2 >> >>> [0.011s][info][os,cpu] L1 data cache line size: 64 >> >>> [0.011s][info][os,cpu] UseSSE=4 UseAVX=2 UseAES=1 >> >>> MaxVectorSize=64Allocation prefetching: PREFETCHNTA at distance 192, 4 >> >>> lines of 64 bytes >> >>> [0.011s][info][os,cpu] PrefetchCopyIntervalInBytes 576 >> >>> [0.011s][info][os,cpu] PrefetchScanIntervalInBytes 576 >> >>> [0.011s][info][os,cpu] PrefetchFieldsAhead 1 >> >>> [0.011s][info][os,cpu] ContendedPaddingWidth 128 >> >>> [0.011s][info][os,cpu] CPU:total 72 (18 cores per cpu, 2 threads per >> >>> core) family 6 model 63 stepping 2, cmov, cx8, fxsr, mmx, sse, sse2, >> >>> sse3, ssse3, sse4.1, sse4.2, popcnt, avx, avx2, aes, clmul, erms, >> >>> lzcnt, ht, tsc, tscinvbit, bmi1, bmi2 >> >>> [0.011s][info][os,cpu] CPU Model and flags from /proc/cpuinfo: >> >>> [0.011s][info][os,cpu] model name : Intel(R) Xeon(R) CPU E5-2699 v3 >> >>> @ 2.30GHz >> >>> [0.011s][info][os,cpu] flags : fpu vme de pse tsc msr pae mce >> >>> cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse >> >>> sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc >> >>> arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf >> >>> eagerfpu pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 fma >> >>> cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic movbe popcnt >> >>> tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm ida arat epb >> >>> xsaveopt pln pts dtherm tpr_shadow vnmi flexpriority ept vpid fsgsbase >> >>> tsc_adjust bmi1 avx2 smep bmi2 erms invpcid >> >>> java version "9-internal" >> >>> Java(TM) SE Runtime Environment (fastdebug build >> >>> 9-internal+0-2016-04-05-170806.cphillim.jdk9.vm-version) >> >>> Java HotSpot(TM) 64-Bit Server VM (fastdebug build >> >>> 9-internal+0-2016-04-05-170806.cphillim.jdk9.vm-version-coleen, mixed >> >>> mode) >> >>> >> >>> open webrev athttp://cr.openjdk.java.net/~coleenp/8151939.01/webrev >> >>> bug linkhttps://bugs.openjdk.java.net/browse/JDK-8151939 >> >>> >> >>> Tested in rbt and jprt. >> >>> >> >>> Thanks, >> >>> Coleen >> >> >> >> >> > From edward.nevill at gmail.com Thu Apr 7 22:45:31 2016 From: edward.nevill at gmail.com (Edward Nevill) Date: Thu, 7 Apr 2016 23:45:31 +0100 Subject: [aarch64-port-dev ] RFR: aarch64: Add Arrays.fill stub code In-Reply-To: <57067DD3.1090503@redhat.com> References: <570532CF.1050903@redhat.com> <57054CCA.1080900@redhat.com> <57066DD6.9030508@redhat.com> <57067DD3.1090503@redhat.com> Message-ID: On Thu, Apr 7, 2016 at 4:33 PM, Andrew Haley wrote: > On 04/07/2016 03:25 PM, Andrew Haley wrote: > > On 04/07/2016 02:53 PM, Long Chen wrote: > >> Thanks Adnrew! 
> >> > >> It doesn't make sense for "disjoint fill", comment fixed in > http://people.linaro.org/~long.chen/ArrayFill/ArrayFill_v2.patch > > > > OK. > > By the way, patches which are anywhere other than cr.openjdk.java.net > will not be accepted. Please create a webrev and put it there. > What I meant to say was, I would be happy to author this patch for Long Chen http://cr.openjdk.java.net/~enevill/8153797/webrev/ and lets work on getting Long Chen author status so he can author his own patches. OK to push? Ed. From bharadwaj.yadavalli at oracle.com Thu Apr 7 23:36:25 2016 From: bharadwaj.yadavalli at oracle.com (S. Bharadwaj Yadavalli) Date: Thu, 7 Apr 2016 19:36:25 -0400 Subject: RFR: 8153655: TESTBUG: intrinsics tests must be updated to enable diagnostic options Message-ID: <5706EEF9.4030900@oracle.com> Backing out the change [1] that fixed [2]. Bug: https://bugs.openjdk.java.net/browse/JDK-8153655 webrev: http://cr.openjdk.java.net/~bharadwaj/8153655/webrev/ Testing: Ran the tests in bug report successfully using product build. Thanks, Bharadwaj [1] http://hg.openjdk.java.net/jdk9/hs-comp/hotspot/rev/12b38ff7ad9b [2] https://bugs.openjdk.java.net/browse/JDK-8145348 From jesper.wilhelmsson at oracle.com Thu Apr 7 23:54:32 2016 From: jesper.wilhelmsson at oracle.com (Jesper Wilhelmsson) Date: Fri, 8 Apr 2016 01:54:32 +0200 Subject: RFR: 8153655: TESTBUG: intrinsics tests must be updated to enable diagnostic options In-Reply-To: <5706EEF9.4030900@oracle.com> References: <5706EEF9.4030900@oracle.com> Message-ID: <5706F338.9010301@oracle.com> Looks good! /Jesper Den 8/4/16 kl. 01:36, skrev S. Bharadwaj Yadavalli: > Backing out the change [1] that fixed [2]. > > Bug: https://bugs.openjdk.java.net/browse/JDK-8153655 > webrev: http://cr.openjdk.java.net/~bharadwaj/8153655/webrev/ > > Testing: Ran the tests in bug report successfully using product build. > > Thanks, > > Bharadwaj > > [1] http://hg.openjdk.java.net/jdk9/hs-comp/hotspot/rev/12b38ff7ad9b > [2] https://bugs.openjdk.java.net/browse/JDK-8145348 > > From bharadwaj.yadavalli at oracle.com Fri Apr 8 00:01:59 2016 From: bharadwaj.yadavalli at oracle.com (S. Bharadwaj Yadavalli) Date: Thu, 7 Apr 2016 20:01:59 -0400 Subject: RFR: 8153655: TESTBUG: intrinsics tests must be updated to enable diagnostic options In-Reply-To: <5706F338.9010301@oracle.com> References: <5706EEF9.4030900@oracle.com> <5706F338.9010301@oracle.com> Message-ID: <5706F4F7.2050602@oracle.com> Thanks, Jesper! Bharadwaj On 04/07/2016 07:54 PM, Jesper Wilhelmsson wrote: > Looks good! > /Jesper > > Den 8/4/16 kl. 01:36, skrev S. Bharadwaj Yadavalli: >> Backing out the change [1] that fixed [2]. >> >> Bug: https://bugs.openjdk.java.net/browse/JDK-8153655 >> webrev: http://cr.openjdk.java.net/~bharadwaj/8153655/webrev/ >> >> Testing: Ran the tests in bug report successfully using product build. >> >> Thanks, >> >> Bharadwaj >> >> [1] http://hg.openjdk.java.net/jdk9/hs-comp/hotspot/rev/12b38ff7ad9b >> [2] https://bugs.openjdk.java.net/browse/JDK-8145348 >> >> From vladimir.kozlov at oracle.com Fri Apr 8 00:20:30 2016 From: vladimir.kozlov at oracle.com (Vladimir Kozlov) Date: Thu, 7 Apr 2016 17:20:30 -0700 Subject: RFR: 8153655: TESTBUG: intrinsics tests must be updated to enable diagnostic options In-Reply-To: <5706EEF9.4030900@oracle.com> References: <5706EEF9.4030900@oracle.com> Message-ID: <5706F94E.2070803@oracle.com> No. This is very wrong change! 
The bug states that -XX:+UnlockDiagnosticVMOptions flag should be added to tests which miss it and not revert 8145348 changes. Vladimir On 4/7/16 4:36 PM, S. Bharadwaj Yadavalli wrote: > Backing out the change [1] that fixed [2]. > > Bug: https://bugs.openjdk.java.net/browse/JDK-8153655 > webrev: http://cr.openjdk.java.net/~bharadwaj/8153655/webrev/ > > Testing: Ran the tests in bug report successfully using product build. > > Thanks, > > Bharadwaj > > [1] http://hg.openjdk.java.net/jdk9/hs-comp/hotspot/rev/12b38ff7ad9b > [2] https://bugs.openjdk.java.net/browse/JDK-8145348 > > From vladimir.kozlov at oracle.com Fri Apr 8 01:11:16 2016 From: vladimir.kozlov at oracle.com (Vladimir Kozlov) Date: Thu, 7 Apr 2016 18:11:16 -0700 Subject: RFR: 8153655: TESTBUG: intrinsics tests must be updated to enable diagnostic options In-Reply-To: <5706F94E.2070803@oracle.com> References: <5706EEF9.4030900@oracle.com> <5706F94E.2070803@oracle.com> Message-ID: <57070534.80509@oracle.com> I talked with Bharadwaj and we decided to push backout with different bug: 8153816: Backout changes for JDK-8145348 till 8153655 is fixed and use 8153655 for real fix as its synopsis say. Thanks, Vladimir On 4/7/16 5:20 PM, Vladimir Kozlov wrote: > No. This is very wrong change! The bug states that -XX:+UnlockDiagnosticVMOptions flag should be added to tests which miss it and not > revert 8145348 changes. > > Vladimir > > On 4/7/16 4:36 PM, S. Bharadwaj Yadavalli wrote: >> Backing out the change [1] that fixed [2]. >> >> Bug: https://bugs.openjdk.java.net/browse/JDK-8153655 >> webrev: http://cr.openjdk.java.net/~bharadwaj/8153655/webrev/ >> >> Testing: Ran the tests in bug report successfully using product build. >> >> Thanks, >> >> Bharadwaj >> >> [1] http://hg.openjdk.java.net/jdk9/hs-comp/hotspot/rev/12b38ff7ad9b >> [2] https://bugs.openjdk.java.net/browse/JDK-8145348 >> >> From bharadwaj.yadavalli at oracle.com Fri Apr 8 02:39:51 2016 From: bharadwaj.yadavalli at oracle.com (S. Bharadwaj Yadavalli) Date: Thu, 7 Apr 2016 22:39:51 -0400 Subject: RFR: 8153816: [BACKOUT] Make intrinsics flags diagnostic Message-ID: <570719F7.7010404@oracle.com> Backing out the change [1] that fixed [2]. This is a sub-task of [3]. Bug: https://bugs.openjdk.java.net/browse/JDK-8153816 webrev: http://cr.openjdk.java.net/~bharadwaj/8153816/webrev/ Testing: Ran the tests in bug report successfully using product build. Thanks, Bharadwaj [1] http://hg.openjdk.java.net/jdk9/hs-comp/hotspot/rev/12b38ff7ad9b [2] https://bugs.openjdk.java.net/browse/JDK-8145348 [3] https://bugs.openjdk.java.net/browse/JDK-8153655 From vladimir.kozlov at oracle.com Fri Apr 8 02:48:00 2016 From: vladimir.kozlov at oracle.com (Vladimir Kozlov) Date: Thu, 7 Apr 2016 19:48:00 -0700 Subject: RFR: 8153816: [BACKOUT] Make intrinsics flags diagnostic In-Reply-To: <570719F7.7010404@oracle.com> References: <570719F7.7010404@oracle.com> Message-ID: <57071BE0.5000708@oracle.com> Looks good. Thanks, Vladimir On 4/7/16 7:39 PM, S. Bharadwaj Yadavalli wrote: > Backing out the change [1] that fixed [2]. This is a sub-task of [3]. > > Bug: https://bugs.openjdk.java.net/browse/JDK-8153816 > webrev: http://cr.openjdk.java.net/~bharadwaj/8153816/webrev/ > > Testing: Ran the tests in bug report successfully using product build. 
> > Thanks, > > Bharadwaj > > [1] http://hg.openjdk.java.net/jdk9/hs-comp/hotspot/rev/12b38ff7ad9b > [2] https://bugs.openjdk.java.net/browse/JDK-8145348 > [3] https://bugs.openjdk.java.net/browse/JDK-8153655 > From stefan.karlsson at oracle.com Fri Apr 8 06:47:07 2016 From: stefan.karlsson at oracle.com (Stefan Karlsson) Date: Fri, 8 Apr 2016 08:47:07 +0200 Subject: RFR: 8153742: Remove unnecessary thread.inline.hpp includes In-Reply-To: <57069F0C.1080101@oracle.com> References: <57068DA7.9080506@oracle.com> <57069F0C.1080101@oracle.com> Message-ID: <570753EB.9000005@oracle.com> Thanks, Jon. StefanK On 2016-04-07 19:55, Jon Masamitsu wrote: > Looks good. > > Jon > > > On 04/07/2016 09:41 AM, Stefan Karlsson wrote: >> Hi all, >> >> Please review this tiny patch to remove includes of thread.inline.hpp >> from some .hpp files. >> >> http://cr.openjdk.java.net/~stefank/8153742/webrev.01/ >> https://bugs.openjdk.java.net/browse/JDK-8153742 >> >> I'm especially interested in getting rid of the include inside >> resourceArea.hpp, since that will allow me to setup a ResourceMark in >> ostream.hpp without leaking inline.hpp files. >> >> I've verified that none of the changed files use any of the functions >> in thread.inline.hpp. >> >> Thanks, >> StefanK > From bengt.rutisson at oracle.com Fri Apr 8 08:09:58 2016 From: bengt.rutisson at oracle.com (Bengt Rutisson) Date: Fri, 8 Apr 2016 10:09:58 +0200 Subject: RFR: 8153659: Create a CHeap backed LogStream class In-Reply-To: <57068F87.5050607@oracle.com> References: <57054D6A.8030405@oracle.com> <570644DD.3070304@oracle.com> <57068F87.5050607@oracle.com> Message-ID: <57076756.9010200@oracle.com> Hi StefanK, On 2016-04-07 18:49, Stefan Karlsson wrote: > Hi again, > > I decided to fix the resourceArea.hpp problem, so that I could move > the stringStreamWithResourceMark class into ostream.hpp. > > http://cr.openjdk.java.net/~stefank/8153659/webrev.03.delta > http://cr.openjdk.java.net/~stefank/8153659/webrev.03 Looks even better! Bengt > > The patch is applied on top of the thread.inline.hpp patch in: > http://mail.openjdk.java.net/pipermail/hotspot-dev/2016-April/022511.html > > Thanks > StefanK > > On 2016-04-07 13:30, Stefan Karlsson wrote: >> Hi all, >> >> I've updated the patch: >> http://cr.openjdk.java.net/~stefank/8153659/webrev.02 >> >> The previous patch created the embedded ResourceMark after the >> stringStream instance was created. I discussed the layout of the >> classes with Bengt, and have decided to restructure this patch. I've >> changed the code so that the ResourceMark is embedded in a new >> stringStreamWithResourceMark class. This allows me to use the same >> LogStreamBase class, but different stringClass template parameters, >> for all three classes. >> >> I've put the stringStreamWithResourceMark class in logStream.hpp >> instead of ostream.hpp, to prevent the include of resourceArea.hpp to >> propagate through the ostream.hpp header. The resourceArea.hpp file >> is problematic, since it includes and uses thread.inline.hpp. The >> alternative would be to move the implementation of resourceArea.hpp >> into a resource.inline.hpp file, so that header files could create >> ResourceMark instances, without having to include thread.inline.hpp. >> I'm leaving that exercise for another RFE. >> >> Thanks, >> StefanK >> >> On 2016-04-06 19:54, Stefan Karlsson wrote: >>> Hi all, >>> >>> Please review this patch to add a LogStream class that allocates its >>> backing buffer from CHeap memory instead of Resource memory. 
>>> >>> http://cr.openjdk.java.net/~stefank/8153659/webrev.01 >>> https://bugs.openjdk.java.net/browse/JDK-8153659 >>> >>> The main motivation for this is that we can't use Resource allocated >>> memory during initialization, until Thread::current() has been >>> initialized. So, a CHeap backed LogStream is desirable when we >>> execute, for example, the following code during large pages >>> initialization: >>> >>> void os::trace_page_sizes(const char* str, const size_t* page_sizes, >>> int count) >>> { >>> if (TracePageSizes) { >>> tty->print("%s: ", str); >>> for (int i = 0; i < count; ++i) { >>> tty->print(" " SIZE_FORMAT, page_sizes[i]); >>> } >>> tty->cr(); >>> } >>> } >>> >>> The patch restructures the code and creates a LogStreamBase template >>> base class, which takes the backing outputStream class as a template >>> parameter. We then have three concrete LogStream classes: >>> >>> LogStream - Buffer resource allocated with an embedded ResourceMark >>> LogStreamNoResourceMark - Buffer resource allocated without an >>> embedded ResourceMark >>> LogStreamCHeap - Buffer CHeap allocated >>> >>> I moved the LogStream class from the logStream.inline.hpp file to >>> logStream.hpp, for consistency. If that's causing problems while >>> reviewing this, I can move it in a separate patch. >>> >>> Tested with JPRT with the TracePageSizes patch ( JDK-8152491) and >>> internal VM tests. >>> >>> Thanks, >>> StefanK >> > From stefan.karlsson at oracle.com Fri Apr 8 08:14:48 2016 From: stefan.karlsson at oracle.com (Stefan Karlsson) Date: Fri, 8 Apr 2016 10:14:48 +0200 Subject: RFR: 8153659: Create a CHeap backed LogStream class In-Reply-To: <57076756.9010200@oracle.com> References: <57054D6A.8030405@oracle.com> <570644DD.3070304@oracle.com> <57068F87.5050607@oracle.com> <57076756.9010200@oracle.com> Message-ID: <57076878.7060300@oracle.com> Thanks, Bengt! StefanK On 2016-04-08 10:09, Bengt Rutisson wrote: > > Hi StefanK, > > On 2016-04-07 18:49, Stefan Karlsson wrote: >> Hi again, >> >> I decided to fix the resourceArea.hpp problem, so that I could move >> the stringStreamWithResourceMark class into ostream.hpp. >> >> http://cr.openjdk.java.net/~stefank/8153659/webrev.03.delta >> http://cr.openjdk.java.net/~stefank/8153659/webrev.03 > > Looks even better! > > Bengt > >> >> The patch is applied on top of the thread.inline.hpp patch in: >> http://mail.openjdk.java.net/pipermail/hotspot-dev/2016-April/022511.html >> >> >> Thanks >> StefanK >> >> On 2016-04-07 13:30, Stefan Karlsson wrote: >>> Hi all, >>> >>> I've updated the patch: >>> http://cr.openjdk.java.net/~stefank/8153659/webrev.02 >>> >>> The previous patch created the embedded ResourceMark after the >>> stringStream instance was created. I discussed the layout of the >>> classes with Bengt, and have decided to restructure this patch. I've >>> changed the code so that the ResourceMark is embedded in a new >>> stringStreamWithResourceMark class. This allows me to use the same >>> LogStreamBase class, but different stringClass template parameters, >>> for all three classes. >>> >>> I've put the stringStreamWithResourceMark class in logStream.hpp >>> instead of ostream.hpp, to prevent the include of resourceArea.hpp >>> to propagate through the ostream.hpp header. The resourceArea.hpp >>> file is problematic, since it includes and uses thread.inline.hpp. 
>>> The alternative would be to move the implementation of >>> resourceArea.hpp into a resource.inline.hpp file, so that header >>> files could create ResourceMark instances, without having to include >>> thread.inline.hpp. I'm leaving that exercise for another RFE. >>> >>> Thanks, >>> StefanK >>> >>> On 2016-04-06 19:54, Stefan Karlsson wrote: >>>> Hi all, >>>> >>>> Please review this patch to add a LogStream class that allocates >>>> its backing buffer from CHeap memory instead of Resource memory. >>>> >>>> http://cr.openjdk.java.net/~stefank/8153659/webrev.01 >>>> https://bugs.openjdk.java.net/browse/JDK-8153659 >>>> >>>> The main motivation for this is that we can't use Resource >>>> allocated memory during initialization, until Thread::current() has >>>> been initialized. So, a CHeap backed LogStream is desirable when we >>>> execute, for example, the following code during large pages >>>> initialization: >>>> >>>> void os::trace_page_sizes(const char* str, const size_t* >>>> page_sizes, int count) >>>> { >>>> if (TracePageSizes) { >>>> tty->print("%s: ", str); >>>> for (int i = 0; i < count; ++i) { >>>> tty->print(" " SIZE_FORMAT, page_sizes[i]); >>>> } >>>> tty->cr(); >>>> } >>>> } >>>> >>>> The patch restructures the code and creates a LogStreamBase >>>> template base class, which takes the backing outputStream class as >>>> a template parameter. We then have three concrete LogStream classes: >>>> >>>> LogStream - Buffer resource allocated with an embedded ResourceMark >>>> LogStreamNoResourceMark - Buffer resource allocated without an >>>> embedded ResourceMark >>>> LogStreamCHeap - Buffer CHeap allocated >>>> >>>> I moved the LogStream class from the logStream.inline.hpp file to >>>> logStream.hpp, for consistency. If that's causing problems while >>>> reviewing this, I can move it in a separate patch. >>>> >>>> Tested with JPRT with the TracePageSizes patch ( JDK-8152491) and >>>> internal VM tests. >>>> >>>> Thanks, >>>> StefanK >>> >> > From aph at redhat.com Fri Apr 8 08:16:02 2016 From: aph at redhat.com (Andrew Haley) Date: Fri, 8 Apr 2016 09:16:02 +0100 Subject: [aarch64-port-dev ] RFR: aarch64: Add Arrays.fill stub code In-Reply-To: References: <570532CF.1050903@redhat.com> <57054CCA.1080900@redhat.com> <57066DD6.9030508@redhat.com> <57067DD3.1090503@redhat.com> Message-ID: <570768C2.2070106@redhat.com> On 07/04/16 23:45, Edward Nevill wrote: > OK to push? Sure, thanks. Andrew. From volker.simonis at gmail.com Fri Apr 8 09:59:54 2016 From: volker.simonis at gmail.com (Volker Simonis) Date: Fri, 8 Apr 2016 11:59:54 +0200 Subject: RFR (S): 8139921: add mx configuration files to support HotSpot IDE configuration generation In-Reply-To: <4E949060-D14C-4986-A21D-69EFE55E095D@oracle.com> References: <31F6B9E0-D7C2-4B73-8114-8616F82AE5B5@oracle.com> <4E949060-D14C-4986-A21D-69EFE55E095D@oracle.com> Message-ID: On Thu, Apr 7, 2016 at 6:59 PM, Christian Thalinger wrote: > > On Apr 7, 2016, at 2:22 AM, Volker Simonis wrote: > > Hi Christian, > > I'd like to try this but I couldn't figure out how it works. > I don't have any experience with Graal/Truffel/JVMCI but as far as I > understood the creation of an Eclipse project should work equally well > for a vanilla hospot repository, right? > > > Correct. > > > The first question is where to get mx from (there's different > information in the Wiki and this email thread for example) ? > > https://bitbucket.org/allr/mx > oder > https://github.com/graalvm/mx.git > > > Since the review the mx repository got moved to github. 
That?s the one you > want to use. > > > Now let's say I cloned the right mx version. How do I use it? > Let's say I have a vanilla jdk9 forest under /share/OpenJDK/jdk9 > I create an output directory under /share/OpenJDK/output-jdk9-dbg and > from there I call configure and build the images: > > cd /share/OpenJDK/output-jdk9-dbg > bash /share/OpenJDK/jdk9/configure --with-boot-jdk=.. > --with-debug-level=slowdebug > make images > > Taking this scenario, from which directory am I supposed to call 'mx > ideinit', where will the Eclipse project be created at and how is mx > supposed to find my configuration (i.e. platform) and generated files? > > > I have never tried to have the output directory not in the source directory > and mx might not support this. But it?s not really necessary because > everything goes into build/ by default anyway. If you really want to have a > separate output directory I suppose we could add an option to mx. > Hi Christian, thanks a lot for your explanation. I've never tried WITH the output directory in the source tree :) I have my source repositories on a network share and on every platform I configure and build from the shared sources into a local directory. That said, I've tried to configure and build INTO the source directory and called 'mx eclipseinit' afterwards. That worked as described and the created project worked quite nicely. But after playing around a little bit, I found the following points: - the project settings are hard-wired for linux-x86_64 - so it makes no difference if I configure and build on linux-ppc64, the generated project still tries to get the generated files from 'PARENT-5-PROJECT_LOC/build/linux-x86_64-normal-server-slowdebug' which will apparently not work on any other platfrom. - the same is true for the preprocessor defines. They are hard wired in ".mx.jvmci/hotspot/templates/eclipse/cproject". I don't actually understand why you are creating distinct release, fastdebug and slowdebug projects at all, if all of them have the same settings (e.g. ASSERT and DEBUG is also defined for server-release). This of course also applies to OS, CPU, etc... - so in the end you're right. I can call 'mx eclipseinit' even without a build directory and the generated Eclipse project will still be the same (i.e. hard-wired for ../build/linux-x86_64-slowdebug). I thought that 'mx eclipseinit' will use the output directory to get platform and configuration information from there and to create the corresponding Eclipse project from it. That means a project which is configured for the corresponding platform with the relevant (and only the relevant) sources plus preprocessor defines and the corresponding generated sources. But in the current from, that doesn't seems to be true. Or am I missing something? Thank you and best regards, Volker > You run mx in the hotspot source directory. 
Since I?m using Eclipse I > usually run the eclipseinit command directly: > > cthaling at macbook:~/ws/jdk9/hs-comp/hotspot$ mx eclipseinit > created > /Users/cthaling/ws/jdk9/hs-comp/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.aarch64/.classpath > created > /Users/cthaling/ws/jdk9/hs-comp/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.aarch64/.checkstyle > created > /Users/cthaling/ws/jdk9/hs-comp/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.aarch64/.project > created > /Users/cthaling/ws/jdk9/hs-comp/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.aarch64/.settings/org.eclipse.jdt.core.prefs > created > /Users/cthaling/ws/jdk9/hs-comp/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.aarch64/.settings/org.eclipse.jdt.ui.prefs > created > /Users/cthaling/ws/jdk9/hs-comp/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.aarch64/.settings/org.eclipse.core.resources.prefs > ? > > You can see it generates all the necessary files into the various source > directories. The hotspot files are here: > > cthaling at macbook:~/ws/jdk9/hs-comp/hotspot$ ls .mx.jvmci/hotspot/eclipse/ > server-fastdebug/ server-release/ server-slowdebug/ > > So, you have to enable ?Search for nested projects? when importing into > Eclipse. Then everything should magically show up. > > > Thanks a lot and best regards, > Volker > > > On Wed, Nov 11, 2015 at 1:42 AM, Christian Thalinger > wrote: > > [This is kind of a long email but contains pictures :-)] > > https://bugs.openjdk.java.net/browse/JDK-8139921 > http://cr.openjdk.java.net/~twisti/8139921/webrev/ > > In order to make the IDE experience more pleasant now that JEP 243 is > integrated we would like to use mx (https://bitbucket.org/allr/mx) for IDE > configuration generation. For this we have to integrate a few mx support > files into the hotspot repository. > > The mx support files will be under a dot-directory: > > $ hg st --all .mx.jvmci/ > C .mx.jvmci/.project > C .mx.jvmci/.pydevproject > C .mx.jvmci/eclipse-settings/org.eclipse.jdt.core.prefs > C .mx.jvmci/hotspot/templates/eclipse/cproject > C .mx.jvmci/hotspot/templates/eclipse/settings/org.eclipse.cdt.core.prefs > C .mx.jvmci/hotspot/templates/eclipse/settings/org.eclipse.cdt.ui.prefs > C > .mx.jvmci/hotspot/templates/eclipse/settings/org.eclipse.core.runtime.prefs > C .mx.jvmci/mx_jvmci.py > C .mx.jvmci/suite.py > > mx itself is and will stay an external tool. Some documentation on how to > use it can be found here: > > https://wiki.openjdk.java.net/display/Graal/Instructions > https://wiki.openjdk.java.net/display/Graal/Eclipse > > It basically boils down to: > > $ mx ideinit > > and importing the configuration into your favorite IDE. > > This would give every developer the same view of the source code and we can > also enforce code-style guidelines. > > Here is how the imported projects look like in Eclipse: > > > > This is most helpful for Compiler engineers who work on the JVMCI but there > is value for others too. > > Notice the ?hotspot:*? projects at the top? These are projects for > different HotSpot configurations. The main advantage here is that these > include the generated files directory (if the configuration exists and the > files are built). I only configured and built ?release? so these can been > seen, fastdebug is empty: > > > > This makes it possible for Eclipse to find generated source code. Very > helpful. For example, JVMTI. 
First, jvmtiUtils.hpp from the fastdebug > configuration: > > > > and here is the release one: > > > > mx has lots of other commands but most of them are not really useful for us. > The only ones worth mentioning besides ideinit are findbugs and checkstyle. > > findbugs runs FindBugs (doh!) on all Java projects that mx knows about: > > cthaling at macbook:~/ws/8139921/hotspot$ mx findbugs > Scanning archives (15 / 30) > 2 analysis passes to perform > Pass 1: Analyzing classes (524 / 524) - 100% complete > Pass 2: Analyzing classes (305 / 305) - 100% complete > Done with analysis > Calculating exit code... > Exit code set to: 0 > > checkstyle checks the Java projects against some predefined style. This is > particularly helpful for people who don?t use an IDE or to make sure > everything matches the style after applying an external patch: > > cthaling at macbook:~/ws/8139921/hotspot$ mx checkstyle > Running Checkstyle on > /Users/cthaling/ws/8139921/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.runtime/src > using > /Users/cthaling/ws/8139921/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.service/.checkstyle_checks.xml... > /Users/cthaling/ws/8139921/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.runtime/src/jdk/vm/ci/runtime/JVMCI.java:33: > 'static' modifier out of order with the JLS suggestions. > > or: > > cthaling at macbook:~/ws/8139921/hotspot$ mx checkstyle > Running Checkstyle on > /Users/cthaling/ws/8139921/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.runtime/src > using > /Users/cthaling/ws/8139921/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.service/.checkstyle_checks.xml... > /Users/cthaling/ws/8139921/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.runtime/src/jdk/vm/ci/runtime/JVMCI.java:43: > Name 'FOO' must match pattern '^[a-z][a-zA-Z0-9]*$'. > > That?s all, folks! > > From marcus.larsson at oracle.com Fri Apr 8 12:14:34 2016 From: marcus.larsson at oracle.com (Marcus Larsson) Date: Fri, 8 Apr 2016 14:14:34 +0200 Subject: RFR: 8153659: Create a CHeap backed LogStream class In-Reply-To: <57068F87.5050607@oracle.com> References: <57054D6A.8030405@oracle.com> <570644DD.3070304@oracle.com> <57068F87.5050607@oracle.com> Message-ID: <5707A0AA.4040005@oracle.com> Hi Stefan, On 04/07/2016 06:49 PM, Stefan Karlsson wrote: > Hi again, > > I decided to fix the resourceArea.hpp problem, so that I could move > the stringStreamWithResourceMark class into ostream.hpp. > > http://cr.openjdk.java.net/~stefank/8153659/webrev.03.delta > http://cr.openjdk.java.net/~stefank/8153659/webrev.03 Looks good! Thanks, Marcus > > The patch is applied on top of the thread.inline.hpp patch in: > http://mail.openjdk.java.net/pipermail/hotspot-dev/2016-April/022511.html > > Thanks > StefanK > > On 2016-04-07 13:30, Stefan Karlsson wrote: >> Hi all, >> >> I've updated the patch: >> http://cr.openjdk.java.net/~stefank/8153659/webrev.02 >> >> The previous patch created the embedded ResourceMark after the >> stringStream instance was created. I discussed the layout of the >> classes with Bengt, and have decided to restructure this patch. I've >> changed the code so that the ResourceMark is embedded in a new >> stringStreamWithResourceMark class. This allows me to use the same >> LogStreamBase class, but different stringClass template parameters, >> for all three classes. >> >> I've put the stringStreamWithResourceMark class in logStream.hpp >> instead of ostream.hpp, to prevent the include of resourceArea.hpp to >> propagate through the ostream.hpp header. 
The resourceArea.hpp file >> is problematic, since it includes and uses thread.inline.hpp. The >> alternative would be to move the implementation of resourceArea.hpp >> into a resource.inline.hpp file, so that header files could create >> ResourceMark instances, without having to include thread.inline.hpp. >> I'm leaving that exercise for another RFE. >> >> Thanks, >> StefanK >> >> On 2016-04-06 19:54, Stefan Karlsson wrote: >>> Hi all, >>> >>> Please review this patch to add a LogStream class that allocates its >>> backing buffer from CHeap memory instead of Resource memory. >>> >>> http://cr.openjdk.java.net/~stefank/8153659/webrev.01 >>> https://bugs.openjdk.java.net/browse/JDK-8153659 >>> >>> The main motivation for this is that we can't use Resource allocated >>> memory during initialization, until Thread::current() has been >>> initialized. So, a CHeap backed LogStream is desirable when we >>> execute, for example, the following code during large pages >>> initialization: >>> >>> void os::trace_page_sizes(const char* str, const size_t* page_sizes, >>> int count) >>> { >>> if (TracePageSizes) { >>> tty->print("%s: ", str); >>> for (int i = 0; i < count; ++i) { >>> tty->print(" " SIZE_FORMAT, page_sizes[i]); >>> } >>> tty->cr(); >>> } >>> } >>> >>> The patch restructures the code and creates a LogStreamBase template >>> base class, which takes the backing outputStream class as a template >>> parameter. We then have three concrete LogStream classes: >>> >>> LogStream - Buffer resource allocated with an embedded ResourceMark >>> LogStreamNoResourceMark - Buffer resource allocated without an >>> embedded ResourceMark >>> LogStreamCHeap - Buffer CHeap allocated >>> >>> I moved the LogStream class from the logStream.inline.hpp file to >>> logStream.hpp, for consistency. If that's causing problems while >>> reviewing this, I can move it in a separate patch. >>> >>> Tested with JPRT with the TracePageSizes patch ( JDK-8152491) and >>> internal VM tests. >>> >>> Thanks, >>> StefanK >> > From stefan.karlsson at oracle.com Fri Apr 8 12:16:11 2016 From: stefan.karlsson at oracle.com (Stefan Karlsson) Date: Fri, 8 Apr 2016 14:16:11 +0200 Subject: RFR: 8153659: Create a CHeap backed LogStream class In-Reply-To: <5707A0AA.4040005@oracle.com> References: <57054D6A.8030405@oracle.com> <570644DD.3070304@oracle.com> <57068F87.5050607@oracle.com> <5707A0AA.4040005@oracle.com> Message-ID: <5707A10B.2020609@oracle.com> Thanks, Marcus! StefanK On 2016-04-08 14:14, Marcus Larsson wrote: > Hi Stefan, > > On 04/07/2016 06:49 PM, Stefan Karlsson wrote: >> Hi again, >> >> I decided to fix the resourceArea.hpp problem, so that I could move >> the stringStreamWithResourceMark class into ostream.hpp. >> >> http://cr.openjdk.java.net/~stefank/8153659/webrev.03.delta >> http://cr.openjdk.java.net/~stefank/8153659/webrev.03 > > Looks good! > > Thanks, > Marcus > >> >> The patch is applied on top of the thread.inline.hpp patch in: >> http://mail.openjdk.java.net/pipermail/hotspot-dev/2016-April/022511.html >> >> >> Thanks >> StefanK >> >> On 2016-04-07 13:30, Stefan Karlsson wrote: >>> Hi all, >>> >>> I've updated the patch: >>> http://cr.openjdk.java.net/~stefank/8153659/webrev.02 >>> >>> The previous patch created the embedded ResourceMark after the >>> stringStream instance was created. I discussed the layout of the >>> classes with Bengt, and have decided to restructure this patch. I've >>> changed the code so that the ResourceMark is embedded in a new >>> stringStreamWithResourceMark class. 
This allows me to use the same >>> LogStreamBase class, but different stringClass template parameters, >>> for all three classes. >>> >>> I've put the stringStreamWithResourceMark class in logStream.hpp >>> instead of ostream.hpp, to prevent the include of resourceArea.hpp >>> to propagate through the ostream.hpp header. The resourceArea.hpp >>> file is problematic, since it includes and uses thread.inline.hpp. >>> The alternative would be to move the implementation of >>> resourceArea.hpp into a resource.inline.hpp file, so that header >>> files could create ResourceMark instances, without having to include >>> thread.inline.hpp. I'm leaving that exercise for another RFE. >>> >>> Thanks, >>> StefanK >>> >>> On 2016-04-06 19:54, Stefan Karlsson wrote: >>>> Hi all, >>>> >>>> Please review this patch to add a LogStream class that allocates >>>> its backing buffer from CHeap memory instead of Resource memory. >>>> >>>> http://cr.openjdk.java.net/~stefank/8153659/webrev.01 >>>> https://bugs.openjdk.java.net/browse/JDK-8153659 >>>> >>>> The main motivation for this is that we can't use Resource >>>> allocated memory during initialization, until Thread::current() has >>>> been initialized. So, a CHeap backed LogStream is desirable when we >>>> execute, for example, the following code during large pages >>>> initialization: >>>> >>>> void os::trace_page_sizes(const char* str, const size_t* >>>> page_sizes, int count) >>>> { >>>> if (TracePageSizes) { >>>> tty->print("%s: ", str); >>>> for (int i = 0; i < count; ++i) { >>>> tty->print(" " SIZE_FORMAT, page_sizes[i]); >>>> } >>>> tty->cr(); >>>> } >>>> } >>>> >>>> The patch restructures the code and creates a LogStreamBase >>>> template base class, which takes the backing outputStream class as >>>> a template parameter. We then have three concrete LogStream classes: >>>> >>>> LogStream - Buffer resource allocated with an embedded ResourceMark >>>> LogStreamNoResourceMark - Buffer resource allocated without an >>>> embedded ResourceMark >>>> LogStreamCHeap - Buffer CHeap allocated >>>> >>>> I moved the LogStream class from the logStream.inline.hpp file to >>>> logStream.hpp, for consistency. If that's causing problems while >>>> reviewing this, I can move it in a separate patch. >>>> >>>> Tested with JPRT with the TracePageSizes patch ( JDK-8152491) and >>>> internal VM tests. >>>> >>>> Thanks, >>>> StefanK >>> >> > From robbin.ehn at oracle.com Fri Apr 8 13:13:09 2016 From: robbin.ehn at oracle.com (Robbin Ehn) Date: Fri, 8 Apr 2016 15:13:09 +0200 Subject: RFF(feedback): 8153723: Change the default logging output for errors and warnings from stderr to stdout Message-ID: <5707AE65.4030602@oracle.com> Hi all, Could we get some feedback please! Bug: https://bugs.openjdk.java.net/browse/JDK-8153723 Thanks! /Robbin From erik.joelsson at oracle.com Fri Apr 8 13:14:41 2016 From: erik.joelsson at oracle.com (Erik Joelsson) Date: Fri, 8 Apr 2016 15:14:41 +0200 Subject: The new Hotspot Build System Message-ID: <5707AEC1.5020601@oracle.com> The first phase of the new Hotspot Build System (JEP 284) has now been pushed to jdk9/hs-rt. During a transition period, both the old and new build system will co-exist and you can pick the one you want using the configure option "--disable-new-hotspot-build". The new system is the default while the old is there as a backup in case your particular work flow broke in the new system. If you have any trouble, don't hesitate to contact me or file an issue. 
The goal is to push the second phase, where the old system is removed, and the new moves from hotspot/makefiles to hotspot/make, as soon as possible. If no blocking issues are raised, I will propose to push that change in one weeks time from today. This is of course negotiable. The change to the new build system also means that the infra team now assumes responsibility for the Hotspot makefiles. This means that all changes that touch makefiles should include build-dev in the review. This is especially important during the transition period so that we can ensure that both build systems are kept in sync. The user of the build shouldn't notice much of a difference. If you build from the top level, "make hotspot" will still do the same thing. The new build does not support building from the hotspot repo. Debug levels are controlled through the same configure parameters as before. There is one new configure parameter, --with-jvm-features, that can be used to create specialized builds with non standard combinations of jvm options, especially in conjunction with the new jvm variant "custom". See the configure help output for more details. There is a new way of generating Visual Studio projects using the top level target "hotspot-ide-project", which replaces the old script. /Erik From daniel.daugherty at oracle.com Fri Apr 8 13:37:52 2016 From: daniel.daugherty at oracle.com (Daniel D. Daugherty) Date: Fri, 8 Apr 2016 07:37:52 -0600 Subject: The new Hotspot Build System In-Reply-To: <5707AEC1.5020601@oracle.com> References: <5707AEC1.5020601@oracle.com> Message-ID: <5707B430.6070503@oracle.com> Congrats!! I'm looking forward to taking the new HotSpot build out for a spin around the block... :-) Dan On 4/8/16 7:14 AM, Erik Joelsson wrote: > The first phase of the new Hotspot Build System (JEP 284) has now been > pushed to jdk9/hs-rt. During a transition period, both the old and new > build system will co-exist and you can pick the one you want using the > configure option "--disable-new-hotspot-build". The new system is the > default while the old is there as a backup in case your particular > work flow broke in the new system. If you have any trouble, don't > hesitate to contact me or file an issue. > > The goal is to push the second phase, where the old system is removed, > and the new moves from hotspot/makefiles to hotspot/make, as soon as > possible. If no blocking issues are raised, I will propose to push > that change in one weeks time from today. This is of course negotiable. > > The change to the new build system also means that the infra team now > assumes responsibility for the Hotspot makefiles. This means that all > changes that touch makefiles should include build-dev in the review. > This is especially important during the transition period so that we > can ensure that both build systems are kept in sync. > > The user of the build shouldn't notice much of a difference. If you > build from the top level, "make hotspot" will still do the same thing. > The new build does not support building from the hotspot repo. Debug > levels are controlled through the same configure parameters as before. > There is one new configure parameter, --with-jvm-features, that can be > used to create specialized builds with non standard combinations of > jvm options, especially in conjunction with the new jvm variant > "custom". See the configure help output for more details. 
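A rough, hypothetical sketch of the custom-variant case (the feature names below are placeholders; the authoritative list is whatever "bash configure --help" prints for --with-jvm-features):

  bash configure --with-jvm-variants=custom --with-jvm-features="compiler1,compiler2"

which asks for a JVM where only the explicitly requested features are enabled.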
There is a > new way of generating Visual Studio projects using the top level > target "hotspot-ide-project", which replaces the old script. > > /Erik From erik.joelsson at oracle.com Fri Apr 8 13:42:53 2016 From: erik.joelsson at oracle.com (Erik Joelsson) Date: Fri, 8 Apr 2016 15:42:53 +0200 Subject: The new Hotspot Build System In-Reply-To: <5707B430.6070503@oracle.com> References: <5707AEC1.5020601@oracle.com> <5707B430.6070503@oracle.com> Message-ID: <5707B55D.2030602@oracle.com> Thanks!! Sure feels good to finally get it done. /Erik On 2016-04-08 15:37, Daniel D. Daugherty wrote: > Congrats!! I'm looking forward to taking the new HotSpot build out > for a spin around the block... :-) > > Dan > > > On 4/8/16 7:14 AM, Erik Joelsson wrote: >> The first phase of the new Hotspot Build System (JEP 284) has now >> been pushed to jdk9/hs-rt. During a transition period, both the old >> and new build system will co-exist and you can pick the one you want >> using the configure option "--disable-new-hotspot-build". The new >> system is the default while the old is there as a backup in case your >> particular work flow broke in the new system. If you have any >> trouble, don't hesitate to contact me or file an issue. >> >> The goal is to push the second phase, where the old system is >> removed, and the new moves from hotspot/makefiles to hotspot/make, as >> soon as possible. If no blocking issues are raised, I will propose to >> push that change in one weeks time from today. This is of course >> negotiable. >> >> The change to the new build system also means that the infra team now >> assumes responsibility for the Hotspot makefiles. This means that all >> changes that touch makefiles should include build-dev in the review. >> This is especially important during the transition period so that we >> can ensure that both build systems are kept in sync. >> >> The user of the build shouldn't notice much of a difference. If you >> build from the top level, "make hotspot" will still do the same >> thing. The new build does not support building from the hotspot repo. >> Debug levels are controlled through the same configure parameters as >> before. There is one new configure parameter, --with-jvm-features, >> that can be used to create specialized builds with non standard >> combinations of jvm options, especially in conjunction with the new >> jvm variant "custom". See the configure help output for more details. >> There is a new way of generating Visual Studio projects using the top >> level target "hotspot-ide-project", which replaces the old script. >> >> /Erik > From adinn at redhat.com Fri Apr 8 13:59:25 2016 From: adinn at redhat.com (Andrew Dinn) Date: Fri, 8 Apr 2016 14:59:25 +0100 Subject: RFR (XL): 8152664 - Support non-continuous CodeBlobs in HotSpot In-Reply-To: References: <20160407121221.GQ9504@rbackman> <570651B7.9020004@redhat.com> Message-ID: <5707B93D.2070704@redhat.com> On 07/04/16 18:28, Christian Thalinger wrote: > >> On Apr 7, 2016, at 2:25 AM, Andrew Dinn wrote: >> Why do 'we want to be able to have pieces of compiled code and >> metadata that are not located in one continuous piece of memory'? >> Is there a motive for making this change? 
> > Yes, there are two reasons: > > 1) We would like to separate metadata from code in the CodeCache so > that we (potentially) get better CPU cache utilization: > > https://bugs.openjdk.java.net/browse/JDK-7072317 > > 2) As described in my last year?s JVMLS talk our AOT solution uses > shared libraries as a container format and as you know code and data > need to be separate. Thanks for the explanation, Christian. Perhaps it might be a good idea to add a link to indicate that 7072317 relates to (or even is a dependency of?) 8152664? > If you haven?t been at JVMLS here is the recording of my talk and the > section about the container format: > > https://youtu.be/Xybzyv8qbOc?t=10m41s I did watch this just after the summit -- great talk, by the way -- but, needless to say, failed to retain the relevant detail from that long ago. regards, Andrew Dinn ----------- From volker.simonis at gmail.com Fri Apr 8 18:12:19 2016 From: volker.simonis at gmail.com (Volker Simonis) Date: Fri, 8 Apr 2016 20:12:19 +0200 Subject: RFR (XL): 8152664 - Support non-continuous CodeBlobs in HotSpot In-Reply-To: References: <20160407121221.GQ9504@rbackman> Message-ID: Hi Rickard, I found the problem why your change crashes the VM on ppc (and I'm pretty sure it will also crash on ARM - @Andrew, maybe you can try it out?). It is caused by the following code in address NativeCall::get_trampoline() which is also present on arm64: address NativeCall::get_trampoline() { address call_addr = addr_at(0); CodeBlob *code = CodeCache::find_blob(call_addr); ... // If the codeBlob is not a nmethod, this is because we get here from the // CodeBlob constructor, which is called within the nmethod constructor. return trampoline_stub_Relocation::get_trampoline_for(call_addr, (nmethod*)code); } The comment explains the situation quite well: we're in the CodeBlob constructor which was called by the CompiledMethod constructor which was called from the nmethod constructor: #3 0x00003fffb741b80c in NativeCall::get_trampoline (this=0x3fff607d0fac) at /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/cpu/ppc/vm/nativeInst_ppc.cpp:146 #4 0x00003fffb7596914 in Relocation::pd_call_destination (this=0x3ffdfe3fcc90, orig_addr=0x3fff603b8a2c "\001") at /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/cpu/ppc/vm/relocInfo_ppc.cpp:87 #5 0x00003fffb758f5fc in CallRelocation::fix_relocation_after_move (this=0x3ffdfe3fcc90, src=0x3ffdfe3fdb40, dest=0x3ffdfe3fcd58) at /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.cpp:506 #6 0x00003fffb6c48898 in CodeBuffer::relocate_code_to (this=0x3ffdfe3fdb40, dest=0x3ffdfe3fcd58) at /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/asm/codeBuffer.cpp:812 #7 0x00003fffb6c48404 in CodeBuffer::copy_code_to (this=0x3ffdfe3fdb40, dest_blob=0x3fff607d0c10) at /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/asm/codeBuffer.cpp:748 #8 0x00003fffb6c42670 in CodeBuffer::copy_code_and_locs_to (this=0x3ffdfe3fdb40, blob=0x3fff607d0c10) at /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/asm/codeBuffer.hpp:607 #9 0x00003fffb6c3f834 in CodeBlob::CodeBlob (this=0x3fff607d0c10, name=0x3fffb7a75fd8 "nmethod", layout=..., cb=0x3ffdfe3fdb40, frame_complete_offset=20, frame_size=14, oop_maps=0x3ffe00049620, caller_must_gc_arguments=false, subtype=8) at /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/codeBlob.cpp:117 #10 0x00003fffb6ce52c8 in CompiledMethod::CompiledMethod (this=0x3fff607d0c10, method=0x3ffe1ddce568, name=0x3fffb7a75fd8 "nmethod", size=1768, 
header_size=392, cb=0x3ffdfe3fdb40, frame_complete_offset=20, frame_size=14, oop_maps=0x3ffe00049620, caller_must_gc_arguments=false) at /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/compiledMethod.cpp:42 #11 0x00003fffb7421f58 in nmethod::nmethod (this=0x3fff607d0c10, method=0x3ffe1ddce568, nmethod_size=1768, compile_id=4, entry_bci=-1, offsets=0x3ffdfe3fdb18, orig_pc_offset=104, debug_info=0x3fffb03d55f0, dependencies=0x3ffe00049690, code_buffer=0x3ffdfe3fdb40, frame_size=14, oop_maps=0x3ffe00049620, handler_table=0x3ffdfe3fdad0, nul_chk_table=0x3ffdfe3fdaf0, compiler=0x3fffb03bc270, comp_level=3) at /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/nmethod.cpp:706 Now we cast 'code' to 'nmethod' but at this point in time 'code' is still a CodeBlob from the C++ point of view (i.e. it still has a CodeBlob vtable (see [1] for an explanation)). Later on, in RelocIterator::initialize() we call virtual methods on the nmethod which still has the vtable of a "CodeBlob" and this fails badly: #0 SingletonBlob::print_on (this=0x3fff607d0c10, st=0x0) at /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/codeBlob.cpp:584 #1 0x00003fffb758d51c in RelocIterator::initialize (this=0x3ffdfe3fc928, nm=0x3fff607d0c10, begin=0x3fff607d0fac "\001", limit=0x0) at /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.cpp:144 #2 0x00003fffb6ace56c in RelocIterator::RelocIterator (this=0x3ffdfe3fc928, nm=0x3fff607d0c10, begin=0x3fff607d0fac "\001", limit=0x0) at /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.hpp:1378 #3 0x00003fffb75919dc in trampoline_stub_Relocation::get_trampoline_for (call=0x3fff607d0fac "\001", code=0x3fff607d0c10) at /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.cpp:849 #4 0x00003fffb741b80c in NativeCall::get_trampoline (this=0x3fff607d0fac) at /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/cpu/ppc/vm/nativeInst_ppc.cpp:146 As you can see, we actually want to call nmethod::stub_begin() at relocInfo.cpp:144 142 _section_start[CodeBuffer::SECT_CONSTS] = nm->consts_begin(); 143 _section_start[CodeBuffer::SECT_INSTS ] = nm->insts_begin() ; 144 _section_start[CodeBuffer::SECT_STUBS ] = nm->stub_begin() ; but we actually end up in SingletonBlob::print_on() which is a completely different method. Notice that the call to nm->consts_begin() before also fails, but it doesn't crash the VM because it happens to call SingletonBlob::verify() which has no bad side effect. The call to nm->insts_begin() in line 143 is non-virtual and thus works fine. Here are the corresponding vtable slots in the CodeBlob vtable for consts_begin() and stub_begin() (gdb) p &nmethod::consts_begin $76 = &virtual table offset 42 (gdb) p &nmethod::stub_begin $77 = &virtual table offset 44 (gdb) p ((*(void ***)nm) + 1)[42] $86 = (void *) 0x3fffb6c41df8 (gdb) p ((*(void ***)nm) + 1)[44] $87 = (void *) 0x3fffb6c41e64 As you can see, 'nm' is indeed a "CodeBlob" at this point: (gdb) p *(void ***)nm $91 = (void **) 0x3fffb7befa00 (gdb) p nm->print() [CodeBlob (0x00003fff607d1090)] Framesize: 14 The offending calls succeeded before your change, because they where not virtual. Any idea how we can fix this with the new class hierarchy? 
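To make the C++ rule behind this concrete, here is a minimal standalone sketch -- hypothetical toy code, not HotSpot sources -- in which Blob stands in for CodeBlob, Method for nmethod, and a global pointer plays the role of CodeCache::find_blob() handing the object out while it is still under construction:

#include <iostream>

// Toy stand-ins: Blob ~ CodeBlob, Method ~ nmethod. The global "registry"
// simulates CodeCache::find_blob() returning the blob before its most
// derived constructor has run.
static struct Blob* g_registry = 0;

struct Blob {
  Blob() {
    g_registry = this;        // published while construction is in progress
    describe_via_registry();  // simulates the relocation code running here
  }
  virtual ~Blob() {}
  virtual const char* kind() const { return "Blob"; }

  static void describe_via_registry() {
    // Virtual dispatch uses the vptr as it is *right now*: inside Blob's
    // constructor it still points at Blob's vtable, so this prints "Blob"
    // even though the object being built is really a Method.
    std::cout << "kind during construction: " << g_registry->kind() << std::endl;
  }
};

struct Method : Blob {
  virtual const char* kind() const { return "Method"; }
};

int main() {
  Method m;                                                           // prints "Blob"
  std::cout << "kind after construction: " << m.kind() << std::endl;  // prints "Method"
  return 0;
}

In the toy case you merely get the base-class implementation; in the CodeBlob/nmethod case above it is worse, because the virtual methods being called only exist in the derived vtable, so the dispatch lands in an unrelated slot (SingletonBlob::print_on() here) instead of a wrong-but-harmless override.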
Regards, Volker [1] http://stackoverflow.com/questions/6591859/when-does-the-vptr-pointing-to-vtable-get-initialized-for-a-polymorphic-class On Thu, Apr 7, 2016 at 5:50 PM, Volker Simonis wrote: > Hi Rickard, > > I'd also like to know what's the rational behind this quite large > change. Do you expect some performance or memory consumption > improvements or is this a prerequisite for another change which is > still to come? > > The change itself currently doesn't work on ppc64 (neither on Linux > nor on AIX). I get the following crash during the build when the newly > built Hotspot is JIT-compiling java.lang.String::charAt on C1 : > > # > # A fatal error has been detected by the Java Runtime Environment: > # > # SIGSEGV (0xb) at pc=0x00001000012a44d0, pid=35331, tid=35404 > # > # JRE version: OpenJDK Runtime Environment (9.0) (slowdebug build > 9-internal+0-2016-04-07-162501.d046063.jdk9-hs-comp) > # Java VM: OpenJDK 64-Bit Server VM (slowdebug > 9-internal+0-2016-04-07-162501.d046063.jdk9-hs-comp, mixed mode, > tiered, compressed oo > ps, serial gc, linux-ppc64le) > # Problematic frame: > # V [libjvm.so+0xf744d0] outputStream::do_vsnprintf_and_write(char > const*, char*, bool)+0x40 > # > # No core dump will be written. Core dumps have been disabled. To > enable core dumping, try "ulimit -c unlimited" before starting Java > again > # > # If you would like to submit a bug report, please visit: > # http://bugreport.java.com/bugreport/crash.jsp > # > > --------------- S U M M A R Y ------------ > > Command Line: > -Dapplication.home=/sapmnt/ld9510/a/d046063/output-jdk9-hs-comp-dbg/jdk > -Xms8m -XX:+UseSerialGC -Xms32M -Xmx512M -Djdk. > module.main=jdk.jlink jdk.jlink/jdk.tools.jmod.Main create > --module-version 9-internal --os-name Linux --os-arch ppc64le > --os-version > 2.6 --modulepath /priv/d046063/output-jdk9-hs-comp-dbg/images/jmods > --hash-dependencies .* --exclude **_the.* --libs > > /priv/d046063/output-jdk9-hs-comp-dbg/support/modules_libs-stripped/java.base > --cmds > /priv/d046063/output-jdk9-hs-comp-dbg/support/modules_cmds-stripped/java.base > --config > /priv/d046063/output-jdk9-hs-comp-dbg/support/modules_conf/java.base > --class-path /priv/d046063/output-jdk9-hs-comp-dbg/jdk/modules/java.base > /priv/d046063/output-jdk9-hs-comp-dbg/support/jmods/java.base.jmod > > Host: ld9510, POWER8E (raw), altivec supported, 48 cores, 61G, # > Please check /etc/os-release for details about this release. 
> Time: Thu Apr 7 16:28:55 2016 CEST elapsed time: 0 seconds (0d 0h 0m 0s) > > --------------- T H R E A D --------------- > > Current thread (0x000010000429c800): JavaThread "C1 CompilerThread10" > daemon [_thread_in_vm, id=35404, > stack(0x000010006a800000,0x000010006ac00000)] > > > Current CompileTask: > C1: 761 3 3 java.lang.String::charAt (25 bytes) > > Stack: [0x000010006a800000,0x000010006ac00000], > sp=0x000010006abfc6c0, free space=4081k > Native frames: (J=compiled Java code, j=interpreted, Vv=VM code, C=native > code) > V [libjvm.so+0xf744d0] outputStream::do_vsnprintf_and_write(char > const*, char*, bool)+0x40 > V [libjvm.so+0xf74668] outputStream::print_cr(char const*, ...)+0x68 > V [libjvm.so+0x72189c] CodeBlob::print_on(outputStream*) const+0x50 > V [libjvm.so+0x723bdc] RuntimeBlob::print_on(outputStream*) const+0x40 > V [libjvm.so+0x721eb0] SingletonBlob::print_on(outputStream*) const+0x4c > V [libjvm.so+0x106d51c] RelocIterator::initialize(CompiledMethod*, > unsigned char*, unsigned char*)+0x170 > V [libjvm.so+0x5ae56c] RelocIterator::RelocIterator(CompiledMethod*, > unsigned char*, unsigned char*)+0x78 > V [libjvm.so+0x10719dc] > trampoline_stub_Relocation::get_trampoline_for(unsigned char*, > nmethod*)+0x78 > V [libjvm.so+0xefb80c] NativeCall::get_trampoline()+0x110 > V [libjvm.so+0x1076914] Relocation::pd_call_destination(unsigned > char*)+0x150 > V [libjvm.so+0x106f5fc] > CallRelocation::fix_relocation_after_move(CodeBuffer const*, > CodeBuffer*)+0x74 > V [libjvm.so+0x728898] CodeBuffer::relocate_code_to(CodeBuffer*) > const+0x390 > V [libjvm.so+0x728404] CodeBuffer::copy_code_to(CodeBlob*)+0x134 > V [libjvm.so+0x722670] CodeBuffer::copy_code_and_locs_to(CodeBlob*)+0x84 > V [libjvm.so+0x71f834] CodeBlob::CodeBlob(char const*, > CodeBlobLayout const&, CodeBuffer*, int, int, OopMapSet*, bool, > int)+0x320 > V [libjvm.so+0x7c52c8] CompiledMethod::CompiledMethod(Method*, char > const*, int, int, CodeBuffer*, int, int, OopMapSet*, bool)+0xd8 > V [libjvm.so+0xf01f58] nmethod::nmethod(Method*, int, int, int, > CodeOffsets*, int, DebugInformationRecorder*, Dependencies*, > CodeBuffer*, int, OopMapSet*, ExceptionHandlerTable*, > ImplicitExceptionTable*, AbstractCompiler*, int)+0xe0 > V [libjvm.so+0xf01610] nmethod::new_nmethod(methodHandle const&, > int, int, CodeOffsets*, int, DebugInformationRecorder*, Dependencies*, > CodeBuffer*, int, OopMapSet*, ExceptionHandlerTable*, > ImplicitExceptionTable*, AbstractCompiler*, int)+0x2c4 > V [libjvm.so+0x632970] ciEnv::register_method(ciMethod*, int, > CodeOffsets*, int, CodeBuffer*, int, OopMapSet*, > ExceptionHandlerTable*, ImplicitExceptionTable*, AbstractCompiler*, > bool, bool, RTMState)+0x560 > V [libjvm.so+0x48ee00] Compilation::install_code(int)+0x264 > V [libjvm.so+0x48eff8] Compilation::compile_method()+0x184 > V [libjvm.so+0x48f7a8] Compilation::Compilation(AbstractCompiler*, > ciEnv*, ciMethod*, int, BufferBlob*, DirectiveSet*)+0x288 > V [libjvm.so+0x4980d0] Compiler::compile_method(ciEnv*, ciMethod*, > int, DirectiveSet*)+0xc8 > V [libjvm.so+0x7b188c] > CompileBroker::invoke_compiler_on_method(CompileTask*)+0x590 > V [libjvm.so+0x7b07bc] CompileBroker::compiler_thread_loop()+0x310 > V [libjvm.so+0x11a614c] compiler_thread_entry(JavaThread*, Thread*)+0xa0 > V [libjvm.so+0x119f3a8] JavaThread::thread_main_inner()+0x1b4 > V [libjvm.so+0x119f1a4] JavaThread::run()+0x1b8 > V [libjvm.so+0xf53d90] java_start(Thread*)+0x204 > C [libpthread.so.0+0x8a64] start_thread+0xf4 > C [libc.so.6+0x1032a0] clone+0x98 > > I haven't identified 
the exact cause (will analyze it tomorrow) but > the stack trace indicates that it is indeed related to your changes. > > Besides that I have some comments: > > codeBuffer.hpp: > > 472 CodeSection* insts() { return &_insts; } > 475 const CodeSection* insts() const { return &_insts; } > > - do we really need both versions? > > codeBlob.hpp: > > 135 nmethod* as_nmethod_or_null() const { return > is_nmethod() ? (nmethod*) this : NULL; } > 136 nmethod* as_nmethod() const { > assert(is_nmethod(), "must be nmethod"); return (nmethod*) this; } > 137 CompiledMethod* as_compiled_method_or_null() const { return > is_compiled() ? (CompiledMethod*) this : NULL; } > 138 CompiledMethod* as_compiled_method() const { > assert(is_compiled(), "must be compiled"); return (CompiledMethod*) > this; } > 139 CodeBlob* as_codeblob_or_null() const { return > (CodeBlob*) this; } > > - I don't like this code. You make the getters 'const' which > implicitely makes 'this' a "pointer to const" but then the returned > pointer is a normal pointer to a non-const object and therefore you > have to statically cast away the "pointer to const" (that's why you > need the cast even in the case where you return a CodeBlob*). So > either remove the const qualifier from the method declarations or make > them return "pointers to const". And by the way, as_codeblob_or_null() > doesn't seemed to be used anywhere in the code, why do we need it at > all? > > - Why do we need the non-virtual methods is_nmethod() and > is_compiled() to manually simulate virtual behavior. Why can't we > simply make them virtual and implement them accordingly in nmathod and > CompiledMethod? > > Regards, > Volker > > On Thu, Apr 7, 2016 at 2:12 PM, Rickard B?ckman > wrote: > > Hi, > > > > can I please have review for this patch please? > > > > So far CodeBlobs have required all the data (metadata, oops, code, etc) > > to be in one continuous blob With this patch we are looking to change > > that. It's been done by changing offsets in CodeBlob to addresses, > > making some methods virtual to allow different behavior and also > > creating a couple of new classes. CompiledMethod now sits inbetween > > CodeBlob and nmethod. > > > > CR: https://bugs.openjdk.java.net/browse/JDK-8152664 > > Webrev: http://cr.openjdk.java.net/~rbackman/8152664/ > > > > Thanks > > /R > From christian.thalinger at oracle.com Fri Apr 8 18:31:39 2016 From: christian.thalinger at oracle.com (Christian Thalinger) Date: Fri, 8 Apr 2016 08:31:39 -1000 Subject: RFR (S): 8139921: add mx configuration files to support HotSpot IDE configuration generation In-Reply-To: References: <31F6B9E0-D7C2-4B73-8114-8616F82AE5B5@oracle.com> <4E949060-D14C-4986-A21D-69EFE55E095D@oracle.com> Message-ID: <520F6D4A-BEFB-4C7E-8360-8BB4403C02C2@oracle.com> > On Apr 7, 2016, at 11:59 PM, Volker Simonis wrote: > > On Thu, Apr 7, 2016 at 6:59 PM, Christian Thalinger > > wrote: >> >> On Apr 7, 2016, at 2:22 AM, Volker Simonis wrote: >> >> Hi Christian, >> >> I'd like to try this but I couldn't figure out how it works. >> I don't have any experience with Graal/Truffel/JVMCI but as far as I >> understood the creation of an Eclipse project should work equally well >> for a vanilla hospot repository, right? >> >> >> Correct. >> >> >> The first question is where to get mx from (there's different >> information in the Wiki and this email thread for example) ? >> >> https://bitbucket.org/allr/mx >> oder >> https://github.com/graalvm/mx.git >> >> >> Since the review the mx repository got moved to github. 
That?s the one you >> want to use. >> >> >> Now let's say I cloned the right mx version. How do I use it? >> Let's say I have a vanilla jdk9 forest under /share/OpenJDK/jdk9 >> I create an output directory under /share/OpenJDK/output-jdk9-dbg and >> from there I call configure and build the images: >> >> cd /share/OpenJDK/output-jdk9-dbg >> bash /share/OpenJDK/jdk9/configure --with-boot-jdk=.. >> --with-debug-level=slowdebug >> make images >> >> Taking this scenario, from which directory am I supposed to call 'mx >> ideinit', where will the Eclipse project be created at and how is mx >> supposed to find my configuration (i.e. platform) and generated files? >> >> >> I have never tried to have the output directory not in the source directory >> and mx might not support this. But it?s not really necessary because >> everything goes into build/ by default anyway. If you really want to have a >> separate output directory I suppose we could add an option to mx. >> > > Hi Christian, > > thanks a lot for your explanation. I've never tried WITH the output > directory in the source tree :) I have my source repositories on a > network share and on every platform I configure and build from the > shared sources into a local directory. > > That said, I've tried to configure and build INTO the source directory > and called 'mx eclipseinit' afterwards. That worked as described and > the created project worked quite nicely. But after playing around a > little bit, I found the following points: > > - the project settings are hard-wired for linux-x86_64 > - so it makes no difference if I configure and build on linux-ppc64, > the generated project still tries to get the generated files from > 'PARENT-5-PROJECT_LOC/build/linux-x86_64-normal-server-slowdebug' > which will apparently not work on any other platform. That is half-correct. It?s not hardcoded but it picks up the platform you are running mx on. So, if you had a separate repository for ppc64 and run mx on the ppc64 host it should work. I mean, there is no real solution to that problem, right? You can only have one generated directory to be included. > - the same is true for the preprocessor defines. They are hard wired > in ".mx.jvmci/hotspot/templates/eclipse/cproject". I don't actually > understand why you are creating distinct release, fastdebug and > slowdebug projects at all The main reason for the different configurations is because the generated directory is different. > , if all of them have the same settings (e.g. > ASSERT and DEBUG is also defined for server-release). This is odd and sounds like a bug. Doug? Ah! There is only one template: $ find .mx.jvmci/hotspot/templates/ .mx.jvmci/hotspot/templates/ .mx.jvmci/hotspot/templates/eclipse .mx.jvmci/hotspot/templates/eclipse/cproject .mx.jvmci/hotspot/templates/eclipse/settings .mx.jvmci/hotspot/templates/eclipse/settings/org.eclipse.cdt.core.prefs .mx.jvmci/hotspot/templates/eclipse/settings/org.eclipse.cdt.ui.prefs .mx.jvmci/hotspot/templates/eclipse/settings/org.eclipse.core.runtime.prefs A much bigger problem is this: $ grep TARGET .mx.jvmci/hotspot/templates/eclipse/cproject Doug, can we change that to actually use the right defines? > This of course > also applies to OS, CPU, etc... > - so in the end you're right. I can call 'mx eclipseinit' even without > a build directory and the generated Eclipse project will still be the > same (i.e. hard-wired for ../build/linux-x86_64-slowdebug). 
> > I thought that 'mx eclipseinit' will use the output directory to get > platform and configuration information from there and to create the > corresponding Eclipse project from it. That means a project which is > configured for the corresponding platform with the relevant (and only > the relevant) sources plus preprocessor defines and the corresponding > generated sources. But in the current from, that doesn't seems to be > true. Or am I missing something? As I said above, mx is picking up the information from the host you are running on: http://hg.openjdk.java.net/jdk9/jdk9/hotspot/file/7d9d8ad32fe6/.mx.jvmci/mx_jvmci.py#l718 Here is the configuration when running mx on a SPARC host: .mx.jvmci/hotspot/eclipse/server-release/.project 61: generated 63: PARENT-5-PROJECT_LOC/build/solaris-sparcv9-normal-server-release/hotspot/solaris_sparcv9_compiler2/generated > > Thank you and best regards, > Volker > >> You run mx in the hotspot source directory. Since I?m using Eclipse I >> usually run the eclipseinit command directly: >> >> cthaling at macbook:~/ws/jdk9/hs-comp/hotspot$ mx eclipseinit >> created >> /Users/cthaling/ws/jdk9/hs-comp/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.aarch64/.classpath >> created >> /Users/cthaling/ws/jdk9/hs-comp/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.aarch64/.checkstyle >> created >> /Users/cthaling/ws/jdk9/hs-comp/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.aarch64/.project >> created >> /Users/cthaling/ws/jdk9/hs-comp/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.aarch64/.settings/org.eclipse.jdt.core.prefs >> created >> /Users/cthaling/ws/jdk9/hs-comp/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.aarch64/.settings/org.eclipse.jdt.ui.prefs >> created >> /Users/cthaling/ws/jdk9/hs-comp/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.aarch64/.settings/org.eclipse.core.resources.prefs >> ? >> >> You can see it generates all the necessary files into the various source >> directories. The hotspot files are here: >> >> cthaling at macbook:~/ws/jdk9/hs-comp/hotspot$ ls .mx.jvmci/hotspot/eclipse/ >> server-fastdebug/ server-release/ server-slowdebug/ >> >> So, you have to enable ?Search for nested projects? when importing into >> Eclipse. Then everything should magically show up. >> >> >> Thanks a lot and best regards, >> Volker >> >> >> On Wed, Nov 11, 2015 at 1:42 AM, Christian Thalinger >> wrote: >> >> [This is kind of a long email but contains pictures :-)] >> >> https://bugs.openjdk.java.net/browse/JDK-8139921 >> http://cr.openjdk.java.net/~twisti/8139921/webrev/ >> >> In order to make the IDE experience more pleasant now that JEP 243 is >> integrated we would like to use mx (https://bitbucket.org/allr/mx) for IDE >> configuration generation. For this we have to integrate a few mx support >> files into the hotspot repository. >> >> The mx support files will be under a dot-directory: >> >> $ hg st --all .mx.jvmci/ >> C .mx.jvmci/.project >> C .mx.jvmci/.pydevproject >> C .mx.jvmci/eclipse-settings/org.eclipse.jdt.core.prefs >> C .mx.jvmci/hotspot/templates/eclipse/cproject >> C .mx.jvmci/hotspot/templates/eclipse/settings/org.eclipse.cdt.core.prefs >> C .mx.jvmci/hotspot/templates/eclipse/settings/org.eclipse.cdt.ui.prefs >> C >> .mx.jvmci/hotspot/templates/eclipse/settings/org.eclipse.core.runtime.prefs >> C .mx.jvmci/mx_jvmci.py >> C .mx.jvmci/suite.py >> >> mx itself is and will stay an external tool. 
Some documentation on how to >> use it can be found here: >> >> https://wiki.openjdk.java.net/display/Graal/Instructions >> https://wiki.openjdk.java.net/display/Graal/Eclipse >> >> It basically boils down to: >> >> $ mx ideinit >> >> and importing the configuration into your favorite IDE. >> >> This would give every developer the same view of the source code and we can >> also enforce code-style guidelines. >> >> Here is how the imported projects look like in Eclipse: >> >> >> >> This is most helpful for Compiler engineers who work on the JVMCI but there >> is value for others too. >> >> Notice the ?hotspot:*? projects at the top? These are projects for >> different HotSpot configurations. The main advantage here is that these >> include the generated files directory (if the configuration exists and the >> files are built). I only configured and built ?release? so these can been >> seen, fastdebug is empty: >> >> >> >> This makes it possible for Eclipse to find generated source code. Very >> helpful. For example, JVMTI. First, jvmtiUtils.hpp from the fastdebug >> configuration: >> >> >> >> and here is the release one: >> >> >> >> mx has lots of other commands but most of them are not really useful for us. >> The only ones worth mentioning besides ideinit are findbugs and checkstyle. >> >> findbugs runs FindBugs (doh!) on all Java projects that mx knows about: >> >> cthaling at macbook:~/ws/8139921/hotspot$ mx findbugs >> Scanning archives (15 / 30) >> 2 analysis passes to perform >> Pass 1: Analyzing classes (524 / 524) - 100% complete >> Pass 2: Analyzing classes (305 / 305) - 100% complete >> Done with analysis >> Calculating exit code... >> Exit code set to: 0 >> >> checkstyle checks the Java projects against some predefined style. This is >> particularly helpful for people who don?t use an IDE or to make sure >> everything matches the style after applying an external patch: >> >> cthaling at macbook:~/ws/8139921/hotspot$ mx checkstyle >> Running Checkstyle on >> /Users/cthaling/ws/8139921/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.runtime/src >> using >> /Users/cthaling/ws/8139921/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.service/.checkstyle_checks.xml... >> /Users/cthaling/ws/8139921/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.runtime/src/jdk/vm/ci/runtime/JVMCI.java:33: >> 'static' modifier out of order with the JLS suggestions. >> >> or: >> >> cthaling at macbook:~/ws/8139921/hotspot$ mx checkstyle >> Running Checkstyle on >> /Users/cthaling/ws/8139921/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.runtime/src >> using >> /Users/cthaling/ws/8139921/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.service/.checkstyle_checks.xml... >> /Users/cthaling/ws/8139921/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.runtime/src/jdk/vm/ci/runtime/JVMCI.java:43: >> Name 'FOO' must match pattern '^[a-z][a-zA-Z0-9]*$'. >> >> That?s all, folks! 
From christian.thalinger at oracle.com Fri Apr 8 18:35:57 2016 From: christian.thalinger at oracle.com (Christian Thalinger) Date: Fri, 8 Apr 2016 08:35:57 -1000 Subject: RFR (XL): 8152664 - Support non-continuous CodeBlobs in HotSpot In-Reply-To: <5707B93D.2070704@redhat.com> References: <20160407121221.GQ9504@rbackman> <570651B7.9020004@redhat.com> <5707B93D.2070704@redhat.com> Message-ID: <1E3DBFBE-C7A9-4C14-B9E6-B0369011CD49@oracle.com> > On Apr 8, 2016, at 3:59 AM, Andrew Dinn wrote: > > On 07/04/16 18:28, Christian Thalinger wrote: >> >>> On Apr 7, 2016, at 2:25 AM, Andrew Dinn wrote: >>> Why do 'we want to be able to have pieces of compiled code and >>> metadata that are not located in one continuous piece of memory'? >>> Is there a motive for making this change? >> >> Yes, there are two reasons: >> >> 1) We would like to separate metadata from code in the CodeCache so >> that we (potentially) get better CPU cache utilization: >> >> https://bugs.openjdk.java.net/browse/JDK-7072317 >> >> 2) As described in my last year?s JVMLS talk our AOT solution uses >> shared libraries as a container format and as you know code and data >> need to be separate. > > Thanks for the explanation, Christian. Perhaps it might be a good idea > to add a link to indicate that 7072317 relates to (or even is a > dependency of?) 8152664? Yes, good point. Done. > >> If you haven?t been at JVMLS here is the recording of my talk and the >> section about the container format: >> >> https://youtu.be/Xybzyv8qbOc?t=10m41s > > I did watch this just after the summit -- great talk, by the way -- but, > needless to say, failed to retain the relevant detail from that long ago. Sure :-) > > regards, > > > Andrew Dinn > ----------- From mikael.vidstedt at oracle.com Fri Apr 8 18:57:18 2016 From: mikael.vidstedt at oracle.com (Mikael Vidstedt) Date: Fri, 8 Apr 2016 11:57:18 -0700 Subject: Merging jdk9/hs-rt with jdk9/hs In-Reply-To: <5706B5C7.1080500@oracle.com> References: <56F08ACA.6030705@oracle.com> <57058B56.7060605@oracle.com> <5706B5C7.1080500@oracle.com> Message-ID: <5707FF0E.2010707@oracle.com> Alejandro, As we discussed offline let's try to keep most of this the same way we're doing it today. You'll keep doing the jdk9/dev <-> jdk9/hs integrations, PIT submissions, etc. As you point out, instead of using latest/greatest in jdk9/hs the gatekeeper will have to provide you with a set of tips for you to use, but apart from that there should hopefully be very few actual changes to the process. Cheers, Mikael On 4/7/2016 12:32 PM, Alejandro Murillo wrote: > > Hi Mikael, > I apologize I missed your original email. Just seeing this now. > > You don't mention the plans about gate-keeping, > will the hs-rt gatekeeper will also also assume the > synching with dev, and therefore the submission of PIT, etc? > > If not, since main will be constantly changing, I will need > to be provided with the tips of a stable snapshot to merge with dev to > run pit. > And then a hint of when to push the merge back to main. > Since we have to push with JPRT, there are some situations for > potential problems here. > > Of course, that's not different to what is currently done to merge > jdk9/dev to master, > except that we now have mach5 to pin point stable jdk9/dev snapshots, > and also, there's nothing coming back from master to dev weekly > (except tags) and once in a while special integrations. 
> And as mentioned above, with hs repos, we use JPRT to push > > Thanks > Alejandro > > > On 4/6/2016 4:19 PM, Mikael Vidstedt wrote: >> >> Having heard no feedback[1], we're going to go ahead with this >> experiment and the plan is to do the switch next week, *Friday April >> 15th*. Again, please note that any outstanding work based on >> jdk9/hs-rt will have to be rebased on jdk9/hs once the switch is >> made. More information as we get closer to the actual switchover. >> >> Let us know if you have any concerns with the date, and/or any >> feedback on how it's working out. >> >> Cheers, >> Mikael >> >> [1] Not even from Volker *hint* ;) >> >> On 3/21/2016 4:59 PM, Mikael Vidstedt wrote: >>> >>> All, >>> >>> The JDK 9 development of Hotspot is primarily done in two different >>> mercurial forests: jdk9/hs-rt[1], and jdk9/hs-comp[2]. In June of >>> last year we moved[3] all the GC development from jdk9/hs-gc[4] to >>> jdk9/hs-rt, and the experience so far has been a good one. Change >>> propagation (from jdk9/hs-rt to jdk9/hs-gc and vice verse) is now a >>> non-issue, we get testing faster on the union of the changes where >>> previously it could take weeks to catch a GC related bug in RT >>> testing, etc. >>> >>> However, both jdk9/hs-rt and jdk9/hs-comp still integrate through a >>> third forest - jdk9/hs[5], aka. hs "main" - before the changes are >>> integrated to jdk9/dev[6]. In line with the previous simplification, >>> we would like to suggest a further simplification of the forest >>> structure. Specifically, we suggest that the work currently done on >>> the jdk9/hs-rt forest moves directly to the jdk9/hs forest. In >>> addition to making the forest structure easier to understand, this >>> would have the benefit of removing one set of integrations (jdk9/hs >>> <-> jdk9/hs-rt), which further reduces cost and propagation time. It >>> is also paving the way for eventually integrating up to jdk9/dev >>> more often (but that is a separate discussion). >>> >>> We suggest that the experiment starts on April 15th, and goes on for >>> at least two weeks (giving us some time to adapt in case of issues). >>> Monitoring and evaluation of the new structure will take place >>> continuously, with an option to revert back if things do not work >>> out. The experiment would keep going for at least a few months, >>> after which we will evaluate it and depending on the results >>> consider making it the new standard. If so, the jdk9/hs-rt forest >>> will eventually be retired, with an option of looking at further >>> reduction of forests going forward. At least for now, we suggest >>> that jdk9/hs-comp remains a separate forest and that it integrates >>> through jdk9/hs just like it does today. >>> >>> Much like when we merged the jdk9/hs-gc and jdk9/hs-rt forests we >>> would leave the jdk9/hs-rt forest around until we see if the >>> experiment works out. We would also lock it down so that no >>> accidental integrations are made to it. Once the jdk9/hs-rt forest >>> is locked down, any work in flight based on it would have to be >>> rebased on jdk9/hs. >>> >>> Please let us know if you have any feedback or questions! 
>>> >>> Cheers, >>> Mikael >>> >>> [1]http://hg.openjdk.java.net/jdk9/hs-rt >>> [2]http://hg.openjdk.java.net/jdk9/hs-comp >>> [3]http://mail.openjdk.java.net/pipermail/hotspot-dev/2015-May/thread.html >>> >>> [4]http://hg.openjdk.java.net/jdk9/hs-gc >>> [5]http://hg.openjdk.java.net/jdk9/hs >>> [6]http://hg.openjdk.java.net/jdk9/dev >>> >> > From mikael.vidstedt at oracle.com Fri Apr 8 18:59:57 2016 From: mikael.vidstedt at oracle.com (Mikael Vidstedt) Date: Fri, 8 Apr 2016 11:59:57 -0700 Subject: The new Hotspot Build System In-Reply-To: <5707B55D.2030602@oracle.com> References: <5707AEC1.5020601@oracle.com> <5707B430.6070503@oracle.com> <5707B55D.2030602@oracle.com> Message-ID: <5707FFAD.5030803@oracle.com> Erik/Magnus/Ingemar, Thank you *so* much for making this happen. I can't even begin to describe how glad I am that this has finally happened, and that it made it in before the jdk9 FC! Cheers, Mikael On 4/8/2016 6:42 AM, Erik Joelsson wrote: > Thanks!! > > Sure feels good to finally get it done. > > /Erik > > On 2016-04-08 15:37, Daniel D. Daugherty wrote: >> Congrats!! I'm looking forward to taking the new HotSpot build out >> for a spin around the block... :-) >> >> Dan >> >> >> On 4/8/16 7:14 AM, Erik Joelsson wrote: >>> The first phase of the new Hotspot Build System (JEP 284) has now >>> been pushed to jdk9/hs-rt. During a transition period, both the old >>> and new build system will co-exist and you can pick the one you want >>> using the configure option "--disable-new-hotspot-build". The new >>> system is the default while the old is there as a backup in case >>> your particular work flow broke in the new system. If you have any >>> trouble, don't hesitate to contact me or file an issue. >>> >>> The goal is to push the second phase, where the old system is >>> removed, and the new moves from hotspot/makefiles to hotspot/make, >>> as soon as possible. If no blocking issues are raised, I will >>> propose to push that change in one weeks time from today. This is of >>> course negotiable. >>> >>> The change to the new build system also means that the infra team >>> now assumes responsibility for the Hotspot makefiles. This means >>> that all changes that touch makefiles should include build-dev in >>> the review. This is especially important during the transition >>> period so that we can ensure that both build systems are kept in sync. >>> >>> The user of the build shouldn't notice much of a difference. If you >>> build from the top level, "make hotspot" will still do the same >>> thing. The new build does not support building from the hotspot >>> repo. Debug levels are controlled through the same configure >>> parameters as before. There is one new configure parameter, >>> --with-jvm-features, that can be used to create specialized builds >>> with non standard combinations of jvm options, especially in >>> conjunction with the new jvm variant "custom". See the configure >>> help output for more details. There is a new way of generating >>> Visual Studio projects using the top level target >>> "hotspot-ide-project", which replaces the old script. >>> >>> /Erik >> > From dean.long at oracle.com Fri Apr 8 20:04:45 2016 From: dean.long at oracle.com (Dean Long) Date: Fri, 8 Apr 2016 13:04:45 -0700 Subject: RFR (XL): 8152664 - Support non-continuous CodeBlobs in HotSpot In-Reply-To: References: <20160407121221.GQ9504@rbackman> Message-ID: <57080EDD.7040701@oracle.com> Hi Volker. I noticed this problem before and filed 8151956. 
Making those member functions non-virtual may solve this particular problem, but as the code evolves we may hit it again if we ever call a virtual member function by accident. I'm not a C++ expert, but if we declared those functions as virtual in CodeBlob, then would that work? It doesn't seem ideal, however. I would rather not call out from the CodeBlob constructor at all, but instead do the work in the subclass constructor. Let's say we move the call to cb->copy_code_and_locs_to() to a separate function. Is there a C++ idiom for making sure all subclasses of CodeBlob call it? The only think I can think of is to set an "initialized" flag and to check it in strategic places. dl On 4/8/2016 11:12 AM, Volker Simonis wrote: > Hi Rickard, > > I found the problem why your change crashes the VM on ppc (and I'm pretty > sure it will also crash on ARM - @Andrew, maybe you can try it out?). It is > caused by the following code in address NativeCall::get_trampoline() which > is also present on arm64: > > address NativeCall::get_trampoline() { > address call_addr = addr_at(0); > CodeBlob *code = CodeCache::find_blob(call_addr); > ... > // If the codeBlob is not a nmethod, this is because we get here from the > // CodeBlob constructor, which is called within the nmethod constructor. > return trampoline_stub_Relocation::get_trampoline_for(call_addr, > (nmethod*)code); > } > > The comment explains the situation quite well: we're in the CodeBlob > constructor which was called by the CompiledMethod constructor which was > called from the nmethod constructor: > > #3 0x00003fffb741b80c in NativeCall::get_trampoline (this=0x3fff607d0fac) > at > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/cpu/ppc/vm/nativeInst_ppc.cpp:146 > #4 0x00003fffb7596914 in Relocation::pd_call_destination > (this=0x3ffdfe3fcc90, orig_addr=0x3fff603b8a2c "\001") at > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/cpu/ppc/vm/relocInfo_ppc.cpp:87 > #5 0x00003fffb758f5fc in CallRelocation::fix_relocation_after_move > (this=0x3ffdfe3fcc90, src=0x3ffdfe3fdb40, dest=0x3ffdfe3fcd58) at > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.cpp:506 > #6 0x00003fffb6c48898 in CodeBuffer::relocate_code_to > (this=0x3ffdfe3fdb40, dest=0x3ffdfe3fcd58) at > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/asm/codeBuffer.cpp:812 > #7 0x00003fffb6c48404 in CodeBuffer::copy_code_to (this=0x3ffdfe3fdb40, > dest_blob=0x3fff607d0c10) at > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/asm/codeBuffer.cpp:748 > #8 0x00003fffb6c42670 in CodeBuffer::copy_code_and_locs_to > (this=0x3ffdfe3fdb40, blob=0x3fff607d0c10) at > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/asm/codeBuffer.hpp:607 > #9 0x00003fffb6c3f834 in CodeBlob::CodeBlob (this=0x3fff607d0c10, > name=0x3fffb7a75fd8 "nmethod", layout=..., cb=0x3ffdfe3fdb40, > frame_complete_offset=20, frame_size=14, oop_maps=0x3ffe00049620, > caller_must_gc_arguments=false, subtype=8) at > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/codeBlob.cpp:117 > #10 0x00003fffb6ce52c8 in CompiledMethod::CompiledMethod > (this=0x3fff607d0c10, method=0x3ffe1ddce568, name=0x3fffb7a75fd8 "nmethod", > size=1768, header_size=392, cb=0x3ffdfe3fdb40, frame_complete_offset=20, > frame_size=14, oop_maps=0x3ffe00049620, caller_must_gc_arguments=false) at > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/compiledMethod.cpp:42 > #11 0x00003fffb7421f58 in nmethod::nmethod (this=0x3fff607d0c10, > method=0x3ffe1ddce568, 
nmethod_size=1768, compile_id=4, entry_bci=-1, > offsets=0x3ffdfe3fdb18, orig_pc_offset=104, debug_info=0x3fffb03d55f0, > dependencies=0x3ffe00049690, code_buffer=0x3ffdfe3fdb40, frame_size=14, > oop_maps=0x3ffe00049620, handler_table=0x3ffdfe3fdad0, > nul_chk_table=0x3ffdfe3fdaf0, compiler=0x3fffb03bc270, comp_level=3) at > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/nmethod.cpp:706 > > Now we cast 'code' to 'nmethod' but at this point in time 'code' is still a > CodeBlob from the C++ point of view (i.e. it still has a CodeBlob vtable > (see [1] for an explanation)). > > Later on, in RelocIterator::initialize() we call virtual methods on the > nmethod which still has the vtable of a "CodeBlob" and this fails badly: > > #0 SingletonBlob::print_on (this=0x3fff607d0c10, st=0x0) at > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/codeBlob.cpp:584 > #1 0x00003fffb758d51c in RelocIterator::initialize (this=0x3ffdfe3fc928, > nm=0x3fff607d0c10, begin=0x3fff607d0fac "\001", limit=0x0) at > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.cpp:144 > #2 0x00003fffb6ace56c in RelocIterator::RelocIterator > (this=0x3ffdfe3fc928, nm=0x3fff607d0c10, begin=0x3fff607d0fac "\001", > limit=0x0) at > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.hpp:1378 > #3 0x00003fffb75919dc in trampoline_stub_Relocation::get_trampoline_for > (call=0x3fff607d0fac "\001", code=0x3fff607d0c10) at > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.cpp:849 > #4 0x00003fffb741b80c in NativeCall::get_trampoline (this=0x3fff607d0fac) > at > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/cpu/ppc/vm/nativeInst_ppc.cpp:146 > > As you can see, we actually want to call nmethod::stub_begin() at > relocInfo.cpp:144 > > 142 _section_start[CodeBuffer::SECT_CONSTS] = nm->consts_begin(); > 143 _section_start[CodeBuffer::SECT_INSTS ] = nm->insts_begin() ; > 144 _section_start[CodeBuffer::SECT_STUBS ] = nm->stub_begin() ; > > but we actually end up in SingletonBlob::print_on() which is a completely > different method. Notice that the call to nm->consts_begin() before also > fails, but it doesn't crash the VM because it happens to call > SingletonBlob::verify() which has no bad side effect. The call to > nm->insts_begin() in line 143 is non-virtual and thus works fine. Here are > the corresponding vtable slots in the CodeBlob vtable for consts_begin() > and stub_begin() > > (gdb) p &nmethod::consts_begin > $76 = &virtual table offset 42 > (gdb) p &nmethod::stub_begin > $77 = &virtual table offset 44 > (gdb) p ((*(void ***)nm) + 1)[42] > $86 = (void *) 0x3fffb6c41df8 > (gdb) p ((*(void ***)nm) + 1)[44] > $87 = (void *) 0x3fffb6c41e64 > > As you can see, 'nm' is indeed a "CodeBlob" at this point: > > (gdb) p *(void ***)nm > $91 = (void **) 0x3fffb7befa00 > (gdb) p nm->print() > [CodeBlob (0x00003fff607d1090)] > Framesize: 14 > > The offending calls succeeded before your change, because they where not > virtual. Any idea how we can fix this with the new class hierarchy? > > Regards, > Volker > > [1] > http://stackoverflow.com/questions/6591859/when-does-the-vptr-pointing-to-vtable-get-initialized-for-a-polymorphic-class > > > > On Thu, Apr 7, 2016 at 5:50 PM, Volker Simonis > wrote: > >> Hi Rickard, >> >> I'd also like to know what's the rational behind this quite large >> change. Do you expect some performance or memory consumption >> improvements or is this a prerequisite for another change which is >> still to come? 
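The behaviour described above is standard C++ rather than anything HotSpot-specific: during execution of a base-class constructor the object's vptr still refers to that class's vtable, so virtual calls made at that point dispatch to the base implementations, which in practice is also what happens when the pointer has been cast to a more derived type too early, as with the (nmethod*)code cast above. A minimal standalone sketch, with hypothetical Blob/Method classes standing in for CodeBlob/nmethod (this is not HotSpot code), plus the two-phase construction workaround that the earlier question about an enforcing idiom usually leads to:

#include <cstdio>

// Minimal, self-contained sketch of the C++ rule behind the crash analyzed
// above.  Blob and Method are hypothetical stand-ins for CodeBlob and
// nmethod; this is not HotSpot code.
struct Blob {
  Blob() {
    // While the Blob constructor runs, the object's vptr still points at
    // Blob's vtable, even when a Method is being constructed, so this
    // virtual call resolves to Blob::kind().
    std::printf("in Blob ctor:    kind() = %s\n", kind());
  }
  virtual ~Blob() {}
  virtual const char* kind() const { return "Blob"; }

  // Two-phase construction: work that needs virtual dispatch is moved out of
  // the constructors into a separate step that only runs once the most
  // derived object exists (one possible answer to the idiom question raised
  // in this thread).
  void post_construction() {
    std::printf("post ctor:       kind() = %s\n", kind());
  }
};

struct Method : public Blob {
  Method() {
    // By the time the Method constructor body runs, the vptr has been
    // switched to Method's vtable, so the same call resolves differently.
    std::printf("in Method ctor:  kind() = %s\n", kind());
  }
  virtual const char* kind() const { return "Method"; }
};

// Factory enforcing the second phase, so no subclass can forget it.
static Method* create_method() {
  Method* m = new Method();
  m->post_construction();
  return m;
}

int main() {
  Method* m = create_method();  // prints "Blob", then "Method", then "Method"
  delete m;
  return 0;
}

Run standalone this prints "Blob", then "Method", then "Method" again, which is the same effect that makes the virtual nm->stub_begin() call above land in a CodeBlob vtable slot even though an nmethod is being constructed.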
>> >> The change itself currently doesn't work on ppc64 (neither on Linux >> nor on AIX). I get the following crash during the build when the newly >> built Hotspot is JIT-compiling java.lang.String::charAt on C1 : >> >> # >> # A fatal error has been detected by the Java Runtime Environment: >> # >> # SIGSEGV (0xb) at pc=0x00001000012a44d0, pid=35331, tid=35404 >> # >> # JRE version: OpenJDK Runtime Environment (9.0) (slowdebug build >> 9-internal+0-2016-04-07-162501.d046063.jdk9-hs-comp) >> # Java VM: OpenJDK 64-Bit Server VM (slowdebug >> 9-internal+0-2016-04-07-162501.d046063.jdk9-hs-comp, mixed mode, >> tiered, compressed oo >> ps, serial gc, linux-ppc64le) >> # Problematic frame: >> # V [libjvm.so+0xf744d0] outputStream::do_vsnprintf_and_write(char >> const*, char*, bool)+0x40 >> # >> # No core dump will be written. Core dumps have been disabled. To >> enable core dumping, try "ulimit -c unlimited" before starting Java >> again >> # >> # If you would like to submit a bug report, please visit: >> # http://bugreport.java.com/bugreport/crash.jsp >> # >> >> --------------- S U M M A R Y ------------ >> >> Command Line: >> -Dapplication.home=/sapmnt/ld9510/a/d046063/output-jdk9-hs-comp-dbg/jdk >> -Xms8m -XX:+UseSerialGC -Xms32M -Xmx512M -Djdk. >> module.main=jdk.jlink jdk.jlink/jdk.tools.jmod.Main create >> --module-version 9-internal --os-name Linux --os-arch ppc64le >> --os-version >> 2.6 --modulepath /priv/d046063/output-jdk9-hs-comp-dbg/images/jmods >> --hash-dependencies .* --exclude **_the.* --libs >> >> /priv/d046063/output-jdk9-hs-comp-dbg/support/modules_libs-stripped/java.base >> --cmds >> /priv/d046063/output-jdk9-hs-comp-dbg/support/modules_cmds-stripped/java.base >> --config >> /priv/d046063/output-jdk9-hs-comp-dbg/support/modules_conf/java.base >> --class-path /priv/d046063/output-jdk9-hs-comp-dbg/jdk/modules/java.base >> /priv/d046063/output-jdk9-hs-comp-dbg/support/jmods/java.base.jmod >> >> Host: ld9510, POWER8E (raw), altivec supported, 48 cores, 61G, # >> Please check /etc/os-release for details about this release. 
>> Time: Thu Apr 7 16:28:55 2016 CEST elapsed time: 0 seconds (0d 0h 0m 0s) >> >> --------------- T H R E A D --------------- >> >> Current thread (0x000010000429c800): JavaThread "C1 CompilerThread10" >> daemon [_thread_in_vm, id=35404, >> stack(0x000010006a800000,0x000010006ac00000)] >> >> >> Current CompileTask: >> C1: 761 3 3 java.lang.String::charAt (25 bytes) >> >> Stack: [0x000010006a800000,0x000010006ac00000], >> sp=0x000010006abfc6c0, free space=4081k >> Native frames: (J=compiled Java code, j=interpreted, Vv=VM code, C=native >> code) >> V [libjvm.so+0xf744d0] outputStream::do_vsnprintf_and_write(char >> const*, char*, bool)+0x40 >> V [libjvm.so+0xf74668] outputStream::print_cr(char const*, ...)+0x68 >> V [libjvm.so+0x72189c] CodeBlob::print_on(outputStream*) const+0x50 >> V [libjvm.so+0x723bdc] RuntimeBlob::print_on(outputStream*) const+0x40 >> V [libjvm.so+0x721eb0] SingletonBlob::print_on(outputStream*) const+0x4c >> V [libjvm.so+0x106d51c] RelocIterator::initialize(CompiledMethod*, >> unsigned char*, unsigned char*)+0x170 >> V [libjvm.so+0x5ae56c] RelocIterator::RelocIterator(CompiledMethod*, >> unsigned char*, unsigned char*)+0x78 >> V [libjvm.so+0x10719dc] >> trampoline_stub_Relocation::get_trampoline_for(unsigned char*, >> nmethod*)+0x78 >> V [libjvm.so+0xefb80c] NativeCall::get_trampoline()+0x110 >> V [libjvm.so+0x1076914] Relocation::pd_call_destination(unsigned >> char*)+0x150 >> V [libjvm.so+0x106f5fc] >> CallRelocation::fix_relocation_after_move(CodeBuffer const*, >> CodeBuffer*)+0x74 >> V [libjvm.so+0x728898] CodeBuffer::relocate_code_to(CodeBuffer*) >> const+0x390 >> V [libjvm.so+0x728404] CodeBuffer::copy_code_to(CodeBlob*)+0x134 >> V [libjvm.so+0x722670] CodeBuffer::copy_code_and_locs_to(CodeBlob*)+0x84 >> V [libjvm.so+0x71f834] CodeBlob::CodeBlob(char const*, >> CodeBlobLayout const&, CodeBuffer*, int, int, OopMapSet*, bool, >> int)+0x320 >> V [libjvm.so+0x7c52c8] CompiledMethod::CompiledMethod(Method*, char >> const*, int, int, CodeBuffer*, int, int, OopMapSet*, bool)+0xd8 >> V [libjvm.so+0xf01f58] nmethod::nmethod(Method*, int, int, int, >> CodeOffsets*, int, DebugInformationRecorder*, Dependencies*, >> CodeBuffer*, int, OopMapSet*, ExceptionHandlerTable*, >> ImplicitExceptionTable*, AbstractCompiler*, int)+0xe0 >> V [libjvm.so+0xf01610] nmethod::new_nmethod(methodHandle const&, >> int, int, CodeOffsets*, int, DebugInformationRecorder*, Dependencies*, >> CodeBuffer*, int, OopMapSet*, ExceptionHandlerTable*, >> ImplicitExceptionTable*, AbstractCompiler*, int)+0x2c4 >> V [libjvm.so+0x632970] ciEnv::register_method(ciMethod*, int, >> CodeOffsets*, int, CodeBuffer*, int, OopMapSet*, >> ExceptionHandlerTable*, ImplicitExceptionTable*, AbstractCompiler*, >> bool, bool, RTMState)+0x560 >> V [libjvm.so+0x48ee00] Compilation::install_code(int)+0x264 >> V [libjvm.so+0x48eff8] Compilation::compile_method()+0x184 >> V [libjvm.so+0x48f7a8] Compilation::Compilation(AbstractCompiler*, >> ciEnv*, ciMethod*, int, BufferBlob*, DirectiveSet*)+0x288 >> V [libjvm.so+0x4980d0] Compiler::compile_method(ciEnv*, ciMethod*, >> int, DirectiveSet*)+0xc8 >> V [libjvm.so+0x7b188c] >> CompileBroker::invoke_compiler_on_method(CompileTask*)+0x590 >> V [libjvm.so+0x7b07bc] CompileBroker::compiler_thread_loop()+0x310 >> V [libjvm.so+0x11a614c] compiler_thread_entry(JavaThread*, Thread*)+0xa0 >> V [libjvm.so+0x119f3a8] JavaThread::thread_main_inner()+0x1b4 >> V [libjvm.so+0x119f1a4] JavaThread::run()+0x1b8 >> V [libjvm.so+0xf53d90] java_start(Thread*)+0x204 >> C [libpthread.so.0+0x8a64] 
start_thread+0xf4 >> C [libc.so.6+0x1032a0] clone+0x98 >> >> I haven't identified the exact cause (will analyze it tomorrow) but >> the stack trace indicates that it is indeed related to your changes. >> >> Besides that I have some comments: >> >> codeBuffer.hpp: >> >> 472 CodeSection* insts() { return &_insts; } >> 475 const CodeSection* insts() const { return &_insts; } >> >> - do we really need both versions? >> >> codeBlob.hpp: >> >> 135 nmethod* as_nmethod_or_null() const { return >> is_nmethod() ? (nmethod*) this : NULL; } >> 136 nmethod* as_nmethod() const { >> assert(is_nmethod(), "must be nmethod"); return (nmethod*) this; } >> 137 CompiledMethod* as_compiled_method_or_null() const { return >> is_compiled() ? (CompiledMethod*) this : NULL; } >> 138 CompiledMethod* as_compiled_method() const { >> assert(is_compiled(), "must be compiled"); return (CompiledMethod*) >> this; } >> 139 CodeBlob* as_codeblob_or_null() const { return >> (CodeBlob*) this; } >> >> - I don't like this code. You make the getters 'const' which >> implicitely makes 'this' a "pointer to const" but then the returned >> pointer is a normal pointer to a non-const object and therefore you >> have to statically cast away the "pointer to const" (that's why you >> need the cast even in the case where you return a CodeBlob*). So >> either remove the const qualifier from the method declarations or make >> them return "pointers to const". And by the way, as_codeblob_or_null() >> doesn't seemed to be used anywhere in the code, why do we need it at >> all? >> >> - Why do we need the non-virtual methods is_nmethod() and >> is_compiled() to manually simulate virtual behavior. Why can't we >> simply make them virtual and implement them accordingly in nmathod and >> CompiledMethod? >> >> Regards, >> Volker >> >> On Thu, Apr 7, 2016 at 2:12 PM, Rickard B?ckman >> wrote: >>> Hi, >>> >>> can I please have review for this patch please? >>> >>> So far CodeBlobs have required all the data (metadata, oops, code, etc) >>> to be in one continuous blob With this patch we are looking to change >>> that. It's been done by changing offsets in CodeBlob to addresses, >>> making some methods virtual to allow different behavior and also >>> creating a couple of new classes. CompiledMethod now sits inbetween >>> CodeBlob and nmethod. >>> >>> CR: https://bugs.openjdk.java.net/browse/JDK-8152664 >>> Webrev: http://cr.openjdk.java.net/~rbackman/8152664/ >>> >>> Thanks >>> /R From dean.long at oracle.com Fri Apr 8 20:15:55 2016 From: dean.long at oracle.com (Dean Long) Date: Fri, 8 Apr 2016 13:15:55 -0700 Subject: RFR (XL): 8152664 - Support non-continuous CodeBlobs in HotSpot In-Reply-To: <57080EDD.7040701@oracle.com> References: <20160407121221.GQ9504@rbackman> <57080EDD.7040701@oracle.com> Message-ID: <5708117B.4060705@oracle.com> I was able to find this: https://isocpp.org/wiki/faq/strange-inheritance#calling-virtuals-from-ctor-idiom dl On 4/8/2016 1:04 PM, Dean Long wrote: > Hi Volker. I noticed this problem before and filed 8151956. Making > those member functions > non-virtual may solve this particular problem, but as the code evolves > we may hit it > again if we ever call a virtual member function by accident. > > I'm not a C++ expert, but if we declared those functions as virtual in > CodeBlob, then would > that work? It doesn't seem ideal, however. I would rather not call > out from the CodeBlob > constructor at all, but instead do the work in the subclass > constructor. 
Let's say we move > the call to cb->copy_code_and_locs_to() to a separate function. Is > there a C++ idiom > for making sure all subclasses of CodeBlob call it? The only think I > can think of is to set > an "initialized" flag and to check it in strategic places. > > dl > > On 4/8/2016 11:12 AM, Volker Simonis wrote: >> Hi Rickard, >> >> I found the problem why your change crashes the VM on ppc (and I'm >> pretty >> sure it will also crash on ARM - @Andrew, maybe you can try it out?). >> It is >> caused by the following code in address NativeCall::get_trampoline() >> which >> is also present on arm64: >> >> address NativeCall::get_trampoline() { >> address call_addr = addr_at(0); >> CodeBlob *code = CodeCache::find_blob(call_addr); >> ... >> // If the codeBlob is not a nmethod, this is because we get here >> from the >> // CodeBlob constructor, which is called within the nmethod >> constructor. >> return trampoline_stub_Relocation::get_trampoline_for(call_addr, >> (nmethod*)code); >> } >> >> The comment explains the situation quite well: we're in the CodeBlob >> constructor which was called by the CompiledMethod constructor which was >> called from the nmethod constructor: >> >> #3 0x00003fffb741b80c in NativeCall::get_trampoline >> (this=0x3fff607d0fac) >> at >> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/cpu/ppc/vm/nativeInst_ppc.cpp:146 >> >> #4 0x00003fffb7596914 in Relocation::pd_call_destination >> (this=0x3ffdfe3fcc90, orig_addr=0x3fff603b8a2c "\001") at >> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/cpu/ppc/vm/relocInfo_ppc.cpp:87 >> >> #5 0x00003fffb758f5fc in CallRelocation::fix_relocation_after_move >> (this=0x3ffdfe3fcc90, src=0x3ffdfe3fdb40, dest=0x3ffdfe3fcd58) at >> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.cpp:506 >> >> #6 0x00003fffb6c48898 in CodeBuffer::relocate_code_to >> (this=0x3ffdfe3fdb40, dest=0x3ffdfe3fcd58) at >> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/asm/codeBuffer.cpp:812 >> >> #7 0x00003fffb6c48404 in CodeBuffer::copy_code_to (this=0x3ffdfe3fdb40, >> dest_blob=0x3fff607d0c10) at >> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/asm/codeBuffer.cpp:748 >> >> #8 0x00003fffb6c42670 in CodeBuffer::copy_code_and_locs_to >> (this=0x3ffdfe3fdb40, blob=0x3fff607d0c10) at >> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/asm/codeBuffer.hpp:607 >> >> #9 0x00003fffb6c3f834 in CodeBlob::CodeBlob (this=0x3fff607d0c10, >> name=0x3fffb7a75fd8 "nmethod", layout=..., cb=0x3ffdfe3fdb40, >> frame_complete_offset=20, frame_size=14, oop_maps=0x3ffe00049620, >> caller_must_gc_arguments=false, subtype=8) at >> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/codeBlob.cpp:117 >> >> #10 0x00003fffb6ce52c8 in CompiledMethod::CompiledMethod >> (this=0x3fff607d0c10, method=0x3ffe1ddce568, name=0x3fffb7a75fd8 >> "nmethod", >> size=1768, header_size=392, cb=0x3ffdfe3fdb40, frame_complete_offset=20, >> frame_size=14, oop_maps=0x3ffe00049620, >> caller_must_gc_arguments=false) at >> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/compiledMethod.cpp:42 >> >> #11 0x00003fffb7421f58 in nmethod::nmethod (this=0x3fff607d0c10, >> method=0x3ffe1ddce568, nmethod_size=1768, compile_id=4, entry_bci=-1, >> offsets=0x3ffdfe3fdb18, orig_pc_offset=104, debug_info=0x3fffb03d55f0, >> dependencies=0x3ffe00049690, code_buffer=0x3ffdfe3fdb40, frame_size=14, >> oop_maps=0x3ffe00049620, handler_table=0x3ffdfe3fdad0, >> nul_chk_table=0x3ffdfe3fdaf0, compiler=0x3fffb03bc270, comp_level=3) 
at >> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/nmethod.cpp:706 >> >> >> Now we cast 'code' to 'nmethod' but at this point in time 'code' is >> still a >> CodeBlob from the C++ point of view (i.e. it still has a CodeBlob vtable >> (see [1] for an explanation)). >> >> Later on, in RelocIterator::initialize() we call virtual methods on the >> nmethod which still has the vtable of a "CodeBlob" and this fails badly: >> >> #0 SingletonBlob::print_on (this=0x3fff607d0c10, st=0x0) at >> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/codeBlob.cpp:584 >> >> #1 0x00003fffb758d51c in RelocIterator::initialize >> (this=0x3ffdfe3fc928, >> nm=0x3fff607d0c10, begin=0x3fff607d0fac "\001", limit=0x0) at >> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.cpp:144 >> >> #2 0x00003fffb6ace56c in RelocIterator::RelocIterator >> (this=0x3ffdfe3fc928, nm=0x3fff607d0c10, begin=0x3fff607d0fac "\001", >> limit=0x0) at >> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.hpp:1378 >> >> #3 0x00003fffb75919dc in trampoline_stub_Relocation::get_trampoline_for >> (call=0x3fff607d0fac "\001", code=0x3fff607d0c10) at >> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.cpp:849 >> >> #4 0x00003fffb741b80c in NativeCall::get_trampoline >> (this=0x3fff607d0fac) >> at >> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/cpu/ppc/vm/nativeInst_ppc.cpp:146 >> >> >> As you can see, we actually want to call nmethod::stub_begin() at >> relocInfo.cpp:144 >> >> 142 _section_start[CodeBuffer::SECT_CONSTS] = nm->consts_begin(); >> 143 _section_start[CodeBuffer::SECT_INSTS ] = nm->insts_begin() ; >> 144 _section_start[CodeBuffer::SECT_STUBS ] = nm->stub_begin() ; >> >> but we actually end up in SingletonBlob::print_on() which is a >> completely >> different method. Notice that the call to nm->consts_begin() before also >> fails, but it doesn't crash the VM because it happens to call >> SingletonBlob::verify() which has no bad side effect. The call to >> nm->insts_begin() in line 143 is non-virtual and thus works fine. >> Here are >> the corresponding vtable slots in the CodeBlob vtable for consts_begin() >> and stub_begin() >> >> (gdb) p &nmethod::consts_begin >> $76 = &virtual table offset 42 >> (gdb) p &nmethod::stub_begin >> $77 = &virtual table offset 44 >> (gdb) p ((*(void ***)nm) + 1)[42] >> $86 = (void *) 0x3fffb6c41df8 >> (gdb) p ((*(void ***)nm) + 1)[44] >> $87 = (void *) 0x3fffb6c41e64 > const> >> >> As you can see, 'nm' is indeed a "CodeBlob" at this point: >> >> (gdb) p *(void ***)nm >> $91 = (void **) 0x3fffb7befa00 >> (gdb) p nm->print() >> [CodeBlob (0x00003fff607d1090)] >> Framesize: 14 >> >> The offending calls succeeded before your change, because they where not >> virtual. Any idea how we can fix this with the new class hierarchy? >> >> Regards, >> Volker >> >> [1] >> http://stackoverflow.com/questions/6591859/when-does-the-vptr-pointing-to-vtable-get-initialized-for-a-polymorphic-class >> >> >> >> >> On Thu, Apr 7, 2016 at 5:50 PM, Volker Simonis >> >> wrote: >> >>> Hi Rickard, >>> >>> I'd also like to know what's the rational behind this quite large >>> change. Do you expect some performance or memory consumption >>> improvements or is this a prerequisite for another change which is >>> still to come? >>> >>> The change itself currently doesn't work on ppc64 (neither on Linux >>> nor on AIX). 
I get the following crash during the build when the newly >>> built Hotspot is JIT-compiling java.lang.String::charAt on C1 : >>> >>> # >>> # A fatal error has been detected by the Java Runtime Environment: >>> # >>> # SIGSEGV (0xb) at pc=0x00001000012a44d0, pid=35331, tid=35404 >>> # >>> # JRE version: OpenJDK Runtime Environment (9.0) (slowdebug build >>> 9-internal+0-2016-04-07-162501.d046063.jdk9-hs-comp) >>> # Java VM: OpenJDK 64-Bit Server VM (slowdebug >>> 9-internal+0-2016-04-07-162501.d046063.jdk9-hs-comp, mixed mode, >>> tiered, compressed oo >>> ps, serial gc, linux-ppc64le) >>> # Problematic frame: >>> # V [libjvm.so+0xf744d0] outputStream::do_vsnprintf_and_write(char >>> const*, char*, bool)+0x40 >>> # >>> # No core dump will be written. Core dumps have been disabled. To >>> enable core dumping, try "ulimit -c unlimited" before starting Java >>> again >>> # >>> # If you would like to submit a bug report, please visit: >>> # http://bugreport.java.com/bugreport/crash.jsp >>> # >>> >>> --------------- S U M M A R Y ------------ >>> >>> Command Line: >>> -Dapplication.home=/sapmnt/ld9510/a/d046063/output-jdk9-hs-comp-dbg/jdk >>> -Xms8m -XX:+UseSerialGC -Xms32M -Xmx512M -Djdk. >>> module.main=jdk.jlink jdk.jlink/jdk.tools.jmod.Main create >>> --module-version 9-internal --os-name Linux --os-arch ppc64le >>> --os-version >>> 2.6 --modulepath /priv/d046063/output-jdk9-hs-comp-dbg/images/jmods >>> --hash-dependencies .* --exclude **_the.* --libs >>> >>> /priv/d046063/output-jdk9-hs-comp-dbg/support/modules_libs-stripped/java.base >>> >>> --cmds >>> /priv/d046063/output-jdk9-hs-comp-dbg/support/modules_cmds-stripped/java.base >>> >>> --config >>> /priv/d046063/output-jdk9-hs-comp-dbg/support/modules_conf/java.base >>> --class-path >>> /priv/d046063/output-jdk9-hs-comp-dbg/jdk/modules/java.base >>> /priv/d046063/output-jdk9-hs-comp-dbg/support/jmods/java.base.jmod >>> >>> Host: ld9510, POWER8E (raw), altivec supported, 48 cores, 61G, # >>> Please check /etc/os-release for details about this release. 
>>> Time: Thu Apr 7 16:28:55 2016 CEST elapsed time: 0 seconds (0d 0h >>> 0m 0s) >>> >>> --------------- T H R E A D --------------- >>> >>> Current thread (0x000010000429c800): JavaThread "C1 CompilerThread10" >>> daemon [_thread_in_vm, id=35404, >>> stack(0x000010006a800000,0x000010006ac00000)] >>> >>> >>> Current CompileTask: >>> C1: 761 3 3 java.lang.String::charAt (25 bytes) >>> >>> Stack: [0x000010006a800000,0x000010006ac00000], >>> sp=0x000010006abfc6c0, free space=4081k >>> Native frames: (J=compiled Java code, j=interpreted, Vv=VM code, >>> C=native >>> code) >>> V [libjvm.so+0xf744d0] outputStream::do_vsnprintf_and_write(char >>> const*, char*, bool)+0x40 >>> V [libjvm.so+0xf74668] outputStream::print_cr(char const*, ...)+0x68 >>> V [libjvm.so+0x72189c] CodeBlob::print_on(outputStream*) const+0x50 >>> V [libjvm.so+0x723bdc] RuntimeBlob::print_on(outputStream*) >>> const+0x40 >>> V [libjvm.so+0x721eb0] SingletonBlob::print_on(outputStream*) >>> const+0x4c >>> V [libjvm.so+0x106d51c] RelocIterator::initialize(CompiledMethod*, >>> unsigned char*, unsigned char*)+0x170 >>> V [libjvm.so+0x5ae56c] RelocIterator::RelocIterator(CompiledMethod*, >>> unsigned char*, unsigned char*)+0x78 >>> V [libjvm.so+0x10719dc] >>> trampoline_stub_Relocation::get_trampoline_for(unsigned char*, >>> nmethod*)+0x78 >>> V [libjvm.so+0xefb80c] NativeCall::get_trampoline()+0x110 >>> V [libjvm.so+0x1076914] Relocation::pd_call_destination(unsigned >>> char*)+0x150 >>> V [libjvm.so+0x106f5fc] >>> CallRelocation::fix_relocation_after_move(CodeBuffer const*, >>> CodeBuffer*)+0x74 >>> V [libjvm.so+0x728898] CodeBuffer::relocate_code_to(CodeBuffer*) >>> const+0x390 >>> V [libjvm.so+0x728404] CodeBuffer::copy_code_to(CodeBlob*)+0x134 >>> V [libjvm.so+0x722670] >>> CodeBuffer::copy_code_and_locs_to(CodeBlob*)+0x84 >>> V [libjvm.so+0x71f834] CodeBlob::CodeBlob(char const*, >>> CodeBlobLayout const&, CodeBuffer*, int, int, OopMapSet*, bool, >>> int)+0x320 >>> V [libjvm.so+0x7c52c8] CompiledMethod::CompiledMethod(Method*, char >>> const*, int, int, CodeBuffer*, int, int, OopMapSet*, bool)+0xd8 >>> V [libjvm.so+0xf01f58] nmethod::nmethod(Method*, int, int, int, >>> CodeOffsets*, int, DebugInformationRecorder*, Dependencies*, >>> CodeBuffer*, int, OopMapSet*, ExceptionHandlerTable*, >>> ImplicitExceptionTable*, AbstractCompiler*, int)+0xe0 >>> V [libjvm.so+0xf01610] nmethod::new_nmethod(methodHandle const&, >>> int, int, CodeOffsets*, int, DebugInformationRecorder*, Dependencies*, >>> CodeBuffer*, int, OopMapSet*, ExceptionHandlerTable*, >>> ImplicitExceptionTable*, AbstractCompiler*, int)+0x2c4 >>> V [libjvm.so+0x632970] ciEnv::register_method(ciMethod*, int, >>> CodeOffsets*, int, CodeBuffer*, int, OopMapSet*, >>> ExceptionHandlerTable*, ImplicitExceptionTable*, AbstractCompiler*, >>> bool, bool, RTMState)+0x560 >>> V [libjvm.so+0x48ee00] Compilation::install_code(int)+0x264 >>> V [libjvm.so+0x48eff8] Compilation::compile_method()+0x184 >>> V [libjvm.so+0x48f7a8] Compilation::Compilation(AbstractCompiler*, >>> ciEnv*, ciMethod*, int, BufferBlob*, DirectiveSet*)+0x288 >>> V [libjvm.so+0x4980d0] Compiler::compile_method(ciEnv*, ciMethod*, >>> int, DirectiveSet*)+0xc8 >>> V [libjvm.so+0x7b188c] >>> CompileBroker::invoke_compiler_on_method(CompileTask*)+0x590 >>> V [libjvm.so+0x7b07bc] CompileBroker::compiler_thread_loop()+0x310 >>> V [libjvm.so+0x11a614c] compiler_thread_entry(JavaThread*, >>> Thread*)+0xa0 >>> V [libjvm.so+0x119f3a8] JavaThread::thread_main_inner()+0x1b4 >>> V [libjvm.so+0x119f1a4] JavaThread::run()+0x1b8 
>>> V [libjvm.so+0xf53d90] java_start(Thread*)+0x204 >>> C [libpthread.so.0+0x8a64] start_thread+0xf4 >>> C [libc.so.6+0x1032a0] clone+0x98 >>> >>> I haven't identified the exact cause (will analyze it tomorrow) but >>> the stack trace indicates that it is indeed related to your changes. >>> >>> Besides that I have some comments: >>> >>> codeBuffer.hpp: >>> >>> 472 CodeSection* insts() { return &_insts; } >>> 475 const CodeSection* insts() const { return &_insts; } >>> >>> - do we really need both versions? >>> >>> codeBlob.hpp: >>> >>> 135 nmethod* as_nmethod_or_null() const { return >>> is_nmethod() ? (nmethod*) this : NULL; } >>> 136 nmethod* as_nmethod() const { >>> assert(is_nmethod(), "must be nmethod"); return (nmethod*) this; } >>> 137 CompiledMethod* as_compiled_method_or_null() const { return >>> is_compiled() ? (CompiledMethod*) this : NULL; } >>> 138 CompiledMethod* as_compiled_method() const { >>> assert(is_compiled(), "must be compiled"); return (CompiledMethod*) >>> this; } >>> 139 CodeBlob* as_codeblob_or_null() const { return >>> (CodeBlob*) this; } >>> >>> - I don't like this code. You make the getters 'const' which >>> implicitely makes 'this' a "pointer to const" but then the returned >>> pointer is a normal pointer to a non-const object and therefore you >>> have to statically cast away the "pointer to const" (that's why you >>> need the cast even in the case where you return a CodeBlob*). So >>> either remove the const qualifier from the method declarations or make >>> them return "pointers to const". And by the way, as_codeblob_or_null() >>> doesn't seemed to be used anywhere in the code, why do we need it at >>> all? >>> >>> - Why do we need the non-virtual methods is_nmethod() and >>> is_compiled() to manually simulate virtual behavior. Why can't we >>> simply make them virtual and implement them accordingly in nmathod and >>> CompiledMethod? >>> >>> Regards, >>> Volker >>> >>> On Thu, Apr 7, 2016 at 2:12 PM, Rickard B?ckman >>> wrote: >>>> Hi, >>>> >>>> can I please have review for this patch please? >>>> >>>> So far CodeBlobs have required all the data (metadata, oops, code, >>>> etc) >>>> to be in one continuous blob With this patch we are looking to change >>>> that. It's been done by changing offsets in CodeBlob to addresses, >>>> making some methods virtual to allow different behavior and also >>>> creating a couple of new classes. CompiledMethod now sits inbetween >>>> CodeBlob and nmethod. >>>> >>>> CR: https://bugs.openjdk.java.net/browse/JDK-8152664 >>>> Webrev: http://cr.openjdk.java.net/~rbackman/8152664/ >>>> >>>> Thanks >>>> /R > From christian.thalinger at oracle.com Fri Apr 8 20:40:52 2016 From: christian.thalinger at oracle.com (Christian Thalinger) Date: Fri, 8 Apr 2016 10:40:52 -1000 Subject: RFR (XL): 8152664 - Support non-continuous CodeBlobs in HotSpot In-Reply-To: <570673D0.20108@oracle.com> References: <20160407121221.GQ9504@rbackman> <570673D0.20108@oracle.com> Message-ID: > On Apr 7, 2016, at 4:50 AM, Tobias Hartmann wrote: > > Hi Rickard, > > I had a look at some parts of the changes. Here are my comments: > > codeCache.cpp > -> If we still need NMethodIterator it should be merged with CompiledMethodIterator using C++ templates to avoid code duplication. > sweeper.cpp > -> NMethodMarker is not used and should be removed > -> CompiledMethodMarker differs from NMethodMarker (it should be merged with latest changes) > -> the comment in line 432 is confusing. Shouldn't it be something like "Only flushing nmethod so size..."? 
> sweeper.hpp > -> the comment describing the sweeper cycle should be updated > -> line 69: "Current nmethod" should be changed to "Current compiled method" > -> is_sweeping(nmethod* which) is not used and can be removed > thread.hpp > -> set_scanned_nmethod() name and comment should be fixed. I also wonder if it's necessary to track/lock CompiledMethod. Shouldn't it be sufficient to lock nmethod? > > I also noticed some minor style issues: > > codeCache.cpp > -> "CompiledMethod *nm" vs. "CompiledMethod* nm? + CompiledMethod* CodeCache::find_compiled(void* start) { + CodeBlob *cb = find_blob(start); + assert(cb == NULL || cb->is_compiled(), "did not find an compiled_method?); Should be ?a CompiledMethod?. > codeBlob.hpp > -> typo: "deoptimizatation" > nmethod.hpp > -> wrong indentation in line 265 (whitespace was removed) > vmStructs.cpp > -> unnecessary newline in line 916 > -> wrong indentation of "\" at line ends (multiple times) compileBroker.cpp: ! CompiledMethod* code = method->code(); + if (code == NULL) { + return (nmethod*) code; Return NULL instead of casting. > > Best regards, > Tobias > > > On 07.04.2016 14:12, Rickard B?ckman wrote: >> Hi, >> >> can I please have review for this patch please? >> >> So far CodeBlobs have required all the data (metadata, oops, code, etc) >> to be in one continuous blob With this patch we are looking to change >> that. It's been done by changing offsets in CodeBlob to addresses, >> making some methods virtual to allow different behavior and also >> creating a couple of new classes. CompiledMethod now sits inbetween >> CodeBlob and nmethod. >> >> CR: https://bugs.openjdk.java.net/browse/JDK-8152664 >> Webrev: http://cr.openjdk.java.net/~rbackman/8152664/ >> >> Thanks >> /R >> From coleen.phillimore at oracle.com Fri Apr 8 21:06:33 2016 From: coleen.phillimore at oracle.com (Coleen Phillimore) Date: Fri, 8 Apr 2016 17:06:33 -0400 Subject: RFR (s) 8148772: VM crash in nsk/jvmti/RedefineClasses/StressRedefine: assert failed: Corrupted constant pool Message-ID: <57081D59.9040909@oracle.com> Summary: ConstantPool::resolve_constant_at_impl() isn't thread safe for MethodHandleInError and MethodTypeInError. Need to ignore the InError tag when fetching method_handle_index and method_type_index. The error is cached after the call to systemDictionary::link_method_handle_constant() if it's not there already. Tested with rbt equivalent of nightly runs, and StressRedefine test (reproduceable with this error) for >24 hours (also with 8151546 fixed). Ran jdk/test/java/lang/invoke tests. I can't write a test for this because it's too timing sensitive. open webrev at http://cr.openjdk.java.net/~coleenp/8148772.01/webrev bug link https://bugs.openjdk.java.net/browse/JDK-8148772 Thanks, Coleen From christian.thalinger at oracle.com Fri Apr 8 21:20:51 2016 From: christian.thalinger at oracle.com (Christian Thalinger) Date: Fri, 8 Apr 2016 11:20:51 -1000 Subject: RFR (s) 8148772: VM crash in nsk/jvmti/RedefineClasses/StressRedefine: assert failed: Corrupted constant pool In-Reply-To: <57081D59.9040909@oracle.com> References: <57081D59.9040909@oracle.com> Message-ID: <6A70BF0D-3EF0-49B6-9111-E9A8DF654773@oracle.com> > On Apr 8, 2016, at 11:06 AM, Coleen Phillimore wrote: > > Summary: ConstantPool::resolve_constant_at_impl() isn't thread safe for MethodHandleInError and MethodTypeInError. > > Need to ignore the InError tag when fetching method_handle_index and method_type_index. 
The error is cached after the call to systemDictionary::link_method_handle_constant() if it's not there already. > > Tested with rbt equivalent of nightly runs, and StressRedefine test (reproduceable with this error) for >24 hours (also with 8151546 fixed). Ran jdk/test/java/lang/invoke tests. I can't write a test for this because it's too timing sensitive. > > open webrev at http://cr.openjdk.java.net/~coleenp/8148772.01/webrev default: + DEBUG_ONLY( tty->print_cr("*** %p: tag at CP[%d] = %d", + this, index1, t1)); + assert(false, "unexpected constant tag"); + ShouldNotReachHere(); break; } Merge the print_cr and assert into a fatal and remove the ShouldNotReachHere. > bug link https://bugs.openjdk.java.net/browse/JDK-8148772 > > Thanks, > Coleen > > > > > From dean.long at oracle.com Fri Apr 8 22:02:52 2016 From: dean.long at oracle.com (Dean Long) Date: Fri, 8 Apr 2016 15:02:52 -0700 Subject: RFR (XL): 8152664 - Support non-continuous CodeBlobs in HotSpot In-Reply-To: <5708117B.4060705@oracle.com> References: <20160407121221.GQ9504@rbackman> <57080EDD.7040701@oracle.com> <5708117B.4060705@oracle.com> Message-ID: <57082A8C.6030800@oracle.com> Volker, does this patch fix the problem? http://cr.openjdk.java.net/~dlong/8151956/8151956.patch dl On 4/8/2016 1:15 PM, Dean Long wrote: > I was able to find this: > > https://isocpp.org/wiki/faq/strange-inheritance#calling-virtuals-from-ctor-idiom > > > dl > > On 4/8/2016 1:04 PM, Dean Long wrote: >> Hi Volker. I noticed this problem before and filed 8151956. Making >> those member functions >> non-virtual may solve this particular problem, but as the code >> evolves we may hit it >> again if we ever call a virtual member function by accident. >> >> I'm not a C++ expert, but if we declared those functions as virtual >> in CodeBlob, then would >> that work? It doesn't seem ideal, however. I would rather not call >> out from the CodeBlob >> constructor at all, but instead do the work in the subclass >> constructor. Let's say we move >> the call to cb->copy_code_and_locs_to() to a separate function. Is >> there a C++ idiom >> for making sure all subclasses of CodeBlob call it? The only think I >> can think of is to set >> an "initialized" flag and to check it in strategic places. >> >> dl >> >> On 4/8/2016 11:12 AM, Volker Simonis wrote: >>> Hi Rickard, >>> >>> I found the problem why your change crashes the VM on ppc (and I'm >>> pretty >>> sure it will also crash on ARM - @Andrew, maybe you can try it >>> out?). It is >>> caused by the following code in address NativeCall::get_trampoline() >>> which >>> is also present on arm64: >>> >>> address NativeCall::get_trampoline() { >>> address call_addr = addr_at(0); >>> CodeBlob *code = CodeCache::find_blob(call_addr); >>> ... >>> // If the codeBlob is not a nmethod, this is because we get here >>> from the >>> // CodeBlob constructor, which is called within the nmethod >>> constructor. 
>>> return trampoline_stub_Relocation::get_trampoline_for(call_addr, >>> (nmethod*)code); >>> } >>> >>> The comment explains the situation quite well: we're in the CodeBlob >>> constructor which was called by the CompiledMethod constructor which >>> was >>> called from the nmethod constructor: >>> >>> #3 0x00003fffb741b80c in NativeCall::get_trampoline >>> (this=0x3fff607d0fac) >>> at >>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/cpu/ppc/vm/nativeInst_ppc.cpp:146 >>> >>> #4 0x00003fffb7596914 in Relocation::pd_call_destination >>> (this=0x3ffdfe3fcc90, orig_addr=0x3fff603b8a2c "\001") at >>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/cpu/ppc/vm/relocInfo_ppc.cpp:87 >>> >>> #5 0x00003fffb758f5fc in CallRelocation::fix_relocation_after_move >>> (this=0x3ffdfe3fcc90, src=0x3ffdfe3fdb40, dest=0x3ffdfe3fcd58) at >>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.cpp:506 >>> >>> #6 0x00003fffb6c48898 in CodeBuffer::relocate_code_to >>> (this=0x3ffdfe3fdb40, dest=0x3ffdfe3fcd58) at >>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/asm/codeBuffer.cpp:812 >>> >>> #7 0x00003fffb6c48404 in CodeBuffer::copy_code_to >>> (this=0x3ffdfe3fdb40, >>> dest_blob=0x3fff607d0c10) at >>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/asm/codeBuffer.cpp:748 >>> >>> #8 0x00003fffb6c42670 in CodeBuffer::copy_code_and_locs_to >>> (this=0x3ffdfe3fdb40, blob=0x3fff607d0c10) at >>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/asm/codeBuffer.hpp:607 >>> >>> #9 0x00003fffb6c3f834 in CodeBlob::CodeBlob (this=0x3fff607d0c10, >>> name=0x3fffb7a75fd8 "nmethod", layout=..., cb=0x3ffdfe3fdb40, >>> frame_complete_offset=20, frame_size=14, oop_maps=0x3ffe00049620, >>> caller_must_gc_arguments=false, subtype=8) at >>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/codeBlob.cpp:117 >>> >>> #10 0x00003fffb6ce52c8 in CompiledMethod::CompiledMethod >>> (this=0x3fff607d0c10, method=0x3ffe1ddce568, name=0x3fffb7a75fd8 >>> "nmethod", >>> size=1768, header_size=392, cb=0x3ffdfe3fdb40, >>> frame_complete_offset=20, >>> frame_size=14, oop_maps=0x3ffe00049620, >>> caller_must_gc_arguments=false) at >>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/compiledMethod.cpp:42 >>> >>> #11 0x00003fffb7421f58 in nmethod::nmethod (this=0x3fff607d0c10, >>> method=0x3ffe1ddce568, nmethod_size=1768, compile_id=4, entry_bci=-1, >>> offsets=0x3ffdfe3fdb18, orig_pc_offset=104, debug_info=0x3fffb03d55f0, >>> dependencies=0x3ffe00049690, code_buffer=0x3ffdfe3fdb40, frame_size=14, >>> oop_maps=0x3ffe00049620, handler_table=0x3ffdfe3fdad0, >>> nul_chk_table=0x3ffdfe3fdaf0, compiler=0x3fffb03bc270, comp_level=3) at >>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/nmethod.cpp:706 >>> >>> >>> Now we cast 'code' to 'nmethod' but at this point in time 'code' is >>> still a >>> CodeBlob from the C++ point of view (i.e. it still has a CodeBlob >>> vtable >>> (see [1] for an explanation)). 
>>> >>> Later on, in RelocIterator::initialize() we call virtual methods on the >>> nmethod which still has the vtable of a "CodeBlob" and this fails >>> badly: >>> >>> #0 SingletonBlob::print_on (this=0x3fff607d0c10, st=0x0) at >>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/codeBlob.cpp:584 >>> >>> #1 0x00003fffb758d51c in RelocIterator::initialize >>> (this=0x3ffdfe3fc928, >>> nm=0x3fff607d0c10, begin=0x3fff607d0fac "\001", limit=0x0) at >>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.cpp:144 >>> >>> #2 0x00003fffb6ace56c in RelocIterator::RelocIterator >>> (this=0x3ffdfe3fc928, nm=0x3fff607d0c10, begin=0x3fff607d0fac "\001", >>> limit=0x0) at >>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.hpp:1378 >>> >>> #3 0x00003fffb75919dc in >>> trampoline_stub_Relocation::get_trampoline_for >>> (call=0x3fff607d0fac "\001", code=0x3fff607d0c10) at >>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.cpp:849 >>> >>> #4 0x00003fffb741b80c in NativeCall::get_trampoline >>> (this=0x3fff607d0fac) >>> at >>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/cpu/ppc/vm/nativeInst_ppc.cpp:146 >>> >>> >>> As you can see, we actually want to call nmethod::stub_begin() at >>> relocInfo.cpp:144 >>> >>> 142 _section_start[CodeBuffer::SECT_CONSTS] = nm->consts_begin(); >>> 143 _section_start[CodeBuffer::SECT_INSTS ] = nm->insts_begin() ; >>> 144 _section_start[CodeBuffer::SECT_STUBS ] = nm->stub_begin() ; >>> >>> but we actually end up in SingletonBlob::print_on() which is a >>> completely >>> different method. Notice that the call to nm->consts_begin() before >>> also >>> fails, but it doesn't crash the VM because it happens to call >>> SingletonBlob::verify() which has no bad side effect. The call to >>> nm->insts_begin() in line 143 is non-virtual and thus works fine. >>> Here are >>> the corresponding vtable slots in the CodeBlob vtable for >>> consts_begin() >>> and stub_begin() >>> >>> (gdb) p &nmethod::consts_begin >>> $76 = &virtual table offset 42 >>> (gdb) p &nmethod::stub_begin >>> $77 = &virtual table offset 44 >>> (gdb) p ((*(void ***)nm) + 1)[42] >>> $86 = (void *) 0x3fffb6c41df8 >>> (gdb) p ((*(void ***)nm) + 1)[44] >>> $87 = (void *) 0x3fffb6c41e64 >>> >>> >>> As you can see, 'nm' is indeed a "CodeBlob" at this point: >>> >>> (gdb) p *(void ***)nm >>> $91 = (void **) 0x3fffb7befa00 >>> (gdb) p nm->print() >>> [CodeBlob (0x00003fff607d1090)] >>> Framesize: 14 >>> >>> The offending calls succeeded before your change, because they where >>> not >>> virtual. Any idea how we can fix this with the new class hierarchy? >>> >>> Regards, >>> Volker >>> >>> [1] >>> http://stackoverflow.com/questions/6591859/when-does-the-vptr-pointing-to-vtable-get-initialized-for-a-polymorphic-class >>> >>> >>> >>> >>> On Thu, Apr 7, 2016 at 5:50 PM, Volker Simonis >>> >>> wrote: >>> >>>> Hi Rickard, >>>> >>>> I'd also like to know what's the rational behind this quite large >>>> change. Do you expect some performance or memory consumption >>>> improvements or is this a prerequisite for another change which is >>>> still to come? >>>> >>>> The change itself currently doesn't work on ppc64 (neither on Linux >>>> nor on AIX). 
I get the following crash during the build when the newly >>>> built Hotspot is JIT-compiling java.lang.String::charAt on C1 : >>>> >>>> # >>>> # A fatal error has been detected by the Java Runtime Environment: >>>> # >>>> # SIGSEGV (0xb) at pc=0x00001000012a44d0, pid=35331, tid=35404 >>>> # >>>> # JRE version: OpenJDK Runtime Environment (9.0) (slowdebug build >>>> 9-internal+0-2016-04-07-162501.d046063.jdk9-hs-comp) >>>> # Java VM: OpenJDK 64-Bit Server VM (slowdebug >>>> 9-internal+0-2016-04-07-162501.d046063.jdk9-hs-comp, mixed mode, >>>> tiered, compressed oo >>>> ps, serial gc, linux-ppc64le) >>>> # Problematic frame: >>>> # V [libjvm.so+0xf744d0] outputStream::do_vsnprintf_and_write(char >>>> const*, char*, bool)+0x40 >>>> # >>>> # No core dump will be written. Core dumps have been disabled. To >>>> enable core dumping, try "ulimit -c unlimited" before starting Java >>>> again >>>> # >>>> # If you would like to submit a bug report, please visit: >>>> # http://bugreport.java.com/bugreport/crash.jsp >>>> # >>>> >>>> --------------- S U M M A R Y ------------ >>>> >>>> Command Line: >>>> -Dapplication.home=/sapmnt/ld9510/a/d046063/output-jdk9-hs-comp-dbg/jdk >>>> >>>> -Xms8m -XX:+UseSerialGC -Xms32M -Xmx512M -Djdk. >>>> module.main=jdk.jlink jdk.jlink/jdk.tools.jmod.Main create >>>> --module-version 9-internal --os-name Linux --os-arch ppc64le >>>> --os-version >>>> 2.6 --modulepath /priv/d046063/output-jdk9-hs-comp-dbg/images/jmods >>>> --hash-dependencies .* --exclude **_the.* --libs >>>> >>>> /priv/d046063/output-jdk9-hs-comp-dbg/support/modules_libs-stripped/java.base >>>> >>>> --cmds >>>> /priv/d046063/output-jdk9-hs-comp-dbg/support/modules_cmds-stripped/java.base >>>> >>>> --config >>>> /priv/d046063/output-jdk9-hs-comp-dbg/support/modules_conf/java.base >>>> --class-path >>>> /priv/d046063/output-jdk9-hs-comp-dbg/jdk/modules/java.base >>>> /priv/d046063/output-jdk9-hs-comp-dbg/support/jmods/java.base.jmod >>>> >>>> Host: ld9510, POWER8E (raw), altivec supported, 48 cores, 61G, # >>>> Please check /etc/os-release for details about this release. 
>>>> Time: Thu Apr 7 16:28:55 2016 CEST elapsed time: 0 seconds (0d 0h >>>> 0m 0s) >>>> >>>> --------------- T H R E A D --------------- >>>> >>>> Current thread (0x000010000429c800): JavaThread "C1 CompilerThread10" >>>> daemon [_thread_in_vm, id=35404, >>>> stack(0x000010006a800000,0x000010006ac00000)] >>>> >>>> >>>> Current CompileTask: >>>> C1: 761 3 3 java.lang.String::charAt (25 bytes) >>>> >>>> Stack: [0x000010006a800000,0x000010006ac00000], >>>> sp=0x000010006abfc6c0, free space=4081k >>>> Native frames: (J=compiled Java code, j=interpreted, Vv=VM code, >>>> C=native >>>> code) >>>> V [libjvm.so+0xf744d0] outputStream::do_vsnprintf_and_write(char >>>> const*, char*, bool)+0x40 >>>> V [libjvm.so+0xf74668] outputStream::print_cr(char const*, ...)+0x68 >>>> V [libjvm.so+0x72189c] CodeBlob::print_on(outputStream*) const+0x50 >>>> V [libjvm.so+0x723bdc] RuntimeBlob::print_on(outputStream*) >>>> const+0x40 >>>> V [libjvm.so+0x721eb0] SingletonBlob::print_on(outputStream*) >>>> const+0x4c >>>> V [libjvm.so+0x106d51c] RelocIterator::initialize(CompiledMethod*, >>>> unsigned char*, unsigned char*)+0x170 >>>> V [libjvm.so+0x5ae56c] RelocIterator::RelocIterator(CompiledMethod*, >>>> unsigned char*, unsigned char*)+0x78 >>>> V [libjvm.so+0x10719dc] >>>> trampoline_stub_Relocation::get_trampoline_for(unsigned char*, >>>> nmethod*)+0x78 >>>> V [libjvm.so+0xefb80c] NativeCall::get_trampoline()+0x110 >>>> V [libjvm.so+0x1076914] Relocation::pd_call_destination(unsigned >>>> char*)+0x150 >>>> V [libjvm.so+0x106f5fc] >>>> CallRelocation::fix_relocation_after_move(CodeBuffer const*, >>>> CodeBuffer*)+0x74 >>>> V [libjvm.so+0x728898] CodeBuffer::relocate_code_to(CodeBuffer*) >>>> const+0x390 >>>> V [libjvm.so+0x728404] CodeBuffer::copy_code_to(CodeBlob*)+0x134 >>>> V [libjvm.so+0x722670] >>>> CodeBuffer::copy_code_and_locs_to(CodeBlob*)+0x84 >>>> V [libjvm.so+0x71f834] CodeBlob::CodeBlob(char const*, >>>> CodeBlobLayout const&, CodeBuffer*, int, int, OopMapSet*, bool, >>>> int)+0x320 >>>> V [libjvm.so+0x7c52c8] CompiledMethod::CompiledMethod(Method*, char >>>> const*, int, int, CodeBuffer*, int, int, OopMapSet*, bool)+0xd8 >>>> V [libjvm.so+0xf01f58] nmethod::nmethod(Method*, int, int, int, >>>> CodeOffsets*, int, DebugInformationRecorder*, Dependencies*, >>>> CodeBuffer*, int, OopMapSet*, ExceptionHandlerTable*, >>>> ImplicitExceptionTable*, AbstractCompiler*, int)+0xe0 >>>> V [libjvm.so+0xf01610] nmethod::new_nmethod(methodHandle const&, >>>> int, int, CodeOffsets*, int, DebugInformationRecorder*, Dependencies*, >>>> CodeBuffer*, int, OopMapSet*, ExceptionHandlerTable*, >>>> ImplicitExceptionTable*, AbstractCompiler*, int)+0x2c4 >>>> V [libjvm.so+0x632970] ciEnv::register_method(ciMethod*, int, >>>> CodeOffsets*, int, CodeBuffer*, int, OopMapSet*, >>>> ExceptionHandlerTable*, ImplicitExceptionTable*, AbstractCompiler*, >>>> bool, bool, RTMState)+0x560 >>>> V [libjvm.so+0x48ee00] Compilation::install_code(int)+0x264 >>>> V [libjvm.so+0x48eff8] Compilation::compile_method()+0x184 >>>> V [libjvm.so+0x48f7a8] Compilation::Compilation(AbstractCompiler*, >>>> ciEnv*, ciMethod*, int, BufferBlob*, DirectiveSet*)+0x288 >>>> V [libjvm.so+0x4980d0] Compiler::compile_method(ciEnv*, ciMethod*, >>>> int, DirectiveSet*)+0xc8 >>>> V [libjvm.so+0x7b188c] >>>> CompileBroker::invoke_compiler_on_method(CompileTask*)+0x590 >>>> V [libjvm.so+0x7b07bc] CompileBroker::compiler_thread_loop()+0x310 >>>> V [libjvm.so+0x11a614c] compiler_thread_entry(JavaThread*, >>>> Thread*)+0xa0 >>>> V [libjvm.so+0x119f3a8] 
JavaThread::thread_main_inner()+0x1b4 >>>> V [libjvm.so+0x119f1a4] JavaThread::run()+0x1b8 >>>> V [libjvm.so+0xf53d90] java_start(Thread*)+0x204 >>>> C [libpthread.so.0+0x8a64] start_thread+0xf4 >>>> C [libc.so.6+0x1032a0] clone+0x98 >>>> >>>> I haven't identified the exact cause (will analyze it tomorrow) but >>>> the stack trace indicates that it is indeed related to your changes. >>>> >>>> Besides that I have some comments: >>>> >>>> codeBuffer.hpp: >>>> >>>> 472 CodeSection* insts() { return &_insts; } >>>> 475 const CodeSection* insts() const { return &_insts; } >>>> >>>> - do we really need both versions? >>>> >>>> codeBlob.hpp: >>>> >>>> 135 nmethod* as_nmethod_or_null() const { return >>>> is_nmethod() ? (nmethod*) this : NULL; } >>>> 136 nmethod* as_nmethod() const { >>>> assert(is_nmethod(), "must be nmethod"); return (nmethod*) this; } >>>> 137 CompiledMethod* as_compiled_method_or_null() const { return >>>> is_compiled() ? (CompiledMethod*) this : NULL; } >>>> 138 CompiledMethod* as_compiled_method() const { >>>> assert(is_compiled(), "must be compiled"); return (CompiledMethod*) >>>> this; } >>>> 139 CodeBlob* as_codeblob_or_null() const { return >>>> (CodeBlob*) this; } >>>> >>>> - I don't like this code. You make the getters 'const' which >>>> implicitely makes 'this' a "pointer to const" but then the returned >>>> pointer is a normal pointer to a non-const object and therefore you >>>> have to statically cast away the "pointer to const" (that's why you >>>> need the cast even in the case where you return a CodeBlob*). So >>>> either remove the const qualifier from the method declarations or make >>>> them return "pointers to const". And by the way, as_codeblob_or_null() >>>> doesn't seemed to be used anywhere in the code, why do we need it at >>>> all? >>>> >>>> - Why do we need the non-virtual methods is_nmethod() and >>>> is_compiled() to manually simulate virtual behavior. Why can't we >>>> simply make them virtual and implement them accordingly in nmathod and >>>> CompiledMethod? >>>> >>>> Regards, >>>> Volker >>>> >>>> On Thu, Apr 7, 2016 at 2:12 PM, Rickard B?ckman >>>> wrote: >>>>> Hi, >>>>> >>>>> can I please have review for this patch please? >>>>> >>>>> So far CodeBlobs have required all the data (metadata, oops, code, >>>>> etc) >>>>> to be in one continuous blob With this patch we are looking to change >>>>> that. It's been done by changing offsets in CodeBlob to addresses, >>>>> making some methods virtual to allow different behavior and also >>>>> creating a couple of new classes. CompiledMethod now sits inbetween >>>>> CodeBlob and nmethod. 
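To make the earlier const comment concrete, here is a small standalone sketch (simplified and illustrative: the _is_nmethod field, the _reviewed suffix and the method bodies are placeholders, not the webrev code). Inside a const member function 'this' has type 'const CodeBlob*', so returning a plain nmethod* means the C-style cast also strips const; either of the two alternatives mentioned above avoids that:

#include <cstddef>

class nmethod;   // forward declaration; the real class lives elsewhere

class CodeBlob {
 public:
  bool is_nmethod() const { return _is_nmethod; }

  // Shape of the reviewed code: const member function, non-const result.
  // The C-style cast quietly casts the constness of 'this' away.
  nmethod* as_nmethod_or_null_reviewed() const {
    return is_nmethod() ? (nmethod*) this : NULL;
  }

  // Alternative 1: drop the const qualifier; nothing needs to be cast away.
  nmethod* as_nmethod_or_null() {
    return is_nmethod() ? (nmethod*) this : NULL;
  }

  // Alternative 2: keep the method const and return a pointer to const.
  const nmethod* as_nmethod_or_null() const {
    return is_nmethod() ? (const nmethod*) this : NULL;
  }

 private:
  bool _is_nmethod;
};

(Rickard's follow-up later in this thread removes the const qualifier from these accessors.)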
>>>>> >>>>> CR: https://bugs.openjdk.java.net/browse/JDK-8152664 >>>>> Webrev: http://cr.openjdk.java.net/~rbackman/8152664/ >>>>> >>>>> Thanks >>>>> /R >> > From doug.simon at oracle.com Sat Apr 9 00:11:04 2016 From: doug.simon at oracle.com (Doug Simon) Date: Sat, 9 Apr 2016 02:11:04 +0200 Subject: RFR (S): 8139921: add mx configuration files to support HotSpot IDE configuration generation In-Reply-To: <520F6D4A-BEFB-4C7E-8360-8BB4403C02C2@oracle.com> References: <31F6B9E0-D7C2-4B73-8114-8616F82AE5B5@oracle.com> <4E949060-D14C-4986-A21D-69EFE55E095D@oracle.com> <520F6D4A-BEFB-4C7E-8360-8BB4403C02C2@oracle.com> Message-ID: > On 08 Apr 2016, at 20:31, Christian Thalinger wrote: > > >> On Apr 7, 2016, at 11:59 PM, Volker Simonis wrote: >> >> On Thu, Apr 7, 2016 at 6:59 PM, Christian Thalinger >> wrote: >>> >>> On Apr 7, 2016, at 2:22 AM, Volker Simonis wrote: >>> >>> Hi Christian, >>> >>> I'd like to try this but I couldn't figure out how it works. >>> I don't have any experience with Graal/Truffel/JVMCI but as far as I >>> understood the creation of an Eclipse project should work equally well >>> for a vanilla hospot repository, right? >>> >>> >>> Correct. >>> >>> >>> The first question is where to get mx from (there's different >>> information in the Wiki and this email thread for example) ? >>> >>> https://bitbucket.org/allr/mx >>> oder >>> https://github.com/graalvm/mx.git The most up-to-date instructions are at https://github.com/graalvm/graal-core/blob/master/README.md which suggest to use https://github.com/graalvm/mx.git. The instructions on https://wiki.openjdk.java.net/display/Graal/Main are out dated and scheduled for removal very soon. >>> >>> >>> Since the review the mx repository got moved to github. That?s the one you >>> want to use. >>> >>> >>> Now let's say I cloned the right mx version. How do I use it? >>> Let's say I have a vanilla jdk9 forest under /share/OpenJDK/jdk9 >>> I create an output directory under /share/OpenJDK/output-jdk9-dbg and >>> from there I call configure and build the images: >>> >>> cd /share/OpenJDK/output-jdk9-dbg >>> bash /share/OpenJDK/jdk9/configure --with-boot-jdk=.. >>> --with-debug-level=slowdebug >>> make images >>> >>> Taking this scenario, from which directory am I supposed to call 'mx >>> ideinit', where will the Eclipse project be created at and how is mx >>> supposed to find my configuration (i.e. platform) and generated files? >>> >>> >>> I have never tried to have the output directory not in the source directory >>> and mx might not support this. But it?s not really necessary because >>> everything goes into build/ by default anyway. If you really want to have a >>> separate output directory I suppose we could add an option to mx. >>> >> >> Hi Christian, >> >> thanks a lot for your explanation. I've never tried WITH the output >> directory in the source tree :) I have my source repositories on a >> network share and on every platform I configure and build from the >> shared sources into a local directory. >> >> That said, I've tried to configure and build INTO the source directory >> and called 'mx eclipseinit' afterwards. That worked as described and >> the created project worked quite nicely. 
But after playing around a >> little bit, I found the following points: >> >> - the project settings are hard-wired for linux-x86_64 >> - so it makes no difference if I configure and build on linux-ppc64, >> the generated project still tries to get the generated files from >> 'PARENT-5-PROJECT_LOC/build/linux-x86_64-normal-server-slowdebug' >> which will apparently not work on any other platform. > > That is half-correct. It?s not hardcoded but it picks up the platform you are running mx on. So, if you had a separate repository for ppc64 and run mx on the ppc64 host it should work. > > I mean, there is no real solution to that problem, right? You can only have one generated directory to be included. In general, the mx support for native HotSpot development is somewhat limited (compared to Java development) and x64 focused given the history and resources of the project. That said, we?re open to work with you on any pull requests you care to submit. > >> - the same is true for the preprocessor defines. They are hard wired >> in ".mx.jvmci/hotspot/templates/eclipse/cproject". I don't actually >> understand why you are creating distinct release, fastdebug and >> slowdebug projects at all > > The main reason for the different configurations is because the generated directory is different. > >> , if all of them have the same settings (e.g. >> ASSERT and DEBUG is also defined for server-release). > > This is odd and sounds like a bug. Doug? Ah! There is only one template: Yes, the hotspot Eclipse configs generated by mx are quite inflexible currently. > > $ find .mx.jvmci/hotspot/templates/ > .mx.jvmci/hotspot/templates/ > .mx.jvmci/hotspot/templates/eclipse > .mx.jvmci/hotspot/templates/eclipse/cproject > .mx.jvmci/hotspot/templates/eclipse/settings > .mx.jvmci/hotspot/templates/eclipse/settings/org.eclipse.cdt.core.prefs > .mx.jvmci/hotspot/templates/eclipse/settings/org.eclipse.cdt.ui.prefs > .mx.jvmci/hotspot/templates/eclipse/settings/org.eclipse.core.runtime.prefs > > A much bigger problem is this: > > $ grep TARGET .mx.jvmci/hotspot/templates/eclipse/project > > > > > > > Doug, can we change that to actually use the right defines? Possibly. The current ?templates" were created by simply copying someones manually set up configuration. For someone who understands CDT projects well, this probably wouldn?t be hard to fix. -Doug From serguei.spitsyn at oracle.com Sat Apr 9 01:18:12 2016 From: serguei.spitsyn at oracle.com (serguei.spitsyn at oracle.com) Date: Fri, 8 Apr 2016 18:18:12 -0700 Subject: RFR (s) 8148772: VM crash in nsk/jvmti/RedefineClasses/StressRedefine: assert failed: Corrupted constant pool In-Reply-To: <57081D59.9040909@oracle.com> References: <57081D59.9040909@oracle.com> Message-ID: <57085854.5020306@oracle.com> Coleen, The fix looks good. Thanks, Serguei On 4/8/16 14:06, Coleen Phillimore wrote: > Summary: ConstantPool::resolve_constant_at_impl() isn't thread safe > for MethodHandleInError and MethodTypeInError. > > Need to ignore the InError tag when fetching method_handle_index and > method_type_index. The error is cached after the call to > systemDictionary::link_method_handle_constant() if it's not there > already. > > Tested with rbt equivalent of nightly runs, and StressRedefine test > (reproduceable with this error) for >24 hours (also with 8151546 > fixed). Ran jdk/test/java/lang/invoke tests. I can't write a test > for this because it's too timing sensitive. 
> > open webrev at http://cr.openjdk.java.net/~coleenp/8148772.01/webrev > bug link https://bugs.openjdk.java.net/browse/JDK-8148772 > > Thanks, > Coleen > > > > > From daniel.daugherty at oracle.com Sat Apr 9 01:46:05 2016 From: daniel.daugherty at oracle.com (Daniel D. Daugherty) Date: Fri, 8 Apr 2016 19:46:05 -0600 Subject: RFR (s) 8148772: VM crash in nsk/jvmti/RedefineClasses/StressRedefine: assert failed: Corrupted constant pool In-Reply-To: <57081D59.9040909@oracle.com> References: <57081D59.9040909@oracle.com> Message-ID: <57085EDD.6010904@oracle.com> On 4/8/16 3:06 PM, Coleen Phillimore wrote: > Summary: ConstantPool::resolve_constant_at_impl() isn't thread safe > for MethodHandleInError and MethodTypeInError. > > Need to ignore the InError tag when fetching method_handle_index and > method_type_index. The error is cached after the call to > systemDictionary::link_method_handle_constant() if it's not there > already. > > Tested with rbt equivalent of nightly runs, and StressRedefine test > (reproduceable with this error) for >24 hours (also with 8151546 > fixed). Ran jdk/test/java/lang/invoke tests. I can't write a test > for this because it's too timing sensitive. > > open webrev at http://cr.openjdk.java.net/~coleenp/8148772.01/webrev I'm trying to get my head wrapped around this race... so the original failure mode looks like this: assert(tag_at(which).is_invoke_dynamic()) failed: Corrupted constant pool and the call stack looks like this: V [libjvm.so+0x7f1fe0] report_vm_error(char const*, int, char const*, char const*, ...)+0x60 V [libjvm.so+0x7e518b] ConstantPool::invoke_dynamic_name_and_type_ref_index_at(int)+0x3b V [libjvm.so+0x7dd18f] ConstantPool::impl_name_and_type_ref_index_at(int, bool)+0x15f V [libjvm.so+0x6a7363] ciBytecodeStream::get_method_signature_index()+0x4a3 and the crashing code looks like this: 517 int invoke_dynamic_name_and_type_ref_index_at(int which) { 518 assert(tag_at(which).is_invoke_dynamic(), "Corrupted constant pool"); 519 return extract_high_short_from_int(*int_at_addr(which)); 520 } The other crashes in the bug report are different and are in different places... I don't think I'm going to get there by looking at the reported crashes... OK, so the bug report has one line of analysis: > ConstantPool::resolve_constant_at_impl() isn't thread safe for > MethodHandleInError and MethodTypeInError. but resolve_constant_at_impl() isn't changed at all by the webrev. OK, this is starting to get frustrating... OK, so I go back to the code and look at it again... The constantPool.hpp changes are all about getting rid of the 'error_ok' parameter and getting rid of the _error_ok() function variants. I'm cool with all that code, but I don't see what it has to do with a data race in the constant pool... The constantPool.cpp changes are all about switching from the _error_ok() function variants to regular variants. And there's the new debug additions to invalid/default part of the case statement... I'm still not seeing it... So since the constantPool.cpp code that used to call the _error_ok() functions now call the regular functions that means that this race has to be in the original functions that took the error_ok parameter... so I look again and I just don't see how removing the error_ok parameter and its use in the asserts() solves this race. OK, it's late on a Friday and I'm just not getting what this fix is about... src/share/vm/oops/constantPool.hpp No comments. 
src/share/vm/oops/constantPool.cpp L1024: DEBUG_ONLY( tty->print_cr("*** %p: tag at CP[%d] = %d", L1025: this, index1, t1)); L1026: assert(false, "unexpected constant tag"); L1028: ShouldNotReachHere(); I agree with Chris that this should be merged into a fatal() call. Should the '%p' be a INTPTR_FORMAT? I have a vague memory about '%p' being problematic to get consistent across all platforms. I'll look at it again on Monday. For now my review is about style since I clearly don't understand this race nor how this fix solves it. Dan > bug link https://bugs.openjdk.java.net/browse/JDK-8148772 > > Thanks, > Coleen > > > > > From coleen.phillimore at oracle.com Sat Apr 9 13:02:43 2016 From: coleen.phillimore at oracle.com (Coleen Phillimore) Date: Sat, 9 Apr 2016 09:02:43 -0400 Subject: RFR (s) 8148772: VM crash in nsk/jvmti/RedefineClasses/StressRedefine: assert failed: Corrupted constant pool In-Reply-To: <6A70BF0D-3EF0-49B6-9111-E9A8DF654773@oracle.com> References: <57081D59.9040909@oracle.com> <6A70BF0D-3EF0-49B6-9111-E9A8DF654773@oracle.com> Message-ID: <5708FD73.10109@oracle.com> On 4/8/16 5:20 PM, Christian Thalinger wrote: > >> On Apr 8, 2016, at 11:06 AM, Coleen Phillimore >> > >> wrote: >> >> Summary: ConstantPool::resolve_constant_at_impl() isn't thread safe >> for MethodHandleInError and MethodTypeInError. >> >> Need to ignore the InError tag when fetching method_handle_index and >> method_type_index. The error is cached after the call to >> systemDictionary::link_method_handle_constant() if it's not there >> already. >> >> Tested with rbt equivalent of nightly runs, and StressRedefine test >> (reproduceable with this error) for >24 hours (also with 8151546 >> fixed). Ran jdk/test/java/lang/invoke tests. I can't write a test >> for this because it's too timing sensitive. >> >> open webrev at http://cr.openjdk.java.net/~coleenp/8148772.01/webrev >> > > default: > + DEBUG_ONLY( tty->print_cr("*** %p: tag at CP[%d] = %d", > + this, index1, t1)); > + assert(false, "unexpected constant tag"); > + > ShouldNotReachHere(); > break; > } > Merge the print_cr and assert into a fatal and remove > the ShouldNotReachHere. That was mistakenly left in debugging some other bug. I reverted this change. Thanks, Coleen > >> bug link https://bugs.openjdk.java.net/browse/JDK-8148772 >> >> Thanks, >> Coleen >> >> >> >> >> > From coleen.phillimore at oracle.com Sat Apr 9 13:02:58 2016 From: coleen.phillimore at oracle.com (Coleen Phillimore) Date: Sat, 9 Apr 2016 09:02:58 -0400 Subject: RFR (s) 8148772: VM crash in nsk/jvmti/RedefineClasses/StressRedefine: assert failed: Corrupted constant pool In-Reply-To: <57085854.5020306@oracle.com> References: <57081D59.9040909@oracle.com> <57085854.5020306@oracle.com> Message-ID: <5708FD82.4050300@oracle.com> Thanks Sergei, Coleen On 4/8/16 9:18 PM, serguei.spitsyn at oracle.com wrote: > Coleen, > > The fix looks good. > > Thanks, > Serguei > > > On 4/8/16 14:06, Coleen Phillimore wrote: >> Summary: ConstantPool::resolve_constant_at_impl() isn't thread safe >> for MethodHandleInError and MethodTypeInError. >> >> Need to ignore the InError tag when fetching method_handle_index and >> method_type_index. The error is cached after the call to >> systemDictionary::link_method_handle_constant() if it's not there >> already. >> >> Tested with rbt equivalent of nightly runs, and StressRedefine test >> (reproduceable with this error) for >24 hours (also with 8151546 >> fixed). Ran jdk/test/java/lang/invoke tests. 
I can't write a test >> for this because it's too timing sensitive. >> >> open webrev at http://cr.openjdk.java.net/~coleenp/8148772.01/webrev >> bug link https://bugs.openjdk.java.net/browse/JDK-8148772 >> >> Thanks, >> Coleen >> >> >> >> >> > From coleen.phillimore at oracle.com Sat Apr 9 13:05:05 2016 From: coleen.phillimore at oracle.com (Coleen Phillimore) Date: Sat, 9 Apr 2016 09:05:05 -0400 Subject: RFR (s) 8148772: VM crash in nsk/jvmti/RedefineClasses/StressRedefine: assert failed: Corrupted constant pool In-Reply-To: <57085EDD.6010904@oracle.com> References: <57081D59.9040909@oracle.com> <57085EDD.6010904@oracle.com> Message-ID: <5708FE01.9050309@oracle.com> Hi Dan, I tried to answer your questions in the comments of the bug so there'd be a record (at least for me). I wasn't very descriptive in my earlier comment, because fixing this bug was prelude to trying to fix another bug with this StressRedefine test case. On 4/8/16 9:46 PM, Daniel D. Daugherty wrote: > On 4/8/16 3:06 PM, Coleen Phillimore wrote: >> Summary: ConstantPool::resolve_constant_at_impl() isn't thread safe >> for MethodHandleInError and MethodTypeInError. >> >> Need to ignore the InError tag when fetching method_handle_index and >> method_type_index. The error is cached after the call to >> systemDictionary::link_method_handle_constant() if it's not there >> already. >> >> Tested with rbt equivalent of nightly runs, and StressRedefine test >> (reproduceable with this error) for >24 hours (also with 8151546 >> fixed). Ran jdk/test/java/lang/invoke tests. I can't write a test >> for this because it's too timing sensitive. >> >> open webrev at http://cr.openjdk.java.net/~coleenp/8148772.01/webrev > > I'm trying to get my head wrapped around this race... > so the original failure mode looks like this: > > assert(tag_at(which).is_invoke_dynamic()) failed: Corrupted > constant pool > > and the call stack looks like this: > > V [libjvm.so+0x7f1fe0] report_vm_error(char const*, int, char > const*, char const*, ...)+0x60 > V [libjvm.so+0x7e518b] > ConstantPool::invoke_dynamic_name_and_type_ref_index_at(int)+0x3b > V [libjvm.so+0x7dd18f] > ConstantPool::impl_name_and_type_ref_index_at(int, bool)+0x15f > V [libjvm.so+0x6a7363] > ciBytecodeStream::get_method_signature_index()+0x4a3 > > and the crashing code looks like this: > > 517 int invoke_dynamic_name_and_type_ref_index_at(int which) { > 518 assert(tag_at(which).is_invoke_dynamic(), "Corrupted constant > pool"); > 519 return extract_high_short_from_int(*int_at_addr(which)); > 520 } > > The other crashes in the bug report are different and are in > different places... I don't think I'm going to get there by > looking at the reported crashes... > > OK, so the bug report has one line of analysis: > > > ConstantPool::resolve_constant_at_impl() isn't thread safe for > > MethodHandleInError and MethodTypeInError. > > but resolve_constant_at_impl() isn't changed at all by the webrev. > OK, this is starting to get frustrating... > > OK, so I go back to the code and look at it again... > The constantPool.hpp changes are all about getting > rid of the 'error_ok' parameter and getting rid of > the _error_ok() function variants. I'm cool with all > that code, but I don't see what it has to do with a > data race in the constant pool... > > The constantPool.cpp changes are all about switching > from the _error_ok() function variants to regular > variants. And there's the new debug additions to > invalid/default part of the case statement... I'm > still not seeing it... 
> > So since the constantPool.cpp code that used to call > the _error_ok() functions now call the regular functions > that means that this race has to be in the original > functions that took the error_ok parameter... so I > look again and I just don't see how removing the > error_ok parameter and its use in the asserts() solves > this race. > > OK, it's late on a Friday and I'm just not getting > what this fix is about... > > src/share/vm/oops/constantPool.hpp > No comments. > > src/share/vm/oops/constantPool.cpp > L1024: DEBUG_ONLY( tty->print_cr("*** %p: tag at CP[%d] = %d", > L1025: this, index1, t1)); > L1026: assert(false, "unexpected constant tag"); > L1028: ShouldNotReachHere(); > I agree with Chris that this should be merged into > a fatal() call. Should the '%p' be a INTPTR_FORMAT? > I have a vague memory about '%p' being problematic > to get consistent across all platforms. I revered this change. Thanks, Coleen > > I'll look at it again on Monday. For now my review is > about style since I clearly don't understand this race > nor how this fix solves it. > > Dan > > > >> bug link https://bugs.openjdk.java.net/browse/JDK-8148772 >> >> Thanks, >> Coleen >> >> >> >> >> > From withoutpointk at gmail.com Sun Apr 10 21:02:22 2016 From: withoutpointk at gmail.com (Adrian) Date: Sun, 10 Apr 2016 17:02:22 -0400 Subject: Profiling interpreter vs. compiled time + proof of concept Message-ID: Hello, I am a student at the University of Toronto, working on a research project studying JVM performance with a focus on distributed systems. As part of the study, we want to know the time the JVM spends interpreting code, and the time it spends in compiled (JNI/native or jitted) code. For example, for a program repeating a task, you can see the interpreter time for each iteration decrease. A timer for interpreted and compiled code can be used to finely-grained see the performance of the jit (warmup time + improvement over interpreter). Interpreter -> native, and native -> interpreted calls are relatively easy to track and time as there are clear boundaries. However, it?s tricky to track transitions between interpreted and jitted code, especially with complications such as OSR and deoptimization. We have an implementation for the amd64 linux build, which is described below. Before we solved all the issues, we often encountered crashes. Of course, we are not JVM experts and may be missing other edge cases. Regardless, it has been working on our workloads such as reading on HDFS or running queries on Spark. As these are fairly complex workloads, we are confident it is successful at least as a proof of concept. We have found this instrumentation to have negligible overhead (less than the inherent variance between multiple runs). Furthermore, because most of the instrumentation is done in code blobs generated by the dynamic Assembler/MacroAssembler/InterpreterMacroAssembler, when instrumentation is disabled, it's essentially non-existent. Our modifications will be open sourced, but we wanted to know if OpenJDK developers would be interested in adding something like this in the future. I've attached a patch which should be up to date with the latest revision. 
I've also uploaded the patches and a prebuilt binary (linux amd64): http://research.dfr.moe/jvm_profiling/ You enable profiling with the flag -XX:+ProfileIntComp, and get the time in nanoseconds inside a Java program from a java.lang.Thread object: Thread th = Thread.currentThread(); th.resetIntCompTimes(); long a = th.getIntTime(); long b = th.getCompTime(); The times include a bit of JVM initialization (we have not found a perfect solution for this), so for short running programs you should call `resetIntCompTimes` at the start of `main`. Regarding the code, it is currently quite hacky because this is my and my team's first time really modifying the JVM. I was not familiar with the code structure, and tried to keep most of my additions in 1 file (sharedRuntime.cpp) instead of worrying about modularizing stuff. For example, thread local data is added to the JavaThread class, and the data is exposed in the java.lang.Thread class. If OpenJDK developers are interested in this, we are happy to continue working on this to make it consistent with the proper JVM structure (would need feedback/pointers). We are using it for our research and fixed many problems we encountered, but if anyone encounters anything or knows of any potential problems, any feedback would be greatly appreciated. Below is a high level description of the implementation (specific to linux 64 bit). --- The goal is to track transitions between interpreted, native, and jitted (standard + OSR compilation) code so we can precisely track the time the JVM spends in each type (currently, native and jitted code are lumped as "compiled" code, but it would be trivial to separate them), and the challenge is with jitted code. Interpreted calls pass all arguments on the stack, whereas the compiled calling convention uses 6 registers for integer/pointer arguments. There are adapters - generated in `sharedRuntime_x86_64.cpp`. Methods with the same signature share adapters, which move arguments from the stack into registers (i2c) or from registers onto the stack (c2i). It is easy to add instrumentation to these adapters to track a "call transition". When the callee returns, we need a "return transition" back to the caller's state. However, there is no existing place we can do this, as callees merely pop their stack frame and jump to the return address. Our solution is to save the "real return address" in the adapter, and replace the return address for the new frame with a "return handler" address (one for i2c, one for c2i). We also save the location of the return address (where on the stack/address in memory), for reasons explained later. This data is saved in the JavaThread object. We require a stack of return addresses as we could go i2c -> c2i -> i2c and so on. Because the last thing a compiled function does is pop the return address and jump to it, for i2c we end up in the "i2c return handler" and the stack pointer is 1 word above where the return address was. As a sanity check, we can verify it matches the "expected location" (what we saved earlier). We track the transition then jump to the "real/original return address" which we have also saved. When we first did this, we encountered many crashes. Many JVM operations rely on the return address to identify callers, such as: - figuring out the caller when a function is deoptimized (SharedRuntime::handle_wrong_method) - getting a call trace (java_lang_Throwable::fill_in_stack_trace) - finding ?activations? 
of compiled functions on the stack (NMethodSweeper::mark_active_nmethods) - checking permissions for AccessController (JVM_GetStackAccessControlContext) After identifying these "operations" which require examining the stack, we can "undo" all our changes to return addresses before any of it happens. We could use the `frame` code to walk the stack, but we also saved the return-address-locations earlier; we can replace each return address (which should now be an i2c/c2i return handler address) with the real/original return address. When we do this, there is no evidence our instrumentation took place. We redo all our changes after these operations are done so that when the thread continues executing, we'll continue tracking transitions. A lot of this happens in safepoints, e.g. marking active nmethods; we unpatch/repatch at the start and end of a safepoint to handle many scenarios. Java exceptions also need to be handled. If a function does not catch an exception, it has "finished executing"; we will pop the stack frame and see if the caller has a handler. It's necessary to check for a return handler address to track a transition back to the caller's state (interpreted/native/compiled), and also to identify the caller using the real/original return address. Lastly, deoptimization also requires careful treatment. The jit compiler does not necessarily compile all paths for a function. If it hits an unexpected path, it can end up in a ?deopt blob? (for C1 compiled functions) or ?uncommon trap blob? (for C2 compiled functions) - these are generated in `sharedRuntime_x86_64.cpp`. The two routines are the same to us - they replace the compiled frame with an interpreted one, and continue in the interpreter. If the deoptee?s caller was interpreted, we must have had an i2c transition (we should see the i2c return handler?s address on the stack). Since we?re executing the rest of the function as interpreted, it?s the same as if the callee returned to the interpreted caller. We need to replace the return address on the stack with the original/real one, and track a transition back to interpreted code - we simulate the "i2c return transition". If the deoptee?s caller was compiled, there was no transition earlier (it was c2c). However, we?re continuing execution as interpreted, and will eventually return to the compiled caller. We simulate a c2i call; we transition to interpreted code (as with an interpreted caller), but also save the current/real return address and replace the one on the stack with the c2i return handler's address. When the callee returns, we will track the transition back to compiled code. C2i return is actually more complicated if we want to do sanity checks. As explained above, we can check that the stack pointer is a sane value matching our recorded location for the return address. However, interpreter frames have both a base pointer (rbp) pointing to the "rsp on callee entry", and a "sender sp" (usually r13) for the "rsp on caller exit". A simple example of why this is required is the c2i adapter itself. Because it moves register-arguments onto the stack, it needs to allocate space on the stack. Sender sp/r13 points to the stack pointer at the start of the adapter before doing anything. Rbp points to the stack pointer on interpreted method entry, after the adapter has done the shuffling. The return address is 1 word above rbp. 
When an interpreted method returns, it does something like: # restore callee saved registers mov rsp <- rbp (reset frame) pop rbp (restore base pointer) pop r11 (return address into some temporary register - r11 is caller saved) mov rsp <- r13 (restore stack pointer to sender sp - r13 is callee saved and would have been restored above) jmp r11 (jump to return address) Therefore, by the time you end up in the c2i return handler, you don't know where the return address was. To verify the return address location, we had to find all places the interpreter implemented a method return (there are various cases), and manually check the return address right after picking it up from the stack, before rsp is set to the sender sp. --- I found code for JVMTI where it gets events for every interpreted and native entry. As a sanity check, enabled by the flag -XX:+ProfileIntCompStrict, on every interpreted method entry, it checks if the state recorded actually is interpreted. There are some Java functions the JVM specifically makes calls to (a lot of stuff related to classloading, e.g. `ClassLoader#checkPackageAccess`). These n2i transitions are manually tracked. We found these locations with the sanity check above. We do not see this check failing in our workloads currently. --- Hopefully this high level overview makes sense. Comments in the code give more details regarding specific scenarios, such as handling OSR (which is conceptually similar). I?d be happy to answer any questions or explain anything. Any feedback is appreciated. Thank you for your time! Best regards, Adrian From rickard.backman at oracle.com Mon Apr 11 09:05:01 2016 From: rickard.backman at oracle.com (Rickard =?iso-8859-1?Q?B=E4ckman?=) Date: Mon, 11 Apr 2016 11:05:01 +0200 Subject: RFR (XL): 8152664 - Support non-continuous CodeBlobs in HotSpot In-Reply-To: References: <20160407121221.GQ9504@rbackman> Message-ID: <20160411090501.GS9504@rbackman> Volker, thanks for finding this issue. I think that maybe the easiest fix is as follows: create new virtual methods in CompiledMethod: virtual address stub_begin_v() = 0; make the now virtual stub_begin non-virtual like: address stub_begin() { return stub_begin_v(); } in nmethod we override the stub_begin() with the normal this + offset compuation and implement stub_begin_v() to call stub_begin(). That will avoid all virtual calls in the case were we are not working on a CompiledMethod. It adds a couple of methods though. What do you think? /R On 04/08, Volker Simonis wrote: > Hi Rickard, > > I found the problem why your change crashes the VM on ppc (and I'm pretty > sure it will also crash on ARM - @Andrew, maybe you can try it out?). It is > caused by the following code in address NativeCall::get_trampoline() which > is also present on arm64: > > address NativeCall::get_trampoline() { > address call_addr = addr_at(0); > CodeBlob *code = CodeCache::find_blob(call_addr); > ... > // If the codeBlob is not a nmethod, this is because we get here from the > // CodeBlob constructor, which is called within the nmethod constructor. 
> return trampoline_stub_Relocation::get_trampoline_for(call_addr, > (nmethod*)code); > } > > The comment explains the situation quite well: we're in the CodeBlob > constructor which was called by the CompiledMethod constructor which was > called from the nmethod constructor: > > #3 0x00003fffb741b80c in NativeCall::get_trampoline (this=0x3fff607d0fac) > at > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/cpu/ppc/vm/nativeInst_ppc.cpp:146 > #4 0x00003fffb7596914 in Relocation::pd_call_destination > (this=0x3ffdfe3fcc90, orig_addr=0x3fff603b8a2c "\001") at > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/cpu/ppc/vm/relocInfo_ppc.cpp:87 > #5 0x00003fffb758f5fc in CallRelocation::fix_relocation_after_move > (this=0x3ffdfe3fcc90, src=0x3ffdfe3fdb40, dest=0x3ffdfe3fcd58) at > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.cpp:506 > #6 0x00003fffb6c48898 in CodeBuffer::relocate_code_to > (this=0x3ffdfe3fdb40, dest=0x3ffdfe3fcd58) at > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/asm/codeBuffer.cpp:812 > #7 0x00003fffb6c48404 in CodeBuffer::copy_code_to (this=0x3ffdfe3fdb40, > dest_blob=0x3fff607d0c10) at > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/asm/codeBuffer.cpp:748 > #8 0x00003fffb6c42670 in CodeBuffer::copy_code_and_locs_to > (this=0x3ffdfe3fdb40, blob=0x3fff607d0c10) at > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/asm/codeBuffer.hpp:607 > #9 0x00003fffb6c3f834 in CodeBlob::CodeBlob (this=0x3fff607d0c10, > name=0x3fffb7a75fd8 "nmethod", layout=..., cb=0x3ffdfe3fdb40, > frame_complete_offset=20, frame_size=14, oop_maps=0x3ffe00049620, > caller_must_gc_arguments=false, subtype=8) at > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/codeBlob.cpp:117 > #10 0x00003fffb6ce52c8 in CompiledMethod::CompiledMethod > (this=0x3fff607d0c10, method=0x3ffe1ddce568, name=0x3fffb7a75fd8 "nmethod", > size=1768, header_size=392, cb=0x3ffdfe3fdb40, frame_complete_offset=20, > frame_size=14, oop_maps=0x3ffe00049620, caller_must_gc_arguments=false) at > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/compiledMethod.cpp:42 > #11 0x00003fffb7421f58 in nmethod::nmethod (this=0x3fff607d0c10, > method=0x3ffe1ddce568, nmethod_size=1768, compile_id=4, entry_bci=-1, > offsets=0x3ffdfe3fdb18, orig_pc_offset=104, debug_info=0x3fffb03d55f0, > dependencies=0x3ffe00049690, code_buffer=0x3ffdfe3fdb40, frame_size=14, > oop_maps=0x3ffe00049620, handler_table=0x3ffdfe3fdad0, > nul_chk_table=0x3ffdfe3fdaf0, compiler=0x3fffb03bc270, comp_level=3) at > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/nmethod.cpp:706 > > Now we cast 'code' to 'nmethod' but at this point in time 'code' is still a > CodeBlob from the C++ point of view (i.e. it still has a CodeBlob vtable > (see [1] for an explanation)). 
> > Later on, in RelocIterator::initialize() we call virtual methods on the > nmethod which still has the vtable of a "CodeBlob" and this fails badly: > > #0 SingletonBlob::print_on (this=0x3fff607d0c10, st=0x0) at > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/codeBlob.cpp:584 > #1 0x00003fffb758d51c in RelocIterator::initialize (this=0x3ffdfe3fc928, > nm=0x3fff607d0c10, begin=0x3fff607d0fac "\001", limit=0x0) at > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.cpp:144 > #2 0x00003fffb6ace56c in RelocIterator::RelocIterator > (this=0x3ffdfe3fc928, nm=0x3fff607d0c10, begin=0x3fff607d0fac "\001", > limit=0x0) at > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.hpp:1378 > #3 0x00003fffb75919dc in trampoline_stub_Relocation::get_trampoline_for > (call=0x3fff607d0fac "\001", code=0x3fff607d0c10) at > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.cpp:849 > #4 0x00003fffb741b80c in NativeCall::get_trampoline (this=0x3fff607d0fac) > at > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/cpu/ppc/vm/nativeInst_ppc.cpp:146 > > As you can see, we actually want to call nmethod::stub_begin() at > relocInfo.cpp:144 > > 142 _section_start[CodeBuffer::SECT_CONSTS] = nm->consts_begin(); > 143 _section_start[CodeBuffer::SECT_INSTS ] = nm->insts_begin() ; > 144 _section_start[CodeBuffer::SECT_STUBS ] = nm->stub_begin() ; > > but we actually end up in SingletonBlob::print_on() which is a completely > different method. Notice that the call to nm->consts_begin() before also > fails, but it doesn't crash the VM because it happens to call > SingletonBlob::verify() which has no bad side effect. The call to > nm->insts_begin() in line 143 is non-virtual and thus works fine. Here are > the corresponding vtable slots in the CodeBlob vtable for consts_begin() > and stub_begin() > > (gdb) p &nmethod::consts_begin > $76 = &virtual table offset 42 > (gdb) p &nmethod::stub_begin > $77 = &virtual table offset 44 > (gdb) p ((*(void ***)nm) + 1)[42] > $86 = (void *) 0x3fffb6c41df8 > (gdb) p ((*(void ***)nm) + 1)[44] > $87 = (void *) 0x3fffb6c41e64 > > As you can see, 'nm' is indeed a "CodeBlob" at this point: > > (gdb) p *(void ***)nm > $91 = (void **) 0x3fffb7befa00 > (gdb) p nm->print() > [CodeBlob (0x00003fff607d1090)] > Framesize: 14 > > The offending calls succeeded before your change, because they where not > virtual. Any idea how we can fix this with the new class hierarchy? > > Regards, > Volker > > [1] > http://stackoverflow.com/questions/6591859/when-does-the-vptr-pointing-to-vtable-get-initialized-for-a-polymorphic-class > > > > On Thu, Apr 7, 2016 at 5:50 PM, Volker Simonis > wrote: > > > Hi Rickard, > > > > I'd also like to know what's the rational behind this quite large > > change. Do you expect some performance or memory consumption > > improvements or is this a prerequisite for another change which is > > still to come? > > > > The change itself currently doesn't work on ppc64 (neither on Linux > > nor on AIX). 
I get the following crash during the build when the newly > > built Hotspot is JIT-compiling java.lang.String::charAt on C1 : > > > > # > > # A fatal error has been detected by the Java Runtime Environment: > > # > > # SIGSEGV (0xb) at pc=0x00001000012a44d0, pid=35331, tid=35404 > > # > > # JRE version: OpenJDK Runtime Environment (9.0) (slowdebug build > > 9-internal+0-2016-04-07-162501.d046063.jdk9-hs-comp) > > # Java VM: OpenJDK 64-Bit Server VM (slowdebug > > 9-internal+0-2016-04-07-162501.d046063.jdk9-hs-comp, mixed mode, > > tiered, compressed oo > > ps, serial gc, linux-ppc64le) > > # Problematic frame: > > # V [libjvm.so+0xf744d0] outputStream::do_vsnprintf_and_write(char > > const*, char*, bool)+0x40 > > # > > # No core dump will be written. Core dumps have been disabled. To > > enable core dumping, try "ulimit -c unlimited" before starting Java > > again > > # > > # If you would like to submit a bug report, please visit: > > # http://bugreport.java.com/bugreport/crash.jsp > > # > > > > --------------- S U M M A R Y ------------ > > > > Command Line: > > -Dapplication.home=/sapmnt/ld9510/a/d046063/output-jdk9-hs-comp-dbg/jdk > > -Xms8m -XX:+UseSerialGC -Xms32M -Xmx512M -Djdk. > > module.main=jdk.jlink jdk.jlink/jdk.tools.jmod.Main create > > --module-version 9-internal --os-name Linux --os-arch ppc64le > > --os-version > > 2.6 --modulepath /priv/d046063/output-jdk9-hs-comp-dbg/images/jmods > > --hash-dependencies .* --exclude **_the.* --libs > > > > /priv/d046063/output-jdk9-hs-comp-dbg/support/modules_libs-stripped/java.base > > --cmds > > /priv/d046063/output-jdk9-hs-comp-dbg/support/modules_cmds-stripped/java.base > > --config > > /priv/d046063/output-jdk9-hs-comp-dbg/support/modules_conf/java.base > > --class-path /priv/d046063/output-jdk9-hs-comp-dbg/jdk/modules/java.base > > /priv/d046063/output-jdk9-hs-comp-dbg/support/jmods/java.base.jmod > > > > Host: ld9510, POWER8E (raw), altivec supported, 48 cores, 61G, # > > Please check /etc/os-release for details about this release. 
> > Time: Thu Apr 7 16:28:55 2016 CEST elapsed time: 0 seconds (0d 0h 0m 0s) > > > > --------------- T H R E A D --------------- > > > > Current thread (0x000010000429c800): JavaThread "C1 CompilerThread10" > > daemon [_thread_in_vm, id=35404, > > stack(0x000010006a800000,0x000010006ac00000)] > > > > > > Current CompileTask: > > C1: 761 3 3 java.lang.String::charAt (25 bytes) > > > > Stack: [0x000010006a800000,0x000010006ac00000], > > sp=0x000010006abfc6c0, free space=4081k > > Native frames: (J=compiled Java code, j=interpreted, Vv=VM code, C=native > > code) > > V [libjvm.so+0xf744d0] outputStream::do_vsnprintf_and_write(char > > const*, char*, bool)+0x40 > > V [libjvm.so+0xf74668] outputStream::print_cr(char const*, ...)+0x68 > > V [libjvm.so+0x72189c] CodeBlob::print_on(outputStream*) const+0x50 > > V [libjvm.so+0x723bdc] RuntimeBlob::print_on(outputStream*) const+0x40 > > V [libjvm.so+0x721eb0] SingletonBlob::print_on(outputStream*) const+0x4c > > V [libjvm.so+0x106d51c] RelocIterator::initialize(CompiledMethod*, > > unsigned char*, unsigned char*)+0x170 > > V [libjvm.so+0x5ae56c] RelocIterator::RelocIterator(CompiledMethod*, > > unsigned char*, unsigned char*)+0x78 > > V [libjvm.so+0x10719dc] > > trampoline_stub_Relocation::get_trampoline_for(unsigned char*, > > nmethod*)+0x78 > > V [libjvm.so+0xefb80c] NativeCall::get_trampoline()+0x110 > > V [libjvm.so+0x1076914] Relocation::pd_call_destination(unsigned > > char*)+0x150 > > V [libjvm.so+0x106f5fc] > > CallRelocation::fix_relocation_after_move(CodeBuffer const*, > > CodeBuffer*)+0x74 > > V [libjvm.so+0x728898] CodeBuffer::relocate_code_to(CodeBuffer*) > > const+0x390 > > V [libjvm.so+0x728404] CodeBuffer::copy_code_to(CodeBlob*)+0x134 > > V [libjvm.so+0x722670] CodeBuffer::copy_code_and_locs_to(CodeBlob*)+0x84 > > V [libjvm.so+0x71f834] CodeBlob::CodeBlob(char const*, > > CodeBlobLayout const&, CodeBuffer*, int, int, OopMapSet*, bool, > > int)+0x320 > > V [libjvm.so+0x7c52c8] CompiledMethod::CompiledMethod(Method*, char > > const*, int, int, CodeBuffer*, int, int, OopMapSet*, bool)+0xd8 > > V [libjvm.so+0xf01f58] nmethod::nmethod(Method*, int, int, int, > > CodeOffsets*, int, DebugInformationRecorder*, Dependencies*, > > CodeBuffer*, int, OopMapSet*, ExceptionHandlerTable*, > > ImplicitExceptionTable*, AbstractCompiler*, int)+0xe0 > > V [libjvm.so+0xf01610] nmethod::new_nmethod(methodHandle const&, > > int, int, CodeOffsets*, int, DebugInformationRecorder*, Dependencies*, > > CodeBuffer*, int, OopMapSet*, ExceptionHandlerTable*, > > ImplicitExceptionTable*, AbstractCompiler*, int)+0x2c4 > > V [libjvm.so+0x632970] ciEnv::register_method(ciMethod*, int, > > CodeOffsets*, int, CodeBuffer*, int, OopMapSet*, > > ExceptionHandlerTable*, ImplicitExceptionTable*, AbstractCompiler*, > > bool, bool, RTMState)+0x560 > > V [libjvm.so+0x48ee00] Compilation::install_code(int)+0x264 > > V [libjvm.so+0x48eff8] Compilation::compile_method()+0x184 > > V [libjvm.so+0x48f7a8] Compilation::Compilation(AbstractCompiler*, > > ciEnv*, ciMethod*, int, BufferBlob*, DirectiveSet*)+0x288 > > V [libjvm.so+0x4980d0] Compiler::compile_method(ciEnv*, ciMethod*, > > int, DirectiveSet*)+0xc8 > > V [libjvm.so+0x7b188c] > > CompileBroker::invoke_compiler_on_method(CompileTask*)+0x590 > > V [libjvm.so+0x7b07bc] CompileBroker::compiler_thread_loop()+0x310 > > V [libjvm.so+0x11a614c] compiler_thread_entry(JavaThread*, Thread*)+0xa0 > > V [libjvm.so+0x119f3a8] JavaThread::thread_main_inner()+0x1b4 > > V [libjvm.so+0x119f1a4] JavaThread::run()+0x1b8 > > V 
[libjvm.so+0xf53d90] java_start(Thread*)+0x204 > > C [libpthread.so.0+0x8a64] start_thread+0xf4 > > C [libc.so.6+0x1032a0] clone+0x98 > > > > I haven't identified the exact cause (will analyze it tomorrow) but > > the stack trace indicates that it is indeed related to your changes. > > > > Besides that I have some comments: > > > > codeBuffer.hpp: > > > > 472 CodeSection* insts() { return &_insts; } > > 475 const CodeSection* insts() const { return &_insts; } > > > > - do we really need both versions? > > > > codeBlob.hpp: > > > > 135 nmethod* as_nmethod_or_null() const { return > > is_nmethod() ? (nmethod*) this : NULL; } > > 136 nmethod* as_nmethod() const { > > assert(is_nmethod(), "must be nmethod"); return (nmethod*) this; } > > 137 CompiledMethod* as_compiled_method_or_null() const { return > > is_compiled() ? (CompiledMethod*) this : NULL; } > > 138 CompiledMethod* as_compiled_method() const { > > assert(is_compiled(), "must be compiled"); return (CompiledMethod*) > > this; } > > 139 CodeBlob* as_codeblob_or_null() const { return > > (CodeBlob*) this; } > > > > - I don't like this code. You make the getters 'const' which > > implicitely makes 'this' a "pointer to const" but then the returned > > pointer is a normal pointer to a non-const object and therefore you > > have to statically cast away the "pointer to const" (that's why you > > need the cast even in the case where you return a CodeBlob*). So > > either remove the const qualifier from the method declarations or make > > them return "pointers to const". And by the way, as_codeblob_or_null() > > doesn't seemed to be used anywhere in the code, why do we need it at > > all? > > > > - Why do we need the non-virtual methods is_nmethod() and > > is_compiled() to manually simulate virtual behavior. Why can't we > > simply make them virtual and implement them accordingly in nmathod and > > CompiledMethod? > > > > Regards, > > Volker > > > > On Thu, Apr 7, 2016 at 2:12 PM, Rickard B?ckman > > wrote: > > > Hi, > > > > > > can I please have review for this patch please? > > > > > > So far CodeBlobs have required all the data (metadata, oops, code, etc) > > > to be in one continuous blob With this patch we are looking to change > > > that. It's been done by changing offsets in CodeBlob to addresses, > > > making some methods virtual to allow different behavior and also > > > creating a couple of new classes. CompiledMethod now sits inbetween > > > CodeBlob and nmethod. > > > > > > CR: https://bugs.openjdk.java.net/browse/JDK-8152664 > > > Webrev: http://cr.openjdk.java.net/~rbackman/8152664/ > > > > > > Thanks > > > /R > > From rickard.backman at oracle.com Mon Apr 11 09:12:21 2016 From: rickard.backman at oracle.com (Rickard =?iso-8859-1?Q?B=E4ckman?=) Date: Mon, 11 Apr 2016 11:12:21 +0200 Subject: RFR (XL): 8152664 - Support non-continuous CodeBlobs in HotSpot In-Reply-To: References: <20160407121221.GQ9504@rbackman> Message-ID: <20160411091221.GT9504@rbackman> On 04/07, Volker Simonis wrote: > Hi Rickard, > > I'd also like to know what's the rational behind this quite large > change. Do you expect some performance or memory consumption > improvements or is this a prerequisite for another change which is > still to come? > > The change itself currently doesn't work on ppc64 (neither on Linux > nor on AIX). 
I get the following crash during the build when the newly > built Hotspot is JIT-compiling java.lang.String::charAt on C1 : > > # > # A fatal error has been detected by the Java Runtime Environment: > # > # SIGSEGV (0xb) at pc=0x00001000012a44d0, pid=35331, tid=35404 > # > # JRE version: OpenJDK Runtime Environment (9.0) (slowdebug build > 9-internal+0-2016-04-07-162501.d046063.jdk9-hs-comp) > # Java VM: OpenJDK 64-Bit Server VM (slowdebug > 9-internal+0-2016-04-07-162501.d046063.jdk9-hs-comp, mixed mode, > tiered, compressed oo > ps, serial gc, linux-ppc64le) > # Problematic frame: > # V [libjvm.so+0xf744d0] outputStream::do_vsnprintf_and_write(char > const*, char*, bool)+0x40 > # > # No core dump will be written. Core dumps have been disabled. To > enable core dumping, try "ulimit -c unlimited" before starting Java > again > # > # If you would like to submit a bug report, please visit: > # http://bugreport.java.com/bugreport/crash.jsp > # > > --------------- S U M M A R Y ------------ > > Command Line: -Dapplication.home=/sapmnt/ld9510/a/d046063/output-jdk9-hs-comp-dbg/jdk > -Xms8m -XX:+UseSerialGC -Xms32M -Xmx512M -Djdk. > module.main=jdk.jlink jdk.jlink/jdk.tools.jmod.Main create > --module-version 9-internal --os-name Linux --os-arch ppc64le > --os-version > 2.6 --modulepath /priv/d046063/output-jdk9-hs-comp-dbg/images/jmods > --hash-dependencies .* --exclude **_the.* --libs > /priv/d046063/output-jdk9-hs-comp-dbg/support/modules_libs-stripped/java.base > --cmds /priv/d046063/output-jdk9-hs-comp-dbg/support/modules_cmds-stripped/java.base > --config /priv/d046063/output-jdk9-hs-comp-dbg/support/modules_conf/java.base > --class-path /priv/d046063/output-jdk9-hs-comp-dbg/jdk/modules/java.base > /priv/d046063/output-jdk9-hs-comp-dbg/support/jmods/java.base.jmod > > Host: ld9510, POWER8E (raw), altivec supported, 48 cores, 61G, # > Please check /etc/os-release for details about this release. 
> Time: Thu Apr 7 16:28:55 2016 CEST elapsed time: 0 seconds (0d 0h 0m 0s) > > --------------- T H R E A D --------------- > > Current thread (0x000010000429c800): JavaThread "C1 CompilerThread10" > daemon [_thread_in_vm, id=35404, > stack(0x000010006a800000,0x000010006ac00000)] > > > Current CompileTask: > C1: 761 3 3 java.lang.String::charAt (25 bytes) > > Stack: [0x000010006a800000,0x000010006ac00000], > sp=0x000010006abfc6c0, free space=4081k > Native frames: (J=compiled Java code, j=interpreted, Vv=VM code, C=native code) > V [libjvm.so+0xf744d0] outputStream::do_vsnprintf_and_write(char > const*, char*, bool)+0x40 > V [libjvm.so+0xf74668] outputStream::print_cr(char const*, ...)+0x68 > V [libjvm.so+0x72189c] CodeBlob::print_on(outputStream*) const+0x50 > V [libjvm.so+0x723bdc] RuntimeBlob::print_on(outputStream*) const+0x40 > V [libjvm.so+0x721eb0] SingletonBlob::print_on(outputStream*) const+0x4c > V [libjvm.so+0x106d51c] RelocIterator::initialize(CompiledMethod*, > unsigned char*, unsigned char*)+0x170 > V [libjvm.so+0x5ae56c] RelocIterator::RelocIterator(CompiledMethod*, > unsigned char*, unsigned char*)+0x78 > V [libjvm.so+0x10719dc] > trampoline_stub_Relocation::get_trampoline_for(unsigned char*, > nmethod*)+0x78 > V [libjvm.so+0xefb80c] NativeCall::get_trampoline()+0x110 > V [libjvm.so+0x1076914] Relocation::pd_call_destination(unsigned char*)+0x150 > V [libjvm.so+0x106f5fc] > CallRelocation::fix_relocation_after_move(CodeBuffer const*, > CodeBuffer*)+0x74 > V [libjvm.so+0x728898] CodeBuffer::relocate_code_to(CodeBuffer*) const+0x390 > V [libjvm.so+0x728404] CodeBuffer::copy_code_to(CodeBlob*)+0x134 > V [libjvm.so+0x722670] CodeBuffer::copy_code_and_locs_to(CodeBlob*)+0x84 > V [libjvm.so+0x71f834] CodeBlob::CodeBlob(char const*, > CodeBlobLayout const&, CodeBuffer*, int, int, OopMapSet*, bool, > int)+0x320 > V [libjvm.so+0x7c52c8] CompiledMethod::CompiledMethod(Method*, char > const*, int, int, CodeBuffer*, int, int, OopMapSet*, bool)+0xd8 > V [libjvm.so+0xf01f58] nmethod::nmethod(Method*, int, int, int, > CodeOffsets*, int, DebugInformationRecorder*, Dependencies*, > CodeBuffer*, int, OopMapSet*, ExceptionHandlerTable*, > ImplicitExceptionTable*, AbstractCompiler*, int)+0xe0 > V [libjvm.so+0xf01610] nmethod::new_nmethod(methodHandle const&, > int, int, CodeOffsets*, int, DebugInformationRecorder*, Dependencies*, > CodeBuffer*, int, OopMapSet*, ExceptionHandlerTable*, > ImplicitExceptionTable*, AbstractCompiler*, int)+0x2c4 > V [libjvm.so+0x632970] ciEnv::register_method(ciMethod*, int, > CodeOffsets*, int, CodeBuffer*, int, OopMapSet*, > ExceptionHandlerTable*, ImplicitExceptionTable*, AbstractCompiler*, > bool, bool, RTMState)+0x560 > V [libjvm.so+0x48ee00] Compilation::install_code(int)+0x264 > V [libjvm.so+0x48eff8] Compilation::compile_method()+0x184 > V [libjvm.so+0x48f7a8] Compilation::Compilation(AbstractCompiler*, > ciEnv*, ciMethod*, int, BufferBlob*, DirectiveSet*)+0x288 > V [libjvm.so+0x4980d0] Compiler::compile_method(ciEnv*, ciMethod*, > int, DirectiveSet*)+0xc8 > V [libjvm.so+0x7b188c] > CompileBroker::invoke_compiler_on_method(CompileTask*)+0x590 > V [libjvm.so+0x7b07bc] CompileBroker::compiler_thread_loop()+0x310 > V [libjvm.so+0x11a614c] compiler_thread_entry(JavaThread*, Thread*)+0xa0 > V [libjvm.so+0x119f3a8] JavaThread::thread_main_inner()+0x1b4 > V [libjvm.so+0x119f1a4] JavaThread::run()+0x1b8 > V [libjvm.so+0xf53d90] java_start(Thread*)+0x204 > C [libpthread.so.0+0x8a64] start_thread+0xf4 > C [libc.so.6+0x1032a0] clone+0x98 > > I haven't identified the 
exact cause (will analyze it tomorrow) but > the stack trace indicates that it is indeed related to your changes. > > Besides that I have some comments: > > codeBuffer.hpp: > > 472 CodeSection* insts() { return &_insts; } > 475 const CodeSection* insts() const { return &_insts; } > > - do we really need both versions? Really need? No. But there would be a ripple effect of removing const from a couple of places. If you really disagree with having both of them I can make it happen. > > codeBlob.hpp: > > 135 nmethod* as_nmethod_or_null() const { return > is_nmethod() ? (nmethod*) this : NULL; } > 136 nmethod* as_nmethod() const { > assert(is_nmethod(), "must be nmethod"); return (nmethod*) this; } > 137 CompiledMethod* as_compiled_method_or_null() const { return > is_compiled() ? (CompiledMethod*) this : NULL; } > 138 CompiledMethod* as_compiled_method() const { > assert(is_compiled(), "must be compiled"); return (CompiledMethod*) > this; } > 139 CodeBlob* as_codeblob_or_null() const { return > (CodeBlob*) this; } > > - I don't like this code. You make the getters 'const' which > implicitely makes 'this' a "pointer to const" but then the returned > pointer is a normal pointer to a non-const object and therefore you > have to statically cast away the "pointer to const" (that's why you > need the cast even in the case where you return a CodeBlob*). So > either remove the const qualifier from the method declarations or make > them return "pointers to const". And by the way, as_codeblob_or_null() > doesn't seemed to be used anywhere in the code, why do we need it at > all? You are right. I removed const from these methods. > > - Why do we need the non-virtual methods is_nmethod() and > is_compiled() to manually simulate virtual behavior. Why can't we > simply make them virtual and implement them accordingly in nmathod and > CompiledMethod? When we made the changes and did performance measures we noticed that there were now more calls to is_compiled() and is_nmethod() then there used to be. In certain paths it made an impact on performance so we tried this way instead. I can try both versions again and see if the numbers matters. > > Regards, > Volker > > On Thu, Apr 7, 2016 at 2:12 PM, Rickard B?ckman > wrote: > > Hi, > > > > can I please have review for this patch please? > > > > So far CodeBlobs have required all the data (metadata, oops, code, etc) > > to be in one continuous blob With this patch we are looking to change > > that. It's been done by changing offsets in CodeBlob to addresses, > > making some methods virtual to allow different behavior and also > > creating a couple of new classes. CompiledMethod now sits inbetween > > CodeBlob and nmethod. > > > > CR: https://bugs.openjdk.java.net/browse/JDK-8152664 > > Webrev: http://cr.openjdk.java.net/~rbackman/8152664/ > > > > Thanks > > /R /R From stefan.karlsson at oracle.com Mon Apr 11 09:19:21 2016 From: stefan.karlsson at oracle.com (Stefan Karlsson) Date: Mon, 11 Apr 2016 11:19:21 +0200 Subject: RFR: 8153659: Create a CHeap backed LogStream class In-Reply-To: <57068F87.5050607@oracle.com> References: <57054D6A.8030405@oracle.com> <570644DD.3070304@oracle.com> <57068F87.5050607@oracle.com> Message-ID: <570B6C19.5040507@oracle.com> Hi all, The last suggestion to move stringStreamWithResourceMark into ostream.hpp causes include circularities in the latest hs-rt code. I'd like to proceed with webrev.02 for now. 
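For readers following along: as described in the quoted mail below, stringStreamWithResourceMark is essentially a stringStream bundled with its own ResourceMark, so the resource-allocated buffer is released when the stream itself goes out of scope. A minimal sketch of that idea -- the member names and constructor signature here are guesses, not the code from the webrev:

  // Sketch only; assumes HotSpot's outputStream/stringStream
  // (utilities/ostream.hpp) and ResourceMark (memory/resourceArea.hpp).
  class stringStreamWithResourceMark : public outputStream {
   private:
    // Declared before the stream, so the stream's buffer is allocated
    // (and later released) under this mark.
    ResourceMark _embedded_resource_mark;
    stringStream _stream;

   public:
    stringStreamWithResourceMark(size_t initial_bufsize = 256)
      : _embedded_resource_mark(), _stream(initial_bufsize) {}

    virtual void write(const char* c, size_t len) { _stream.write(c, len); }
    const char* as_string()                       { return _stream.as_string(); }
  };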
Thanks, StefanK On 2016-04-07 18:49, Stefan Karlsson wrote: > Hi again, > > I decided to fix the resourceArea.hpp problem, so that I could move > the stringStreamWithResourceMark class into ostream.hpp. > > http://cr.openjdk.java.net/~stefank/8153659/webrev.03.delta > http://cr.openjdk.java.net/~stefank/8153659/webrev.03 > > The patch is applied on top of the thread.inline.hpp patch in: > http://mail.openjdk.java.net/pipermail/hotspot-dev/2016-April/022511.html > > Thanks > StefanK > > On 2016-04-07 13:30, Stefan Karlsson wrote: >> Hi all, >> >> I've updated the patch: >> http://cr.openjdk.java.net/~stefank/8153659/webrev.02 >> >> The previous patch created the embedded ResourceMark after the >> stringStream instance was created. I discussed the layout of the >> classes with Bengt, and have decided to restructure this patch. I've >> changed the code so that the ResourceMark is embedded in a new >> stringStreamWithResourceMark class. This allows me to use the same >> LogStreamBase class, but different stringClass template parameters, >> for all three classes. >> >> I've put the stringStreamWithResourceMark class in logStream.hpp >> instead of ostream.hpp, to prevent the include of resourceArea.hpp to >> propagate through the ostream.hpp header. The resourceArea.hpp file >> is problematic, since it includes and uses thread.inline.hpp. The >> alternative would be to move the implementation of resourceArea.hpp >> into a resource.inline.hpp file, so that header files could create >> ResourceMark instances, without having to include thread.inline.hpp. >> I'm leaving that exercise for another RFE. >> >> Thanks, >> StefanK >> >> On 2016-04-06 19:54, Stefan Karlsson wrote: >>> Hi all, >>> >>> Please review this patch to add a LogStream class that allocates its >>> backing buffer from CHeap memory instead of Resource memory. >>> >>> http://cr.openjdk.java.net/~stefank/8153659/webrev.01 >>> https://bugs.openjdk.java.net/browse/JDK-8153659 >>> >>> The main motivation for this is that we can't use Resource allocated >>> memory during initialization, until Thread::current() has been >>> initialized. So, a CHeap backed LogStream is desirable when we >>> execute, for example, the following code during large pages >>> initialization: >>> >>> void os::trace_page_sizes(const char* str, const size_t* page_sizes, >>> int count) >>> { >>> if (TracePageSizes) { >>> tty->print("%s: ", str); >>> for (int i = 0; i < count; ++i) { >>> tty->print(" " SIZE_FORMAT, page_sizes[i]); >>> } >>> tty->cr(); >>> } >>> } >>> >>> The patch restructures the code and creates a LogStreamBase template >>> base class, which takes the backing outputStream class as a template >>> parameter. We then have three concrete LogStream classes: >>> >>> LogStream - Buffer resource allocated with an embedded ResourceMark >>> LogStreamNoResourceMark - Buffer resource allocated without an >>> embedded ResourceMark >>> LogStreamCHeap - Buffer CHeap allocated >>> >>> I moved the LogStream class from the logStream.inline.hpp file to >>> logStream.hpp, for consistency. If that's causing problems while >>> reviewing this, I can move it in a separate patch. >>> >>> Tested with JPRT with the TracePageSizes patch ( JDK-8152491) and >>> internal VM tests. 
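To make the template layering described above concrete, here is a rough sketch of a log stream base class parameterized on its backing stream type. The typedef names follow the description in the mail; the class body and the flush step are assumptions, not code from the webrev:

  // Sketch only: buffer characters in a backing stream whose type decides
  // how that buffer is allocated.
  template <class streamClass>
  class LogStreamBase : public outputStream {
   private:
    streamClass _current_line;   // resource- or CHeap-backed, per streamClass

   public:
    virtual void write(const char* s, size_t len) {
      // Collect output here; a completed line would be handed to the
      // logging framework and the buffer reset (omitted in this sketch).
      _current_line.write(s, len);
    }
  };

  // The three concrete streams then differ only in the backing buffer type:
  typedef LogStreamBase<stringStreamWithResourceMark> LogStream;
  typedef LogStreamBase<stringStream>                 LogStreamNoResourceMark;
  typedef LogStreamBase<bufferedStream>               LogStreamCHeap;  // CHeap (malloc) backed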
>>> >>> Thanks, >>> StefanK >> > From marcus.larsson at oracle.com Mon Apr 11 09:24:00 2016 From: marcus.larsson at oracle.com (Marcus Larsson) Date: Mon, 11 Apr 2016 11:24:00 +0200 Subject: RFR: 8153659: Create a CHeap backed LogStream class In-Reply-To: <570B6C19.5040507@oracle.com> References: <57054D6A.8030405@oracle.com> <570644DD.3070304@oracle.com> <57068F87.5050607@oracle.com> <570B6C19.5040507@oracle.com> Message-ID: <570B6D30.7080501@oracle.com> Hi Stefan, On 04/11/2016 11:19 AM, Stefan Karlsson wrote: > Hi all, > > The last suggestion to move stringStreamWithResourceMark into > ostream.hpp causes include circularities in the latest hs-rt code. I'd > like to proceed with webrev.02 for now. webrev.02 looks good to me as well. Thanks, Marcus > > Thanks, > StefanK > > On 2016-04-07 18:49, Stefan Karlsson wrote: >> Hi again, >> >> I decided to fix the resourceArea.hpp problem, so that I could move >> the stringStreamWithResourceMark class into ostream.hpp. >> >> http://cr.openjdk.java.net/~stefank/8153659/webrev.03.delta >> http://cr.openjdk.java.net/~stefank/8153659/webrev.03 >> >> The patch is applied on top of the thread.inline.hpp patch in: >> http://mail.openjdk.java.net/pipermail/hotspot-dev/2016-April/022511.html >> >> >> Thanks >> StefanK >> >> On 2016-04-07 13:30, Stefan Karlsson wrote: >>> Hi all, >>> >>> I've updated the patch: >>> http://cr.openjdk.java.net/~stefank/8153659/webrev.02 >>> >>> The previous patch created the embedded ResourceMark after the >>> stringStream instance was created. I discussed the layout of the >>> classes with Bengt, and have decided to restructure this patch. I've >>> changed the code so that the ResourceMark is embedded in a new >>> stringStreamWithResourceMark class. This allows me to use the same >>> LogStreamBase class, but different stringClass template parameters, >>> for all three classes. >>> >>> I've put the stringStreamWithResourceMark class in logStream.hpp >>> instead of ostream.hpp, to prevent the include of resourceArea.hpp >>> to propagate through the ostream.hpp header. The resourceArea.hpp >>> file is problematic, since it includes and uses thread.inline.hpp. >>> The alternative would be to move the implementation of >>> resourceArea.hpp into a resource.inline.hpp file, so that header >>> files could create ResourceMark instances, without having to include >>> thread.inline.hpp. I'm leaving that exercise for another RFE. >>> >>> Thanks, >>> StefanK >>> >>> On 2016-04-06 19:54, Stefan Karlsson wrote: >>>> Hi all, >>>> >>>> Please review this patch to add a LogStream class that allocates >>>> its backing buffer from CHeap memory instead of Resource memory. >>>> >>>> http://cr.openjdk.java.net/~stefank/8153659/webrev.01 >>>> https://bugs.openjdk.java.net/browse/JDK-8153659 >>>> >>>> The main motivation for this is that we can't use Resource >>>> allocated memory during initialization, until Thread::current() has >>>> been initialized. So, a CHeap backed LogStream is desirable when we >>>> execute, for example, the following code during large pages >>>> initialization: >>>> >>>> void os::trace_page_sizes(const char* str, const size_t* >>>> page_sizes, int count) >>>> { >>>> if (TracePageSizes) { >>>> tty->print("%s: ", str); >>>> for (int i = 0; i < count; ++i) { >>>> tty->print(" " SIZE_FORMAT, page_sizes[i]); >>>> } >>>> tty->cr(); >>>> } >>>> } >>>> >>>> The patch restructures the code and creates a LogStreamBase >>>> template base class, which takes the backing outputStream class as >>>> a template parameter. 
We then have three concrete LogStream classes: >>>> >>>> LogStream - Buffer resource allocated with an embedded ResourceMark >>>> LogStreamNoResourceMark - Buffer resource allocated without an >>>> embedded ResourceMark >>>> LogStreamCHeap - Buffer CHeap allocated >>>> >>>> I moved the LogStream class from the logStream.inline.hpp file to >>>> logStream.hpp, for consistency. If that's causing problems while >>>> reviewing this, I can move it in a separate patch. >>>> >>>> Tested with JPRT with the TracePageSizes patch ( JDK-8152491) and >>>> internal VM tests. >>>> >>>> Thanks, >>>> StefanK >>> >> > From stefan.karlsson at oracle.com Mon Apr 11 09:23:44 2016 From: stefan.karlsson at oracle.com (Stefan Karlsson) Date: Mon, 11 Apr 2016 11:23:44 +0200 Subject: RFR: 8153659: Create a CHeap backed LogStream class In-Reply-To: <570B6D30.7080501@oracle.com> References: <57054D6A.8030405@oracle.com> <570644DD.3070304@oracle.com> <57068F87.5050607@oracle.com> <570B6C19.5040507@oracle.com> <570B6D30.7080501@oracle.com> Message-ID: <570B6D20.40803@oracle.com> Thanks, Marcus. StefanK On 2016-04-11 11:24, Marcus Larsson wrote: > Hi Stefan, > > On 04/11/2016 11:19 AM, Stefan Karlsson wrote: >> Hi all, >> >> The last suggestion to move stringStreamWithResourceMark into >> ostream.hpp causes include circularities in the latest hs-rt code. >> I'd like to proceed with webrev.02 for now. > > webrev.02 looks good to me as well. > > Thanks, > Marcus > >> >> Thanks, >> StefanK >> >> On 2016-04-07 18:49, Stefan Karlsson wrote: >>> Hi again, >>> >>> I decided to fix the resourceArea.hpp problem, so that I could move >>> the stringStreamWithResourceMark class into ostream.hpp. >>> >>> http://cr.openjdk.java.net/~stefank/8153659/webrev.03.delta >>> http://cr.openjdk.java.net/~stefank/8153659/webrev.03 >>> >>> The patch is applied on top of the thread.inline.hpp patch in: >>> http://mail.openjdk.java.net/pipermail/hotspot-dev/2016-April/022511.html >>> >>> >>> Thanks >>> StefanK >>> >>> On 2016-04-07 13:30, Stefan Karlsson wrote: >>>> Hi all, >>>> >>>> I've updated the patch: >>>> http://cr.openjdk.java.net/~stefank/8153659/webrev.02 >>>> >>>> The previous patch created the embedded ResourceMark after the >>>> stringStream instance was created. I discussed the layout of the >>>> classes with Bengt, and have decided to restructure this patch. >>>> I've changed the code so that the ResourceMark is embedded in a new >>>> stringStreamWithResourceMark class. This allows me to use the same >>>> LogStreamBase class, but different stringClass template parameters, >>>> for all three classes. >>>> >>>> I've put the stringStreamWithResourceMark class in logStream.hpp >>>> instead of ostream.hpp, to prevent the include of resourceArea.hpp >>>> to propagate through the ostream.hpp header. The resourceArea.hpp >>>> file is problematic, since it includes and uses thread.inline.hpp. >>>> The alternative would be to move the implementation of >>>> resourceArea.hpp into a resource.inline.hpp file, so that header >>>> files could create ResourceMark instances, without having to >>>> include thread.inline.hpp. I'm leaving that exercise for another RFE. >>>> >>>> Thanks, >>>> StefanK >>>> >>>> On 2016-04-06 19:54, Stefan Karlsson wrote: >>>>> Hi all, >>>>> >>>>> Please review this patch to add a LogStream class that allocates >>>>> its backing buffer from CHeap memory instead of Resource memory. 
>>>>> >>>>> http://cr.openjdk.java.net/~stefank/8153659/webrev.01 >>>>> https://bugs.openjdk.java.net/browse/JDK-8153659 >>>>> >>>>> The main motivation for this is that we can't use Resource >>>>> allocated memory during initialization, until Thread::current() >>>>> has been initialized. So, a CHeap backed LogStream is desirable >>>>> when we execute, for example, the following code during large >>>>> pages initialization: >>>>> >>>>> void os::trace_page_sizes(const char* str, const size_t* >>>>> page_sizes, int count) >>>>> { >>>>> if (TracePageSizes) { >>>>> tty->print("%s: ", str); >>>>> for (int i = 0; i < count; ++i) { >>>>> tty->print(" " SIZE_FORMAT, page_sizes[i]); >>>>> } >>>>> tty->cr(); >>>>> } >>>>> } >>>>> >>>>> The patch restructures the code and creates a LogStreamBase >>>>> template base class, which takes the backing outputStream class as >>>>> a template parameter. We then have three concrete LogStream classes: >>>>> >>>>> LogStream - Buffer resource allocated with an embedded ResourceMark >>>>> LogStreamNoResourceMark - Buffer resource allocated without an >>>>> embedded ResourceMark >>>>> LogStreamCHeap - Buffer CHeap allocated >>>>> >>>>> I moved the LogStream class from the logStream.inline.hpp file to >>>>> logStream.hpp, for consistency. If that's causing problems while >>>>> reviewing this, I can move it in a separate patch. >>>>> >>>>> Tested with JPRT with the TracePageSizes patch ( JDK-8152491) and >>>>> internal VM tests. >>>>> >>>>> Thanks, >>>>> StefanK >>>> >>> >> > From volker.simonis at gmail.com Mon Apr 11 09:48:12 2016 From: volker.simonis at gmail.com (Volker Simonis) Date: Mon, 11 Apr 2016 11:48:12 +0200 Subject: RFR (XL): 8152664 - Support non-continuous CodeBlobs in HotSpot In-Reply-To: <57082A8C.6030800@oracle.com> References: <20160407121221.GQ9504@rbackman> <57080EDD.7040701@oracle.com> <5708117B.4060705@oracle.com> <57082A8C.6030800@oracle.com> Message-ID: No, unfortunately not: # Internal Error (/usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/codeCache.cpp:577), pid=29667, tid=29686 # assert(cb->is_nmethod()) failed: did not find an nmethod Current CompileTask: C1: 651 4 3 java.lang.StringLatin1::charAt (28 bytes) Stack: [0x000010006b400000,0x000010006b800000], sp=0x000010006b7fc820, free space=4082k Native frames: (J=compiled Java code, j=interpreted, Vv=VM code, C=native code) V [libjvm.so+0x123b52c] VMError::report(outputStream*, bool)+0x12fc V [libjvm.so+0x123d928] VMError::report_and_die(int, char const*, char const*, char*, Thread*, unsigned char*, void*, void*, char const*, int, unsigned long)+0x5bc V [libjvm.so+0x123d25c] VMError::report_and_die(Thread*, char const*, int, char const*, char const*, char*)+0x84 V [libjvm.so+0x84100c] report_vm_error(char const*, int, char const*, char const*, ...)+0xcc V [libjvm.so+0x7314f8] CodeCache::find_nmethod(void*)+0x74 V [libjvm.so+0xefb7dc] NativeCall::get_trampoline()+0x44 V [libjvm.so+0x10769cc] Relocation::pd_call_destination(unsigned char*)+0x150 V [libjvm.so+0x106f6b4] CallRelocation::fix_relocation_after_move(CodeBuffer const*, CodeBuffer*)+0x74 V [libjvm.so+0x728918] CodeBuffer::relocate_code_to(CodeBuffer*) const+0x390 V [libjvm.so+0x728484] CodeBuffer::copy_code_to(CodeBlob*)+0x134 V [libjvm.so+0x7226f0] CodeBuffer::copy_code_and_locs_to(CodeBlob*)+0x84 V [libjvm.so+0x71f8f8] CodeBlob::initialize(CodeBuffer*)+0x3c V [libjvm.so+0x7c539c] CompiledMethod::CompiledMethod(Method*, char const*, int, int, CodeBuffer*, int, int, OopMapSet*, bool)+0x12c V [libjvm.so+0xf02010] 
nmethod::nmethod(Method*, int, int, int, CodeOffsets*, int, DebugInformationRecorder*, Dependencies*, CodeBuffer*, int, OopMapSet*, ExceptionHandlerTable*, ImplicitExceptionTable*, AbstractCompiler*, int)+0xe0 V [libjvm.so+0xf016c8] nmethod::new_nmethod(methodHandle const&, int, int, CodeOffsets*, int, DebugInformationRecorder*, Dependencies*, CodeBuffer*, int, OopMapSet*, ExceptionHandlerTable*, ImplicitExceptionTable*, AbstractCompiler*, int)+0x2c4 V [libjvm.so+0x632970] ciEnv::register_method(ciMethod*, int, CodeOffsets*, int, CodeBuffer*, int, OopMapSet*, ExceptionHandlerTable*, ImplicitExceptionTable*, AbstractCompiler*, bool, bool, RTMState)+0x560 V [libjvm.so+0x48ee00] Compilation::install_code(int)+0x264 V [libjvm.so+0x48eff8] Compilation::compile_method()+0x184 V [libjvm.so+0x48f7a8] Compilation::Compilation(AbstractCompiler*, ciEnv*, ciMethod*, int, BufferBlob*, DirectiveSet*)+0x288 V [libjvm.so+0x4980d0] Compiler::compile_method(ciEnv*, ciMethod*, int, DirectiveSet*)+0xc8 I'm currently trying to find another solution... Regards, Volker On Sat, Apr 9, 2016 at 12:02 AM, Dean Long wrote: > Volker, does this patch fix the problem? > > http://cr.openjdk.java.net/~dlong/8151956/8151956.patch > > dl > > > On 4/8/2016 1:15 PM, Dean Long wrote: > >> I was able to find this: >> >> >> https://isocpp.org/wiki/faq/strange-inheritance#calling-virtuals-from-ctor-idiom >> >> dl >> >> On 4/8/2016 1:04 PM, Dean Long wrote: >> >>> Hi Volker. I noticed this problem before and filed 8151956. Making >>> those member functions >>> non-virtual may solve this particular problem, but as the code evolves >>> we may hit it >>> again if we ever call a virtual member function by accident. >>> >>> I'm not a C++ expert, but if we declared those functions as virtual in >>> CodeBlob, then would >>> that work? It doesn't seem ideal, however. I would rather not call out >>> from the CodeBlob >>> constructor at all, but instead do the work in the subclass >>> constructor. Let's say we move >>> the call to cb->copy_code_and_locs_to() to a separate function. Is >>> there a C++ idiom >>> for making sure all subclasses of CodeBlob call it? The only think I >>> can think of is to set >>> an "initialized" flag and to check it in strategic places. >>> >>> dl >>> >>> On 4/8/2016 11:12 AM, Volker Simonis wrote: >>> >>>> Hi Rickard, >>>> >>>> I found the problem why your change crashes the VM on ppc (and I'm >>>> pretty >>>> sure it will also crash on ARM - @Andrew, maybe you can try it out?). >>>> It is >>>> caused by the following code in address NativeCall::get_trampoline() >>>> which >>>> is also present on arm64: >>>> >>>> address NativeCall::get_trampoline() { >>>> address call_addr = addr_at(0); >>>> CodeBlob *code = CodeCache::find_blob(call_addr); >>>> ... >>>> // If the codeBlob is not a nmethod, this is because we get here >>>> from the >>>> // CodeBlob constructor, which is called within the nmethod >>>> constructor. 
>>>> return trampoline_stub_Relocation::get_trampoline_for(call_addr, >>>> (nmethod*)code); >>>> } >>>> >>>> The comment explains the situation quite well: we're in the CodeBlob >>>> constructor which was called by the CompiledMethod constructor which was >>>> called from the nmethod constructor: >>>> >>>> #3 0x00003fffb741b80c in NativeCall::get_trampoline >>>> (this=0x3fff607d0fac) >>>> at >>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/cpu/ppc/vm/nativeInst_ppc.cpp:146 >>>> >>>> #4 0x00003fffb7596914 in Relocation::pd_call_destination >>>> (this=0x3ffdfe3fcc90, orig_addr=0x3fff603b8a2c "\001") at >>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/cpu/ppc/vm/relocInfo_ppc.cpp:87 >>>> >>>> #5 0x00003fffb758f5fc in CallRelocation::fix_relocation_after_move >>>> (this=0x3ffdfe3fcc90, src=0x3ffdfe3fdb40, dest=0x3ffdfe3fcd58) at >>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.cpp:506 >>>> >>>> #6 0x00003fffb6c48898 in CodeBuffer::relocate_code_to >>>> (this=0x3ffdfe3fdb40, dest=0x3ffdfe3fcd58) at >>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/asm/codeBuffer.cpp:812 >>>> >>>> #7 0x00003fffb6c48404 in CodeBuffer::copy_code_to (this=0x3ffdfe3fdb40, >>>> dest_blob=0x3fff607d0c10) at >>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/asm/codeBuffer.cpp:748 >>>> >>>> #8 0x00003fffb6c42670 in CodeBuffer::copy_code_and_locs_to >>>> (this=0x3ffdfe3fdb40, blob=0x3fff607d0c10) at >>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/asm/codeBuffer.hpp:607 >>>> >>>> #9 0x00003fffb6c3f834 in CodeBlob::CodeBlob (this=0x3fff607d0c10, >>>> name=0x3fffb7a75fd8 "nmethod", layout=..., cb=0x3ffdfe3fdb40, >>>> frame_complete_offset=20, frame_size=14, oop_maps=0x3ffe00049620, >>>> caller_must_gc_arguments=false, subtype=8) at >>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/codeBlob.cpp:117 >>>> >>>> #10 0x00003fffb6ce52c8 in CompiledMethod::CompiledMethod >>>> (this=0x3fff607d0c10, method=0x3ffe1ddce568, name=0x3fffb7a75fd8 >>>> "nmethod", >>>> size=1768, header_size=392, cb=0x3ffdfe3fdb40, frame_complete_offset=20, >>>> frame_size=14, oop_maps=0x3ffe00049620, caller_must_gc_arguments=false) >>>> at >>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/compiledMethod.cpp:42 >>>> >>>> #11 0x00003fffb7421f58 in nmethod::nmethod (this=0x3fff607d0c10, >>>> method=0x3ffe1ddce568, nmethod_size=1768, compile_id=4, entry_bci=-1, >>>> offsets=0x3ffdfe3fdb18, orig_pc_offset=104, debug_info=0x3fffb03d55f0, >>>> dependencies=0x3ffe00049690, code_buffer=0x3ffdfe3fdb40, frame_size=14, >>>> oop_maps=0x3ffe00049620, handler_table=0x3ffdfe3fdad0, >>>> nul_chk_table=0x3ffdfe3fdaf0, compiler=0x3fffb03bc270, comp_level=3) at >>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/nmethod.cpp:706 >>>> >>>> >>>> Now we cast 'code' to 'nmethod' but at this point in time 'code' is >>>> still a >>>> CodeBlob from the C++ point of view (i.e. it still has a CodeBlob vtable >>>> (see [1] for an explanation)). 
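The analysis above hinges on a general C++ rule: while a base-class constructor is running, the object's dynamic type is still the base class, so virtual dispatch does not reach the derived overrides yet. A tiny standalone program shows the effect (plain C++, nothing HotSpot-specific; the class names are made up):

  #include <cstdio>

  struct CodeBlobLike {
    CodeBlobLike() {
      // Any virtual call made while this constructor runs resolves against
      // CodeBlobLike's vtable; the nmethodLike override is not visible yet.
      describe();
    }
    virtual ~CodeBlobLike() {}
    virtual void describe() const { std::puts("CodeBlobLike::describe"); }
  };

  struct nmethodLike : public CodeBlobLike {
    virtual void describe() const { std::puts("nmethodLike::describe"); }
  };

  int main() {
    nmethodLike nm;  // prints "CodeBlobLike::describe" during construction
    nm.describe();   // prints "nmethodLike::describe" once construction is done
    return 0;
  }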
>>>> >>>> Later on, in RelocIterator::initialize() we call virtual methods on the >>>> nmethod which still has the vtable of a "CodeBlob" and this fails badly: >>>> >>>> #0 SingletonBlob::print_on (this=0x3fff607d0c10, st=0x0) at >>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/codeBlob.cpp:584 >>>> >>>> #1 0x00003fffb758d51c in RelocIterator::initialize >>>> (this=0x3ffdfe3fc928, >>>> nm=0x3fff607d0c10, begin=0x3fff607d0fac "\001", limit=0x0) at >>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.cpp:144 >>>> >>>> #2 0x00003fffb6ace56c in RelocIterator::RelocIterator >>>> (this=0x3ffdfe3fc928, nm=0x3fff607d0c10, begin=0x3fff607d0fac "\001", >>>> limit=0x0) at >>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.hpp:1378 >>>> >>>> #3 0x00003fffb75919dc in trampoline_stub_Relocation::get_trampoline_for >>>> (call=0x3fff607d0fac "\001", code=0x3fff607d0c10) at >>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.cpp:849 >>>> >>>> #4 0x00003fffb741b80c in NativeCall::get_trampoline >>>> (this=0x3fff607d0fac) >>>> at >>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/cpu/ppc/vm/nativeInst_ppc.cpp:146 >>>> >>>> >>>> As you can see, we actually want to call nmethod::stub_begin() at >>>> relocInfo.cpp:144 >>>> >>>> 142 _section_start[CodeBuffer::SECT_CONSTS] = nm->consts_begin(); >>>> 143 _section_start[CodeBuffer::SECT_INSTS ] = nm->insts_begin() ; >>>> 144 _section_start[CodeBuffer::SECT_STUBS ] = nm->stub_begin() ; >>>> >>>> but we actually end up in SingletonBlob::print_on() which is a >>>> completely >>>> different method. Notice that the call to nm->consts_begin() before also >>>> fails, but it doesn't crash the VM because it happens to call >>>> SingletonBlob::verify() which has no bad side effect. The call to >>>> nm->insts_begin() in line 143 is non-virtual and thus works fine. Here >>>> are >>>> the corresponding vtable slots in the CodeBlob vtable for consts_begin() >>>> and stub_begin() >>>> >>>> (gdb) p &nmethod::consts_begin >>>> $76 = &virtual table offset 42 >>>> (gdb) p &nmethod::stub_begin >>>> $77 = &virtual table offset 44 >>>> (gdb) p ((*(void ***)nm) + 1)[42] >>>> $86 = (void *) 0x3fffb6c41df8 >>>> (gdb) p ((*(void ***)nm) + 1)[44] >>>> $87 = (void *) 0x3fffb6c41e64 >>> const> >>>> >>>> As you can see, 'nm' is indeed a "CodeBlob" at this point: >>>> >>>> (gdb) p *(void ***)nm >>>> $91 = (void **) 0x3fffb7befa00 >>>> (gdb) p nm->print() >>>> [CodeBlob (0x00003fff607d1090)] >>>> Framesize: 14 >>>> >>>> The offending calls succeeded before your change, because they where not >>>> virtual. Any idea how we can fix this with the new class hierarchy? >>>> >>>> Regards, >>>> Volker >>>> >>>> [1] >>>> >>>> http://stackoverflow.com/questions/6591859/when-does-the-vptr-pointing-to-vtable-get-initialized-for-a-polymorphic-class >>>> >>>> >>>> >>>> On Thu, Apr 7, 2016 at 5:50 PM, Volker Simonis < >>>> volker.simonis at gmail.com> >>>> wrote: >>>> >>>> Hi Rickard, >>>>> >>>>> I'd also like to know what's the rational behind this quite large >>>>> change. Do you expect some performance or memory consumption >>>>> improvements or is this a prerequisite for another change which is >>>>> still to come? >>>>> >>>>> The change itself currently doesn't work on ppc64 (neither on Linux >>>>> nor on AIX). 
I get the following crash during the build when the newly >>>>> built Hotspot is JIT-compiling java.lang.String::charAt on C1 : >>>>> >>>>> # >>>>> # A fatal error has been detected by the Java Runtime Environment: >>>>> # >>>>> # SIGSEGV (0xb) at pc=0x00001000012a44d0, pid=35331, tid=35404 >>>>> # >>>>> # JRE version: OpenJDK Runtime Environment (9.0) (slowdebug build >>>>> 9-internal+0-2016-04-07-162501.d046063.jdk9-hs-comp) >>>>> # Java VM: OpenJDK 64-Bit Server VM (slowdebug >>>>> 9-internal+0-2016-04-07-162501.d046063.jdk9-hs-comp, mixed mode, >>>>> tiered, compressed oo >>>>> ps, serial gc, linux-ppc64le) >>>>> # Problematic frame: >>>>> # V [libjvm.so+0xf744d0] outputStream::do_vsnprintf_and_write(char >>>>> const*, char*, bool)+0x40 >>>>> # >>>>> # No core dump will be written. Core dumps have been disabled. To >>>>> enable core dumping, try "ulimit -c unlimited" before starting Java >>>>> again >>>>> # >>>>> # If you would like to submit a bug report, please visit: >>>>> # http://bugreport.java.com/bugreport/crash.jsp >>>>> # >>>>> >>>>> --------------- S U M M A R Y ------------ >>>>> >>>>> Command Line: >>>>> -Dapplication.home=/sapmnt/ld9510/a/d046063/output-jdk9-hs-comp-dbg/jdk >>>>> >>>>> -Xms8m -XX:+UseSerialGC -Xms32M -Xmx512M -Djdk. >>>>> module.main=jdk.jlink jdk.jlink/jdk.tools.jmod.Main create >>>>> --module-version 9-internal --os-name Linux --os-arch ppc64le >>>>> --os-version >>>>> 2.6 --modulepath /priv/d046063/output-jdk9-hs-comp-dbg/images/jmods >>>>> --hash-dependencies .* --exclude **_the.* --libs >>>>> >>>>> /priv/d046063/output-jdk9-hs-comp-dbg/support/modules_libs-stripped/java.base >>>>> >>>>> --cmds >>>>> /priv/d046063/output-jdk9-hs-comp-dbg/support/modules_cmds-stripped/java.base >>>>> >>>>> --config >>>>> /priv/d046063/output-jdk9-hs-comp-dbg/support/modules_conf/java.base >>>>> --class-path >>>>> /priv/d046063/output-jdk9-hs-comp-dbg/jdk/modules/java.base >>>>> /priv/d046063/output-jdk9-hs-comp-dbg/support/jmods/java.base.jmod >>>>> >>>>> Host: ld9510, POWER8E (raw), altivec supported, 48 cores, 61G, # >>>>> Please check /etc/os-release for details about this release. 
>>>>> Time: Thu Apr 7 16:28:55 2016 CEST elapsed time: 0 seconds (0d 0h 0m >>>>> 0s) >>>>> >>>>> --------------- T H R E A D --------------- >>>>> >>>>> Current thread (0x000010000429c800): JavaThread "C1 CompilerThread10" >>>>> daemon [_thread_in_vm, id=35404, >>>>> stack(0x000010006a800000,0x000010006ac00000)] >>>>> >>>>> >>>>> Current CompileTask: >>>>> C1: 761 3 3 java.lang.String::charAt (25 bytes) >>>>> >>>>> Stack: [0x000010006a800000,0x000010006ac00000], >>>>> sp=0x000010006abfc6c0, free space=4081k >>>>> Native frames: (J=compiled Java code, j=interpreted, Vv=VM code, >>>>> C=native >>>>> code) >>>>> V [libjvm.so+0xf744d0] outputStream::do_vsnprintf_and_write(char >>>>> const*, char*, bool)+0x40 >>>>> V [libjvm.so+0xf74668] outputStream::print_cr(char const*, ...)+0x68 >>>>> V [libjvm.so+0x72189c] CodeBlob::print_on(outputStream*) const+0x50 >>>>> V [libjvm.so+0x723bdc] RuntimeBlob::print_on(outputStream*) const+0x40 >>>>> V [libjvm.so+0x721eb0] SingletonBlob::print_on(outputStream*) >>>>> const+0x4c >>>>> V [libjvm.so+0x106d51c] RelocIterator::initialize(CompiledMethod*, >>>>> unsigned char*, unsigned char*)+0x170 >>>>> V [libjvm.so+0x5ae56c] RelocIterator::RelocIterator(CompiledMethod*, >>>>> unsigned char*, unsigned char*)+0x78 >>>>> V [libjvm.so+0x10719dc] >>>>> trampoline_stub_Relocation::get_trampoline_for(unsigned char*, >>>>> nmethod*)+0x78 >>>>> V [libjvm.so+0xefb80c] NativeCall::get_trampoline()+0x110 >>>>> V [libjvm.so+0x1076914] Relocation::pd_call_destination(unsigned >>>>> char*)+0x150 >>>>> V [libjvm.so+0x106f5fc] >>>>> CallRelocation::fix_relocation_after_move(CodeBuffer const*, >>>>> CodeBuffer*)+0x74 >>>>> V [libjvm.so+0x728898] CodeBuffer::relocate_code_to(CodeBuffer*) >>>>> const+0x390 >>>>> V [libjvm.so+0x728404] CodeBuffer::copy_code_to(CodeBlob*)+0x134 >>>>> V [libjvm.so+0x722670] >>>>> CodeBuffer::copy_code_and_locs_to(CodeBlob*)+0x84 >>>>> V [libjvm.so+0x71f834] CodeBlob::CodeBlob(char const*, >>>>> CodeBlobLayout const&, CodeBuffer*, int, int, OopMapSet*, bool, >>>>> int)+0x320 >>>>> V [libjvm.so+0x7c52c8] CompiledMethod::CompiledMethod(Method*, char >>>>> const*, int, int, CodeBuffer*, int, int, OopMapSet*, bool)+0xd8 >>>>> V [libjvm.so+0xf01f58] nmethod::nmethod(Method*, int, int, int, >>>>> CodeOffsets*, int, DebugInformationRecorder*, Dependencies*, >>>>> CodeBuffer*, int, OopMapSet*, ExceptionHandlerTable*, >>>>> ImplicitExceptionTable*, AbstractCompiler*, int)+0xe0 >>>>> V [libjvm.so+0xf01610] nmethod::new_nmethod(methodHandle const&, >>>>> int, int, CodeOffsets*, int, DebugInformationRecorder*, Dependencies*, >>>>> CodeBuffer*, int, OopMapSet*, ExceptionHandlerTable*, >>>>> ImplicitExceptionTable*, AbstractCompiler*, int)+0x2c4 >>>>> V [libjvm.so+0x632970] ciEnv::register_method(ciMethod*, int, >>>>> CodeOffsets*, int, CodeBuffer*, int, OopMapSet*, >>>>> ExceptionHandlerTable*, ImplicitExceptionTable*, AbstractCompiler*, >>>>> bool, bool, RTMState)+0x560 >>>>> V [libjvm.so+0x48ee00] Compilation::install_code(int)+0x264 >>>>> V [libjvm.so+0x48eff8] Compilation::compile_method()+0x184 >>>>> V [libjvm.so+0x48f7a8] Compilation::Compilation(AbstractCompiler*, >>>>> ciEnv*, ciMethod*, int, BufferBlob*, DirectiveSet*)+0x288 >>>>> V [libjvm.so+0x4980d0] Compiler::compile_method(ciEnv*, ciMethod*, >>>>> int, DirectiveSet*)+0xc8 >>>>> V [libjvm.so+0x7b188c] >>>>> CompileBroker::invoke_compiler_on_method(CompileTask*)+0x590 >>>>> V [libjvm.so+0x7b07bc] CompileBroker::compiler_thread_loop()+0x310 >>>>> V [libjvm.so+0x11a614c] compiler_thread_entry(JavaThread*, 
>>>>> Thread*)+0xa0 >>>>> V [libjvm.so+0x119f3a8] JavaThread::thread_main_inner()+0x1b4 >>>>> V [libjvm.so+0x119f1a4] JavaThread::run()+0x1b8 >>>>> V [libjvm.so+0xf53d90] java_start(Thread*)+0x204 >>>>> C [libpthread.so.0+0x8a64] start_thread+0xf4 >>>>> C [libc.so.6+0x1032a0] clone+0x98 >>>>> >>>>> I haven't identified the exact cause (will analyze it tomorrow) but >>>>> the stack trace indicates that it is indeed related to your changes. >>>>> >>>>> Besides that I have some comments: >>>>> >>>>> codeBuffer.hpp: >>>>> >>>>> 472 CodeSection* insts() { return &_insts; } >>>>> 475 const CodeSection* insts() const { return &_insts; } >>>>> >>>>> - do we really need both versions? >>>>> >>>>> codeBlob.hpp: >>>>> >>>>> 135 nmethod* as_nmethod_or_null() const { return >>>>> is_nmethod() ? (nmethod*) this : NULL; } >>>>> 136 nmethod* as_nmethod() const { >>>>> assert(is_nmethod(), "must be nmethod"); return (nmethod*) this; } >>>>> 137 CompiledMethod* as_compiled_method_or_null() const { return >>>>> is_compiled() ? (CompiledMethod*) this : NULL; } >>>>> 138 CompiledMethod* as_compiled_method() const { >>>>> assert(is_compiled(), "must be compiled"); return (CompiledMethod*) >>>>> this; } >>>>> 139 CodeBlob* as_codeblob_or_null() const { return >>>>> (CodeBlob*) this; } >>>>> >>>>> - I don't like this code. You make the getters 'const' which >>>>> implicitely makes 'this' a "pointer to const" but then the returned >>>>> pointer is a normal pointer to a non-const object and therefore you >>>>> have to statically cast away the "pointer to const" (that's why you >>>>> need the cast even in the case where you return a CodeBlob*). So >>>>> either remove the const qualifier from the method declarations or make >>>>> them return "pointers to const". And by the way, as_codeblob_or_null() >>>>> doesn't seemed to be used anywhere in the code, why do we need it at >>>>> all? >>>>> >>>>> - Why do we need the non-virtual methods is_nmethod() and >>>>> is_compiled() to manually simulate virtual behavior. Why can't we >>>>> simply make them virtual and implement them accordingly in nmathod and >>>>> CompiledMethod? >>>>> >>>>> Regards, >>>>> Volker >>>>> >>>>> On Thu, Apr 7, 2016 at 2:12 PM, Rickard B?ckman >>>>> wrote: >>>>> >>>>>> Hi, >>>>>> >>>>>> can I please have review for this patch please? >>>>>> >>>>>> So far CodeBlobs have required all the data (metadata, oops, code, >>>>>> etc) >>>>>> to be in one continuous blob With this patch we are looking to change >>>>>> that. It's been done by changing offsets in CodeBlob to addresses, >>>>>> making some methods virtual to allow different behavior and also >>>>>> creating a couple of new classes. CompiledMethod now sits inbetween >>>>>> CodeBlob and nmethod. >>>>>> >>>>>> CR: https://bugs.openjdk.java.net/browse/JDK-8152664 >>>>>> Webrev: http://cr.openjdk.java.net/~rbackman/8152664/ >>>>>> >>>>>> Thanks >>>>>> /R >>>>>> >>>>> >>> >> > From bengt.rutisson at oracle.com Mon Apr 11 10:26:03 2016 From: bengt.rutisson at oracle.com (Bengt Rutisson) Date: Mon, 11 Apr 2016 12:26:03 +0200 Subject: RFR: 8153659: Create a CHeap backed LogStream class In-Reply-To: <570B6D30.7080501@oracle.com> References: <57054D6A.8030405@oracle.com> <570644DD.3070304@oracle.com> <57068F87.5050607@oracle.com> <570B6C19.5040507@oracle.com> <570B6D30.7080501@oracle.com> Message-ID: <93FBDE5B-80F7-4112-ADD8-7241A70FB93B@oracle.com> > 11 apr. 2016 kl. 
11:24 skrev Marcus Larsson : > > Hi Stefan, > >> On 04/11/2016 11:19 AM, Stefan Karlsson wrote: >> Hi all, >> >> The last suggestion to move stringStreamWithResourceMark into ostream.hpp causes include circularities in the latest hs-rt code. I'd like to proceed with webrev.02 for now. > > webrev.02 looks good to me as well. +1 Bengt > > Thanks, > Marcus > >> >> Thanks, >> StefanK >> >>> On 2016-04-07 18:49, Stefan Karlsson wrote: >>> Hi again, >>> >>> I decided to fix the resourceArea.hpp problem, so that I could move the stringStreamWithResourceMark class into ostream.hpp. >>> >>> http://cr.openjdk.java.net/~stefank/8153659/webrev.03.delta >>> http://cr.openjdk.java.net/~stefank/8153659/webrev.03 >>> >>> The patch is applied on top of the thread.inline.hpp patch in: >>> http://mail.openjdk.java.net/pipermail/hotspot-dev/2016-April/022511.html >>> >>> Thanks >>> StefanK >>> >>>> On 2016-04-07 13:30, Stefan Karlsson wrote: >>>> Hi all, >>>> >>>> I've updated the patch: >>>> http://cr.openjdk.java.net/~stefank/8153659/webrev.02 >>>> >>>> The previous patch created the embedded ResourceMark after the stringStream instance was created. I discussed the layout of the classes with Bengt, and have decided to restructure this patch. I've changed the code so that the ResourceMark is embedded in a new stringStreamWithResourceMark class. This allows me to use the same LogStreamBase class, but different stringClass template parameters, for all three classes. >>>> >>>> I've put the stringStreamWithResourceMark class in logStream.hpp instead of ostream.hpp, to prevent the include of resourceArea.hpp to propagate through the ostream.hpp header. The resourceArea.hpp file is problematic, since it includes and uses thread.inline.hpp. The alternative would be to move the implementation of resourceArea.hpp into a resource.inline.hpp file, so that header files could create ResourceMark instances, without having to include thread.inline.hpp. I'm leaving that exercise for another RFE. >>>> >>>> Thanks, >>>> StefanK >>>> >>>>> On 2016-04-06 19:54, Stefan Karlsson wrote: >>>>> Hi all, >>>>> >>>>> Please review this patch to add a LogStream class that allocates its backing buffer from CHeap memory instead of Resource memory. >>>>> >>>>> http://cr.openjdk.java.net/~stefank/8153659/webrev.01 >>>>> https://bugs.openjdk.java.net/browse/JDK-8153659 >>>>> >>>>> The main motivation for this is that we can't use Resource allocated memory during initialization, until Thread::current() has been initialized. So, a CHeap backed LogStream is desirable when we execute, for example, the following code during large pages initialization: >>>>> >>>>> void os::trace_page_sizes(const char* str, const size_t* page_sizes, int count) >>>>> { >>>>> if (TracePageSizes) { >>>>> tty->print("%s: ", str); >>>>> for (int i = 0; i < count; ++i) { >>>>> tty->print(" " SIZE_FORMAT, page_sizes[i]); >>>>> } >>>>> tty->cr(); >>>>> } >>>>> } >>>>> >>>>> The patch restructures the code and creates a LogStreamBase template base class, which takes the backing outputStream class as a template parameter. We then have three concrete LogStream classes: >>>>> >>>>> LogStream - Buffer resource allocated with an embedded ResourceMark >>>>> LogStreamNoResourceMark - Buffer resource allocated without an embedded ResourceMark >>>>> LogStreamCHeap - Buffer CHeap allocated >>>>> >>>>> I moved the LogStream class from the logStream.inline.hpp file to logStream.hpp, for consistency. If that's causing problems while reviewing this, I can move it in a separate patch. 
>>>>> >>>>> Tested with JPRT with the TracePageSizes patch ( JDK-8152491) and internal VM tests. >>>>> >>>>> Thanks, >>>>> StefanK > From stefan.karlsson at oracle.com Mon Apr 11 11:03:29 2016 From: stefan.karlsson at oracle.com (Stefan Karlsson) Date: Mon, 11 Apr 2016 13:03:29 +0200 Subject: RFR: 8017629: G1: UseSHM in combination with a G1HeapRegionSize > os::large_page_size() falls back to use small pages Message-ID: <570B8481.8010108@oracle.com> Hi all, Please review this patch to enable SHM large page allocations even when the requested alignment is larger than os::large_page_size(). http://cr.openjdk.java.net/~stefank/8017629/webrev.01 https://bugs.openjdk.java.net/browse/JDK-8017629 G1 is affected by this bug since it requires the heap to start at an address that is aligned with the heap region size. The patch fixes this by changing the UseSHM large pages allocation code. First, virtual memory with correct alignment is pre-reserved and then the large pages are attached to this memory area. Tested with vm.gc.testlist and ExecuteInternaVMTests Thanks, StefanK From rickard.backman at oracle.com Mon Apr 11 11:55:21 2016 From: rickard.backman at oracle.com (Rickard =?iso-8859-1?Q?B=E4ckman?=) Date: Mon, 11 Apr 2016 13:55:21 +0200 Subject: RFR (XL): 8152664 - Support non-continuous CodeBlobs in HotSpot In-Reply-To: <20160411090501.GS9504@rbackman> References: <20160407121221.GQ9504@rbackman> <20160411090501.GS9504@rbackman> Message-ID: <20160411115521.GU9504@rbackman> Volker, here is the patch if you want to try it. http://cr.openjdk.java.net/~rbackman/8152664/virtual.patch /R On 04/11, Rickard B?ckman wrote: > Volker, > > thanks for finding this issue. > > I think that maybe the easiest fix is as follows: > > create new virtual methods in CompiledMethod: > > virtual address stub_begin_v() = 0; > > make the now virtual stub_begin non-virtual like: > > address stub_begin() { return stub_begin_v(); } > > in nmethod we override the stub_begin() with the normal this + offset > compuation and implement stub_begin_v() to call stub_begin(). > > That will avoid all virtual calls in the case were we are not working on > a CompiledMethod. > > It adds a couple of methods though. What do you think? > > /R > > On 04/08, Volker Simonis wrote: > > Hi Rickard, > > > > I found the problem why your change crashes the VM on ppc (and I'm pretty > > sure it will also crash on ARM - @Andrew, maybe you can try it out?). It is > > caused by the following code in address NativeCall::get_trampoline() which > > is also present on arm64: > > > > address NativeCall::get_trampoline() { > > address call_addr = addr_at(0); > > CodeBlob *code = CodeCache::find_blob(call_addr); > > ... > > // If the codeBlob is not a nmethod, this is because we get here from the > > // CodeBlob constructor, which is called within the nmethod constructor. 
> > return trampoline_stub_Relocation::get_trampoline_for(call_addr, > > (nmethod*)code); > > } > > > > The comment explains the situation quite well: we're in the CodeBlob > > constructor which was called by the CompiledMethod constructor which was > > called from the nmethod constructor: > > > > #3 0x00003fffb741b80c in NativeCall::get_trampoline (this=0x3fff607d0fac) > > at > > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/cpu/ppc/vm/nativeInst_ppc.cpp:146 > > #4 0x00003fffb7596914 in Relocation::pd_call_destination > > (this=0x3ffdfe3fcc90, orig_addr=0x3fff603b8a2c "\001") at > > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/cpu/ppc/vm/relocInfo_ppc.cpp:87 > > #5 0x00003fffb758f5fc in CallRelocation::fix_relocation_after_move > > (this=0x3ffdfe3fcc90, src=0x3ffdfe3fdb40, dest=0x3ffdfe3fcd58) at > > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.cpp:506 > > #6 0x00003fffb6c48898 in CodeBuffer::relocate_code_to > > (this=0x3ffdfe3fdb40, dest=0x3ffdfe3fcd58) at > > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/asm/codeBuffer.cpp:812 > > #7 0x00003fffb6c48404 in CodeBuffer::copy_code_to (this=0x3ffdfe3fdb40, > > dest_blob=0x3fff607d0c10) at > > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/asm/codeBuffer.cpp:748 > > #8 0x00003fffb6c42670 in CodeBuffer::copy_code_and_locs_to > > (this=0x3ffdfe3fdb40, blob=0x3fff607d0c10) at > > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/asm/codeBuffer.hpp:607 > > #9 0x00003fffb6c3f834 in CodeBlob::CodeBlob (this=0x3fff607d0c10, > > name=0x3fffb7a75fd8 "nmethod", layout=..., cb=0x3ffdfe3fdb40, > > frame_complete_offset=20, frame_size=14, oop_maps=0x3ffe00049620, > > caller_must_gc_arguments=false, subtype=8) at > > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/codeBlob.cpp:117 > > #10 0x00003fffb6ce52c8 in CompiledMethod::CompiledMethod > > (this=0x3fff607d0c10, method=0x3ffe1ddce568, name=0x3fffb7a75fd8 "nmethod", > > size=1768, header_size=392, cb=0x3ffdfe3fdb40, frame_complete_offset=20, > > frame_size=14, oop_maps=0x3ffe00049620, caller_must_gc_arguments=false) at > > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/compiledMethod.cpp:42 > > #11 0x00003fffb7421f58 in nmethod::nmethod (this=0x3fff607d0c10, > > method=0x3ffe1ddce568, nmethod_size=1768, compile_id=4, entry_bci=-1, > > offsets=0x3ffdfe3fdb18, orig_pc_offset=104, debug_info=0x3fffb03d55f0, > > dependencies=0x3ffe00049690, code_buffer=0x3ffdfe3fdb40, frame_size=14, > > oop_maps=0x3ffe00049620, handler_table=0x3ffdfe3fdad0, > > nul_chk_table=0x3ffdfe3fdaf0, compiler=0x3fffb03bc270, comp_level=3) at > > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/nmethod.cpp:706 > > > > Now we cast 'code' to 'nmethod' but at this point in time 'code' is still a > > CodeBlob from the C++ point of view (i.e. it still has a CodeBlob vtable > > (see [1] for an explanation)). 
> > > > Later on, in RelocIterator::initialize() we call virtual methods on the > > nmethod which still has the vtable of a "CodeBlob" and this fails badly: > > > > #0 SingletonBlob::print_on (this=0x3fff607d0c10, st=0x0) at > > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/codeBlob.cpp:584 > > #1 0x00003fffb758d51c in RelocIterator::initialize (this=0x3ffdfe3fc928, > > nm=0x3fff607d0c10, begin=0x3fff607d0fac "\001", limit=0x0) at > > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.cpp:144 > > #2 0x00003fffb6ace56c in RelocIterator::RelocIterator > > (this=0x3ffdfe3fc928, nm=0x3fff607d0c10, begin=0x3fff607d0fac "\001", > > limit=0x0) at > > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.hpp:1378 > > #3 0x00003fffb75919dc in trampoline_stub_Relocation::get_trampoline_for > > (call=0x3fff607d0fac "\001", code=0x3fff607d0c10) at > > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.cpp:849 > > #4 0x00003fffb741b80c in NativeCall::get_trampoline (this=0x3fff607d0fac) > > at > > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/cpu/ppc/vm/nativeInst_ppc.cpp:146 > > > > As you can see, we actually want to call nmethod::stub_begin() at > > relocInfo.cpp:144 > > > > 142 _section_start[CodeBuffer::SECT_CONSTS] = nm->consts_begin(); > > 143 _section_start[CodeBuffer::SECT_INSTS ] = nm->insts_begin() ; > > 144 _section_start[CodeBuffer::SECT_STUBS ] = nm->stub_begin() ; > > > > but we actually end up in SingletonBlob::print_on() which is a completely > > different method. Notice that the call to nm->consts_begin() before also > > fails, but it doesn't crash the VM because it happens to call > > SingletonBlob::verify() which has no bad side effect. The call to > > nm->insts_begin() in line 143 is non-virtual and thus works fine. Here are > > the corresponding vtable slots in the CodeBlob vtable for consts_begin() > > and stub_begin() > > > > (gdb) p &nmethod::consts_begin > > $76 = &virtual table offset 42 > > (gdb) p &nmethod::stub_begin > > $77 = &virtual table offset 44 > > (gdb) p ((*(void ***)nm) + 1)[42] > > $86 = (void *) 0x3fffb6c41df8 > > (gdb) p ((*(void ***)nm) + 1)[44] > > $87 = (void *) 0x3fffb6c41e64 > > > > As you can see, 'nm' is indeed a "CodeBlob" at this point: > > > > (gdb) p *(void ***)nm > > $91 = (void **) 0x3fffb7befa00 > > (gdb) p nm->print() > > [CodeBlob (0x00003fff607d1090)] > > Framesize: 14 > > > > The offending calls succeeded before your change, because they where not > > virtual. Any idea how we can fix this with the new class hierarchy? > > > > Regards, > > Volker > > > > [1] > > http://stackoverflow.com/questions/6591859/when-does-the-vptr-pointing-to-vtable-get-initialized-for-a-polymorphic-class > > > > > > > > On Thu, Apr 7, 2016 at 5:50 PM, Volker Simonis > > wrote: > > > > > Hi Rickard, > > > > > > I'd also like to know what's the rational behind this quite large > > > change. Do you expect some performance or memory consumption > > > improvements or is this a prerequisite for another change which is > > > still to come? > > > > > > The change itself currently doesn't work on ppc64 (neither on Linux > > > nor on AIX). 
I get the following crash during the build when the newly > > > built Hotspot is JIT-compiling java.lang.String::charAt on C1 : > > > > > > # > > > # A fatal error has been detected by the Java Runtime Environment: > > > # > > > # SIGSEGV (0xb) at pc=0x00001000012a44d0, pid=35331, tid=35404 > > > # > > > # JRE version: OpenJDK Runtime Environment (9.0) (slowdebug build > > > 9-internal+0-2016-04-07-162501.d046063.jdk9-hs-comp) > > > # Java VM: OpenJDK 64-Bit Server VM (slowdebug > > > 9-internal+0-2016-04-07-162501.d046063.jdk9-hs-comp, mixed mode, > > > tiered, compressed oo > > > ps, serial gc, linux-ppc64le) > > > # Problematic frame: > > > # V [libjvm.so+0xf744d0] outputStream::do_vsnprintf_and_write(char > > > const*, char*, bool)+0x40 > > > # > > > # No core dump will be written. Core dumps have been disabled. To > > > enable core dumping, try "ulimit -c unlimited" before starting Java > > > again > > > # > > > # If you would like to submit a bug report, please visit: > > > # http://bugreport.java.com/bugreport/crash.jsp > > > # > > > > > > --------------- S U M M A R Y ------------ > > > > > > Command Line: > > > -Dapplication.home=/sapmnt/ld9510/a/d046063/output-jdk9-hs-comp-dbg/jdk > > > -Xms8m -XX:+UseSerialGC -Xms32M -Xmx512M -Djdk. > > > module.main=jdk.jlink jdk.jlink/jdk.tools.jmod.Main create > > > --module-version 9-internal --os-name Linux --os-arch ppc64le > > > --os-version > > > 2.6 --modulepath /priv/d046063/output-jdk9-hs-comp-dbg/images/jmods > > > --hash-dependencies .* --exclude **_the.* --libs > > > > > > /priv/d046063/output-jdk9-hs-comp-dbg/support/modules_libs-stripped/java.base > > > --cmds > > > /priv/d046063/output-jdk9-hs-comp-dbg/support/modules_cmds-stripped/java.base > > > --config > > > /priv/d046063/output-jdk9-hs-comp-dbg/support/modules_conf/java.base > > > --class-path /priv/d046063/output-jdk9-hs-comp-dbg/jdk/modules/java.base > > > /priv/d046063/output-jdk9-hs-comp-dbg/support/jmods/java.base.jmod > > > > > > Host: ld9510, POWER8E (raw), altivec supported, 48 cores, 61G, # > > > Please check /etc/os-release for details about this release. 
> > > Time: Thu Apr 7 16:28:55 2016 CEST elapsed time: 0 seconds (0d 0h 0m 0s) > > > > > > --------------- T H R E A D --------------- > > > > > > Current thread (0x000010000429c800): JavaThread "C1 CompilerThread10" > > > daemon [_thread_in_vm, id=35404, > > > stack(0x000010006a800000,0x000010006ac00000)] > > > > > > > > > Current CompileTask: > > > C1: 761 3 3 java.lang.String::charAt (25 bytes) > > > > > > Stack: [0x000010006a800000,0x000010006ac00000], > > > sp=0x000010006abfc6c0, free space=4081k > > > Native frames: (J=compiled Java code, j=interpreted, Vv=VM code, C=native > > > code) > > > V [libjvm.so+0xf744d0] outputStream::do_vsnprintf_and_write(char > > > const*, char*, bool)+0x40 > > > V [libjvm.so+0xf74668] outputStream::print_cr(char const*, ...)+0x68 > > > V [libjvm.so+0x72189c] CodeBlob::print_on(outputStream*) const+0x50 > > > V [libjvm.so+0x723bdc] RuntimeBlob::print_on(outputStream*) const+0x40 > > > V [libjvm.so+0x721eb0] SingletonBlob::print_on(outputStream*) const+0x4c > > > V [libjvm.so+0x106d51c] RelocIterator::initialize(CompiledMethod*, > > > unsigned char*, unsigned char*)+0x170 > > > V [libjvm.so+0x5ae56c] RelocIterator::RelocIterator(CompiledMethod*, > > > unsigned char*, unsigned char*)+0x78 > > > V [libjvm.so+0x10719dc] > > > trampoline_stub_Relocation::get_trampoline_for(unsigned char*, > > > nmethod*)+0x78 > > > V [libjvm.so+0xefb80c] NativeCall::get_trampoline()+0x110 > > > V [libjvm.so+0x1076914] Relocation::pd_call_destination(unsigned > > > char*)+0x150 > > > V [libjvm.so+0x106f5fc] > > > CallRelocation::fix_relocation_after_move(CodeBuffer const*, > > > CodeBuffer*)+0x74 > > > V [libjvm.so+0x728898] CodeBuffer::relocate_code_to(CodeBuffer*) > > > const+0x390 > > > V [libjvm.so+0x728404] CodeBuffer::copy_code_to(CodeBlob*)+0x134 > > > V [libjvm.so+0x722670] CodeBuffer::copy_code_and_locs_to(CodeBlob*)+0x84 > > > V [libjvm.so+0x71f834] CodeBlob::CodeBlob(char const*, > > > CodeBlobLayout const&, CodeBuffer*, int, int, OopMapSet*, bool, > > > int)+0x320 > > > V [libjvm.so+0x7c52c8] CompiledMethod::CompiledMethod(Method*, char > > > const*, int, int, CodeBuffer*, int, int, OopMapSet*, bool)+0xd8 > > > V [libjvm.so+0xf01f58] nmethod::nmethod(Method*, int, int, int, > > > CodeOffsets*, int, DebugInformationRecorder*, Dependencies*, > > > CodeBuffer*, int, OopMapSet*, ExceptionHandlerTable*, > > > ImplicitExceptionTable*, AbstractCompiler*, int)+0xe0 > > > V [libjvm.so+0xf01610] nmethod::new_nmethod(methodHandle const&, > > > int, int, CodeOffsets*, int, DebugInformationRecorder*, Dependencies*, > > > CodeBuffer*, int, OopMapSet*, ExceptionHandlerTable*, > > > ImplicitExceptionTable*, AbstractCompiler*, int)+0x2c4 > > > V [libjvm.so+0x632970] ciEnv::register_method(ciMethod*, int, > > > CodeOffsets*, int, CodeBuffer*, int, OopMapSet*, > > > ExceptionHandlerTable*, ImplicitExceptionTable*, AbstractCompiler*, > > > bool, bool, RTMState)+0x560 > > > V [libjvm.so+0x48ee00] Compilation::install_code(int)+0x264 > > > V [libjvm.so+0x48eff8] Compilation::compile_method()+0x184 > > > V [libjvm.so+0x48f7a8] Compilation::Compilation(AbstractCompiler*, > > > ciEnv*, ciMethod*, int, BufferBlob*, DirectiveSet*)+0x288 > > > V [libjvm.so+0x4980d0] Compiler::compile_method(ciEnv*, ciMethod*, > > > int, DirectiveSet*)+0xc8 > > > V [libjvm.so+0x7b188c] > > > CompileBroker::invoke_compiler_on_method(CompileTask*)+0x590 > > > V [libjvm.so+0x7b07bc] CompileBroker::compiler_thread_loop()+0x310 > > > V [libjvm.so+0x11a614c] compiler_thread_entry(JavaThread*, Thread*)+0xa0 > > > V 
[libjvm.so+0x119f3a8] JavaThread::thread_main_inner()+0x1b4 > > > V [libjvm.so+0x119f1a4] JavaThread::run()+0x1b8 > > > V [libjvm.so+0xf53d90] java_start(Thread*)+0x204 > > > C [libpthread.so.0+0x8a64] start_thread+0xf4 > > > C [libc.so.6+0x1032a0] clone+0x98 > > > > > > I haven't identified the exact cause (will analyze it tomorrow) but > > > the stack trace indicates that it is indeed related to your changes. > > > > > > Besides that I have some comments: > > > > > > codeBuffer.hpp: > > > > > > 472 CodeSection* insts() { return &_insts; } > > > 475 const CodeSection* insts() const { return &_insts; } > > > > > > - do we really need both versions? > > > > > > codeBlob.hpp: > > > > > > 135 nmethod* as_nmethod_or_null() const { return > > > is_nmethod() ? (nmethod*) this : NULL; } > > > 136 nmethod* as_nmethod() const { > > > assert(is_nmethod(), "must be nmethod"); return (nmethod*) this; } > > > 137 CompiledMethod* as_compiled_method_or_null() const { return > > > is_compiled() ? (CompiledMethod*) this : NULL; } > > > 138 CompiledMethod* as_compiled_method() const { > > > assert(is_compiled(), "must be compiled"); return (CompiledMethod*) > > > this; } > > > 139 CodeBlob* as_codeblob_or_null() const { return > > > (CodeBlob*) this; } > > > > > > - I don't like this code. You make the getters 'const' which > > > implicitely makes 'this' a "pointer to const" but then the returned > > > pointer is a normal pointer to a non-const object and therefore you > > > have to statically cast away the "pointer to const" (that's why you > > > need the cast even in the case where you return a CodeBlob*). So > > > either remove the const qualifier from the method declarations or make > > > them return "pointers to const". And by the way, as_codeblob_or_null() > > > doesn't seemed to be used anywhere in the code, why do we need it at > > > all? > > > > > > - Why do we need the non-virtual methods is_nmethod() and > > > is_compiled() to manually simulate virtual behavior. Why can't we > > > simply make them virtual and implement them accordingly in nmathod and > > > CompiledMethod? > > > > > > Regards, > > > Volker > > > > > > On Thu, Apr 7, 2016 at 2:12 PM, Rickard B?ckman > > > wrote: > > > > Hi, > > > > > > > > can I please have review for this patch please? > > > > > > > > So far CodeBlobs have required all the data (metadata, oops, code, etc) > > > > to be in one continuous blob With this patch we are looking to change > > > > that. It's been done by changing offsets in CodeBlob to addresses, > > > > making some methods virtual to allow different behavior and also > > > > creating a couple of new classes. CompiledMethod now sits inbetween > > > > CodeBlob and nmethod. > > > > > > > > CR: https://bugs.openjdk.java.net/browse/JDK-8152664 > > > > Webrev: http://cr.openjdk.java.net/~rbackman/8152664/ > > > > > > > > Thanks > > > > /R > > > From adinn at redhat.com Mon Apr 11 12:03:06 2016 From: adinn at redhat.com (Andrew Dinn) Date: Mon, 11 Apr 2016 13:03:06 +0100 Subject: RFR (XL): 8152664 - Support non-continuous CodeBlobs in HotSpot In-Reply-To: References: <20160407121221.GQ9504@rbackman> Message-ID: <570B927A.1090204@redhat.com> Hi Volker, Sorry for the delayed response. On 08/04/16 19:12, Volker Simonis wrote: > I found the problem why your change crashes the VM on ppc (and I'm > pretty sure it will also crash on ARM - @Andrew, maybe you can try it > out?). 
It is caused by the following code in address > NativeCall::get_trampoline() which is also present on arm64: I have not yet tried this out on AArch64. However, I am sure it will fail exactly as per ppc for precisely the same reasons. IMHO the cleanest way to deal with this is to split the current code so that construction and relocation happen as two separate stages. That would require hiding the constructors (protected for non-leaf, private for leaf) providing (private virtual) relocate methods replacing the public API with create methods The latter API would ensure construction and relocation always happen as a single, atomic step. This will certainly be the easiest to maintain (especially for anyone new to the code). regards, Andrew Dinn ----------- Senior Principal Software Engineer Red Hat UK Ltd Registered in UK and Wales under Company Registration No. 3798903 Directors: Michael Cunningham (US), Michael O'Neill (Ireland), Paul Argiry (US) From aph at redhat.com Mon Apr 11 12:18:25 2016 From: aph at redhat.com (Andrew Haley) Date: Mon, 11 Apr 2016 13:18:25 +0100 Subject: RFR: 8152554: CompactStrings broken on AArch64 Message-ID: <570B9611.5020302@redhat.com> The way that we implemented CompactStrings on AArch64 was wrong. The simple technique of disabling the intrinsics when selecting patterns like with a predicate: predicate(!CompactStrings); does not work: all it does is lead to selection failures. Instead, we need to be smart about generating the intrinsics, like this: const bool Matcher::match_rule_supported(int opcode) { - // TODO - // identify extra cases that we might want to provide match rules for - // e.g. Op_StrEquals and other intrinsics + switch (opcode) { + case Op_StrComp: + case Op_StrIndexOf: + if (CompactStrings) return false; + break; + default: + break; + } + if (!has_match_rule(opcode)) { return false; } This webrev makes CompactStrings works, but doesn't have all the accelerated intrinsics yet. http://cr.openjdk.java.net/~aph/8152554/ OK? Andrew. From rwestrel at redhat.com Mon Apr 11 12:31:58 2016 From: rwestrel at redhat.com (Roland Westrelin) Date: Mon, 11 Apr 2016 14:31:58 +0200 Subject: RFR: 8152554: CompactStrings broken on AArch64 In-Reply-To: <570B9611.5020302@redhat.com> References: <570B9611.5020302@redhat.com> Message-ID: <570B993E.8090401@redhat.com> > http://cr.openjdk.java.net/~aph/8152554/ That looks good to me. It seems macroAssembler_aarch64.hpp has a declaration for string_compareL but it's never defined or used? Roland. From thomas.stuefe at gmail.com Mon Apr 11 12:39:30 2016 From: thomas.stuefe at gmail.com (=?UTF-8?Q?Thomas_St=C3=BCfe?=) Date: Mon, 11 Apr 2016 14:39:30 +0200 Subject: RFR: 8017629: G1: UseSHM in combination with a G1HeapRegionSize > os::large_page_size() falls back to use small pages In-Reply-To: <570B8481.8010108@oracle.com> References: <570B8481.8010108@oracle.com> Message-ID: Hi Stefan, short question, why the mmap before the shmat? Why not shmat right away at the requested address? Also note that mmap- and shmat-allocated memory may have different alignment requirements: mmap requires a page-aligned request address, whereas shmat requires alignment to SHMLBA, which may be multiple pages (e.g. for ARM: http://lxr.free-electrons.com/source/arch/arm/include/asm/shmparam.h#L9). So, for this shat-over-mmap trick to work, request address has to be aligned to SHMLBA, not just page size. 
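For illustration, a minimal check along those lines might be (the helper name here is made up, it is not something from the webrev):

    #include <sys/shm.h>   // defines SHMLBA
    #include <stdint.h>

    // shmat() needs the attach address aligned to SHMLBA, which on some
    // platforms (e.g. ARM) is larger than a single page.
    static bool is_aligned_to_shmlba(const char* req_addr) {
      return ((uintptr_t)req_addr % SHMLBA) == 0;
    }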
I see that you assert alignment of requ address to os::large_page_size(), which I would assume is a multiple of SHMLBA, but I am not sure of this. Kind Regards, Thomas On Mon, Apr 11, 2016 at 1:03 PM, Stefan Karlsson wrote: > Hi all, > > Please review this patch to enable SHM large page allocations even when > the requested alignment is larger than os::large_page_size(). > > http://cr.openjdk.java.net/~stefank/8017629/webrev.01 > https://bugs.openjdk.java.net/browse/JDK-8017629 > > G1 is affected by this bug since it requires the heap to start at an > address that is aligned with the heap region size. The patch fixes this by > changing the UseSHM large pages allocation code. First, virtual memory with > correct alignment is pre-reserved and then the large pages are attached to > this memory area. > > Tested with vm.gc.testlist and ExecuteInternaVMTests > > Thanks, > StefanK > From aph at redhat.com Mon Apr 11 12:52:03 2016 From: aph at redhat.com (Andrew Haley) Date: Mon, 11 Apr 2016 13:52:03 +0100 Subject: RFR: 8152554: CompactStrings broken on AArch64 In-Reply-To: <570B993E.8090401@redhat.com> References: <570B9611.5020302@redhat.com> <570B993E.8090401@redhat.com> Message-ID: <570B9DF3.5070701@redhat.com> On 04/11/2016 01:31 PM, Roland Westrelin wrote: > >> http://cr.openjdk.java.net/~aph/8152554/ > > That looks good to me. > > It seems macroAssembler_aarch64.hpp has a declaration for > string_compareL but it's never defined or used? So it does. Will fix. Thanks, Andrew. From stefan.karlsson at oracle.com Mon Apr 11 13:52:27 2016 From: stefan.karlsson at oracle.com (Stefan Karlsson) Date: Mon, 11 Apr 2016 15:52:27 +0200 Subject: RFR: 8017629: G1: UseSHM in combination with a G1HeapRegionSize > os::large_page_size() falls back to use small pages In-Reply-To: References: <570B8481.8010108@oracle.com> Message-ID: <570BAC1B.7040806@oracle.com> Hi Thomas, On 2016-04-11 14:39, Thomas St?fe wrote: > Hi Stefan, > > short question, why the mmap before the shmat? Why not shmat right > away at the requested address? If we have a requested_address we do exactly what you propose. if (req_addr == NULL && alignment > os::large_page_size()) { return shmat_with_large_alignment(shmid, bytes, alignment); } else { return shmat_with_normal_alignment(shmid, req_addr); } ... static char* shmat_with_normal_alignment(int shmid, char* req_addr) { char* addr = (char*)shmat(shmid, req_addr, 0); if ((intptr_t)addr == -1) { shm_warning_with_errno("Failed to attach shared memory."); return NULL; } return addr; } It's when you don't have a requested address that mmap is used to find a large enough virtual memory area. > > Also note that mmap- and shmat-allocated memory may have different > alignment requirements: mmap requires a page-aligned request address, > whereas shmat requires alignment to SHMLBA, which may be multiple > pages (e.g. for ARM: > http://lxr.free-electrons.com/source/arch/arm/include/asm/shmparam.h#L9). > So, for this shat-over-mmap trick to work, request address has to be > aligned to SHMLBA, not just page size. > > I see that you assert alignment of requ address to > os::large_page_size(), which I would assume is a multiple of SHMLBA, > but I am not sure of this. I've added some defensive code and asserts to catch this if/when this assumption fails: http://cr.openjdk.java.net/~stefank/8017629/webrev.02.delta/ http://cr.openjdk.java.net/~stefank/8017629/webrev.02 I need to verify that this works on other machines than my local Linux x64 machine. 
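For what it's worth, the overall shape of that no-req_addr path is roughly the following - a simplified sketch only, with a made-up name; the real shmat_with_large_alignment in the webrev differs in its details, error handling, and in how it deals with the race between releasing the reservation and attaching:

    #include <sys/mman.h>
    #include <sys/shm.h>
    #include <stdint.h>
    #include <stddef.h>

    // Reserve an over-sized chunk of address space, compute an address inside
    // it with the requested alignment (assumed to be a power of two), release
    // the reservation and attach the SysV segment there.
    static char* shmat_aligned_sketch(int shmid, size_t bytes, size_t alignment) {
      size_t extra = bytes + alignment;
      char* base = (char*)mmap(NULL, extra, PROT_NONE,
                               MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
      if (base == MAP_FAILED) {
        return NULL;
      }
      char* aligned = (char*)(((uintptr_t)base + alignment - 1) & ~(uintptr_t)(alignment - 1));
      munmap(base, extra);  // note: another thread could grab this range before shmat()
      char* addr = (char*)shmat(shmid, aligned, 0);
      return ((intptr_t)addr == -1) ? NULL : addr;
    }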
Thanks, StefanK > > Kind Regards, Thomas > > > > On Mon, Apr 11, 2016 at 1:03 PM, Stefan Karlsson > > wrote: > > Hi all, > > Please review this patch to enable SHM large page allocations even > when the requested alignment is larger than os::large_page_size(). > > http://cr.openjdk.java.net/~stefank/8017629/webrev.01 > > https://bugs.openjdk.java.net/browse/JDK-8017629 > > G1 is affected by this bug since it requires the heap to start at > an address that is aligned with the heap region size. The patch > fixes this by changing the UseSHM large pages allocation code. > First, virtual memory with correct alignment is pre-reserved and > then the large pages are attached to this memory area. > > Tested with vm.gc.testlist and ExecuteInternaVMTests > > Thanks, > StefanK > > From volker.simonis at gmail.com Mon Apr 11 14:16:33 2016 From: volker.simonis at gmail.com (Volker Simonis) Date: Mon, 11 Apr 2016 16:16:33 +0200 Subject: RFR (XL): 8152664 - Support non-continuous CodeBlobs in HotSpot In-Reply-To: <20160411115521.GU9504@rbackman> References: <20160407121221.GQ9504@rbackman> <20160411090501.GS9504@rbackman> <20160411115521.GU9504@rbackman> Message-ID: Rickard, Dean, I'm afraid all this hacks can not work. It doesn't help to make CompiledMethod::consts_begin() non-virtual and then calling a virtual function from it. The problem ist that at the point where you call consts_begin_v(), the vtable of 'this' is still the one of CodeBlob and this results in calling yet another arbitrary function: #0 CodeBlob::is_locked_by_vm (this=0x3fff607d0c10) at /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/codeBlob.hpp:168 #1 0x00003fffb6e38048 in CompiledMethod::consts_begin (this=0x3fff607d0c10) at /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/compiledMethod.hpp:255 #2 0x00003fffb758d658 in RelocIterator::initialize (this=0x3ffdfd3fc9a8, nm=0x3fff607d0c10, begin=0x3fff607d0fac "\001", limit=0x0) at /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.cpp:142 #3 0x00003fffb6ace56c in RelocIterator::RelocIterator (this=0x3ffdfd3fc9a8, nm=0x3fff607d0c10, begin=0x3fff607d0fac "\001", limit=0x0) at /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.hpp:1378 #4 0x00003fffb7591afc in trampoline_stub_Relocation::get_trampoline_for (call=0x3fff607d0fac "\001", code=0x3fff607d0c10) at /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.cpp:849 #5 0x00003fffb741ba4c in NativeCall::get_trampoline (this=0x3fff607d0fac) at /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/cpu/ppc/vm/nativeInst_ppc.cpp:146 #6 0x00003fffb7596a34 in Relocation::pd_call_destination (this=0x3ffdfd3fcd10, orig_addr=0x3fff6033482c "\001") at /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/cpu/ppc/vm/relocInfo_ppc.cpp:87 #7 0x00003fffb758f71c in CallRelocation::fix_relocation_after_move (this=0x3ffdfd3fcd10, src=0x3ffdfd3fdbc0, dest=0x3ffdfd3fcdd8) at /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.cpp:506 #8 0x00003fffb6c48914 in CodeBuffer::relocate_code_to (this=0x3ffdfd3fdbc0, dest=0x3ffdfd3fcdd8) at /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/asm/codeBuffer.cpp:812 #9 0x00003fffb6c48480 in CodeBuffer::copy_code_to (this=0x3ffdfd3fdbc0, dest_blob=0x3fff607d0c10) at /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/asm/codeBuffer.cpp:748 #10 0x00003fffb6c426ec in CodeBuffer::copy_code_and_locs_to (this=0x3ffdfd3fdbc0, blob=0x3fff607d0c10) at 
/usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/asm/codeBuffer.hpp:607 #11 0x00003fffb6c3f8b0 in CodeBlob::CodeBlob (this=0x3fff607d0c10, name=0x3fffb7a760f8 "nmethod", layout=..., cb=0x3ffdfd3fdbc0, frame_complete_offset=20, frame_size=14, oop_maps=0x3ffe0001ed00, caller_must_gc_arguments=false, subtype=8) at /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/codeBlob.cpp:117 #12 0x00003fffb6ce5360 in CompiledMethod::CompiledMethod (this=0x3fff607d0c10, method=0x3ffe1ddce568, name=0x3fffb7a760f8 "nmethod", size=1768, header_size=392, cb=0x3ffdfd3fdbc0, frame_complete_offset=20, frame_size=14, oop_maps=0x3ffe0001ed00, caller_must_gc_arguments=false) at /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/compiledMethod.cpp:42 #13 0x00003fffb7422198 in nmethod::nmethod (this=0x3fff607d0c10, method=0x3ffe1ddce568, nmethod_size=1768, compile_id=4, entry_bci=-1, offsets=0x3ffdfd3fdb98, orig_pc_offset=104, debug_info=0x3fffb03f2dc0, dependencies=0x3ffe0001ed70, code_buffer=0x3ffdfd3fdbc0, frame_size=14, oop_maps=0x3ffe0001ed00, handler_table=0x3ffdfd3fdb50, nul_chk_table=0x3ffdfd3fdb70, compiler=0x3fffb03d0cd0, comp_level=3) at /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/nmethod.cpp:706 #14 0x00003fffb7421850 in nmethod::new_nmethod (method=..., compile_id=4, entry_bci=-1, offsets=0x3ffdfd3fdb98, orig_pc_offset=104, debug_info=0x3fffb03f2dc0, dependencies=0x3ffe0001ed70, code_buffer=0x3ffdfd3fdbc0, frame_size=14, oop_maps=0x3ffe0001ed00, handler_table=0x3ffdfd3fdb50, nul_chk_table=0x3ffdfd3fdb70, compiler=0x3fffb03d0cd0, comp_level=3) at /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/nmethod.cpp:548 I think we really need to rework this as proposed by Andrew in his last mail. I'm working on such a fix. Regards, Volker On Mon, Apr 11, 2016 at 1:55 PM, Rickard B?ckman wrote: > Volker, > > here is the patch if you want to try it. > http://cr.openjdk.java.net/~rbackman/8152664/virtual.patch > > /R > > On 04/11, Rickard B?ckman wrote: > > Volker, > > > > thanks for finding this issue. > > > > I think that maybe the easiest fix is as follows: > > > > create new virtual methods in CompiledMethod: > > > > virtual address stub_begin_v() = 0; > > > > make the now virtual stub_begin non-virtual like: > > > > address stub_begin() { return stub_begin_v(); } > > > > in nmethod we override the stub_begin() with the normal this + offset > > compuation and implement stub_begin_v() to call stub_begin(). > > > > That will avoid all virtual calls in the case were we are not working on > > a CompiledMethod. > > > > It adds a couple of methods though. What do you think? > > > > /R > > > > On 04/08, Volker Simonis wrote: > > > Hi Rickard, > > > > > > I found the problem why your change crashes the VM on ppc (and I'm > pretty > > > sure it will also crash on ARM - @Andrew, maybe you can try it out?). > It is > > > caused by the following code in address NativeCall::get_trampoline() > which > > > is also present on arm64: > > > > > > address NativeCall::get_trampoline() { > > > address call_addr = addr_at(0); > > > CodeBlob *code = CodeCache::find_blob(call_addr); > > > ... > > > // If the codeBlob is not a nmethod, this is because we get here > from the > > > // CodeBlob constructor, which is called within the nmethod > constructor. 
> > > return trampoline_stub_Relocation::get_trampoline_for(call_addr, > > > (nmethod*)code); > > > } > > > > > > The comment explains the situation quite well: we're in the CodeBlob > > > constructor which was called by the CompiledMethod constructor which > was > > > called from the nmethod constructor: > > > > > > #3 0x00003fffb741b80c in NativeCall::get_trampoline > (this=0x3fff607d0fac) > > > at > > > > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/cpu/ppc/vm/nativeInst_ppc.cpp:146 > > > #4 0x00003fffb7596914 in Relocation::pd_call_destination > > > (this=0x3ffdfe3fcc90, orig_addr=0x3fff603b8a2c "\001") at > > > > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/cpu/ppc/vm/relocInfo_ppc.cpp:87 > > > #5 0x00003fffb758f5fc in CallRelocation::fix_relocation_after_move > > > (this=0x3ffdfe3fcc90, src=0x3ffdfe3fdb40, dest=0x3ffdfe3fcd58) at > > > > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.cpp:506 > > > #6 0x00003fffb6c48898 in CodeBuffer::relocate_code_to > > > (this=0x3ffdfe3fdb40, dest=0x3ffdfe3fcd58) at > > > > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/asm/codeBuffer.cpp:812 > > > #7 0x00003fffb6c48404 in CodeBuffer::copy_code_to > (this=0x3ffdfe3fdb40, > > > dest_blob=0x3fff607d0c10) at > > > > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/asm/codeBuffer.cpp:748 > > > #8 0x00003fffb6c42670 in CodeBuffer::copy_code_and_locs_to > > > (this=0x3ffdfe3fdb40, blob=0x3fff607d0c10) at > > > > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/asm/codeBuffer.hpp:607 > > > #9 0x00003fffb6c3f834 in CodeBlob::CodeBlob (this=0x3fff607d0c10, > > > name=0x3fffb7a75fd8 "nmethod", layout=..., cb=0x3ffdfe3fdb40, > > > frame_complete_offset=20, frame_size=14, oop_maps=0x3ffe00049620, > > > caller_must_gc_arguments=false, subtype=8) at > > > > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/codeBlob.cpp:117 > > > #10 0x00003fffb6ce52c8 in CompiledMethod::CompiledMethod > > > (this=0x3fff607d0c10, method=0x3ffe1ddce568, name=0x3fffb7a75fd8 > "nmethod", > > > size=1768, header_size=392, cb=0x3ffdfe3fdb40, > frame_complete_offset=20, > > > frame_size=14, oop_maps=0x3ffe00049620, > caller_must_gc_arguments=false) at > > > > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/compiledMethod.cpp:42 > > > #11 0x00003fffb7421f58 in nmethod::nmethod (this=0x3fff607d0c10, > > > method=0x3ffe1ddce568, nmethod_size=1768, compile_id=4, entry_bci=-1, > > > offsets=0x3ffdfe3fdb18, orig_pc_offset=104, debug_info=0x3fffb03d55f0, > > > dependencies=0x3ffe00049690, code_buffer=0x3ffdfe3fdb40, frame_size=14, > > > oop_maps=0x3ffe00049620, handler_table=0x3ffdfe3fdad0, > > > nul_chk_table=0x3ffdfe3fdaf0, compiler=0x3fffb03bc270, comp_level=3) at > > > > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/nmethod.cpp:706 > > > > > > Now we cast 'code' to 'nmethod' but at this point in time 'code' is > still a > > > CodeBlob from the C++ point of view (i.e. it still has a CodeBlob > vtable > > > (see [1] for an explanation)). 
> > > > > > Later on, in RelocIterator::initialize() we call virtual methods on the > > > nmethod which still has the vtable of a "CodeBlob" and this fails > badly: > > > > > > #0 SingletonBlob::print_on (this=0x3fff607d0c10, st=0x0) at > > > > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/codeBlob.cpp:584 > > > #1 0x00003fffb758d51c in RelocIterator::initialize > (this=0x3ffdfe3fc928, > > > nm=0x3fff607d0c10, begin=0x3fff607d0fac "\001", limit=0x0) at > > > > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.cpp:144 > > > #2 0x00003fffb6ace56c in RelocIterator::RelocIterator > > > (this=0x3ffdfe3fc928, nm=0x3fff607d0c10, begin=0x3fff607d0fac "\001", > > > limit=0x0) at > > > > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.hpp:1378 > > > #3 0x00003fffb75919dc in > trampoline_stub_Relocation::get_trampoline_for > > > (call=0x3fff607d0fac "\001", code=0x3fff607d0c10) at > > > > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.cpp:849 > > > #4 0x00003fffb741b80c in NativeCall::get_trampoline > (this=0x3fff607d0fac) > > > at > > > > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/cpu/ppc/vm/nativeInst_ppc.cpp:146 > > > > > > As you can see, we actually want to call nmethod::stub_begin() at > > > relocInfo.cpp:144 > > > > > > 142 _section_start[CodeBuffer::SECT_CONSTS] = nm->consts_begin(); > > > 143 _section_start[CodeBuffer::SECT_INSTS ] = nm->insts_begin() ; > > > 144 _section_start[CodeBuffer::SECT_STUBS ] = nm->stub_begin() ; > > > > > > but we actually end up in SingletonBlob::print_on() which is a > completely > > > different method. Notice that the call to nm->consts_begin() before > also > > > fails, but it doesn't crash the VM because it happens to call > > > SingletonBlob::verify() which has no bad side effect. The call to > > > nm->insts_begin() in line 143 is non-virtual and thus works fine. Here > are > > > the corresponding vtable slots in the CodeBlob vtable for > consts_begin() > > > and stub_begin() > > > > > > (gdb) p &nmethod::consts_begin > > > $76 = &virtual table offset 42 > > > (gdb) p &nmethod::stub_begin > > > $77 = &virtual table offset 44 > > > (gdb) p ((*(void ***)nm) + 1)[42] > > > $86 = (void *) 0x3fffb6c41df8 > > > (gdb) p ((*(void ***)nm) + 1)[44] > > > $87 = (void *) 0x3fffb6c41e64 const> > > > > > > As you can see, 'nm' is indeed a "CodeBlob" at this point: > > > > > > (gdb) p *(void ***)nm > > > $91 = (void **) 0x3fffb7befa00 > > > (gdb) p nm->print() > > > [CodeBlob (0x00003fff607d1090)] > > > Framesize: 14 > > > > > > The offending calls succeeded before your change, because they where > not > > > virtual. Any idea how we can fix this with the new class hierarchy? > > > > > > Regards, > > > Volker > > > > > > [1] > > > > http://stackoverflow.com/questions/6591859/when-does-the-vptr-pointing-to-vtable-get-initialized-for-a-polymorphic-class > > > > > > > > > > > > On Thu, Apr 7, 2016 at 5:50 PM, Volker Simonis < > volker.simonis at gmail.com> > > > wrote: > > > > > > > Hi Rickard, > > > > > > > > I'd also like to know what's the rational behind this quite large > > > > change. Do you expect some performance or memory consumption > > > > improvements or is this a prerequisite for another change which is > > > > still to come? > > > > > > > > The change itself currently doesn't work on ppc64 (neither on Linux > > > > nor on AIX). 
I get the following crash during the build when the > newly > > > > built Hotspot is JIT-compiling java.lang.String::charAt on C1 : > > > > > > > > # > > > > # A fatal error has been detected by the Java Runtime Environment: > > > > # > > > > # SIGSEGV (0xb) at pc=0x00001000012a44d0, pid=35331, tid=35404 > > > > # > > > > # JRE version: OpenJDK Runtime Environment (9.0) (slowdebug build > > > > 9-internal+0-2016-04-07-162501.d046063.jdk9-hs-comp) > > > > # Java VM: OpenJDK 64-Bit Server VM (slowdebug > > > > 9-internal+0-2016-04-07-162501.d046063.jdk9-hs-comp, mixed mode, > > > > tiered, compressed oo > > > > ps, serial gc, linux-ppc64le) > > > > # Problematic frame: > > > > # V [libjvm.so+0xf744d0] outputStream::do_vsnprintf_and_write(char > > > > const*, char*, bool)+0x40 > > > > # > > > > # No core dump will be written. Core dumps have been disabled. To > > > > enable core dumping, try "ulimit -c unlimited" before starting Java > > > > again > > > > # > > > > # If you would like to submit a bug report, please visit: > > > > # http://bugreport.java.com/bugreport/crash.jsp > > > > # > > > > > > > > --------------- S U M M A R Y ------------ > > > > > > > > Command Line: > > > > > -Dapplication.home=/sapmnt/ld9510/a/d046063/output-jdk9-hs-comp-dbg/jdk > > > > -Xms8m -XX:+UseSerialGC -Xms32M -Xmx512M -Djdk. > > > > module.main=jdk.jlink jdk.jlink/jdk.tools.jmod.Main create > > > > --module-version 9-internal --os-name Linux --os-arch ppc64le > > > > --os-version > > > > 2.6 --modulepath /priv/d046063/output-jdk9-hs-comp-dbg/images/jmods > > > > --hash-dependencies .* --exclude **_the.* --libs > > > > > > > > > /priv/d046063/output-jdk9-hs-comp-dbg/support/modules_libs-stripped/java.base > > > > --cmds > > > > > /priv/d046063/output-jdk9-hs-comp-dbg/support/modules_cmds-stripped/java.base > > > > --config > > > > /priv/d046063/output-jdk9-hs-comp-dbg/support/modules_conf/java.base > > > > --class-path > /priv/d046063/output-jdk9-hs-comp-dbg/jdk/modules/java.base > > > > /priv/d046063/output-jdk9-hs-comp-dbg/support/jmods/java.base.jmod > > > > > > > > Host: ld9510, POWER8E (raw), altivec supported, 48 cores, 61G, # > > > > Please check /etc/os-release for details about this release. 
> > > > Time: Thu Apr 7 16:28:55 2016 CEST elapsed time: 0 seconds (0d 0h > 0m 0s) > > > > > > > > --------------- T H R E A D --------------- > > > > > > > > Current thread (0x000010000429c800): JavaThread "C1 > CompilerThread10" > > > > daemon [_thread_in_vm, id=35404, > > > > stack(0x000010006a800000,0x000010006ac00000)] > > > > > > > > > > > > Current CompileTask: > > > > C1: 761 3 3 java.lang.String::charAt (25 bytes) > > > > > > > > Stack: [0x000010006a800000,0x000010006ac00000], > > > > sp=0x000010006abfc6c0, free space=4081k > > > > Native frames: (J=compiled Java code, j=interpreted, Vv=VM code, > C=native > > > > code) > > > > V [libjvm.so+0xf744d0] outputStream::do_vsnprintf_and_write(char > > > > const*, char*, bool)+0x40 > > > > V [libjvm.so+0xf74668] outputStream::print_cr(char const*, > ...)+0x68 > > > > V [libjvm.so+0x72189c] CodeBlob::print_on(outputStream*) const+0x50 > > > > V [libjvm.so+0x723bdc] RuntimeBlob::print_on(outputStream*) > const+0x40 > > > > V [libjvm.so+0x721eb0] SingletonBlob::print_on(outputStream*) > const+0x4c > > > > V [libjvm.so+0x106d51c] RelocIterator::initialize(CompiledMethod*, > > > > unsigned char*, unsigned char*)+0x170 > > > > V [libjvm.so+0x5ae56c] > RelocIterator::RelocIterator(CompiledMethod*, > > > > unsigned char*, unsigned char*)+0x78 > > > > V [libjvm.so+0x10719dc] > > > > trampoline_stub_Relocation::get_trampoline_for(unsigned char*, > > > > nmethod*)+0x78 > > > > V [libjvm.so+0xefb80c] NativeCall::get_trampoline()+0x110 > > > > V [libjvm.so+0x1076914] Relocation::pd_call_destination(unsigned > > > > char*)+0x150 > > > > V [libjvm.so+0x106f5fc] > > > > CallRelocation::fix_relocation_after_move(CodeBuffer const*, > > > > CodeBuffer*)+0x74 > > > > V [libjvm.so+0x728898] CodeBuffer::relocate_code_to(CodeBuffer*) > > > > const+0x390 > > > > V [libjvm.so+0x728404] CodeBuffer::copy_code_to(CodeBlob*)+0x134 > > > > V [libjvm.so+0x722670] > CodeBuffer::copy_code_and_locs_to(CodeBlob*)+0x84 > > > > V [libjvm.so+0x71f834] CodeBlob::CodeBlob(char const*, > > > > CodeBlobLayout const&, CodeBuffer*, int, int, OopMapSet*, bool, > > > > int)+0x320 > > > > V [libjvm.so+0x7c52c8] CompiledMethod::CompiledMethod(Method*, char > > > > const*, int, int, CodeBuffer*, int, int, OopMapSet*, bool)+0xd8 > > > > V [libjvm.so+0xf01f58] nmethod::nmethod(Method*, int, int, int, > > > > CodeOffsets*, int, DebugInformationRecorder*, Dependencies*, > > > > CodeBuffer*, int, OopMapSet*, ExceptionHandlerTable*, > > > > ImplicitExceptionTable*, AbstractCompiler*, int)+0xe0 > > > > V [libjvm.so+0xf01610] nmethod::new_nmethod(methodHandle const&, > > > > int, int, CodeOffsets*, int, DebugInformationRecorder*, > Dependencies*, > > > > CodeBuffer*, int, OopMapSet*, ExceptionHandlerTable*, > > > > ImplicitExceptionTable*, AbstractCompiler*, int)+0x2c4 > > > > V [libjvm.so+0x632970] ciEnv::register_method(ciMethod*, int, > > > > CodeOffsets*, int, CodeBuffer*, int, OopMapSet*, > > > > ExceptionHandlerTable*, ImplicitExceptionTable*, AbstractCompiler*, > > > > bool, bool, RTMState)+0x560 > > > > V [libjvm.so+0x48ee00] Compilation::install_code(int)+0x264 > > > > V [libjvm.so+0x48eff8] Compilation::compile_method()+0x184 > > > > V [libjvm.so+0x48f7a8] Compilation::Compilation(AbstractCompiler*, > > > > ciEnv*, ciMethod*, int, BufferBlob*, DirectiveSet*)+0x288 > > > > V [libjvm.so+0x4980d0] Compiler::compile_method(ciEnv*, ciMethod*, > > > > int, DirectiveSet*)+0xc8 > > > > V [libjvm.so+0x7b188c] > > > > CompileBroker::invoke_compiler_on_method(CompileTask*)+0x590 > > > > V 
[libjvm.so+0x7b07bc] CompileBroker::compiler_thread_loop()+0x310 > > > > V [libjvm.so+0x11a614c] compiler_thread_entry(JavaThread*, > Thread*)+0xa0 > > > > V [libjvm.so+0x119f3a8] JavaThread::thread_main_inner()+0x1b4 > > > > V [libjvm.so+0x119f1a4] JavaThread::run()+0x1b8 > > > > V [libjvm.so+0xf53d90] java_start(Thread*)+0x204 > > > > C [libpthread.so.0+0x8a64] start_thread+0xf4 > > > > C [libc.so.6+0x1032a0] clone+0x98 > > > > > > > > I haven't identified the exact cause (will analyze it tomorrow) but > > > > the stack trace indicates that it is indeed related to your changes. > > > > > > > > Besides that I have some comments: > > > > > > > > codeBuffer.hpp: > > > > > > > > 472 CodeSection* insts() { return &_insts; } > > > > 475 const CodeSection* insts() const { return &_insts; } > > > > > > > > - do we really need both versions? > > > > > > > > codeBlob.hpp: > > > > > > > > 135 nmethod* as_nmethod_or_null() const { return > > > > is_nmethod() ? (nmethod*) this : NULL; } > > > > 136 nmethod* as_nmethod() const { > > > > assert(is_nmethod(), "must be nmethod"); return (nmethod*) this; } > > > > 137 CompiledMethod* as_compiled_method_or_null() const { return > > > > is_compiled() ? (CompiledMethod*) this : NULL; } > > > > 138 CompiledMethod* as_compiled_method() const { > > > > assert(is_compiled(), "must be compiled"); return (CompiledMethod*) > > > > this; } > > > > 139 CodeBlob* as_codeblob_or_null() const { return > > > > (CodeBlob*) this; } > > > > > > > > - I don't like this code. You make the getters 'const' which > > > > implicitely makes 'this' a "pointer to const" but then the returned > > > > pointer is a normal pointer to a non-const object and therefore you > > > > have to statically cast away the "pointer to const" (that's why you > > > > need the cast even in the case where you return a CodeBlob*). So > > > > either remove the const qualifier from the method declarations or > make > > > > them return "pointers to const". And by the way, > as_codeblob_or_null() > > > > doesn't seemed to be used anywhere in the code, why do we need it at > > > > all? > > > > > > > > - Why do we need the non-virtual methods is_nmethod() and > > > > is_compiled() to manually simulate virtual behavior. Why can't we > > > > simply make them virtual and implement them accordingly in nmathod > and > > > > CompiledMethod? > > > > > > > > Regards, > > > > Volker > > > > > > > > On Thu, Apr 7, 2016 at 2:12 PM, Rickard B?ckman > > > > wrote: > > > > > Hi, > > > > > > > > > > can I please have review for this patch please? > > > > > > > > > > So far CodeBlobs have required all the data (metadata, oops, code, > etc) > > > > > to be in one continuous blob With this patch we are looking to > change > > > > > that. It's been done by changing offsets in CodeBlob to addresses, > > > > > making some methods virtual to allow different behavior and also > > > > > creating a couple of new classes. CompiledMethod now sits inbetween > > > > > CodeBlob and nmethod. > > > > > > > > > > CR: https://bugs.openjdk.java.net/browse/JDK-8152664 > > > > > Webrev: http://cr.openjdk.java.net/~rbackman/8152664/ > > > > > > > > > > Thanks > > > > > /R > > > > > From stefan.karlsson at oracle.com Mon Apr 11 14:16:34 2016 From: stefan.karlsson at oracle.com (Stefan Karlsson) Date: Mon, 11 Apr 2016 16:16:34 +0200 Subject: RFR: 8153967: Remove top.hpp Message-ID: <570BB1C2.9080808@oracle.com> Hi all, Please review this patch remove the top.hpp file. 
http://cr.openjdk.java.net/~stefank/8153967/webrev.01 https://bugs.openjdk.java.net/browse/JDK-8153967 It has been replaced by explicit includes and forward declarations, where needed. This is just a small step towards getting smaller include dependencies. Tested with JPRT Thanks, StefanK From adinn at redhat.com Mon Apr 11 14:26:16 2016 From: adinn at redhat.com (Andrew Dinn) Date: Mon, 11 Apr 2016 15:26:16 +0100 Subject: RFR (XL): 8152664 - Support non-continuous CodeBlobs in HotSpot In-Reply-To: References: <20160407121221.GQ9504@rbackman> <20160411090501.GS9504@rbackman> <20160411115521.GU9504@rbackman> Message-ID: <570BB408.2050508@redhat.com> On 11/04/16 15:16, Volker Simonis wrote: > I'm afraid all this hacks can not work. It doesn't help to make > CompiledMethod::consts_begin() non-virtual and then calling a virtual > function from it. The problem ist that at the point where you call > consts_begin_v(), the vtable of 'this' is still the one of CodeBlob and > this results in calling yet another arbitrary function: Yes, the root of this is that virtual calls don't virtualise below the current class when called in a constructor. Essentially, you are trying to call a subclass method before the subclass fields have had a chance to be initialised. C++ deliberately stops you doing that by only giving you a vtable for super S when initialising at super level S. If you try to call a virtual method it executes the implementation belonging to S. Of course, if you cast to a subclass and try to call a subclass method (as is happening here) you get the sort of chaos you would expect if you cast to some totally unrelated type and call a virtual method. Arguably this code was broke in the first place even when it called a non-virtual method. > I think we really need to rework this as proposed by Andrew in his last > mail. I'm working on such a fix. Ok, so I will wait for you to fix it and throw my 2 cents in then :-) Thanks for looking at it, Volker. regards, Andrew Dinn ----------- From jesper.wilhelmsson at oracle.com Mon Apr 11 15:06:05 2016 From: jesper.wilhelmsson at oracle.com (Jesper Wilhelmsson) Date: Mon, 11 Apr 2016 17:06:05 +0200 Subject: RFR: 8153967: Remove top.hpp In-Reply-To: <570BB1C2.9080808@oracle.com> References: <570BB1C2.9080808@oracle.com> Message-ID: <570BBD5D.90003@oracle.com> Looks good! /Jesper Den 11/4/16 kl. 16:16, skrev Stefan Karlsson: > Hi all, > > Please review this patch remove the top.hpp file. > > http://cr.openjdk.java.net/~stefank/8153967/webrev.01 > https://bugs.openjdk.java.net/browse/JDK-8153967 > > It has been replaced by explicit includes and forward declarations, where > needed. This is just a small step towards getting smaller include dependencies. > > Tested with JPRT > > Thanks, > StefanK From stefan.karlsson at oracle.com Mon Apr 11 15:11:31 2016 From: stefan.karlsson at oracle.com (Stefan Karlsson) Date: Mon, 11 Apr 2016 17:11:31 +0200 Subject: RFR: 8153967: Remove top.hpp In-Reply-To: <570BBD5D.90003@oracle.com> References: <570BB1C2.9080808@oracle.com> <570BBD5D.90003@oracle.com> Message-ID: <570BBEA3.8030008@oracle.com> Thanks, Jesper! StefanK On 2016-04-11 17:06, Jesper Wilhelmsson wrote: > Looks good! > /Jesper > > Den 11/4/16 kl. 16:16, skrev Stefan Karlsson: >> Hi all, >> >> Please review this patch remove the top.hpp file. >> >> http://cr.openjdk.java.net/~stefank/8153967/webrev.01 >> https://bugs.openjdk.java.net/browse/JDK-8153967 >> >> It has been replaced by explicit includes and forward declarations, >> where >> needed. 
This is just a small step towards getting smaller include >> dependencies. >> >> Tested with JPRT >> >> Thanks, >> StefanK From adinn at redhat.com Mon Apr 11 15:18:40 2016 From: adinn at redhat.com (Andrew Dinn) Date: Mon, 11 Apr 2016 16:18:40 +0100 Subject: RFR (XL): 8152664 - Support non-continuous CodeBlobs in HotSpot In-Reply-To: <20160407121221.GQ9504@rbackman> References: <20160407121221.GQ9504@rbackman> Message-ID: <570BC050.7030806@redhat.com> On 07/04/16 13:12, Rickard B?ckman wrote: > Hi, > > can I please have review for this patch please? > > So far CodeBlobs have required all the data (metadata, oops, code, etc) > to be in one continuous blob With this patch we are looking to change > that. It's been done by changing offsets in CodeBlob to addresses, > making some methods virtual to allow different behavior and also > creating a couple of new classes. CompiledMethod now sits inbetween > CodeBlob and nmethod. > > CR: https://bugs.openjdk.java.net/browse/JDK-8152664 > Webrev: http://cr.openjdk.java.net/~rbackman/8152664/ n.b. The patch to os_linux_aarch64.cpp has an error in it: @@ -389,7 +389,7 @@ // here if the underlying file has been truncated. // Do not crash the VM in such a case. CodeBlob* cb = CodeCache::find_blob_unsafe(pc); - nmethod* nm = (cb != NULL && cb->is_nmethod()) ? (nmethod*)cb : NULL; + CompiledMethod* nm = (cb != NULL) cb->as_compiled_method_or_null() : NULL; if (nm != NULL && nm->has_unsafe_access()) { stub = handle_unsafe_access(thread, pc); } The replaced line is missing the ? in the ? : ternary operator. regards, Andrew Dinn ----------- Senior Principal Software Engineer Red Hat UK Ltd Registered in UK and Wales under Company Registration No. 3798903 Directors: Michael Cunningham (US), Michael O'Neill (Ireland), Paul Argiry (US) From adinn at redhat.com Mon Apr 11 15:43:21 2016 From: adinn at redhat.com (Andrew Dinn) Date: Mon, 11 Apr 2016 16:43:21 +0100 Subject: RFR (XL): 8152664 - Support non-continuous CodeBlobs in HotSpot In-Reply-To: <570B927A.1090204@redhat.com> References: <20160407121221.GQ9504@rbackman> <570B927A.1090204@redhat.com> Message-ID: <570BC619.3020601@redhat.com> Hi Volker, On 11/04/16 13:03, Andrew Dinn wrote: > . . . > I have not yet tried this out on AArch64. However, I am sure it will > fail exactly as per ppc for precisely the same reasons. I'm just confirming that the exact same problem occurs on AArch64. Relevant details form the hs_err_pid log file are included below. regards, Andrew Dinn ----------- Senior Principal Software Engineer Red Hat UK Ltd Registered in UK and Wales under Company Registration No. 3798903 Directors: Michael Cunningham (US), Michael O'Neill (Ireland), Paul Argiry (US) # SIGSEGV (0xb) at pc=0x000003ff918a1acc, pid=25010, tid=25040 # . . . 
Stack: [0x000003ff21b80000,0x000003ff21d80000], sp=0x000003ff21d7cb50, free space=2034k Native frames: (J=compiled Java code, j=interpreted, Vv=VM code, C=native code) C [libc.so.6+0x71acc] vsnprintf+0x64 V [libjvm.so+0xcbe0ac] outputStream::do_vsnprintf(char*, unsigned long, char const*, std::__va_list, bool, unsigned long&)+0x1e8 V [libjvm.so+0xcbe2f8] outputStream::do_vsnprintf_and_write_with_scratch_buffer(char const*, std::__va_list, bool)+0x6c V [libjvm.so+0xcbe3a8] outputStream::do_vsnprintf_and_write(char const*, std::__va_list, bool)+0x64 V [libjvm.so+0xcbe5a0] outputStream::print_cr(char const*, ...)+0xc0 V [libjvm.so+0x62d118] CodeBlob::print_on(outputStream*) const+0x3c V [libjvm.so+0x62e850] RuntimeBlob::print_on(outputStream*) const+0x2c V [libjvm.so+0x62d4f4] SingletonBlob::print_on(outputStream*) const+0x34 V [libjvm.so+0xd7a468] RelocIterator::initialize(CompiledMethod*, unsigned char*, unsigned char*)+0x128 . . . From max.ockner at oracle.com Mon Apr 11 15:52:25 2016 From: max.ockner at oracle.com (Max Ockner) Date: Mon, 11 Apr 2016 11:52:25 -0400 Subject: RFR: 8145704: Make test for classinit logging more robust. Message-ID: <570BC839.8090505@oracle.com> Hello, Please review this small fix for the classinit logging test. This test was submitted before it was standard to test that logging disappears when the level is "off". The test now contains this case. bug: https://bugs.openjdk.java.net/browse/JDK-8145704 webrev: http://cr.openjdk.java.net/~mockner/8145704/ Test passes. Thanks, Max From coleen.phillimore at oracle.com Mon Apr 11 15:55:43 2016 From: coleen.phillimore at oracle.com (Coleen Phillimore) Date: Mon, 11 Apr 2016 11:55:43 -0400 Subject: RFR: 8153967: Remove top.hpp In-Reply-To: <570BBEA3.8030008@oracle.com> References: <570BB1C2.9080808@oracle.com> <570BBD5D.90003@oracle.com> <570BBEA3.8030008@oracle.com> Message-ID: <570BC8FF.2030403@oracle.com> This looks great. Never wanted to understand why we had top.hpp. Thanks! Coleen On 4/11/16 11:11 AM, Stefan Karlsson wrote: > Thanks, Jesper! > > StefanK > > On 2016-04-11 17:06, Jesper Wilhelmsson wrote: >> Looks good! >> /Jesper >> >> Den 11/4/16 kl. 16:16, skrev Stefan Karlsson: >>> Hi all, >>> >>> Please review this patch remove the top.hpp file. >>> >>> http://cr.openjdk.java.net/~stefank/8153967/webrev.01 >>> https://bugs.openjdk.java.net/browse/JDK-8153967 >>> >>> It has been replaced by explicit includes and forward declarations, >>> where >>> needed. This is just a small step towards getting smaller include >>> dependencies. >>> >>> Tested with JPRT >>> >>> Thanks, >>> StefanK > From stefan.karlsson at oracle.com Mon Apr 11 16:03:13 2016 From: stefan.karlsson at oracle.com (Stefan Karlsson) Date: Mon, 11 Apr 2016 18:03:13 +0200 Subject: RFR: 8153967: Remove top.hpp In-Reply-To: <570BC8FF.2030403@oracle.com> References: <570BB1C2.9080808@oracle.com> <570BBD5D.90003@oracle.com> <570BBEA3.8030008@oracle.com> <570BC8FF.2030403@oracle.com> Message-ID: <570BCAC1.5060803@oracle.com> Thanks, Coleen. StefanK On 2016-04-11 17:55, Coleen Phillimore wrote: > > This looks great. Never wanted to understand why we had top.hpp. > Thanks! > Coleen > > On 4/11/16 11:11 AM, Stefan Karlsson wrote: >> Thanks, Jesper! >> >> StefanK >> >> On 2016-04-11 17:06, Jesper Wilhelmsson wrote: >>> Looks good! >>> /Jesper >>> >>> Den 11/4/16 kl. 16:16, skrev Stefan Karlsson: >>>> Hi all, >>>> >>>> Please review this patch remove the top.hpp file. 
>>>> >>>> http://cr.openjdk.java.net/~stefank/8153967/webrev.01 >>>> https://bugs.openjdk.java.net/browse/JDK-8153967 >>>> >>>> It has been replaced by explicit includes and forward declarations, >>>> where >>>> needed. This is just a small step towards getting smaller include >>>> dependencies. >>>> >>>> Tested with JPRT >>>> >>>> Thanks, >>>> StefanK >> > From coleen.phillimore at oracle.com Mon Apr 11 16:11:51 2016 From: coleen.phillimore at oracle.com (Coleen Phillimore) Date: Mon, 11 Apr 2016 12:11:51 -0400 Subject: CFV: New hotspot Group Member: Christian Tornqvist Message-ID: <570BCCC7.7010602@oracle.com> I hereby nominate Christian Tornqvist (OpenJDK user name: ctornqvi) to Membership in the hotspot Group. Christian is an Oracle engineer, and is lead for the Hotspot runtime SQE team. He has been working in the HotSpot team since 2011. Christian is a Reviewer in the JDK9 project. Votes are due by Monday, April 25, 2016 at 12:00PM ET. Only current Members of the hotspot Group [1] are eligible to vote on this nomination. Votes must be cast in the open by replying to this mailing list. For Lazy Consensus voting instructions, see [2]. Coleen Phillimore [1] http://openjdk.java.net/census/#hotspot [2] http://openjdk.java.net/groups/#member-vote From max.ockner at oracle.com Mon Apr 11 16:18:14 2016 From: max.ockner at oracle.com (Max Ockner) Date: Mon, 11 Apr 2016 12:18:14 -0400 Subject: CFV: New hotspot Group Member: Christian Tornqvist In-Reply-To: <570BCCC7.7010602@oracle.com> References: <570BCCC7.7010602@oracle.com> Message-ID: <570BCE46.8020401@oracle.com> Vote: yes Max On 4/11/2016 12:11 PM, Coleen Phillimore wrote: > I hereby nominate Christian Tornqvist (OpenJDK user name: ctornqvi) to > Membership in the hotspot Group. > > Christian is an Oracle engineer, and is lead for the Hotspot runtime > SQE team. He has been working in the HotSpot team since 2011. > Christian is a Reviewer in the JDK9 project. > > Votes are due by Monday, April 25, 2016 at 12:00PM ET. > > Only current Members of the hotspot Group [1] are eligible to vote on > this nomination. Votes must be cast in the open by replying to this > mailing list. > For Lazy Consensus voting instructions, see [2]. > > Coleen Phillimore > > [1] http://openjdk.java.net/census/#hotspot > [2] http://openjdk.java.net/groups/#member-vote From igor.ignatyev at oracle.com Mon Apr 11 16:39:10 2016 From: igor.ignatyev at oracle.com (Igor Ignatyev) Date: Mon, 11 Apr 2016 09:39:10 -0700 Subject: CFV: New hotspot Group Member: Christian Tornqvist In-Reply-To: <570BCCC7.7010602@oracle.com> References: <570BCCC7.7010602@oracle.com> Message-ID: <042DEC03-4DCE-4DA0-BA78-C73B1158BE9E@oracle.com> Vote: yes ? Igor > On Apr 11, 2016, at 9:11 AM, Coleen Phillimore wrote: > > I hereby nominate Christian Tornqvist (OpenJDK user name: ctornqvi) to Membership in the hotspot Group. > > Christian is an Oracle engineer, and is lead for the Hotspot runtime SQE team. He has been working in the HotSpot team since 2011. Christian is a Reviewer in the JDK9 project. > > Votes are due by Monday, April 25, 2016 at 12:00PM ET. > > Only current Members of the hotspot Group [1] are eligible to vote on this nomination. Votes must be cast in the open by replying to this mailing list. > For Lazy Consensus voting instructions, see [2]. 
> > Coleen Phillimore > > [1] http://openjdk.java.net/census/#hotspot > [2] http://openjdk.java.net/groups/#member-vote From jesper.wilhelmsson at oracle.com Mon Apr 11 16:49:56 2016 From: jesper.wilhelmsson at oracle.com (Jesper Wilhelmsson) Date: Mon, 11 Apr 2016 18:49:56 +0200 Subject: CFV: New hotspot Group Member: Christian Tornqvist In-Reply-To: <570BCCC7.7010602@oracle.com> References: <570BCCC7.7010602@oracle.com> Message-ID: <570BD5B4.70007@oracle.com> Vote: yes /Jesper Den 11/4/16 kl. 18:11, skrev Coleen Phillimore: > I hereby nominate Christian Tornqvist (OpenJDK user name: ctornqvi) to > Membership in the hotspot Group. > > Christian is an Oracle engineer, and is lead for the Hotspot runtime SQE team. > He has been working in the HotSpot team since 2011. Christian is a Reviewer in > the JDK9 project. > > Votes are due by Monday, April 25, 2016 at 12:00PM ET. > > Only current Members of the hotspot Group [1] are eligible to vote on this > nomination. Votes must be cast in the open by replying to this mailing list. > For Lazy Consensus voting instructions, see [2]. > > Coleen Phillimore > > [1] http://openjdk.java.net/census/#hotspot > [2] http://openjdk.java.net/groups/#member-vote From brent.christian at oracle.com Mon Apr 4 23:45:31 2016 From: brent.christian at oracle.com (Brent Christian) Date: Mon, 04 Apr 2016 16:45:31 -0700 Subject: RFR 8153123 : Streamline StackWalker code Message-ID: <5702FC9B.7020600@oracle.com> Hi, I'd like to check in some footprint and code reduction changes to the java.lang.StackWalker implementation. Webrev: http://cr.openjdk.java.net/~bchristi/8153123/webrev.00/ Bug: https://bugs.openjdk.java.net/browse/JDK-8153123 A summary of the changes: * remove the "stackwalk.newThrowable" system property and "MemberNameInStackFrame" VM flag, originally left in to aid benchmarking * Streamline StackFrameInfo fields * Refactor/streamline StackStreamFactory (no more separate classes[]/StackFrame[] arrays, remove unneeded (for now) StackStreamFactory.StackTrace class) Given the hotspot changes, I plan to push this through hs-rt. Thanks, -Brent From brent.christian at oracle.com Tue Apr 5 23:48:24 2016 From: brent.christian at oracle.com (Brent Christian) Date: Tue, 05 Apr 2016 16:48:24 -0700 Subject: RFR 8153123 : Streamline StackWalker code In-Reply-To: <5704030F.80906@oracle.com> References: <5702FC9B.7020600@oracle.com> <5703F579.8050702@oracle.com> <5703F5E7.4060404@oracle.com> <5704030F.80906@oracle.com> Message-ID: <57044EC8.7050602@oracle.com> Thanks, Coleen. Coordinating method/function names on "to stack trace element" is a fine thing. I've done so in the updated webrev, and also implemented Claes's suggestion. http://cr.openjdk.java.net/~bchristi/8153123/webrev.01/index.html -Brent On 04/05/2016 11:25 AM, Coleen Phillimore wrote: > > A correction below. > > On 4/5/16 1:29 PM, Coleen Phillimore wrote: >> >> Also meant to include core-libs-dev in the email. >> Thanks, >> Coleen >> >> On 4/5/16 1:27 PM, Coleen Phillimore wrote: >>> >>> Hi, I've reviewed the hotspot changes and some of the jdk changes. >>> This looks really good. >>> >>> One comment about the jvm function names: >>> >>> I think FillInStackTraceElement is too close of a name to >>> Throwable::fill_in_stack_trace(). 
>>> >>> -JVM_ENTRY(void, JVM_SetMethodInfo(JNIEnv *env, jobject frame)) >>> +JVM_ENTRY(void, JVM_FillInStackTraceElement(JNIEnv *env, jobject >>> frame, jobject stack)) >>> JVMWrapper("JVM_SetMethodInfo"); >>> - Handle stackFrame(THREAD, JNIHandles::resolve(frame)); >>> - java_lang_StackFrameInfo::fill_methodInfo(stackFrame, THREAD); >>> + Handle stack_frame_info(THREAD, JNIHandles::resolve(frame)); >>> + Handle stack_trace_element(THREAD, JNIHandles::resolve(stack)); >>> + java_lang_StackFrameInfo::fill_methodInfo(stack_frame_info, >>> stack_trace_element, THREAD); JVM_END >>> >>> >>> And the function is called fill_methodInfo in the javaClasses function. >>> >>> I think the JVM and the java_lang_StackFrameInfo function names >>> should be closer. >>> >>> I wonder if the name JVM_ToStackFrameElement() and >>> java_lang_StackFrameInfo::to_stack_frame_element() would be better >>> and then it'd match the Java name. >>> > > I meant JVM_ToStackTraceElement() and > java_lang_StackFrameInfo::to_stack_trace_element(), since it's producing > a StackTraceElement. > > thanks, > Coleen >>> Thanks! >>> Coleen >>> >>> On 4/4/16 9:29 PM, Mandy Chung wrote: >>>>> On Apr 4, 2016, at 4:45 PM, Brent Christian >>>>> wrote: >>>>> >>>>> Hi, >>>>> >>>>> I'd like to check in some footprint and code reduction changes to >>>>> the java.lang.StackWalker implementation. >>>>> >>>>> Webrev: >>>>> http://cr.openjdk.java.net/~bchristi/8153123/webrev.00/ >>>>> Bug: >>>>> https://bugs.openjdk.java.net/browse/JDK-8153123 >>>>> >>>> This looks good to me. >>>> >>>> One thing to mention is that this patch is a follow-up work from the >>>> investigation on what it takes to enable Throwable to use >>>> StackWalker (JDK-8141239). The current built-in VM backtrace is very >>>> compact and performant. We have identified and prototypes the >>>> performance improvements if Throwable backtrace is generated using >>>> stack walker. There are some performance gaps that we agree to >>>> defer JDK-8141239 to a future release and improve the footprint >>>> performance and GC throughput concerns when MemberNames are stored >>>> in the throwable backtrace. >>>> >>>> Mandy >>>> >>> >> > From brent.christian at oracle.com Wed Apr 6 18:50:35 2016 From: brent.christian at oracle.com (Brent Christian) Date: Wed, 06 Apr 2016 11:50:35 -0700 Subject: RFR 8153123 : Streamline StackWalker code In-Reply-To: References: <5702FC9B.7020600@oracle.com> <5703F579.8050702@oracle.com> <5703F5E7.4060404@oracle.com> <5704030F.80906@oracle.com> <57044EC8.7050602@oracle.com> Message-ID: <57055A7B.30903@oracle.com> On 04/05/2016 10:36 PM, Mandy Chung wrote: > > Looks good. Nit: can you add a space after ?synchronized? in StackFrameInfo.java line 109: > 109 synchronized(this) { Yep - changed locally. 
Thanks, -Brent From brent.christian at oracle.com Thu Apr 7 23:33:52 2016 From: brent.christian at oracle.com (Brent Christian) Date: Thu, 07 Apr 2016 16:33:52 -0700 Subject: RFR 8153123 : Streamline StackWalker code In-Reply-To: <57064352.5010800@oracle.com> References: <5702FC9B.7020600@oracle.com> <57064352.5010800@oracle.com> Message-ID: <5706EE60.3010901@oracle.com> Hi, Daniel On 04/07/2016 04:24 AM, Daniel Fuchs wrote: > > In > http://cr.openjdk.java.net/~bchristi/8153123/webrev.00/hotspot/src/share/vm/prims/jvm.cpp.frames.html > > 548 objArrayOop fa = objArrayOop(JNIHandles::resolve_non_null(frames)); > 549 objArrayHandle frames_array_h(THREAD, fa); > 550 > 551 int limit = start_index + frame_count; > 552 if (frames_array_h.is_null()) { > 553 THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(), > "parameters and mode mismatch", NULL); > 554 } > > Can frames_array_h.is_null() ever be true, given that we used > JNIHandles::resolve_non_null(frames) at line 548? No! As you point out, it will assert out at 548. > I wonder if lines 552-554 are a remnant of the previous > implementation and could be removed now... You're absolutely right. > 589 Handle stack_frame_info(THREAD, JNIHandles::resolve(frame)); > 590 Handle stack_trace_element(THREAD, JNIHandles::resolve(stack)); > > Should these call JNIHandles::resolve_non_null instead? Yes! > http://cr.openjdk.java.net/~bchristi/8153123/webrev.00/jdk/src/java.base/share/classes/java/lang/StackFrameInfo.java.frames.html > > I'd be very tempted to make 'ste' private volatile. Sounds good to me. Thank you for having a look. I will send: http://cr.openjdk.java.net/~bchristi/8153123/webrev.02/ to hs-rt shortly. -Brent > On 05/04/16 01:45, Brent Christian wrote: >> Hi, >> >> I'd like to check in some footprint and code reduction changes to the >> java.lang.StackWalker implementation. >> >> Webrev: >> http://cr.openjdk.java.net/~bchristi/8153123/webrev.00/ >> Bug: >> https://bugs.openjdk.java.net/browse/JDK-8153123 >> >> A summary of the changes: >> >> * remove the "stackwalk.newThrowable" system property and >> "MemberNameInStackFrame" VM flag, originally left in to aid benchmarking >> >> * Streamline StackFrameInfo fields >> >> * Refactor/streamline StackStreamFactory (no more separate >> classes[]/StackFrame[] arrays, remove unneeded (for now) >> StackStreamFactory.StackTrace class) >> >> >> Given the hotspot changes, I plan to push this through hs-rt. >> >> Thanks, >> -Brent >> > From brent.christian at oracle.com Fri Apr 8 21:23:50 2016 From: brent.christian at oracle.com (Brent Christian) Date: Fri, 08 Apr 2016 14:23:50 -0700 Subject: RFR 8153123 : Streamline StackWalker code In-Reply-To: <5706EE60.3010901@oracle.com> References: <5702FC9B.7020600@oracle.com> <57064352.5010800@oracle.com> <5706EE60.3010901@oracle.com> Message-ID: <57082166.1030500@oracle.com> On 04/07/2016 04:33 PM, Brent Christian wrote: > I will send: > http://cr.openjdk.java.net/~bchristi/8153123/webrev.02/ > to hs-rt shortly. 
...after adding the following :) diff -r f628b87a6067 makefiles/symbols/symbols-unix --- a/makefiles/symbols/symbols-unix Fri Apr 08 13:14:23 2016 +0200 +++ b/makefiles/symbols/symbols-unix Fri Apr 08 12:22:14 2016 -0700 @@ -58,7 +58,6 @@ JVM_DumpAllStacks JVM_DumpThreads JVM_FillInStackTrace -JVM_FillStackFrames JVM_FindClassFromCaller JVM_FindClassFromClass JVM_FindLibraryEntry @@ -169,7 +168,6 @@ JVM_ResumeThread JVM_SetArrayElement JVM_SetClassSigners -JVM_SetMethodInfo JVM_SetNativeThreadName JVM_SetPrimitiveArrayElement JVM_SetThreadPriority @@ -178,6 +176,7 @@ JVM_StopThread JVM_SupportsCX8 JVM_SuspendThread +JVM_ToStackTraceElement JVM_TotalMemory JVM_UnloadLibrary JVM_Yield From coleen.phillimore at oracle.com Mon Apr 11 17:15:19 2016 From: coleen.phillimore at oracle.com (Coleen Phillimore) Date: Mon, 11 Apr 2016 13:15:19 -0400 Subject: CFV: New hotspot Group Member: Christian Tornqvist In-Reply-To: <570BCCC7.7010602@oracle.com> References: <570BCCC7.7010602@oracle.com> Message-ID: <570BDBA7.7030609@oracle.com> Vote: yes On 4/11/16 12:11 PM, Coleen Phillimore wrote: > I hereby nominate Christian Tornqvist (OpenJDK user name: ctornqvi) to > Membership in the hotspot Group. > > Christian is an Oracle engineer, and is lead for the Hotspot runtime > SQE team. He has been working in the HotSpot team since 2011. > Christian is a Reviewer in the JDK9 project. > > Votes are due by Monday, April 25, 2016 at 12:00PM ET. > > Only current Members of the hotspot Group [1] are eligible to vote on > this nomination. Votes must be cast in the open by replying to this > mailing list. > For Lazy Consensus voting instructions, see [2]. > > Coleen Phillimore > > [1] http://openjdk.java.net/census/#hotspot > [2] http://openjdk.java.net/groups/#member-vote From rachel.protacio at oracle.com Mon Apr 11 17:32:18 2016 From: rachel.protacio at oracle.com (Rachel Protacio) Date: Mon, 11 Apr 2016 13:32:18 -0400 Subject: CFV: New hotspot Group Member: Christian Tornqvist In-Reply-To: <570BCCC7.7010602@oracle.com> References: <570BCCC7.7010602@oracle.com> Message-ID: <570BDFA2.7040109@oracle.com> Vote: yes Rachel On 4/11/2016 12:11 PM, Coleen Phillimore wrote: > I hereby nominate Christian Tornqvist (OpenJDK user name: ctornqvi) to > Membership in the hotspot Group. > > Christian is an Oracle engineer, and is lead for the Hotspot runtime > SQE team. He has been working in the HotSpot team since 2011. > Christian is a Reviewer in the JDK9 project. > > Votes are due by Monday, April 25, 2016 at 12:00PM ET. > > Only current Members of the hotspot Group [1] are eligible to vote on > this nomination. Votes must be cast in the open by replying to this > mailing list. > For Lazy Consensus voting instructions, see [2]. > > Coleen Phillimore > > [1] http://openjdk.java.net/census/#hotspot > [2] http://openjdk.java.net/groups/#member-vote From george.triantafillou at oracle.com Mon Apr 11 17:51:54 2016 From: george.triantafillou at oracle.com (George Triantafillou) Date: Mon, 11 Apr 2016 13:51:54 -0400 Subject: RFR: 8145704: Make test for classinit logging more robust. In-Reply-To: <570BC839.8090505@oracle.com> References: <570BC839.8090505@oracle.com> Message-ID: <570BE43A.40901@oracle.com> Hi Max, Looks good. -George On 4/11/2016 11:52 AM, Max Ockner wrote: > Hello, > > Please review this small fix for the classinit logging test. This test > was submitted before it was standard to test that logging disappears > when the level is "off". The test now contains this case. 
> > bug: https://bugs.openjdk.java.net/browse/JDK-8145704 > webrev: http://cr.openjdk.java.net/~mockner/8145704/ > > Test passes. > > Thanks, > Max From jon.masamitsu at oracle.com Mon Apr 11 17:59:49 2016 From: jon.masamitsu at oracle.com (Jon Masamitsu) Date: Mon, 11 Apr 2016 10:59:49 -0700 Subject: CFV: New hotspot Group Member: Christian Tornqvist In-Reply-To: <570BCCC7.7010602@oracle.com> References: <570BCCC7.7010602@oracle.com> Message-ID: <570BE615.8050702@oracle.com> Vote: yes On 04/11/2016 09:11 AM, Coleen Phillimore wrote: > I hereby nominate Christian Tornqvist (OpenJDK user name: ctornqvi) to > Membership in the hotspot Group. > > Christian is an Oracle engineer, and is lead for the Hotspot runtime > SQE team. He has been working in the HotSpot team since 2011. > Christian is a Reviewer in the JDK9 project. > > Votes are due by Monday, April 25, 2016 at 12:00PM ET. > > Only current Members of the hotspot Group [1] are eligible to vote on > this nomination. Votes must be cast in the open by replying to this > mailing list. > For Lazy Consensus voting instructions, see [2]. > > Coleen Phillimore > > [1] http://openjdk.java.net/census/#hotspot > [2] http://openjdk.java.net/groups/#member-vote From harold.seigel at oracle.com Mon Apr 11 18:22:31 2016 From: harold.seigel at oracle.com (harold seigel) Date: Mon, 11 Apr 2016 14:22:31 -0400 Subject: RFR: 8145704: Make test for classinit logging more robust. In-Reply-To: <570BC839.8090505@oracle.com> References: <570BC839.8090505@oracle.com> Message-ID: <570BEB67.2060808@oracle.com> Hi Max, The changes look good. Harold On 4/11/2016 11:52 AM, Max Ockner wrote: > Hello, > > Please review this small fix for the classinit logging test. This test > was submitted before it was standard to test that logging disappears > when the level is "off". The test now contains this case. > > bug: https://bugs.openjdk.java.net/browse/JDK-8145704 > webrev: http://cr.openjdk.java.net/~mockner/8145704/ > > Test passes. > > Thanks, > Max From daniel.daugherty at oracle.com Mon Apr 11 18:33:43 2016 From: daniel.daugherty at oracle.com (Daniel D. Daugherty) Date: Mon, 11 Apr 2016 12:33:43 -0600 Subject: CFV: New hotspot Group Member: Christian Tornqvist In-Reply-To: <570BCCC7.7010602@oracle.com> References: <570BCCC7.7010602@oracle.com> Message-ID: <570BEE07.6000405@oracle.com> Vote: yes Dan On 4/11/16 10:11 AM, Coleen Phillimore wrote: > I hereby nominate Christian Tornqvist (OpenJDK user name: ctornqvi) to > Membership in the hotspot Group. > > Christian is an Oracle engineer, and is lead for the Hotspot runtime > SQE team. He has been working in the HotSpot team since 2011. > Christian is a Reviewer in the JDK9 project. > > Votes are due by Monday, April 25, 2016 at 12:00PM ET. > > Only current Members of the hotspot Group [1] are eligible to vote on > this nomination. Votes must be cast in the open by replying to this > mailing list. > For Lazy Consensus voting instructions, see [2]. 
> > Coleen Phillimore > > [1] http://openjdk.java.net/census/#hotspot > [2] http://openjdk.java.net/groups/#member-vote > From mikael.gerdin at oracle.com Mon Apr 11 18:52:40 2016 From: mikael.gerdin at oracle.com (Mikael Gerdin) Date: Mon, 11 Apr 2016 20:52:40 +0200 Subject: CFV: New hotspot Group Member: Christian Tornqvist In-Reply-To: <570BCCC7.7010602@oracle.com> References: <570BCCC7.7010602@oracle.com> Message-ID: <15406ab24c0.2781.2bd8b3e304f6b680ef02ac6d3e44f07f@oracle.com> Vote: yes On April 11, 2016 18:12:05 Coleen Phillimore wrote: > I hereby nominate Christian Tornqvist (OpenJDK user name: ctornqvi) to > Membership in the hotspot Group. > > Christian is an Oracle engineer, and is lead for the Hotspot runtime SQE > team. He has been working in the HotSpot team since 2011. Christian is a > Reviewer in the JDK9 project. > > Votes are due by Monday, April 25, 2016 at 12:00PM ET. > > Only current Members of the hotspot Group [1] are eligible to vote on > this nomination. Votes must be cast in the open by replying to this > mailing list. > For Lazy Consensus voting instructions, see [2]. > > Coleen Phillimore > > [1] http://openjdk.java.net/census/#hotspot > [2] http://openjdk.java.net/groups/#member-vote From jiangli.zhou at oracle.com Mon Apr 11 18:59:26 2016 From: jiangli.zhou at oracle.com (Jiangli Zhou) Date: Mon, 11 Apr 2016 11:59:26 -0700 Subject: CFV: New hotspot Group Member: Christian Tornqvist In-Reply-To: <570BCCC7.7010602@oracle.com> References: <570BCCC7.7010602@oracle.com> Message-ID: <376EEBE1-62FE-4DA2-9C2A-4DCA0078A1A4@oracle.com> Vote: yes Thanks, Jiangli > On Apr 11, 2016, at 9:11 AM, Coleen Phillimore wrote: > > I hereby nominate Christian Tornqvist (OpenJDK user name: ctornqvi) to Membership in the hotspot Group. > > Christian is an Oracle engineer, and is lead for the Hotspot runtime SQE team. He has been working in the HotSpot team since 2011. Christian is a Reviewer in the JDK9 project. > > Votes are due by Monday, April 25, 2016 at 12:00PM ET. > > Only current Members of the hotspot Group [1] are eligible to vote on this nomination. Votes must be cast in the open by replying to this mailing list. > For Lazy Consensus voting instructions, see [2]. > > Coleen Phillimore > > [1] http://openjdk.java.net/census/#hotspot > [2] http://openjdk.java.net/groups/#member-vote From dean.long at oracle.com Mon Apr 11 19:03:06 2016 From: dean.long at oracle.com (Dean Long) Date: Mon, 11 Apr 2016 12:03:06 -0700 Subject: RFR (XL): 8152664 - Support non-continuous CodeBlobs in HotSpot In-Reply-To: References: <20160407121221.GQ9504@rbackman> <57080EDD.7040701@oracle.com> <5708117B.4060705@oracle.com> <57082A8C.6030800@oracle.com> Message-ID: <570BF4EA.6080504@oracle.com> Sorry, I think I put the call to CodeBlob::initialize(cb) in the CompiledMethod constructor when it really should be in the two nmethod constructors. 
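Dean's point about where the CodeBlob::initialize(cb) call belongs is an instance of a general C++ pattern: work that needs the complete object (here, copying code, which goes through virtual layout accessors) has to wait until the most-derived constructor runs. The stand-alone sketch below uses purely hypothetical class names that only stand in for the CodeBlob/CompiledMethod/nmethod hierarchy; it illustrates the idiom being discussed, not the proposed HotSpot code.

class Blob {                                  // stands in for CodeBlob
 public:
  virtual ~Blob() {}
 protected:
  Blob() {}                                   // trivial field setup only, no virtual calls here
  virtual unsigned char* stub_begin() = 0;    // illustrative virtual accessor
  void copy_code() {
    // Needs the most-derived vtable, so it may only be called from the
    // leaf constructor, never from Blob's or Method's own constructors.
    unsigned char* s = stub_begin();
    (void)s;
  }
};

class Method : public Blob {                  // stands in for CompiledMethod
 protected:
  Method() {}                                 // deliberately does NOT call copy_code()
};

class NMethod : public Method {               // stands in for nmethod
 public:
  NMethod() { copy_code(); }                  // safe: NMethod::stub_begin() is in place now
 protected:
  virtual unsigned char* stub_begin() { return _buf; }
 private:
  unsigned char _buf[16];
};

int main() {
  NMethod nm;                                 // copy_code() runs with the full object available
  return 0;
}

Moving the call "into the two nmethod constructors" amounts to the same structure: the intermediate CompiledMethod-like class stays passive and only the leaf type triggers the copy.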
dl On 4/11/2016 2:48 AM, Volker Simonis wrote: > No, unfortunately not: > > # Internal Error > (/usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/codeCache.cpp:577), > pid=29667, tid=29686 > # assert(cb->is_nmethod()) failed: did not find an nmethod > > Current CompileTask: > C1: 651 4 3 java.lang.StringLatin1::charAt (28 bytes) > > Stack: [0x000010006b400000,0x000010006b800000], > sp=0x000010006b7fc820, free space=4082k > Native frames: (J=compiled Java code, j=interpreted, Vv=VM code, > C=native code) > V [libjvm.so+0x123b52c] VMError::report(outputStream*, bool)+0x12fc > V [libjvm.so+0x123d928] VMError::report_and_die(int, char const*, > char const*, char*, Thread*, unsigned char*, void*, void*, char > const*, int, unsigned long)+0x5bc > V [libjvm.so+0x123d25c] VMError::report_and_die(Thread*, char > const*, int, char const*, char const*, char*)+0x84 > V [libjvm.so+0x84100c] report_vm_error(char const*, int, char > const*, char const*, ...)+0xcc > V [libjvm.so+0x7314f8] CodeCache::find_nmethod(void*)+0x74 > V [libjvm.so+0xefb7dc] NativeCall::get_trampoline()+0x44 > V [libjvm.so+0x10769cc] Relocation::pd_call_destination(unsigned > char*)+0x150 > V [libjvm.so+0x106f6b4] > CallRelocation::fix_relocation_after_move(CodeBuffer const*, > CodeBuffer*)+0x74 > V [libjvm.so+0x728918] CodeBuffer::relocate_code_to(CodeBuffer*) > const+0x390 > V [libjvm.so+0x728484] CodeBuffer::copy_code_to(CodeBlob*)+0x134 > V [libjvm.so+0x7226f0] CodeBuffer::copy_code_and_locs_to(CodeBlob*)+0x84 > V [libjvm.so+0x71f8f8] CodeBlob::initialize(CodeBuffer*)+0x3c > V [libjvm.so+0x7c539c] CompiledMethod::CompiledMethod(Method*, char > const*, int, int, CodeBuffer*, int, int, OopMapSet*, bool)+0x12c > V [libjvm.so+0xf02010] nmethod::nmethod(Method*, int, int, int, > CodeOffsets*, int, DebugInformationRecorder*, Dependencies*, > CodeBuffer*, int, OopMapSet*, ExceptionHandlerTable*, > ImplicitExceptionTable*, AbstractCompiler*, int)+0xe0 > V [libjvm.so+0xf016c8] nmethod::new_nmethod(methodHandle const&, > int, int, CodeOffsets*, int, DebugInformationRecorder*, Dependencies*, > CodeBuffer*, int, OopMapSet*, ExceptionHandlerTable*, > ImplicitExceptionTable*, AbstractCompiler*, int)+0x2c4 > V [libjvm.so+0x632970] ciEnv::register_method(ciMethod*, int, > CodeOffsets*, int, CodeBuffer*, int, OopMapSet*, > ExceptionHandlerTable*, ImplicitExceptionTable*, AbstractCompiler*, > bool, bool, RTMState)+0x560 > V [libjvm.so+0x48ee00] Compilation::install_code(int)+0x264 > V [libjvm.so+0x48eff8] Compilation::compile_method()+0x184 > V [libjvm.so+0x48f7a8] Compilation::Compilation(AbstractCompiler*, > ciEnv*, ciMethod*, int, BufferBlob*, DirectiveSet*)+0x288 > V [libjvm.so+0x4980d0] Compiler::compile_method(ciEnv*, ciMethod*, > int, DirectiveSet*)+0xc8 > > I'm currently trying to find another solution... > > Regards, > Volker > > > > On Sat, Apr 9, 2016 at 12:02 AM, Dean Long > wrote: > > Volker, does this patch fix the problem? > > http://cr.openjdk.java.net/~dlong/8151956/8151956.patch > > > dl > > > On 4/8/2016 1:15 PM, Dean Long wrote: > > I was able to find this: > > https://isocpp.org/wiki/faq/strange-inheritance#calling-virtuals-from-ctor-idiom > > > dl > > On 4/8/2016 1:04 PM, Dean Long wrote: > > Hi Volker. I noticed this problem before and filed > 8151956. Making those member functions > non-virtual may solve this particular problem, but as the > code evolves we may hit it > again if we ever call a virtual member function by accident. 
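The isocpp FAQ entry linked above describes the rule at work in the crash analyzed below: while a base-class constructor (or destructor) is running, the object's dynamic type is still the base class, so virtual calls resolve to the base-class implementations and the derived overrides are unreachable. A minimal, self-contained illustration, not taken from the HotSpot sources:

#include <iostream>

struct Base {
  Base() {
    hook();               // resolves to Base::hook even while a Derived is being constructed
  }
  virtual ~Base() {}
  virtual void hook() { std::cout << "Base::hook" << std::endl; }
};

struct Derived : Base {
  virtual void hook() { std::cout << "Derived::hook" << std::endl; }
};

int main() {
  Derived d;              // prints "Base::hook" - the Derived vtable is not installed yet
  d.hook();               // prints "Derived::hook" once construction has finished
  return 0;
}

In the PPC trace further down, the same effect shows up indirectly: code reached from the CodeBlob constructor casts the object to nmethod* and then makes virtual calls, which still dispatch through CodeBlob's vtable.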
> > I'm not a C++ expert, but if we declared those functions > as virtual in CodeBlob, then would > that work? It doesn't seem ideal, however. I would > rather not call out from the CodeBlob > constructor at all, but instead do the work in the > subclass constructor. Let's say we move > the call to cb->copy_code_and_locs_to() to a separate > function. Is there a C++ idiom > for making sure all subclasses of CodeBlob call it? The > only think I can think of is to set > an "initialized" flag and to check it in strategic places. > > dl > > On 4/8/2016 11:12 AM, Volker Simonis wrote: > > Hi Rickard, > > I found the problem why your change crashes the VM on > ppc (and I'm pretty > sure it will also crash on ARM - @Andrew, maybe you > can try it out?). It is > caused by the following code in address > NativeCall::get_trampoline() which > is also present on arm64: > > address NativeCall::get_trampoline() { > address call_addr = addr_at(0); > CodeBlob *code = CodeCache::find_blob(call_addr); > ... > // If the codeBlob is not a nmethod, this is > because we get here from the > // CodeBlob constructor, which is called within the > nmethod constructor. > return > trampoline_stub_Relocation::get_trampoline_for(call_addr, > (nmethod*)code); > } > > The comment explains the situation quite well: we're > in the CodeBlob > constructor which was called by the CompiledMethod > constructor which was > called from the nmethod constructor: > > #3 0x00003fffb741b80c in NativeCall::get_trampoline > (this=0x3fff607d0fac) > at > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/cpu/ppc/vm/nativeInst_ppc.cpp:146 > > #4 0x00003fffb7596914 in Relocation::pd_call_destination > (this=0x3ffdfe3fcc90, orig_addr=0x3fff603b8a2c "\001") at > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/cpu/ppc/vm/relocInfo_ppc.cpp:87 > > #5 0x00003fffb758f5fc in > CallRelocation::fix_relocation_after_move > (this=0x3ffdfe3fcc90, src=0x3ffdfe3fdb40, > dest=0x3ffdfe3fcd58) at > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.cpp:506 > > #6 0x00003fffb6c48898 in CodeBuffer::relocate_code_to > (this=0x3ffdfe3fdb40, dest=0x3ffdfe3fcd58) at > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/asm/codeBuffer.cpp:812 > > #7 0x00003fffb6c48404 in CodeBuffer::copy_code_to > (this=0x3ffdfe3fdb40, > dest_blob=0x3fff607d0c10) at > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/asm/codeBuffer.cpp:748 > > #8 0x00003fffb6c42670 in > CodeBuffer::copy_code_and_locs_to > (this=0x3ffdfe3fdb40, blob=0x3fff607d0c10) at > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/asm/codeBuffer.hpp:607 > > #9 0x00003fffb6c3f834 in CodeBlob::CodeBlob > (this=0x3fff607d0c10, > name=0x3fffb7a75fd8 "nmethod", layout=..., > cb=0x3ffdfe3fdb40, > frame_complete_offset=20, frame_size=14, > oop_maps=0x3ffe00049620, > caller_must_gc_arguments=false, subtype=8) at > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/codeBlob.cpp:117 > > #10 0x00003fffb6ce52c8 in CompiledMethod::CompiledMethod > (this=0x3fff607d0c10, method=0x3ffe1ddce568, > name=0x3fffb7a75fd8 "nmethod", > size=1768, header_size=392, cb=0x3ffdfe3fdb40, > frame_complete_offset=20, > frame_size=14, oop_maps=0x3ffe00049620, > caller_must_gc_arguments=false) at > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/compiledMethod.cpp:42 > > #11 0x00003fffb7421f58 in nmethod::nmethod > (this=0x3fff607d0c10, > method=0x3ffe1ddce568, nmethod_size=1768, > compile_id=4, entry_bci=-1, > offsets=0x3ffdfe3fdb18, orig_pc_offset=104, > 
debug_info=0x3fffb03d55f0, > dependencies=0x3ffe00049690, > code_buffer=0x3ffdfe3fdb40, frame_size=14, > oop_maps=0x3ffe00049620, handler_table=0x3ffdfe3fdad0, > nul_chk_table=0x3ffdfe3fdaf0, compiler=0x3fffb03bc270, > comp_level=3) at > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/nmethod.cpp:706 > > > Now we cast 'code' to 'nmethod' but at this point in > time 'code' is still a > CodeBlob from the C++ point of view (i.e. it still has > a CodeBlob vtable > (see [1] for an explanation)). > > Later on, in RelocIterator::initialize() we call > virtual methods on the > nmethod which still has the vtable of a "CodeBlob" and > this fails badly: > > #0 SingletonBlob::print_on (this=0x3fff607d0c10, > st=0x0) at > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/codeBlob.cpp:584 > > #1 0x00003fffb758d51c in RelocIterator::initialize > (this=0x3ffdfe3fc928, > nm=0x3fff607d0c10, begin=0x3fff607d0fac "\001", > limit=0x0) at > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.cpp:144 > > #2 0x00003fffb6ace56c in RelocIterator::RelocIterator > (this=0x3ffdfe3fc928, nm=0x3fff607d0c10, > begin=0x3fff607d0fac "\001", > limit=0x0) at > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.hpp:1378 > > #3 0x00003fffb75919dc in > trampoline_stub_Relocation::get_trampoline_for > (call=0x3fff607d0fac "\001", code=0x3fff607d0c10) at > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.cpp:849 > > #4 0x00003fffb741b80c in NativeCall::get_trampoline > (this=0x3fff607d0fac) > at > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/cpu/ppc/vm/nativeInst_ppc.cpp:146 > > > As you can see, we actually want to call > nmethod::stub_begin() at > relocInfo.cpp:144 > > 142 _section_start[CodeBuffer::SECT_CONSTS] = > nm->consts_begin(); > 143 _section_start[CodeBuffer::SECT_INSTS ] = > nm->insts_begin() ; > 144 _section_start[CodeBuffer::SECT_STUBS ] = > nm->stub_begin() ; > > but we actually end up in SingletonBlob::print_on() > which is a completely > different method. Notice that the call to > nm->consts_begin() before also > fails, but it doesn't crash the VM because it happens > to call > SingletonBlob::verify() which has no bad side effect. > The call to > nm->insts_begin() in line 143 is non-virtual and thus > works fine. Here are > the corresponding vtable slots in the CodeBlob vtable > for consts_begin() > and stub_begin() > > (gdb) p &nmethod::consts_begin > $76 = &virtual table offset 42 > (gdb) p &nmethod::stub_begin > $77 = &virtual table offset 44 > (gdb) p ((*(void ***)nm) + 1)[42] > $86 = (void *) 0x3fffb6c41df8 > (gdb) p ((*(void ***)nm) + 1)[44] > $87 = (void *) 0x3fffb6c41e64 > > > As you can see, 'nm' is indeed a "CodeBlob" at this point: > > (gdb) p *(void ***)nm > $91 = (void **) 0x3fffb7befa00 > (gdb) p nm->print() > [CodeBlob (0x00003fff607d1090)] > Framesize: 14 > > The offending calls succeeded before your change, > because they where not > virtual. Any idea how we can fix this with the new > class hierarchy? > > Regards, > Volker > > [1] > http://stackoverflow.com/questions/6591859/when-does-the-vptr-pointing-to-vtable-get-initialized-for-a-polymorphic-class > > > > > On Thu, Apr 7, 2016 at 5:50 PM, Volker Simonis > > > wrote: > > Hi Rickard, > > I'd also like to know what's the rational behind > this quite large > change. Do you expect some performance or memory > consumption > improvements or is this a prerequisite for another > change which is > still to come? 
> > The change itself currently doesn't work on ppc64 > (neither on Linux > nor on AIX). I get the following crash during the > build when the newly > built Hotspot is JIT-compiling > java.lang.String::charAt on C1 : > > # > # A fatal error has been detected by the Java > Runtime Environment: > # > # SIGSEGV (0xb) at pc=0x00001000012a44d0, > pid=35331, tid=35404 > # > # JRE version: OpenJDK Runtime Environment (9.0) > (slowdebug build > 9-internal+0-2016-04-07-162501.d046063.jdk9-hs-comp) > # Java VM: OpenJDK 64-Bit Server VM (slowdebug > 9-internal+0-2016-04-07-162501.d046063.jdk9-hs-comp, > mixed mode, > tiered, compressed oo > ps, serial gc, linux-ppc64le) > # Problematic frame: > # V [libjvm.so+0xf744d0] > outputStream::do_vsnprintf_and_write(char > const*, char*, bool)+0x40 > # > # No core dump will be written. Core dumps have > been disabled. To > enable core dumping, try "ulimit -c unlimited" > before starting Java > again > # > # If you would like to submit a bug report, please > visit: > # http://bugreport.java.com/bugreport/crash.jsp > # > > --------------- S U M M A R Y ------------ > > Command Line: > -Dapplication.home=/sapmnt/ld9510/a/d046063/output-jdk9-hs-comp-dbg/jdk > > -Xms8m -XX:+UseSerialGC -Xms32M -Xmx512M -Djdk. > module.main=jdk.jlink > jdk.jlink/jdk.tools.jmod.Main create > --module-version 9-internal --os-name Linux > --os-arch ppc64le > --os-version > 2.6 --modulepath > /priv/d046063/output-jdk9-hs-comp-dbg/images/jmods > --hash-dependencies .* --exclude **_the.* --libs > > /priv/d046063/output-jdk9-hs-comp-dbg/support/modules_libs-stripped/java.base > > --cmds > /priv/d046063/output-jdk9-hs-comp-dbg/support/modules_cmds-stripped/java.base > > --config > /priv/d046063/output-jdk9-hs-comp-dbg/support/modules_conf/java.base > --class-path > /priv/d046063/output-jdk9-hs-comp-dbg/jdk/modules/java.base > /priv/d046063/output-jdk9-hs-comp-dbg/support/jmods/java.base.jmod > > Host: ld9510, POWER8E (raw), altivec supported, 48 > cores, 61G, # > Please check /etc/os-release for details about > this release. 
> Time: Thu Apr 7 16:28:55 2016 CEST elapsed time: > 0 seconds (0d 0h 0m 0s) > > --------------- T H R E A D --------------- > > Current thread (0x000010000429c800): JavaThread > "C1 CompilerThread10" > daemon [_thread_in_vm, id=35404, > stack(0x000010006a800000,0x000010006ac00000)] > > > Current CompileTask: > C1: 761 3 3 java.lang.String::charAt > (25 bytes) > > Stack: [0x000010006a800000,0x000010006ac00000], > sp=0x000010006abfc6c0, free space=4081k > Native frames: (J=compiled Java code, > j=interpreted, Vv=VM code, C=native > code) > V [libjvm.so+0xf744d0] > outputStream::do_vsnprintf_and_write(char > const*, char*, bool)+0x40 > V [libjvm.so+0xf74668] > outputStream::print_cr(char const*, ...)+0x68 > V [libjvm.so+0x72189c] > CodeBlob::print_on(outputStream*) const+0x50 > V [libjvm.so+0x723bdc] > RuntimeBlob::print_on(outputStream*) const+0x40 > V [libjvm.so+0x721eb0] > SingletonBlob::print_on(outputStream*) const+0x4c > V [libjvm.so+0x106d51c] > RelocIterator::initialize(CompiledMethod*, > unsigned char*, unsigned char*)+0x170 > V [libjvm.so+0x5ae56c] > RelocIterator::RelocIterator(CompiledMethod*, > unsigned char*, unsigned char*)+0x78 > V [libjvm.so+0x10719dc] > trampoline_stub_Relocation::get_trampoline_for(unsigned > char*, > nmethod*)+0x78 > V [libjvm.so+0xefb80c] > NativeCall::get_trampoline()+0x110 > V [libjvm.so+0x1076914] > Relocation::pd_call_destination(unsigned > char*)+0x150 > V [libjvm.so+0x106f5fc] > CallRelocation::fix_relocation_after_move(CodeBuffer > const*, > CodeBuffer*)+0x74 > V [libjvm.so+0x728898] > CodeBuffer::relocate_code_to(CodeBuffer*) > const+0x390 > V [libjvm.so+0x728404] > CodeBuffer::copy_code_to(CodeBlob*)+0x134 > V [libjvm.so+0x722670] > CodeBuffer::copy_code_and_locs_to(CodeBlob*)+0x84 > V [libjvm.so+0x71f834] CodeBlob::CodeBlob(char > const*, > CodeBlobLayout const&, CodeBuffer*, int, int, > OopMapSet*, bool, > int)+0x320 > V [libjvm.so+0x7c52c8] > CompiledMethod::CompiledMethod(Method*, char > const*, int, int, CodeBuffer*, int, int, > OopMapSet*, bool)+0xd8 > V [libjvm.so+0xf01f58] nmethod::nmethod(Method*, > int, int, int, > CodeOffsets*, int, DebugInformationRecorder*, > Dependencies*, > CodeBuffer*, int, OopMapSet*, ExceptionHandlerTable*, > ImplicitExceptionTable*, AbstractCompiler*, int)+0xe0 > V [libjvm.so+0xf01610] > nmethod::new_nmethod(methodHandle const&, > int, int, CodeOffsets*, int, > DebugInformationRecorder*, Dependencies*, > CodeBuffer*, int, OopMapSet*, ExceptionHandlerTable*, > ImplicitExceptionTable*, AbstractCompiler*, int)+0x2c4 > V [libjvm.so+0x632970] > ciEnv::register_method(ciMethod*, int, > CodeOffsets*, int, CodeBuffer*, int, OopMapSet*, > ExceptionHandlerTable*, ImplicitExceptionTable*, > AbstractCompiler*, > bool, bool, RTMState)+0x560 > V [libjvm.so+0x48ee00] > Compilation::install_code(int)+0x264 > V [libjvm.so+0x48eff8] > Compilation::compile_method()+0x184 > V [libjvm.so+0x48f7a8] > Compilation::Compilation(AbstractCompiler*, > ciEnv*, ciMethod*, int, BufferBlob*, > DirectiveSet*)+0x288 > V [libjvm.so+0x4980d0] > Compiler::compile_method(ciEnv*, ciMethod*, > int, DirectiveSet*)+0xc8 > V [libjvm.so+0x7b188c] > CompileBroker::invoke_compiler_on_method(CompileTask*)+0x590 > V [libjvm.so+0x7b07bc] > CompileBroker::compiler_thread_loop()+0x310 > V [libjvm.so+0x11a614c] > compiler_thread_entry(JavaThread*, Thread*)+0xa0 > V [libjvm.so+0x119f3a8] > JavaThread::thread_main_inner()+0x1b4 > V [libjvm.so+0x119f1a4] JavaThread::run()+0x1b8 > V [libjvm.so+0xf53d90] java_start(Thread*)+0x204 > C [libpthread.so.0+0x8a64] 
start_thread+0xf4 > C [libc.so.6+0x1032a0] clone+0x98 > > I haven't identified the exact cause (will analyze > it tomorrow) but > the stack trace indicates that it is indeed > related to your changes. > > Besides that I have some comments: > > codeBuffer.hpp: > > 472 CodeSection* insts() { return &_insts; } > 475 const CodeSection* insts() const { return > &_insts; } > > - do we really need both versions? > > codeBlob.hpp: > > 135 nmethod* as_nmethod_or_null() const > { return > is_nmethod() ? (nmethod*) this : NULL; } > 136 nmethod* as_nmethod() const { > assert(is_nmethod(), "must be nmethod"); return > (nmethod*) this; } > 137 CompiledMethod* > as_compiled_method_or_null() const { return > is_compiled() ? (CompiledMethod*) this : NULL; } > 138 CompiledMethod* as_compiled_method() > const { > assert(is_compiled(), "must be compiled"); return > (CompiledMethod*) > this; } > 139 CodeBlob* as_codeblob_or_null() const > { return > (CodeBlob*) this; } > > - I don't like this code. You make the getters > 'const' which > implicitely makes 'this' a "pointer to const" but > then the returned > pointer is a normal pointer to a non-const object > and therefore you > have to statically cast away the "pointer to > const" (that's why you > need the cast even in the case where you return a > CodeBlob*). So > either remove the const qualifier from the method > declarations or make > them return "pointers to const". And by the way, > as_codeblob_or_null() > doesn't seemed to be used anywhere in the code, > why do we need it at > all? > > - Why do we need the non-virtual methods > is_nmethod() and > is_compiled() to manually simulate virtual > behavior. Why can't we > simply make them virtual and implement them > accordingly in nmathod and > CompiledMethod? > > Regards, > Volker > > On Thu, Apr 7, 2016 at 2:12 PM, Rickard B?ckman > > wrote: > > Hi, > > can I please have review for this patch please? > > So far CodeBlobs have required all the data > (metadata, oops, code, etc) > to be in one continuous blob With this patch > we are looking to change > that. It's been done by changing offsets in > CodeBlob to addresses, > making some methods virtual to allow different > behavior and also > creating a couple of new classes. > CompiledMethod now sits inbetween > CodeBlob and nmethod. > > CR: > https://bugs.openjdk.java.net/browse/JDK-8152664 > Webrev: > http://cr.openjdk.java.net/~rbackman/8152664/ > > > Thanks > /R > > > > > From coleen.phillimore at oracle.com Mon Apr 11 19:07:24 2016 From: coleen.phillimore at oracle.com (Coleen Phillimore) Date: Mon, 11 Apr 2016 15:07:24 -0400 Subject: CFV: New hotspot Group Member: Christian Tornqvist In-Reply-To: <376EEBE1-62FE-4DA2-9C2A-4DCA0078A1A4@oracle.com> References: <570BCCC7.7010602@oracle.com> <376EEBE1-62FE-4DA2-9C2A-4DCA0078A1A4@oracle.com> Message-ID: <570BF5EC.4080004@oracle.com> Hi, Note that you can only vote if you're already a member of the hotspot group. Correcting the link below. http://openjdk.java.net/census#hotspot Thanks, Coleen On 4/11/16 2:59 PM, Jiangli Zhou wrote: > Vote: yes > > Thanks, > Jiangli > >> On Apr 11, 2016, at 9:11 AM, Coleen Phillimore wrote: >> >> I hereby nominate Christian Tornqvist (OpenJDK user name: ctornqvi) to Membership in the hotspot Group. >> >> Christian is an Oracle engineer, and is lead for the Hotspot runtime SQE team. He has been working in the HotSpot team since 2011. Christian is a Reviewer in the JDK9 project. >> >> Votes are due by Monday, April 25, 2016 at 12:00PM ET. 
>> >> Only current Members of the hotspot Group [1] are eligible to vote on this nomination. Votes must be cast in the open by replying to this mailing list. >> For Lazy Consensus voting instructions, see [2]. >> >> Coleen Phillimore >> >> [1] http://openjdk.java.net/census/#hotspot >> [2] http://openjdk.java.net/groups/#member-vote From john.r.rose at oracle.com Mon Apr 11 19:22:41 2016 From: john.r.rose at oracle.com (John Rose) Date: Mon, 11 Apr 2016 12:22:41 -0700 Subject: CFV: New hotspot Group Member: Christian Tornqvist In-Reply-To: <570BCCC7.7010602@oracle.com> References: <570BCCC7.7010602@oracle.com> Message-ID: Vote: yes. From karen.kinnear at oracle.com Mon Apr 11 19:26:47 2016 From: karen.kinnear at oracle.com (Karen Kinnear) Date: Mon, 11 Apr 2016 15:26:47 -0400 Subject: CFV: New hotspot Group Member: Christian Tornqvist In-Reply-To: <570BCCC7.7010602@oracle.com> References: <570BCCC7.7010602@oracle.com> Message-ID: <608C61C9-D720-496C-A2CE-CB1D5102EC32@oracle.com> Vote: yes Karen > On Apr 11, 2016, at 12:11 PM, Coleen Phillimore wrote: > > I hereby nominate Christian Tornqvist (OpenJDK user name: ctornqvi) to Membership in the hotspot Group. > > Christian is an Oracle engineer, and is lead for the Hotspot runtime SQE team. He has been working in the HotSpot team since 2011. Christian is a Reviewer in the JDK9 project. > > Votes are due by Monday, April 25, 2016 at 12:00PM ET. > > Only current Members of the hotspot Group [1] are eligible to vote on this nomination. Votes must be cast in the open by replying to this mailing list. > For Lazy Consensus voting instructions, see [2]. > > Coleen Phillimore > > [1] http://openjdk.java.net/census/#hotspot > [2] http://openjdk.java.net/groups/#member-vote From vladimir.kozlov at oracle.com Mon Apr 11 20:03:51 2016 From: vladimir.kozlov at oracle.com (Vladimir Kozlov) Date: Mon, 11 Apr 2016 13:03:51 -0700 Subject: CFV: New hotspot Group Member: Christian Tornqvist In-Reply-To: <570BCCC7.7010602@oracle.com> References: <570BCCC7.7010602@oracle.com> Message-ID: <570C0327.4010607@oracle.com> Vote: yes On 4/11/16 9:11 AM, Coleen Phillimore wrote: > I hereby nominate Christian Tornqvist (OpenJDK user name: ctornqvi) to > Membership in the hotspot Group. > > Christian is an Oracle engineer, and is lead for the Hotspot runtime SQE > team. He has been working in the HotSpot team since 2011. Christian is a > Reviewer in the JDK9 project. > > Votes are due by Monday, April 25, 2016 at 12:00PM ET. > > Only current Members of the hotspot Group [1] are eligible to vote on > this nomination. Votes must be cast in the open by replying to this > mailing list. > For Lazy Consensus voting instructions, see [2]. > > Coleen Phillimore > > [1] http://openjdk.java.net/census/#hotspot > [2] http://openjdk.java.net/groups/#member-vote From coleen.phillimore at oracle.com Mon Apr 11 20:06:03 2016 From: coleen.phillimore at oracle.com (Coleen Phillimore) Date: Mon, 11 Apr 2016 16:06:03 -0400 Subject: RFR 8151546: nsk/jvmti/RedefineClasses/StressRedefine fails in hs nightly Message-ID: <570C03AB.4020906@oracle.com> Summary: Constant pool merging is not thread safe for source_file_name. This change includes the change for the following bug because they are tested together. 8148772: VM crash in nsk/jvmti/RedefineClasses/StressRedefine: assert failed: Corrupted constant pool Summary: ConstantPool::resolve_constant_at_impl() isn't thread safe for MethodHandleInError and MethodTypeInError. 
The parallel constant pool merges are mostly harmless because the old methods constant pool pointers aren't updated. The only case I found where it isn't harmless is that we rely on finding the source_file_name_index from the final merged constant pool, which could be any of the parallel merged constant pools. The code to attempt to dig out the name from redefined classes is removed. open webrev at http://cr.openjdk.java.net/~coleenp/8151546.01/webrev bug link https://bugs.openjdk.java.net/browse/JDK-8151546 Tested with rbt, java/lang/instrument tests, com/sun/jdi tests. I tried to write a test with all the conditions of the failure but couldn't make it fail (so noreg-hard). Thanks, Coleen From alejandro.murillo at oracle.com Tue Apr 12 00:14:56 2016 From: alejandro.murillo at oracle.com (Alejandro Murillo) Date: Mon, 11 Apr 2016 18:14:56 -0600 Subject: [9] RFR 4154028: Several hotspot tests need to be updated after 8153737 (Unsupported Module) Message-ID: <570C3E00.60203@oracle.com> Can I get a quick review for this fix that's blocking the synching of jdk9/hs with jdk9/dev: Basically, https://bugs.openjdk.java.net/browse/JDK-8153737 introduced some changes that need to be applied to several hotspot tests that were not in jdk9/dev by the time that fix was pushed. They can't be fixed on either repo, the need to be fixed in the merging repo before being pushed back to jdk9/hs (first) Bug https://bugs.openjdk.java.net/browse/JDK-8154028 Webrev: http://cr.openjdk.java.net/~amurillo/9/8154028/ I'm running a sanity test job now, but wanted to get the review going as I need to get the synch done ASAP to start PIT Thanks -- Alejandro From joe.darcy at oracle.com Tue Apr 12 00:44:33 2016 From: joe.darcy at oracle.com (joe darcy) Date: Mon, 11 Apr 2016 17:44:33 -0700 Subject: [9] RFR 4154028: Several hotspot tests need to be updated after 8153737 (Unsupported Module) In-Reply-To: <570C3E00.60203@oracle.com> References: <570C3E00.60203@oracle.com> Message-ID: <570C44F1.7000500@oracle.com> Looks fine Alejandro; thanks, -Joe On 4/11/2016 5:14 PM, Alejandro Murillo wrote: > > Can I get a quick review for this fix that's blocking the > synching of jdk9/hs with jdk9/dev: > > Basically, https://bugs.openjdk.java.net/browse/JDK-8153737 > introduced some changes that need to be applied to > several hotspot tests that were not in jdk9/dev > by the time that fix was pushed. > > They can't be fixed on either repo, the need to be fixed > in the merging repo before being pushed back to jdk9/hs (first) > > Bug https://bugs.openjdk.java.net/browse/JDK-8154028 > Webrev: http://cr.openjdk.java.net/~amurillo/9/8154028/ > > I'm running a sanity test job now, but wanted to get the review going > as I need to get the synch done ASAP to start PIT > > Thanks > From serguei.spitsyn at oracle.com Tue Apr 12 01:17:00 2016 From: serguei.spitsyn at oracle.com (serguei.spitsyn at oracle.com) Date: Mon, 11 Apr 2016 18:17:00 -0700 Subject: RFR 8151546: nsk/jvmti/RedefineClasses/StressRedefine fails in hs nightly In-Reply-To: <570C03AB.4020906@oracle.com> References: <570C03AB.4020906@oracle.com> Message-ID: <570C4C8C.5060300@oracle.com> Coleen, src/share/vm/prims/jvmtiRedefineClasses.cpp - // Update the version number of the constant pool + // Update the version number of the constant pools (may keep scratch_cp) merge_cp->increment_and_save_version(old_cp->version()); + scratch_cp->increment_and_save_version(old_cp->version()); Not sure, I understand the change above. Could you, please, explain why this change is needed? 
I suspect, the scratch_cp->version() is never used. + // NOTE: this doesn't work because you can redefine the same class in two + // threads, each getting their own constant pool data appended to the + // original constant pool. In order for the new methods to work when they + // become old methods, they need to keep their updated copy of the constant pool. + It feels like the statement in this note is too strong, and as such, confusing. Would it be better to tell something like "not always work"? Otherwise, the question is: why do we need this block of code if it doesn't work? Thanks, Serguei On 4/11/16 13:06, Coleen Phillimore wrote: > Summary: Constant pool merging is not thread safe for source_file_name. > > This change includes the change for the following bug because they are > tested together. > > 8148772: VM crash in nsk/jvmti/RedefineClasses/StressRedefine: assert > failed: Corrupted constant pool > Summary: ConstantPool::resolve_constant_at_impl() isn't thread safe > for MethodHandleInError and MethodTypeInError. > > The parallel constant pool merges are mostly harmless because the old > methods constant pool pointers aren't updated. The only case I found > where it isn't harmless is that we rely on finding the > source_file_name_index from the final merged constant pool, which > could be any of the parallel merged constant pools. The code to > attempt to dig out the name from redefined classes is removed. > > open webrev at http://cr.openjdk.java.net/~coleenp/8151546.01/webrev > bug link https://bugs.openjdk.java.net/browse/JDK-8151546 > > Tested with rbt, java/lang/instrument tests, com/sun/jdi tests. I > tried to write a test with all the conditions of the failure but > couldn't make it fail (so noreg-hard). > > Thanks, > Coleen From vladimir.kozlov at oracle.com Tue Apr 12 01:19:48 2016 From: vladimir.kozlov at oracle.com (Vladimir Kozlov) Date: Mon, 11 Apr 2016 18:19:48 -0700 Subject: [9] RFR 4154028: Several hotspot tests need to be updated after 8153737 (Unsupported Module) In-Reply-To: <570C3E00.60203@oracle.com> References: <570C3E00.60203@oracle.com> Message-ID: <570C4D34.8070107@oracle.com> Looks good. Thanks, Vladimir On 4/11/16 5:14 PM, Alejandro Murillo wrote: > > Can I get a quick review for this fix that's blocking the > synching of jdk9/hs with jdk9/dev: > > Basically, https://bugs.openjdk.java.net/browse/JDK-8153737 > introduced some changes that need to be applied to > several hotspot tests that were not in jdk9/dev > by the time that fix was pushed. > > They can't be fixed on either repo, the need to be fixed > in the merging repo before being pushed back to jdk9/hs (first) > > Bug https://bugs.openjdk.java.net/browse/JDK-8154028 > Webrev: http://cr.openjdk.java.net/~amurillo/9/8154028/ > > I'm running a sanity test job now, but wanted to get the review going > as I need to get the synch done ASAP to start PIT > > Thanks > From alejandro.murillo at oracle.com Tue Apr 12 03:06:03 2016 From: alejandro.murillo at oracle.com (Alejandro Murillo) Date: Mon, 11 Apr 2016 21:06:03 -0600 Subject: [9] RFR 4154028: Several hotspot tests need to be updated after 8153737 (Unsupported Module) In-Reply-To: <570C44F1.7000500@oracle.com> References: <570C3E00.60203@oracle.com> <570C44F1.7000500@oracle.com> Message-ID: <570C661B.5000507@oracle.com> Thanks Joe. 
Alejandro On 4/11/2016 6:44 PM, joe darcy wrote: > Looks fine Alejandro; thanks, > > -Joe > > On 4/11/2016 5:14 PM, Alejandro Murillo wrote: >> >> Can I get a quick review for this fix that's blocking the >> synching of jdk9/hs with jdk9/dev: >> >> Basically, https://bugs.openjdk.java.net/browse/JDK-8153737 >> introduced some changes that need to be applied to >> several hotspot tests that were not in jdk9/dev >> by the time that fix was pushed. >> >> They can't be fixed on either repo, the need to be fixed >> in the merging repo before being pushed back to jdk9/hs (first) >> >> Bug https://bugs.openjdk.java.net/browse/JDK-8154028 >> Webrev: http://cr.openjdk.java.net/~amurillo/9/8154028/ >> >> I'm running a sanity test job now, but wanted to get the review going >> as I need to get the synch done ASAP to start PIT >> >> Thanks >> > -- Alejandro From alejandro.murillo at oracle.com Tue Apr 12 03:08:38 2016 From: alejandro.murillo at oracle.com (Alejandro Murillo) Date: Mon, 11 Apr 2016 21:08:38 -0600 Subject: [9] RFR 4154028: Several hotspot tests need to be updated after 8153737 (Unsupported Module) In-Reply-To: <570C4D34.8070107@oracle.com> References: <570C3E00.60203@oracle.com> <570C4D34.8070107@oracle.com> Message-ID: <570C66B6.5070708@oracle.com> Thanks Vladimir Alejandro On 4/11/2016 7:19 PM, Vladimir Kozlov wrote: > Looks good. > > Thanks, > Vladimir > > On 4/11/16 5:14 PM, Alejandro Murillo wrote: >> >> Can I get a quick review for this fix that's blocking the >> synching of jdk9/hs with jdk9/dev: >> >> Basically, https://bugs.openjdk.java.net/browse/JDK-8153737 >> introduced some changes that need to be applied to >> several hotspot tests that were not in jdk9/dev >> by the time that fix was pushed. >> >> They can't be fixed on either repo, the need to be fixed >> in the merging repo before being pushed back to jdk9/hs (first) >> >> Bug https://bugs.openjdk.java.net/browse/JDK-8154028 >> Webrev: http://cr.openjdk.java.net/~amurillo/9/8154028/ >> >> I'm running a sanity test job now, but wanted to get the review going >> as I need to get the synch done ASAP to start PIT >> >> Thanks >> -- Alejandro From bengt.rutisson at oracle.com Tue Apr 12 05:03:37 2016 From: bengt.rutisson at oracle.com (Bengt Rutisson) Date: Tue, 12 Apr 2016 07:03:37 +0200 Subject: CFV: New hotspot Group Member: Christian Tornqvist In-Reply-To: <570BCCC7.7010602@oracle.com> References: <570BCCC7.7010602@oracle.com> Message-ID: <570C81A9.7060004@oracle.com> Vote: yes Bengt On 2016-04-11 18:11, Coleen Phillimore wrote: > I hereby nominate Christian Tornqvist (OpenJDK user name: ctornqvi) to > Membership in the hotspot Group. > > Christian is an Oracle engineer, and is lead for the Hotspot runtime > SQE team. He has been working in the HotSpot team since 2011. > Christian is a Reviewer in the JDK9 project. > > Votes are due by Monday, April 25, 2016 at 12:00PM ET. > > Only current Members of the hotspot Group [1] are eligible to vote on > this nomination. Votes must be cast in the open by replying to this > mailing list. > For Lazy Consensus voting instructions, see [2]. 
> > Coleen Phillimore > > [1] http://openjdk.java.net/census/#hotspot > [2] http://openjdk.java.net/groups/#member-vote From volker.simonis at gmail.com Tue Apr 12 06:15:03 2016 From: volker.simonis at gmail.com (Volker Simonis) Date: Tue, 12 Apr 2016 08:15:03 +0200 Subject: CFV: New hotspot Group Member: Christian Tornqvist In-Reply-To: <570BCCC7.7010602@oracle.com> References: <570BCCC7.7010602@oracle.com> Message-ID: Vote: yes On Mon, Apr 11, 2016 at 6:11 PM, Coleen Phillimore wrote: > I hereby nominate Christian Tornqvist (OpenJDK user name: ctornqvi) to > Membership in the hotspot Group. > > Christian is an Oracle engineer, and is lead for the Hotspot runtime SQE > team. He has been working in the HotSpot team since 2011. Christian is a > Reviewer in the JDK9 project. > > Votes are due by Monday, April 25, 2016 at 12:00PM ET. > > Only current Members of the hotspot Group [1] are eligible to vote on this > nomination. Votes must be cast in the open by replying to this mailing list. > For Lazy Consensus voting instructions, see [2]. > > Coleen Phillimore > > [1] http://openjdk.java.net/census/#hotspot > [2] http://openjdk.java.net/groups/#member-vote From stefan.karlsson at oracle.com Tue Apr 12 07:39:41 2016 From: stefan.karlsson at oracle.com (Stefan Karlsson) Date: Tue, 12 Apr 2016 09:39:41 +0200 Subject: CFV: New hotspot Group Member: Christian Tornqvist In-Reply-To: <570BCCC7.7010602@oracle.com> References: <570BCCC7.7010602@oracle.com> Message-ID: <570CA63D.40805@oracle.com> Vote: yes StefanK On 2016-04-11 18:11, Coleen Phillimore wrote: > I hereby nominate Christian Tornqvist (OpenJDK user name: ctornqvi) to > Membership in the hotspot Group. > > Christian is an Oracle engineer, and is lead for the Hotspot runtime > SQE team. He has been working in the HotSpot team since 2011. > Christian is a Reviewer in the JDK9 project. > > Votes are due by Monday, April 25, 2016 at 12:00PM ET. > > Only current Members of the hotspot Group [1] are eligible to vote on > this nomination. Votes must be cast in the open by replying to this > mailing list. > For Lazy Consensus voting instructions, see [2]. > > Coleen Phillimore > > [1] http://openjdk.java.net/census/#hotspot > [2] http://openjdk.java.net/groups/#member-vote From erik.joelsson at oracle.com Tue Apr 12 14:00:50 2016 From: erik.joelsson at oracle.com (Erik Joelsson) Date: Tue, 12 Apr 2016 16:00:50 +0200 Subject: RFR: JDK-8149777: Enable enhanced failure handler for "make test" Message-ID: <570CFF92.2090702@oracle.com> Please review this change which adds a proper makefile and build sequence for the failure-handler jtreg plugin. It also adds the failure handler to the test image and makes the hotspot and jdk test/Makefile's pick it up when available. Bug: https://bugs.openjdk.java.net/browse/JDK-8149777 Webrev: http://cr.openjdk.java.net/~erikj/8149777/webrev.03/ /Erik From thomas.stuefe at gmail.com Tue Apr 12 14:23:27 2016 From: thomas.stuefe at gmail.com (=?UTF-8?Q?Thomas_St=C3=BCfe?=) Date: Tue, 12 Apr 2016 16:23:27 +0200 Subject: RFR: 8017629: G1: UseSHM in combination with a G1HeapRegionSize > os::large_page_size() falls back to use small pages In-Reply-To: <570BAC1B.7040806@oracle.com> References: <570B8481.8010108@oracle.com> <570BAC1B.7040806@oracle.com> Message-ID: Hi Stefan, On Mon, Apr 11, 2016 at 3:52 PM, Stefan Karlsson wrote: > Hi Thomas, > > On 2016-04-11 14:39, Thomas St?fe wrote: > > Hi Stefan, > > short question, why the mmap before the shmat? Why not shmat right away at > the requested address? 
> > > If we have a requested_address we do exactly what you propose. > > if (req_addr == NULL && alignment > os::large_page_size()) { > return shmat_with_large_alignment(shmid, bytes, alignment); > } else { > return shmat_with_normal_alignment(shmid, req_addr); > } > > ... > > static char* shmat_with_normal_alignment(int shmid, char* req_addr) { > char* addr = (char*)shmat(shmid, req_addr, 0); > > if ((intptr_t)addr == -1) { > shm_warning_with_errno("Failed to attach shared memory."); > return NULL; > } > > return addr; > } > > > It's when you don't have a requested address that mmap is used to find a > large enough virtual memory area. > > Sorry, seems I did not look at this coding thoroughly enough. I understand now that you do mmap to allocate and then to cut away the extra pre-/post-space, something which would not be possible with shmat, which cannot be unmapped page-wise. But I am still not sure why we do it his way: 3429 static char* shmat_with_alignment(int shmid, size_t bytes, size_t alignment, char* req_addr) { 3430 // If there's no requested address, the shmat call can return memory that is not 3431 // 'alignment' aligned, if the given alignment is larger than the large page size. 3432 // Special care needs to be taken to ensure that we get aligned memory back. 3433 if (req_addr == NULL && alignment > os::large_page_size()) { 3434 return shmat_with_large_alignment(shmid, bytes, alignment); 3435 } else { 3436 return shmat_with_normal_alignment(shmid, req_addr); 3437 } 3438 } For req_addr==0 and big alignment, we attach at the given alignment ("shmat_with_large_alignment"). For req_addr!=0, we attach at the given requested address ("shmat_with_normal_alignment"). For req_addr==0 and smaller alignment, we ignore the alignment and attach anywhere? Maybe I am slow, but why does it matter if the alignment is large or small? Why not just distinguish between: 1) address given (req_addr!=0): in this case we attach at this req_addr and rely on the user having aligned the address properly for his purposes. We specify 0 for flags, so we will attach at exactly the given address or fail. In this case we could simply ignore the given alignment - if one was given - or just use it to counter-check the req_addr. 2) alignment given (req_addr==0 and alignment > 0): attach at the given alignment using mmap-before-shmat. This could be done for any alignment, be it large or small. Functions would become simpler and also could be clearer named (e.g. "shmat_at_address" and "shmat_with_alignment", respectivly). ---- This: 3402 if ((intptr_t)addr == -1) { 3403 shm_warning_with_errno("Failed to attach shared memory."); 3404 // Since we don't know if the kernel unmapped the pre-reserved memory area 3405 // we can't unmap it, since that would potentially unmap memory that was 3406 // mapped from other threads. 3407 return NULL; 3408 } seems scary. Means for every call this happens, we leak the reserved (not committed) address space? For most cases (anything but ENOMEM, actually) could we at least assert?: EACCES - should not happen: we created the shared memory and are its owner EIDRM - should not happen. EINVAL - should not happen. (you already check now the attach address for alignment to SHMLBA, so this is covered) --- Smaller nits: Functions called "shmat_..." suggest shmat-like behaviour, so could we have them return -1 instead of NULL in case of error? 
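To make the mechanics under discussion easier to follow, here is the rough shape of the reserve-with-mmap-then-attach approach in a stand-alone sketch. This is written for this thread and is not the webrev's code: the function name, the use of SHM_REMAP, and the elided error handling are my own simplifications, and the alignment is assumed to be a power of two that is a multiple of both the huge page size and SHMLBA.

#ifndef _GNU_SOURCE
#define _GNU_SOURCE 1                   // for SHM_REMAP on glibc
#endif
#include <sys/mman.h>
#include <sys/ipc.h>
#include <sys/shm.h>
#include <stdint.h>
#include <stddef.h>

// Sketch: attach a huge-page SysV segment of 'bytes' at an 'alignment'-aligned address.
static char* attach_huge_shm_aligned(size_t bytes, size_t alignment) {
  // 1. Reserve an oversized virtual range that we are free to carve up page-wise.
  size_t reserve = bytes + alignment;
  char* base = (char*)mmap(NULL, reserve, PROT_NONE,
                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
  if (base == MAP_FAILED) {
    return NULL;
  }

  // 2. Pick the aligned start inside the reservation and unmap the excess on both sides.
  char* aligned = (char*)(((uintptr_t)base + alignment - 1) & ~((uintptr_t)alignment - 1));
  if (aligned > base) {
    munmap(base, aligned - base);
  }
  if (aligned + bytes < base + reserve) {
    munmap(aligned + bytes, (base + reserve) - (aligned + bytes));
  }

  // 3. Create the huge-page segment and attach it over the reserved range.
  int shmid = shmget(IPC_PRIVATE, bytes, SHM_HUGETLB | IPC_CREAT | SHM_R | SHM_W);
  if (shmid == -1) {
    munmap(aligned, bytes);
    return NULL;
  }
  char* addr = (char*)shmat(shmid, aligned, SHM_REMAP);
  shmctl(shmid, IPC_RMID, NULL);        // the segment goes away on the last detach
  if (addr == (char*)-1) {
    // As noted above, it is unclear whether the reservation is still intact here.
    return NULL;
  }
  return addr;
}

The points raised above, whether the pre-reserved range can leak when shmat() fails and whether the attach address must respect SHMLBA rather than just the page size, are exactly the edges this sketch glosses over.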
Kind Regards, Thomas > > Also note that mmap- and shmat-allocated memory may have different > alignment requirements: mmap requires a page-aligned request address, > whereas shmat requires alignment to SHMLBA, which may be multiple pages > (e.g. for ARM: > > http://lxr.free-electrons.com/source/arch/arm/include/asm/shmparam.h#L9). > So, for this shat-over-mmap trick to work, request address has to be > aligned to SHMLBA, not just page size. > > I see that you assert alignment of requ address to os::large_page_size(), > which I would assume is a multiple of SHMLBA, but I am not sure of this. > > > I've added some defensive code and asserts to catch this if/when this > assumption fails: > > http://cr.openjdk.java.net/~stefank/8017629/webrev.02.delta/ > http://cr.openjdk.java.net/~stefank/8017629/webrev.02 > > I need to verify that this works on other machines than my local Linux x64 > machine. > > Thanks, > StefanK > > > Kind Regards, Thomas > > > > On Mon, Apr 11, 2016 at 1:03 PM, Stefan Karlsson < > stefan.karlsson at oracle.com> wrote: > >> Hi all, >> >> Please review this patch to enable SHM large page allocations even when >> the requested alignment is larger than os::large_page_size(). >> >> http://cr.openjdk.java.net/~stefank/8017629/webrev.01 >> https://bugs.openjdk.java.net/browse/JDK-8017629 >> >> G1 is affected by this bug since it requires the heap to start at an >> address that is aligned with the heap region size. The patch fixes this by >> changing the UseSHM large pages allocation code. First, virtual memory with >> correct alignment is pre-reserved and then the large pages are attached to >> this memory area. >> >> Tested with vm.gc.testlist and ExecuteInternaVMTests >> >> Thanks, >> StefanK >> > > > From stefan.karlsson at oracle.com Tue Apr 12 15:41:59 2016 From: stefan.karlsson at oracle.com (Stefan Karlsson) Date: Tue, 12 Apr 2016 17:41:59 +0200 Subject: RFR: 8017629: G1: UseSHM in combination with a G1HeapRegionSize > os::large_page_size() falls back to use small pages In-Reply-To: References: <570B8481.8010108@oracle.com> <570BAC1B.7040806@oracle.com> Message-ID: <570D1747.2020508@oracle.com> Hi Thomas, On 2016-04-12 16:23, Thomas St?fe wrote: > Hi Stefan, > > > On Mon, Apr 11, 2016 at 3:52 PM, Stefan Karlsson > > wrote: > > Hi Thomas, > > On 2016-04-11 14:39, Thomas St?fe wrote: >> Hi Stefan, >> >> short question, why the mmap before the shmat? Why not >> shmat right away at the requested address? > > If we have a requested_address we do exactly what you propose. > > if (req_addr == NULL && alignment > os::large_page_size()) { > return shmat_with_large_alignment(shmid, bytes, alignment); > } else { > return shmat_with_normal_alignment(shmid, req_addr); > } > > ... > > static char* shmat_with_normal_alignment(int shmid, char* req_addr) { > char* addr = (char*)shmat(shmid, req_addr, 0); > > if ((intptr_t)addr == -1) { > shm_warning_with_errno("Failed to attach shared memory."); > return NULL; > } > > return addr; > } > > > It's when you don't have a requested address that mmap is used to > find a large enough virtual memory area. > > > Sorry, seems I did not look at this coding thoroughly enough. I > understand now that you do mmap to allocate and then to cut away the > extra pre-/post-space, something which would not be possible with > shmat, which cannot be unmapped page-wise. 
> > But I am still not sure why we do it his way: > > 3429 static char* shmat_with_alignment(int shmid, size_t bytes, size_t > alignment, char* req_addr) { > 3430 // If there's no requested address, the shmat call can return > memory that is not > 3431 // 'alignment' aligned, if the given alignment is larger than > the large page size. > 3432 // Special care needs to be taken to ensure that we get aligned > memory back. > 3433 if (req_addr == NULL && alignment > os::large_page_size()) { > 3434 return shmat_with_large_alignment(shmid, bytes, alignment); > 3435 } else { > 3436 return shmat_with_normal_alignment(shmid, req_addr); > 3437 } > 3438 } > > For req_addr==0 and big alignment, we attach at the given alignment > ("shmat_with_large_alignment"). > For req_addr!=0, we attach at the given requested address > ("shmat_with_normal_alignment"). > For req_addr==0 and smaller alignment, we ignore the alignment and > attach anywhere? > > Maybe I am slow, but why does it matter if the alignment is large or > small? Why not just distinguish between: > > 1) address given (req_addr!=0): in this case we attach at this > req_addr and rely on the user having aligned the address properly for > his purposes. We specify 0 for flags, so we will attach at exactly the > given address or fail. In this case we could simply ignore the given > alignment - if one was given - or just use it to counter-check the > req_addr. > > 2) alignment given (req_addr==0 and alignment > 0): attach at the > given alignment using mmap-before-shmat. This could be done for any > alignment, be it large or small. What you propose doesn't work. We're allocating large pages with SHM_HUGETLB, and if we try to attach to an address that is not large_page_size aligned the shmat call returns EINVAL. > > Functions would become simpler and also could be clearer named (e.g. > "shmat_at_address" and "shmat_with_alignment", respectivly). Maybe I should rename the functions to make it more obvious that these are large pages specific functions? > > ---- > > This: > > 3402 if ((intptr_t)addr == -1) { > 3403 shm_warning_with_errno("Failed to attach shared memory."); > 3404 // Since we don't know if the kernel unmapped the > pre-reserved memory area > 3405 // we can't unmap it, since that would potentially unmap > memory that was > 3406 // mapped from other threads. > 3407 return NULL; > 3408 } > > seems scary. Means for every call this happens, we leak the reserved > (not committed) address space? Yes, that's unfortunate. An alternative would be to use this sequence: 1) Use anon_mmap_aligned to find a suitable VA range 2) Immediately unmap the VA range 3) Try to attach at that VA range _without_ SHM_REMAP That would remove the risk of leaking the reserved address space, but instead we risk failing at (3) if another thread manages to allocate memory inside the found VA range. This will cause some users to unnecessarily fail to get large pages, though. We've had other problems when pre-existing threads used mmap while we were initializing the VM. See: JDK-8007074. > For most cases (anything but ENOMEM, actually) could we at least assert?: > > EACCES - should not happen: we created the shared memory and are its owner > EIDRM - should not happen. > EINVAL - should not happen. (you already check now the attach address > for alignment to SHMLBA, so this is covered) Sure. I'll add asserts for these. > > --- > > Smaller nits: > > Functions called "shmat_..." suggest shmat-like behaviour, so could we > have them return -1 instead of NULL in case of error? 
That would add clutter to the reserve_memory_special_shm, and it might also suggest that it would be OK to check errno for the failure reason, which probably wouldn't work. I'll let other Reviewers chime in and help decide if we should change this. Thanks for reviewing this, StefanK > > Kind Regards, Thomas > >> >> Also note that mmap- and shmat-allocated memory may have >> different alignment requirements: mmap requires a page-aligned >> request address, whereas shmat requires alignment to SHMLBA, >> which may be multiple pages (e.g. for ARM: >> http://lxr.free-electrons.com/source/arch/arm/include/asm/shmparam.h#L9). >> So, for this shat-over-mmap trick to work, request address has to >> be aligned to SHMLBA, not just page size. >> >> I see that you assert alignment of requ address to >> os::large_page_size(), which I would assume is a multiple of >> SHMLBA, but I am not sure of this. > > I've added some defensive code and asserts to catch this if/when > this assumption fails: > > http://cr.openjdk.java.net/~stefank/8017629/webrev.02.delta/ > > http://cr.openjdk.java.net/~stefank/8017629/webrev.02 > > > I need to verify that this works on other machines than my local > Linux x64 machine. > > Thanks, > StefanK > >> >> Kind Regards, Thomas >> >> >> >> On Mon, Apr 11, 2016 at 1:03 PM, Stefan Karlsson >> > >> wrote: >> >> Hi all, >> >> Please review this patch to enable SHM large page allocations >> even when the requested alignment is larger than >> os::large_page_size(). >> >> http://cr.openjdk.java.net/~stefank/8017629/webrev.01 >> >> https://bugs.openjdk.java.net/browse/JDK-8017629 >> >> G1 is affected by this bug since it requires the heap to >> start at an address that is aligned with the heap region >> size. The patch fixes this by changing the UseSHM large pages >> allocation code. First, virtual memory with correct alignment >> is pre-reserved and then the large pages are attached to this >> memory area. >> >> Tested with vm.gc.testlist and ExecuteInternaVMTests >> >> Thanks, >> StefanK >> >> > > From volker.simonis at gmail.com Tue Apr 12 16:03:58 2016 From: volker.simonis at gmail.com (Volker Simonis) Date: Tue, 12 Apr 2016 18:03:58 +0200 Subject: RFR(S): 8154087: Fix AIX and Linux/ppc64le after the integration of the new hotspot build Message-ID: Hi, can I please have a review for the following trivial changes to make the build work again on AIX and Linux/ppc64le after the integration of the new hotspot build system. The changes are all AIX and/or ppc64 specific and shouldn't change the behavior on any other platform. Because the top-level changes require the rebuild of generated-configure.sh and the hotspot changes are in shared code, I also need a sponsor. It would be best if the changes could be pushed in sync to the hs-rt repository: http://cr.openjdk.java.net/~simonis/webrevs/2016/8154087.top http://cr.openjdk.java.net/~simonis/webrevs/2016/8154087.hs http://cr.openjdk.java.net/~simonis/webrevs/2016/8154087.jdk https://bugs.openjdk.java.net/browse/JDK-8154087 The hotspot change contains a trivial source code change in an AIX file to fix a warning which would otherwise break the build with "warnings as errors". The jdk change disables "warnings as errors" for AIX for several libs such that we can build the complete jdk with "warnings as errors" on AIX as well now. Fixing the actual warnings will be done in a later change. 
Thanks a lot and best regards, Volker From erik.joelsson at oracle.com Tue Apr 12 16:18:00 2016 From: erik.joelsson at oracle.com (Erik Joelsson) Date: Tue, 12 Apr 2016 18:18:00 +0200 Subject: RFR(S): 8154087: Fix AIX and Linux/ppc64le after the integration of the new hotspot build In-Reply-To: References: Message-ID: <570D1FB8.7000506@oracle.com> Hello Volker, This looks good to me. I will sponsor the change into hs-rt once someone reviews the source code changes. /Erik On 2016-04-12 18:03, Volker Simonis wrote: > Hi, > > can I please have a review for the following trivial changes to make > the build work again on AIX and Linux/ppc64le after the integration of > the new hotspot build system. The changes are all AIX and/or ppc64 > specific and shouldn't change the behavior on any other platform. > > Because the top-level changes require the rebuild of > generated-configure.sh and the hotspot changes are in shared code, I > also need a sponsor. It would be best if the changes could be pushed > in sync to the hs-rt repository: > > http://cr.openjdk.java.net/~simonis/webrevs/2016/8154087.top > http://cr.openjdk.java.net/~simonis/webrevs/2016/8154087.hs > http://cr.openjdk.java.net/~simonis/webrevs/2016/8154087.jdk > > https://bugs.openjdk.java.net/browse/JDK-8154087 > > The hotspot change contains a trivial source code change in an AIX > file to fix a warning which would otherwise break the build with > "warnings as errors". > > The jdk change disables "warnings as errors" for AIX for several libs > such that we can build the complete jdk with "warnings as errors" on > AIX as well now. Fixing the actual warnings will be done in a later > change. > > Thanks a lot and best regards, > Volker From stefan.karlsson at oracle.com Tue Apr 12 16:40:09 2016 From: stefan.karlsson at oracle.com (Stefan Karlsson) Date: Tue, 12 Apr 2016 18:40:09 +0200 Subject: RFC: 8154079: Catch incorrectly included .inline.hpp files Message-ID: <570D24E9.9030401@oracle.com> Hi all, I would like to propose a patch to make it easier to find and clean up places where we include .inline.hpp files from .hpp files. So, that we start getting smaller include dependencies, with lower risk of circular include dependencies, and maybe even shorter compile times. The guidelines regarding file inclusions can be found at: https://wiki.openjdk.java.net/display/HotSpot/StyleGuide "Files Do not put non-trivial function implementations in .hpp files. If the implementation depends on other .hpp files, put it in a .cpp or a .inline.hpp file. .inline.hpp files should only be included in .cpp or .inline.hpp files. All .cpp files include precompiled.hpp as the first include line. precompiled.hpp is just a build time optimization, so don't rely on it to resolve include problems. Keep the include lines sorted. Put conditional inclusions (#if ...) at the end of the include list." The code to enable the stricter .inline.hpp include check can be found in this small patch: http://cr.openjdk.java.net/~stefank/8154079/webrev.01.00.addInlineHppGuard/ I'm using the fact that we are including almost all header files in the precompiled.hpp file. So, if I add a "scoped" define, called INLINE_HPP_GUARD, in preompiled.hpp and add checks for this define in .inline.hpp files, the preprocessor will tell me when .inline.hpp includes come from .hpp files (directly or indirectly) rather than .cpp files. This requires that the .hpp file is reachable through precompiled.hpp and that we start remove .inline.hpp files from precompiled.hpp. 
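The "scoped define" is easiest to see in sketch form. The authoritative version is the webrev above; modulo details, the two pieces fit together roughly like this, with the #error text being the one that shows up in the compile output further down:

// precompiled/precompiled.hpp (sketch)
#define INLINE_HPP_GUARD          // active while the plain .hpp files are pulled in
#include "ci/ciArray.hpp"
// ... all the other .hpp includes, but no .inline.hpp files any more ...
#undef INLINE_HPP_GUARD           // .cpp files may include .inline.hpp files after this point

// utilities/inlineHppGuard.hpp (sketch), included by every .inline.hpp file
#ifdef INLINE_HPP_GUARD
#error ".inline.hpp file incorrectly included from .hpp file"
#endif

Any .hpp file reachable from precompiled.hpp that drags in a .inline.hpp file, directly or through another header, then fails loudly at compile time instead of silently growing the include graph.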
I've tried this on a few .inline.hpp files. For example: thread.inline.hpp: http://cr.openjdk.java.net/~stefank/8154079/webrev.01.01.guardThreadInlineHpp The inlineHppGuard.hpp file is first included in thread.inline.hpp: #include "runtime/atomic.inline.hpp" #include "runtime/os.inline.hpp" #include "runtime/thread.hpp" +#include "utilities/inlineHppGuard.hpp" Then when I compile (with precompiled headers enabled) I get: In file included from /home/stefank/hg/jdk9/hs-rt/hotspot/src/share/vm/runtime/thread.inline.hpp:33:0, from /home/stefank/hg/jdk9/hs-rt/hotspot/src/share/vm/runtime/handles.inline.hpp:29, from /home/stefank/hg/jdk9/hs-rt/hotspot/src/share/vm/runtime/reflectionUtils.hpp:32, from /home/stefank/hg/jdk9/hs-rt/hotspot/src/share/vm/classfile/systemDictionary.hpp:34, from /home/stefank/hg/jdk9/hs-rt/hotspot/src/share/vm/ci/ciEnv.hpp:30, from /home/stefank/hg/jdk9/hs-rt/hotspot/src/share/vm/ci/ciUtilities.hpp:28, from /home/stefank/hg/jdk9/hs-rt/hotspot/src/share/vm/ci/ciNullObject.hpp:30, from /home/stefank/hg/jdk9/hs-rt/hotspot/src/share/vm/ci/ciConstant.hpp:29, from /home/stefank/hg/jdk9/hs-rt/hotspot/src/share/vm/ci/ciArray.hpp:29, from /home/stefank/hg/jdk9/hs-rt/hotspot/src/share/vm/precompiled/precompiled.hpp:37: /home/stefank/hg/jdk9/hs-rt/hotspot/src/share/vm/utilities/inlineHppGuard.hpp:44:2: error: #error ".inline.hpp file incorrectly included from .hpp file" That tells me that I probably need to go and fix the inclusion of reflectionUtils.hpp -> handles.inline.hpp first. It's not always enough to remove the inclusion of the .inline.hpp file, because the header might actually use the inline files. In those cases the code needs to be restructured so that we move the "offending" functions out from the .hpp files to .cpp or .inline.hpp files. This might be tedious in the beginning, but will hopefully become easier to maintain when more of these files get cleaned up. So, with this in place, after all incorrect includes have been fixed, whenever someone "incorrectly" adds an inclusion of thread.inline.hpp (directly or indirectly) to a .hpp file the preprocessor will complain. I have a patch set where I've tried this on different .inline.hpp files, and the following patches show the kind of work that is needed to fix the includes: http://cr.openjdk.java.net/~stefank/8154079/webrev.01.00.addInlineHppGuard/ http://cr.openjdk.java.net/~stefank/8154079/webrev.01.01.guardThreadInlineHpp/ http://cr.openjdk.java.net/~stefank/8154079/webrev.01.02.guardHandleInlineHpp/ http://cr.openjdk.java.net/~stefank/8154079/webrev.01.03.guardOsInlineHpp/ http://cr.openjdk.java.net/~stefank/8154079/webrev.01.04.guardMarkOopInlineHpp/ http://cr.openjdk.java.net/~stefank/8154079/webrev.01.05.guardOopInlineHpp/ http://cr.openjdk.java.net/~stefank/8154079/webrev.01.06.guardFrameInlineHpp/ http://cr.openjdk.java.net/~stefank/8154079/webrev.01.07.guardHashtableInlineHpp/ http://cr.openjdk.java.net/~stefank/8154079/webrev.01.08.guardOrderAccessInlineHpp/ http://cr.openjdk.java.net/~stefank/8154079/webrev.01.09.guardBitMapInlineHpp/ http://cr.openjdk.java.net/~stefank/8154079/webrev.01.10.guardAtomicInlineHpp/ http://cr.openjdk.java.net/~stefank/8154079/webrev.01.11.guardKlassInlineHpp/ http://cr.openjdk.java.net/~stefank/8154079/webrev.01.12.guardTypeArrayOopInlineHpp/ http://cr.openjdk.java.net/~stefank/8154079/webrev.01.all/ It compiles on linux-x64 with and without precompiled headers, but I haven't taken the time to try to get it to compile on other platforms.
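To illustrate the kind of restructuring mentioned above: the typical shape of a fix is to move the offending inline body out of the .hpp file. The Foo/Bar classes below are hypothetical (assume bar.hpp declares Bar::compute() and bar.inline.hpp defines it); they only show the pattern, not code from the webrevs:

// Before: foo.hpp carries an inline body that needs bar.inline.hpp
class Foo {
 public:
  int value() const { return Bar::compute(_x); } // forces foo.hpp to include bar.inline.hpp
 private:
  int _x;
};

// After: foo.hpp only declares the function ...
class Foo {
 public:
  int value() const;
 private:
  int _x;
};

// ... and foo.inline.hpp (or foo.cpp) provides the definition
#include "bar.inline.hpp"
#include "foo.hpp"

inline int Foo::value() const {
  return Bar::compute(_x);
}

Headers that merely use the Foo type then stop dragging in bar.inline.hpp; only the .cpp and .inline.hpp files that actually call Foo::value() need the inline definition.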
So, is this worth doing to start fixing our include mess? Thanks, StefanK From coleen.phillimore at oracle.com Tue Apr 12 19:29:41 2016 From: coleen.phillimore at oracle.com (Coleen Phillimore) Date: Tue, 12 Apr 2016 15:29:41 -0400 Subject: RFR 8151546: nsk/jvmti/RedefineClasses/StressRedefine fails in hs nightly In-Reply-To: <570C4C8C.5060300@oracle.com> References: <570C03AB.4020906@oracle.com> <570C4C8C.5060300@oracle.com> Message-ID: <570D4CA5.2070505@oracle.com> Hi Serguei, Thank you for looking at this change. On 4/11/16 9:17 PM, serguei.spitsyn at oracle.com wrote: > Coleen, > > src/share/vm/prims/jvmtiRedefineClasses.cpp > - // Update the version number of the constant pool > + // Update the version number of the constant pools (may keep scratch_cp) > merge_cp->increment_and_save_version(old_cp->version()); > + scratch_cp->increment_and_save_version(old_cp->version()); > > Not sure, I understand the change above. > Could you, please, explain why this change is needed? > I suspect, the scratch_cp->version() is never used. scratch_cp is used if it's equivalent to the old constant pool (see the code below this with the comments). But it could add entries. In this case, we want scratch_cp to have a new version number because scratch_class->_source_file_name_index may be an appended entry (old_cp->length() + n) which a parallel constant pool merge might append a different entry and be set to the constant pool after the safepoint. So source_file_name_index won't point to the first appended entry. So I need to update the version also in scratch_cp to detect this. Actually, I made this change because I was going to make a bigger change that compared constant pool entries if they were the same version (ie both old_cp->version + 1), indicating parallel constant pool merging. I decided this change was too much. > > > + // NOTE: this doesn't work because you can redefine the same class > in two > + // threads, each getting their own constant pool data appended to the > + // original constant pool. In order for the new methods to work when > they > + // become old methods, they need to keep their updated copy of the > constant pool. > + > It feels like the statement in this note is too strong, and as such, > confusing. > Would it be better to tell something like "not always work"? > Otherwise, the question is: why do we need this block of code if it > doesn't work? > The block of code is #if 0'ed out. In my debugging I figured out why it wouldn't work, so I thought I'd comment it. Thanks, Coleen > > Thanks, > Serguei > > > On 4/11/16 13:06, Coleen Phillimore wrote: >> Summary: Constant pool merging is not thread safe for source_file_name. >> >> This change includes the change for the following bug because they >> are tested together. >> >> 8148772: VM crash in nsk/jvmti/RedefineClasses/StressRedefine: assert >> failed: Corrupted constant pool >> Summary: ConstantPool::resolve_constant_at_impl() isn't thread safe >> for MethodHandleInError and MethodTypeInError. >> >> The parallel constant pool merges are mostly harmless because the old >> methods constant pool pointers aren't updated. The only case I found >> where it isn't harmless is that we rely on finding the >> source_file_name_index from the final merged constant pool, which >> could be any of the parallel merged constant pools. The code to >> attempt to dig out the name from redefined classes is removed. 
>> >> open webrev at http://cr.openjdk.java.net/~coleenp/8151546.01/webrev >> bug link https://bugs.openjdk.java.net/browse/JDK-8151546 >> >> Tested with rbt, java/lang/instrument tests, com/sun/jdi tests. I >> tried to write a test with all the conditions of the failure but >> couldn't make it fail (so noreg-hard). >> >> Thanks, >> Coleen > From daniel.daugherty at oracle.com Tue Apr 12 22:36:47 2016 From: daniel.daugherty at oracle.com (Daniel D. Daugherty) Date: Tue, 12 Apr 2016 16:36:47 -0600 Subject: RFR (s) 8148772: VM crash in nsk/jvmti/RedefineClasses/StressRedefine: assert failed: Corrupted constant pool In-Reply-To: <5708FE01.9050309@oracle.com> References: <57081D59.9040909@oracle.com> <57085EDD.6010904@oracle.com> <5708FE01.9050309@oracle.com> Message-ID: <570D787F.1020208@oracle.com> OK, I think I understand the code. Please see the note that I added to the bug report. I have a couple of final questions about the other failures modes in JDK-8148772, but I suspect those are covered by the work on: JDK-8151546 nsk/jvmti/RedefineClasses/StressRedefine fails in hs nightly Thumbs up on this code. Dan On 4/9/16 7:05 AM, Coleen Phillimore wrote: > > Hi Dan, I tried to answer your questions in the comments of the bug > so there'd be a record (at least for me). I wasn't very descriptive > in my earlier comment, because fixing this bug was prelude to trying > to fix another bug with this StressRedefine test case. > > On 4/8/16 9:46 PM, Daniel D. Daugherty wrote: >> On 4/8/16 3:06 PM, Coleen Phillimore wrote: >>> Summary: ConstantPool::resolve_constant_at_impl() isn't thread safe >>> for MethodHandleInError and MethodTypeInError. >>> >>> Need to ignore the InError tag when fetching method_handle_index and >>> method_type_index. The error is cached after the call to >>> systemDictionary::link_method_handle_constant() if it's not there >>> already. >>> >>> Tested with rbt equivalent of nightly runs, and StressRedefine test >>> (reproduceable with this error) for >24 hours (also with 8151546 >>> fixed). Ran jdk/test/java/lang/invoke tests. I can't write a test >>> for this because it's too timing sensitive. >>> >>> open webrev at http://cr.openjdk.java.net/~coleenp/8148772.01/webrev >> >> I'm trying to get my head wrapped around this race... >> so the original failure mode looks like this: >> >> assert(tag_at(which).is_invoke_dynamic()) failed: Corrupted >> constant pool >> >> and the call stack looks like this: >> >> V [libjvm.so+0x7f1fe0] report_vm_error(char const*, int, char >> const*, char const*, ...)+0x60 >> V [libjvm.so+0x7e518b] >> ConstantPool::invoke_dynamic_name_and_type_ref_index_at(int)+0x3b >> V [libjvm.so+0x7dd18f] >> ConstantPool::impl_name_and_type_ref_index_at(int, bool)+0x15f >> V [libjvm.so+0x6a7363] >> ciBytecodeStream::get_method_signature_index()+0x4a3 >> >> and the crashing code looks like this: >> >> 517 int invoke_dynamic_name_and_type_ref_index_at(int which) { >> 518 assert(tag_at(which).is_invoke_dynamic(), "Corrupted >> constant pool"); >> 519 return extract_high_short_from_int(*int_at_addr(which)); >> 520 } >> >> The other crashes in the bug report are different and are in >> different places... I don't think I'm going to get there by >> looking at the reported crashes... >> >> OK, so the bug report has one line of analysis: >> >> > ConstantPool::resolve_constant_at_impl() isn't thread safe for >> > MethodHandleInError and MethodTypeInError. >> >> but resolve_constant_at_impl() isn't changed at all by the webrev. >> OK, this is starting to get frustrating... 
>> >> OK, so I go back to the code and look at it again... >> The constantPool.hpp changes are all about getting >> rid of the 'error_ok' parameter and getting rid of >> the _error_ok() function variants. I'm cool with all >> that code, but I don't see what it has to do with a >> data race in the constant pool... >> >> The constantPool.cpp changes are all about switching >> from the _error_ok() function variants to regular >> variants. And there's the new debug additions to >> invalid/default part of the case statement... I'm >> still not seeing it... >> >> So since the constantPool.cpp code that used to call >> the _error_ok() functions now call the regular functions >> that means that this race has to be in the original >> functions that took the error_ok parameter... so I >> look again and I just don't see how removing the >> error_ok parameter and its use in the asserts() solves >> this race. >> >> OK, it's late on a Friday and I'm just not getting >> what this fix is about... >> >> src/share/vm/oops/constantPool.hpp >> No comments. >> >> src/share/vm/oops/constantPool.cpp >> L1024: DEBUG_ONLY( tty->print_cr("*** %p: tag at CP[%d] = %d", >> L1025: this, index1, t1)); >> L1026: assert(false, "unexpected constant tag"); >> L1028: ShouldNotReachHere(); >> I agree with Chris that this should be merged into >> a fatal() call. Should the '%p' be a INTPTR_FORMAT? >> I have a vague memory about '%p' being problematic >> to get consistent across all platforms. > > I revered this change. > > Thanks, > Coleen >> >> I'll look at it again on Monday. For now my review is >> about style since I clearly don't understand this race >> nor how this fix solves it. >> >> Dan >> >> >> >>> bug link https://bugs.openjdk.java.net/browse/JDK-8148772 >>> >>> Thanks, >>> Coleen >>> >>> >>> >>> >>> >> > > From coleen.phillimore at oracle.com Tue Apr 12 22:57:52 2016 From: coleen.phillimore at oracle.com (Coleen Phillimore) Date: Tue, 12 Apr 2016 18:57:52 -0400 Subject: RFR (s) 8148772: VM crash in nsk/jvmti/RedefineClasses/StressRedefine: assert failed: Corrupted constant pool In-Reply-To: <570D787F.1020208@oracle.com> References: <57081D59.9040909@oracle.com> <57085EDD.6010904@oracle.com> <5708FE01.9050309@oracle.com> <570D787F.1020208@oracle.com> Message-ID: <570D7D70.3020309@oracle.com> Thanks, Dan. I also commented in the bug. Can you review 8151546 also? I want to fix them together since they together fixed the test (which I don't think is quarantined in any way - I should check). Thanks! Coleen On 4/12/16 6:36 PM, Daniel D. Daugherty wrote: > OK, I think I understand the code. Please see the note that I added > to the bug report. I have a couple of final questions about the > other failures modes in JDK-8148772, but I suspect those are covered > by the work on: > > JDK-8151546 nsk/jvmti/RedefineClasses/StressRedefine fails in hs nightly > > Thumbs up on this code. > > Dan > > > On 4/9/16 7:05 AM, Coleen Phillimore wrote: >> >> Hi Dan, I tried to answer your questions in the comments of the bug >> so there'd be a record (at least for me). I wasn't very descriptive >> in my earlier comment, because fixing this bug was prelude to trying >> to fix another bug with this StressRedefine test case. >> >> On 4/8/16 9:46 PM, Daniel D. Daugherty wrote: >>> On 4/8/16 3:06 PM, Coleen Phillimore wrote: >>>> Summary: ConstantPool::resolve_constant_at_impl() isn't thread safe >>>> for MethodHandleInError and MethodTypeInError. 
>>>> >>>> Need to ignore the InError tag when fetching method_handle_index >>>> and method_type_index. The error is cached after the call to >>>> systemDictionary::link_method_handle_constant() if it's not there >>>> already. >>>> >>>> Tested with rbt equivalent of nightly runs, and StressRedefine test >>>> (reproduceable with this error) for >24 hours (also with 8151546 >>>> fixed). Ran jdk/test/java/lang/invoke tests. I can't write a test >>>> for this because it's too timing sensitive. >>>> >>>> open webrev at http://cr.openjdk.java.net/~coleenp/8148772.01/webrev >>> >>> I'm trying to get my head wrapped around this race... >>> so the original failure mode looks like this: >>> >>> assert(tag_at(which).is_invoke_dynamic()) failed: Corrupted >>> constant pool >>> >>> and the call stack looks like this: >>> >>> V [libjvm.so+0x7f1fe0] report_vm_error(char const*, int, char >>> const*, char const*, ...)+0x60 >>> V [libjvm.so+0x7e518b] >>> ConstantPool::invoke_dynamic_name_and_type_ref_index_at(int)+0x3b >>> V [libjvm.so+0x7dd18f] >>> ConstantPool::impl_name_and_type_ref_index_at(int, bool)+0x15f >>> V [libjvm.so+0x6a7363] >>> ciBytecodeStream::get_method_signature_index()+0x4a3 >>> >>> and the crashing code looks like this: >>> >>> 517 int invoke_dynamic_name_and_type_ref_index_at(int which) { >>> 518 assert(tag_at(which).is_invoke_dynamic(), "Corrupted >>> constant pool"); >>> 519 return extract_high_short_from_int(*int_at_addr(which)); >>> 520 } >>> >>> The other crashes in the bug report are different and are in >>> different places... I don't think I'm going to get there by >>> looking at the reported crashes... >>> >>> OK, so the bug report has one line of analysis: >>> >>> > ConstantPool::resolve_constant_at_impl() isn't thread safe for >>> > MethodHandleInError and MethodTypeInError. >>> >>> but resolve_constant_at_impl() isn't changed at all by the webrev. >>> OK, this is starting to get frustrating... >>> >>> OK, so I go back to the code and look at it again... >>> The constantPool.hpp changes are all about getting >>> rid of the 'error_ok' parameter and getting rid of >>> the _error_ok() function variants. I'm cool with all >>> that code, but I don't see what it has to do with a >>> data race in the constant pool... >>> >>> The constantPool.cpp changes are all about switching >>> from the _error_ok() function variants to regular >>> variants. And there's the new debug additions to >>> invalid/default part of the case statement... I'm >>> still not seeing it... >>> >>> So since the constantPool.cpp code that used to call >>> the _error_ok() functions now call the regular functions >>> that means that this race has to be in the original >>> functions that took the error_ok parameter... so I >>> look again and I just don't see how removing the >>> error_ok parameter and its use in the asserts() solves >>> this race. >>> >>> OK, it's late on a Friday and I'm just not getting >>> what this fix is about... >>> >>> src/share/vm/oops/constantPool.hpp >>> No comments. >>> >>> src/share/vm/oops/constantPool.cpp >>> L1024: DEBUG_ONLY( tty->print_cr("*** %p: tag at CP[%d] = %d", >>> L1025: this, index1, t1)); >>> L1026: assert(false, "unexpected constant tag"); >>> L1028: ShouldNotReachHere(); >>> I agree with Chris that this should be merged into >>> a fatal() call. Should the '%p' be a INTPTR_FORMAT? >>> I have a vague memory about '%p' being problematic >>> to get consistent across all platforms. >> >> I revered this change. >> >> Thanks, >> Coleen >>> >>> I'll look at it again on Monday. 
For now my review is >>> about style since I clearly don't understand this race >>> nor how this fix solves it. >>> >>> Dan >>> >>> >>> >>>> bug link https://bugs.openjdk.java.net/browse/JDK-8148772 >>>> >>>> Thanks, >>>> Coleen >>>> >>>> >>>> >>>> >>>> >>> >> >> > From serguei.spitsyn at oracle.com Wed Apr 13 01:35:07 2016 From: serguei.spitsyn at oracle.com (serguei.spitsyn at oracle.com) Date: Tue, 12 Apr 2016 18:35:07 -0700 Subject: RFR 8151546: nsk/jvmti/RedefineClasses/StressRedefine fails in hs nightly In-Reply-To: <570D4CA5.2070505@oracle.com> References: <570C03AB.4020906@oracle.com> <570C4C8C.5060300@oracle.com> <570D4CA5.2070505@oracle.com> Message-ID: <570DA24B.6010804@oracle.com> On 4/12/16 12:29, Coleen Phillimore wrote: > > Hi Serguei, > > Thank you for looking at this change. > > On 4/11/16 9:17 PM, serguei.spitsyn at oracle.com wrote: >> Coleen, >> >> src/share/vm/prims/jvmtiRedefineClasses.cpp >> - // Update the version number of the constant pool >> + // Update the version number of the constant pools (may keep >> scratch_cp) >> merge_cp->increment_and_save_version(old_cp->version()); >> + scratch_cp->increment_and_save_version(old_cp->version()); >> >> Not sure, I understand the change above. >> Could you, please, explain why this change is needed? >> I suspect, the scratch_cp->version() is never used. > > scratch_cp is used if it's equivalent to the old constant pool (see > the code below this with the comments). But it could add entries. In > this case, we want scratch_cp to have a new version number because > scratch_class->_source_file_name_index may be an appended entry > (old_cp->length() + n) which a parallel constant pool merge might > append a different entry and be set to the constant pool after the > safepoint. So source_file_name_index won't point to the first > appended entry. So I need to update the version also in scratch_cp to > detect this. Thank you for the explanation. I'm Ok with this change, just wanted to understand. I think, we have to prevent multiple class redefinitions (prologues) of the same class at the same time. Otherwise, it is hard to isolate and fix all potential issues in this scenario. I doubt, the original goal was to allow this. I've never investigated this corner case. It is not clear what happens with two merged constant pools prepared concurrently. We do not merge them again, right? Most likely, the last redefinition wins with some side effects. If so, then there has to be a way to detect and prevent this kind of concurrency. > > Actually, I made this change because I was going to make a bigger > change that compared constant pool entries if they were the same > version (ie both old_cp->version + 1), indicating parallel constant > pool merging. I decided this change was too much. > >> >> >> + // NOTE: this doesn't work because you can redefine the same class >> in two >> + // threads, each getting their own constant pool data appended to the >> + // original constant pool. In order for the new methods to work >> when they >> + // become old methods, they need to keep their updated copy of the >> constant pool. >> + >> It feels like the statement in this note is too strong, and as such, >> confusing. >> Would it be better to tell something like "not always work"? >> Otherwise, the question is: why do we need this block of code if it >> doesn't work? >> > > The block of code is #if 0'ed out. In my debugging I figured out why > it wouldn't work, so I thought I'd comment it. Oh, I see. 
In this particular case, I looked at the Udiff that does not show all the context. Please, consider it reviewed. Thanks, Serguei > > Thanks, > Coleen > >> >> Thanks, >> Serguei >> >> >> On 4/11/16 13:06, Coleen Phillimore wrote: >>> Summary: Constant pool merging is not thread safe for source_file_name. >>> >>> This change includes the change for the following bug because they >>> are tested together. >>> >>> 8148772: VM crash in nsk/jvmti/RedefineClasses/StressRedefine: >>> assert failed: Corrupted constant pool >>> Summary: ConstantPool::resolve_constant_at_impl() isn't thread safe >>> for MethodHandleInError and MethodTypeInError. >>> >>> The parallel constant pool merges are mostly harmless because the >>> old methods constant pool pointers aren't updated. The only case I >>> found where it isn't harmless is that we rely on finding the >>> source_file_name_index from the final merged constant pool, which >>> could be any of the parallel merged constant pools. The code to >>> attempt to dig out the name from redefined classes is removed. >>> >>> open webrev at http://cr.openjdk.java.net/~coleenp/8151546.01/webrev >>> bug link https://bugs.openjdk.java.net/browse/JDK-8151546 >>> >>> Tested with rbt, java/lang/instrument tests, com/sun/jdi tests. I >>> tried to write a test with all the conditions of the failure but >>> couldn't make it fail (so noreg-hard). >>> >>> Thanks, >>> Coleen >> > From coleen.phillimore at oracle.com Wed Apr 13 01:43:44 2016 From: coleen.phillimore at oracle.com (Coleen Phillimore) Date: Tue, 12 Apr 2016 21:43:44 -0400 Subject: RFR 8151546: nsk/jvmti/RedefineClasses/StressRedefine fails in hs nightly In-Reply-To: <570DA24B.6010804@oracle.com> References: <570C03AB.4020906@oracle.com> <570C4C8C.5060300@oracle.com> <570D4CA5.2070505@oracle.com> <570DA24B.6010804@oracle.com> Message-ID: <570DA450.5060607@oracle.com> Hi Serguei, On 4/12/16 9:35 PM, serguei.spitsyn at oracle.com wrote: > On 4/12/16 12:29, Coleen Phillimore wrote: >> >> Hi Serguei, >> >> Thank you for looking at this change. >> >> On 4/11/16 9:17 PM, serguei.spitsyn at oracle.com wrote: >>> Coleen, >>> >>> src/share/vm/prims/jvmtiRedefineClasses.cpp >>> - // Update the version number of the constant pool >>> + // Update the version number of the constant pools (may keep >>> scratch_cp) >>> merge_cp->increment_and_save_version(old_cp->version()); >>> + scratch_cp->increment_and_save_version(old_cp->version()); >>> >>> Not sure, I understand the change above. >>> Could you, please, explain why this change is needed? >>> I suspect, the scratch_cp->version() is never used. >> >> scratch_cp is used if it's equivalent to the old constant pool (see >> the code below this with the comments). But it could add entries. >> In this case, we want scratch_cp to have a new version number because >> scratch_class->_source_file_name_index may be an appended entry >> (old_cp->length() + n) which a parallel constant pool merge might >> append a different entry and be set to the constant pool after the >> safepoint. So source_file_name_index won't point to the first >> appended entry. So I need to update the version also in scratch_cp to >> detect this. > > Thank you for the explanation. > I'm Ok with this change, just wanted to understand. > > I think, we have to prevent multiple class redefinitions (prologues) > of the same class at the same time. > Otherwise, it is hard to isolate and fix all potential issues in this > scenario. > I doubt, the original goal was to allow this. 
> > I've never investigated this corner case. > It is not clear what happens with two merged constant pools prepared > concurrently. > We do not merge them again, right? > Most likely, the last redefinition wins with some side effects. > If so, then there has to be a way to detect and prevent this kind of > concurrency. Yes, if there are two constant pools prepared concurrently, the last one wins and will not contain any entries added by the other constant pool. I tried a change to detect concurrency and give an error. It's quite easy to detect with the constant pool versioning fixed. It failed a lot of our tests which do the same redefinition concurrently. I then tried a change to detect that the concurrent redefinition is incompatible. This change worked and I almost sent it out but since I couldn't write a test to provoke a failure in this case (because the old methods running are using their older constant pools), I decided to stick with the simplest fix to not depend on the constant pool index to be correct for the InstanceKlass::_source_file_name_index. So that's what happened. The ultimate answer is to stop merging constant pools. I have a repository with this change that passes all our tests but it's too big and risky of a change, ie needs more testing and verification, for this late in jdk9. I'd like to prepare it once the next release opens. > >> >> Actually, I made this change because I was going to make a bigger >> change that compared constant pool entries if they were the same >> version (ie both old_cp->version + 1), indicating parallel constant >> pool merging. I decided this change was too much. >> >>> >>> >>> + // NOTE: this doesn't work because you can redefine the same class >>> in two >>> + // threads, each getting their own constant pool data appended to the >>> + // original constant pool. In order for the new methods to work >>> when they >>> + // become old methods, they need to keep their updated copy of the >>> constant pool. >>> + >>> It feels like the statement in this note is too strong, and as such, >>> confusing. >>> Would it be better to tell something like "not always work"? >>> Otherwise, the question is: why do we need this block of code if it >>> doesn't work? >>> >> >> The block of code is #if 0'ed out. In my debugging I figured out >> why it wouldn't work, so I thought I'd comment it. > > Oh, I see. > In this particular case, I looked at the Udiff that does not show all > the context. > > Please, consider it reviewed. Thank you. It's great that you know this code so well to review this! Coleen > > Thanks, > Serguei > > >> >> Thanks, >> Coleen >> >>> >>> Thanks, >>> Serguei >>> >>> >>> On 4/11/16 13:06, Coleen Phillimore wrote: >>>> Summary: Constant pool merging is not thread safe for >>>> source_file_name. >>>> >>>> This change includes the change for the following bug because they >>>> are tested together. >>>> >>>> 8148772: VM crash in nsk/jvmti/RedefineClasses/StressRedefine: >>>> assert failed: Corrupted constant pool >>>> Summary: ConstantPool::resolve_constant_at_impl() isn't thread safe >>>> for MethodHandleInError and MethodTypeInError. >>>> >>>> The parallel constant pool merges are mostly harmless because the >>>> old methods constant pool pointers aren't updated. The only case I >>>> found where it isn't harmless is that we rely on finding the >>>> source_file_name_index from the final merged constant pool, which >>>> could be any of the parallel merged constant pools. 
The code to >>>> attempt to dig out the name from redefined classes is removed. >>>> >>>> open webrev at http://cr.openjdk.java.net/~coleenp/8151546.01/webrev >>>> bug link https://bugs.openjdk.java.net/browse/JDK-8151546 >>>> >>>> Tested with rbt, java/lang/instrument tests, com/sun/jdi tests. I >>>> tried to write a test with all the conditions of the failure but >>>> couldn't make it fail (so noreg-hard). >>>> >>>> Thanks, >>>> Coleen >>> >> > From serguei.spitsyn at oracle.com Wed Apr 13 02:24:06 2016 From: serguei.spitsyn at oracle.com (serguei.spitsyn at oracle.com) Date: Tue, 12 Apr 2016 19:24:06 -0700 Subject: RFR 8151546: nsk/jvmti/RedefineClasses/StressRedefine fails in hs nightly In-Reply-To: <570DA450.5060607@oracle.com> References: <570C03AB.4020906@oracle.com> <570C4C8C.5060300@oracle.com> <570D4CA5.2070505@oracle.com> <570DA24B.6010804@oracle.com> <570DA450.5060607@oracle.com> Message-ID: <570DADC6.40003@oracle.com> On 4/12/16 18:43, Coleen Phillimore wrote: > > Hi Serguei, > > On 4/12/16 9:35 PM, serguei.spitsyn at oracle.com wrote: >> On 4/12/16 12:29, Coleen Phillimore wrote: >>> >>> Hi Serguei, >>> >>> Thank you for looking at this change. >>> >>> On 4/11/16 9:17 PM, serguei.spitsyn at oracle.com wrote: >>>> Coleen, >>>> >>>> src/share/vm/prims/jvmtiRedefineClasses.cpp >>>> - // Update the version number of the constant pool >>>> + // Update the version number of the constant pools (may keep >>>> scratch_cp) >>>> merge_cp->increment_and_save_version(old_cp->version()); >>>> + scratch_cp->increment_and_save_version(old_cp->version()); >>>> >>>> Not sure, I understand the change above. >>>> Could you, please, explain why this change is needed? >>>> I suspect, the scratch_cp->version() is never used. >>> >>> scratch_cp is used if it's equivalent to the old constant pool (see >>> the code below this with the comments). But it could add entries. >>> In this case, we want scratch_cp to have a new version number >>> because scratch_class->_source_file_name_index may be an appended >>> entry (old_cp->length() + n) which a parallel constant pool merge >>> might append a different entry and be set to the constant pool after >>> the safepoint. So source_file_name_index won't point to the first >>> appended entry. So I need to update the version also in scratch_cp >>> to detect this. >> >> Thank you for the explanation. >> I'm Ok with this change, just wanted to understand. >> >> I think, we have to prevent multiple class redefinitions (prologues) >> of the same class at the same time. >> Otherwise, it is hard to isolate and fix all potential issues in this >> scenario. >> I doubt, the original goal was to allow this. >> >> I've never investigated this corner case. >> It is not clear what happens with two merged constant pools prepared >> concurrently. >> We do not merge them again, right? >> Most likely, the last redefinition wins with some side effects. >> If so, then there has to be a way to detect and prevent this kind of >> concurrency. > > Yes, if there are two constant pools prepared concurrently, the last > one wins and will not contain any entries added by the other constant > pool. > > I tried a change to detect concurrency and give an error. It's quite > easy to detect with the constant pool versioning fixed. It failed a > lot of our tests which do the same redefinition concurrently. I then > tried a change to detect that the concurrent redefinition is > incompatible. 
This change worked and I almost sent it out but since I > couldn't write a test to provoke a failure in this case (because the > old methods running are using their older constant pools), I decided > to stick with the simplest fix to not depend on the constant pool > index to be correct for the InstanceKlass::_source_file_name_index. > > So that's what happened. > > The ultimate answer is to stop merging constant pools. I have a > repository with this change that passes all our tests but it's too big > and risky of a change, ie needs more testing and verification, for > this late in jdk9. I'd like to prepare it once the next release opens. I think, even with the constant pool merge removal it is still too dangerous to have multiple prologues executed concurrently. We already have the following bug covering this topic: https://bugs.openjdk.java.net/browse/JDK-6227506 JVMTI Spec: Atomicity of RedefineClasses should be specified Thanks, Serguei >> >>> >>> Actually, I made this change because I was going to make a bigger >>> change that compared constant pool entries if they were the same >>> version (ie both old_cp->version + 1), indicating parallel constant >>> pool merging. I decided this change was too much. >>> >>>> >>>> >>>> + // NOTE: this doesn't work because you can redefine the same >>>> class in two >>>> + // threads, each getting their own constant pool data appended to the >>>> + // original constant pool. In order for the new methods to work >>>> when they >>>> + // become old methods, they need to keep their updated copy of >>>> the constant pool. >>>> + >>>> It feels like the statement in this note is too strong, and as >>>> such, confusing. >>>> Would it be better to tell something like "not always work"? >>>> Otherwise, the question is: why do we need this block of code if it >>>> doesn't work? >>>> >>> >>> The block of code is #if 0'ed out. In my debugging I figured out >>> why it wouldn't work, so I thought I'd comment it. >> >> Oh, I see. >> In this particular case, I looked at the Udiff that does not show all >> the context. >> >> Please, consider it reviewed. > > Thank you. It's great that you know this code so well to review this! > > Coleen > >> >> Thanks, >> Serguei >> >> >>> >>> Thanks, >>> Coleen >>> >>>> >>>> Thanks, >>>> Serguei >>>> >>>> >>>> On 4/11/16 13:06, Coleen Phillimore wrote: >>>>> Summary: Constant pool merging is not thread safe for >>>>> source_file_name. >>>>> >>>>> This change includes the change for the following bug because they >>>>> are tested together. >>>>> >>>>> 8148772: VM crash in nsk/jvmti/RedefineClasses/StressRedefine: >>>>> assert failed: Corrupted constant pool >>>>> Summary: ConstantPool::resolve_constant_at_impl() isn't thread >>>>> safe for MethodHandleInError and MethodTypeInError. >>>>> >>>>> The parallel constant pool merges are mostly harmless because the >>>>> old methods constant pool pointers aren't updated. The only case >>>>> I found where it isn't harmless is that we rely on finding the >>>>> source_file_name_index from the final merged constant pool, which >>>>> could be any of the parallel merged constant pools. The code to >>>>> attempt to dig out the name from redefined classes is removed. >>>>> >>>>> open webrev at http://cr.openjdk.java.net/~coleenp/8151546.01/webrev >>>>> bug link https://bugs.openjdk.java.net/browse/JDK-8151546 >>>>> >>>>> Tested with rbt, java/lang/instrument tests, com/sun/jdi tests. 
I >>>>> tried to write a test with all the conditions of the failure but >>>>> couldn't make it fail (so noreg-hard). >>>>> >>>>> Thanks, >>>>> Coleen >>>> >>> >> > From goetz.lindenmaier at sap.com Wed Apr 13 07:34:22 2016 From: goetz.lindenmaier at sap.com (Lindenmaier, Goetz) Date: Wed, 13 Apr 2016 07:34:22 +0000 Subject: RFR(S): 8154087: Fix AIX and Linux/ppc64le after the integration of the new hotspot build In-Reply-To: <570D1FB8.7000506@oracle.com> References: <570D1FB8.7000506@oracle.com> Message-ID: Hi Volker, I had a look at your changes. Good that 'warning as errors' already lead to a fix :) Note that your changes might break the old build, as you remove the endianess check in flags.m4 etc.. I think this is ok, though, as else dead code will remain in the file. Looks good, reviewed. Best regards, Goetz. > -----Original Message----- > From: hotspot-dev [mailto:hotspot-dev-bounces at openjdk.java.net] On > Behalf Of Erik Joelsson > Sent: Dienstag, 12. April 2016 18:18 > To: Volker Simonis ; HotSpot Open Source > Developers ; build-dev dev at openjdk.java.net> > Subject: Re: RFR(S): 8154087: Fix AIX and Linux/ppc64le after the integration > of the new hotspot build > > Hello Volker, > > This looks good to me. I will sponsor the change into hs-rt once someone > reviews the source code changes. > > /Erik > > On 2016-04-12 18:03, Volker Simonis wrote: > > Hi, > > > > can I please have a review for the following trivial changes to make > > the build work again on AIX and Linux/ppc64le after the integration of > > the new hotspot build system. The changes are all AIX and/or ppc64 > > specific and shouldn't change the behavior on any other platform. > > > > Because the top-level changes require the rebuild of > > generated-configure.sh and the hotspot changes are in shared code, I > > also need a sponsor. It would be best if the changes could be pushed > > in sync to the hs-rt repository: > > > > http://cr.openjdk.java.net/~simonis/webrevs/2016/8154087.top > > http://cr.openjdk.java.net/~simonis/webrevs/2016/8154087.hs > > http://cr.openjdk.java.net/~simonis/webrevs/2016/8154087.jdk > > > > https://bugs.openjdk.java.net/browse/JDK-8154087 > > > > The hotspot change contains a trivial source code change in an AIX > > file to fix a warning which would otherwise break the build with > > "warnings as errors". > > > > The jdk change disables "warnings as errors" for AIX for several libs > > such that we can build the complete jdk with "warnings as errors" on > > AIX as well now. Fixing the actual warnings will be done in a later > > change. > > > > Thanks a lot and best regards, > > Volker From per.liden at oracle.com Wed Apr 13 08:34:50 2016 From: per.liden at oracle.com (Per Liden) Date: Wed, 13 Apr 2016 10:34:50 +0200 Subject: RFC: 8154079: Catch incorrectly included .inline.hpp files In-Reply-To: <570D24E9.9030401@oracle.com> References: <570D24E9.9030401@oracle.com> Message-ID: <570E04AA.3060509@oracle.com> Hi Stefan, On 2016-04-12 18:40, Stefan Karlsson wrote: > Hi all, > > I would like to propose a patch to make it easier to find and clean up > places where we include .inline.hpp files from .hpp files. So, that we > start getting smaller include dependencies, with lower risk of circular > include dependencies, and maybe even shorter compile times. > > The guidelines regarding file inclusions can be found at: > https://wiki.openjdk.java.net/display/HotSpot/StyleGuide > > "Files > > Do not put non-trivial function implementations in .hpp files. 
If > the implementation depends on other .hpp files, put it in a .cpp or a > .inline.hpp file. > .inline.hpp files should only be included in .cpp or .inline.hpp > files. > All .cpp files include precompiled.hpp as the first include line. > precompiled.hpp is just a build time optimization, so don't rely on > it to resolve include problems. > Keep the include lines sorted. > Put conditional inclusions (#if ...) at the end of the include list." > > The code to enable the stricter .inline.hpp include check can be found > in this small patch: > http://cr.openjdk.java.net/~stefank/8154079/webrev.01.00.addInlineHppGuard/ > > I'm using the fact that we are including almost all header files in the > precompiled.hpp file. So, if I add a "scoped" define, called > INLINE_HPP_GUARD, in preompiled.hpp and add checks for this define in > .inline.hpp files, the preprocessor will tell me when .inline.hpp > includes come from .hpp files (directly or indirectly) rather than .cpp > files. This requires that the .hpp file is reachable through > precompiled.hpp and that we start remove .inline.hpp files from > precompiled.hpp. > > I've tried this on a few .inline.hpp files. For example: thread.inline.hpp: > > http://cr.openjdk.java.net/~stefank/8154079/webrev.01.01.guardThreadInlineHpp > > > The inlineHppGuard.hpp file is first included in thread.inline.hpp: > > #include "runtime/atomic.inline.hpp" > #include "runtime/os.inline.hpp" > #include "runtime/thread.hpp" > +#include "utilities/inlineHppGuard.hpp" > > Then when I compile (with precompiled headers enabled) I get: > In file included from > /home/stefank/hg/jdk9/hs-rt/hotspot/src/share/vm/runtime/thread.inline.hpp:33:0, > > from > /home/stefank/hg/jdk9/hs-rt/hotspot/src/share/vm/runtime/handles.inline.hpp:29, > > from > /home/stefank/hg/jdk9/hs-rt/hotspot/src/share/vm/runtime/reflectionUtils.hpp:32, > > from > /home/stefank/hg/jdk9/hs-rt/hotspot/src/share/vm/classfile/systemDictionary.hpp:34, > > from > /home/stefank/hg/jdk9/hs-rt/hotspot/src/share/vm/ci/ciEnv.hpp:30, > from > /home/stefank/hg/jdk9/hs-rt/hotspot/src/share/vm/ci/ciUtilities.hpp:28, > from > /home/stefank/hg/jdk9/hs-rt/hotspot/src/share/vm/ci/ciNullObject.hpp:30, > from > /home/stefank/hg/jdk9/hs-rt/hotspot/src/share/vm/ci/ciConstant.hpp:29, > from > /home/stefank/hg/jdk9/hs-rt/hotspot/src/share/vm/ci/ciArray.hpp:29, > from > /home/stefank/hg/jdk9/hs-rt/hotspot/src/share/vm/precompiled/precompiled.hpp:37: > > /home/stefank/hg/jdk9/hs-rt/hotspot/src/share/vm/utilities/inlineHppGuard.hpp:44:2: > error: #error ".inline.hpp file incorrectly included from .hpp file" > > That tells me that I probably need to go and fix the inclusion of > reflectionUtils.hpp -> handles.inline.hpp first. It's not always enough > to remove the inclusion of the .inline.hpp file, because the header > might actually use the inline files. In those cases the code needs to be > restructured so that we the "offending" functions out from the .hpp > files to .cpp or .linline.hpp files. This might be tedious in the > beginning, but will hopefully become easier to maintain when when more > of these files get cleaned up. > > So, with this in place, after all incorrect includes have been fixed, > whenever someone "incorrectly" adds an inclusion of thread.inline.hpp > (directly or indirectly) to a .hpp file the preprocessor will complain. 
> > I have a patch set where I've tried this on different .inline.hpp files, > and the following patches show the kind of work that is needed to fix > the includes: > > http://cr.openjdk.java.net/~stefank/8154079/webrev.01.00.addInlineHppGuard/ > http://cr.openjdk.java.net/~stefank/8154079/webrev.01.01.guardThreadInlineHpp/ > > http://cr.openjdk.java.net/~stefank/8154079/webrev.01.02.guardHandleInlineHpp/ > > http://cr.openjdk.java.net/~stefank/8154079/webrev.01.03.guardOsInlineHpp/ > http://cr.openjdk.java.net/~stefank/8154079/webrev.01.04.guardMarkOopInlineHpp/ > > http://cr.openjdk.java.net/~stefank/8154079/webrev.01.05.guardOopInlineHpp/ > http://cr.openjdk.java.net/~stefank/8154079/webrev.01.06.guardFrameInlineHpp/ > > http://cr.openjdk.java.net/~stefank/8154079/webrev.01.07.guardHashtableInlineHpp/ > > http://cr.openjdk.java.net/~stefank/8154079/webrev.01.08.guardOrderAccessInlineHpp/ > > http://cr.openjdk.java.net/~stefank/8154079/webrev.01.09.guardBitMapInlineHpp/ > > http://cr.openjdk.java.net/~stefank/8154079/webrev.01.10.guardAtomicInlineHpp/ > > http://cr.openjdk.java.net/~stefank/8154079/webrev.01.11.guardKlassInlineHpp/ > > http://cr.openjdk.java.net/~stefank/8154079/webrev.01.12.guardTypeArrayOopInlineHpp/ > > http://cr.openjdk.java.net/~stefank/8154079/webrev.01.all/ > > It compiles on linux-x64 with and without precompiled headers, but I > haven't taken the time to try to get it to compile on other platforms. > > So, is this worth doing to start fixing our include mess? +1, this looks like a nice way of gradually moving in the right direction. It would be nice to get these errors also when precompiled headers is disabled. Instead of doing this through precompiled.hpp, how about adding a make target which creates a temporary "all.hpp" that #includes all our .hpp files (.inline.hpp excluded) and just runs the pre-processor on that file? cheers, Per > > Thanks, > StefanK From stefan.karlsson at oracle.com Wed Apr 13 08:43:58 2016 From: stefan.karlsson at oracle.com (Stefan Karlsson) Date: Wed, 13 Apr 2016 10:43:58 +0200 Subject: RFC: 8154079: Catch incorrectly included .inline.hpp files In-Reply-To: <570E04AA.3060509@oracle.com> References: <570D24E9.9030401@oracle.com> <570E04AA.3060509@oracle.com> Message-ID: <570E06CE.2060302@oracle.com> Hi Per, On 2016-04-13 10:34, Per Liden wrote: > Hi Stefan, > > On 2016-04-12 18:40, Stefan Karlsson wrote: >> Hi all, >> >> I would like to propose a patch to make it easier to find and clean up >> places where we include .inline.hpp files from .hpp files. So, that we >> start getting smaller include dependencies, with lower risk of circular >> include dependencies, and maybe even shorter compile times. >> >> The guidelines regarding file inclusions can be found at: >> https://wiki.openjdk.java.net/display/HotSpot/StyleGuide >> >> "Files >> >> Do not put non-trivial function implementations in .hpp files. If >> the implementation depends on other .hpp files, put it in a .cpp or a >> .inline.hpp file. >> .inline.hpp files should only be included in .cpp or .inline.hpp >> files. >> All .cpp files include precompiled.hpp as the first include line. >> precompiled.hpp is just a build time optimization, so don't rely on >> it to resolve include problems. >> Keep the include lines sorted. >> Put conditional inclusions (#if ...) at the end of the include >> list." 
>> >> The code to enable the stricter .inline.hpp include check can be found >> in this small patch: >> http://cr.openjdk.java.net/~stefank/8154079/webrev.01.00.addInlineHppGuard/ >> >> >> I'm using the fact that we are including almost all header files in the >> precompiled.hpp file. So, if I add a "scoped" define, called >> INLINE_HPP_GUARD, in preompiled.hpp and add checks for this define in >> .inline.hpp files, the preprocessor will tell me when .inline.hpp >> includes come from .hpp files (directly or indirectly) rather than .cpp >> files. This requires that the .hpp file is reachable through >> precompiled.hpp and that we start remove .inline.hpp files from >> precompiled.hpp. >> >> I've tried this on a few .inline.hpp files. For example: >> thread.inline.hpp: >> >> http://cr.openjdk.java.net/~stefank/8154079/webrev.01.01.guardThreadInlineHpp >> >> >> >> The inlineHppGuard.hpp file is first included in thread.inline.hpp: >> >> #include "runtime/atomic.inline.hpp" >> #include "runtime/os.inline.hpp" >> #include "runtime/thread.hpp" >> +#include "utilities/inlineHppGuard.hpp" >> >> Then when I compile (with precompiled headers enabled) I get: >> In file included from >> /home/stefank/hg/jdk9/hs-rt/hotspot/src/share/vm/runtime/thread.inline.hpp:33:0, >> >> >> from >> /home/stefank/hg/jdk9/hs-rt/hotspot/src/share/vm/runtime/handles.inline.hpp:29, >> >> >> from >> /home/stefank/hg/jdk9/hs-rt/hotspot/src/share/vm/runtime/reflectionUtils.hpp:32, >> >> >> from >> /home/stefank/hg/jdk9/hs-rt/hotspot/src/share/vm/classfile/systemDictionary.hpp:34, >> >> >> from >> /home/stefank/hg/jdk9/hs-rt/hotspot/src/share/vm/ci/ciEnv.hpp:30, >> from >> /home/stefank/hg/jdk9/hs-rt/hotspot/src/share/vm/ci/ciUtilities.hpp:28, >> from >> /home/stefank/hg/jdk9/hs-rt/hotspot/src/share/vm/ci/ciNullObject.hpp:30, >> from >> /home/stefank/hg/jdk9/hs-rt/hotspot/src/share/vm/ci/ciConstant.hpp:29, >> from >> /home/stefank/hg/jdk9/hs-rt/hotspot/src/share/vm/ci/ciArray.hpp:29, >> from >> /home/stefank/hg/jdk9/hs-rt/hotspot/src/share/vm/precompiled/precompiled.hpp:37: >> >> >> /home/stefank/hg/jdk9/hs-rt/hotspot/src/share/vm/utilities/inlineHppGuard.hpp:44:2: >> >> error: #error ".inline.hpp file incorrectly included from .hpp file" >> >> That tells me that I probably need to go and fix the inclusion of >> reflectionUtils.hpp -> handles.inline.hpp first. It's not always enough >> to remove the inclusion of the .inline.hpp file, because the header >> might actually use the inline files. In those cases the code needs to be >> restructured so that we the "offending" functions out from the .hpp >> files to .cpp or .linline.hpp files. This might be tedious in the >> beginning, but will hopefully become easier to maintain when when more >> of these files get cleaned up. >> >> So, with this in place, after all incorrect includes have been fixed, >> whenever someone "incorrectly" adds an inclusion of thread.inline.hpp >> (directly or indirectly) to a .hpp file the preprocessor will complain. 
>> >> I have a patch set where I've tried this on different .inline.hpp files, >> and the following patches show the kind of work that is needed to fix >> the includes: >> >> http://cr.openjdk.java.net/~stefank/8154079/webrev.01.00.addInlineHppGuard/ >> >> http://cr.openjdk.java.net/~stefank/8154079/webrev.01.01.guardThreadInlineHpp/ >> >> >> http://cr.openjdk.java.net/~stefank/8154079/webrev.01.02.guardHandleInlineHpp/ >> >> >> http://cr.openjdk.java.net/~stefank/8154079/webrev.01.03.guardOsInlineHpp/ >> >> http://cr.openjdk.java.net/~stefank/8154079/webrev.01.04.guardMarkOopInlineHpp/ >> >> >> http://cr.openjdk.java.net/~stefank/8154079/webrev.01.05.guardOopInlineHpp/ >> >> http://cr.openjdk.java.net/~stefank/8154079/webrev.01.06.guardFrameInlineHpp/ >> >> >> http://cr.openjdk.java.net/~stefank/8154079/webrev.01.07.guardHashtableInlineHpp/ >> >> >> http://cr.openjdk.java.net/~stefank/8154079/webrev.01.08.guardOrderAccessInlineHpp/ >> >> >> http://cr.openjdk.java.net/~stefank/8154079/webrev.01.09.guardBitMapInlineHpp/ >> >> >> http://cr.openjdk.java.net/~stefank/8154079/webrev.01.10.guardAtomicInlineHpp/ >> >> >> http://cr.openjdk.java.net/~stefank/8154079/webrev.01.11.guardKlassInlineHpp/ >> >> >> http://cr.openjdk.java.net/~stefank/8154079/webrev.01.12.guardTypeArrayOopInlineHpp/ >> >> >> http://cr.openjdk.java.net/~stefank/8154079/webrev.01.all/ >> >> It compiles on linux-x64 with and without precompiled headers, but I >> haven't taken the time to try to get it to compile on other platforms. >> >> So, is this worth doing to start fixing our include mess? > > +1, this looks like a nice way of gradually moving in the right > direction. > > It would be nice to get these errors also when precompiled headers is > disabled. Instead of doing this through precompiled.hpp, how about > adding a make target which creates a temporary "all.hpp" that > #includes all our .hpp files (.inline.hpp excluded) and just runs the > pre-processor on that file? Sounds like a good idea to me! Thanks, StefanK > > cheers, > Per > >> >> Thanks, >> StefanK From magnus.ihse.bursie at oracle.com Wed Apr 13 10:38:22 2016 From: magnus.ihse.bursie at oracle.com (Magnus Ihse Bursie) Date: Wed, 13 Apr 2016 12:38:22 +0200 Subject: RFR(S): 8154087: Fix AIX and Linux/ppc64le after the integration of the new hotspot build In-Reply-To: References: Message-ID: Looks good to me. /Magnus > 12 apr. 2016 kl. 18:03 skrev Volker Simonis : > > Hi, > > can I please have a review for the following trivial changes to make > the build work again on AIX and Linux/ppc64le after the integration of > the new hotspot build system. The changes are all AIX and/or ppc64 > specific and shouldn't change the behavior on any other platform. > > Because the top-level changes require the rebuild of > generated-configure.sh and the hotspot changes are in shared code, I > also need a sponsor. It would be best if the changes could be pushed > in sync to the hs-rt repository: > > http://cr.openjdk.java.net/~simonis/webrevs/2016/8154087.top > http://cr.openjdk.java.net/~simonis/webrevs/2016/8154087.hs > http://cr.openjdk.java.net/~simonis/webrevs/2016/8154087.jdk > > https://bugs.openjdk.java.net/browse/JDK-8154087 > > The hotspot change contains a trivial source code change in an AIX > file to fix a warning which would otherwise break the build with > "warnings as errors". > > The jdk change disables "warnings as errors" for AIX for several libs > such that we can build the complete jdk with "warnings as errors" on > AIX as well now. 
Fixing the actual warnings will be done in a later > change. > > Thanks a lot and best regards, > Volker From thomas.stuefe at gmail.com Wed Apr 13 10:44:58 2016 From: thomas.stuefe at gmail.com (=?UTF-8?Q?Thomas_St=C3=BCfe?=) Date: Wed, 13 Apr 2016 12:44:58 +0200 Subject: RFR: 8017629: G1: UseSHM in combination with a G1HeapRegionSize > os::large_page_size() falls back to use small pages In-Reply-To: <570D1747.2020508@oracle.com> References: <570B8481.8010108@oracle.com> <570BAC1B.7040806@oracle.com> <570D1747.2020508@oracle.com> Message-ID: Hi Stefan, On Tue, Apr 12, 2016 at 5:41 PM, Stefan Karlsson wrote: > Hi Thomas, > > > On 2016-04-12 16:23, Thomas St?fe wrote: > > Hi Stefan, > > > On Mon, Apr 11, 2016 at 3:52 PM, Stefan Karlsson < > stefan.karlsson at oracle.com> wrote: > >> Hi Thomas, >> >> On 2016-04-11 14:39, Thomas St?fe wrote: >> >> Hi Stefan, >> >> short question, why the mmap before the shmat? Why not shmat right away >> at the requested address? >> >> >> If we have a requested_address we do exactly what you propose. >> >> if (req_addr == NULL && alignment > os::large_page_size()) { >> return shmat_with_large_alignment(shmid, bytes, alignment); >> } else { >> return shmat_with_normal_alignment(shmid, req_addr); >> } >> >> ... >> >> static char* shmat_with_normal_alignment(int shmid, char* req_addr) { >> char* addr = (char*)shmat(shmid, req_addr, 0); >> >> if ((intptr_t)addr == -1) { >> shm_warning_with_errno("Failed to attach shared memory."); >> return NULL; >> } >> >> return addr; >> } >> >> >> It's when you don't have a requested address that mmap is used to find a >> large enough virtual memory area. >> >> > Sorry, seems I did not look at this coding thoroughly enough. I understand > now that you do mmap to allocate and then to cut away the extra > pre-/post-space, something which would not be possible with shmat, which > cannot be unmapped page-wise. > > But I am still not sure why we do it his way: > > 3429 static char* shmat_with_alignment(int shmid, size_t bytes, size_t > alignment, char* req_addr) { > 3430 // If there's no requested address, the shmat call can return > memory that is not > 3431 // 'alignment' aligned, if the given alignment is larger than the > large page size. > 3432 // Special care needs to be taken to ensure that we get aligned > memory back. > 3433 if (req_addr == NULL && alignment > os::large_page_size()) { > 3434 return shmat_with_large_alignment(shmid, bytes, alignment); > 3435 } else { > 3436 return shmat_with_normal_alignment(shmid, req_addr); > 3437 } > 3438 } > > For req_addr==0 and big alignment, we attach at the given alignment > ("shmat_with_large_alignment"). > For req_addr!=0, we attach at the given requested address > ("shmat_with_normal_alignment"). > For req_addr==0 and smaller alignment, we ignore the alignment and attach > anywhere? > > Maybe I am slow, but why does it matter if the alignment is large or > small? Why not just distinguish between: > > 1) address given (req_addr!=0): in this case we attach at this req_addr > and rely on the user having aligned the address properly for his purposes. > We specify 0 for flags, so we will attach at exactly the given address or > fail. In this case we could simply ignore the given alignment - if one was > given - or just use it to counter-check the req_addr. > > 2) alignment given (req_addr==0 and alignment > 0): attach at the given > alignment using mmap-before-shmat. This could be done for any alignment, be > it large or small. > > > What you propose doesn't work. 
> > We're allocating large pages with SHM_HUGETLB, and if we try to attach to > an address that is not large_page_size aligned the shmat call returns > EINVAL. > > I was aware of this. What I meant was: You have "shmat_with_large_alignment" which takes an alignment and does its best to shmat with that alignment using the mmap trick. This coding does not need to know anything about huge pages, and actually does not do anything huge-pagey, apart from the asserts - it would just as well work with small pages, because the only place where the code needs to know about huge pages is in the layer above, in reserve_memory_special - where we pass SHM_HUGETLB to shmget. (Btw, I always wondered about the "reserve_memory_special" naming.) I think my point is that by renaming this to "shmat_with_alignment" and removing the huge-page-related asserts the function would become both simpler and more versatile and could be reused for small alignments as well as large ones. The fact that it returns EINVAL for alignments instead of asserting would not be a problem - we would return an error instead of asserting because of bad alignment, and both handling this error and asserting for huge-page-alignment could be handled better in reserve_memory_special. To put it another way, I think "shmat_with_large_alignment" does not need to know about huge pages; this should be the responsibility of reserve_memory_special. About "shmat_with_normal_alignment", this is actually only a raw shmat call and exists for the req_addr!=NULL case and for the case where we do not pass neither req_addr nor alignment. So the only thing it does not handle is alignment, so it is misnamed and also should not be called for the req_addr==NULL-and-small-alignments-case. > > Functions would become simpler and also could be clearer named (e.g. > "shmat_at_address" and "shmat_with_alignment", respectivly). > > > Maybe I should rename the functions to make it more obvious that these are > large pages specific functions? > > > ---- > > This: > > 3402 if ((intptr_t)addr == -1) { > 3403 shm_warning_with_errno("Failed to attach shared memory."); > 3404 // Since we don't know if the kernel unmapped the pre-reserved > memory area > 3405 // we can't unmap it, since that would potentially unmap memory > that was > 3406 // mapped from other threads. > 3407 return NULL; > 3408 } > > seems scary. Means for every call this happens, we leak the reserved (not > committed) address space? > > > Yes, that's unfortunate. > > An alternative would be to use this sequence: > 1) Use anon_mmap_aligned to find a suitable VA range > 2) Immediately unmap the VA range > 3) Try to attach at that VA range _without_ SHM_REMAP > > That would remove the risk of leaking the reserved address space, but > instead we risk failing at (3) if another thread manages to allocate memory > inside the found VA range. This will cause some users to unnecessarily fail > to get large pages, though. We've had other problems when pre-existing > threads used mmap while we were initializing the VM. See: JDK-8007074. > Yes; btw you also could do this with shmget/shmat instead of mmap. Note that similar unclean tricks are already done in other places, see e.g. the windows version of os::pd_split_reserved_memory(). Which deals with VirtualAlloc() being unable, like shmget, to deallocate piece-wise. > > > For most cases (anything but ENOMEM, actually) could we at least assert?: > > EACCES - should not happen: we created the shared memory and are its owner > EIDRM - should not happen. 
> EINVAL - should not happen. (you already check now the attach address for > alignment to SHMLBA, so this is covered) > > > Sure. I'll add asserts for these. > > > --- > > Smaller nits: > > Functions called "shmat_..." suggest shmat-like behaviour, so could we > have them return -1 instead of NULL in case of error? > > > That would add clutter to the reserve_memory_special_shm, and it might > also suggest that it would be OK to check errno for the failure reason, > which probably wouldn't work. I'll let other Reviewers chime in and help > decide if we should change this. > > You are right. If one returns -1, one would have to preserve errno for the caller too. > Thanks for reviewing this, > StefanK > You are welcome! Kind Regards, Thomas > > > > Kind Regards, Thomas > >> >> Also note that mmap- and shmat-allocated memory may have different >> alignment requirements: mmap requires a page-aligned request address, >> whereas shmat requires alignment to SHMLBA, which may be multiple pages >> (e.g. for ARM: >> >> http://lxr.free-electrons.com/source/arch/arm/include/asm/shmparam.h#L9). >> So, for this shat-over-mmap trick to work, request address has to be >> aligned to SHMLBA, not just page size. >> >> I see that you assert alignment of requ address to os::large_page_size(), >> which I would assume is a multiple of SHMLBA, but I am not sure of this. >> >> >> I've added some defensive code and asserts to catch this if/when this >> assumption fails: >> >> http://cr.openjdk.java.net/~stefank/8017629/webrev.02.delta/ >> http://cr.openjdk.java.net/~stefank/8017629/webrev.02 >> >> I need to verify that this works on other machines than my local Linux >> x64 machine. >> >> Thanks, >> StefanK >> >> >> Kind Regards, Thomas >> >> >> >> On Mon, Apr 11, 2016 at 1:03 PM, Stefan Karlsson < >> stefan.karlsson at oracle.com> wrote: >> >>> Hi all, >>> >>> Please review this patch to enable SHM large page allocations even when >>> the requested alignment is larger than os::large_page_size(). >>> >>> http://cr.openjdk.java.net/~stefank/8017629/webrev.01 >>> https://bugs.openjdk.java.net/browse/JDK-8017629 >>> >>> G1 is affected by this bug since it requires the heap to start at an >>> address that is aligned with the heap region size. The patch fixes this by >>> changing the UseSHM large pages allocation code. First, virtual memory with >>> correct alignment is pre-reserved and then the large pages are attached to >>> this memory area. >>> >>> Tested with vm.gc.testlist and ExecuteInternaVMTests >>> >>> Thanks, >>> StefanK >>> >> >> >> > > From magnus.ihse.bursie at oracle.com Wed Apr 13 10:48:13 2016 From: magnus.ihse.bursie at oracle.com (Magnus Ihse Bursie) Date: Wed, 13 Apr 2016 12:48:13 +0200 Subject: RFR: JDK-8149777: Enable enhanced failure handler for "make test" In-Reply-To: <570CFF92.2090702@oracle.com> References: <570CFF92.2090702@oracle.com> Message-ID: Is the test-failure-handle used to test that the failure handler is correct? It seems a bit odd, since most other tests has their main purpose to be run, and the build part is just a necessary thing, but here I presume that the main thing is to build the handler so other tests can use it, and it is not clear what the test itself mean. Otoh, I can't see any other way to express this. /Magnus > 12 apr. 2016 kl. 16:00 skrev Erik Joelsson : > > Please review this change which adds a proper makefile and build sequence for the failure-handler jtreg plugin. 
It also adds the failure handler to the test image and makes the hotspot and jdk test/Makefile's pick it up when available. > > Bug: https://bugs.openjdk.java.net/browse/JDK-8149777 > Webrev: http://cr.openjdk.java.net/~erikj/8149777/webrev.03/ > > /Erik From stefan.karlsson at oracle.com Wed Apr 13 11:23:49 2016 From: stefan.karlsson at oracle.com (Stefan Karlsson) Date: Wed, 13 Apr 2016 13:23:49 +0200 Subject: RFR: 8017629: G1: UseSHM in combination with a G1HeapRegionSize > os::large_page_size() falls back to use small pages In-Reply-To: References: <570B8481.8010108@oracle.com> <570BAC1B.7040806@oracle.com> <570D1747.2020508@oracle.com> Message-ID: <570E2C45.7090201@oracle.com> Hi Thomas, On 2016-04-13 12:44, Thomas St?fe wrote: > Hi Stefan, > > On Tue, Apr 12, 2016 at 5:41 PM, Stefan Karlsson > > wrote: > > Hi Thomas, > > > On 2016-04-12 16:23, Thomas St?fe wrote: >> Hi Stefan, >> >> >> On Mon, Apr 11, 2016 at 3:52 PM, Stefan Karlsson >> > >> wrote: >> >> Hi Thomas, >> >> On 2016-04-11 14:39, Thomas St?fe wrote: >>> Hi Stefan, >>> >>> short question, why the mmap before the shmat? Why not >>> shmat right away at the requested address? >> >> If we have a requested_address we do exactly what you propose. >> >> if (req_addr == NULL && alignment > os::large_page_size()) { >> return shmat_with_large_alignment(shmid, bytes, alignment); >> } else { >> return shmat_with_normal_alignment(shmid, req_addr); >> } >> >> ... >> >> static char* shmat_with_normal_alignment(int shmid, char* >> req_addr) { >> char* addr = (char*)shmat(shmid, req_addr, 0); >> >> if ((intptr_t)addr == -1) { >> shm_warning_with_errno("Failed to attach shared memory."); >> return NULL; >> } >> >> return addr; >> } >> >> >> It's when you don't have a requested address that mmap is >> used to find a large enough virtual memory area. >> >> >> Sorry, seems I did not look at this coding thoroughly enough. I >> understand now that you do mmap to allocate and then to cut away >> the extra pre-/post-space, something which would not be possible >> with shmat, which cannot be unmapped page-wise. >> >> But I am still not sure why we do it his way: >> >> 3429 static char* shmat_with_alignment(int shmid, size_t bytes, >> size_t alignment, char* req_addr) { >> 3430 // If there's no requested address, the shmat call can >> return memory that is not >> 3431 // 'alignment' aligned, if the given alignment is larger >> than the large page size. >> 3432 // Special care needs to be taken to ensure that we get >> aligned memory back. >> 3433 if (req_addr == NULL && alignment > os::large_page_size()) { >> 3434 return shmat_with_large_alignment(shmid, bytes, alignment); >> 3435 } else { >> 3436 return shmat_with_normal_alignment(shmid, req_addr); >> 3437 } >> 3438 } >> >> For req_addr==0 and big alignment, we attach at the given >> alignment ("shmat_with_large_alignment"). >> For req_addr!=0, we attach at the given requested address >> ("shmat_with_normal_alignment"). >> For req_addr==0 and smaller alignment, we ignore the alignment >> and attach anywhere? >> >> Maybe I am slow, but why does it matter if the alignment is large >> or small? Why not just distinguish between: >> >> 1) address given (req_addr!=0): in this case we attach at this >> req_addr and rely on the user having aligned the address properly >> for his purposes. We specify 0 for flags, so we will attach at >> exactly the given address or fail. In this case we could simply >> ignore the given alignment - if one was given - or just use it to >> counter-check the req_addr. 
>> >> 2) alignment given (req_addr==0 and alignment > 0): attach at the >> given alignment using mmap-before-shmat. This could be done for >> any alignment, be it large or small. > > What you propose doesn't work. > > We're allocating large pages with SHM_HUGETLB, and if we try to > attach to an address that is not large_page_size aligned the shmat > call returns EINVAL. > > > I was aware of this. What I meant was: > > You have "shmat_with_large_alignment" which takes an alignment and > does its best to shmat with that alignment using the mmap trick. This > coding does not need to know anything about huge pages, and actually > does not do anything huge-pagey, apart from the asserts - it would > just as well work with small pages, because the only place where the > code needs to know about huge pages is in the layer above, in > reserve_memory_special - where we pass SHM_HUGETLB to shmget. (Btw, I > always wondered about the "reserve_memory_special" naming.) > > I think my point is that by renaming this to "shmat_with_alignment" > and removing the huge-page-related asserts the function would become > both simpler and more versatile and could be reused for small > alignments as well as large ones. The fact that it returns EINVAL for > alignments instead of asserting would not be a problem - we would > return an error instead of asserting because of bad alignment, and > both handling this error and asserting for huge-page-alignment could > be handled better in reserve_memory_special. > > To put it another way, I think "shmat_with_large_alignment" does not > need to know about huge pages; this should be the responsibility of > reserve_memory_special. > > About "shmat_with_normal_alignment", this is actually only a raw shmat > call and exists for the req_addr!=NULL case and for the case where we > do not pass neither req_addr nor alignment. So the only thing it does > not handle is alignment, so it is misnamed and also should not be > called for the req_addr==NULL-and-small-alignments-case. The reserve_memory_special_shm function and the associated helper functions I'm adding are specifically written to support large pages allocations. The names "normal_alignment" and "large_alignment" are intended to refer to alignment sizes compared to the large pages size. I grant you that it's not obvious from the name, and we can rename them to make it more clear. I want to provide a small bug fix for this large pages bug, while you are suggesting that we re-purpose the code into supporting small page allocations as well. Your suggestions might be good, but may I suggest that you create a patch and an RFE that motivates why we should make this code more generic to support small pages as well? Thanks, StefanK >> >> Functions would become simpler and also could be clearer named >> (e.g. "shmat_at_address" and "shmat_with_alignment", respectivly). > > Maybe I should rename the functions to make it more obvious that > these are large pages specific functions? > >> >> ---- >> >> This: >> >> 3402 if ((intptr_t)addr == -1) { >> 3403 shm_warning_with_errno("Failed to attach shared memory."); >> 3404 // Since we don't know if the kernel unmapped the >> pre-reserved memory area >> 3405 // we can't unmap it, since that would potentially unmap >> memory that was >> 3406 // mapped from other threads. >> 3407 return NULL; >> 3408 } >> >> seems scary. Means for every call this happens, we leak the >> reserved (not committed) address space? > > Yes, that's unfortunate. 
> > An alternative would be to use this sequence: > 1) Use anon_mmap_aligned to find a suitable VA range > 2) Immediately unmap the VA range > 3) Try to attach at that VA range _without_ SHM_REMAP > > That would remove the risk of leaking the reserved address space, > but instead we risk failing at (3) if another thread manages to > allocate memory inside the found VA range. This will cause some > users to unnecessarily fail to get large pages, though. We've had > other problems when pre-existing threads used mmap while we were > initializing the VM. See: JDK-8007074. > > > Yes; btw you also could do this with shmget/shmat instead of mmap. > > Note that similar unclean tricks are already done in other places, see > e.g. the windows version of os::pd_split_reserved_memory(). Which > deals with VirtualAlloc() being unable, like shmget, to deallocate > piece-wise. > > > >> For most cases (anything but ENOMEM, actually) could we at least >> assert?: >> >> EACCES - should not happen: we created the shared memory and are >> its owner >> EIDRM - should not happen. >> EINVAL - should not happen. (you already check now the attach >> address for alignment to SHMLBA, so this is covered) > > Sure. I'll add asserts for these. > >> >> --- >> >> Smaller nits: >> >> Functions called "shmat_..." suggest shmat-like behaviour, so >> could we have them return -1 instead of NULL in case of error? > > That would add clutter to the reserve_memory_special_shm, and it > might also suggest that it would be OK to check errno for the > failure reason, which probably wouldn't work. I'll let other > Reviewers chime in and help decide if we should change this. > > > You are right. If one returns -1, one would have to preserve errno for > the caller too. > > Thanks for reviewing this, > StefanK > > > You are welcome! > > Kind Regards, Thomas > > > >> >> Kind Regards, Thomas >> >>> >>> Also note that mmap- and shmat-allocated memory may have >>> different alignment requirements: mmap requires a >>> page-aligned request address, whereas shmat requires >>> alignment to SHMLBA, which may be multiple pages (e.g. for >>> ARM: >>> http://lxr.free-electrons.com/source/arch/arm/include/asm/shmparam.h#L9). >>> So, for this shat-over-mmap trick to work, request address >>> has to be aligned to SHMLBA, not just page size. >>> >>> I see that you assert alignment of requ address to >>> os::large_page_size(), which I would assume is a multiple of >>> SHMLBA, but I am not sure of this. >> >> I've added some defensive code and asserts to catch this >> if/when this assumption fails: >> >> http://cr.openjdk.java.net/~stefank/8017629/webrev.02.delta/ >> >> http://cr.openjdk.java.net/~stefank/8017629/webrev.02 >> >> >> I need to verify that this works on other machines than my >> local Linux x64 machine. >> >> Thanks, >> StefanK >> >>> >>> Kind Regards, Thomas >>> >>> >>> >>> On Mon, Apr 11, 2016 at 1:03 PM, Stefan Karlsson >>> >> > wrote: >>> >>> Hi all, >>> >>> Please review this patch to enable SHM large page >>> allocations even when the requested alignment is larger >>> than os::large_page_size(). >>> >>> http://cr.openjdk.java.net/~stefank/8017629/webrev.01 >>> >>> https://bugs.openjdk.java.net/browse/JDK-8017629 >>> >>> G1 is affected by this bug since it requires the heap to >>> start at an address that is aligned with the heap region >>> size. The patch fixes this by changing the UseSHM large >>> pages allocation code. 
First, virtual memory with >>> correct alignment is pre-reserved and then the large >>> pages are attached to this memory area. >>> >>> Tested with vm.gc.testlist and ExecuteInternaVMTests >>> >>> Thanks, >>> StefanK >>> >>> >> >> > > From thomas.stuefe at gmail.com Wed Apr 13 12:07:00 2016 From: thomas.stuefe at gmail.com (=?UTF-8?Q?Thomas_St=C3=BCfe?=) Date: Wed, 13 Apr 2016 14:07:00 +0200 Subject: RFC: 8154079: Catch incorrectly included .inline.hpp files In-Reply-To: <570E06CE.2060302@oracle.com> References: <570D24E9.9030401@oracle.com> <570E04AA.3060509@oracle.com> <570E06CE.2060302@oracle.com> Message-ID: Hi, I assume this target would not get built for normal hotspot builds, but would have to be built explicitly? If yes, could we have this somehow be a part of hg jcheck? Kind Regards, Thomas On Wed, Apr 13, 2016 at 10:43 AM, Stefan Karlsson < stefan.karlsson at oracle.com> wrote: > Hi Per, > > > On 2016-04-13 10:34, Per Liden wrote: > >> Hi Stefan, >> >> On 2016-04-12 18:40, Stefan Karlsson wrote: >> >>> Hi all, >>> >>> I would like to propose a patch to make it easier to find and clean up >>> places where we include .inline.hpp files from .hpp files. So, that we >>> start getting smaller include dependencies, with lower risk of circular >>> include dependencies, and maybe even shorter compile times. >>> >>> The guidelines regarding file inclusions can be found at: >>> https://wiki.openjdk.java.net/display/HotSpot/StyleGuide >>> >>> "Files >>> >>> Do not put non-trivial function implementations in .hpp files. If >>> the implementation depends on other .hpp files, put it in a .cpp or a >>> .inline.hpp file. >>> .inline.hpp files should only be included in .cpp or .inline.hpp >>> files. >>> All .cpp files include precompiled.hpp as the first include line. >>> precompiled.hpp is just a build time optimization, so don't rely on >>> it to resolve include problems. >>> Keep the include lines sorted. >>> Put conditional inclusions (#if ...) at the end of the include >>> list." >>> >>> The code to enable the stricter .inline.hpp include check can be found >>> in this small patch: >>> >>> http://cr.openjdk.java.net/~stefank/8154079/webrev.01.00.addInlineHppGuard/ >>> >>> I'm using the fact that we are including almost all header files in the >>> precompiled.hpp file. So, if I add a "scoped" define, called >>> INLINE_HPP_GUARD, in preompiled.hpp and add checks for this define in >>> .inline.hpp files, the preprocessor will tell me when .inline.hpp >>> includes come from .hpp files (directly or indirectly) rather than .cpp >>> files. This requires that the .hpp file is reachable through >>> precompiled.hpp and that we start remove .inline.hpp files from >>> precompiled.hpp. >>> >>> I've tried this on a few .inline.hpp files. 
For example: >>> thread.inline.hpp: >>> >>> >>> http://cr.openjdk.java.net/~stefank/8154079/webrev.01.01.guardThreadInlineHpp >>> >>> >>> The inlineHppGuard.hpp file is first included in thread.inline.hpp: >>> >>> #include "runtime/atomic.inline.hpp" >>> #include "runtime/os.inline.hpp" >>> #include "runtime/thread.hpp" >>> +#include "utilities/inlineHppGuard.hpp" >>> >>> Then when I compile (with precompiled headers enabled) I get: >>> In file included from >>> /home/stefank/hg/jdk9/hs-rt/hotspot/src/share/vm/runtime/thread.inline.hpp:33:0, >>> >>> >>> from >>> /home/stefank/hg/jdk9/hs-rt/hotspot/src/share/vm/runtime/handles.inline.hpp:29, >>> >>> >>> from >>> /home/stefank/hg/jdk9/hs-rt/hotspot/src/share/vm/runtime/reflectionUtils.hpp:32, >>> >>> >>> from >>> /home/stefank/hg/jdk9/hs-rt/hotspot/src/share/vm/classfile/systemDictionary.hpp:34, >>> >>> >>> from >>> /home/stefank/hg/jdk9/hs-rt/hotspot/src/share/vm/ci/ciEnv.hpp:30, >>> from >>> /home/stefank/hg/jdk9/hs-rt/hotspot/src/share/vm/ci/ciUtilities.hpp:28, >>> from >>> /home/stefank/hg/jdk9/hs-rt/hotspot/src/share/vm/ci/ciNullObject.hpp:30, >>> from >>> /home/stefank/hg/jdk9/hs-rt/hotspot/src/share/vm/ci/ciConstant.hpp:29, >>> from >>> /home/stefank/hg/jdk9/hs-rt/hotspot/src/share/vm/ci/ciArray.hpp:29, >>> from >>> /home/stefank/hg/jdk9/hs-rt/hotspot/src/share/vm/precompiled/precompiled.hpp:37: >>> >>> >>> /home/stefank/hg/jdk9/hs-rt/hotspot/src/share/vm/utilities/inlineHppGuard.hpp:44:2: >>> >>> error: #error ".inline.hpp file incorrectly included from .hpp file" >>> >>> That tells me that I probably need to go and fix the inclusion of >>> reflectionUtils.hpp -> handles.inline.hpp first. It's not always enough >>> to remove the inclusion of the .inline.hpp file, because the header >>> might actually use the inline files. In those cases the code needs to be >>> restructured so that we the "offending" functions out from the .hpp >>> files to .cpp or .linline.hpp files. This might be tedious in the >>> beginning, but will hopefully become easier to maintain when when more >>> of these files get cleaned up. >>> >>> So, with this in place, after all incorrect includes have been fixed, >>> whenever someone "incorrectly" adds an inclusion of thread.inline.hpp >>> (directly or indirectly) to a .hpp file the preprocessor will complain. 
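(To make the guard mechanism concrete, here is a guess at the shape of the two pieces. Only the #error text is taken from the compiler output quoted above; the exact contents of the webrev's precompiled.hpp and inlineHppGuard.hpp may well differ.)

  // precompiled.hpp (sketch): the guard is "scoped" - defined while the plain
  // .hpp includes are pulled in, dropped again before any .inline.hpp
  // includes that are still left in the file.
  #define INLINE_HPP_GUARD
  #include "ci/ciArray.hpp"
  // ... all other .hpp includes ...
  #undef INLINE_HPP_GUARD
  // ... remaining .inline.hpp includes (to be removed over time) follow here ...

  // utilities/inlineHppGuard.hpp (sketch): included by each guarded
  // .inline.hpp file. If the guard is still defined, the include chain was
  // entered from a .hpp file reachable via precompiled.hpp.
  #ifdef INLINE_HPP_GUARD
  #error ".inline.hpp file incorrectly included from .hpp file"
  #endif
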
>>> >>> I have a patch set where I've tried this on different .inline.hpp files, >>> and the following patches show the kind of work that is needed to fix >>> the includes: >>> >>> >>> http://cr.openjdk.java.net/~stefank/8154079/webrev.01.00.addInlineHppGuard/ >>> >>> http://cr.openjdk.java.net/~stefank/8154079/webrev.01.01.guardThreadInlineHpp/ >>> >>> >>> http://cr.openjdk.java.net/~stefank/8154079/webrev.01.02.guardHandleInlineHpp/ >>> >>> >>> http://cr.openjdk.java.net/~stefank/8154079/webrev.01.03.guardOsInlineHpp/ >>> >>> http://cr.openjdk.java.net/~stefank/8154079/webrev.01.04.guardMarkOopInlineHpp/ >>> >>> >>> http://cr.openjdk.java.net/~stefank/8154079/webrev.01.05.guardOopInlineHpp/ >>> >>> http://cr.openjdk.java.net/~stefank/8154079/webrev.01.06.guardFrameInlineHpp/ >>> >>> >>> http://cr.openjdk.java.net/~stefank/8154079/webrev.01.07.guardHashtableInlineHpp/ >>> >>> >>> http://cr.openjdk.java.net/~stefank/8154079/webrev.01.08.guardOrderAccessInlineHpp/ >>> >>> >>> http://cr.openjdk.java.net/~stefank/8154079/webrev.01.09.guardBitMapInlineHpp/ >>> >>> >>> http://cr.openjdk.java.net/~stefank/8154079/webrev.01.10.guardAtomicInlineHpp/ >>> >>> >>> http://cr.openjdk.java.net/~stefank/8154079/webrev.01.11.guardKlassInlineHpp/ >>> >>> >>> http://cr.openjdk.java.net/~stefank/8154079/webrev.01.12.guardTypeArrayOopInlineHpp/ >>> >>> http://cr.openjdk.java.net/~stefank/8154079/webrev.01.all/ >>> >>> It compiles on linux-x64 with and without precompiled headers, but I >>> haven't taken the time to try to get it to compile on other platforms. >>> >>> So, is this worth doing to start fixing our include mess? >>> >> >> +1, this looks like a nice way of gradually moving in the right direction. >> >> It would be nice to get these errors also when precompiled headers is >> disabled. Instead of doing this through precompiled.hpp, how about adding a >> make target which creates a temporary "all.hpp" that #includes all our .hpp >> files (.inline.hpp excluded) and just runs the pre-processor on that file? >> > > Sounds like a good idea to me! > > Thanks, > StefanK > > >> cheers, >> Per >> >> >>> Thanks, >>> StefanK >>> >> > From volker.simonis at gmail.com Wed Apr 13 12:08:50 2016 From: volker.simonis at gmail.com (Volker Simonis) Date: Wed, 13 Apr 2016 14:08:50 +0200 Subject: RFR(S): 8154087: Fix AIX and Linux/ppc64le after the integration of the new hotspot build In-Reply-To: References: Message-ID: Goetz, Magnus - thanks for reviewing. Erik - thanks for pushing. Regards, Volker On Wed, Apr 13, 2016 at 12:38 PM, Magnus Ihse Bursie < magnus.ihse.bursie at oracle.com> wrote: > Looks good to me. > > /Magnus > > > 12 apr. 2016 kl. 18:03 skrev Volker Simonis : > > > > Hi, > > > > can I please have a review for the following trivial changes to make > > the build work again on AIX and Linux/ppc64le after the integration of > > the new hotspot build system. The changes are all AIX and/or ppc64 > > specific and shouldn't change the behavior on any other platform. > > > > Because the top-level changes require the rebuild of > > generated-configure.sh and the hotspot changes are in shared code, I > > also need a sponsor. 
It would be best if the changes could be pushed > > in sync to the hs-rt repository: > > > > http://cr.openjdk.java.net/~simonis/webrevs/2016/8154087.top > > http://cr.openjdk.java.net/~simonis/webrevs/2016/8154087.hs > > http://cr.openjdk.java.net/~simonis/webrevs/2016/8154087.jdk > > > > https://bugs.openjdk.java.net/browse/JDK-8154087 > > > > The hotspot change contains a trivial source code change in an AIX > > file to fix a warning which would otherwise break the build with > > "warnings as errors". > > > > The jdk change disables "warnings as errors" for AIX for several libs > > such that we can build the complete jdk with "warnings as errors" on > > AIX as well now. Fixing the actual warnings will be done in a later > > change. > > > > Thanks a lot and best regards, > > Volker > From vladimir.x.ivanov at oracle.com Wed Apr 13 12:10:49 2016 From: vladimir.x.ivanov at oracle.com (Vladimir Ivanov) Date: Wed, 13 Apr 2016 15:10:49 +0300 Subject: [9] RFR (XXS): 8154145: Missing klass/method name in stack traces on error Message-ID: <570E3749.8080605@oracle.com> http://cr.openjdk.java.net/~vlivanov/8154145/webrev.00/ https://bugs.openjdk.java.net/browse/JDK-8154145 After Jigsaw merge, hs_err files contain incomplete stack information for code located in modules: J 1384 C1 java.base at 9-internal9-internal (104 bytes) @ 0xf4bca070 [0xf4bc9ea0+0x000001d0] The problem is that module info overwrites class/method name before it is printed. Best regards, Vladimir Ivanov From stefan.karlsson at oracle.com Wed Apr 13 12:29:22 2016 From: stefan.karlsson at oracle.com (Stefan Karlsson) Date: Wed, 13 Apr 2016 14:29:22 +0200 Subject: [9] RFR (XXS): 8154145: Missing klass/method name in stack traces on error In-Reply-To: <570E3749.8080605@oracle.com> References: <570E3749.8080605@oracle.com> Message-ID: <570E3BA2.7070309@oracle.com> Looks good. StefanK On 2016-04-13 14:10, Vladimir Ivanov wrote: > http://cr.openjdk.java.net/~vlivanov/8154145/webrev.00/ > https://bugs.openjdk.java.net/browse/JDK-8154145 > > After Jigsaw merge, hs_err files contain incomplete stack information > for code located in modules: > > J 1384 C1 java.base at 9-internal9-internal (104 bytes) @ 0xf4bca070 > [0xf4bc9ea0+0x000001d0] > > The problem is that module info overwrites class/method name before it > is printed. > > Best regards, > Vladimir Ivanov From per.liden at oracle.com Wed Apr 13 12:33:15 2016 From: per.liden at oracle.com (Per Liden) Date: Wed, 13 Apr 2016 14:33:15 +0200 Subject: RFC: 8154079: Catch incorrectly included .inline.hpp files In-Reply-To: References: <570D24E9.9030401@oracle.com> <570E04AA.3060509@oracle.com> <570E06CE.2060302@oracle.com> Message-ID: <570E3C8B.5060106@oracle.com> Hi, On 2016-04-13 14:07, Thomas St?fe wrote: > Hi, > > I assume this target would not get built for normal hotspot builds, but > would have to be built explicitly? No, I'm thinking this would be part of the default hotspot build target. You would of course also be able to do it explicitly if needed. This of course assumes that it doesn't add a lot to the build time, but that shouldn't be a problem as far as I can see. cheers, Per > > If yes, could we have this somehow be a part of hg jcheck? 
> > Kind Regards, Thomas > > > On Wed, Apr 13, 2016 at 10:43 AM, Stefan Karlsson > > wrote: > > Hi Per, > > > On 2016-04-13 10:34, Per Liden wrote: > > Hi Stefan, > > On 2016-04-12 18:40, Stefan Karlsson wrote: > > Hi all, > > I would like to propose a patch to make it easier to find > and clean up > places where we include .inline.hpp files from .hpp files. > So, that we > start getting smaller include dependencies, with lower risk > of circular > include dependencies, and maybe even shorter compile times. > > The guidelines regarding file inclusions can be found at: > https://wiki.openjdk.java.net/display/HotSpot/StyleGuide > > "Files > > Do not put non-trivial function implementations in > .hpp files. If > the implementation depends on other .hpp files, put it in a > .cpp or a > .inline.hpp file. > .inline.hpp files should only be included in .cpp or > .inline.hpp > files. > All .cpp files include precompiled.hpp as the first > include line. > precompiled.hpp is just a build time optimization, so > don't rely on > it to resolve include problems. > Keep the include lines sorted. > Put conditional inclusions (#if ...) at the end of the > include list." > > The code to enable the stricter .inline.hpp include check > can be found > in this small patch: > http://cr.openjdk.java.net/~stefank/8154079/webrev.01.00.addInlineHppGuard/ > > > I'm using the fact that we are including almost all header > files in the > precompiled.hpp file. So, if I add a "scoped" define, called > INLINE_HPP_GUARD, in preompiled.hpp and add checks for this > define in > .inline.hpp files, the preprocessor will tell me when > .inline.hpp > includes come from .hpp files (directly or indirectly) > rather than .cpp > files. This requires that the .hpp file is reachable through > precompiled.hpp and that we start remove .inline.hpp files from > precompiled.hpp. > > I've tried this on a few .inline.hpp files. For example: > thread.inline.hpp: > > http://cr.openjdk.java.net/~stefank/8154079/webrev.01.01.guardThreadInlineHpp > > > > The inlineHppGuard.hpp file is first included in > thread.inline.hpp: > > #include "runtime/atomic.inline.hpp" > #include "runtime/os.inline.hpp" > #include "runtime/thread.hpp" > +#include "utilities/inlineHppGuard.hpp" > > Then when I compile (with precompiled headers enabled) I get: > In file included from > /home/stefank/hg/jdk9/hs-rt/hotspot/src/share/vm/runtime/thread.inline.hpp:33:0, > > > from > /home/stefank/hg/jdk9/hs-rt/hotspot/src/share/vm/runtime/handles.inline.hpp:29, > > > from > /home/stefank/hg/jdk9/hs-rt/hotspot/src/share/vm/runtime/reflectionUtils.hpp:32, > > > from > /home/stefank/hg/jdk9/hs-rt/hotspot/src/share/vm/classfile/systemDictionary.hpp:34, > > > from > /home/stefank/hg/jdk9/hs-rt/hotspot/src/share/vm/ci/ciEnv.hpp:30, > from > /home/stefank/hg/jdk9/hs-rt/hotspot/src/share/vm/ci/ciUtilities.hpp:28, > from > /home/stefank/hg/jdk9/hs-rt/hotspot/src/share/vm/ci/ciNullObject.hpp:30, > from > /home/stefank/hg/jdk9/hs-rt/hotspot/src/share/vm/ci/ciConstant.hpp:29, > from > /home/stefank/hg/jdk9/hs-rt/hotspot/src/share/vm/ci/ciArray.hpp:29, > from > /home/stefank/hg/jdk9/hs-rt/hotspot/src/share/vm/precompiled/precompiled.hpp:37: > > > /home/stefank/hg/jdk9/hs-rt/hotspot/src/share/vm/utilities/inlineHppGuard.hpp:44:2: > > error: #error ".inline.hpp file incorrectly included from > .hpp file" > > That tells me that I probably need to go and fix the > inclusion of > reflectionUtils.hpp -> handles.inline.hpp first. 
It's not > always enough > to remove the inclusion of the .inline.hpp file, because the > header > might actually use the inline files. In those cases the code > needs to be > restructured so that we the "offending" functions out from > the .hpp > files to .cpp or .linline.hpp files. This might be tedious > in the > beginning, but will hopefully become easier to maintain when > when more > of these files get cleaned up. > > So, with this in place, after all incorrect includes have > been fixed, > whenever someone "incorrectly" adds an inclusion of > thread.inline.hpp > (directly or indirectly) to a .hpp file the preprocessor > will complain. > > I have a patch set where I've tried this on different > .inline.hpp files, > and the following patches show the kind of work that is > needed to fix > the includes: > > http://cr.openjdk.java.net/~stefank/8154079/webrev.01.00.addInlineHppGuard/ > > http://cr.openjdk.java.net/~stefank/8154079/webrev.01.01.guardThreadInlineHpp/ > > > http://cr.openjdk.java.net/~stefank/8154079/webrev.01.02.guardHandleInlineHpp/ > > > http://cr.openjdk.java.net/~stefank/8154079/webrev.01.03.guardOsInlineHpp/ > > http://cr.openjdk.java.net/~stefank/8154079/webrev.01.04.guardMarkOopInlineHpp/ > > > http://cr.openjdk.java.net/~stefank/8154079/webrev.01.05.guardOopInlineHpp/ > > http://cr.openjdk.java.net/~stefank/8154079/webrev.01.06.guardFrameInlineHpp/ > > > http://cr.openjdk.java.net/~stefank/8154079/webrev.01.07.guardHashtableInlineHpp/ > > > http://cr.openjdk.java.net/~stefank/8154079/webrev.01.08.guardOrderAccessInlineHpp/ > > > http://cr.openjdk.java.net/~stefank/8154079/webrev.01.09.guardBitMapInlineHpp/ > > > http://cr.openjdk.java.net/~stefank/8154079/webrev.01.10.guardAtomicInlineHpp/ > > > http://cr.openjdk.java.net/~stefank/8154079/webrev.01.11.guardKlassInlineHpp/ > > > http://cr.openjdk.java.net/~stefank/8154079/webrev.01.12.guardTypeArrayOopInlineHpp/ > > > http://cr.openjdk.java.net/~stefank/8154079/webrev.01.all/ > > It compiles on linux-x64 with and without precompiled > headers, but I > haven't taken the time to try to get it to compile on other > platforms. > > So, is this worth doing to start fixing our include mess? > > > +1, this looks like a nice way of gradually moving in the right > direction. > > It would be nice to get these errors also when precompiled > headers is disabled. Instead of doing this through > precompiled.hpp, how about adding a make target which creates a > temporary "all.hpp" that #includes all our .hpp files > (.inline.hpp excluded) and just runs the pre-processor on that file? > > > Sounds like a good idea to me! > > Thanks, > StefanK > > > cheers, > Per > > > Thanks, > StefanK > > > From thomas.stuefe at gmail.com Wed Apr 13 12:38:37 2016 From: thomas.stuefe at gmail.com (=?UTF-8?Q?Thomas_St=C3=BCfe?=) Date: Wed, 13 Apr 2016 14:38:37 +0200 Subject: RFC: 8154079: Catch incorrectly included .inline.hpp files In-Reply-To: <570E3C8B.5060106@oracle.com> References: <570D24E9.9030401@oracle.com> <570E04AA.3060509@oracle.com> <570E06CE.2060302@oracle.com> <570E3C8B.5060106@oracle.com> Message-ID: Hi Per, On Wed, Apr 13, 2016 at 2:33 PM, Per Liden wrote: > Hi, > > On 2016-04-13 14:07, Thomas St?fe wrote: > >> Hi, >> >> I assume this target would not get built for normal hotspot builds, but >> would have to be built explicitly? >> > > No, I'm thinking this would be part of the default hotspot build target. > You would of course also be able to do it explicitly if needed. > > Great! 
Regards, Thomas > This of course assumes that it doesn't add a lot to the build time, but > that shouldn't be a problem as far as I can see. > > cheers, > Per > > >> If yes, could we have this somehow be a part of hg jcheck? >> >> Kind Regards, Thomas >> >> >> On Wed, Apr 13, 2016 at 10:43 AM, Stefan Karlsson >> > wrote: >> >> Hi Per, >> >> >> On 2016-04-13 10:34, Per Liden wrote: >> >> Hi Stefan, >> >> On 2016-04-12 18:40, Stefan Karlsson wrote: >> >> Hi all, >> >> I would like to propose a patch to make it easier to find >> and clean up >> places where we include .inline.hpp files from .hpp files. >> So, that we >> start getting smaller include dependencies, with lower risk >> of circular >> include dependencies, and maybe even shorter compile times. >> >> The guidelines regarding file inclusions can be found at: >> https://wiki.openjdk.java.net/display/HotSpot/StyleGuide >> >> "Files >> >> Do not put non-trivial function implementations in >> .hpp files. If >> the implementation depends on other .hpp files, put it in a >> .cpp or a >> .inline.hpp file. >> .inline.hpp files should only be included in .cpp or >> .inline.hpp >> files. >> All .cpp files include precompiled.hpp as the first >> include line. >> precompiled.hpp is just a build time optimization, so >> don't rely on >> it to resolve include problems. >> Keep the include lines sorted. >> Put conditional inclusions (#if ...) at the end of the >> include list." >> >> The code to enable the stricter .inline.hpp include check >> can be found >> in this small patch: >> >> http://cr.openjdk.java.net/~stefank/8154079/webrev.01.00.addInlineHppGuard/ >> >> >> I'm using the fact that we are including almost all header >> files in the >> precompiled.hpp file. So, if I add a "scoped" define, called >> INLINE_HPP_GUARD, in preompiled.hpp and add checks for this >> define in >> .inline.hpp files, the preprocessor will tell me when >> .inline.hpp >> includes come from .hpp files (directly or indirectly) >> rather than .cpp >> files. This requires that the .hpp file is reachable through >> precompiled.hpp and that we start remove .inline.hpp files >> from >> precompiled.hpp. >> >> I've tried this on a few .inline.hpp files. 
For example: >> thread.inline.hpp: >> >> >> http://cr.openjdk.java.net/~stefank/8154079/webrev.01.01.guardThreadInlineHpp >> >> >> >> The inlineHppGuard.hpp file is first included in >> thread.inline.hpp: >> >> #include "runtime/atomic.inline.hpp" >> #include "runtime/os.inline.hpp" >> #include "runtime/thread.hpp" >> +#include "utilities/inlineHppGuard.hpp" >> >> Then when I compile (with precompiled headers enabled) I get: >> In file included from >> >> /home/stefank/hg/jdk9/hs-rt/hotspot/src/share/vm/runtime/thread.inline.hpp:33:0, >> >> >> from >> >> /home/stefank/hg/jdk9/hs-rt/hotspot/src/share/vm/runtime/handles.inline.hpp:29, >> >> >> from >> >> /home/stefank/hg/jdk9/hs-rt/hotspot/src/share/vm/runtime/reflectionUtils.hpp:32, >> >> >> from >> >> /home/stefank/hg/jdk9/hs-rt/hotspot/src/share/vm/classfile/systemDictionary.hpp:34, >> >> >> from >> >> /home/stefank/hg/jdk9/hs-rt/hotspot/src/share/vm/ci/ciEnv.hpp:30, >> from >> >> /home/stefank/hg/jdk9/hs-rt/hotspot/src/share/vm/ci/ciUtilities.hpp:28, >> from >> >> /home/stefank/hg/jdk9/hs-rt/hotspot/src/share/vm/ci/ciNullObject.hpp:30, >> from >> >> /home/stefank/hg/jdk9/hs-rt/hotspot/src/share/vm/ci/ciConstant.hpp:29, >> from >> >> /home/stefank/hg/jdk9/hs-rt/hotspot/src/share/vm/ci/ciArray.hpp:29, >> from >> >> /home/stefank/hg/jdk9/hs-rt/hotspot/src/share/vm/precompiled/precompiled.hpp:37: >> >> >> >> /home/stefank/hg/jdk9/hs-rt/hotspot/src/share/vm/utilities/inlineHppGuard.hpp:44:2: >> >> error: #error ".inline.hpp file incorrectly included from >> .hpp file" >> >> That tells me that I probably need to go and fix the >> inclusion of >> reflectionUtils.hpp -> handles.inline.hpp first. It's not >> always enough >> to remove the inclusion of the .inline.hpp file, because the >> header >> might actually use the inline files. In those cases the code >> needs to be >> restructured so that we the "offending" functions out from >> the .hpp >> files to .cpp or .linline.hpp files. This might be tedious >> in the >> beginning, but will hopefully become easier to maintain when >> when more >> of these files get cleaned up. >> >> So, with this in place, after all incorrect includes have >> been fixed, >> whenever someone "incorrectly" adds an inclusion of >> thread.inline.hpp >> (directly or indirectly) to a .hpp file the preprocessor >> will complain. 
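(As an aside, the restructuring step described a little further up - moving the "offending" function bodies out of the .hpp file - typically looks something like the following. Foo and Bar are invented names, not HotSpot classes.)

  // Before - foo.hpp contains the body and therefore drags in bar.inline.hpp,
  // exactly the pattern the guard is meant to flag:

  #include "bar.inline.hpp"
  class Foo {
    Bar* _bar;
   public:
    int cached_size() const { return _bar->size(); }  // needs Bar's inline code
  };

  // After - foo.hpp keeps only the declaration and a plain .hpp include:

  #include "bar.hpp"
  class Foo {
    Bar* _bar;
   public:
    int cached_size() const;  // body moved out
  };

  // ... and the body moves to foo.inline.hpp, which is allowed to include
  // other .inline.hpp files:

  #include "foo.hpp"
  #include "bar.inline.hpp"
  inline int Foo::cached_size() const { return _bar->size(); }
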
>> >> I have a patch set where I've tried this on different >> .inline.hpp files, >> and the following patches show the kind of work that is >> needed to fix >> the includes: >> >> >> http://cr.openjdk.java.net/~stefank/8154079/webrev.01.00.addInlineHppGuard/ >> >> >> http://cr.openjdk.java.net/~stefank/8154079/webrev.01.01.guardThreadInlineHpp/ >> >> >> >> http://cr.openjdk.java.net/~stefank/8154079/webrev.01.02.guardHandleInlineHpp/ >> >> >> >> http://cr.openjdk.java.net/~stefank/8154079/webrev.01.03.guardOsInlineHpp/ >> >> >> http://cr.openjdk.java.net/~stefank/8154079/webrev.01.04.guardMarkOopInlineHpp/ >> >> >> >> http://cr.openjdk.java.net/~stefank/8154079/webrev.01.05.guardOopInlineHpp/ >> >> >> http://cr.openjdk.java.net/~stefank/8154079/webrev.01.06.guardFrameInlineHpp/ >> >> >> >> http://cr.openjdk.java.net/~stefank/8154079/webrev.01.07.guardHashtableInlineHpp/ >> >> >> >> http://cr.openjdk.java.net/~stefank/8154079/webrev.01.08.guardOrderAccessInlineHpp/ >> >> >> >> http://cr.openjdk.java.net/~stefank/8154079/webrev.01.09.guardBitMapInlineHpp/ >> >> >> >> http://cr.openjdk.java.net/~stefank/8154079/webrev.01.10.guardAtomicInlineHpp/ >> >> >> >> http://cr.openjdk.java.net/~stefank/8154079/webrev.01.11.guardKlassInlineHpp/ >> >> >> >> http://cr.openjdk.java.net/~stefank/8154079/webrev.01.12.guardTypeArrayOopInlineHpp/ >> >> >> http://cr.openjdk.java.net/~stefank/8154079/webrev.01.all/ >> >> It compiles on linux-x64 with and without precompiled >> headers, but I >> haven't taken the time to try to get it to compile on other >> platforms. >> >> So, is this worth doing to start fixing our include mess? >> >> >> +1, this looks like a nice way of gradually moving in the right >> direction. >> >> It would be nice to get these errors also when precompiled >> headers is disabled. Instead of doing this through >> precompiled.hpp, how about adding a make target which creates a >> temporary "all.hpp" that #includes all our .hpp files >> (.inline.hpp excluded) and just runs the pre-processor on that >> file? >> >> >> Sounds like a good idea to me! >> >> Thanks, >> StefanK >> >> >> cheers, >> Per >> >> >> Thanks, >> StefanK >> >> >> >> From rickard.backman at oracle.com Wed Apr 13 13:31:28 2016 From: rickard.backman at oracle.com (Rickard =?iso-8859-1?Q?B=E4ckman?=) Date: Wed, 13 Apr 2016 15:31:28 +0200 Subject: RFR (XL): 8152664 - Support non-continuous CodeBlobs in HotSpot In-Reply-To: References: <20160407121221.GQ9504@rbackman> <20160411090501.GS9504@rbackman> <20160411115521.GU9504@rbackman> Message-ID: <20160413133128.GV9504@rbackman> Volker, yes, I didn't realize at first that the nmethod was casted to a CompiledMethod before the call to consts_begin(). Otherwise it would have used the non-virtual consts_begin of nmethod that didn't have any virtual calls. The entire code chain and looking up itself from the CodeCache before fully constructed seems quite problematic. Even before the changes I made. Previous to my changes the calls would have succeeded but returned header_begin() or this for all the consts_begin, consts_end, etc... ? /R On 04/11, Volker Simonis wrote: > Rickard, Dean, > > I'm afraid all this hacks can not work. It doesn't help to make > CompiledMethod::consts_begin() non-virtual and then calling a virtual > function from it. 
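(The pattern being rejected here - quoted in full further down, with stub_begin as its example - is roughly the following. The class bodies and offset arithmetic are invented for illustration.)

  typedef unsigned char* address;

  class CodeBlob {
   public:
    virtual ~CodeBlob() {}
    address header_begin() const { return (address)this; }
  };

  class CompiledMethod : public CodeBlob {
   protected:
    virtual address consts_begin_v() const = 0;  // the virtual hop remains
   public:
    address consts_begin() const { return consts_begin_v(); }
  };

  class nmethod : public CompiledMethod {
    int _consts_offset;
   protected:
    virtual address consts_begin_v() const { return consts_begin(); }
   public:
    nmethod() : _consts_offset(0) {}
    // Hides (does not override) CompiledMethod::consts_begin().
    address consts_begin() const { return header_begin() + _consts_offset; }
  };

  // The outer consts_begin() call is non-virtual, but it immediately
  // dispatches through the vtable via consts_begin_v() - and while CodeBlob's
  // constructor is still running, that vtable is CodeBlob's, so the derived
  // slot is not yet in place.
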
The problem ist that at the point where you call > consts_begin_v(), the vtable of 'this' is still the one of CodeBlob and > this results in calling yet another arbitrary function: > > #0 CodeBlob::is_locked_by_vm (this=0x3fff607d0c10) at > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/codeBlob.hpp:168 > #1 0x00003fffb6e38048 in CompiledMethod::consts_begin > (this=0x3fff607d0c10) at > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/compiledMethod.hpp:255 > #2 0x00003fffb758d658 in RelocIterator::initialize (this=0x3ffdfd3fc9a8, > nm=0x3fff607d0c10, begin=0x3fff607d0fac "\001", limit=0x0) at > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.cpp:142 > #3 0x00003fffb6ace56c in RelocIterator::RelocIterator > (this=0x3ffdfd3fc9a8, nm=0x3fff607d0c10, begin=0x3fff607d0fac "\001", > limit=0x0) at > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.hpp:1378 > #4 0x00003fffb7591afc in trampoline_stub_Relocation::get_trampoline_for > (call=0x3fff607d0fac "\001", code=0x3fff607d0c10) at > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.cpp:849 > #5 0x00003fffb741ba4c in NativeCall::get_trampoline (this=0x3fff607d0fac) > at > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/cpu/ppc/vm/nativeInst_ppc.cpp:146 > #6 0x00003fffb7596a34 in Relocation::pd_call_destination > (this=0x3ffdfd3fcd10, orig_addr=0x3fff6033482c "\001") at > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/cpu/ppc/vm/relocInfo_ppc.cpp:87 > #7 0x00003fffb758f71c in CallRelocation::fix_relocation_after_move > (this=0x3ffdfd3fcd10, src=0x3ffdfd3fdbc0, dest=0x3ffdfd3fcdd8) at > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.cpp:506 > #8 0x00003fffb6c48914 in CodeBuffer::relocate_code_to > (this=0x3ffdfd3fdbc0, dest=0x3ffdfd3fcdd8) at > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/asm/codeBuffer.cpp:812 > #9 0x00003fffb6c48480 in CodeBuffer::copy_code_to (this=0x3ffdfd3fdbc0, > dest_blob=0x3fff607d0c10) at > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/asm/codeBuffer.cpp:748 > #10 0x00003fffb6c426ec in CodeBuffer::copy_code_and_locs_to > (this=0x3ffdfd3fdbc0, blob=0x3fff607d0c10) at > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/asm/codeBuffer.hpp:607 > #11 0x00003fffb6c3f8b0 in CodeBlob::CodeBlob (this=0x3fff607d0c10, > name=0x3fffb7a760f8 "nmethod", layout=..., cb=0x3ffdfd3fdbc0, > frame_complete_offset=20, frame_size=14, oop_maps=0x3ffe0001ed00, > caller_must_gc_arguments=false, subtype=8) at > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/codeBlob.cpp:117 > #12 0x00003fffb6ce5360 in CompiledMethod::CompiledMethod > (this=0x3fff607d0c10, method=0x3ffe1ddce568, name=0x3fffb7a760f8 "nmethod", > size=1768, header_size=392, cb=0x3ffdfd3fdbc0, frame_complete_offset=20, > frame_size=14, oop_maps=0x3ffe0001ed00, caller_must_gc_arguments=false) at > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/compiledMethod.cpp:42 > #13 0x00003fffb7422198 in nmethod::nmethod (this=0x3fff607d0c10, > method=0x3ffe1ddce568, nmethod_size=1768, compile_id=4, entry_bci=-1, > offsets=0x3ffdfd3fdb98, orig_pc_offset=104, debug_info=0x3fffb03f2dc0, > dependencies=0x3ffe0001ed70, code_buffer=0x3ffdfd3fdbc0, frame_size=14, > oop_maps=0x3ffe0001ed00, handler_table=0x3ffdfd3fdb50, > nul_chk_table=0x3ffdfd3fdb70, compiler=0x3fffb03d0cd0, comp_level=3) at > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/nmethod.cpp:706 > #14 
0x00003fffb7421850 in nmethod::new_nmethod (method=..., compile_id=4, > entry_bci=-1, offsets=0x3ffdfd3fdb98, orig_pc_offset=104, > debug_info=0x3fffb03f2dc0, dependencies=0x3ffe0001ed70, > code_buffer=0x3ffdfd3fdbc0, frame_size=14, oop_maps=0x3ffe0001ed00, > handler_table=0x3ffdfd3fdb50, nul_chk_table=0x3ffdfd3fdb70, > compiler=0x3fffb03d0cd0, comp_level=3) at > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/nmethod.cpp:548 > > I think we really need to rework this as proposed by Andrew in his last > mail. I'm working on such a fix. > > Regards, > Volker > > > On Mon, Apr 11, 2016 at 1:55 PM, Rickard B?ckman > wrote: > > > Volker, > > > > here is the patch if you want to try it. > > http://cr.openjdk.java.net/~rbackman/8152664/virtual.patch > > > > /R > > > > On 04/11, Rickard B?ckman wrote: > > > Volker, > > > > > > thanks for finding this issue. > > > > > > I think that maybe the easiest fix is as follows: > > > > > > create new virtual methods in CompiledMethod: > > > > > > virtual address stub_begin_v() = 0; > > > > > > make the now virtual stub_begin non-virtual like: > > > > > > address stub_begin() { return stub_begin_v(); } > > > > > > in nmethod we override the stub_begin() with the normal this + offset > > > compuation and implement stub_begin_v() to call stub_begin(). > > > > > > That will avoid all virtual calls in the case were we are not working on > > > a CompiledMethod. > > > > > > It adds a couple of methods though. What do you think? > > > > > > /R > > > > > > On 04/08, Volker Simonis wrote: > > > > Hi Rickard, > > > > > > > > I found the problem why your change crashes the VM on ppc (and I'm > > pretty > > > > sure it will also crash on ARM - @Andrew, maybe you can try it out?). > > It is > > > > caused by the following code in address NativeCall::get_trampoline() > > which > > > > is also present on arm64: > > > > > > > > address NativeCall::get_trampoline() { > > > > address call_addr = addr_at(0); > > > > CodeBlob *code = CodeCache::find_blob(call_addr); > > > > ... > > > > // If the codeBlob is not a nmethod, this is because we get here > > from the > > > > // CodeBlob constructor, which is called within the nmethod > > constructor. 
> > > > return trampoline_stub_Relocation::get_trampoline_for(call_addr, > > > > (nmethod*)code); > > > > } > > > > > > > > The comment explains the situation quite well: we're in the CodeBlob > > > > constructor which was called by the CompiledMethod constructor which > > was > > > > called from the nmethod constructor: > > > > > > > > #3 0x00003fffb741b80c in NativeCall::get_trampoline > > (this=0x3fff607d0fac) > > > > at > > > > > > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/cpu/ppc/vm/nativeInst_ppc.cpp:146 > > > > #4 0x00003fffb7596914 in Relocation::pd_call_destination > > > > (this=0x3ffdfe3fcc90, orig_addr=0x3fff603b8a2c "\001") at > > > > > > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/cpu/ppc/vm/relocInfo_ppc.cpp:87 > > > > #5 0x00003fffb758f5fc in CallRelocation::fix_relocation_after_move > > > > (this=0x3ffdfe3fcc90, src=0x3ffdfe3fdb40, dest=0x3ffdfe3fcd58) at > > > > > > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.cpp:506 > > > > #6 0x00003fffb6c48898 in CodeBuffer::relocate_code_to > > > > (this=0x3ffdfe3fdb40, dest=0x3ffdfe3fcd58) at > > > > > > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/asm/codeBuffer.cpp:812 > > > > #7 0x00003fffb6c48404 in CodeBuffer::copy_code_to > > (this=0x3ffdfe3fdb40, > > > > dest_blob=0x3fff607d0c10) at > > > > > > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/asm/codeBuffer.cpp:748 > > > > #8 0x00003fffb6c42670 in CodeBuffer::copy_code_and_locs_to > > > > (this=0x3ffdfe3fdb40, blob=0x3fff607d0c10) at > > > > > > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/asm/codeBuffer.hpp:607 > > > > #9 0x00003fffb6c3f834 in CodeBlob::CodeBlob (this=0x3fff607d0c10, > > > > name=0x3fffb7a75fd8 "nmethod", layout=..., cb=0x3ffdfe3fdb40, > > > > frame_complete_offset=20, frame_size=14, oop_maps=0x3ffe00049620, > > > > caller_must_gc_arguments=false, subtype=8) at > > > > > > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/codeBlob.cpp:117 > > > > #10 0x00003fffb6ce52c8 in CompiledMethod::CompiledMethod > > > > (this=0x3fff607d0c10, method=0x3ffe1ddce568, name=0x3fffb7a75fd8 > > "nmethod", > > > > size=1768, header_size=392, cb=0x3ffdfe3fdb40, > > frame_complete_offset=20, > > > > frame_size=14, oop_maps=0x3ffe00049620, > > caller_must_gc_arguments=false) at > > > > > > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/compiledMethod.cpp:42 > > > > #11 0x00003fffb7421f58 in nmethod::nmethod (this=0x3fff607d0c10, > > > > method=0x3ffe1ddce568, nmethod_size=1768, compile_id=4, entry_bci=-1, > > > > offsets=0x3ffdfe3fdb18, orig_pc_offset=104, debug_info=0x3fffb03d55f0, > > > > dependencies=0x3ffe00049690, code_buffer=0x3ffdfe3fdb40, frame_size=14, > > > > oop_maps=0x3ffe00049620, handler_table=0x3ffdfe3fdad0, > > > > nul_chk_table=0x3ffdfe3fdaf0, compiler=0x3fffb03bc270, comp_level=3) at > > > > > > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/nmethod.cpp:706 > > > > > > > > Now we cast 'code' to 'nmethod' but at this point in time 'code' is > > still a > > > > CodeBlob from the C++ point of view (i.e. it still has a CodeBlob > > vtable > > > > (see [1] for an explanation)). 
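(A tiny standalone analogue of the rule referenced in [1] - simplified, with no pure virtuals or downcasts, so it prints the base-class result instead of crashing the way the real code does.)

  #include <iostream>

  struct Base;
  void helper(const Base* b);  // stands in for RelocIterator::initialize(nm)

  struct Base {
    Base() { helper(this); }   // runs before the derived constructor body
    virtual const char* name() const { return "Base"; }
    virtual ~Base() {}
  };

  struct Derived : Base {
    const char* name() const override { return "Derived"; }
  };

  void helper(const Base* b) {
    // While Base's constructor is running, the vtable pointer is still
    // Base's, even if the object under construction is really a Derived.
    std::cout << b->name() << std::endl;
  }

  int main() {
    Derived d;                           // prints "Base"
    std::cout << d.name() << std::endl;  // prints "Derived"
    return 0;
  }
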
> > > > > > > > Later on, in RelocIterator::initialize() we call virtual methods on the > > > > nmethod which still has the vtable of a "CodeBlob" and this fails > > badly: > > > > > > > > #0 SingletonBlob::print_on (this=0x3fff607d0c10, st=0x0) at > > > > > > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/codeBlob.cpp:584 > > > > #1 0x00003fffb758d51c in RelocIterator::initialize > > (this=0x3ffdfe3fc928, > > > > nm=0x3fff607d0c10, begin=0x3fff607d0fac "\001", limit=0x0) at > > > > > > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.cpp:144 > > > > #2 0x00003fffb6ace56c in RelocIterator::RelocIterator > > > > (this=0x3ffdfe3fc928, nm=0x3fff607d0c10, begin=0x3fff607d0fac "\001", > > > > limit=0x0) at > > > > > > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.hpp:1378 > > > > #3 0x00003fffb75919dc in > > trampoline_stub_Relocation::get_trampoline_for > > > > (call=0x3fff607d0fac "\001", code=0x3fff607d0c10) at > > > > > > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.cpp:849 > > > > #4 0x00003fffb741b80c in NativeCall::get_trampoline > > (this=0x3fff607d0fac) > > > > at > > > > > > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/cpu/ppc/vm/nativeInst_ppc.cpp:146 > > > > > > > > As you can see, we actually want to call nmethod::stub_begin() at > > > > relocInfo.cpp:144 > > > > > > > > 142 _section_start[CodeBuffer::SECT_CONSTS] = nm->consts_begin(); > > > > 143 _section_start[CodeBuffer::SECT_INSTS ] = nm->insts_begin() ; > > > > 144 _section_start[CodeBuffer::SECT_STUBS ] = nm->stub_begin() ; > > > > > > > > but we actually end up in SingletonBlob::print_on() which is a > > completely > > > > different method. Notice that the call to nm->consts_begin() before > > also > > > > fails, but it doesn't crash the VM because it happens to call > > > > SingletonBlob::verify() which has no bad side effect. The call to > > > > nm->insts_begin() in line 143 is non-virtual and thus works fine. Here > > are > > > > the corresponding vtable slots in the CodeBlob vtable for > > consts_begin() > > > > and stub_begin() > > > > > > > > (gdb) p &nmethod::consts_begin > > > > $76 = &virtual table offset 42 > > > > (gdb) p &nmethod::stub_begin > > > > $77 = &virtual table offset 44 > > > > (gdb) p ((*(void ***)nm) + 1)[42] > > > > $86 = (void *) 0x3fffb6c41df8 > > > > (gdb) p ((*(void ***)nm) + 1)[44] > > > > $87 = (void *) 0x3fffb6c41e64 > const> > > > > > > > > As you can see, 'nm' is indeed a "CodeBlob" at this point: > > > > > > > > (gdb) p *(void ***)nm > > > > $91 = (void **) 0x3fffb7befa00 > > > > (gdb) p nm->print() > > > > [CodeBlob (0x00003fff607d1090)] > > > > Framesize: 14 > > > > > > > > The offending calls succeeded before your change, because they where > > not > > > > virtual. Any idea how we can fix this with the new class hierarchy? > > > > > > > > Regards, > > > > Volker > > > > > > > > [1] > > > > > > http://stackoverflow.com/questions/6591859/when-does-the-vptr-pointing-to-vtable-get-initialized-for-a-polymorphic-class > > > > > > > > > > > > > > > > On Thu, Apr 7, 2016 at 5:50 PM, Volker Simonis < > > volker.simonis at gmail.com> > > > > wrote: > > > > > > > > > Hi Rickard, > > > > > > > > > > I'd also like to know what's the rational behind this quite large > > > > > change. Do you expect some performance or memory consumption > > > > > improvements or is this a prerequisite for another change which is > > > > > still to come? 
> > > > > > > > > > The change itself currently doesn't work on ppc64 (neither on Linux > > > > > nor on AIX). I get the following crash during the build when the > > newly > > > > > built Hotspot is JIT-compiling java.lang.String::charAt on C1 : > > > > > > > > > > # > > > > > # A fatal error has been detected by the Java Runtime Environment: > > > > > # > > > > > # SIGSEGV (0xb) at pc=0x00001000012a44d0, pid=35331, tid=35404 > > > > > # > > > > > # JRE version: OpenJDK Runtime Environment (9.0) (slowdebug build > > > > > 9-internal+0-2016-04-07-162501.d046063.jdk9-hs-comp) > > > > > # Java VM: OpenJDK 64-Bit Server VM (slowdebug > > > > > 9-internal+0-2016-04-07-162501.d046063.jdk9-hs-comp, mixed mode, > > > > > tiered, compressed oo > > > > > ps, serial gc, linux-ppc64le) > > > > > # Problematic frame: > > > > > # V [libjvm.so+0xf744d0] outputStream::do_vsnprintf_and_write(char > > > > > const*, char*, bool)+0x40 > > > > > # > > > > > # No core dump will be written. Core dumps have been disabled. To > > > > > enable core dumping, try "ulimit -c unlimited" before starting Java > > > > > again > > > > > # > > > > > # If you would like to submit a bug report, please visit: > > > > > # http://bugreport.java.com/bugreport/crash.jsp > > > > > # > > > > > > > > > > --------------- S U M M A R Y ------------ > > > > > > > > > > Command Line: > > > > > > > -Dapplication.home=/sapmnt/ld9510/a/d046063/output-jdk9-hs-comp-dbg/jdk > > > > > -Xms8m -XX:+UseSerialGC -Xms32M -Xmx512M -Djdk. > > > > > module.main=jdk.jlink jdk.jlink/jdk.tools.jmod.Main create > > > > > --module-version 9-internal --os-name Linux --os-arch ppc64le > > > > > --os-version > > > > > 2.6 --modulepath /priv/d046063/output-jdk9-hs-comp-dbg/images/jmods > > > > > --hash-dependencies .* --exclude **_the.* --libs > > > > > > > > > > > > /priv/d046063/output-jdk9-hs-comp-dbg/support/modules_libs-stripped/java.base > > > > > --cmds > > > > > > > /priv/d046063/output-jdk9-hs-comp-dbg/support/modules_cmds-stripped/java.base > > > > > --config > > > > > /priv/d046063/output-jdk9-hs-comp-dbg/support/modules_conf/java.base > > > > > --class-path > > /priv/d046063/output-jdk9-hs-comp-dbg/jdk/modules/java.base > > > > > /priv/d046063/output-jdk9-hs-comp-dbg/support/jmods/java.base.jmod > > > > > > > > > > Host: ld9510, POWER8E (raw), altivec supported, 48 cores, 61G, # > > > > > Please check /etc/os-release for details about this release. 
> > > > > Time: Thu Apr 7 16:28:55 2016 CEST elapsed time: 0 seconds (0d 0h > > 0m 0s) > > > > > > > > > > --------------- T H R E A D --------------- > > > > > > > > > > Current thread (0x000010000429c800): JavaThread "C1 > > CompilerThread10" > > > > > daemon [_thread_in_vm, id=35404, > > > > > stack(0x000010006a800000,0x000010006ac00000)] > > > > > > > > > > > > > > > Current CompileTask: > > > > > C1: 761 3 3 java.lang.String::charAt (25 bytes) > > > > > > > > > > Stack: [0x000010006a800000,0x000010006ac00000], > > > > > sp=0x000010006abfc6c0, free space=4081k > > > > > Native frames: (J=compiled Java code, j=interpreted, Vv=VM code, > > C=native > > > > > code) > > > > > V [libjvm.so+0xf744d0] outputStream::do_vsnprintf_and_write(char > > > > > const*, char*, bool)+0x40 > > > > > V [libjvm.so+0xf74668] outputStream::print_cr(char const*, > > ...)+0x68 > > > > > V [libjvm.so+0x72189c] CodeBlob::print_on(outputStream*) const+0x50 > > > > > V [libjvm.so+0x723bdc] RuntimeBlob::print_on(outputStream*) > > const+0x40 > > > > > V [libjvm.so+0x721eb0] SingletonBlob::print_on(outputStream*) > > const+0x4c > > > > > V [libjvm.so+0x106d51c] RelocIterator::initialize(CompiledMethod*, > > > > > unsigned char*, unsigned char*)+0x170 > > > > > V [libjvm.so+0x5ae56c] > > RelocIterator::RelocIterator(CompiledMethod*, > > > > > unsigned char*, unsigned char*)+0x78 > > > > > V [libjvm.so+0x10719dc] > > > > > trampoline_stub_Relocation::get_trampoline_for(unsigned char*, > > > > > nmethod*)+0x78 > > > > > V [libjvm.so+0xefb80c] NativeCall::get_trampoline()+0x110 > > > > > V [libjvm.so+0x1076914] Relocation::pd_call_destination(unsigned > > > > > char*)+0x150 > > > > > V [libjvm.so+0x106f5fc] > > > > > CallRelocation::fix_relocation_after_move(CodeBuffer const*, > > > > > CodeBuffer*)+0x74 > > > > > V [libjvm.so+0x728898] CodeBuffer::relocate_code_to(CodeBuffer*) > > > > > const+0x390 > > > > > V [libjvm.so+0x728404] CodeBuffer::copy_code_to(CodeBlob*)+0x134 > > > > > V [libjvm.so+0x722670] > > CodeBuffer::copy_code_and_locs_to(CodeBlob*)+0x84 > > > > > V [libjvm.so+0x71f834] CodeBlob::CodeBlob(char const*, > > > > > CodeBlobLayout const&, CodeBuffer*, int, int, OopMapSet*, bool, > > > > > int)+0x320 > > > > > V [libjvm.so+0x7c52c8] CompiledMethod::CompiledMethod(Method*, char > > > > > const*, int, int, CodeBuffer*, int, int, OopMapSet*, bool)+0xd8 > > > > > V [libjvm.so+0xf01f58] nmethod::nmethod(Method*, int, int, int, > > > > > CodeOffsets*, int, DebugInformationRecorder*, Dependencies*, > > > > > CodeBuffer*, int, OopMapSet*, ExceptionHandlerTable*, > > > > > ImplicitExceptionTable*, AbstractCompiler*, int)+0xe0 > > > > > V [libjvm.so+0xf01610] nmethod::new_nmethod(methodHandle const&, > > > > > int, int, CodeOffsets*, int, DebugInformationRecorder*, > > Dependencies*, > > > > > CodeBuffer*, int, OopMapSet*, ExceptionHandlerTable*, > > > > > ImplicitExceptionTable*, AbstractCompiler*, int)+0x2c4 > > > > > V [libjvm.so+0x632970] ciEnv::register_method(ciMethod*, int, > > > > > CodeOffsets*, int, CodeBuffer*, int, OopMapSet*, > > > > > ExceptionHandlerTable*, ImplicitExceptionTable*, AbstractCompiler*, > > > > > bool, bool, RTMState)+0x560 > > > > > V [libjvm.so+0x48ee00] Compilation::install_code(int)+0x264 > > > > > V [libjvm.so+0x48eff8] Compilation::compile_method()+0x184 > > > > > V [libjvm.so+0x48f7a8] Compilation::Compilation(AbstractCompiler*, > > > > > ciEnv*, ciMethod*, int, BufferBlob*, DirectiveSet*)+0x288 > > > > > V [libjvm.so+0x4980d0] Compiler::compile_method(ciEnv*, ciMethod*, > > > > 
> int, DirectiveSet*)+0xc8 > > > > > V [libjvm.so+0x7b188c] > > > > > CompileBroker::invoke_compiler_on_method(CompileTask*)+0x590 > > > > > V [libjvm.so+0x7b07bc] CompileBroker::compiler_thread_loop()+0x310 > > > > > V [libjvm.so+0x11a614c] compiler_thread_entry(JavaThread*, > > Thread*)+0xa0 > > > > > V [libjvm.so+0x119f3a8] JavaThread::thread_main_inner()+0x1b4 > > > > > V [libjvm.so+0x119f1a4] JavaThread::run()+0x1b8 > > > > > V [libjvm.so+0xf53d90] java_start(Thread*)+0x204 > > > > > C [libpthread.so.0+0x8a64] start_thread+0xf4 > > > > > C [libc.so.6+0x1032a0] clone+0x98 > > > > > > > > > > I haven't identified the exact cause (will analyze it tomorrow) but > > > > > the stack trace indicates that it is indeed related to your changes. > > > > > > > > > > Besides that I have some comments: > > > > > > > > > > codeBuffer.hpp: > > > > > > > > > > 472 CodeSection* insts() { return &_insts; } > > > > > 475 const CodeSection* insts() const { return &_insts; } > > > > > > > > > > - do we really need both versions? > > > > > > > > > > codeBlob.hpp: > > > > > > > > > > 135 nmethod* as_nmethod_or_null() const { return > > > > > is_nmethod() ? (nmethod*) this : NULL; } > > > > > 136 nmethod* as_nmethod() const { > > > > > assert(is_nmethod(), "must be nmethod"); return (nmethod*) this; } > > > > > 137 CompiledMethod* as_compiled_method_or_null() const { return > > > > > is_compiled() ? (CompiledMethod*) this : NULL; } > > > > > 138 CompiledMethod* as_compiled_method() const { > > > > > assert(is_compiled(), "must be compiled"); return (CompiledMethod*) > > > > > this; } > > > > > 139 CodeBlob* as_codeblob_or_null() const { return > > > > > (CodeBlob*) this; } > > > > > > > > > > - I don't like this code. You make the getters 'const' which > > > > > implicitely makes 'this' a "pointer to const" but then the returned > > > > > pointer is a normal pointer to a non-const object and therefore you > > > > > have to statically cast away the "pointer to const" (that's why you > > > > > need the cast even in the case where you return a CodeBlob*). So > > > > > either remove the const qualifier from the method declarations or > > make > > > > > them return "pointers to const". And by the way, > > as_codeblob_or_null() > > > > > doesn't seemed to be used anywhere in the code, why do we need it at > > > > > all? > > > > > > > > > > - Why do we need the non-virtual methods is_nmethod() and > > > > > is_compiled() to manually simulate virtual behavior. Why can't we > > > > > simply make them virtual and implement them accordingly in nmathod > > and > > > > > CompiledMethod? > > > > > > > > > > Regards, > > > > > Volker > > > > > > > > > > On Thu, Apr 7, 2016 at 2:12 PM, Rickard B?ckman > > > > > wrote: > > > > > > Hi, > > > > > > > > > > > > can I please have review for this patch please? > > > > > > > > > > > > So far CodeBlobs have required all the data (metadata, oops, code, > > etc) > > > > > > to be in one continuous blob With this patch we are looking to > > change > > > > > > that. It's been done by changing offsets in CodeBlob to addresses, > > > > > > making some methods virtual to allow different behavior and also > > > > > > creating a couple of new classes. CompiledMethod now sits inbetween > > > > > > CodeBlob and nmethod. 
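For illustration, the two accessor-related review points above can be made concrete with a minimal standalone C++ sketch. The declarations below are illustrative only (they are not the code in the webrev); they show const-correct overloads that avoid casting away constness, and plain virtual dispatch instead of manually simulated type checks:

  #include <cstddef>

  class nmethod;

  class CodeBlob {
   public:
    // Virtual dispatch: each subclass answers for itself.
    virtual bool is_nmethod()  const { return false; }
    virtual bool is_compiled() const { return false; }

    // Const-correct accessors: the const overload hands back a pointer to
    // const, so nothing needs to be cast away from "pointer to const".
    const nmethod* as_nmethod_or_null() const;
    nmethod*       as_nmethod_or_null();
  };

  class CompiledMethod : public CodeBlob {
   public:
    virtual bool is_compiled() const { return true; }
  };

  class nmethod : public CompiledMethod {
   public:
    virtual bool is_nmethod() const { return true; }
  };

  inline const nmethod* CodeBlob::as_nmethod_or_null() const {
    return is_nmethod() ? static_cast<const nmethod*>(this) : NULL;
  }
  inline nmethod* CodeBlob::as_nmethod_or_null() {
    return is_nmethod() ? static_cast<nmethod*>(this) : NULL;
  }

Whether virtual calls are acceptable on these paths is of course a question for the patch author; the sketch only spells out the alternative being asked about.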
> > > > > > > > > > > > CR: https://bugs.openjdk.java.net/browse/JDK-8152664 > > > > > > Webrev: http://cr.openjdk.java.net/~rbackman/8152664/ > > > > > > > > > > > > Thanks > > > > > > /R > > > > > > > From thomas.stuefe at gmail.com Wed Apr 13 13:59:44 2016 From: thomas.stuefe at gmail.com (=?UTF-8?Q?Thomas_St=C3=BCfe?=) Date: Wed, 13 Apr 2016 15:59:44 +0200 Subject: RFR: 8017629: G1: UseSHM in combination with a G1HeapRegionSize > os::large_page_size() falls back to use small pages In-Reply-To: <570E2C45.7090201@oracle.com> References: <570B8481.8010108@oracle.com> <570BAC1B.7040806@oracle.com> <570D1747.2020508@oracle.com> <570E2C45.7090201@oracle.com> Message-ID: Hi Stefan, On Wed, Apr 13, 2016 at 1:23 PM, Stefan Karlsson wrote: > Hi Thomas, > > > On 2016-04-13 12:44, Thomas St?fe wrote: > > Hi Stefan, > > On Tue, Apr 12, 2016 at 5:41 PM, Stefan Karlsson < > stefan.karlsson at oracle.com> wrote: > >> Hi Thomas, >> >> >> On 2016-04-12 16:23, Thomas St?fe wrote: >> >> Hi Stefan, >> >> >> On Mon, Apr 11, 2016 at 3:52 PM, Stefan Karlsson < >> stefan.karlsson at oracle.com> wrote: >> >>> Hi Thomas, >>> >>> On 2016-04-11 14:39, Thomas St?fe wrote: >>> >>> Hi Stefan, >>> >>> short question, why the mmap before the shmat? Why not shmat right away >>> at the requested address? >>> >>> >>> If we have a requested_address we do exactly what you propose. >>> >>> if (req_addr == NULL && alignment > os::large_page_size()) { >>> return shmat_with_large_alignment(shmid, bytes, alignment); >>> } else { >>> return shmat_with_normal_alignment(shmid, req_addr); >>> } >>> >>> ... >>> >>> static char* shmat_with_normal_alignment(int shmid, char* req_addr) { >>> char* addr = (char*)shmat(shmid, req_addr, 0); >>> >>> if ((intptr_t)addr == -1) { >>> shm_warning_with_errno("Failed to attach shared memory."); >>> return NULL; >>> } >>> >>> return addr; >>> } >>> >>> >>> It's when you don't have a requested address that mmap is used to find a >>> large enough virtual memory area. >>> >>> >> Sorry, seems I did not look at this coding thoroughly enough. I >> understand now that you do mmap to allocate and then to cut away the extra >> pre-/post-space, something which would not be possible with shmat, which >> cannot be unmapped page-wise. >> >> But I am still not sure why we do it his way: >> >> 3429 static char* shmat_with_alignment(int shmid, size_t bytes, size_t >> alignment, char* req_addr) { >> 3430 // If there's no requested address, the shmat call can return >> memory that is not >> 3431 // 'alignment' aligned, if the given alignment is larger than the >> large page size. >> 3432 // Special care needs to be taken to ensure that we get aligned >> memory back. >> 3433 if (req_addr == NULL && alignment > os::large_page_size()) { >> 3434 return shmat_with_large_alignment(shmid, bytes, alignment); >> 3435 } else { >> 3436 return shmat_with_normal_alignment(shmid, req_addr); >> 3437 } >> 3438 } >> >> For req_addr==0 and big alignment, we attach at the given alignment >> ("shmat_with_large_alignment"). >> For req_addr!=0, we attach at the given requested address >> ("shmat_with_normal_alignment"). >> For req_addr==0 and smaller alignment, we ignore the alignment and attach >> anywhere? >> >> Maybe I am slow, but why does it matter if the alignment is large or >> small? Why not just distinguish between: >> >> 1) address given (req_addr!=0): in this case we attach at this req_addr >> and rely on the user having aligned the address properly for his purposes. 
>> We specify 0 for flags, so we will attach at exactly the given address or >> fail. In this case we could simply ignore the given alignment - if one was >> given - or just use it to counter-check the req_addr. >> >> 2) alignment given (req_addr==0 and alignment > 0): attach at the given >> alignment using mmap-before-shmat. This could be done for any alignment, be >> it large or small. >> >> >> What you propose doesn't work. >> >> We're allocating large pages with SHM_HUGETLB, and if we try to attach to >> an address that is not large_page_size aligned the shmat call returns >> EINVAL. >> >> > I was aware of this. What I meant was: > > You have "shmat_with_large_alignment" which takes an alignment and does > its best to shmat with that alignment using the mmap trick. This coding > does not need to know anything about huge pages, and actually does not do > anything huge-pagey, apart from the asserts - it would just as well work > with small pages, because the only place where the code needs to know about > huge pages is in the layer above, in reserve_memory_special - where we pass SHM_HUGETLB > to shmget. (Btw, I always wondered about the "reserve_memory_special" > naming.) > > I think my point is that by renaming this to "shmat_with_alignment" and > removing the huge-page-related asserts the function would become both > simpler and more versatile and could be reused for small alignments as well > as large ones. The fact that it returns EINVAL for alignments instead of > asserting would not be a problem - we would return an error instead of > asserting because of bad alignment, and both handling this error and > asserting for huge-page-alignment could be handled better in > reserve_memory_special. > > To put it another way, I think "shmat_with_large_alignment" does not need > to know about huge pages; this should be the responsibility of > reserve_memory_special. > > About "shmat_with_normal_alignment", this is actually only a raw shmat > call and exists for the req_addr!=NULL case and for the case where we do > not pass neither req_addr nor alignment. So the only thing it does not > handle is alignment, so it is misnamed and also should not be called for > the req_addr==NULL-and-small-alignments-case. > > > The reserve_memory_special_shm function and the associated helper > functions I'm adding are specifically written to support large pages > allocations. The names "normal_alignment" and "large_alignment" are > intended to refer to alignment sizes compared to the large pages size. I > grant you that it's not obvious from the name, and we can rename them to > make it more clear. > > I want to provide a small bug fix for this large pages bug, while you are > suggesting that we re-purpose the code into supporting small page > allocations as well. Your suggestions might be good, but may I suggest that > you create a patch and an RFE that motivates why we should make this code > more generic to support small pages as well? > > Thanks, > StefanK > > Ok, we can do that. I was just worried that the code becomes more difficult to understand. But lets wait for some more reviews. Kind Regards, Thomas > > > >> >> Functions would become simpler and also could be clearer named (e.g. >> "shmat_at_address" and "shmat_with_alignment", respectivly). >> >> >> Maybe I should rename the functions to make it more obvious that these >> are large pages specific functions? 
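For readers who have not seen the mmap-before-shmat pattern being discussed, a rough Linux-specific sketch of the idea looks like the following. It is a simplification with minimal error handling and illustrative names, not the code in the webrev; SHM_REMAP and MAP_NORESERVE are Linux extensions, and the segment is assumed to have been created separately with shmget (with SHM_HUGETLB in the large-pages case):

  #include <sys/mman.h>
  #include <sys/ipc.h>
  #include <sys/shm.h>
  #include <stdint.h>
  #include <stddef.h>

  // Reserve a virtual address range big enough to hold an 'alignment'-aligned
  // block of 'bytes', trim the surplus head and tail, then attach the SysV
  // segment over the remaining reservation.
  static char* shmat_aligned_sketch(int shmid, size_t bytes, size_t alignment) {
    size_t extra = bytes + alignment;
    char* base = (char*) mmap(NULL, extra, PROT_NONE,
                              MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
    if (base == MAP_FAILED) {
      return NULL;
    }
    char* aligned = (char*) (((uintptr_t) base + alignment - 1) & ~(alignment - 1));

    // Cut away the parts of the reservation we do not need.
    if (aligned > base) {
      munmap(base, aligned - base);
    }
    size_t tail = (base + extra) - (aligned + bytes);
    if (tail > 0) {
      munmap(aligned + bytes, tail);
    }

    // SHM_REMAP lets shmat() replace the anonymous reservation in place;
    // unmapping first and attaching without SHM_REMAP is the alternative
    // discussed further down, with its own race window.
    char* addr = (char*) shmat(shmid, aligned, SHM_REMAP);
    return (addr == (char*) -1) ? NULL : addr;
  }

The reservation is only there to find a suitably aligned address range; the large-page specifics (attach address aligned to the large page size, segment created with SHM_HUGETLB) stay with the caller, which is what the naming discussion above is about.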
>> >> >> ---- >> >> This: >> >> 3402 if ((intptr_t)addr == -1) { >> 3403 shm_warning_with_errno("Failed to attach shared memory."); >> 3404 // Since we don't know if the kernel unmapped the pre-reserved >> memory area >> 3405 // we can't unmap it, since that would potentially unmap memory >> that was >> 3406 // mapped from other threads. >> 3407 return NULL; >> 3408 } >> >> seems scary. Means for every call this happens, we leak the reserved (not >> committed) address space? >> >> >> Yes, that's unfortunate. >> >> An alternative would be to use this sequence: >> 1) Use anon_mmap_aligned to find a suitable VA range >> 2) Immediately unmap the VA range >> 3) Try to attach at that VA range _without_ SHM_REMAP >> >> That would remove the risk of leaking the reserved address space, but >> instead we risk failing at (3) if another thread manages to allocate memory >> inside the found VA range. This will cause some users to unnecessarily fail >> to get large pages, though. We've had other problems when pre-existing >> threads used mmap while we were initializing the VM. See: JDK-8007074. >> > > Yes; btw you also could do this with shmget/shmat instead of mmap. > > Note that similar unclean tricks are already done in other places, see > e.g. the windows version of os::pd_split_reserved_memory(). Which deals > with VirtualAlloc() being unable, like shmget, to deallocate piece-wise. > > >> >> >> For most cases (anything but ENOMEM, actually) could we at least assert?: >> >> EACCES - should not happen: we created the shared memory and are its owner >> EIDRM - should not happen. >> EINVAL - should not happen. (you already check now the attach address for >> alignment to SHMLBA, so this is covered) >> >> >> Sure. I'll add asserts for these. >> >> >> --- >> >> Smaller nits: >> >> Functions called "shmat_..." suggest shmat-like behaviour, so could we >> have them return -1 instead of NULL in case of error? >> >> >> That would add clutter to the reserve_memory_special_shm, and it might >> also suggest that it would be OK to check errno for the failure reason, >> which probably wouldn't work. I'll let other Reviewers chime in and help >> decide if we should change this. >> >> > You are right. If one returns -1, one would have to preserve errno for the > caller too. > > >> Thanks for reviewing this, >> StefanK >> > > You are welcome! > > Kind Regards, Thomas > > >> >> >> >> Kind Regards, Thomas >> >>> >>> Also note that mmap- and shmat-allocated memory may have different >>> alignment requirements: mmap requires a page-aligned request address, >>> whereas shmat requires alignment to SHMLBA, which may be multiple pages >>> (e.g. for ARM: >>> >>> http://lxr.free-electrons.com/source/arch/arm/include/asm/shmparam.h#L9). >>> So, for this shat-over-mmap trick to work, request address has to be >>> aligned to SHMLBA, not just page size. >>> >>> I see that you assert alignment of requ address to >>> os::large_page_size(), which I would assume is a multiple of SHMLBA, but I >>> am not sure of this. >>> >>> >>> I've added some defensive code and asserts to catch this if/when this >>> assumption fails: >>> >>> http://cr.openjdk.java.net/~stefank/8017629/webrev.02.delta/ >>> http://cr.openjdk.java.net/~stefank/8017629/webrev.02 >>> >>> I need to verify that this works on other machines than my local Linux >>> x64 machine. 
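For reference, the SHMLBA constraint mentioned above can be checked in a couple of lines. This is only an illustration of the kind of defensive check being talked about, not the code in the webrev; the helper name and the plain assert are made up here:

  #include <sys/shm.h>
  #include <stdint.h>
  #include <assert.h>

  // shmat() requires the attach address to be SHMLBA-aligned; on some
  // architectures (e.g. ARM) SHMLBA is larger than the small page size.
  static bool is_shmlba_aligned(const char* addr) {
    return ((uintptr_t) addr % SHMLBA) == 0;
  }

  // Before attaching at a requested or pre-reserved address, something like:
  //   assert(is_shmlba_aligned(req_addr), "shmat attach address must be SHMLBA aligned");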
>>> >>> Thanks, >>> StefanK >>> >>> >>> Kind Regards, Thomas >>> >>> >>> >>> On Mon, Apr 11, 2016 at 1:03 PM, Stefan Karlsson < >>> stefan.karlsson at oracle.com> wrote: >>> >>>> Hi all, >>>> >>>> Please review this patch to enable SHM large page allocations even when >>>> the requested alignment is larger than os::large_page_size(). >>>> >>>> http://cr.openjdk.java.net/~stefank/8017629/webrev.01 >>>> https://bugs.openjdk.java.net/browse/JDK-8017629 >>>> >>>> G1 is affected by this bug since it requires the heap to start at an >>>> address that is aligned with the heap region size. The patch fixes this by >>>> changing the UseSHM large pages allocation code. First, virtual memory with >>>> correct alignment is pre-reserved and then the large pages are attached to >>>> this memory area. >>>> >>>> Tested with vm.gc.testlist and ExecuteInternaVMTests >>>> >>>> Thanks, >>>> StefanK >>>> >>> >>> >>> >> >> > > From daniel.daugherty at oracle.com Wed Apr 13 16:07:40 2016 From: daniel.daugherty at oracle.com (Daniel D. Daugherty) Date: Wed, 13 Apr 2016 10:07:40 -0600 Subject: RFR 8151546: nsk/jvmti/RedefineClasses/StressRedefine fails in hs nightly In-Reply-To: <570C03AB.4020906@oracle.com> References: <570C03AB.4020906@oracle.com> Message-ID: <570E6ECC.5050900@oracle.com> On 4/11/16 2:06 PM, Coleen Phillimore wrote: > Summary: Constant pool merging is not thread safe for source_file_name. > > This change includes the change for the following bug because they are > tested together. > > 8148772: VM crash in nsk/jvmti/RedefineClasses/StressRedefine: assert > failed: Corrupted constant pool > Summary: ConstantPool::resolve_constant_at_impl() isn't thread safe > for MethodHandleInError and MethodTypeInError. > > The parallel constant pool merges are mostly harmless because the old > methods constant pool pointers aren't updated. The only case I found > where it isn't harmless is that we rely on finding the > source_file_name_index from the final merged constant pool, which > could be any of the parallel merged constant pools. The code to > attempt to dig out the name from redefined classes is removed. > > open webrev at http://cr.openjdk.java.net/~coleenp/8151546.01/webrev src/share/vm/classfile/javaClasses.inline.hpp Perhaps instead of this comment: L226: // Because constant pools can be merged in parallel, the source file name index L227: // may be merged over with something else in a previous version. please consider this one: // RedefineClasses() currently permits redefine operations to // happen in parallel using a "last one wins" philosophy. That // spec laxness allows the constant pool entry associated with // the source_file_name_index for any older constant pool version // to be unstable so we shouldn't try to use it. src/share/vm/oops/constantPool.hpp I think this file is all changes from 8148772 which I've already reviewed so no comments. src/share/vm/oops/constantPool.cpp I think this file is all changes from 8148772 which I've already reviewed so no comments. src/share/vm/prims/jvmtiRedefineClasses.cpp The version number in the constant pool is new to my brain since I last dove into this code in detail... Right now you do the version increment here: L1445: // Update the version number of the constant pools (may keep scratch_cp) L1446: merge_cp->increment_and_save_version(old_cp->version()); L1447: scratch_cp->increment_and_save_version(old_cp->version()); You could choose to only do it when you know that you're going to keep the scratch_cp, but maybe that's being too picky. 
Serguei's find of this very old bug is good: JDK-6227506 JVMTI Spec: Atomicity of RedefineClasses should be specified https://bugs.openjdk.java.net/browse/JDK-6227506 There's another bug out there will all the notes that Tim Bell took when we did the monster RedefineClasses code walk through. I believe in that bug, the lack of locking/atomicity was also called out. I'll see if I can find that bug... Dan > bug link https://bugs.openjdk.java.net/browse/JDK-8151546 > > Tested with rbt, java/lang/instrument tests, com/sun/jdi tests. I > tried to write a test with all the conditions of the failure but > couldn't make it fail (so noreg-hard). > > Thanks, > Coleen From coleen.phillimore at oracle.com Wed Apr 13 16:48:43 2016 From: coleen.phillimore at oracle.com (Coleen Phillimore) Date: Wed, 13 Apr 2016 12:48:43 -0400 Subject: RFR 8151546: nsk/jvmti/RedefineClasses/StressRedefine fails in hs nightly In-Reply-To: <570E6ECC.5050900@oracle.com> References: <570C03AB.4020906@oracle.com> <570E6ECC.5050900@oracle.com> Message-ID: <570E786B.2030701@oracle.com> Dan, Thank you for reviewing this. On 4/13/16 12:07 PM, Daniel D. Daugherty wrote: > On 4/11/16 2:06 PM, Coleen Phillimore wrote: >> Summary: Constant pool merging is not thread safe for source_file_name. >> >> This change includes the change for the following bug because they >> are tested together. >> >> 8148772: VM crash in nsk/jvmti/RedefineClasses/StressRedefine: assert >> failed: Corrupted constant pool >> Summary: ConstantPool::resolve_constant_at_impl() isn't thread safe >> for MethodHandleInError and MethodTypeInError. >> >> The parallel constant pool merges are mostly harmless because the old >> methods constant pool pointers aren't updated. The only case I found >> where it isn't harmless is that we rely on finding the >> source_file_name_index from the final merged constant pool, which >> could be any of the parallel merged constant pools. The code to >> attempt to dig out the name from redefined classes is removed. >> >> open webrev at http://cr.openjdk.java.net/~coleenp/8151546.01/webrev > > src/share/vm/classfile/javaClasses.inline.hpp > Perhaps instead of this comment: > > L226: // Because constant pools can be merged in parallel, the > source file name index > L227: // may be merged over with something else in a previous > version. > > please consider this one: > > // RedefineClasses() currently permits redefine operations to > // happen in parallel using a "last one wins" philosophy. That > // spec laxness allows the constant pool entry associated with > // the source_file_name_index for any older constant pool version > // to be unstable so we shouldn't try to use it. Okay, your comment is more complete. I'll use your comment. > > src/share/vm/oops/constantPool.hpp > I think this file is all changes from 8148772 which I've already > reviewed so no comments. > > src/share/vm/oops/constantPool.cpp > I think this file is all changes from 8148772 which I've already > reviewed so no comments. > > src/share/vm/prims/jvmtiRedefineClasses.cpp > The version number in the constant pool is new to my brain since > I last dove into this code in detail... 
> > Right now you do the version increment here: > > L1445: // Update the version number of the constant pools (may > keep scratch_cp) > L1446: merge_cp->increment_and_save_version(old_cp->version()); > L1447: scratch_cp->increment_and_save_version(old_cp->version()); > > You could choose to only do it when you know that you're going > to keep the scratch_cp, but maybe that's being too picky. > I'd have to set it in two places if I did that, so I picked just once. If we use the merged_cp, the scratch_cp is discarded so the version doesn't matter. > Serguei's find of this very old bug is good: > > JDK-6227506 JVMTI Spec: Atomicity of RedefineClasses should be specified > https://bugs.openjdk.java.net/browse/JDK-6227506 > > There's another bug out there will all the notes that Tim Bell > took when we did the monster RedefineClasses code walk through. > I believe in that bug, the lack of locking/atomicity was also > called out. I'll see if I can find that bug... > Yes, that would be good. There are a lot of statics in VM_RedefineClasses. I don't know why these don't cause bugs! Coleen > Dan > > > >> bug link https://bugs.openjdk.java.net/browse/JDK-8151546 >> >> Tested with rbt, java/lang/instrument tests, com/sun/jdi tests. I >> tried to write a test with all the conditions of the failure but >> couldn't make it fail (so noreg-hard). >> >> Thanks, >> Coleen > From serguei.spitsyn at oracle.com Wed Apr 13 17:00:05 2016 From: serguei.spitsyn at oracle.com (serguei.spitsyn at oracle.com) Date: Wed, 13 Apr 2016 10:00:05 -0700 Subject: RFR 8151546: nsk/jvmti/RedefineClasses/StressRedefine fails in hs nightly In-Reply-To: <570E786B.2030701@oracle.com> References: <570C03AB.4020906@oracle.com> <570E6ECC.5050900@oracle.com> <570E786B.2030701@oracle.com> Message-ID: <570E7B15.9030201@oracle.com> On 4/13/16 09:48, Coleen Phillimore wrote: > > Dan, Thank you for reviewing this. > > On 4/13/16 12:07 PM, Daniel D. Daugherty wrote: >> On 4/11/16 2:06 PM, Coleen Phillimore wrote: >>> Summary: Constant pool merging is not thread safe for source_file_name. >>> >>> This change includes the change for the following bug because they >>> are tested together. >>> >>> 8148772: VM crash in nsk/jvmti/RedefineClasses/StressRedefine: >>> assert failed: Corrupted constant pool >>> Summary: ConstantPool::resolve_constant_at_impl() isn't thread safe >>> for MethodHandleInError and MethodTypeInError. >>> >>> The parallel constant pool merges are mostly harmless because the >>> old methods constant pool pointers aren't updated. The only case I >>> found where it isn't harmless is that we rely on finding the >>> source_file_name_index from the final merged constant pool, which >>> could be any of the parallel merged constant pools. The code to >>> attempt to dig out the name from redefined classes is removed. >>> >>> open webrev at http://cr.openjdk.java.net/~coleenp/8151546.01/webrev >> >> src/share/vm/classfile/javaClasses.inline.hpp >> Perhaps instead of this comment: >> >> L226: // Because constant pools can be merged in parallel, the >> source file name index >> L227: // may be merged over with something else in a previous >> version. >> >> please consider this one: >> >> // RedefineClasses() currently permits redefine operations to >> // happen in parallel using a "last one wins" philosophy. That >> // spec laxness allows the constant pool entry associated with >> // the source_file_name_index for any older constant pool version >> // to be unstable so we shouldn't try to use it. 
> > Okay, your comment is more complete. I'll use your comment. >> >> src/share/vm/oops/constantPool.hpp >> I think this file is all changes from 8148772 which I've already >> reviewed so no comments. >> >> src/share/vm/oops/constantPool.cpp >> I think this file is all changes from 8148772 which I've already >> reviewed so no comments. >> >> src/share/vm/prims/jvmtiRedefineClasses.cpp >> The version number in the constant pool is new to my brain since >> I last dove into this code in detail... >> >> Right now you do the version increment here: >> >> L1445: // Update the version number of the constant pools (may >> keep scratch_cp) >> L1446: merge_cp->increment_and_save_version(old_cp->version()); >> L1447: scratch_cp->increment_and_save_version(old_cp->version()); >> >> You could choose to only do it when you know that you're going >> to keep the scratch_cp, but maybe that's being too picky. >> > > I'd have to set it in two places if I did that, so I picked just > once. If we use the merged_cp, the scratch_cp is discarded so the > version doesn't matter. > >> Serguei's find of this very old bug is good: >> >> JDK-6227506 JVMTI Spec: Atomicity of RedefineClasses should be specified >> https://bugs.openjdk.java.net/browse/JDK-6227506 >> >> There's another bug out there will all the notes that Tim Bell >> took when we did the monster RedefineClasses code walk through. >> I believe in that bug, the lack of locking/atomicity was also >> called out. I'll see if I can find that bug... >> > > Yes, that would be good. There are a lot of statics in > VM_RedefineClasses. > I don't know why these don't cause bugs! I think, they do but very rarely and intermittently. This is rare corner case when two agents or two threads of one agent do concurrent redefinitions. Thanks, Serguei > > Coleen > >> Dan >> >> >> >>> bug link https://bugs.openjdk.java.net/browse/JDK-8151546 >>> >>> Tested with rbt, java/lang/instrument tests, com/sun/jdi tests. I >>> tried to write a test with all the conditions of the failure but >>> couldn't make it fail (so noreg-hard). >>> >>> Thanks, >>> Coleen >> > From chris.plummer at oracle.com Wed Apr 13 20:24:03 2016 From: chris.plummer at oracle.com (Chris Plummer) Date: Wed, 13 Apr 2016 13:24:03 -0700 Subject: [9] RFR (XXS): 8154145: Missing klass/method name in stack traces on error In-Reply-To: <570E3749.8080605@oracle.com> References: <570E3749.8080605@oracle.com> Message-ID: <570EAAE3.80900@oracle.com> Hi Vladimir, Thanks for fixing this. The changes look good, but can you send out before and after stacktraces for us to look at? thanks, Chris On 4/13/16 5:10 AM, Vladimir Ivanov wrote: > http://cr.openjdk.java.net/~vlivanov/8154145/webrev.00/ > https://bugs.openjdk.java.net/browse/JDK-8154145 > > After Jigsaw merge, hs_err files contain incomplete stack information > for code located in modules: > > J 1384 C1 java.base at 9-internal9-internal (104 bytes) @ 0xf4bca070 > [0xf4bc9ea0+0x000001d0] > > The problem is that module info overwrites class/method name before it > is printed. > > Best regards, > Vladimir Ivanov From daniel.smith at oracle.com Wed Apr 13 18:16:24 2016 From: daniel.smith at oracle.com (Dan Smith) Date: Wed, 13 Apr 2016 12:16:24 -0600 Subject: Call for Speakers -- 2016 JVM Language Summit Message-ID: <24FD249B-570E-4D7B-9BAE-D3191BAE464D@oracle.com> CALL FOR SPEAKERS -- JVM LANGUAGE SUMMIT, AUGUST 2016 We are pleased to announce the 2016 JVM Language Summit to be held at Oracle's Santa Clara campus on August 1-3, 2016. 
Registration is now open for speaker submissions and will remain open through May 23, 2016. There is no registration fee for speakers. A limited number of early registration slots are also available for regular attendees. The JVM Language Summit is an open technical collaboration among language designers, compiler writers, tool builders, runtime engineers, and VM architects. We will share our experiences as creators of both the JVM and programming languages for the JVM. We also welcome non-JVM developers of similar technologies to attend or speak on their runtime, VM, or language of choice. Presentations will be recorded and made available to the public. This event is being organized by language and JVM engineers -- no marketers involved! So bring your slide rules and be prepared for some seriously geeky discussions. Format The summit is held in a single classroom-style room to support direct communication between participants. About 100-120 attendees are expected. The schedule consists of a single track of traditional presentations (about 7 each day) interspersed with less-formal multitrack "workshop" discussion groups (2-3 each day) and, possibly, impromptu "lightning talks." Workshops will be less structured than in the past, favoring an open discussion format with only a small amount of prepared material. Thus, rather than collecting workshop abstracts from speakers, we're asking each registrant to suggest a few topics of interest. After choosing the most popular topics, we'll ask some registrants if they'd like to act as discussion leaders. Instructions for Speaker Registration If you'd like to give a presentation, please register as a Speaker and include a detailed abstract. Speaker registration will remain open through May 23. There is no fee. See below for help preparing your abstract and talk. You will be notified about whether your proposal has been accepted; if not, you will be able to register as a regular attendee. For a successful speaker submission, please note the following: - All talks should be deeply technical, given by designers and implementors to designers and implementors. We all speak bytecode here! - Each talk, we hope and expect, will inform the audience, in detail, about the state of the art of language design or implementation on the JVM, or will explore the present and future capabilities of the JVM itself. (Some will do so indirectly by discussing non-JVM technologies.) - Know your audience: attendees may not be likely to ever use your specific language or tool, but could learn something from your interactions with the JVM. A broad goal of the summit is to inspire us to work together on JVM-based technologies that enable a rich ecosystem at higher layers. To register: register.jvmlangsummit.com For further information: jvmlangsummit.com Questions: inquire2016 at jvmlangsummit.com From erik.joelsson at oracle.com Thu Apr 14 08:24:04 2016 From: erik.joelsson at oracle.com (Erik Joelsson) Date: Thu, 14 Apr 2016 10:24:04 +0200 Subject: RFR: JDK-8149777: Enable enhanced failure handler for "make test" In-Reply-To: References: <570CFF92.2090702@oracle.com> Message-ID: <570F53A4.6080208@oracle.com> The failure_handler is a jtreg extension that we build and put in the test image. It's then used when running our product tests. As a stand alone product, it has its own set of tests. The old makefile had a target for running those so I felt I needed to keep that functionality. I agree the make target can be confusing, but I couldn't find a better one. 
/Erik On 2016-04-13 12:48, Magnus Ihse Bursie wrote: > Is the test-failure-handle used to test that the failure handler is correct? It seems a bit odd, since most other tests has their main purpose to be run, and the build part is just a necessary thing, but here I presume that the main thing is to build the handler so other tests can use it, and it is not clear what the test itself mean. Otoh, I can't see any other way to express this. > > /Magnus > >> 12 apr. 2016 kl. 16:00 skrev Erik Joelsson : >> >> Please review this change which adds a proper makefile and build sequence for the failure-handler jtreg plugin. It also adds the failure handler to the test image and makes the hotspot and jdk test/Makefile's pick it up when available. >> >> Bug: https://bugs.openjdk.java.net/browse/JDK-8149777 >> Webrev: http://cr.openjdk.java.net/~erikj/8149777/webrev.03/ >> >> /Erik From serguei.spitsyn at oracle.com Thu Apr 14 08:24:17 2016 From: serguei.spitsyn at oracle.com (serguei.spitsyn at oracle.com) Date: Thu, 14 Apr 2016 01:24:17 -0700 Subject: RFR: 8153749 - New capability can_generate_early_class_hook_events Message-ID: <570F53B1.90809@oracle.com> Please, review the Jigsaw-related fix for: https://bugs.openjdk.java.net/browse/JDK-8153749 Hotspot webrev: http://cr.openjdk.java.net/~sspitsyn/webrevs/2016/hotspot/8153749-Jigsaw-newcap.hs1/ Jdk webrev: http://cr.openjdk.java.net/~sspitsyn/webrevs/2016/jdk/8153749-Jigsaw-newcap.jdk1/ Summary: This is a Jigsaw related enhancement. Some agents need to get a CFLH event for classes loaded in the primordial phase. This is not possible in JDK 9 because existing agents may instrument code in the primordial or start phase before the module system has completed initialization. We introduce a new capability: can_generate_early_class_hook_events. If this capability and can_generate_all_class_hook_events are enabled then the CFLH event could be posted for classes loaded in the primordial phase. We leave can_generate_early_vmstart as is, no changes. This enhancement needs a CCC request filed. I will file it once the JVMTI spec changes are reviewed. Testing: Altered the nsk.jvmti co-located test nsk/jvmti/ClassFileLoadHook/classfloadhk002 to enable the can_generate_early_class_hook_events and checked that new CFLH events are posted in the primordial phase and also they are not posted otherwise. Thanks, Serguei From david.holmes at oracle.com Thu Apr 14 02:35:41 2016 From: david.holmes at oracle.com (David Holmes) Date: Thu, 14 Apr 2016 12:35:41 +1000 Subject: CFV: New hotspot Group Member: Christian Tornqvist In-Reply-To: <570BCCC7.7010602@oracle.com> References: <570BCCC7.7010602@oracle.com> Message-ID: <570F01FD.50602@oracle.com> Vote: yes David On 12/04/2016 2:11 AM, Coleen Phillimore wrote: > I hereby nominate Christian Tornqvist (OpenJDK user name: ctornqvi) to > Membership in the hotspot Group. > > Christian is an Oracle engineer, and is lead for the Hotspot runtime SQE > team. He has been working in the HotSpot team since 2011. Christian is a > Reviewer in the JDK9 project. > > Votes are due by Monday, April 25, 2016 at 12:00PM ET. > > Only current Members of the hotspot Group [1] are eligible to vote on > this nomination. Votes must be cast in the open by replying to this > mailing list. > For Lazy Consensus voting instructions, see [2]. 
> > Coleen Phillimore > > [1] http://openjdk.java.net/census/#hotspot > [2] http://openjdk.java.net/groups/#member-vote From dmitry.samersoff at oracle.com Thu Apr 14 09:24:34 2016 From: dmitry.samersoff at oracle.com (Dmitry Samersoff) Date: Thu, 14 Apr 2016 12:24:34 +0300 Subject: RFR: 8153749 - New capability can_generate_early_class_hook_events In-Reply-To: <570F53B1.90809@oracle.com> References: <570F53B1.90809@oracle.com> Message-ID: <570F61D2.3070602@oracle.com> Serguei, Looks good for me. -Dmitry On 2016-04-14 11:24, serguei.spitsyn at oracle.com wrote: > Please, review the Jigsaw-related fix for: > https://bugs.openjdk.java.net/browse/JDK-8153749 > > > Hotspot webrev: > http://cr.openjdk.java.net/~sspitsyn/webrevs/2016/hotspot/8153749-Jigsaw-newcap.hs1/ > > > Jdk webrev: > http://cr.openjdk.java.net/~sspitsyn/webrevs/2016/jdk/8153749-Jigsaw-newcap.jdk1/ > > > > Summary: > > This is a Jigsaw related enhancement. > Some agents need to get a CFLH event for classes loaded in the > primordial phase. > This is not possible in JDK 9 because existing agents may instrument > code in the > primordial or start phase before the module system has completed > initialization. > > We introduce a new capability: can_generate_early_class_hook_events. > If this capability and can_generate_all_class_hook_events are enabled > then > the CFLH event could be posted for classes loaded in the primordial > phase. > We leave can_generate_early_vmstart as is, no changes. > > This enhancement needs a CCC request filed. > I will file it once the JVMTI spec changes are reviewed. > > > Testing: > Altered the nsk.jvmti co-located test > nsk/jvmti/ClassFileLoadHook/classfloadhk002 > to enable the can_generate_early_class_hook_events and checked that > new CFLH events > are posted in the primordial phase and also they are not posted > otherwise. > > > Thanks, > Serguei > -- Dmitry Samersoff Oracle Java development team, Saint Petersburg, Russia * I would love to change the world, but they won't give me the sources. From serguei.spitsyn at oracle.com Thu Apr 14 09:25:41 2016 From: serguei.spitsyn at oracle.com (serguei.spitsyn at oracle.com) Date: Thu, 14 Apr 2016 02:25:41 -0700 Subject: RFR: 8153749 - New capability can_generate_early_class_hook_events In-Reply-To: <570F61D2.3070602@oracle.com> References: <570F53B1.90809@oracle.com> <570F61D2.3070602@oracle.com> Message-ID: <570F6215.9020504@oracle.com> Thanks, Dmitry! Serguei On 4/14/16 02:24, Dmitry Samersoff wrote: > Serguei, > > Looks good for me. > > -Dmitry > > > On 2016-04-14 11:24, serguei.spitsyn at oracle.com wrote: >> Please, review the Jigsaw-related fix for: >> https://bugs.openjdk.java.net/browse/JDK-8153749 >> >> >> Hotspot webrev: >> http://cr.openjdk.java.net/~sspitsyn/webrevs/2016/hotspot/8153749-Jigsaw-newcap.hs1/ >> >> >> Jdk webrev: >> http://cr.openjdk.java.net/~sspitsyn/webrevs/2016/jdk/8153749-Jigsaw-newcap.jdk1/ >> >> >> >> Summary: >> >> This is a Jigsaw related enhancement. >> Some agents need to get a CFLH event for classes loaded in the >> primordial phase. >> This is not possible in JDK 9 because existing agents may instrument >> code in the >> primordial or start phase before the module system has completed >> initialization. >> >> We introduce a new capability: can_generate_early_class_hook_events. >> If this capability and can_generate_all_class_hook_events are enabled >> then >> the CFLH event could be posted for classes loaded in the primordial >> phase. >> We leave can_generate_early_vmstart as is, no changes. 
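To make the proposed capability concrete, a minimal agent sketch could look like the code below. This is illustrative only and not part of the webrev: error-code checking is omitted, and JVMTI_VERSION_9 is assumed to be the right version request for a JDK 9 jvmti.h.

  #include <jvmti.h>
  #include <string.h>

  static void JNICALL cflh_callback(jvmtiEnv* jvmti, JNIEnv* jni,
                                    jclass class_being_redefined, jobject loader,
                                    const char* name, jobject protection_domain,
                                    jint class_data_len, const unsigned char* class_data,
                                    jint* new_class_data_len, unsigned char** new_class_data) {
    // With the new capability enabled, classes loaded in the primordial phase
    // are reported here as well.
  }

  JNIEXPORT jint JNICALL Agent_OnLoad(JavaVM* vm, char* options, void* reserved) {
    jvmtiEnv* jvmti = NULL;
    vm->GetEnv((void**) &jvmti, JVMTI_VERSION_9);

    jvmtiCapabilities caps;
    memset(&caps, 0, sizeof(caps));
    caps.can_generate_all_class_hook_events   = 1;
    caps.can_generate_early_class_hook_events = 1;   // the new capability
    jvmti->AddCapabilities(&caps);

    jvmtiEventCallbacks callbacks;
    memset(&callbacks, 0, sizeof(callbacks));
    callbacks.ClassFileLoadHook = &cflh_callback;
    jvmti->SetEventCallbacks(&callbacks, sizeof(callbacks));
    jvmti->SetEventNotificationMode(JVMTI_ENABLE, JVMTI_EVENT_CLASS_FILE_LOAD_HOOK, NULL);
    return JNI_OK;
  }

Without the new capability bit the agent behaves as today; with it, and with can_generate_all_class_hook_events also granted, CFLH events start arriving for primordial-phase class loads as described above.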
>> >> This enhancement needs a CCC request filed. >> I will file it once the JVMTI spec changes are reviewed. >> >> >> Testing: >> Altered the nsk.jvmti co-located test >> nsk/jvmti/ClassFileLoadHook/classfloadhk002 >> to enable the can_generate_early_class_hook_events and checked that >> new CFLH events >> are posted in the primordial phase and also they are not posted >> otherwise. >> >> >> Thanks, >> Serguei >> > From magnus.ihse.bursie at oracle.com Thu Apr 14 10:58:36 2016 From: magnus.ihse.bursie at oracle.com (Magnus Ihse Bursie) Date: Thu, 14 Apr 2016 12:58:36 +0200 Subject: RFR: JDK-8149777: Enable enhanced failure handler for "make test" In-Reply-To: <570F53A4.6080208@oracle.com> References: <570CFF92.2090702@oracle.com> <570F53A4.6080208@oracle.com> Message-ID: <437F96A0-B0B2-4B1B-A40B-FC5433744076@oracle.com> Ok. Lgtm. /Magnus > 14 apr. 2016 kl. 10:24 skrev Erik Joelsson : > > The failure_handler is a jtreg extension that we build and put in the test image. It's then used when running our product tests. As a stand alone product, it has its own set of tests. The old makefile had a target for running those so I felt I needed to keep that functionality. I agree the make target can be confusing, but I couldn't find a better one. > > /Erik > >> On 2016-04-13 12:48, Magnus Ihse Bursie wrote: >> Is the test-failure-handle used to test that the failure handler is correct? It seems a bit odd, since most other tests has their main purpose to be run, and the build part is just a necessary thing, but here I presume that the main thing is to build the handler so other tests can use it, and it is not clear what the test itself mean. Otoh, I can't see any other way to express this. >> >> /Magnus >> >>> 12 apr. 2016 kl. 16:00 skrev Erik Joelsson : >>> >>> Please review this change which adds a proper makefile and build sequence for the failure-handler jtreg plugin. It also adds the failure handler to the test image and makes the hotspot and jdk test/Makefile's pick it up when available. >>> >>> Bug: https://bugs.openjdk.java.net/browse/JDK-8149777 >>> Webrev: http://cr.openjdk.java.net/~erikj/8149777/webrev.03/ >>> >>> /Erik > From marcus.larsson at oracle.com Thu Apr 14 13:25:40 2016 From: marcus.larsson at oracle.com (Marcus Larsson) Date: Thu, 14 Apr 2016 15:25:40 +0200 Subject: RFR: 8145934: Make ttyLocker equivalent for Unified Logging framework In-Reply-To: <570512FF.9070908@oracle.com> References: <56BB3FD0.5000104@oracle.com> <3910DA9B-43C9-4C1A-8FD0-993A54225550@oracle.com> <56BCA8C9.102@oracle.com> <56C34F0E.4090803@oracle.com> <90DC33E3-F597-40E4-A317-6C92F4969575@oracle.com> <56EC03A4.1030705@oracle.com> <56FCE56C.6070606@oracle.com> <56FD1481.3090707@oracle.com> <56FD3CC1.4050502@oracle.com> <56FE78B3.2060802@oracle.com> <5703C863.4080403@oracle.com> <570512FF.9070908@oracle.com> Message-ID: <570F9A54.6060509@oracle.com> Updated webrev: http://cr.openjdk.java.net/~mlarsson/8145934/webrev.04/ Incremental: http://cr.openjdk.java.net/~mlarsson/8145934/webrev.03-04/ Changed according to Thomas' feedback. John, are you fine with the latest changes? Thanks, Marcus On 04/06/2016 03:45 PM, Marcus Larsson wrote: > Hi, > > On 2016-04-06 11:38, Thomas St?fe wrote: >> Hi Marcus, >> >> still no luck applying your patch to hs-rt. On a freshly cloned repo >> I get: >> >> hg qpush -v >> >> .... >> cannot patch src/share/vm/logging/logMessage.hpp: file is not tracked >> .... >> > > Weird. Seems like webrev doesn't like my patch queue. 
I've regenerated > the webrev with a single patch, updated in place. > >> ---- >> >> I still feel that the benefit of different levels per log message is >> not worth the added complexity, especially since it prevents one from >> using the log message like a string stream (as you explained, using >> different log levels means a write must always be a complete line). >> >> I understand your motivation, but what you describe could just as >> well be done as (pseudocode): >> >> LogMessage(logging) msg; >> if (level >= debug) { >> msg.print("debug message"); >> if (level >= trace) { >> msg.print("additional trace information"); >> } >> } >> >> easier to understand, too. At the added costs of additional >> comparisons in the caller code. That way LogMessage does not have to >> know anything about log levels, and hence does not need to keep meta >> infos about lines, and could have a print() and print_cr() method. > > Assuming that message would then be written on debug level, you would > get trace messages included in the debug output depending on whether > or not you have trace enabled. It makes it all very confusing for the > consumers instead. > >> >> But that is just my opinion. >> >> .... >> >> Other than that, code looks fine. Small remarks: >> >> http://cr.openjdk.java.net/~mlarsson/8145934/webrev.03/src/share/vm/logging/log.cpp.udiff.html >> >> >> >> file_contains_substrings_in_order: >> >> Potential truncation if line length > 1024. >> > > Will fix. > >> -- >> >> http://cr.openjdk.java.net/~mlarsson/8145934/webrev.03/src/share/vm/logging/logFileStreamOutput.cpp.udiff.html >> >> >> >> Both LogFileStreamOutput::write(const LogDecorations& decorations, >> const char* msg) and >> LogFileStreamOutput::write(LogMessageBuffer::Iterator msg_iterator) >> can be made a tiny bit smaller by unifying the else branches, eg: >> >> + int written = 0; >> + os::flockfile(_stream); >> + for (; !msg_iterator.is_at_end(); msg_iterator++) { >> + if (use_decorations) { >> + written += write_decorations(msg_iterator.decorations()); >> + } >> + written += jio_fprintf(_stream, "%s\n", msg_iterator.message()); >> + } >> + fflush(_stream); >> + os::funlockfile(_stream); > > I will have to include a jio_fprintf in the if-case (or in > write_decorations) for the separating space between decorations and > message. It saves the else case though, so I'll update it. > >> --- >> >> http://cr.openjdk.java.net/~mlarsson/8145934/webrev.03/src/share/vm/logging/logPrefix.hpp.udiff.html >> >> >> >> DEBUG_ONLY(buf[0] = '\0';) \ >> >> I would get either rid of this or do this for product too. Doing this >> only for debug just hides "append-assuming-empty-string" errors in >> debug case. > > I'll remove it and change the assert to: > > assert(ret == 0 || ret == strlen(buf), ... > > >> >> Otherwise it looks fine to me. Still not a reviewer though :) so >> others should look at this too. >> >> Kind Regards, Thomas > > Thanks! > Marcus > >> >> >> >> On Tue, Apr 5, 2016 at 4:14 PM, Marcus Larsson >> > wrote: >> >> Hi, >> >> Rebased and aligned the patch with the latest changes to the UL >> API. Webrevs updated in place. >> >> Are we ready to wrap this up? >> >> Thanks, >> Marcus >> >> >> On 04/01/2016 03:33 PM, Marcus Larsson wrote: >> >> Hi again, >> >> Updated webrev with removed decoration buffers. Decorations >> are now written directly to the streams with the help of >> flockfile/funlockfile as you suggested. 
>> >> Webrev: >> http://cr.openjdk.java.net/~mlarsson/8145934/webrev.03/ >> >> >> Incremental: >> http://cr.openjdk.java.net/~mlarsson/8145934/webrev.02-03/ >> >> >> Thanks, >> Marcus >> >> On 03/31/2016 05:05 PM, Marcus Larsson wrote: >> >> >> On 03/31/2016 03:40 PM, Thomas St?fe wrote: >> >> Hi Marcus, >> >> On Thu, Mar 31, 2016 at 2:13 PM, Marcus Larsson >> > >> > >> wrote: >> >> Hi Thomas, >> >> >> On 03/31/2016 12:48 PM, Thomas St?fe wrote: >> >> Hi Marcus, >> >> nice to see progress on that issue! >> >> >> Thanks for taking time to look at it. >> >> >> Unfortunately your patch did not apply to my >> freshly synced hs-rt >> repository. So I took a "dry look" at your >> code, and here some >> feedback (by no means complete, and also I am >> not a (R)eviewer): >> >> >> I'll rebase it and update the webrev. >> >> >> - thank you for taking my input and avoiding >> resource area for >> memory. I am still apprehensive about UL using >> NEW_C_HEAP_ARRAY >> instead of raw malloc() here, but I see it has >> pros and cons. >> >> >> It might be worth investigating, but if so it >> should probably be a >> separate RFE. >> >> >> Ok. Easy enough to fix should e.g. NMT ever want to >> use UL. >> >> >> - I am not sure about flockfile(): I really do >> not like file >> locks, this always bites in customer >> scenarios. Also, by using >> this lock, are we not just reintroducing the >> ttyLocker at a >> deeper level? >> >> >> The fprintfs locks the FILE* internally even if we >> don't. This is >> AFAIU how fprintf guarantees the writes to be >> atomic. With the >> explicit flock calls we're just ensuring nothing >> can be printed >> in-between our fprintf calls, it shouldn't add any >> cost. >> >> >> Ah, I see. If we really feel safe about flockfile(), >> we might just as well use it in >> LogFileStreamOutput::write() too. There, we assemble >> the decorators in a stack local buffer to fprintf them >> out to the FILE* in a separate step - I guess to >> prevent tearing? But if flockfile comes without cost, >> we could save the stack local buffer and do: >> >> flockfile() >> fputs(decorators) >> fputs(message) >> funlockfile() >> >> >> Good idea. >> >> >> Instead, how about assembling the total >> message in memory - like >> it would appear in the file - and print it in >> one go using >> ::write()? That usually is atomic. This way >> you would have to >> write out the decorators for each line in >> memory as they are >> added, but you could get rid of the _lines[] >> array and all its >> surrounding code. So, no lock, less >> complicated code, at the cost >> of a bit more memory usage. >> >> >> As the message might go to different outputs, >> configured for >> different levels, we can't really get rid of the >> _lines[] array. >> We could assemble each applicable message as a >> long string for >> each of the outputs, but given how fprintf seems >> to work we won't >> really have gained anything for that extra work >> and memory usage. >> >> >> Oh, I see. I did not understand the complexity of the >> whole thing. Why is it needed to write lines to a >> message with different log levels? I may be slow, but >> I find that not easy to understand. The fact that >> different lines in my message may go to different >> outputs is a bit surprising. I would have thought a >> message is just a text blob I assemble offline and >> send to the logging framework in one go, like a >> glorified String, and that I would hand it down to UL >> "send this for this level/tagset combination". 
And >> that the message itself would not even need to know >> anything about log levels and tagsets. >> >> >> The use case I want to support with multi-part messages on >> different levels is when you have an event you want to >> log, on for example info level, but where part of that >> event might include data that is too verbose to fit the >> info level. So then you could split the event into two >> parts, one line with the basic information on info level >> and the other line (or multiple lines) on debug or trace >> level. The framework then makes sure these lines are >> delivered together non-interleaved. >> >> >> - If I understand this correctly, there is no >> way to print part >> of a line to the message object? So, if I >> would want to assemble >> a line from various inputs, I would still have >> to assemble it on >> the stack and feed it to say >> ScopedLogMessage::debug() in one go? >> Would it be posssible to get an outputStream* >> from the >> ScopedLogMessage to write into? >> >> >> Yes, that's right. I wanted to avoid streams for >> multi-line >> messages because I thought the API would become a >> bit messy with >> that functionality. The logStreams we have today >> are line >> buffered, and will send completed lines to the log >> outputs when >> they see a terminating newline character. This >> means that it won't >> be obvious how lines from different streams or >> writes to the >> message will be ordered in the output. Perhaps >> it's not that bad, >> but I figured that we could use stringStreams or >> similar for when >> we need to build up lines for the message. This >> has the nice side >> effect that it will be very obvious when, and in >> what order, each >> line is written to the outputs. Perhaps it's worth >> a follow up RFE >> if we find ourselves writing one too many log >> cases with >> stringStreams? >> >> >> Sorry, I think I was not clear enough. What I meant >> was simpler. We have now ScopedLogMessage::debug() >> which does LogMessageBuffer::write() which writes a >> line and terminates the line. Line >> outputStream::print_cr(). I would like to have an >> option to just write but not terminate the current >> line, like outputStream::print(). That way one could >> assemble a line piece by piece, maybe in a loop (e.g. >> for table row values) without needing another >> temporary buffer. >> >> >> Ok, so say we add the debug_no_cr() family of functions >> that writes into the log message buffer without newlines. >> Then, what does it mean if someone does debug_no_cr(s1); >> trace_no_cr(s2); info(s3); ? >> >> It would be simpler if it wasn't for the support for >> different levels on different parts of the message. Maybe >> some well defined rules for how it should work would solve >> this, but I intended to avoid the whole use case for now. >> It can be done manually with stringStreams, so I don't >> think it's that serious. >> >> >> >> - I like how you implemented >> os::log_vsnprintf(), using >> _vscprintf() on windows. Would it be >> worthwhile to merge this >> with jio_vsnprintf(), which does the same but >> returns -1 on >> truncation? >> >> >> The patch for JDK-8138916 [0] added the >> log_vsnprintf. You mean to >> change jio_vsnprintf to not return -1 on >> truncation, and instead >> work like vsnprintf on POSIX? I think that would >> be useful, and it >> allows us to remove log_vsnprintf. >> >> >> That is exactly what I meant. I think that would be a >> separate RFE though, one would have to check on all >> callers of jio_snprintf. >> >> >> Yeah. 
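For readers unfamiliar with the Windows side of this exchange, the pattern being referred to is roughly the one below. It is a hedged sketch, not the actual os::log_vsnprintf or jio_vsnprintf code, and the wrapper name is made up; _vsnprintf and _vscprintf are the MSVC CRT functions mentioned above:

  #include <stdio.h>
  #include <stdarg.h>

  // Make a Windows vsnprintf-style wrapper behave like C99/POSIX vsnprintf:
  // on truncation, return the length the full output would have required
  // instead of -1, by asking _vscprintf for that length.
  static int vsnprintf_c99_like(char* buf, size_t len, const char* fmt, va_list args) {
    va_list copy;
    va_copy(copy, args);
    int written = _vsnprintf(buf, len, fmt, copy);
    va_end(copy);
    if (written < 0 || (size_t) written >= len) {
      if (len > 0) {
        buf[len - 1] = '\0';             // _vsnprintf need not NUL-terminate on truncation
      }
      va_copy(copy, args);
      written = _vscprintf(fmt, copy);   // length of the untruncated output
      va_end(copy);
    }
    return written;
  }

A jio_vsnprintf with this contract would let the -1-on-truncation special case disappear from callers, which is what the suggested follow-up RFE is about.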
>> >> Regards, >> Marcus >> >> >> Thanks, >> Marcus >> >> >> Thank you! >> >> ..Thomas >> >> [0] https://bugs.openjdk.java.net/browse/JDK-8138916 >> >> >> >> Kind Regards, Thomas >> >> >> On Thu, Mar 31, 2016 at 10:53 AM, Marcus Larsson >> > >> > >> wrote: >> >> Any further feedback on this? >> >> >> >> On 03/18/2016 02:33 PM, Marcus Larsson >> wrote: >> >> Hi again, >> >> New webrev: >> http://cr.openjdk.java.net/~mlarsson/8145934/webrev.02/ >> >> >> >> Incremental: >> http://cr.openjdk.java.net/~mlarsson/8145934/webrev.alt-02/ >> >> >> >> Made all allocations regular C heap >> allocations because >> of the problems with resource >> allocations that Thomas >> brought up. We can do a follow up >> change for resource >> allocation support if we really need it. >> Also added some more tests for scoped >> messages. >> >> >> On 02/17/2016 12:19 AM, John Rose wrote: >> >> On Feb 16, 2016, at 8:32 AM, >> Marcus Larsson >> > >> > > >> > >> > >>> wrote: >> >> >> Alternative version where a >> LogMessage >> automatically writes its >> messages when it goes >> out of scope: >> http://cr.openjdk.java.net/~mlarsson/8145934/webrev.alt/ >> >> >> >> >> >> >> I like this, with the >> LogMessageBuffer that does the >> heavy work, and the >> [Scoped]LogMessage which is the >> simplest way to use it. >> >> The LogMessageBuffer should have a >> neutral >> unallocated state, for use through >> the LogMessage >> macro. I.e., is_c_allocated >> should be a three-state >> flag, including 'not allocated at >> all'. That way, if >> you create the thing only to ask >> 'is_debug' and get a >> false answer, you won't have done >> more than a few >> cycles of work. Probably the >> set_prefix operation >> should be lazy in the same way. >> >> >> Fixed. Since I removed the resource >> allocation completely >> I could keep it as a boolean. >> >> >> I think the destructor should call >> a user-callable >> flush function, something like this: >> >> ~ScopedLogMessage() { flush(); } >> // in LogMessageBuffer: >> void flush() { >> if (_line_count > 0) { >> _log.write(*this); >> reset(); >> } >> } >> void reset() { >> _line_count = 0; >> _message_buffer_size = 0; >> } >> >> It will be rare for user code to >> want to either flush >> early or cancel pending output, >> but when you need it, >> it should be there. >> >> >> Fixed. >> >> >> I still prefer the first patch >> though, where >> messages are neither tied to a >> particular log, >> nor automatically written when >> they go out of >> scope. Like I've said, the >> explicit write line >> makes it easier to read the >> code. >> >> >> There's a tradeoff here: It's >> easier to read the >> *logging* code if all the >> *logging* operations are >> explicit. But the point of >> logging code is to add >> logging to code that is busy doing >> *other* operations >> besides logging. That's why (I >> assume) people have >> been noting that some uses of >> logging are >> "intrusive": The logging logic >> calls too much >> attention to itself, and with >> attention being a >> limited resource, it takes away >> attention from the >> actual algorithm that's being >> logged about. >> >> The scoped (RAII) log buffer, with >> automatic write, >> is the best way I know to reduce >> the intrusiveness of >> this auxiliary mechanism. >> >> >> Fair point. I'm going with the >> automatic write on out of >> scope. >> >> >> Of course, I'm interested in >> finding out what your >> everyday customers think about >> it. (Rachel, Coleen, >> David, Dan?) 
>> >> For comparison I've updated >> the first suggestion >> with the guarantee for >> unwritten messages, as >> well as cleaning it up a bit >> by moving the >> implementation to the .cpp >> rather than the .hpp. >> Full >> webrev:http://cr.openjdk.java.net/~mlarsson/8145934/webrev.01/ >> >> >> >> Incremental:http://cr.openjdk.java.net/~mlarsson/8145934/webrev.00-01/ >> >> >> >> >> Let me know what you think. >> >> >> That option is more intrusive than >> the RAII buffered >> log alias. >> >> Separately, the review thread on >> JDK-8149383 shows a >> use for LogMessageBuffer to >> collect a complex log >> message. The log message can then >> be sent down one >> of two log streams. Something like: >> >> if (need_to_log) { >> ResourceMark rm; >> LogMessageBuffer buf; >> buf.write("Revoking bias of >> object " >> INTPTR_FORMAT " , mark " >> INTPTR_FORMAT " , type %s , >> prototype header " >> INTPTR_FORMAT >> " , >> allow rebias %d , >> requesting thread " INTPTR_FORMAT, >> p2i((void *)obj), >> (intptr_t) mark, >> obj->klass()->external_name(), >> (intptr_t) >> obj->klass()->prototype_header(), >> (allow_rebias ? 1 : 0), >> (intptr_t) requesting_thread); >> if (!is_bulk) >> log_info(biasedlocking).write(buf); >> else >> log_trace(biasedlocking).write(buf); >> } >> >> It is important here (like you >> pointed out) that the >> LogMessageBuffer is decoupled from >> log levels and >> streams, so that it can be used as >> a flexible >> component of logic like this. >> >> But the commonest usage should >> (IMO) be supported by >> a scoped auto-writing log alias. >> >> >> Yeah, I agree. >> >> Thanks, >> Marcus >> >> >> >> >> >> >> >> >> > From marcus.larsson at oracle.com Thu Apr 14 13:48:07 2016 From: marcus.larsson at oracle.com (Marcus Larsson) Date: Thu, 14 Apr 2016 15:48:07 +0200 Subject: RFR: 8146948: Add help information to log tags Message-ID: <570F9F97.4060808@oracle.com> Hi, Please review the following patch to add descriptions to logging tagsets. Summary: Tagsets may now be given a description in logTagSetDescriptions.inline.hpp. All described tagsets are listed in the -Xlog:help output as well as in the LogConfiguration::describe output. The patch also adds trace level logging that lists *all* tagsets in the VM (-Xlog:logging=trace). The previous 'logging=trace logging' has been moved to 'logging=debug'. Example log output: ... [0.544s][debug][logging] Described tag combinations: [0.544s][debug][logging] logging: Logging for the log framework itself ... 
[0.544s][trace][logging] All available tag sets: arguments, biasedlocking, classinit, classload, classload+constraints, classload+preorder, classloaderdata, classpath, classresolve, classunload, constraints, defaultmethods, ergo, exceptions, gc, gc+age, gc+alloc, gc+alloc+region, gc+barrier, gc+bot, gc+classhisto, gc+classhisto+start, gc+compaction, gc+cpu, gc+ergo, gc+ergo+cset, gc+ergo+heap, gc+ergo+ihop, gc+ergo+refine, gc+freelist, gc+freelist+census, gc+freelist+stats, gc+heap, gc+heap+coops, gc+heap+exit, gc+heap+region, gc+humongous, gc+ihop, gc+jni, gc+liveness, gc+marking, gc+marking+start, gc+metaspace, gc+metaspace+alloc, gc+metaspace+freelist, gc+phases, gc+phases+start, gc+phases+task, gc+phases+verify, gc+phases+verify+start, gc+plab, gc+promotion, gc+ref, gc+ref+start, gc+refine, gc+region, gc+remset, gc+remset+exit, gc+remset+scrub, gc+scavenge, gc+start, gc+state, gc+stats, gc+stringdedup, gc+stringtable, gc+survivor, gc+sweep, gc+task, gc+task+stats, gc+task+thread, gc+task+time, gc+tlab, gc+verify, gc+verify+start, gc+workgang, heap+ergo, itables, jfr, jfr+instrumentation, jfr+types, logging, logging+test, modules, monitorinflation, monitormismatch, os, os+cpu, os+thread, pagesize, protectiondomain, ref, safepoint, safepointcleanup, stacktrace, startuptime, verification, vmoperation, vtables Webrev: http://cr.openjdk.java.net/~mlarsson/8146948/webrev.00/ Issue: https://bugs.openjdk.java.net/browse/JDK-8146948 Testing: Internal VM tests through RBT Thanks, Marcus From Alan.Bateman at oracle.com Thu Apr 14 13:53:44 2016 From: Alan.Bateman at oracle.com (Alan Bateman) Date: Thu, 14 Apr 2016 14:53:44 +0100 Subject: RFR: 8153749 - New capability can_generate_early_class_hook_events In-Reply-To: <570F53B1.90809@oracle.com> References: <570F53B1.90809@oracle.com> Message-ID: <570FA0E8.5080606@oracle.com> On 14/04/2016 09:24, serguei.spitsyn at oracle.com wrote: > Please, review the Jigsaw-related fix for: > https://bugs.openjdk.java.net/browse/JDK-8153749 > > > Hotspot webrev: > http://cr.openjdk.java.net/~sspitsyn/webrevs/2016/hotspot/8153749-Jigsaw-newcap.hs1/ > > > Jdk webrev: > http://cr.openjdk.java.net/~sspitsyn/webrevs/2016/jdk/8153749-Jigsaw-newcap.jdk1/ > > This looks good, just a few suggests for the JVM TI spec For can_generate_early_class_hook_events then it might be better if the first sentence said "in the primordial phase" rather than "early". In CFLH then alternative wording is "When can_generate_early_class_hook_events and can_generate_early_class_hook_events are enabled then this event may be send before the VM is initialized (the start phase)". I think that might be more consistent with the long standing wording. -Alan. From paul.sandoz at oracle.com Thu Apr 14 14:53:23 2016 From: paul.sandoz at oracle.com (Paul Sandoz) Date: Thu, 14 Apr 2016 16:53:23 +0200 Subject: Hooking up the array mismatch stub as an intrinsic in the template interpreter Message-ID: <223E649D-201D-46D2-9C7F-5A536EA1405E@oracle.com> Hi, I hooked up the array mismatch stub to the interpreter, with a bit of code cargo culting the CRC work and some lldb debugging [*] it appears to work and pass tests. Can someone have a quick look to see if i am not the right track here: http://cr.openjdk.java.net/~psandoz/jdk9/JDK-8151268-int-c1-mismatch/webrev/ Here are some quick numbers running using -Xint for byte[] equality: Benchmark (lastNEQ) (n) Mode Cnt Score Error Units # Baseline # VM options: -Xint ByteArray.base_equals false 1024 avgt 10 16622.453 ? 
498.475 ns/op ByteArray.base_equals true 1024 avgt 10 16889.244 ? 439.895 ns/op # Before patch # VM options: -Xint -XX:-UseVectorizedMismatchIntrinsic ByteArray.jdk_equals false 1024 avgt 10 106436.195 ? 3657.508 ns/op ByteArray.jdk_equals true 1024 avgt 10 103306.001 ? 2723.130 ns/op # After patch # VM options: -Xint -XX:+UseVectorizedMismatchIntrinsic ByteArray.jdk_equals false 1024 avgt 10 448.764 ? 18.977 ns/op ByteArray.jdk_equals true 1024 avgt 10 448.657 ? 22.656 ns/op The next step is to wire up C1. Further steps would be to substitute some of intrinsics added/used for compact strings with mismatch, then evaluate the performance. Thanks, Paul. [*] Stubs to be used as intrinsics in the template interpreter need to be created during the initial stage of generation, otherwise the stub address is null which leads to a SEGV that?s hard to track down. From robbin.ehn at oracle.com Thu Apr 14 15:02:23 2016 From: robbin.ehn at oracle.com (Robbin Ehn) Date: Thu, 14 Apr 2016 17:02:23 +0200 Subject: RFR: 8146948: Add help information to log tags In-Reply-To: <570F9F97.4060808@oracle.com> References: <570F9F97.4060808@oracle.com> Message-ID: <570FB0FF.7090904@oracle.com> Hi Marcus, Looks good, thanks for fixing! /Robbin On 04/14/2016 03:48 PM, Marcus Larsson wrote: > Hi, > > Please review the following patch to add descriptions to logging tagsets. > > Summary: > Tagsets may now be given a description in > logTagSetDescriptions.inline.hpp. All described tagsets are listed in > the -Xlog:help output as well as in the LogConfiguration::describe > output. The patch also adds trace level logging that lists *all* tagsets > in the VM (-Xlog:logging=trace). The previous 'logging=trace logging' > has been moved to 'logging=debug'. > > Example log output: > > ... > [0.544s][debug][logging] Described tag combinations: > [0.544s][debug][logging] logging: Logging for the log framework itself > ... 
> [0.544s][trace][logging] All available tag sets: arguments, > biasedlocking, classinit, classload, classload+constraints, > classload+preorder, classloaderdata, classpath, classresolve, > classunload, constraints, defaultmethods, ergo, exceptions, gc, gc+age, > gc+alloc, gc+alloc+region, gc+barrier, gc+bot, gc+classhisto, > gc+classhisto+start, gc+compaction, gc+cpu, gc+ergo, gc+ergo+cset, > gc+ergo+heap, gc+ergo+ihop, gc+ergo+refine, gc+freelist, > gc+freelist+census, gc+freelist+stats, gc+heap, gc+heap+coops, > gc+heap+exit, gc+heap+region, gc+humongous, gc+ihop, gc+jni, > gc+liveness, gc+marking, gc+marking+start, gc+metaspace, > gc+metaspace+alloc, gc+metaspace+freelist, gc+phases, gc+phases+start, > gc+phases+task, gc+phases+verify, gc+phases+verify+start, gc+plab, > gc+promotion, gc+ref, gc+ref+start, gc+refine, gc+region, gc+remset, > gc+remset+exit, gc+remset+scrub, gc+scavenge, gc+start, gc+state, > gc+stats, gc+stringdedup, gc+stringtable, gc+survivor, gc+sweep, > gc+task, gc+task+stats, gc+task+thread, gc+task+time, gc+tlab, > gc+verify, gc+verify+start, gc+workgang, heap+ergo, itables, jfr, > jfr+instrumentation, jfr+types, logging, logging+test, modules, > monitorinflation, monitormismatch, os, os+cpu, os+thread, pagesize, > protectiondomain, ref, safepoint, safepointcleanup, stacktrace, > startuptime, verification, vmoperation, vtables > > > Webrev: > http://cr.openjdk.java.net/~mlarsson/8146948/webrev.00/ > > Issue: > https://bugs.openjdk.java.net/browse/JDK-8146948 > > Testing: > Internal VM tests through RBT > > Thanks, > Marcus > From volker.simonis at gmail.com Thu Apr 14 15:55:45 2016 From: volker.simonis at gmail.com (Volker Simonis) Date: Thu, 14 Apr 2016 17:55:45 +0200 Subject: RFR(S): 8154251: ANSI-C Quoting bug in hotspot.m4 during configure on SLES 10 and 11 Message-ID: Hi, can somebody please review this small hotspot build fix for SLES 10 an 11: http://cr.openjdk.java.net/~simonis/webrevs/2016/8154251/ https://bugs.openjdk.java.net/browse/JDK-8154251 I've encountered a strange configure problem with the new hotspot build system on SLES 10 and 11: ... checking which variants of the JVM to build... server configure: Unknown variant(s) specified: server configure: error: The available JVM variants are: server client minimal core zero zeroshark custom configure exiting with result code 1 ... The error seems nonsense since 'server' is a valid variant. For debugging purpose I ran "bash -vx configure ..." and this revealed: ... $GREP -Fvx "${VALID_JVM_VARIANTS// /$'\n'}" <<< "${JVM_VARIANTS// /$'\n'}" ++++ /usr/bin/grep -Fvx 'server$\nclient$\nminimal$\ncore$\nzero$\nzeroshark$\ncustom' +++ INVALID_VARIANTS=server ... The expected result would be: ... $GREP -Fvx "${VALID_JVM_VARIANTS// /$'\n'}" <<< "${JVM_VARIANTS// /$'\n'}" ++++ /bin/grep -Fvx 'server client minimal core zero zeroshark custom' +++ INVALID_VARIANTS= ... Apparently, the ANSI-C Quoting (see http://www.gnu.org/software/bash/manual/html_node/ANSI_002dC-Quoting.html#ANSI_002dC-Quoting) for "${VALID_JVM_VARIANTS// /$'\n'}" went wrong. Instead of replacing Spaces by newlines, it wrongly replaced Spaces by $\n literally instead. I tried to find the specific problem without success. There exist several bugs about ANSI-C Quoting in bash however. I could reproduce the problem on SLES 10.3 with the builtin bash 3.1.17 and a self-built bash 4.3.0 and also on a SLES 11.3 with the builtin bash 3.2.51. It worked on a newer SLES 12.1 with bash 4.2.47. I couldn't reproduce the problem on RHEL, Ubuntu and Fedora. 
Thank you and best regards, Volker From mikael.vidstedt at oracle.com Thu Apr 14 16:41:54 2016 From: mikael.vidstedt at oracle.com (Mikael Vidstedt) Date: Thu, 14 Apr 2016 09:41:54 -0700 Subject: RFR(S): 8154209: Remove client VM from default JIB profile on windows-x86 and linux-x86 Message-ID: <570FC852.10808@oracle.com> Please review the following change which removes the "client" VM from the default JIB build profile on windows-x86 and linux-x86: Bug: https://bugs.openjdk.java.net/browse/JDK-8154209 Webrev (top): http://cr.openjdk.java.net/~mikael/webrevs/8154209/webrev.01/ Webrev (hotspot): http://cr.openjdk.java.net/~mikael/webrevs/8154209/webrev.01/hotspot/webrev/ When not including the client VM, the build system automatically creates a jvm.cfg which makes -client an alias for -server. At some point in the future we may choose to output a warning and/or refuse to start up if -client is specified, but at least for now silently falling back on the -server VM seems appropriate. The test/runtime/SharedArchiveFile/DefaultUseWithClient.java test assumes that CDS is always compiled in and enabled in the -client VM on windows-x86. Since -client will fall back on -server that is no longer true, so the test needs to be updated. I added an @ignore and filed the following issue to track fixing the test: https://bugs.openjdk.java.net/browse/JDK-8154204 Testing: In addition to a standard JPRT push job, Christian Tornqvist helped me run the runtime nightly tests and apart from the above mentioned test all tests were successful. Cheers, Mikael From christian.thalinger at oracle.com Thu Apr 14 17:20:43 2016 From: christian.thalinger at oracle.com (Christian Thalinger) Date: Thu, 14 Apr 2016 07:20:43 -1000 Subject: RFR(S): 8154209: Remove client VM from default JIB profile on windows-x86 and linux-x86 In-Reply-To: <570FC852.10808@oracle.com> References: <570FC852.10808@oracle.com> Message-ID: > On Apr 14, 2016, at 6:41 AM, Mikael Vidstedt wrote: > > > Please review the following change which removes the "client" VM from the default JIB build profile Is there some public documentation about JIB? A quick search only showed a few JBS bugs that mention JIB. > on windows-x86 and linux-x86: > > Bug: https://bugs.openjdk.java.net/browse/JDK-8154209 > Webrev (top): http://cr.openjdk.java.net/~mikael/webrevs/8154209/webrev.01/ > Webrev (hotspot): http://cr.openjdk.java.net/~mikael/webrevs/8154209/webrev.01/hotspot/webrev/ > > > When not including the client VM, the build system automatically creates a jvm.cfg which makes -client an alias for -server. At some point in the future we may choose to output a warning and/or refuse to start up if -client is specified, but at least for now silently falling back on the -server VM seems appropriate. > > The test/runtime/SharedArchiveFile/DefaultUseWithClient.java test assumes that CDS is always compiled in and enabled in the -client VM on windows-x86. Since -client will fall back on -server that is no longer true, so the test needs to be updated. I added an @ignore and filed the following issue to track fixing the test: > > https://bugs.openjdk.java.net/browse/JDK-8154204 > > > Testing: > > In addition to a standard JPRT push job, Christian Tornqvist helped me run the runtime nightly tests and apart from the above mentioned test all tests were successful. 
> > Cheers, > Mikael > From vladimir.x.ivanov at oracle.com Thu Apr 14 18:06:04 2016 From: vladimir.x.ivanov at oracle.com (Vladimir Ivanov) Date: Thu, 14 Apr 2016 21:06:04 +0300 Subject: [9] RFR (XXS): 8154145: Missing klass/method name in stack traces on error In-Reply-To: <570EAAE3.80900@oracle.com> References: <570E3749.8080605@oracle.com> <570EAAE3.80900@oracle.com> Message-ID: <570FDC0C.4080907@oracle.com> Stefan, Chris, thanks for the feedback. On 4/13/16 11:24 PM, Chris Plummer wrote: > > Thanks for fixing this. The changes look good, but can you send out > before and after stacktraces for us to look at? J 1384 C1 java.base at 9-internal9-internal (104 bytes) @ 0xf4bca070 [0xf4bc9ea0+0x000001d0] becomes J 1384 C1 SomeClass.someMethod(LParam;)LResult; java.base at 9-internal (104 bytes) @ 0xf4bca070 [0xf4bc9ea0+0x000001d0] Best regards, Vladimir Ivanov > > thanks, > > Chris > > On 4/13/16 5:10 AM, Vladimir Ivanov wrote: >> http://cr.openjdk.java.net/~vlivanov/8154145/webrev.00/ >> https://bugs.openjdk.java.net/browse/JDK-8154145 >> >> After Jigsaw merge, hs_err files contain incomplete stack information >> for code located in modules: >> >> J 1384 C1 java.base at 9-internal9-internal (104 bytes) @ 0xf4bca070 >> [0xf4bc9ea0+0x000001d0] >> >> The problem is that module info overwrites class/method name before it >> is printed. >> >> Best regards, >> Vladimir Ivanov > From serguei.spitsyn at oracle.com Thu Apr 14 18:35:08 2016 From: serguei.spitsyn at oracle.com (serguei.spitsyn at oracle.com) Date: Thu, 14 Apr 2016 11:35:08 -0700 Subject: RFR: 8153749 - New capability can_generate_early_class_hook_events In-Reply-To: <570FA0E8.5080606@oracle.com> References: <570F53B1.90809@oracle.com> <570FA0E8.5080606@oracle.com> Message-ID: <570FE2DC.4060601@oracle.com> On 4/14/16 06:53, Alan Bateman wrote: > > > On 14/04/2016 09:24, serguei.spitsyn at oracle.com wrote: >> Please, review the Jigsaw-related fix for: >> https://bugs.openjdk.java.net/browse/JDK-8153749 >> >> >> Hotspot webrev: >> http://cr.openjdk.java.net/~sspitsyn/webrevs/2016/hotspot/8153749-Jigsaw-newcap.hs1/ >> >> >> Jdk webrev: >> http://cr.openjdk.java.net/~sspitsyn/webrevs/2016/jdk/8153749-Jigsaw-newcap.jdk1/ >> >> > This looks good, just a few suggests for the JVM TI spec > > For can_generate_early_class_hook_events then it might be better if > the first sentence said "in the primordial phase" rather than "early". Agreed, fixed. > > In CFLH then alternative wording is "When > can_generate_early_class_hook_events and > can_generate_early_class_hook_events are enabled then this event > may be send before the VM is initialized (the start phase)". I think > that might be more consistent with the long standing wording. Not sure, I understand the suggestion. This is what was before the fix: "This event may be sent before the VM is initialized (the start phase)". Now it is: "The timing of this event may depend on whether the agent has added the can_generate_early_class_hook_events capability or not. If the capability has been added then the VM posts the event in the primordial phase. Otherwise, this event may be sent before the VM is initialized (the start phase). " What part do we want to change? Would you like it to be like this: "The timing of this event may depend on whether the agent has added the can_generate_early_class_hook_events and can_generate_all_class_hook_events capabilities or not. If the capabilities have been added then the VM posts the event in the primordial phase. 
Otherwise, this event may be sent before the VM is initialized (the start phase). " or it is about something like this: "When can_generate_early_class_hook_events and can_generate_all_class_hook_events are enabled then this event may be sent in the primordial phase. Otherwise, this event may be sent before the VM is initialized (the start phase). " Thanks, Serguei > > -Alan. > From lois.foltan at oracle.com Thu Apr 14 19:29:47 2016 From: lois.foltan at oracle.com (Lois Foltan) Date: Thu, 14 Apr 2016 15:29:47 -0400 Subject: RFR (S): JDK-8152949: Jigsaw crash when Klass in _fixup_module_field_list is unloaded Message-ID: <570FEFAB.2070809@oracle.com> Hello, Please review the following fix: Webrev: http://cr.openjdk.java.net/~lfoltan/bug_jdk8152949/ Bug: Jigsaw crash when Klass in _fixup_module_field_list is unloaded https://bugs.openjdk.java.net/browse/JDK-8152949 Summary: Prior to java.base being defined to the VM by the module system initialization, classes loaded must be saved on a fixup list in order to later have their java.lang.Class' module field patched with java.base's java.lang.reflect.Module object once java.base is defined. Before module system initialization is complete, all classes loaded must have java.base as their defining module and be loaded by the boot loader. It was erroneously assumed that all classes placed on the module fixup list therefore would not die before java.base was defined. This assumption did not hold for anonymous classes which have a shorter lifetime than the boot loader. Test cases run with a small heap, -Xmx2m, would cause GC to unload the anonymous classes on the fixup list, later causing issues when an attempt was made to patch these classes with java.base's java.lang.reflect.Module object. Thank you to Per Liden and Stefan Karlsson for contributing this fix for the runtime team. Test: - java/lang, java/util, java/io, all Hotspot jtreg tests, Hotspot colocated tests & noncolo.quick.testlist - several iterations of ConcurrentLinkedQueue/RemoveLeak.java which exhibited the problem From chris.plummer at oracle.com Thu Apr 14 19:32:38 2016 From: chris.plummer at oracle.com (Chris Plummer) Date: Thu, 14 Apr 2016 12:32:38 -0700 Subject: [9] RFR (XXS): 8154145: Missing klass/method name in stack traces on error In-Reply-To: <570FDC0C.4080907@oracle.com> References: <570E3749.8080605@oracle.com> <570EAAE3.80900@oracle.com> <570FDC0C.4080907@oracle.com> Message-ID: <570FF056.5000704@oracle.com> Thanks! Chris On 4/14/16 11:06 AM, Vladimir Ivanov wrote: > Stefan, Chris, thanks for the feedback. > > On 4/13/16 11:24 PM, Chris Plummer wrote: >> >> Thanks for fixing this. The changes look good, but can you send out >> before and after stacktraces for us to look at? > > J 1384 C1 java.base at 9-internal9-internal (104 bytes) @ 0xf4bca070 > [0xf4bc9ea0+0x000001d0] > > becomes > > J 1384 C1 SomeClass.someMethod(LParam;)LResult; java.base at 9-internal > (104 bytes) @ 0xf4bca070 [0xf4bc9ea0+0x000001d0] > > Best regards, > Vladimir Ivanov > >> >> thanks, >> >> Chris >> >> On 4/13/16 5:10 AM, Vladimir Ivanov wrote: >>> http://cr.openjdk.java.net/~vlivanov/8154145/webrev.00/ >>> https://bugs.openjdk.java.net/browse/JDK-8154145 >>> >>> After Jigsaw merge, hs_err files contain incomplete stack information >>> for code located in modules: >>> >>> J 1384 C1 java.base at 9-internal9-internal (104 bytes) @ 0xf4bca070 >>> [0xf4bc9ea0+0x000001d0] >>> >>> The problem is that module info overwrites class/method name before it >>> is printed. 
>>> >>> Best regards, >>> Vladimir Ivanov >> From Alan.Bateman at oracle.com Thu Apr 14 20:00:24 2016 From: Alan.Bateman at oracle.com (Alan Bateman) Date: Thu, 14 Apr 2016 21:00:24 +0100 Subject: RFR: 8153749 - New capability can_generate_early_class_hook_events In-Reply-To: <570FE2DC.4060601@oracle.com> References: <570F53B1.90809@oracle.com> <570FA0E8.5080606@oracle.com> <570FE2DC.4060601@oracle.com> Message-ID: <570FF6D8.8030404@oracle.com> On 14/04/2016 19:35, serguei.spitsyn at oracle.com wrote: > : > > > or it is about something like this: > "When can_generate_early_class_hook_events and > can_generate_all_class_hook_events > are enabled then this event may be sent in the primordial phase. > Otherwise, this event may be sent before the VM is initialized (the > start phase). " Sorry, I wasn't clear. This version is exactly what I meant. -Alan From serguei.spitsyn at oracle.com Thu Apr 14 20:02:58 2016 From: serguei.spitsyn at oracle.com (serguei.spitsyn at oracle.com) Date: Thu, 14 Apr 2016 13:02:58 -0700 Subject: RFR: 8153749 - New capability can_generate_early_class_hook_events In-Reply-To: <570FF6D8.8030404@oracle.com> References: <570F53B1.90809@oracle.com> <570FA0E8.5080606@oracle.com> <570FE2DC.4060601@oracle.com> <570FF6D8.8030404@oracle.com> Message-ID: <570FF772.3050004@oracle.com> On 4/14/16 13:00, Alan Bateman wrote: > > > On 14/04/2016 19:35, serguei.spitsyn at oracle.com wrote: >> : >> >> >> or it is about something like this: >> "When can_generate_early_class_hook_events and >> can_generate_all_class_hook_events >> are enabled then this event may be sent in the primordial phase. >> Otherwise, this event may be sent before the VM is initialized (the >> start phase). " > Sorry, I wasn't clear. This version is exactly what I meant. Nice. Thank you for the suggestion. Thanks, Serguei > > -Alan > From dean.long at oracle.com Thu Apr 14 20:23:43 2016 From: dean.long at oracle.com (Dean Long) Date: Thu, 14 Apr 2016 13:23:43 -0700 Subject: RFR (S): JDK-8152949: Jigsaw crash when Klass in _fixup_module_field_list is unloaded In-Reply-To: <570FEFAB.2070809@oracle.com> References: <570FEFAB.2070809@oracle.com> Message-ID: <570FFC4F.7020000@oracle.com> Do the inc_keep_alive() and dec_keep_alive() updates need to be atomic by any chance? dl On 4/14/2016 12:29 PM, Lois Foltan wrote: > Hello, > > Please review the following fix: > > Webrev: > http://cr.openjdk.java.net/~lfoltan/bug_jdk8152949/ > > Bug: Jigsaw crash when Klass in _fixup_module_field_list is unloaded > https://bugs.openjdk.java.net/browse/JDK-8152949 > > Summary: > Prior to java.base being defined to the VM by the module system > initialization, classes loaded must be saved on a fixup list in order > to later have their java.lang.Class' module field patched with > java.base's java.lang.reflect.Module object once java.base is > defined. Before module system initialization is complete, all classes > loaded must have java.base as their defining module and be loaded by > the boot loader. It was erroneously assumed that all classes placed > on the module fixup list therefore would not die before java.base was > defined. This assumption did not hold for anonymous classes which > have a shorter lifetime than the boot loader. Test cases run with a > small heap, -Xmx2m, would cause GC to unload the anonymous classes on > the fixup list, later causing issues when an attempt was made to patch > these classes with java.base's java.lang.reflect.Module object. 
Thank > you to Per Liden and Stefan Karlsson for contributing this fix for the > runtime team. > > Test: > - java/lang, java/util, java/io, all Hotspot jtreg tests, Hotspot > colocated tests & noncolo.quick.testlist > - several iterations of ConcurrentLinkedQueue/RemoveLeak.java which > exhibited the problem From serguei.spitsyn at oracle.com Thu Apr 14 20:30:44 2016 From: serguei.spitsyn at oracle.com (serguei.spitsyn at oracle.com) Date: Thu, 14 Apr 2016 13:30:44 -0700 Subject: RFR: 8153749 - New capability can_generate_early_class_hook_events In-Reply-To: <570FA0E8.5080606@oracle.com> References: <570F53B1.90809@oracle.com> <570FA0E8.5080606@oracle.com> Message-ID: <570FFDF4.8080806@oracle.com> Alan, This is for sanity check: The updated hotspot webrev: http://cr.openjdk.java.net/~sspitsyn/webrevs/2016/hotspot/8153749-Jigsaw-newcap.hs2/ Please, note that the *src/share/vm/prims/jvmtiEnvBase.hpp* was corrected too. Please, find the JVMTI spec here: http:://cr.openjdk.java.net/~sspitsyn/webrevs/2016/hotspot/8153749-Jigsaw-newcap.hs2/jvmti.html Thanks, Serguei On 4/14/16 06:53, Alan Bateman wrote: > > > On 14/04/2016 09:24, serguei.spitsyn at oracle.com wrote: >> Please, review the Jigsaw-related fix for: >> https://bugs.openjdk.java.net/browse/JDK-8153749 >> >> >> Hotspot webrev: >> http://cr.openjdk.java.net/~sspitsyn/webrevs/2016/hotspot/8153749-Jigsaw-newcap.hs1/ >> >> >> Jdk webrev: >> http://cr.openjdk.java.net/~sspitsyn/webrevs/2016/jdk/8153749-Jigsaw-newcap.jdk1/ >> >> > This looks good, just a few suggests for the JVM TI spec > > For can_generate_early_class_hook_events then it might be better if > the first sentence said "in the primordial phase" rather than "early". > > In CFLH then alternative wording is "When > can_generate_early_class_hook_events and > can_generate_early_class_hook_events are enabled then this event > may be send before the VM is initialized (the start phase)". I think > that might be more consistent with the long standing wording. > > -Alan. > From coleen.phillimore at oracle.com Thu Apr 14 20:59:25 2016 From: coleen.phillimore at oracle.com (Coleen Phillimore) Date: Thu, 14 Apr 2016 16:59:25 -0400 Subject: RFR(XS): 8153275: Zero JVM fails to initialize after JDK-8152440 In-Reply-To: <1459931888.3613.10.camel@redhat.com> References: <1459791895.3762.14.camel@redhat.com> <57037BFB.1060606@redhat.com> <1459848614.4486.13.camel@redhat.com> <1459931888.3613.10.camel@redhat.com> Message-ID: <571004AD.1010305@oracle.com> Hi, I've hit this bug and reviewed it and will sponsor it. Thanks, Coleen On 4/6/16 4:38 AM, Severin Gehwolf wrote: > On Tue, 2016-04-05 at 11:30 +0200, Severin Gehwolf wrote: >> On Tue, 2016-04-05 at 09:48 +0100, Andrew Haley wrote: >>> On 04/04/16 18:44, Severin Gehwolf wrote: >>>> >>>> Hi, >>>> >>>> Could somebody please sponsor and review the following Zero-only >>>> fix? >>>> The fix for JDK-8152440 was incorrect in that it set the value >>>> for InitArrayShortSize to an illegal value (-1) failing >>>> constraint >>>> validation. Albeit not being used it must still pass constraint >>>> validation. Otherwise, the JVM fails to initialize and all bets >>>> are >>>> off. Thoughts? >>>> >>>> Bug: https://bugs.openjdk.java.net/browse/JDK-8153275 >>>> webrev: http://cr.openjdk.java.net/~sgehwolf/webrevs/JDK-8153275/ >>>> webrev.01/ >>> OK, but please make the comment clearer. I didn't understand it. >>> >>> "the allowed range [ 0 ... 9223372036854775807 ]" >>> >>> is much clearer. >> Thanks for the review! 
>> >> Updated webrev: >> http://cr.openjdk.java.net/~sgehwolf/webrevs/JDK-8153275/webrev.02/ > Could somebody sponsor this please? > > Thanks, > Severin From mikael.vidstedt at oracle.com Thu Apr 14 23:56:59 2016 From: mikael.vidstedt at oracle.com (Mikael Vidstedt) Date: Thu, 14 Apr 2016 16:56:59 -0700 Subject: Merging jdk9/hs-rt with jdk9/hs In-Reply-To: <57058B56.7060605@oracle.com> References: <56F08ACA.6030705@oracle.com> <57058B56.7060605@oracle.com> Message-ID: <7C98AE1A-052D-41B6-B08B-B2EDEB844FC4@oracle.com> Everything is in shape for doing the switch *tomorrow, Friday*. In order to make the transition a smooth one we kindly ask everybody to *not push changes to jdk9/hs-rt and/or jdk9/hs starting 7pm PT today, Thursday* (hours from now). We will send out an email when jdk9/hs is open for business - or, if things don't work out, when jdk9/hs-rt is open for business again. Note that we will wait to see if everything works before actually making jdk9/hs-rt read-only on the mercurial server, so please double check the target repo URLs before pushing changes the next day or two. Cheers, Mikael > On 4/6/2016 3:19 PM, Mikael Vidstedt wrote: > > Having heard no feedback[1], we're going to go ahead with this experiment and the plan is to do the switch next week, *Friday April 15th*. Again, please note that any outstanding work based on jdk9/hs-rt will have to be rebased on jdk9/hs once the switch is made. More information as we get closer to the actual switchover. > > Let us know if you have any concerns with the date, and/or any feedback on how it's working out. > > Cheers, > Mikael > > [1] Not even from Volker *hint* ;) > >> On 3/21/2016 4:59 PM, Mikael Vidstedt wrote: >> >> All, >> >> The JDK 9 development of Hotspot is primarily done in two different mercurial forests: jdk9/hs-rt[1], and jdk9/hs-comp[2]. In June of last year we moved[3] all the GC development from jdk9/hs-gc[4] to jdk9/hs-rt, and the experience so far has been a good one. Change propagation (from jdk9/hs-rt to jdk9/hs-gc and vice verse) is now a non-issue, we get testing faster on the union of the changes where previously it could take weeks to catch a GC related bug in RT testing, etc. >> >> However, both jdk9/hs-rt and jdk9/hs-comp still integrate through a third forest - jdk9/hs[5], aka. hs "main" - before the changes are integrated to jdk9/dev[6]. In line with the previous simplification, we would like to suggest a further simplification of the forest structure. Specifically, we suggest that the work currently done on the jdk9/hs-rt forest moves directly to the jdk9/hs forest. In addition to making the forest structure easier to understand, this would have the benefit of removing one set of integrations (jdk9/hs <-> jdk9/hs-rt), which further reduces cost and propagation time. It is also paving the way for eventually integrating up to jdk9/dev more often (but that is a separate discussion). >> >> We suggest that the experiment starts on April 15th, and goes on for at least two weeks (giving us some time to adapt in case of issues). Monitoring and evaluation of the new structure will take place continuously, with an option to revert back if things do not work out. The experiment would keep going for at least a few months, after which we will evaluate it and depending on the results consider making it the new standard. If so, the jdk9/hs-rt forest will eventually be retired, with an option of looking at further reduction of forests going forward. 
At least for now, we suggest that jdk9/hs-comp remains a separate forest and that it integrates through jdk9/hs just like it does today. >> >> Much like when we merged the jdk9/hs-gc and jdk9/hs-rt forests we would leave the jdk9/hs-rt forest around until we see if the experiment works out. We would also lock it down so that no accidental integrations are made to it. Once the jdk9/hs-rt forest is locked down, any work in flight based on it would have to be rebased on jdk9/hs. >> >> Please let us know if you have any feedback or questions! >> >> Cheers, >> Mikael >> >> [1]http://hg.openjdk.java.net/jdk9/hs-rt >> [2]http://hg.openjdk.java.net/jdk9/hs-comp >> [3]http://mail.openjdk.java.net/pipermail/hotspot-dev/2015-May/thread.html >> [4]http://hg.openjdk.java.net/jdk9/hs-gc >> [5]http://hg.openjdk.java.net/jdk9/hs >> [6]http://hg.openjdk.java.net/jdk9/dev From vladimir.kozlov at oracle.com Fri Apr 15 01:32:55 2016 From: vladimir.kozlov at oracle.com (Vladimir Kozlov) Date: Thu, 14 Apr 2016 18:32:55 -0700 Subject: Hooking up the array mismatch stub as an intrinsic in the template interpreter In-Reply-To: <223E649D-201D-46D2-9C7F-5A536EA1405E@oracle.com> References: <223E649D-201D-46D2-9C7F-5A536EA1405E@oracle.com> Message-ID: <571044C7.7080406@oracle.com> Looks good. You can use the same register for *Offset as for log2ArrayIndexScale in generate_vectorizedMismatch_entry(). You need to remove #ifdef COMPILER2 in stubGenerator_x86.cpp since you are using the stub in Interpreter (and C1 later). Regards, Vladimir On 4/14/16 7:53 AM, Paul Sandoz wrote: > Hi, > > I hooked up the array mismatch stub to the interpreter, with a bit of code cargo culting the CRC work and some lldb debugging [*] it appears to work and pass tests. > > Can someone have a quick look to see if i am not the right track here: > > http://cr.openjdk.java.net/~psandoz/jdk9/JDK-8151268-int-c1-mismatch/webrev/ > > > Here are some quick numbers running using -Xint for byte[] equality: > > Benchmark (lastNEQ) (n) Mode Cnt Score Error Units > # Baseline > # VM options: -Xint > ByteArray.base_equals false 1024 avgt 10 16622.453 ? 498.475 ns/op > ByteArray.base_equals true 1024 avgt 10 16889.244 ? 439.895 ns/op > > # Before patch > # VM options: -Xint -XX:-UseVectorizedMismatchIntrinsic > ByteArray.jdk_equals false 1024 avgt 10 106436.195 ? 3657.508 ns/op > ByteArray.jdk_equals true 1024 avgt 10 103306.001 ? 2723.130 ns/op > > # After patch > # VM options: -Xint -XX:+UseVectorizedMismatchIntrinsic > ByteArray.jdk_equals false 1024 avgt 10 448.764 ? 18.977 ns/op > ByteArray.jdk_equals true 1024 avgt 10 448.657 ? 22.656 ns/op > > > > The next step is to wire up C1. > > Further steps would be to substitute some of intrinsics added/used for compact strings with mismatch, then evaluate the performance. > > Thanks, > Paul. > > [*] Stubs to be used as intrinsics in the template interpreter need to be created during the initial stage of generation, otherwise the stub address is null which leads to a SEGV that?s hard to track down. > From erik.joelsson at oracle.com Fri Apr 15 07:43:52 2016 From: erik.joelsson at oracle.com (Erik Joelsson) Date: Fri, 15 Apr 2016 09:43:52 +0200 Subject: RFR(S): 8154209: Remove client VM from default JIB profile on windows-x86 and linux-x86 In-Reply-To: <570FC852.10808@oracle.com> References: <570FC852.10808@oracle.com> Message-ID: <57109BB8.4050302@oracle.com> Looks good to me. 
/Erik On 2016-04-14 18:41, Mikael Vidstedt wrote: > > Please review the following change which removes the "client" VM from > the default JIB build profile on windows-x86 and linux-x86: > > Bug: https://bugs.openjdk.java.net/browse/JDK-8154209 > Webrev (top): > http://cr.openjdk.java.net/~mikael/webrevs/8154209/webrev.01/ > Webrev (hotspot): > http://cr.openjdk.java.net/~mikael/webrevs/8154209/webrev.01/hotspot/webrev/ > > > When not including the client VM, the build system automatically > creates a jvm.cfg which makes -client an alias for -server. At some > point in the future we may choose to output a warning and/or refuse to > start up if -client is specified, but at least for now silently > falling back on the -server VM seems appropriate. > > The test/runtime/SharedArchiveFile/DefaultUseWithClient.java test > assumes that CDS is always compiled in and enabled in the -client VM > on windows-x86. Since -client will fall back on -server that is no > longer true, so the test needs to be updated. I added an @ignore and > filed the following issue to track fixing the test: > > https://bugs.openjdk.java.net/browse/JDK-8154204 > > > Testing: > > In addition to a standard JPRT push job, Christian Tornqvist helped me > run the runtime nightly tests and apart from the above mentioned test > all tests were successful. > > Cheers, > Mikael > From erik.joelsson at oracle.com Fri Apr 15 07:45:19 2016 From: erik.joelsson at oracle.com (Erik Joelsson) Date: Fri, 15 Apr 2016 09:45:19 +0200 Subject: RFR(S): 8154209: Remove client VM from default JIB profile on windows-x86 and linux-x86 In-Reply-To: References: <570FC852.10808@oracle.com> Message-ID: <57109C0F.2090806@oracle.com> On 2016-04-14 19:20, Christian Thalinger wrote: >> On Apr 14, 2016, at 6:41 AM, Mikael Vidstedt wrote: >> >> >> Please review the following change which removes the "client" VM from the default JIB build profile > Is there some public documentation about JIB? A quick search only showed a few JBS bugs that mention JIB. No, Jib is an Oracle internal tool, just like JPRT. But, just as we need a JPRT configuration in the open repo, we also need a Jib configuration in the open repo. Otherwise we cannot build and test open only forests internally. /Erik >> on windows-x86 and linux-x86: >> >> Bug: https://bugs.openjdk.java.net/browse/JDK-8154209 >> Webrev (top): http://cr.openjdk.java.net/~mikael/webrevs/8154209/webrev.01/ >> Webrev (hotspot): http://cr.openjdk.java.net/~mikael/webrevs/8154209/webrev.01/hotspot/webrev/ >> >> >> When not including the client VM, the build system automatically creates a jvm.cfg which makes -client an alias for -server. At some point in the future we may choose to output a warning and/or refuse to start up if -client is specified, but at least for now silently falling back on the -server VM seems appropriate. >> >> The test/runtime/SharedArchiveFile/DefaultUseWithClient.java test assumes that CDS is always compiled in and enabled in the -client VM on windows-x86. Since -client will fall back on -server that is no longer true, so the test needs to be updated. I added an @ignore and filed the following issue to track fixing the test: >> >> https://bugs.openjdk.java.net/browse/JDK-8154204 >> >> >> Testing: >> >> In addition to a standard JPRT push job, Christian Tornqvist helped me run the runtime nightly tests and apart from the above mentioned test all tests were successful. 
>> >> Cheers, >> Mikael >> From erik.joelsson at oracle.com Fri Apr 15 08:50:41 2016 From: erik.joelsson at oracle.com (Erik Joelsson) Date: Fri, 15 Apr 2016 10:50:41 +0200 Subject: RFR(S): 8154251: ANSI-C Quoting bug in hotspot.m4 during configure on SLES 10 and 11 In-Reply-To: References: Message-ID: <5710AB61.1080302@oracle.com> Looks good to me. I tried the patch locally and it seems to be working. I will sponsor it as soon as the appropriate hotspot repo is open again. /Erik On 2016-04-14 17:55, Volker Simonis wrote: > Hi, > > can somebody please review this small hotspot build fix for SLES 10 an 11: > > http://cr.openjdk.java.net/~simonis/webrevs/2016/8154251/ > https://bugs.openjdk.java.net/browse/JDK-8154251 > > I've encountered a strange configure problem with the new hotspot build > system on SLES 10 and 11: > ... > checking which variants of the JVM to build... server > configure: Unknown variant(s) specified: server > configure: error: The available JVM variants are: server client minimal > core zero zeroshark custom > configure exiting with result code 1 > ... > > The error seems nonsense since 'server' is a valid variant. > For debugging purpose I ran "bash -vx configure ..." and this revealed: > ... > $GREP -Fvx "${VALID_JVM_VARIANTS// /$'\n'}" <<< "${JVM_VARIANTS// /$'\n'}" > ++++ /usr/bin/grep -Fvx > 'server$\nclient$\nminimal$\ncore$\nzero$\nzeroshark$\ncustom' > +++ INVALID_VARIANTS=server > ... > The expected result would be: > ... > $GREP -Fvx "${VALID_JVM_VARIANTS// /$'\n'}" <<< "${JVM_VARIANTS// /$'\n'}" > ++++ /bin/grep -Fvx 'server > client > minimal > core > zero > zeroshark > custom' > +++ INVALID_VARIANTS= > ... > Apparently, the ANSI-C Quoting (see > http://www.gnu.org/software/bash/manual/html_node/ANSI_002dC-Quoting.html#ANSI_002dC-Quoting) > for "${VALID_JVM_VARIANTS// /$'\n'}" went wrong. Instead of replacing > Spaces by newlines, it wrongly replaced Spaces by $\n literally instead. > > I tried to find the specific problem without success. There exist several > bugs about ANSI-C Quoting in bash however. I could reproduce the problem on > SLES 10.3 with the builtin bash 3.1.17 and a self-built bash 4.3.0 and also > on a SLES 11.3 with the builtin bash 3.2.51. It worked on a newer SLES 12.1 > with bash 4.2.47. I couldn't reproduce the problem on RHEL, Ubuntu and > Fedora. > > Thank you and best regards, > Volker From paul.sandoz at oracle.com Fri Apr 15 10:12:02 2016 From: paul.sandoz at oracle.com (Paul Sandoz) Date: Fri, 15 Apr 2016 12:12:02 +0200 Subject: Hooking up the array mismatch stub as an intrinsic in the template interpreter In-Reply-To: <571044C7.7080406@oracle.com> References: <223E649D-201D-46D2-9C7F-5A536EA1405E@oracle.com> <571044C7.7080406@oracle.com> Message-ID: <79ABC1C0-2AEF-4561-A8FE-A9036B48CCC1@oracle.com> > On 15 Apr 2016, at 03:32, Vladimir Kozlov wrote: > > Looks good. You can use the same register for *Offset as for log2ArrayIndexScale in generate_vectorizedMismatch_entry(). > You need to remove #ifdef COMPILER2 in stubGenerator_x86.cpp since you are using the stub in Interpreter (and C1 later). > Most helpful, thanks! Paul. 
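Several messages in this thread assume familiarity with what the mismatch primitive computes, so here is a small, self-contained scalar model of it and of how array equality can be layered on top. This is only an illustration: the real ArraysSupport.vectorizedMismatch works on Unsafe base offsets, compares in vector-width chunks, and (as the javadoc fragment quoted later in the thread notes) encodes the length of the unprocessed tail in its return value.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

// Scalar model of a mismatch primitive: returns the index of the first
// differing byte, or -1 if the two ranges are identical over 'length' bytes.
static ptrdiff_t mismatch(const uint8_t* a, const uint8_t* b, size_t length) {
  for (size_t i = 0; i < length; i++) {
    if (a[i] != b[i]) {
      return (ptrdiff_t)i;
    }
  }
  return -1;
}

// Array equality expressed in terms of the mismatch primitive, which is
// roughly the shape of the byte[] equality path being benchmarked above.
static bool array_equals(const uint8_t* a, size_t alen,
                         const uint8_t* b, size_t blen) {
  return alen == blen && mismatch(a, b, alen) == -1;
}

int main() {
  uint8_t x[] = {1, 2, 3, 4};
  uint8_t y[] = {1, 2, 9, 4};
  printf("first mismatch at index %td\n", mismatch(x, y, sizeof(x)));          // prints 2
  printf("equal: %s\n", array_equals(x, sizeof(x), y, sizeof(y)) ? "yes" : "no");
  return 0;
}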
From Alan.Bateman at oracle.com Fri Apr 15 11:00:47 2016 From: Alan.Bateman at oracle.com (Alan Bateman) Date: Fri, 15 Apr 2016 12:00:47 +0100 Subject: RFR: 8153749 - New capability can_generate_early_class_hook_events In-Reply-To: <570FFDF4.8080806@oracle.com> References: <570F53B1.90809@oracle.com> <570FA0E8.5080606@oracle.com> <570FFDF4.8080806@oracle.com> Message-ID: <5710C9DF.9090007@oracle.com> On 14/04/2016 21:30, serguei.spitsyn at oracle.com wrote: > Alan, > > This is for sanity check: > > The updated hotspot webrev: > http://cr.openjdk.java.net/~sspitsyn/webrevs/2016/hotspot/8153749-Jigsaw-newcap.hs2/ > > Please, note that the *src/share/vm/prims/jvmtiEnvBase.hpp* was > corrected too. > In can_generate_early_class_hook_events then "can be posted" or "may be posted" might be better than "could be posted". Also the end tag '>' at L10002 should be probably be on the proceeding line. Otherwise looks okay to me. -Alan From jesper.wilhelmsson at oracle.com Fri Apr 15 11:10:51 2016 From: jesper.wilhelmsson at oracle.com (Jesper Wilhelmsson) Date: Fri, 15 Apr 2016 13:10:51 +0200 Subject: hs-rt and main are CLOSED Message-ID: <5710CC3A.6000302@oracle.com> Hi, Since I've gotten the question a few times today I just want to clarify that jdk9/hs-rt AND jdk9/hs are both closed due to the merge of these two repos. We will let you know once everything is done. Thanks, /Jesper From volker.simonis at gmail.com Fri Apr 15 12:01:04 2016 From: volker.simonis at gmail.com (Volker Simonis) Date: Fri, 15 Apr 2016 14:01:04 +0200 Subject: RFR(S): 8154251: ANSI-C Quoting bug in hotspot.m4 during configure on SLES 10 and 11 In-Reply-To: <5710AB61.1080302@oracle.com> References: <5710AB61.1080302@oracle.com> Message-ID: Great! Thanks a lot Erik, Volker On Fri, Apr 15, 2016 at 10:50 AM, Erik Joelsson wrote: > Looks good to me. I tried the patch locally and it seems to be working. I > will sponsor it as soon as the appropriate hotspot repo is open again. > > /Erik > > > On 2016-04-14 17:55, Volker Simonis wrote: > >> Hi, >> >> can somebody please review this small hotspot build fix for SLES 10 an 11: >> >> http://cr.openjdk.java.net/~simonis/webrevs/2016/8154251/ >> https://bugs.openjdk.java.net/browse/JDK-8154251 >> >> I've encountered a strange configure problem with the new hotspot build >> system on SLES 10 and 11: >> ... >> checking which variants of the JVM to build... server >> configure: Unknown variant(s) specified: server >> configure: error: The available JVM variants are: server client minimal >> core zero zeroshark custom >> configure exiting with result code 1 >> ... >> >> The error seems nonsense since 'server' is a valid variant. >> For debugging purpose I ran "bash -vx configure ..." and this revealed: >> ... >> $GREP -Fvx "${VALID_JVM_VARIANTS// /$'\n'}" <<< "${JVM_VARIANTS// /$'\n'}" >> ++++ /usr/bin/grep -Fvx >> 'server$\nclient$\nminimal$\ncore$\nzero$\nzeroshark$\ncustom' >> +++ INVALID_VARIANTS=server >> ... >> The expected result would be: >> ... >> $GREP -Fvx "${VALID_JVM_VARIANTS// /$'\n'}" <<< "${JVM_VARIANTS// /$'\n'}" >> ++++ /bin/grep -Fvx 'server >> client >> minimal >> core >> zero >> zeroshark >> custom' >> +++ INVALID_VARIANTS= >> ... >> Apparently, the ANSI-C Quoting (see >> >> http://www.gnu.org/software/bash/manual/html_node/ANSI_002dC-Quoting.html#ANSI_002dC-Quoting >> ) >> for "${VALID_JVM_VARIANTS// /$'\n'}" went wrong. Instead of replacing >> Spaces by newlines, it wrongly replaced Spaces by $\n literally instead. 
>> >> I tried to find the specific problem without success. There exist several >> bugs about ANSI-C Quoting in bash however. I could reproduce the problem >> on >> SLES 10.3 with the builtin bash 3.1.17 and a self-built bash 4.3.0 and >> also >> on a SLES 11.3 with the builtin bash 3.2.51. It worked on a newer SLES >> 12.1 >> with bash 4.2.47. I couldn't reproduce the problem on RHEL, Ubuntu and >> Fedora. >> >> Thank you and best regards, >> Volker >> > > From coleen.phillimore at oracle.com Fri Apr 15 12:12:24 2016 From: coleen.phillimore at oracle.com (Coleen Phillimore) Date: Fri, 15 Apr 2016 08:12:24 -0400 Subject: Hooking up the array mismatch stub as an intrinsic in the template interpreter In-Reply-To: <223E649D-201D-46D2-9C7F-5A536EA1405E@oracle.com> References: <223E649D-201D-46D2-9C7F-5A536EA1405E@oracle.com> Message-ID: <5710DAA8.5080402@oracle.com> I don't know why we'd add even more assembly code to the interpreter. Why doesn't the JIT optimize this function instead? By adding a stub in the interpreter does that prevent the JIT from inlining this function since it's not invocation counted? thanks, Coleen On 4/14/16 10:53 AM, Paul Sandoz wrote: > Hi, > > I hooked up the array mismatch stub to the interpreter, with a bit of code cargo culting the CRC work and some lldb debugging [*] it appears to work and pass tests. > > Can someone have a quick look to see if i am not the right track here: > > http://cr.openjdk.java.net/~psandoz/jdk9/JDK-8151268-int-c1-mismatch/webrev/ > > > Here are some quick numbers running using -Xint for byte[] equality: > > Benchmark (lastNEQ) (n) Mode Cnt Score Error Units > # Baseline > # VM options: -Xint > ByteArray.base_equals false 1024 avgt 10 16622.453 ? 498.475 ns/op > ByteArray.base_equals true 1024 avgt 10 16889.244 ? 439.895 ns/op > > # Before patch > # VM options: -Xint -XX:-UseVectorizedMismatchIntrinsic > ByteArray.jdk_equals false 1024 avgt 10 106436.195 ? 3657.508 ns/op > ByteArray.jdk_equals true 1024 avgt 10 103306.001 ? 2723.130 ns/op > > # After patch > # VM options: -Xint -XX:+UseVectorizedMismatchIntrinsic > ByteArray.jdk_equals false 1024 avgt 10 448.764 ? 18.977 ns/op > ByteArray.jdk_equals true 1024 avgt 10 448.657 ? 22.656 ns/op > > > > The next step is to wire up C1. > > Further steps would be to substitute some of intrinsics added/used for compact strings with mismatch, then evaluate the performance. > > Thanks, > Paul. > > [*] Stubs to be used as intrinsics in the template interpreter need to be created during the initial stage of generation, otherwise the stub address is null which leads to a SEGV that?s hard to track down. From lois.foltan at oracle.com Fri Apr 15 12:50:31 2016 From: lois.foltan at oracle.com (Lois Foltan) Date: Fri, 15 Apr 2016 08:50:31 -0400 Subject: RFR (S): JDK-8152949: Jigsaw crash when Klass in _fixup_module_field_list is unloaded In-Reply-To: <570FFC4F.7020000@oracle.com> References: <570FEFAB.2070809@oracle.com> <570FFC4F.7020000@oracle.com> Message-ID: <5710E397.3020106@oracle.com> On 4/14/2016 4:23 PM, Dean Long wrote: > Do the inc_keep_alive() and dec_keep_alive() updates need to be > atomic by any chance? Thanks Dean for the review and good point. I will make that change and send out an updated webrev. 
Lois > > dl > > On 4/14/2016 12:29 PM, Lois Foltan wrote: >> Hello, >> >> Please review the following fix: >> >> Webrev: >> http://cr.openjdk.java.net/~lfoltan/bug_jdk8152949/ >> >> Bug: Jigsaw crash when Klass in _fixup_module_field_list is unloaded >> https://bugs.openjdk.java.net/browse/JDK-8152949 >> >> Summary: >> Prior to java.base being defined to the VM by the module system >> initialization, classes loaded must be saved on a fixup list in order >> to later have their java.lang.Class' module field patched with >> java.base's java.lang.reflect.Module object once java.base is >> defined. Before module system initialization is complete, all >> classes loaded must have java.base as their defining module and be >> loaded by the boot loader. It was erroneously assumed that all >> classes placed on the module fixup list therefore would not die >> before java.base was defined. This assumption did not hold for >> anonymous classes which have a shorter lifetime than the boot >> loader. Test cases run with a small heap, -Xmx2m, would cause GC to >> unload the anonymous classes on the fixup list, later causing issues >> when an attempt was made to patch these classes with java.base's >> java.lang.reflect.Module object. Thank you to Per Liden and Stefan >> Karlsson for contributing this fix for the runtime team. >> >> Test: >> - java/lang, java/util, java/io, all Hotspot jtreg tests, Hotspot >> colocated tests & noncolo.quick.testlist >> - several iterations of ConcurrentLinkedQueue/RemoveLeak.java which >> exhibited the problem > From paul.sandoz at oracle.com Fri Apr 15 13:07:08 2016 From: paul.sandoz at oracle.com (Paul Sandoz) Date: Fri, 15 Apr 2016 15:07:08 +0200 Subject: Hooking up the array mismatch stub as an intrinsic in the template interpreter In-Reply-To: <5710DAA8.5080402@oracle.com> References: <223E649D-201D-46D2-9C7F-5A536EA1405E@oracle.com> <5710DAA8.5080402@oracle.com> Message-ID: <0AFAAAA1-61FA-4DB2-B1B6-C1B816636C65@oracle.com> > On 15 Apr 2016, at 14:12, Coleen Phillimore wrote: > > > I don't know why we'd add even more assembly code to the interpreter. Why doesn't the JIT optimize this function instead? By adding a stub in the interpreter does that prevent the JIT from inlining this function since it's not invocation counted? > I have updated the webrev with C1 support [1] and determined, eyeballing generated code, that the stub call gets inlined for C1 and C2 and appears unaffected by the wiring up of that same stub in the template interpreter. A stub was added and wired up to C2 with the intention to wire that up to C1, and possible to the interpreter. One reason for the latter was because of the performance results presented in the last email (potentially ~200x over the current approach, and ~35x improvement over the original Java code). Does that matter? would you be concerned about that? Array equality is quite a fundamental operation so i was concerned about such a regression in the interpreter. Another reason for the latter, which i may be off base on here, is it might make it easier to consolidate the intrinsics added for compact string equality/comparison to this more general mismatch functionality. ? Regarding the changes to C1 in [1]. Like for the CRC intrinsics i added the _vectorizedMismatch intrinsic to the set of intrinsics that preserve state and can trap. Is that correct? Also i am not sure if the 32-bit part is correct. Thanks, Paul. 
[1] http://cr.openjdk.java.net/~psandoz/jdk9/JDK-8151268-int-c1-mismatch/webrev/ (Note: this is still incomplete i need to appropriately update all CPU-based code.) Benchmark (lastNEQ) (n) Mode Cnt Score Error Units # Baseline # VM options: -XX:TieredStopAtLevel=1 ByteArray.base_equals false 1024 avgt 10 1190.177 ? 21.387 ns/op ByteArray.base_equals true 1024 avgt 10 1191.767 ? 35.196 ns/op # Before patch # VM options: -XX:TieredStopAtLevel=1 -XX:-SpecialArraysEquals -XX:-UseVectorizedMismatchIntrinsic ByteArray.jdk_equals false 1024 avgt 10 208.014 ? 5.224 ns/op ByteArray.jdk_equals true 1024 avgt 10 218.271 ? 10.749 ns/op # After patch # VM options: -XX:TieredStopAtLevel=1 -XX:-SpecialArraysEquals -XX:+UseVectorizedMismatchIntrinsic ByteArray.jdk_equals false 1024 avgt 10 70.097 ? 2.321 ns/op ByteArray.jdk_equals true 1024 avgt 10 72.284 ? 1.578 ns/op > thanks, > Coleen > > > On 4/14/16 10:53 AM, Paul Sandoz wrote: >> Hi, >> >> I hooked up the array mismatch stub to the interpreter, with a bit of code cargo culting the CRC work and some lldb debugging [*] it appears to work and pass tests. >> >> Can someone have a quick look to see if i am not the right track here: >> >> http://cr.openjdk.java.net/~psandoz/jdk9/JDK-8151268-int-c1-mismatch/webrev/ >> >> >> Here are some quick numbers running using -Xint for byte[] equality: >> >> Benchmark (lastNEQ) (n) Mode Cnt Score Error Units >> # Baseline >> # VM options: -Xint >> ByteArray.base_equals false 1024 avgt 10 16622.453 ? 498.475 ns/op >> ByteArray.base_equals true 1024 avgt 10 16889.244 ? 439.895 ns/op >> >> # Before patch >> # VM options: -Xint -XX:-UseVectorizedMismatchIntrinsic >> ByteArray.jdk_equals false 1024 avgt 10 106436.195 ? 3657.508 ns/op >> ByteArray.jdk_equals true 1024 avgt 10 103306.001 ? 2723.130 ns/op >> >> # After patch >> # VM options: -Xint -XX:+UseVectorizedMismatchIntrinsic >> ByteArray.jdk_equals false 1024 avgt 10 448.764 ? 18.977 ns/op >> ByteArray.jdk_equals true 1024 avgt 10 448.657 ? 22.656 ns/op >> >> >> >> The next step is to wire up C1. >> >> Further steps would be to substitute some of intrinsics added/used for compact strings with mismatch, then evaluate the performance. >> >> Thanks, >> Paul. >> >> [*] Stubs to be used as intrinsics in the template interpreter need to be created during the initial stage of generation, otherwise the stub address is null which leads to a SEGV that?s hard to track down. > From stefan.karlsson at oracle.com Fri Apr 15 13:11:11 2016 From: stefan.karlsson at oracle.com (Stefan Karlsson) Date: Fri, 15 Apr 2016 15:11:11 +0200 Subject: RFR (S): JDK-8152949: Jigsaw crash when Klass in _fixup_module_field_list is unloaded In-Reply-To: <5710E397.3020106@oracle.com> References: <570FEFAB.2070809@oracle.com> <570FFC4F.7020000@oracle.com> <5710E397.3020106@oracle.com> Message-ID: <5710E86F.3090606@oracle.com> Hi Lois, On 2016-04-15 14:50, Lois Foltan wrote: > > On 4/14/2016 4:23 PM, Dean Long wrote: >> Do the inc_keep_alive() and dec_keep_alive() updates need to be >> atomic by any chance? > > Thanks Dean for the review and good point. I will make that change > and send out an updated webrev. When would we ever race on the _keep_alive variable? Or is this more of a defensive change to safeguard against future changes? 
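To make the atomicity question concrete, the toy program below shows the failure mode a plain counter has if two threads ever update it concurrently. It is not the ClassLoaderData code from the webrev: HotSpot would use its own Atomic::inc/Atomic::dec wrappers rather than std::atomic, and whether such a race is actually reachable for _keep_alive is exactly what is being asked here.

#include <stdio.h>
#include <atomic>
#include <thread>

static int plain_count = 0;               // unsynchronized read-modify-write
static std::atomic<int> atomic_count(0);  // atomic read-modify-write

static void bump(int n) {
  for (int i = 0; i < n; i++) {
    plain_count++;                                         // updates can be lost (data race)
    atomic_count.fetch_add(1, std::memory_order_relaxed);  // updates are never lost
  }
}

int main() {
  std::thread t1(bump, 1000000);
  std::thread t2(bump, 1000000);
  t1.join();
  t2.join();
  // plain_count typically ends up below 2000000; atomic_count is always exactly 2000000.
  printf("plain: %d  atomic: %d\n", plain_count, atomic_count.load());
  return 0;
}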
Thanks, StefanK > Lois > >> >> dl >> >> On 4/14/2016 12:29 PM, Lois Foltan wrote: >>> Hello, >>> >>> Please review the following fix: >>> >>> Webrev: >>> http://cr.openjdk.java.net/~lfoltan/bug_jdk8152949/ >>> >>> Bug: Jigsaw crash when Klass in _fixup_module_field_list is unloaded >>> https://bugs.openjdk.java.net/browse/JDK-8152949 >>> >>> Summary: >>> Prior to java.base being defined to the VM by the module system >>> initialization, classes loaded must be saved on a fixup list in >>> order to later have their java.lang.Class' module field patched with >>> java.base's java.lang.reflect.Module object once java.base is >>> defined. Before module system initialization is complete, all >>> classes loaded must have java.base as their defining module and be >>> loaded by the boot loader. It was erroneously assumed that all >>> classes placed on the module fixup list therefore would not die >>> before java.base was defined. This assumption did not hold for >>> anonymous classes which have a shorter lifetime than the boot >>> loader. Test cases run with a small heap, -Xmx2m, would cause GC to >>> unload the anonymous classes on the fixup list, later causing issues >>> when an attempt was made to patch these classes with java.base's >>> java.lang.reflect.Module object. Thank you to Per Liden and Stefan >>> Karlsson for contributing this fix for the runtime team. >>> >>> Test: >>> - java/lang, java/util, java/io, all Hotspot jtreg tests, Hotspot >>> colocated tests & noncolo.quick.testlist >>> - several iterations of ConcurrentLinkedQueue/RemoveLeak.java which >>> exhibited the problem >> > From vladimir.x.ivanov at oracle.com Fri Apr 15 13:29:41 2016 From: vladimir.x.ivanov at oracle.com (Vladimir Ivanov) Date: Fri, 15 Apr 2016 16:29:41 +0300 Subject: Hooking up the array mismatch stub as an intrinsic in the template interpreter In-Reply-To: <0AFAAAA1-61FA-4DB2-B1B6-C1B816636C65@oracle.com> References: <223E649D-201D-46D2-9C7F-5A536EA1405E@oracle.com> <5710DAA8.5080402@oracle.com> <0AFAAAA1-61FA-4DB2-B1B6-C1B816636C65@oracle.com> Message-ID: <5710ECC5.1080706@oracle.com> An idea how to avoid interpreter changes. Interpreter can't benefit from "intrinsifiable" methods directly, but if you create a wrapper and call it instead [1], JIT-compilers can take care of stand-alone versions for you. The interpreter will work with them as if they are ordinary Java methods. The only missing case is early startup phase when everything is interpreted, but we can add a special logic in the JVM to eagerly compile such methods (either during startup or on the first invocation) which would be much simpler than adding intrinsics specifically for the interpreter. Best regards, Vladimir Ivanov [1] diff --git a/src/java.base/share/classes/java/util/ArraysSupport.java b/src/java.base/share/classes/java/util/ArraysSupport.java --- a/src/java.base/share/classes/java/util/ArraysSupport.java +++ b/src/java.base/share/classes/java/util/ArraysSupport.java @@ -26,6 +26,7 @@ import jdk.internal.HotSpotIntrinsicCandidate; import jdk.internal.misc.Unsafe; +import jdk.internal.vm.annotation.ForceInline; /** * Utility methods to find a mismatch between two primitive arrays. @@ -106,8 +107,16 @@ * compliment of the number of remaining pairs of elements to be checked in * the tail of the two arrays. 
*/ + @ForceInline + static int vectorizedMismatch(Object a, long aOffset, + Object b, long bOffset, + int length, + int log2ArrayIndexScale) { + return vectorizedMismatch0(a, aOffset, b, bOffset, length, log2ArrayIndexScale); + } + @HotSpotIntrinsicCandidate - static int vectorizedMismatch(Object a, long aOffset, + private static int vectorizedMismatch0(Object a, long aOffset, Object b, long bOffset, int length, int log2ArrayIndexScale) { On 4/15/16 4:07 PM, Paul Sandoz wrote: > >> On 15 Apr 2016, at 14:12, Coleen Phillimore wrote: >> >> >> I don't know why we'd add even more assembly code to the interpreter. Why doesn't the JIT optimize this function instead? By adding a stub in the interpreter does that prevent the JIT from inlining this function since it's not invocation counted? >> > > I have updated the webrev with C1 support [1] and determined, eyeballing generated code, that the stub call gets inlined for C1 and C2 and appears unaffected by the wiring up of that same stub in the template interpreter. > > A stub was added and wired up to C2 with the intention to wire that up to C1, and possible to the interpreter. One reason for the latter was because of the performance results presented in the last email (potentially ~200x over the current approach, and ~35x improvement over the original Java code). Does that matter? would you be concerned about that? > > Array equality is quite a fundamental operation so i was concerned about such a regression in the interpreter. > > Another reason for the latter, which i may be off base on here, is it might make it easier to consolidate the intrinsics added for compact string equality/comparison to this more general mismatch functionality. > > ? > > Regarding the changes to C1 in [1]. Like for the CRC intrinsics i added the _vectorizedMismatch intrinsic to the set of intrinsics that preserve state and can trap. Is that correct? Also i am not sure if the 32-bit part is correct. > > Thanks, > Paul. > > [1] http://cr.openjdk.java.net/~psandoz/jdk9/JDK-8151268-int-c1-mismatch/webrev/ > (Note: this is still incomplete i need to appropriately update all CPU-based code.) > > Benchmark (lastNEQ) (n) Mode Cnt Score Error Units > # Baseline > # VM options: -XX:TieredStopAtLevel=1 > ByteArray.base_equals false 1024 avgt 10 1190.177 ? 21.387 ns/op > ByteArray.base_equals true 1024 avgt 10 1191.767 ? 35.196 ns/op > > # Before patch > # VM options: -XX:TieredStopAtLevel=1 -XX:-SpecialArraysEquals -XX:-UseVectorizedMismatchIntrinsic > ByteArray.jdk_equals false 1024 avgt 10 208.014 ? 5.224 ns/op > ByteArray.jdk_equals true 1024 avgt 10 218.271 ? 10.749 ns/op > > # After patch > # VM options: -XX:TieredStopAtLevel=1 -XX:-SpecialArraysEquals -XX:+UseVectorizedMismatchIntrinsic > ByteArray.jdk_equals false 1024 avgt 10 70.097 ? 2.321 ns/op > ByteArray.jdk_equals true 1024 avgt 10 72.284 ? 1.578 ns/op > > > >> thanks, >> Coleen >> >> >> On 4/14/16 10:53 AM, Paul Sandoz wrote: >>> Hi, >>> >>> I hooked up the array mismatch stub to the interpreter, with a bit of code cargo culting the CRC work and some lldb debugging [*] it appears to work and pass tests. >>> >>> Can someone have a quick look to see if i am not the right track here: >>> >>> http://cr.openjdk.java.net/~psandoz/jdk9/JDK-8151268-int-c1-mismatch/webrev/ >>> >>> >>> Here are some quick numbers running using -Xint for byte[] equality: >>> >>> Benchmark (lastNEQ) (n) Mode Cnt Score Error Units >>> # Baseline >>> # VM options: -Xint >>> ByteArray.base_equals false 1024 avgt 10 16622.453 ? 
498.475 ns/op >>> ByteArray.base_equals true 1024 avgt 10 16889.244 ? 439.895 ns/op >>> >>> # Before patch >>> # VM options: -Xint -XX:-UseVectorizedMismatchIntrinsic >>> ByteArray.jdk_equals false 1024 avgt 10 106436.195 ? 3657.508 ns/op >>> ByteArray.jdk_equals true 1024 avgt 10 103306.001 ? 2723.130 ns/op >>> >>> # After patch >>> # VM options: -Xint -XX:+UseVectorizedMismatchIntrinsic >>> ByteArray.jdk_equals false 1024 avgt 10 448.764 ? 18.977 ns/op >>> ByteArray.jdk_equals true 1024 avgt 10 448.657 ? 22.656 ns/op >>> >>> >>> >>> The next step is to wire up C1. >>> >>> Further steps would be to substitute some of intrinsics added/used for compact strings with mismatch, then evaluate the performance. >>> >>> Thanks, >>> Paul. >>> >>> [*] Stubs to be used as intrinsics in the template interpreter need to be created during the initial stage of generation, otherwise the stub address is null which leads to a SEGV that?s hard to track down. >> > From vladimir.x.ivanov at oracle.com Fri Apr 15 13:39:41 2016 From: vladimir.x.ivanov at oracle.com (Vladimir Ivanov) Date: Fri, 15 Apr 2016 16:39:41 +0300 Subject: Hooking up the array mismatch stub as an intrinsic in the template interpreter In-Reply-To: <5710ECC5.1080706@oracle.com> References: <223E649D-201D-46D2-9C7F-5A536EA1405E@oracle.com> <5710DAA8.5080402@oracle.com> <0AFAAAA1-61FA-4DB2-B1B6-C1B816636C65@oracle.com> <5710ECC5.1080706@oracle.com> Message-ID: <5710EF1D.4080800@oracle.com> On 4/15/16 4:29 PM, Vladimir Ivanov wrote: > An idea how to avoid interpreter changes. > > Interpreter can't benefit from "intrinsifiable" methods directly, but if > you create a wrapper and call it instead [1], JIT-compilers can take > care of stand-alone versions for you. The interpreter will work with > them as if they are ordinary Java methods. ... or even add such logic directly into the JVM: for methods marked w/ @HotSpotIntrinsicCandidate (or better with some new annotation, since most intrinsics depend on the context they are invoked in) create an intrinsified stand-alone version. Best regards, Vladimir Ivanov > > The only missing case is early startup phase when everything is > interpreted, but we can add a special logic in the JVM to eagerly > compile such methods (either during startup or on the first invocation) > which would be much simpler than adding intrinsics specifically for the > interpreter. > > [1] > diff --git a/src/java.base/share/classes/java/util/ArraysSupport.java > b/src/java.base/share/classes/java/util/ArraysSupport.java > --- a/src/java.base/share/classes/java/util/ArraysSupport.java > +++ b/src/java.base/share/classes/java/util/ArraysSupport.java > @@ -26,6 +26,7 @@ > > import jdk.internal.HotSpotIntrinsicCandidate; > import jdk.internal.misc.Unsafe; > +import jdk.internal.vm.annotation.ForceInline; > > /** > * Utility methods to find a mismatch between two primitive arrays. > @@ -106,8 +107,16 @@ > * compliment of the number of remaining pairs of elements to be > checked in > * the tail of the two arrays. 
> */ > + @ForceInline > + static int vectorizedMismatch(Object a, long aOffset, > + Object b, long bOffset, > + int length, > + int log2ArrayIndexScale) { > + return vectorizedMismatch0(a, aOffset, b, bOffset, length, > log2ArrayIndexScale); > + } > + > @HotSpotIntrinsicCandidate > - static int vectorizedMismatch(Object a, long aOffset, > + private static int vectorizedMismatch0(Object a, long aOffset, > Object b, long bOffset, > int length, > int log2ArrayIndexScale) { > > On 4/15/16 4:07 PM, Paul Sandoz wrote: >> >>> On 15 Apr 2016, at 14:12, Coleen Phillimore >>> wrote: >>> >>> >>> I don't know why we'd add even more assembly code to the >>> interpreter. Why doesn't the JIT optimize this function instead? By >>> adding a stub in the interpreter does that prevent the JIT from >>> inlining this function since it's not invocation counted? >>> >> >> I have updated the webrev with C1 support [1] and determined, >> eyeballing generated code, that the stub call gets inlined for C1 and >> C2 and appears unaffected by the wiring up of that same stub in the >> template interpreter. >> >> A stub was added and wired up to C2 with the intention to wire that up >> to C1, and possible to the interpreter. One reason for the latter was >> because of the performance results presented in the last email >> (potentially ~200x over the current approach, and ~35x improvement >> over the original Java code). Does that matter? would you be concerned >> about that? >> >> Array equality is quite a fundamental operation so i was concerned >> about such a regression in the interpreter. >> >> Another reason for the latter, which i may be off base on here, is it >> might make it easier to consolidate the intrinsics added for compact >> string equality/comparison to this more general mismatch functionality. >> >> ? >> >> Regarding the changes to C1 in [1]. Like for the CRC intrinsics i >> added the _vectorizedMismatch intrinsic to the set of intrinsics that >> preserve state and can trap. Is that correct? Also i am not sure if >> the 32-bit part is correct. >> >> Thanks, >> Paul. >> >> [1] >> http://cr.openjdk.java.net/~psandoz/jdk9/JDK-8151268-int-c1-mismatch/webrev/ >> >> (Note: this is still incomplete i need to appropriately update all >> CPU-based code.) >> >> Benchmark (lastNEQ) (n) Mode Cnt Score Error >> Units >> # Baseline >> # VM options: -XX:TieredStopAtLevel=1 >> ByteArray.base_equals false 1024 avgt 10 1190.177 ? 21.387 >> ns/op >> ByteArray.base_equals true 1024 avgt 10 1191.767 ? 35.196 >> ns/op >> >> # Before patch >> # VM options: -XX:TieredStopAtLevel=1 -XX:-SpecialArraysEquals >> -XX:-UseVectorizedMismatchIntrinsic >> ByteArray.jdk_equals false 1024 avgt 10 208.014 ? 5.224 >> ns/op >> ByteArray.jdk_equals true 1024 avgt 10 218.271 ? 10.749 >> ns/op >> >> # After patch >> # VM options: -XX:TieredStopAtLevel=1 -XX:-SpecialArraysEquals >> -XX:+UseVectorizedMismatchIntrinsic >> ByteArray.jdk_equals false 1024 avgt 10 70.097 ? 2.321 >> ns/op >> ByteArray.jdk_equals true 1024 avgt 10 72.284 ? 1.578 >> ns/op >> >> >> >>> thanks, >>> Coleen >>> >>> >>> On 4/14/16 10:53 AM, Paul Sandoz wrote: >>>> Hi, >>>> >>>> I hooked up the array mismatch stub to the interpreter, with a bit >>>> of code cargo culting the CRC work and some lldb debugging [*] it >>>> appears to work and pass tests. 
>>>> >>>> Can someone have a quick look to see if i am not the right track here: >>>> >>>> >>>> http://cr.openjdk.java.net/~psandoz/jdk9/JDK-8151268-int-c1-mismatch/webrev/ >>>> >>>> >>>> >>>> >>>> Here are some quick numbers running using -Xint for byte[] equality: >>>> >>>> Benchmark (lastNEQ) (n) Mode Cnt Score >>>> Error Units >>>> # Baseline >>>> # VM options: -Xint >>>> ByteArray.base_equals false 1024 avgt 10 16622.453 ? >>>> 498.475 ns/op >>>> ByteArray.base_equals true 1024 avgt 10 16889.244 ? >>>> 439.895 ns/op >>>> >>>> # Before patch >>>> # VM options: -Xint -XX:-UseVectorizedMismatchIntrinsic >>>> ByteArray.jdk_equals false 1024 avgt 10 106436.195 ? >>>> 3657.508 ns/op >>>> ByteArray.jdk_equals true 1024 avgt 10 103306.001 ? >>>> 2723.130 ns/op >>>> >>>> # After patch >>>> # VM options: -Xint -XX:+UseVectorizedMismatchIntrinsic >>>> ByteArray.jdk_equals false 1024 avgt 10 448.764 ? >>>> 18.977 ns/op >>>> ByteArray.jdk_equals true 1024 avgt 10 448.657 ? >>>> 22.656 ns/op >>>> >>>> >>>> >>>> The next step is to wire up C1. >>>> >>>> Further steps would be to substitute some of intrinsics added/used >>>> for compact strings with mismatch, then evaluate the performance. >>>> >>>> Thanks, >>>> Paul. >>>> >>>> [*] Stubs to be used as intrinsics in the template interpreter need >>>> to be created during the initial stage of generation, otherwise the >>>> stub address is null which leads to a SEGV that?s hard to track down. >>> >> From coleen.phillimore at oracle.com Fri Apr 15 14:01:47 2016 From: coleen.phillimore at oracle.com (Coleen Phillimore) Date: Fri, 15 Apr 2016 10:01:47 -0400 Subject: Hooking up the array mismatch stub as an intrinsic in the template interpreter In-Reply-To: <0AFAAAA1-61FA-4DB2-B1B6-C1B816636C65@oracle.com> References: <223E649D-201D-46D2-9C7F-5A536EA1405E@oracle.com> <5710DAA8.5080402@oracle.com> <0AFAAAA1-61FA-4DB2-B1B6-C1B816636C65@oracle.com> Message-ID: <5710F44B.2070504@oracle.com> On 4/15/16 9:07 AM, Paul Sandoz wrote: >> On 15 Apr 2016, at 14:12, Coleen Phillimore wrote: >> >> >> I don't know why we'd add even more assembly code to the interpreter. Why doesn't the JIT optimize this function instead? By adding a stub in the interpreter does that prevent the JIT from inlining this function since it's not invocation counted? >> > I have updated the webrev with C1 support [1] and determined, eyeballing generated code, that the stub call gets inlined for C1 and C2 and appears unaffected by the wiring up of that same stub in the template interpreter. > > A stub was added and wired up to C2 with the intention to wire that up to C1, and possible to the interpreter. One reason for the latter was because of the performance results presented in the last email (potentially ~200x over the current approach, and ~35x improvement over the original Java code). Does that matter? would you be concerned about that? What workload is this running? What results do you get with refworkload? > Array equality is quite a fundamental operation so i was concerned about such a regression in the interpreter. The interpreter is mostly run during startup time so we'd like to see some workload results with perhaps the startup_3 benchmarks set. Again, we are trying to not have special case assembly code in the interpreter. Adding these sorts of special optimizations to the compilers makes a lot more sense. 
Coleen > > Another reason for the latter, which i may be off base on here, is it might make it easier to consolidate the intrinsics added for compact string equality/comparison to this more general mismatch functionality. > > ? > > Regarding the changes to C1 in [1]. Like for the CRC intrinsics i added the _vectorizedMismatch intrinsic to the set of intrinsics that preserve state and can trap. Is that correct? Also i am not sure if the 32-bit part is correct. > > Thanks, > Paul. > > [1] http://cr.openjdk.java.net/~psandoz/jdk9/JDK-8151268-int-c1-mismatch/webrev/ > (Note: this is still incomplete i need to appropriately update all CPU-based code.) > > Benchmark (lastNEQ) (n) Mode Cnt Score Error Units > # Baseline > # VM options: -XX:TieredStopAtLevel=1 > ByteArray.base_equals false 1024 avgt 10 1190.177 ? 21.387 ns/op > ByteArray.base_equals true 1024 avgt 10 1191.767 ? 35.196 ns/op > > # Before patch > # VM options: -XX:TieredStopAtLevel=1 -XX:-SpecialArraysEquals -XX:-UseVectorizedMismatchIntrinsic > ByteArray.jdk_equals false 1024 avgt 10 208.014 ? 5.224 ns/op > ByteArray.jdk_equals true 1024 avgt 10 218.271 ? 10.749 ns/op > > # After patch > # VM options: -XX:TieredStopAtLevel=1 -XX:-SpecialArraysEquals -XX:+UseVectorizedMismatchIntrinsic > ByteArray.jdk_equals false 1024 avgt 10 70.097 ? 2.321 ns/op > ByteArray.jdk_equals true 1024 avgt 10 72.284 ? 1.578 ns/op > > > >> thanks, >> Coleen >> >> >> On 4/14/16 10:53 AM, Paul Sandoz wrote: >>> Hi, >>> >>> I hooked up the array mismatch stub to the interpreter, with a bit of code cargo culting the CRC work and some lldb debugging [*] it appears to work and pass tests. >>> >>> Can someone have a quick look to see if i am not the right track here: >>> >>> http://cr.openjdk.java.net/~psandoz/jdk9/JDK-8151268-int-c1-mismatch/webrev/ >>> >>> >>> Here are some quick numbers running using -Xint for byte[] equality: >>> >>> Benchmark (lastNEQ) (n) Mode Cnt Score Error Units >>> # Baseline >>> # VM options: -Xint >>> ByteArray.base_equals false 1024 avgt 10 16622.453 ? 498.475 ns/op >>> ByteArray.base_equals true 1024 avgt 10 16889.244 ? 439.895 ns/op >>> >>> # Before patch >>> # VM options: -Xint -XX:-UseVectorizedMismatchIntrinsic >>> ByteArray.jdk_equals false 1024 avgt 10 106436.195 ? 3657.508 ns/op >>> ByteArray.jdk_equals true 1024 avgt 10 103306.001 ? 2723.130 ns/op >>> >>> # After patch >>> # VM options: -Xint -XX:+UseVectorizedMismatchIntrinsic >>> ByteArray.jdk_equals false 1024 avgt 10 448.764 ? 18.977 ns/op >>> ByteArray.jdk_equals true 1024 avgt 10 448.657 ? 22.656 ns/op >>> >>> >>> >>> The next step is to wire up C1. >>> >>> Further steps would be to substitute some of intrinsics added/used for compact strings with mismatch, then evaluate the performance. >>> >>> Thanks, >>> Paul. >>> >>> [*] Stubs to be used as intrinsics in the template interpreter need to be created during the initial stage of generation, otherwise the stub address is null which leads to a SEGV that?s hard to track down. From coleen.phillimore at oracle.com Fri Apr 15 14:02:16 2016 From: coleen.phillimore at oracle.com (Coleen Phillimore) Date: Fri, 15 Apr 2016 10:02:16 -0400 Subject: Hooking up the array mismatch stub as an intrinsic in the template interpreter In-Reply-To: <5710ECC5.1080706@oracle.com> References: <223E649D-201D-46D2-9C7F-5A536EA1405E@oracle.com> <5710DAA8.5080402@oracle.com> <0AFAAAA1-61FA-4DB2-B1B6-C1B816636C65@oracle.com> <5710ECC5.1080706@oracle.com> Message-ID: <5710F468.5030006@oracle.com> Thank you, Vladimir. 
Coleen On 4/15/16 9:29 AM, Vladimir Ivanov wrote: > An idea how to avoid interpreter changes. > > Interpreter can't benefit from "intrinsifiable" methods directly, but > if you create a wrapper and call it instead [1], JIT-compilers can > take care of stand-alone versions for you. The interpreter will work > with them as if they are ordinary Java methods. > > The only missing case is early startup phase when everything is > interpreted, but we can add a special logic in the JVM to eagerly > compile such methods (either during startup or on the first > invocation) which would be much simpler than adding intrinsics > specifically for the interpreter. > > Best regards, > Vladimir Ivanov > > [1] > diff --git a/src/java.base/share/classes/java/util/ArraysSupport.java > b/src/java.base/share/classes/java/util/ArraysSupport.java > --- a/src/java.base/share/classes/java/util/ArraysSupport.java > +++ b/src/java.base/share/classes/java/util/ArraysSupport.java > @@ -26,6 +26,7 @@ > > import jdk.internal.HotSpotIntrinsicCandidate; > import jdk.internal.misc.Unsafe; > +import jdk.internal.vm.annotation.ForceInline; > > /** > * Utility methods to find a mismatch between two primitive arrays. > @@ -106,8 +107,16 @@ > * compliment of the number of remaining pairs of elements to be > checked in > * the tail of the two arrays. > */ > + @ForceInline > + static int vectorizedMismatch(Object a, long aOffset, > + Object b, long bOffset, > + int length, > + int log2ArrayIndexScale) { > + return vectorizedMismatch0(a, aOffset, b, bOffset, length, > log2ArrayIndexScale); > + } > + > @HotSpotIntrinsicCandidate > - static int vectorizedMismatch(Object a, long aOffset, > + private static int vectorizedMismatch0(Object a, long aOffset, > Object b, long bOffset, > int length, > int log2ArrayIndexScale) { > > On 4/15/16 4:07 PM, Paul Sandoz wrote: >> >>> On 15 Apr 2016, at 14:12, Coleen Phillimore >>> wrote: >>> >>> >>> I don't know why we'd add even more assembly code to the >>> interpreter. Why doesn't the JIT optimize this function instead? By >>> adding a stub in the interpreter does that prevent the JIT from >>> inlining this function since it's not invocation counted? >>> >> >> I have updated the webrev with C1 support [1] and determined, >> eyeballing generated code, that the stub call gets inlined for C1 and >> C2 and appears unaffected by the wiring up of that same stub in the >> template interpreter. >> >> A stub was added and wired up to C2 with the intention to wire that >> up to C1, and possible to the interpreter. One reason for the latter >> was because of the performance results presented in the last email >> (potentially ~200x over the current approach, and ~35x improvement >> over the original Java code). Does that matter? would you be >> concerned about that? >> >> Array equality is quite a fundamental operation so i was concerned >> about such a regression in the interpreter. >> >> Another reason for the latter, which i may be off base on here, is it >> might make it easier to consolidate the intrinsics added for compact >> string equality/comparison to this more general mismatch functionality. >> >> ? >> >> Regarding the changes to C1 in [1]. Like for the CRC intrinsics i >> added the _vectorizedMismatch intrinsic to the set of intrinsics that >> preserve state and can trap. Is that correct? Also i am not sure if >> the 32-bit part is correct. >> >> Thanks, >> Paul. 
>> >> [1] >> http://cr.openjdk.java.net/~psandoz/jdk9/JDK-8151268-int-c1-mismatch/webrev/ >> (Note: this is still incomplete i need to appropriately update all >> CPU-based code.) >> >> Benchmark (lastNEQ) (n) Mode Cnt Score Error Units >> # Baseline >> # VM options: -XX:TieredStopAtLevel=1 >> ByteArray.base_equals false 1024 avgt 10 1190.177 ? 21.387 >> ns/op >> ByteArray.base_equals true 1024 avgt 10 1191.767 ? 35.196 >> ns/op >> >> # Before patch >> # VM options: -XX:TieredStopAtLevel=1 -XX:-SpecialArraysEquals >> -XX:-UseVectorizedMismatchIntrinsic >> ByteArray.jdk_equals false 1024 avgt 10 208.014 ? 5.224 >> ns/op >> ByteArray.jdk_equals true 1024 avgt 10 218.271 ? 10.749 >> ns/op >> >> # After patch >> # VM options: -XX:TieredStopAtLevel=1 -XX:-SpecialArraysEquals >> -XX:+UseVectorizedMismatchIntrinsic >> ByteArray.jdk_equals false 1024 avgt 10 70.097 ? 2.321 >> ns/op >> ByteArray.jdk_equals true 1024 avgt 10 72.284 ? 1.578 >> ns/op >> >> >> >>> thanks, >>> Coleen >>> >>> >>> On 4/14/16 10:53 AM, Paul Sandoz wrote: >>>> Hi, >>>> >>>> I hooked up the array mismatch stub to the interpreter, with a bit >>>> of code cargo culting the CRC work and some lldb debugging [*] it >>>> appears to work and pass tests. >>>> >>>> Can someone have a quick look to see if i am not the right track here: >>>> >>>> http://cr.openjdk.java.net/~psandoz/jdk9/JDK-8151268-int-c1-mismatch/webrev/ >>>> >>>> >>>> >>>> >>>> Here are some quick numbers running using -Xint for byte[] equality: >>>> >>>> Benchmark (lastNEQ) (n) Mode Cnt Score Error >>>> Units >>>> # Baseline >>>> # VM options: -Xint >>>> ByteArray.base_equals false 1024 avgt 10 16622.453 ? >>>> 498.475 ns/op >>>> ByteArray.base_equals true 1024 avgt 10 16889.244 ? >>>> 439.895 ns/op >>>> >>>> # Before patch >>>> # VM options: -Xint -XX:-UseVectorizedMismatchIntrinsic >>>> ByteArray.jdk_equals false 1024 avgt 10 106436.195 ? >>>> 3657.508 ns/op >>>> ByteArray.jdk_equals true 1024 avgt 10 103306.001 ? >>>> 2723.130 ns/op >>>> >>>> # After patch >>>> # VM options: -Xint -XX:+UseVectorizedMismatchIntrinsic >>>> ByteArray.jdk_equals false 1024 avgt 10 448.764 ? >>>> 18.977 ns/op >>>> ByteArray.jdk_equals true 1024 avgt 10 448.657 ? >>>> 22.656 ns/op >>>> >>>> >>>> >>>> The next step is to wire up C1. >>>> >>>> Further steps would be to substitute some of intrinsics added/used >>>> for compact strings with mismatch, then evaluate the performance. >>>> >>>> Thanks, >>>> Paul. >>>> >>>> [*] Stubs to be used as intrinsics in the template interpreter need >>>> to be created during the initial stage of generation, otherwise the >>>> stub address is null which leads to a SEGV that?s hard to track down. >>> >> From stefan.karlsson at oracle.com Fri Apr 15 14:13:40 2016 From: stefan.karlsson at oracle.com (Stefan Karlsson) Date: Fri, 15 Apr 2016 16:13:40 +0200 Subject: RFR: 8067785: Using AlwaysPreTouch does not always touch all pages Message-ID: <5710F714.5070601@oracle.com> Hi all, Please review this patch that ensures that VirtualSpace::expand_by pre-touches all committed memory. http://cr.openjdk.java.net/~stefank/8067785/webrev.01 https://bugs.openjdk.java.net/browse/JDK-8067785 1) Previously, we pre-touched the memory between the old and new _high pointers. Since the _high variable isn't page aligned, the pre-touch code didn't touch all committed pages. I've moved the pre-touch code to be done for every os::commit_memory call in VirtuaSpace::expand_by. 2) expand_by has segregated the VirtualSpace into three segements. 
[small pages | large pages | small pages] so that we can have VirtualSpaces that are not large page size aligned. Previously, only the middle section called commit_memory with an alignment hint, and the small pages sections called commit_memory without a small pages hint. On all platforms, except Solaris, this boils down to the same code. On Solaris we have this additional code executed: size_t page_size = page_size_for_alignment(alignment_hint); if (page_size > (size_t) vm_page_size()) { (void)Solaris::setup_large_pages(addr, bytes, page_size); } But since the alignment_hint is set to vm_page_size we won't try to setup_large_pages here either. 3) The patch also contains a few style changes to make the VirtualSpace::expand_by easier to read (at least for me) Tested with JPRT and -XX:+ExecuteInternalVMTests with AlwaysPreTouch temporarily forced to true. Thanks, StefanK From paul.sandoz at oracle.com Fri Apr 15 15:00:35 2016 From: paul.sandoz at oracle.com (Paul Sandoz) Date: Fri, 15 Apr 2016 17:00:35 +0200 Subject: Hooking up the array mismatch stub as an intrinsic in the template interpreter In-Reply-To: <5710F44B.2070504@oracle.com> References: <223E649D-201D-46D2-9C7F-5A536EA1405E@oracle.com> <5710DAA8.5080402@oracle.com> <0AFAAAA1-61FA-4DB2-B1B6-C1B816636C65@oracle.com> <5710F44B.2070504@oracle.com> Message-ID: <8EE611A5-F366-4375-BDC9-FE29EA509DC7@oracle.com> > On 15 Apr 2016, at 16:01, Coleen Phillimore wrote: > > > > On 4/15/16 9:07 AM, Paul Sandoz wrote: >>> On 15 Apr 2016, at 14:12, Coleen Phillimore wrote: >>> >>> >>> I don't know why we'd add even more assembly code to the interpreter. Why doesn't the JIT optimize this function instead? By adding a stub in the interpreter does that prevent the JIT from inlining this function since it's not invocation counted? >>> >> I have updated the webrev with C1 support [1] and determined, eyeballing generated code, that the stub call gets inlined for C1 and C2 and appears unaffected by the wiring up of that same stub in the template interpreter. >> >> A stub was added and wired up to C2 with the intention to wire that up to C1, and possible to the interpreter. One reason for the latter was because of the performance results presented in the last email (potentially ~200x over the current approach, and ~35x improvement over the original Java code). Does that matter? would you be concerned about that? > > What workload is this running? > byte[] array comparison of 1024 bytes. It was just a quick smoke test that the intrinsic was working as expected. > What results do you get with ref workload? See results starting with the ?base_?. For more details about the benchmarks with some existing analysis see here: http://mail.openjdk.java.net/pipermail/hotspot-dev/2015-December/021225.html >> Array equality is quite a fundamental operation so i was concerned about such a regression in the interpreter. > > The interpreter is mostly run during startup time so we'd like to see some workload results with perhaps the startup_3 benchmarks set. > Ok. > Again, we are trying to not have special case assembly code in the interpreter. Adding these sorts of special optimizations to the compilers makes a lot more sense. > Yes. I talked a bit off-line with Aleksey. I was off base about the compact string work, adding the intrinsic to the interpreter will not help here because a dependency issue is at VM genesis time. This can be worked around by aliasing intrinsics to avoid the class dependency. 
So, i will withdraw the additions to the template interpreter and focus the webrev on C1. Thanks, Paul. From paul.sandoz at oracle.com Fri Apr 15 15:06:27 2016 From: paul.sandoz at oracle.com (Paul Sandoz) Date: Fri, 15 Apr 2016 17:06:27 +0200 Subject: Hooking up the array mismatch stub as an intrinsic in the template interpreter In-Reply-To: <5710EF1D.4080800@oracle.com> References: <223E649D-201D-46D2-9C7F-5A536EA1405E@oracle.com> <5710DAA8.5080402@oracle.com> <0AFAAAA1-61FA-4DB2-B1B6-C1B816636C65@oracle.com> <5710ECC5.1080706@oracle.com> <5710EF1D.4080800@oracle.com> Message-ID: <1AD4ED68-5925-41F4-AD89-F04BAA361113@oracle.com> > On 15 Apr 2016, at 15:39, Vladimir Ivanov wrote: > > > > On 4/15/16 4:29 PM, Vladimir Ivanov wrote: >> An idea how to avoid interpreter changes. >> >> Interpreter can't benefit from "intrinsifiable" methods directly, but if >> you create a wrapper and call it instead [1], JIT-compilers can take >> care of stand-alone versions for you. The interpreter will work with >> them as if they are ordinary Java methods. > > ... or even add such logic directly into the JVM: for methods marked w/ @HotSpotIntrinsicCandidate (or better with some new annotation, since most intrinsics depend on the context they are invoked in) create an intrinsified stand-alone version. > Very interesting, some good lateral thinking here! Thanks, Paul. > Best regards, > Vladimir Ivanov > >> >> The only missing case is early startup phase when everything is >> interpreted, but we can add a special logic in the JVM to eagerly >> compile such methods (either during startup or on the first invocation) >> which would be much simpler than adding intrinsics specifically for the >> interpreter. >> >> [1] >> diff --git a/src/java.base/share/classes/java/util/ArraysSupport.java >> b/src/java.base/share/classes/java/util/ArraysSupport.java >> --- a/src/java.base/share/classes/java/util/ArraysSupport.java >> +++ b/src/java.base/share/classes/java/util/ArraysSupport.java >> @@ -26,6 +26,7 @@ >> >> import jdk.internal.HotSpotIntrinsicCandidate; >> import jdk.internal.misc.Unsafe; >> +import jdk.internal.vm.annotation.ForceInline; >> >> /** >> * Utility methods to find a mismatch between two primitive arrays. >> @@ -106,8 +107,16 @@ >> * compliment of the number of remaining pairs of elements to be >> checked in >> * the tail of the two arrays. >> */ >> + @ForceInline >> + static int vectorizedMismatch(Object a, long aOffset, >> + Object b, long bOffset, >> + int length, >> + int log2ArrayIndexScale) { >> + return vectorizedMismatch0(a, aOffset, b, bOffset, length, >> log2ArrayIndexScale); >> + } >> + >> @HotSpotIntrinsicCandidate >> - static int vectorizedMismatch(Object a, long aOffset, >> + private static int vectorizedMismatch0(Object a, long aOffset, >> Object b, long bOffset, >> int length, >> int log2ArrayIndexScale) { >> >> On 4/15/16 4:07 PM, Paul Sandoz wrote: >>> >>>> On 15 Apr 2016, at 14:12, Coleen Phillimore >>>> wrote: >>>> >>>> >>>> I don't know why we'd add even more assembly code to the >>>> interpreter. Why doesn't the JIT optimize this function instead? By >>>> adding a stub in the interpreter does that prevent the JIT from >>>> inlining this function since it's not invocation counted? >>>> >>> >>> I have updated the webrev with C1 support [1] and determined, >>> eyeballing generated code, that the stub call gets inlined for C1 and >>> C2 and appears unaffected by the wiring up of that same stub in the >>> template interpreter. 
>>> >>> A stub was added and wired up to C2 with the intention to wire that up >>> to C1, and possible to the interpreter. One reason for the latter was >>> because of the performance results presented in the last email >>> (potentially ~200x over the current approach, and ~35x improvement >>> over the original Java code). Does that matter? would you be concerned >>> about that? >>> >>> Array equality is quite a fundamental operation so i was concerned >>> about such a regression in the interpreter. >>> >>> Another reason for the latter, which i may be off base on here, is it >>> might make it easier to consolidate the intrinsics added for compact >>> string equality/comparison to this more general mismatch functionality. >>> >>> ? >>> >>> Regarding the changes to C1 in [1]. Like for the CRC intrinsics i >>> added the _vectorizedMismatch intrinsic to the set of intrinsics that >>> preserve state and can trap. Is that correct? Also i am not sure if >>> the 32-bit part is correct. >>> >>> Thanks, >>> Paul. >>> >>> [1] >>> http://cr.openjdk.java.net/~psandoz/jdk9/JDK-8151268-int-c1-mismatch/webrev/ >>> >>> (Note: this is still incomplete i need to appropriately update all >>> CPU-based code.) >>> >>> Benchmark (lastNEQ) (n) Mode Cnt Score Error >>> Units >>> # Baseline >>> # VM options: -XX:TieredStopAtLevel=1 >>> ByteArray.base_equals false 1024 avgt 10 1190.177 ? 21.387 >>> ns/op >>> ByteArray.base_equals true 1024 avgt 10 1191.767 ? 35.196 >>> ns/op >>> >>> # Before patch >>> # VM options: -XX:TieredStopAtLevel=1 -XX:-SpecialArraysEquals >>> -XX:-UseVectorizedMismatchIntrinsic >>> ByteArray.jdk_equals false 1024 avgt 10 208.014 ? 5.224 >>> ns/op >>> ByteArray.jdk_equals true 1024 avgt 10 218.271 ? 10.749 >>> ns/op >>> >>> # After patch >>> # VM options: -XX:TieredStopAtLevel=1 -XX:-SpecialArraysEquals >>> -XX:+UseVectorizedMismatchIntrinsic >>> ByteArray.jdk_equals false 1024 avgt 10 70.097 ? 2.321 >>> ns/op >>> ByteArray.jdk_equals true 1024 avgt 10 72.284 ? 1.578 >>> ns/op >>> >>> >>> >>>> thanks, >>>> Coleen >>>> >>>> >>>> On 4/14/16 10:53 AM, Paul Sandoz wrote: >>>>> Hi, >>>>> >>>>> I hooked up the array mismatch stub to the interpreter, with a bit >>>>> of code cargo culting the CRC work and some lldb debugging [*] it >>>>> appears to work and pass tests. >>>>> >>>>> Can someone have a quick look to see if i am not the right track here: >>>>> >>>>> >>>>> http://cr.openjdk.java.net/~psandoz/jdk9/JDK-8151268-int-c1-mismatch/webrev/ >>>>> >>>>> >>>>> >>>>> >>>>> Here are some quick numbers running using -Xint for byte[] equality: >>>>> >>>>> Benchmark (lastNEQ) (n) Mode Cnt Score >>>>> Error Units >>>>> # Baseline >>>>> # VM options: -Xint >>>>> ByteArray.base_equals false 1024 avgt 10 16622.453 ? >>>>> 498.475 ns/op >>>>> ByteArray.base_equals true 1024 avgt 10 16889.244 ? >>>>> 439.895 ns/op >>>>> >>>>> # Before patch >>>>> # VM options: -Xint -XX:-UseVectorizedMismatchIntrinsic >>>>> ByteArray.jdk_equals false 1024 avgt 10 106436.195 ? >>>>> 3657.508 ns/op >>>>> ByteArray.jdk_equals true 1024 avgt 10 103306.001 ? >>>>> 2723.130 ns/op >>>>> >>>>> # After patch >>>>> # VM options: -Xint -XX:+UseVectorizedMismatchIntrinsic >>>>> ByteArray.jdk_equals false 1024 avgt 10 448.764 ? >>>>> 18.977 ns/op >>>>> ByteArray.jdk_equals true 1024 avgt 10 448.657 ? >>>>> 22.656 ns/op >>>>> >>>>> >>>>> >>>>> The next step is to wire up C1. >>>>> >>>>> Further steps would be to substitute some of intrinsics added/used >>>>> for compact strings with mismatch, then evaluate the performance. 
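The consolidation step mentioned above relies on equality being expressible through a mismatch primitive. A trivial stand-alone sketch of that relationship (plain scalar C++ with made-up helper names; the real stub selected by -XX:+UseVectorizedMismatchIntrinsic compares word-at-a-time rather than byte-by-byte):

#include <cstddef>

// equals() layered on an "index of first mismatch" primitive: equal lengths
// plus no mismatch means the arrays are equal.
static long first_mismatch(const unsigned char* a, const unsigned char* b, size_t len) {
  for (size_t i = 0; i < len; i++) {
    if (a[i] != b[i]) {
      return static_cast<long>(i);   // index of the first differing byte
    }
  }
  return -1;                         // no difference found
}

static bool arrays_equal(const unsigned char* a, size_t a_len,
                         const unsigned char* b, size_t b_len) {
  return a_len == b_len && first_mismatch(a, b, a_len) == -1;
}

Lexicographic comparison falls out of the same primitive: compare the elements at the first mismatch index, or the lengths if there is none, which is why a single mismatch stub could in principle back both the equality and comparison intrinsics used for compact strings.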
>>>>> >>>>> Thanks, >>>>> Paul. >>>>> >>>>> [*] Stubs to be used as intrinsics in the template interpreter need >>>>> to be created during the initial stage of generation, otherwise the >>>>> stub address is null which leads to a SEGV that?s hard to track down. >>>> >>> From lois.foltan at oracle.com Fri Apr 15 17:02:52 2016 From: lois.foltan at oracle.com (Lois Foltan) Date: Fri, 15 Apr 2016 13:02:52 -0400 Subject: RFR (S): JDK-8152949: Jigsaw crash when Klass in _fixup_module_field_list is unloaded In-Reply-To: <5710E86F.3090606@oracle.com> References: <570FEFAB.2070809@oracle.com> <570FFC4F.7020000@oracle.com> <5710E397.3020106@oracle.com> <5710E86F.3090606@oracle.com> Message-ID: <57111EBC.4000308@oracle.com> On 4/15/2016 9:11 AM, Stefan Karlsson wrote: > Hi Lois, > > On 2016-04-15 14:50, Lois Foltan wrote: >> >> On 4/14/2016 4:23 PM, Dean Long wrote: >>> Do the inc_keep_alive() and dec_keep_alive() updates need to be >>> atomic by any chance? >> >> Thanks Dean for the review and good point. I will make that change >> and send out an updated webrev. > > When would we ever race on the _keep_alive variable? Or is this more > of a defensive change to safeguard against future changes? Hi Stefan, In start up before module system initialization in complete I believe the VM is single threaded, so the increment/decrement reference counts do not need to be atomic. Adding it is a defensive move in case the reference count is ever used passed start up in the future. It kind of does seem a bit excessive, sounds like you agree? Thanks, Lois > > Thanks, > StefanK > >> Lois >> >>> >>> dl >>> >>> On 4/14/2016 12:29 PM, Lois Foltan wrote: >>>> Hello, >>>> >>>> Please review the following fix: >>>> >>>> Webrev: >>>> http://cr.openjdk.java.net/~lfoltan/bug_jdk8152949/ >>>> >>>> Bug: Jigsaw crash when Klass in _fixup_module_field_list is unloaded >>>> https://bugs.openjdk.java.net/browse/JDK-8152949 >>>> >>>> Summary: >>>> Prior to java.base being defined to the VM by the module system >>>> initialization, classes loaded must be saved on a fixup list in >>>> order to later have their java.lang.Class' module field patched >>>> with java.base's java.lang.reflect.Module object once java.base is >>>> defined. Before module system initialization is complete, all >>>> classes loaded must have java.base as their defining module and be >>>> loaded by the boot loader. It was erroneously assumed that all >>>> classes placed on the module fixup list therefore would not die >>>> before java.base was defined. This assumption did not hold for >>>> anonymous classes which have a shorter lifetime than the boot >>>> loader. Test cases run with a small heap, -Xmx2m, would cause GC >>>> to unload the anonymous classes on the fixup list, later causing >>>> issues when an attempt was made to patch these classes with >>>> java.base's java.lang.reflect.Module object. Thank you to Per Liden >>>> and Stefan Karlsson for contributing this fix for the runtime team. 
>>>> >>>> Test: >>>> - java/lang, java/util, java/io, all Hotspot jtreg tests, Hotspot >>>> colocated tests & noncolo.quick.testlist >>>> - several iterations of ConcurrentLinkedQueue/RemoveLeak.java which >>>> exhibited the problem >>> >> > From serguei.spitsyn at oracle.com Fri Apr 15 17:17:28 2016 From: serguei.spitsyn at oracle.com (serguei.spitsyn at oracle.com) Date: Fri, 15 Apr 2016 10:17:28 -0700 Subject: RFR: 8153749 - New capability can_generate_early_class_hook_events In-Reply-To: <5710C9DF.9090007@oracle.com> References: <570F53B1.90809@oracle.com> <570FA0E8.5080606@oracle.com> <570FFDF4.8080806@oracle.com> <5710C9DF.9090007@oracle.com> Message-ID: <57112228.5040201@oracle.com> On 4/15/16 04:00, Alan Bateman wrote: > > > On 14/04/2016 21:30, serguei.spitsyn at oracle.com wrote: >> Alan, >> >> This is for sanity check: >> >> The updated hotspot webrev: >> http://cr.openjdk.java.net/~sspitsyn/webrevs/2016/hotspot/8153749-Jigsaw-newcap.hs2/ >> >> Please, note that the *src/share/vm/prims/jvmtiEnvBase.hpp* was >> corrected too. >> > In can_generate_early_class_hook_events then "can be posted" or "may > be posted" might be better than "could be posted". Also the end tag > '>' at L10002 should be probably be on the proceeding line. Fixed, thanks. > > Otherwise looks okay to me. Thanks, Alan! Serguei > > -Alan From daniel.daugherty at oracle.com Fri Apr 15 17:26:12 2016 From: daniel.daugherty at oracle.com (Daniel D. Daugherty) Date: Fri, 15 Apr 2016 11:26:12 -0600 Subject: hs-rt and main are CLOSED In-Reply-To: <5710CC3A.6000302@oracle.com> References: <5710CC3A.6000302@oracle.com> Message-ID: <57112434.2050903@oracle.com> Greetings, The last changesets from JDK9-hs-rt have been pushed to JDK9-hs. JDK9-hs-rt is closed. Please do not push any changes there. JDK9-hs is *still closed* until Jesper has the chance to do his own sanity checks and declare it open again. Leonid is also doing some Adhoc testing to verify the state of JDK9-hs. Dan On 4/15/16 5:10 AM, Jesper Wilhelmsson wrote: > Hi, > > Since I've gotten the question a few times today I just want to > clarify that jdk9/hs-rt AND jdk9/hs are both closed due to the merge > of these two repos. > > We will let you know once everything is done. > > Thanks, > /Jesper > From dean.long at oracle.com Fri Apr 15 18:26:12 2016 From: dean.long at oracle.com (Dean Long) Date: Fri, 15 Apr 2016 11:26:12 -0700 Subject: RFR (S): JDK-8152949: Jigsaw crash when Klass in _fixup_module_field_list is unloaded In-Reply-To: <57111EBC.4000308@oracle.com> References: <570FEFAB.2070809@oracle.com> <570FFC4F.7020000@oracle.com> <5710E397.3020106@oracle.com> <5710E86F.3090606@oracle.com> <57111EBC.4000308@oracle.com> Message-ID: <57113244.1040203@oracle.com> On 4/15/2016 10:02 AM, Lois Foltan wrote: > > On 4/15/2016 9:11 AM, Stefan Karlsson wrote: >> Hi Lois, >> >> On 2016-04-15 14:50, Lois Foltan wrote: >>> >>> On 4/14/2016 4:23 PM, Dean Long wrote: >>>> Do the inc_keep_alive() and dec_keep_alive() updates need to be >>>> atomic by any chance? >>> >>> Thanks Dean for the review and good point. I will make that change >>> and send out an updated webrev. >> >> When would we ever race on the _keep_alive variable? Or is this more >> of a defensive change to safeguard against future changes? > > Hi Stefan, > > In start up before module system initialization in complete I believe > the VM is single threaded, so the increment/decrement reference counts > do not need to be atomic. 
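A minimal stand-alone sketch of the point being made here, including the assert suggested in the reply just below (invented names; the flag stands in for Universe::is_module_initialized() and none of this is the actual webrev code):

#include <cassert>

// While module system bootstrap is still single-threaded, a plain counter is
// sufficient; the asserts pin that assumption down so any use after startup
// would fail loudly in debug builds.
static bool module_system_initialized = false;   // stand-in for Universe::is_module_initialized()

class KeepAliveCount {
  int _keep_alive = 0;   // deliberately a plain int, no atomic increment/decrement

 public:
  void inc_keep_alive() {
    assert(!module_system_initialized && "only expected during single-threaded startup");
    _keep_alive++;
  }

  void dec_keep_alive() {
    assert(!module_system_initialized && "only expected during single-threaded startup");
    assert(_keep_alive > 0 && "unbalanced dec_keep_alive");
    _keep_alive--;
  }

  bool keeps_alive() const { return _keep_alive > 0; }
};

int main() {
  KeepAliveCount count;
  count.inc_keep_alive();   // class parked on the module fixup list
  count.dec_keep_alive();   // module field patched, reference released
  return count.keeps_alive() ? 1 : 0;
}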
Adding it is a defensive move in case the > reference count is ever used passed start up in the future. It kind > of does seem a bit excessive, sounds like you agree? > It does seems excessive if we are single threaded. So calling Universe::is_module_initialized() should always return false in those functions? If so, then you could add an assert. dl > Thanks, > Lois > >> >> Thanks, >> StefanK >> >>> Lois >>> >>>> >>>> dl >>>> >>>> On 4/14/2016 12:29 PM, Lois Foltan wrote: >>>>> Hello, >>>>> >>>>> Please review the following fix: >>>>> >>>>> Webrev: >>>>> http://cr.openjdk.java.net/~lfoltan/bug_jdk8152949/ >>>>> >>>>> Bug: Jigsaw crash when Klass in _fixup_module_field_list is unloaded >>>>> https://bugs.openjdk.java.net/browse/JDK-8152949 >>>>> >>>>> Summary: >>>>> Prior to java.base being defined to the VM by the module system >>>>> initialization, classes loaded must be saved on a fixup list in >>>>> order to later have their java.lang.Class' module field patched >>>>> with java.base's java.lang.reflect.Module object once java.base is >>>>> defined. Before module system initialization is complete, all >>>>> classes loaded must have java.base as their defining module and be >>>>> loaded by the boot loader. It was erroneously assumed that all >>>>> classes placed on the module fixup list therefore would not die >>>>> before java.base was defined. This assumption did not hold for >>>>> anonymous classes which have a shorter lifetime than the boot >>>>> loader. Test cases run with a small heap, -Xmx2m, would cause GC >>>>> to unload the anonymous classes on the fixup list, later causing >>>>> issues when an attempt was made to patch these classes with >>>>> java.base's java.lang.reflect.Module object. Thank you to Per >>>>> Liden and Stefan Karlsson for contributing this fix for the >>>>> runtime team. >>>>> >>>>> Test: >>>>> - java/lang, java/util, java/io, all Hotspot jtreg tests, Hotspot >>>>> colocated tests & noncolo.quick.testlist >>>>> - several iterations of ConcurrentLinkedQueue/RemoveLeak.java >>>>> which exhibited the problem >>>> >>> >> > From volker.simonis at gmail.com Fri Apr 15 18:28:12 2016 From: volker.simonis at gmail.com (Volker Simonis) Date: Fri, 15 Apr 2016 20:28:12 +0200 Subject: RFR (XL): 8152664 - Support non-continuous CodeBlobs in HotSpot In-Reply-To: <20160413133128.GV9504@rbackman> References: <20160407121221.GQ9504@rbackman> <20160411090501.GS9504@rbackman> <20160411115521.GU9504@rbackman> <20160413133128.GV9504@rbackman> Message-ID: Hi, this one was a real puzzler :) But I finally found some quiet hours in the office today and came up with this rather simple solution: http://cr.openjdk.java.net/~simonis/webrevs/2016/8151956/ First I've tried to only move the relocation handling out of the CodeBlob constructor into the derived constructors but that didn't work very well and was quite complicated. So finally, I've just moved the call to CodeBuffer::copy_code_and_locs_to() from the CodeBlob constructor into the nmethod and RuntimeBlob constructors respectively. I couldn't find a reason why we shouldn't do this. The change is minimal and makes the whole handling more robust. I've compiled and smoke tested with JVM 98 on Linux/x86_64, Linux/ppc64le and Solaris/SPARC. I will run some more tests on Monday, but it would be great if you (i.e. Andrew) could verify this fix on ARM and if you (i.e. Rickard/Dean) could run some of your internal tests. 
I'd also like to ask if I should submit an extra RFR for 8151956 with my fix or if we should close 8151956 and fix it as part of Rickard's change for 8152664. I'd be happy with both solutions :) A nice weekend everybody, Volker On Wed, Apr 13, 2016 at 3:31 PM, Rickard B?ckman wrote: > Volker, > > yes, I didn't realize at first that the nmethod was casted to a > CompiledMethod before the call to consts_begin(). Otherwise it would > have used the non-virtual consts_begin of nmethod that didn't have any > virtual calls. > > The entire code chain and looking up itself from the CodeCache before > fully constructed seems quite problematic. Even before the changes I > made. Previous to my changes the calls would have succeeded but returned > header_begin() or this for all the consts_begin, consts_end, etc... ? > > /R > > On 04/11, Volker Simonis wrote: >> Rickard, Dean, >> >> I'm afraid all this hacks can not work. It doesn't help to make >> CompiledMethod::consts_begin() non-virtual and then calling a virtual >> function from it. The problem ist that at the point where you call >> consts_begin_v(), the vtable of 'this' is still the one of CodeBlob and >> this results in calling yet another arbitrary function: >> >> #0 CodeBlob::is_locked_by_vm (this=0x3fff607d0c10) at >> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/codeBlob.hpp:168 >> #1 0x00003fffb6e38048 in CompiledMethod::consts_begin >> (this=0x3fff607d0c10) at >> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/compiledMethod.hpp:255 >> #2 0x00003fffb758d658 in RelocIterator::initialize (this=0x3ffdfd3fc9a8, >> nm=0x3fff607d0c10, begin=0x3fff607d0fac "\001", limit=0x0) at >> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.cpp:142 >> #3 0x00003fffb6ace56c in RelocIterator::RelocIterator >> (this=0x3ffdfd3fc9a8, nm=0x3fff607d0c10, begin=0x3fff607d0fac "\001", >> limit=0x0) at >> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.hpp:1378 >> #4 0x00003fffb7591afc in trampoline_stub_Relocation::get_trampoline_for >> (call=0x3fff607d0fac "\001", code=0x3fff607d0c10) at >> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.cpp:849 >> #5 0x00003fffb741ba4c in NativeCall::get_trampoline (this=0x3fff607d0fac) >> at >> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/cpu/ppc/vm/nativeInst_ppc.cpp:146 >> #6 0x00003fffb7596a34 in Relocation::pd_call_destination >> (this=0x3ffdfd3fcd10, orig_addr=0x3fff6033482c "\001") at >> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/cpu/ppc/vm/relocInfo_ppc.cpp:87 >> #7 0x00003fffb758f71c in CallRelocation::fix_relocation_after_move >> (this=0x3ffdfd3fcd10, src=0x3ffdfd3fdbc0, dest=0x3ffdfd3fcdd8) at >> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.cpp:506 >> #8 0x00003fffb6c48914 in CodeBuffer::relocate_code_to >> (this=0x3ffdfd3fdbc0, dest=0x3ffdfd3fcdd8) at >> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/asm/codeBuffer.cpp:812 >> #9 0x00003fffb6c48480 in CodeBuffer::copy_code_to (this=0x3ffdfd3fdbc0, >> dest_blob=0x3fff607d0c10) at >> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/asm/codeBuffer.cpp:748 >> #10 0x00003fffb6c426ec in CodeBuffer::copy_code_and_locs_to >> (this=0x3ffdfd3fdbc0, blob=0x3fff607d0c10) at >> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/asm/codeBuffer.hpp:607 >> #11 0x00003fffb6c3f8b0 in CodeBlob::CodeBlob (this=0x3fff607d0c10, >> name=0x3fffb7a760f8 "nmethod", layout=..., cb=0x3ffdfd3fdbc0, >> 
frame_complete_offset=20, frame_size=14, oop_maps=0x3ffe0001ed00, >> caller_must_gc_arguments=false, subtype=8) at >> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/codeBlob.cpp:117 >> #12 0x00003fffb6ce5360 in CompiledMethod::CompiledMethod >> (this=0x3fff607d0c10, method=0x3ffe1ddce568, name=0x3fffb7a760f8 "nmethod", >> size=1768, header_size=392, cb=0x3ffdfd3fdbc0, frame_complete_offset=20, >> frame_size=14, oop_maps=0x3ffe0001ed00, caller_must_gc_arguments=false) at >> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/compiledMethod.cpp:42 >> #13 0x00003fffb7422198 in nmethod::nmethod (this=0x3fff607d0c10, >> method=0x3ffe1ddce568, nmethod_size=1768, compile_id=4, entry_bci=-1, >> offsets=0x3ffdfd3fdb98, orig_pc_offset=104, debug_info=0x3fffb03f2dc0, >> dependencies=0x3ffe0001ed70, code_buffer=0x3ffdfd3fdbc0, frame_size=14, >> oop_maps=0x3ffe0001ed00, handler_table=0x3ffdfd3fdb50, >> nul_chk_table=0x3ffdfd3fdb70, compiler=0x3fffb03d0cd0, comp_level=3) at >> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/nmethod.cpp:706 >> #14 0x00003fffb7421850 in nmethod::new_nmethod (method=..., compile_id=4, >> entry_bci=-1, offsets=0x3ffdfd3fdb98, orig_pc_offset=104, >> debug_info=0x3fffb03f2dc0, dependencies=0x3ffe0001ed70, >> code_buffer=0x3ffdfd3fdbc0, frame_size=14, oop_maps=0x3ffe0001ed00, >> handler_table=0x3ffdfd3fdb50, nul_chk_table=0x3ffdfd3fdb70, >> compiler=0x3fffb03d0cd0, comp_level=3) at >> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/nmethod.cpp:548 >> >> I think we really need to rework this as proposed by Andrew in his last >> mail. I'm working on such a fix. >> >> Regards, >> Volker >> >> >> On Mon, Apr 11, 2016 at 1:55 PM, Rickard B?ckman > > wrote: >> >> > Volker, >> > >> > here is the patch if you want to try it. >> > http://cr.openjdk.java.net/~rbackman/8152664/virtual.patch >> > >> > /R >> > >> > On 04/11, Rickard B?ckman wrote: >> > > Volker, >> > > >> > > thanks for finding this issue. >> > > >> > > I think that maybe the easiest fix is as follows: >> > > >> > > create new virtual methods in CompiledMethod: >> > > >> > > virtual address stub_begin_v() = 0; >> > > >> > > make the now virtual stub_begin non-virtual like: >> > > >> > > address stub_begin() { return stub_begin_v(); } >> > > >> > > in nmethod we override the stub_begin() with the normal this + offset >> > > compuation and implement stub_begin_v() to call stub_begin(). >> > > >> > > That will avoid all virtual calls in the case were we are not working on >> > > a CompiledMethod. >> > > >> > > It adds a couple of methods though. What do you think? >> > > >> > > /R >> > > >> > > On 04/08, Volker Simonis wrote: >> > > > Hi Rickard, >> > > > >> > > > I found the problem why your change crashes the VM on ppc (and I'm >> > pretty >> > > > sure it will also crash on ARM - @Andrew, maybe you can try it out?). >> > It is >> > > > caused by the following code in address NativeCall::get_trampoline() >> > which >> > > > is also present on arm64: >> > > > >> > > > address NativeCall::get_trampoline() { >> > > > address call_addr = addr_at(0); >> > > > CodeBlob *code = CodeCache::find_blob(call_addr); >> > > > ... >> > > > // If the codeBlob is not a nmethod, this is because we get here >> > from the >> > > > // CodeBlob constructor, which is called within the nmethod >> > constructor. 
>> > > > return trampoline_stub_Relocation::get_trampoline_for(call_addr, >> > > > (nmethod*)code); >> > > > } >> > > > >> > > > The comment explains the situation quite well: we're in the CodeBlob >> > > > constructor which was called by the CompiledMethod constructor which >> > was >> > > > called from the nmethod constructor: >> > > > >> > > > #3 0x00003fffb741b80c in NativeCall::get_trampoline >> > (this=0x3fff607d0fac) >> > > > at >> > > > >> > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/cpu/ppc/vm/nativeInst_ppc.cpp:146 >> > > > #4 0x00003fffb7596914 in Relocation::pd_call_destination >> > > > (this=0x3ffdfe3fcc90, orig_addr=0x3fff603b8a2c "\001") at >> > > > >> > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/cpu/ppc/vm/relocInfo_ppc.cpp:87 >> > > > #5 0x00003fffb758f5fc in CallRelocation::fix_relocation_after_move >> > > > (this=0x3ffdfe3fcc90, src=0x3ffdfe3fdb40, dest=0x3ffdfe3fcd58) at >> > > > >> > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.cpp:506 >> > > > #6 0x00003fffb6c48898 in CodeBuffer::relocate_code_to >> > > > (this=0x3ffdfe3fdb40, dest=0x3ffdfe3fcd58) at >> > > > >> > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/asm/codeBuffer.cpp:812 >> > > > #7 0x00003fffb6c48404 in CodeBuffer::copy_code_to >> > (this=0x3ffdfe3fdb40, >> > > > dest_blob=0x3fff607d0c10) at >> > > > >> > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/asm/codeBuffer.cpp:748 >> > > > #8 0x00003fffb6c42670 in CodeBuffer::copy_code_and_locs_to >> > > > (this=0x3ffdfe3fdb40, blob=0x3fff607d0c10) at >> > > > >> > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/asm/codeBuffer.hpp:607 >> > > > #9 0x00003fffb6c3f834 in CodeBlob::CodeBlob (this=0x3fff607d0c10, >> > > > name=0x3fffb7a75fd8 "nmethod", layout=..., cb=0x3ffdfe3fdb40, >> > > > frame_complete_offset=20, frame_size=14, oop_maps=0x3ffe00049620, >> > > > caller_must_gc_arguments=false, subtype=8) at >> > > > >> > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/codeBlob.cpp:117 >> > > > #10 0x00003fffb6ce52c8 in CompiledMethod::CompiledMethod >> > > > (this=0x3fff607d0c10, method=0x3ffe1ddce568, name=0x3fffb7a75fd8 >> > "nmethod", >> > > > size=1768, header_size=392, cb=0x3ffdfe3fdb40, >> > frame_complete_offset=20, >> > > > frame_size=14, oop_maps=0x3ffe00049620, >> > caller_must_gc_arguments=false) at >> > > > >> > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/compiledMethod.cpp:42 >> > > > #11 0x00003fffb7421f58 in nmethod::nmethod (this=0x3fff607d0c10, >> > > > method=0x3ffe1ddce568, nmethod_size=1768, compile_id=4, entry_bci=-1, >> > > > offsets=0x3ffdfe3fdb18, orig_pc_offset=104, debug_info=0x3fffb03d55f0, >> > > > dependencies=0x3ffe00049690, code_buffer=0x3ffdfe3fdb40, frame_size=14, >> > > > oop_maps=0x3ffe00049620, handler_table=0x3ffdfe3fdad0, >> > > > nul_chk_table=0x3ffdfe3fdaf0, compiler=0x3fffb03bc270, comp_level=3) at >> > > > >> > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/nmethod.cpp:706 >> > > > >> > > > Now we cast 'code' to 'nmethod' but at this point in time 'code' is >> > still a >> > > > CodeBlob from the C++ point of view (i.e. it still has a CodeBlob >> > vtable >> > > > (see [1] for an explanation)). 
>> > > > >> > > > Later on, in RelocIterator::initialize() we call virtual methods on the >> > > > nmethod which still has the vtable of a "CodeBlob" and this fails >> > badly: >> > > > >> > > > #0 SingletonBlob::print_on (this=0x3fff607d0c10, st=0x0) at >> > > > >> > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/codeBlob.cpp:584 >> > > > #1 0x00003fffb758d51c in RelocIterator::initialize >> > (this=0x3ffdfe3fc928, >> > > > nm=0x3fff607d0c10, begin=0x3fff607d0fac "\001", limit=0x0) at >> > > > >> > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.cpp:144 >> > > > #2 0x00003fffb6ace56c in RelocIterator::RelocIterator >> > > > (this=0x3ffdfe3fc928, nm=0x3fff607d0c10, begin=0x3fff607d0fac "\001", >> > > > limit=0x0) at >> > > > >> > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.hpp:1378 >> > > > #3 0x00003fffb75919dc in >> > trampoline_stub_Relocation::get_trampoline_for >> > > > (call=0x3fff607d0fac "\001", code=0x3fff607d0c10) at >> > > > >> > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.cpp:849 >> > > > #4 0x00003fffb741b80c in NativeCall::get_trampoline >> > (this=0x3fff607d0fac) >> > > > at >> > > > >> > /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/cpu/ppc/vm/nativeInst_ppc.cpp:146 >> > > > >> > > > As you can see, we actually want to call nmethod::stub_begin() at >> > > > relocInfo.cpp:144 >> > > > >> > > > 142 _section_start[CodeBuffer::SECT_CONSTS] = nm->consts_begin(); >> > > > 143 _section_start[CodeBuffer::SECT_INSTS ] = nm->insts_begin() ; >> > > > 144 _section_start[CodeBuffer::SECT_STUBS ] = nm->stub_begin() ; >> > > > >> > > > but we actually end up in SingletonBlob::print_on() which is a >> > completely >> > > > different method. Notice that the call to nm->consts_begin() before >> > also >> > > > fails, but it doesn't crash the VM because it happens to call >> > > > SingletonBlob::verify() which has no bad side effect. The call to >> > > > nm->insts_begin() in line 143 is non-virtual and thus works fine. Here >> > are >> > > > the corresponding vtable slots in the CodeBlob vtable for >> > consts_begin() >> > > > and stub_begin() >> > > > >> > > > (gdb) p &nmethod::consts_begin >> > > > $76 = &virtual table offset 42 >> > > > (gdb) p &nmethod::stub_begin >> > > > $77 = &virtual table offset 44 >> > > > (gdb) p ((*(void ***)nm) + 1)[42] >> > > > $86 = (void *) 0x3fffb6c41df8 >> > > > (gdb) p ((*(void ***)nm) + 1)[44] >> > > > $87 = (void *) 0x3fffb6c41e64 > > const> >> > > > >> > > > As you can see, 'nm' is indeed a "CodeBlob" at this point: >> > > > >> > > > (gdb) p *(void ***)nm >> > > > $91 = (void **) 0x3fffb7befa00 >> > > > (gdb) p nm->print() >> > > > [CodeBlob (0x00003fff607d1090)] >> > > > Framesize: 14 >> > > > >> > > > The offending calls succeeded before your change, because they where >> > not >> > > > virtual. Any idea how we can fix this with the new class hierarchy? >> > > > >> > > > Regards, >> > > > Volker >> > > > >> > > > [1] >> > > > >> > http://stackoverflow.com/questions/6591859/when-does-the-vptr-pointing-to-vtable-get-initialized-for-a-polymorphic-class >> > > > >> > > > >> > > > >> > > > On Thu, Apr 7, 2016 at 5:50 PM, Volker Simonis < >> > volker.simonis at gmail.com> >> > > > wrote: >> > > > >> > > > > Hi Rickard, >> > > > > >> > > > > I'd also like to know what's the rational behind this quite large >> > > > > change. 
Do you expect some performance or memory consumption >> > > > > improvements or is this a prerequisite for another change which is >> > > > > still to come? >> > > > > >> > > > > The change itself currently doesn't work on ppc64 (neither on Linux >> > > > > nor on AIX). I get the following crash during the build when the >> > newly >> > > > > built Hotspot is JIT-compiling java.lang.String::charAt on C1 : >> > > > > >> > > > > # >> > > > > # A fatal error has been detected by the Java Runtime Environment: >> > > > > # >> > > > > # SIGSEGV (0xb) at pc=0x00001000012a44d0, pid=35331, tid=35404 >> > > > > # >> > > > > # JRE version: OpenJDK Runtime Environment (9.0) (slowdebug build >> > > > > 9-internal+0-2016-04-07-162501.d046063.jdk9-hs-comp) >> > > > > # Java VM: OpenJDK 64-Bit Server VM (slowdebug >> > > > > 9-internal+0-2016-04-07-162501.d046063.jdk9-hs-comp, mixed mode, >> > > > > tiered, compressed oo >> > > > > ps, serial gc, linux-ppc64le) >> > > > > # Problematic frame: >> > > > > # V [libjvm.so+0xf744d0] outputStream::do_vsnprintf_and_write(char >> > > > > const*, char*, bool)+0x40 >> > > > > # >> > > > > # No core dump will be written. Core dumps have been disabled. To >> > > > > enable core dumping, try "ulimit -c unlimited" before starting Java >> > > > > again >> > > > > # >> > > > > # If you would like to submit a bug report, please visit: >> > > > > # http://bugreport.java.com/bugreport/crash.jsp >> > > > > # >> > > > > >> > > > > --------------- S U M M A R Y ------------ >> > > > > >> > > > > Command Line: >> > > > > >> > -Dapplication.home=/sapmnt/ld9510/a/d046063/output-jdk9-hs-comp-dbg/jdk >> > > > > -Xms8m -XX:+UseSerialGC -Xms32M -Xmx512M -Djdk. >> > > > > module.main=jdk.jlink jdk.jlink/jdk.tools.jmod.Main create >> > > > > --module-version 9-internal --os-name Linux --os-arch ppc64le >> > > > > --os-version >> > > > > 2.6 --modulepath /priv/d046063/output-jdk9-hs-comp-dbg/images/jmods >> > > > > --hash-dependencies .* --exclude **_the.* --libs >> > > > > >> > > > > >> > /priv/d046063/output-jdk9-hs-comp-dbg/support/modules_libs-stripped/java.base >> > > > > --cmds >> > > > > >> > /priv/d046063/output-jdk9-hs-comp-dbg/support/modules_cmds-stripped/java.base >> > > > > --config >> > > > > /priv/d046063/output-jdk9-hs-comp-dbg/support/modules_conf/java.base >> > > > > --class-path >> > /priv/d046063/output-jdk9-hs-comp-dbg/jdk/modules/java.base >> > > > > /priv/d046063/output-jdk9-hs-comp-dbg/support/jmods/java.base.jmod >> > > > > >> > > > > Host: ld9510, POWER8E (raw), altivec supported, 48 cores, 61G, # >> > > > > Please check /etc/os-release for details about this release. 
>> > > > > Time: Thu Apr 7 16:28:55 2016 CEST elapsed time: 0 seconds (0d 0h >> > 0m 0s) >> > > > > >> > > > > --------------- T H R E A D --------------- >> > > > > >> > > > > Current thread (0x000010000429c800): JavaThread "C1 >> > CompilerThread10" >> > > > > daemon [_thread_in_vm, id=35404, >> > > > > stack(0x000010006a800000,0x000010006ac00000)] >> > > > > >> > > > > >> > > > > Current CompileTask: >> > > > > C1: 761 3 3 java.lang.String::charAt (25 bytes) >> > > > > >> > > > > Stack: [0x000010006a800000,0x000010006ac00000], >> > > > > sp=0x000010006abfc6c0, free space=4081k >> > > > > Native frames: (J=compiled Java code, j=interpreted, Vv=VM code, >> > C=native >> > > > > code) >> > > > > V [libjvm.so+0xf744d0] outputStream::do_vsnprintf_and_write(char >> > > > > const*, char*, bool)+0x40 >> > > > > V [libjvm.so+0xf74668] outputStream::print_cr(char const*, >> > ...)+0x68 >> > > > > V [libjvm.so+0x72189c] CodeBlob::print_on(outputStream*) const+0x50 >> > > > > V [libjvm.so+0x723bdc] RuntimeBlob::print_on(outputStream*) >> > const+0x40 >> > > > > V [libjvm.so+0x721eb0] SingletonBlob::print_on(outputStream*) >> > const+0x4c >> > > > > V [libjvm.so+0x106d51c] RelocIterator::initialize(CompiledMethod*, >> > > > > unsigned char*, unsigned char*)+0x170 >> > > > > V [libjvm.so+0x5ae56c] >> > RelocIterator::RelocIterator(CompiledMethod*, >> > > > > unsigned char*, unsigned char*)+0x78 >> > > > > V [libjvm.so+0x10719dc] >> > > > > trampoline_stub_Relocation::get_trampoline_for(unsigned char*, >> > > > > nmethod*)+0x78 >> > > > > V [libjvm.so+0xefb80c] NativeCall::get_trampoline()+0x110 >> > > > > V [libjvm.so+0x1076914] Relocation::pd_call_destination(unsigned >> > > > > char*)+0x150 >> > > > > V [libjvm.so+0x106f5fc] >> > > > > CallRelocation::fix_relocation_after_move(CodeBuffer const*, >> > > > > CodeBuffer*)+0x74 >> > > > > V [libjvm.so+0x728898] CodeBuffer::relocate_code_to(CodeBuffer*) >> > > > > const+0x390 >> > > > > V [libjvm.so+0x728404] CodeBuffer::copy_code_to(CodeBlob*)+0x134 >> > > > > V [libjvm.so+0x722670] >> > CodeBuffer::copy_code_and_locs_to(CodeBlob*)+0x84 >> > > > > V [libjvm.so+0x71f834] CodeBlob::CodeBlob(char const*, >> > > > > CodeBlobLayout const&, CodeBuffer*, int, int, OopMapSet*, bool, >> > > > > int)+0x320 >> > > > > V [libjvm.so+0x7c52c8] CompiledMethod::CompiledMethod(Method*, char >> > > > > const*, int, int, CodeBuffer*, int, int, OopMapSet*, bool)+0xd8 >> > > > > V [libjvm.so+0xf01f58] nmethod::nmethod(Method*, int, int, int, >> > > > > CodeOffsets*, int, DebugInformationRecorder*, Dependencies*, >> > > > > CodeBuffer*, int, OopMapSet*, ExceptionHandlerTable*, >> > > > > ImplicitExceptionTable*, AbstractCompiler*, int)+0xe0 >> > > > > V [libjvm.so+0xf01610] nmethod::new_nmethod(methodHandle const&, >> > > > > int, int, CodeOffsets*, int, DebugInformationRecorder*, >> > Dependencies*, >> > > > > CodeBuffer*, int, OopMapSet*, ExceptionHandlerTable*, >> > > > > ImplicitExceptionTable*, AbstractCompiler*, int)+0x2c4 >> > > > > V [libjvm.so+0x632970] ciEnv::register_method(ciMethod*, int, >> > > > > CodeOffsets*, int, CodeBuffer*, int, OopMapSet*, >> > > > > ExceptionHandlerTable*, ImplicitExceptionTable*, AbstractCompiler*, >> > > > > bool, bool, RTMState)+0x560 >> > > > > V [libjvm.so+0x48ee00] Compilation::install_code(int)+0x264 >> > > > > V [libjvm.so+0x48eff8] Compilation::compile_method()+0x184 >> > > > > V [libjvm.so+0x48f7a8] Compilation::Compilation(AbstractCompiler*, >> > > > > ciEnv*, ciMethod*, int, BufferBlob*, DirectiveSet*)+0x288 >> > > > > V 
[libjvm.so+0x4980d0] Compiler::compile_method(ciEnv*, ciMethod*, >> > > > > int, DirectiveSet*)+0xc8 >> > > > > V [libjvm.so+0x7b188c] >> > > > > CompileBroker::invoke_compiler_on_method(CompileTask*)+0x590 >> > > > > V [libjvm.so+0x7b07bc] CompileBroker::compiler_thread_loop()+0x310 >> > > > > V [libjvm.so+0x11a614c] compiler_thread_entry(JavaThread*, >> > Thread*)+0xa0 >> > > > > V [libjvm.so+0x119f3a8] JavaThread::thread_main_inner()+0x1b4 >> > > > > V [libjvm.so+0x119f1a4] JavaThread::run()+0x1b8 >> > > > > V [libjvm.so+0xf53d90] java_start(Thread*)+0x204 >> > > > > C [libpthread.so.0+0x8a64] start_thread+0xf4 >> > > > > C [libc.so.6+0x1032a0] clone+0x98 >> > > > > >> > > > > I haven't identified the exact cause (will analyze it tomorrow) but >> > > > > the stack trace indicates that it is indeed related to your changes. >> > > > > >> > > > > Besides that I have some comments: >> > > > > >> > > > > codeBuffer.hpp: >> > > > > >> > > > > 472 CodeSection* insts() { return &_insts; } >> > > > > 475 const CodeSection* insts() const { return &_insts; } >> > > > > >> > > > > - do we really need both versions? >> > > > > >> > > > > codeBlob.hpp: >> > > > > >> > > > > 135 nmethod* as_nmethod_or_null() const { return >> > > > > is_nmethod() ? (nmethod*) this : NULL; } >> > > > > 136 nmethod* as_nmethod() const { >> > > > > assert(is_nmethod(), "must be nmethod"); return (nmethod*) this; } >> > > > > 137 CompiledMethod* as_compiled_method_or_null() const { return >> > > > > is_compiled() ? (CompiledMethod*) this : NULL; } >> > > > > 138 CompiledMethod* as_compiled_method() const { >> > > > > assert(is_compiled(), "must be compiled"); return (CompiledMethod*) >> > > > > this; } >> > > > > 139 CodeBlob* as_codeblob_or_null() const { return >> > > > > (CodeBlob*) this; } >> > > > > >> > > > > - I don't like this code. You make the getters 'const' which >> > > > > implicitely makes 'this' a "pointer to const" but then the returned >> > > > > pointer is a normal pointer to a non-const object and therefore you >> > > > > have to statically cast away the "pointer to const" (that's why you >> > > > > need the cast even in the case where you return a CodeBlob*). So >> > > > > either remove the const qualifier from the method declarations or >> > make >> > > > > them return "pointers to const". And by the way, >> > as_codeblob_or_null() >> > > > > doesn't seemed to be used anywhere in the code, why do we need it at >> > > > > all? >> > > > > >> > > > > - Why do we need the non-virtual methods is_nmethod() and >> > > > > is_compiled() to manually simulate virtual behavior. Why can't we >> > > > > simply make them virtual and implement them accordingly in nmathod >> > and >> > > > > CompiledMethod? >> > > > > >> > > > > Regards, >> > > > > Volker >> > > > > >> > > > > On Thu, Apr 7, 2016 at 2:12 PM, Rickard B?ckman >> > > > > wrote: >> > > > > > Hi, >> > > > > > >> > > > > > can I please have review for this patch please? >> > > > > > >> > > > > > So far CodeBlobs have required all the data (metadata, oops, code, >> > etc) >> > > > > > to be in one continuous blob With this patch we are looking to >> > change >> > > > > > that. It's been done by changing offsets in CodeBlob to addresses, >> > > > > > making some methods virtual to allow different behavior and also >> > > > > > creating a couple of new classes. CompiledMethod now sits inbetween >> > > > > > CodeBlob and nmethod. 
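For anyone skimming the thread, the reshaped hierarchy can be pictured roughly like this; it is an illustrative sketch with placeholder member and accessor names, not code from the webrev:

    // Sketch only -- simplified, placeholder names, not the webrev's code.
    typedef unsigned char* address;

    class CodeBlob {                          // base of everything in the code cache
    protected:
      address _code_begin;                    // formerly int offsets from the header,
      address _code_end;                      // now absolute addresses
    public:
      address code_begin() const { return _code_begin; }
      address code_end()   const { return _code_end; }
      virtual ~CodeBlob() {}
    };

    class CompiledMethod : public CodeBlob {  // new class between CodeBlob and nmethod
    public:
      virtual address stub_begin() const = 0; // sections no longer have to be contiguous
    };

    class nmethod : public CompiledMethod {   // a JIT-compiled Java method
      address _stub_begin;
    public:
      virtual address stub_begin() const { return _stub_begin; }
    };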
>> > > > > > >> > > > > > CR: https://bugs.openjdk.java.net/browse/JDK-8152664 >> > > > > > Webrev: http://cr.openjdk.java.net/~rbackman/8152664/ >> > > > > > >> > > > > > Thanks >> > > > > > /R >> > > > > >> > From per.liden at oracle.com Fri Apr 15 19:35:05 2016 From: per.liden at oracle.com (Per Liden) Date: Fri, 15 Apr 2016 21:35:05 +0200 Subject: RFR (S): JDK-8152949: Jigsaw crash when Klass in _fixup_module_field_list is unloaded In-Reply-To: <57113244.1040203@oracle.com> References: <570FEFAB.2070809@oracle.com> <570FFC4F.7020000@oracle.com> <5710E397.3020106@oracle.com> <5710E86F.3090606@oracle.com> <57111EBC.4000308@oracle.com> <57113244.1040203@oracle.com> Message-ID: <57114269.6070007@oracle.com> Hi, On 2016-04-15 20:26, Dean Long wrote: > On 4/15/2016 10:02 AM, Lois Foltan wrote: >> >> On 4/15/2016 9:11 AM, Stefan Karlsson wrote: >>> Hi Lois, >>> >>> On 2016-04-15 14:50, Lois Foltan wrote: >>>> >>>> On 4/14/2016 4:23 PM, Dean Long wrote: >>>>> Do the inc_keep_alive() and dec_keep_alive() updates need to be >>>>> atomic by any chance? >>>> >>>> Thanks Dean for the review and good point. I will make that change >>>> and send out an updated webrev. >>> >>> When would we ever race on the _keep_alive variable? Or is this more >>> of a defensive change to safeguard against future changes? >> >> Hi Stefan, >> >> In start up before module system initialization in complete I believe >> the VM is single threaded, so the increment/decrement reference counts >> do not need to be atomic. Adding it is a defensive move in case the >> reference count is ever used passed start up in the future. It kind >> of does seem a bit excessive, sounds like you agree? >> > > It does seems excessive if we are single threaded. So calling > Universe::is_module_initialized() should always return false in those > functions? If so, then you could add an assert. I think it's excessive and potentially confusing for someone reading the code, since it kind of signals that we would be in a multi-threaded context when we're not. I think adding an assert would be more clear. cheers, Per > > dl > >> Thanks, >> Lois >> >>> >>> Thanks, >>> StefanK >>> >>>> Lois >>>> >>>>> >>>>> dl >>>>> >>>>> On 4/14/2016 12:29 PM, Lois Foltan wrote: >>>>>> Hello, >>>>>> >>>>>> Please review the following fix: >>>>>> >>>>>> Webrev: >>>>>> http://cr.openjdk.java.net/~lfoltan/bug_jdk8152949/ >>>>>> >>>>>> Bug: Jigsaw crash when Klass in _fixup_module_field_list is unloaded >>>>>> https://bugs.openjdk.java.net/browse/JDK-8152949 >>>>>> >>>>>> Summary: >>>>>> Prior to java.base being defined to the VM by the module system >>>>>> initialization, classes loaded must be saved on a fixup list in >>>>>> order to later have their java.lang.Class' module field patched >>>>>> with java.base's java.lang.reflect.Module object once java.base is >>>>>> defined. Before module system initialization is complete, all >>>>>> classes loaded must have java.base as their defining module and be >>>>>> loaded by the boot loader. It was erroneously assumed that all >>>>>> classes placed on the module fixup list therefore would not die >>>>>> before java.base was defined. This assumption did not hold for >>>>>> anonymous classes which have a shorter lifetime than the boot >>>>>> loader. Test cases run with a small heap, -Xmx2m, would cause GC >>>>>> to unload the anonymous classes on the fixup list, later causing >>>>>> issues when an attempt was made to patch these classes with >>>>>> java.base's java.lang.reflect.Module object. 
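Conceptually, the fix keeps such a class reachable for exactly as long as it sits on the fixup list. The snippet below is only a rough sketch of that idea: the helpers fixup_module_field_list() and patch_module_field() are invented names, and it assumes the keep-alive count lives on the class's ClassLoaderData; see the webrev for the real code.

    // Rough sketch, hypothetical helpers -- not the code in the webrev.
    void add_to_fixup_list(InstanceKlass* k) {
      k->class_loader_data()->inc_keep_alive();    // pin it so GC cannot unload it
      fixup_module_field_list()->push(k);
    }

    void patch_javabase_entries(Handle java_base_module) {
      GrowableArray<InstanceKlass*>* list = fixup_module_field_list();
      for (int i = 0; i < list->length(); i++) {
        InstanceKlass* k = list->at(i);
        patch_module_field(k, java_base_module);   // hypothetical patching call
        k->class_loader_data()->dec_keep_alive();  // safe to unload again
      }
    }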
Thank you to Per >>>>>> Liden and Stefan Karlsson for contributing this fix for the >>>>>> runtime team. >>>>>> >>>>>> Test: >>>>>> - java/lang, java/util, java/io, all Hotspot jtreg tests, Hotspot >>>>>> colocated tests & noncolo.quick.testlist >>>>>> - several iterations of ConcurrentLinkedQueue/RemoveLeak.java >>>>>> which exhibited the problem >>>>> >>>> >>> >> > From Alan.Bateman at oracle.com Fri Apr 15 19:45:49 2016 From: Alan.Bateman at oracle.com (Alan Bateman) Date: Fri, 15 Apr 2016 20:45:49 +0100 Subject: RFR (S): JDK-8152949: Jigsaw crash when Klass in _fixup_module_field_list is unloaded In-Reply-To: <57111EBC.4000308@oracle.com> References: <570FEFAB.2070809@oracle.com> <570FFC4F.7020000@oracle.com> <5710E397.3020106@oracle.com> <5710E86F.3090606@oracle.com> <57111EBC.4000308@oracle.com> Message-ID: <571144ED.1050902@oracle.com> On 15/04/2016 18:02, Lois Foltan wrote: > > Hi Stefan, > > In start up before module system initialization in complete I believe > the VM is single threaded, so the increment/decrement reference counts > do not need to be atomic. Adding it is a defensive move in case the > reference count is ever used passed start up in the future. It kind > of does seem a bit excessive, sounds like you agree? There will be a number of threads running before the base module is defined to the VM. As things stand the the java threads at this point will be the Common-Cleaner, Finalizer, Reference Handler and Signal Handler. -Alan From dean.long at oracle.com Fri Apr 15 21:19:22 2016 From: dean.long at oracle.com (Dean Long) Date: Fri, 15 Apr 2016 14:19:22 -0700 Subject: RFR (XL): 8152664 - Support non-continuous CodeBlobs in HotSpot In-Reply-To: References: <20160407121221.GQ9504@rbackman> <20160411090501.GS9504@rbackman> <20160411115521.GU9504@rbackman> <20160413133128.GV9504@rbackman> Message-ID: <97c76657-4904-b1b2-c961-592a8baa44df@oracle.com> On 4/15/2016 11:28 AM, Volker Simonis wrote: > Hi, > > this one was a real puzzler :) > But I finally found some quiet hours in the office today and came up > with this rather simple solution: > > http://cr.openjdk.java.net/~simonis/webrevs/2016/8151956/ > > First I've tried to only move the relocation handling out of the > CodeBlob constructor into the derived constructors but that didn't > work very well and was quite complicated. So finally, I've just moved > the call to CodeBuffer::copy_code_and_locs_to() from the CodeBlob > constructor into the nmethod and RuntimeBlob constructors > respectively. I couldn't find a reason why we shouldn't do this. The > change is minimal and makes the whole handling more robust. I've > compiled and smoke tested with JVM 98 on Linux/x86_64, Linux/ppc64le > and Solaris/SPARC. > > I will run some more tests on Monday, but it would be great if you > (i.e. Andrew) could verify this fix on ARM and if you (i.e. > Rickard/Dean) could run some of your internal tests. Hi Volker. This looks like what I was trying to accomplish with my patch, but I introduced a new function CodeBlob::initialize(), and I called it too early (in CompiledMethod instead of nmethod). > I'd also like to ask if I should submit an extra RFR for 8151956 with > my fix or if we should close 8151956 and fix it as part of Rickard's > change for 8152664. I'd be happy with both solutions :) Either way is fine with me. 
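Put differently, the copy has to run in the most-derived constructor so that the object's vtable is fully in place before any relocation code can call back into it. Schematically (this is just the shape of the fix, not the actual patch):

    // Schematic only -- not the actual patch.
    class CodeBlob {
    protected:
      CodeBlob(/* CodeBuffer* cb, ... */) {
        // Wrong place for cb->copy_code_and_locs_to(this): inside this body
        // the object's vtable is still CodeBlob's, so relocation code that
        // finds the blob in the code cache and calls virtual accessors ends
        // up in base-class implementations.
      }
    };

    class nmethod : public CodeBlob {   // really via CompiledMethod, elided here
    public:
      nmethod(/* CodeBuffer* cb, ... */) : CodeBlob(/* ... */) {
        // Right place: by the time this body runs, the nmethod vtable is
        // installed, so the same lookup-and-call sequence sees the real type.
        // cb->copy_code_and_locs_to(this);
      }
    };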
dl > A nice weekend everybody, > Volker > > > On Wed, Apr 13, 2016 at 3:31 PM, Rickard B?ckman > wrote: >> Volker, >> >> yes, I didn't realize at first that the nmethod was casted to a >> CompiledMethod before the call to consts_begin(). Otherwise it would >> have used the non-virtual consts_begin of nmethod that didn't have any >> virtual calls. >> >> The entire code chain and looking up itself from the CodeCache before >> fully constructed seems quite problematic. Even before the changes I >> made. Previous to my changes the calls would have succeeded but returned >> header_begin() or this for all the consts_begin, consts_end, etc... ? >> >> /R >> >> On 04/11, Volker Simonis wrote: >>> Rickard, Dean, >>> >>> I'm afraid all this hacks can not work. It doesn't help to make >>> CompiledMethod::consts_begin() non-virtual and then calling a virtual >>> function from it. The problem ist that at the point where you call >>> consts_begin_v(), the vtable of 'this' is still the one of CodeBlob and >>> this results in calling yet another arbitrary function: >>> >>> #0 CodeBlob::is_locked_by_vm (this=0x3fff607d0c10) at >>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/codeBlob.hpp:168 >>> #1 0x00003fffb6e38048 in CompiledMethod::consts_begin >>> (this=0x3fff607d0c10) at >>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/compiledMethod.hpp:255 >>> #2 0x00003fffb758d658 in RelocIterator::initialize (this=0x3ffdfd3fc9a8, >>> nm=0x3fff607d0c10, begin=0x3fff607d0fac "\001", limit=0x0) at >>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.cpp:142 >>> #3 0x00003fffb6ace56c in RelocIterator::RelocIterator >>> (this=0x3ffdfd3fc9a8, nm=0x3fff607d0c10, begin=0x3fff607d0fac "\001", >>> limit=0x0) at >>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.hpp:1378 >>> #4 0x00003fffb7591afc in trampoline_stub_Relocation::get_trampoline_for >>> (call=0x3fff607d0fac "\001", code=0x3fff607d0c10) at >>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.cpp:849 >>> #5 0x00003fffb741ba4c in NativeCall::get_trampoline (this=0x3fff607d0fac) >>> at >>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/cpu/ppc/vm/nativeInst_ppc.cpp:146 >>> #6 0x00003fffb7596a34 in Relocation::pd_call_destination >>> (this=0x3ffdfd3fcd10, orig_addr=0x3fff6033482c "\001") at >>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/cpu/ppc/vm/relocInfo_ppc.cpp:87 >>> #7 0x00003fffb758f71c in CallRelocation::fix_relocation_after_move >>> (this=0x3ffdfd3fcd10, src=0x3ffdfd3fdbc0, dest=0x3ffdfd3fcdd8) at >>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.cpp:506 >>> #8 0x00003fffb6c48914 in CodeBuffer::relocate_code_to >>> (this=0x3ffdfd3fdbc0, dest=0x3ffdfd3fcdd8) at >>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/asm/codeBuffer.cpp:812 >>> #9 0x00003fffb6c48480 in CodeBuffer::copy_code_to (this=0x3ffdfd3fdbc0, >>> dest_blob=0x3fff607d0c10) at >>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/asm/codeBuffer.cpp:748 >>> #10 0x00003fffb6c426ec in CodeBuffer::copy_code_and_locs_to >>> (this=0x3ffdfd3fdbc0, blob=0x3fff607d0c10) at >>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/asm/codeBuffer.hpp:607 >>> #11 0x00003fffb6c3f8b0 in CodeBlob::CodeBlob (this=0x3fff607d0c10, >>> name=0x3fffb7a760f8 "nmethod", layout=..., cb=0x3ffdfd3fdbc0, >>> frame_complete_offset=20, frame_size=14, oop_maps=0x3ffe0001ed00, >>> caller_must_gc_arguments=false, subtype=8) at >>> 
/usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/codeBlob.cpp:117 >>> #12 0x00003fffb6ce5360 in CompiledMethod::CompiledMethod >>> (this=0x3fff607d0c10, method=0x3ffe1ddce568, name=0x3fffb7a760f8 "nmethod", >>> size=1768, header_size=392, cb=0x3ffdfd3fdbc0, frame_complete_offset=20, >>> frame_size=14, oop_maps=0x3ffe0001ed00, caller_must_gc_arguments=false) at >>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/compiledMethod.cpp:42 >>> #13 0x00003fffb7422198 in nmethod::nmethod (this=0x3fff607d0c10, >>> method=0x3ffe1ddce568, nmethod_size=1768, compile_id=4, entry_bci=-1, >>> offsets=0x3ffdfd3fdb98, orig_pc_offset=104, debug_info=0x3fffb03f2dc0, >>> dependencies=0x3ffe0001ed70, code_buffer=0x3ffdfd3fdbc0, frame_size=14, >>> oop_maps=0x3ffe0001ed00, handler_table=0x3ffdfd3fdb50, >>> nul_chk_table=0x3ffdfd3fdb70, compiler=0x3fffb03d0cd0, comp_level=3) at >>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/nmethod.cpp:706 >>> #14 0x00003fffb7421850 in nmethod::new_nmethod (method=..., compile_id=4, >>> entry_bci=-1, offsets=0x3ffdfd3fdb98, orig_pc_offset=104, >>> debug_info=0x3fffb03f2dc0, dependencies=0x3ffe0001ed70, >>> code_buffer=0x3ffdfd3fdbc0, frame_size=14, oop_maps=0x3ffe0001ed00, >>> handler_table=0x3ffdfd3fdb50, nul_chk_table=0x3ffdfd3fdb70, >>> compiler=0x3fffb03d0cd0, comp_level=3) at >>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/nmethod.cpp:548 >>> >>> I think we really need to rework this as proposed by Andrew in his last >>> mail. I'm working on such a fix. >>> >>> Regards, >>> Volker >>> >>> >>> On Mon, Apr 11, 2016 at 1:55 PM, Rickard B?ckman >>> wrote: >>>> Volker, >>>> >>>> here is the patch if you want to try it. >>>> http://cr.openjdk.java.net/~rbackman/8152664/virtual.patch >>>> >>>> /R >>>> >>>> On 04/11, Rickard B?ckman wrote: >>>>> Volker, >>>>> >>>>> thanks for finding this issue. >>>>> >>>>> I think that maybe the easiest fix is as follows: >>>>> >>>>> create new virtual methods in CompiledMethod: >>>>> >>>>> virtual address stub_begin_v() = 0; >>>>> >>>>> make the now virtual stub_begin non-virtual like: >>>>> >>>>> address stub_begin() { return stub_begin_v(); } >>>>> >>>>> in nmethod we override the stub_begin() with the normal this + offset >>>>> compuation and implement stub_begin_v() to call stub_begin(). >>>>> >>>>> That will avoid all virtual calls in the case were we are not working on >>>>> a CompiledMethod. >>>>> >>>>> It adds a couple of methods though. What do you think? >>>>> >>>>> /R >>>>> >>>>> On 04/08, Volker Simonis wrote: >>>>>> Hi Rickard, >>>>>> >>>>>> I found the problem why your change crashes the VM on ppc (and I'm >>>> pretty >>>>>> sure it will also crash on ARM - @Andrew, maybe you can try it out?). >>>> It is >>>>>> caused by the following code in address NativeCall::get_trampoline() >>>> which >>>>>> is also present on arm64: >>>>>> >>>>>> address NativeCall::get_trampoline() { >>>>>> address call_addr = addr_at(0); >>>>>> CodeBlob *code = CodeCache::find_blob(call_addr); >>>>>> ... >>>>>> // If the codeBlob is not a nmethod, this is because we get here >>>> from the >>>>>> // CodeBlob constructor, which is called within the nmethod >>>> constructor. 
>>>>>> return trampoline_stub_Relocation::get_trampoline_for(call_addr, >>>>>> (nmethod*)code); >>>>>> } >>>>>> >>>>>> The comment explains the situation quite well: we're in the CodeBlob >>>>>> constructor which was called by the CompiledMethod constructor which >>>> was >>>>>> called from the nmethod constructor: >>>>>> >>>>>> #3 0x00003fffb741b80c in NativeCall::get_trampoline >>>> (this=0x3fff607d0fac) >>>>>> at >>>>>> >>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/cpu/ppc/vm/nativeInst_ppc.cpp:146 >>>>>> #4 0x00003fffb7596914 in Relocation::pd_call_destination >>>>>> (this=0x3ffdfe3fcc90, orig_addr=0x3fff603b8a2c "\001") at >>>>>> >>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/cpu/ppc/vm/relocInfo_ppc.cpp:87 >>>>>> #5 0x00003fffb758f5fc in CallRelocation::fix_relocation_after_move >>>>>> (this=0x3ffdfe3fcc90, src=0x3ffdfe3fdb40, dest=0x3ffdfe3fcd58) at >>>>>> >>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.cpp:506 >>>>>> #6 0x00003fffb6c48898 in CodeBuffer::relocate_code_to >>>>>> (this=0x3ffdfe3fdb40, dest=0x3ffdfe3fcd58) at >>>>>> >>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/asm/codeBuffer.cpp:812 >>>>>> #7 0x00003fffb6c48404 in CodeBuffer::copy_code_to >>>> (this=0x3ffdfe3fdb40, >>>>>> dest_blob=0x3fff607d0c10) at >>>>>> >>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/asm/codeBuffer.cpp:748 >>>>>> #8 0x00003fffb6c42670 in CodeBuffer::copy_code_and_locs_to >>>>>> (this=0x3ffdfe3fdb40, blob=0x3fff607d0c10) at >>>>>> >>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/asm/codeBuffer.hpp:607 >>>>>> #9 0x00003fffb6c3f834 in CodeBlob::CodeBlob (this=0x3fff607d0c10, >>>>>> name=0x3fffb7a75fd8 "nmethod", layout=..., cb=0x3ffdfe3fdb40, >>>>>> frame_complete_offset=20, frame_size=14, oop_maps=0x3ffe00049620, >>>>>> caller_must_gc_arguments=false, subtype=8) at >>>>>> >>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/codeBlob.cpp:117 >>>>>> #10 0x00003fffb6ce52c8 in CompiledMethod::CompiledMethod >>>>>> (this=0x3fff607d0c10, method=0x3ffe1ddce568, name=0x3fffb7a75fd8 >>>> "nmethod", >>>>>> size=1768, header_size=392, cb=0x3ffdfe3fdb40, >>>> frame_complete_offset=20, >>>>>> frame_size=14, oop_maps=0x3ffe00049620, >>>> caller_must_gc_arguments=false) at >>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/compiledMethod.cpp:42 >>>>>> #11 0x00003fffb7421f58 in nmethod::nmethod (this=0x3fff607d0c10, >>>>>> method=0x3ffe1ddce568, nmethod_size=1768, compile_id=4, entry_bci=-1, >>>>>> offsets=0x3ffdfe3fdb18, orig_pc_offset=104, debug_info=0x3fffb03d55f0, >>>>>> dependencies=0x3ffe00049690, code_buffer=0x3ffdfe3fdb40, frame_size=14, >>>>>> oop_maps=0x3ffe00049620, handler_table=0x3ffdfe3fdad0, >>>>>> nul_chk_table=0x3ffdfe3fdaf0, compiler=0x3fffb03bc270, comp_level=3) at >>>>>> >>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/nmethod.cpp:706 >>>>>> Now we cast 'code' to 'nmethod' but at this point in time 'code' is >>>> still a >>>>>> CodeBlob from the C++ point of view (i.e. it still has a CodeBlob >>>> vtable >>>>>> (see [1] for an explanation)). 
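The effect is easy to reproduce outside HotSpot; this standalone program (unrelated to the webrev, just plain C++) prints "Base" for the first call because the vtable is still the base class's while the base constructor runs:

    #include <cstdio>

    struct Base {
      Base() { whoami(); }                  // virtual call made from the base ctor
      virtual void whoami() { std::puts("Base"); }
      virtual ~Base() {}
    };

    struct Derived : Base {
      virtual void whoami() { std::puts("Derived"); }
    };

    int main() {
      Derived d;    // prints "Base": during Base(), Derived's vtable isn't set yet
      d.whoami();   // prints "Derived": the object is fully constructed now
      return 0;
    }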
>>>>>> >>>>>> Later on, in RelocIterator::initialize() we call virtual methods on the >>>>>> nmethod which still has the vtable of a "CodeBlob" and this fails >>>> badly: >>>>>> #0 SingletonBlob::print_on (this=0x3fff607d0c10, st=0x0) at >>>>>> >>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/codeBlob.cpp:584 >>>>>> #1 0x00003fffb758d51c in RelocIterator::initialize >>>> (this=0x3ffdfe3fc928, >>>>>> nm=0x3fff607d0c10, begin=0x3fff607d0fac "\001", limit=0x0) at >>>>>> >>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.cpp:144 >>>>>> #2 0x00003fffb6ace56c in RelocIterator::RelocIterator >>>>>> (this=0x3ffdfe3fc928, nm=0x3fff607d0c10, begin=0x3fff607d0fac "\001", >>>>>> limit=0x0) at >>>>>> >>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.hpp:1378 >>>>>> #3 0x00003fffb75919dc in >>>> trampoline_stub_Relocation::get_trampoline_for >>>>>> (call=0x3fff607d0fac "\001", code=0x3fff607d0c10) at >>>>>> >>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.cpp:849 >>>>>> #4 0x00003fffb741b80c in NativeCall::get_trampoline >>>> (this=0x3fff607d0fac) >>>>>> at >>>>>> >>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/cpu/ppc/vm/nativeInst_ppc.cpp:146 >>>>>> As you can see, we actually want to call nmethod::stub_begin() at >>>>>> relocInfo.cpp:144 >>>>>> >>>>>> 142 _section_start[CodeBuffer::SECT_CONSTS] = nm->consts_begin(); >>>>>> 143 _section_start[CodeBuffer::SECT_INSTS ] = nm->insts_begin() ; >>>>>> 144 _section_start[CodeBuffer::SECT_STUBS ] = nm->stub_begin() ; >>>>>> >>>>>> but we actually end up in SingletonBlob::print_on() which is a >>>> completely >>>>>> different method. Notice that the call to nm->consts_begin() before >>>> also >>>>>> fails, but it doesn't crash the VM because it happens to call >>>>>> SingletonBlob::verify() which has no bad side effect. The call to >>>>>> nm->insts_begin() in line 143 is non-virtual and thus works fine. Here >>>> are >>>>>> the corresponding vtable slots in the CodeBlob vtable for >>>> consts_begin() >>>>>> and stub_begin() >>>>>> >>>>>> (gdb) p &nmethod::consts_begin >>>>>> $76 = &virtual table offset 42 >>>>>> (gdb) p &nmethod::stub_begin >>>>>> $77 = &virtual table offset 44 >>>>>> (gdb) p ((*(void ***)nm) + 1)[42] >>>>>> $86 = (void *) 0x3fffb6c41df8 >>>>>> (gdb) p ((*(void ***)nm) + 1)[44] >>>>>> $87 = (void *) 0x3fffb6c41e64 >>> const> >>>>>> As you can see, 'nm' is indeed a "CodeBlob" at this point: >>>>>> >>>>>> (gdb) p *(void ***)nm >>>>>> $91 = (void **) 0x3fffb7befa00 >>>>>> (gdb) p nm->print() >>>>>> [CodeBlob (0x00003fff607d1090)] >>>>>> Framesize: 14 >>>>>> >>>>>> The offending calls succeeded before your change, because they where >>>> not >>>>>> virtual. Any idea how we can fix this with the new class hierarchy? >>>>>> >>>>>> Regards, >>>>>> Volker >>>>>> >>>>>> [1] >>>>>> >>>> http://stackoverflow.com/questions/6591859/when-does-the-vptr-pointing-to-vtable-get-initialized-for-a-polymorphic-class >>>>>> >>>>>> >>>>>> On Thu, Apr 7, 2016 at 5:50 PM, Volker Simonis < >>>> volker.simonis at gmail.com> >>>>>> wrote: >>>>>> >>>>>>> Hi Rickard, >>>>>>> >>>>>>> I'd also like to know what's the rational behind this quite large >>>>>>> change. Do you expect some performance or memory consumption >>>>>>> improvements or is this a prerequisite for another change which is >>>>>>> still to come? >>>>>>> >>>>>>> The change itself currently doesn't work on ppc64 (neither on Linux >>>>>>> nor on AIX). 
I get the following crash during the build when the >>>> newly >>>>>>> built Hotspot is JIT-compiling java.lang.String::charAt on C1 : >>>>>>> >>>>>>> # >>>>>>> # A fatal error has been detected by the Java Runtime Environment: >>>>>>> # >>>>>>> # SIGSEGV (0xb) at pc=0x00001000012a44d0, pid=35331, tid=35404 >>>>>>> # >>>>>>> # JRE version: OpenJDK Runtime Environment (9.0) (slowdebug build >>>>>>> 9-internal+0-2016-04-07-162501.d046063.jdk9-hs-comp) >>>>>>> # Java VM: OpenJDK 64-Bit Server VM (slowdebug >>>>>>> 9-internal+0-2016-04-07-162501.d046063.jdk9-hs-comp, mixed mode, >>>>>>> tiered, compressed oo >>>>>>> ps, serial gc, linux-ppc64le) >>>>>>> # Problematic frame: >>>>>>> # V [libjvm.so+0xf744d0] outputStream::do_vsnprintf_and_write(char >>>>>>> const*, char*, bool)+0x40 >>>>>>> # >>>>>>> # No core dump will be written. Core dumps have been disabled. To >>>>>>> enable core dumping, try "ulimit -c unlimited" before starting Java >>>>>>> again >>>>>>> # >>>>>>> # If you would like to submit a bug report, please visit: >>>>>>> # http://bugreport.java.com/bugreport/crash.jsp >>>>>>> # >>>>>>> >>>>>>> --------------- S U M M A R Y ------------ >>>>>>> >>>>>>> Command Line: >>>>>>> >>>> -Dapplication.home=/sapmnt/ld9510/a/d046063/output-jdk9-hs-comp-dbg/jdk >>>>>>> -Xms8m -XX:+UseSerialGC -Xms32M -Xmx512M -Djdk. >>>>>>> module.main=jdk.jlink jdk.jlink/jdk.tools.jmod.Main create >>>>>>> --module-version 9-internal --os-name Linux --os-arch ppc64le >>>>>>> --os-version >>>>>>> 2.6 --modulepath /priv/d046063/output-jdk9-hs-comp-dbg/images/jmods >>>>>>> --hash-dependencies .* --exclude **_the.* --libs >>>>>>> >>>>>>> >>>> /priv/d046063/output-jdk9-hs-comp-dbg/support/modules_libs-stripped/java.base >>>>>>> --cmds >>>>>>> >>>> /priv/d046063/output-jdk9-hs-comp-dbg/support/modules_cmds-stripped/java.base >>>>>>> --config >>>>>>> /priv/d046063/output-jdk9-hs-comp-dbg/support/modules_conf/java.base >>>>>>> --class-path >>>> /priv/d046063/output-jdk9-hs-comp-dbg/jdk/modules/java.base >>>>>>> /priv/d046063/output-jdk9-hs-comp-dbg/support/jmods/java.base.jmod >>>>>>> >>>>>>> Host: ld9510, POWER8E (raw), altivec supported, 48 cores, 61G, # >>>>>>> Please check /etc/os-release for details about this release. 
>>>>>>> Time: Thu Apr 7 16:28:55 2016 CEST elapsed time: 0 seconds (0d 0h >>>> 0m 0s) >>>>>>> --------------- T H R E A D --------------- >>>>>>> >>>>>>> Current thread (0x000010000429c800): JavaThread "C1 >>>> CompilerThread10" >>>>>>> daemon [_thread_in_vm, id=35404, >>>>>>> stack(0x000010006a800000,0x000010006ac00000)] >>>>>>> >>>>>>> >>>>>>> Current CompileTask: >>>>>>> C1: 761 3 3 java.lang.String::charAt (25 bytes) >>>>>>> >>>>>>> Stack: [0x000010006a800000,0x000010006ac00000], >>>>>>> sp=0x000010006abfc6c0, free space=4081k >>>>>>> Native frames: (J=compiled Java code, j=interpreted, Vv=VM code, >>>> C=native >>>>>>> code) >>>>>>> V [libjvm.so+0xf744d0] outputStream::do_vsnprintf_and_write(char >>>>>>> const*, char*, bool)+0x40 >>>>>>> V [libjvm.so+0xf74668] outputStream::print_cr(char const*, >>>> ...)+0x68 >>>>>>> V [libjvm.so+0x72189c] CodeBlob::print_on(outputStream*) const+0x50 >>>>>>> V [libjvm.so+0x723bdc] RuntimeBlob::print_on(outputStream*) >>>> const+0x40 >>>>>>> V [libjvm.so+0x721eb0] SingletonBlob::print_on(outputStream*) >>>> const+0x4c >>>>>>> V [libjvm.so+0x106d51c] RelocIterator::initialize(CompiledMethod*, >>>>>>> unsigned char*, unsigned char*)+0x170 >>>>>>> V [libjvm.so+0x5ae56c] >>>> RelocIterator::RelocIterator(CompiledMethod*, >>>>>>> unsigned char*, unsigned char*)+0x78 >>>>>>> V [libjvm.so+0x10719dc] >>>>>>> trampoline_stub_Relocation::get_trampoline_for(unsigned char*, >>>>>>> nmethod*)+0x78 >>>>>>> V [libjvm.so+0xefb80c] NativeCall::get_trampoline()+0x110 >>>>>>> V [libjvm.so+0x1076914] Relocation::pd_call_destination(unsigned >>>>>>> char*)+0x150 >>>>>>> V [libjvm.so+0x106f5fc] >>>>>>> CallRelocation::fix_relocation_after_move(CodeBuffer const*, >>>>>>> CodeBuffer*)+0x74 >>>>>>> V [libjvm.so+0x728898] CodeBuffer::relocate_code_to(CodeBuffer*) >>>>>>> const+0x390 >>>>>>> V [libjvm.so+0x728404] CodeBuffer::copy_code_to(CodeBlob*)+0x134 >>>>>>> V [libjvm.so+0x722670] >>>> CodeBuffer::copy_code_and_locs_to(CodeBlob*)+0x84 >>>>>>> V [libjvm.so+0x71f834] CodeBlob::CodeBlob(char const*, >>>>>>> CodeBlobLayout const&, CodeBuffer*, int, int, OopMapSet*, bool, >>>>>>> int)+0x320 >>>>>>> V [libjvm.so+0x7c52c8] CompiledMethod::CompiledMethod(Method*, char >>>>>>> const*, int, int, CodeBuffer*, int, int, OopMapSet*, bool)+0xd8 >>>>>>> V [libjvm.so+0xf01f58] nmethod::nmethod(Method*, int, int, int, >>>>>>> CodeOffsets*, int, DebugInformationRecorder*, Dependencies*, >>>>>>> CodeBuffer*, int, OopMapSet*, ExceptionHandlerTable*, >>>>>>> ImplicitExceptionTable*, AbstractCompiler*, int)+0xe0 >>>>>>> V [libjvm.so+0xf01610] nmethod::new_nmethod(methodHandle const&, >>>>>>> int, int, CodeOffsets*, int, DebugInformationRecorder*, >>>> Dependencies*, >>>>>>> CodeBuffer*, int, OopMapSet*, ExceptionHandlerTable*, >>>>>>> ImplicitExceptionTable*, AbstractCompiler*, int)+0x2c4 >>>>>>> V [libjvm.so+0x632970] ciEnv::register_method(ciMethod*, int, >>>>>>> CodeOffsets*, int, CodeBuffer*, int, OopMapSet*, >>>>>>> ExceptionHandlerTable*, ImplicitExceptionTable*, AbstractCompiler*, >>>>>>> bool, bool, RTMState)+0x560 >>>>>>> V [libjvm.so+0x48ee00] Compilation::install_code(int)+0x264 >>>>>>> V [libjvm.so+0x48eff8] Compilation::compile_method()+0x184 >>>>>>> V [libjvm.so+0x48f7a8] Compilation::Compilation(AbstractCompiler*, >>>>>>> ciEnv*, ciMethod*, int, BufferBlob*, DirectiveSet*)+0x288 >>>>>>> V [libjvm.so+0x4980d0] Compiler::compile_method(ciEnv*, ciMethod*, >>>>>>> int, DirectiveSet*)+0xc8 >>>>>>> V [libjvm.so+0x7b188c] >>>>>>> 
CompileBroker::invoke_compiler_on_method(CompileTask*)+0x590 >>>>>>> V [libjvm.so+0x7b07bc] CompileBroker::compiler_thread_loop()+0x310 >>>>>>> V [libjvm.so+0x11a614c] compiler_thread_entry(JavaThread*, >>>> Thread*)+0xa0 >>>>>>> V [libjvm.so+0x119f3a8] JavaThread::thread_main_inner()+0x1b4 >>>>>>> V [libjvm.so+0x119f1a4] JavaThread::run()+0x1b8 >>>>>>> V [libjvm.so+0xf53d90] java_start(Thread*)+0x204 >>>>>>> C [libpthread.so.0+0x8a64] start_thread+0xf4 >>>>>>> C [libc.so.6+0x1032a0] clone+0x98 >>>>>>> >>>>>>> I haven't identified the exact cause (will analyze it tomorrow) but >>>>>>> the stack trace indicates that it is indeed related to your changes. >>>>>>> >>>>>>> Besides that I have some comments: >>>>>>> >>>>>>> codeBuffer.hpp: >>>>>>> >>>>>>> 472 CodeSection* insts() { return &_insts; } >>>>>>> 475 const CodeSection* insts() const { return &_insts; } >>>>>>> >>>>>>> - do we really need both versions? >>>>>>> >>>>>>> codeBlob.hpp: >>>>>>> >>>>>>> 135 nmethod* as_nmethod_or_null() const { return >>>>>>> is_nmethod() ? (nmethod*) this : NULL; } >>>>>>> 136 nmethod* as_nmethod() const { >>>>>>> assert(is_nmethod(), "must be nmethod"); return (nmethod*) this; } >>>>>>> 137 CompiledMethod* as_compiled_method_or_null() const { return >>>>>>> is_compiled() ? (CompiledMethod*) this : NULL; } >>>>>>> 138 CompiledMethod* as_compiled_method() const { >>>>>>> assert(is_compiled(), "must be compiled"); return (CompiledMethod*) >>>>>>> this; } >>>>>>> 139 CodeBlob* as_codeblob_or_null() const { return >>>>>>> (CodeBlob*) this; } >>>>>>> >>>>>>> - I don't like this code. You make the getters 'const' which >>>>>>> implicitely makes 'this' a "pointer to const" but then the returned >>>>>>> pointer is a normal pointer to a non-const object and therefore you >>>>>>> have to statically cast away the "pointer to const" (that's why you >>>>>>> need the cast even in the case where you return a CodeBlob*). So >>>>>>> either remove the const qualifier from the method declarations or >>>> make >>>>>>> them return "pointers to const". And by the way, >>>> as_codeblob_or_null() >>>>>>> doesn't seemed to be used anywhere in the code, why do we need it at >>>>>>> all? >>>>>>> >>>>>>> - Why do we need the non-virtual methods is_nmethod() and >>>>>>> is_compiled() to manually simulate virtual behavior. Why can't we >>>>>>> simply make them virtual and implement them accordingly in nmathod >>>> and >>>>>>> CompiledMethod? >>>>>>> >>>>>>> Regards, >>>>>>> Volker >>>>>>> >>>>>>> On Thu, Apr 7, 2016 at 2:12 PM, Rickard B?ckman >>>>>>> wrote: >>>>>>>> Hi, >>>>>>>> >>>>>>>> can I please have review for this patch please? >>>>>>>> >>>>>>>> So far CodeBlobs have required all the data (metadata, oops, code, >>>> etc) >>>>>>>> to be in one continuous blob With this patch we are looking to >>>> change >>>>>>>> that. It's been done by changing offsets in CodeBlob to addresses, >>>>>>>> making some methods virtual to allow different behavior and also >>>>>>>> creating a couple of new classes. CompiledMethod now sits inbetween >>>>>>>> CodeBlob and nmethod. >>>>>>>> >>>>>>>> CR: https://bugs.openjdk.java.net/browse/JDK-8152664 >>>>>>>> Webrev: http://cr.openjdk.java.net/~rbackman/8152664/ >>>>>>>> >>>>>>>> Thanks >>>>>>>> /R From daniel.daugherty at oracle.com Sat Apr 16 18:01:27 2016 From: daniel.daugherty at oracle.com (Daniel D. 
Daugherty) Date: Sat, 16 Apr 2016 12:01:27 -0600 Subject: hs-rt and main are CLOSED In-Reply-To: <57112434.2050903@oracle.com> References: <5710CC3A.6000302@oracle.com> <57112434.2050903@oracle.com> Message-ID: <57127DF7.6030206@oracle.com> Just a reminder... to get to the top of everyone Inbox... JDK9-hs-rt is closed. Please do not push any changes there. JDK9-hs is *still closed* while we evaluate the results of the various nightlies (RT/SVC, GC and Main). We currently have one integration_blocker: JDK-8154258 [TESTBUG] Various serviceability tests fail compilation https://bugs.openjdk.java.net/browse/JDK-8154258 This looks like another collision between the following bug fix that came down from JDK9-dev: JDK-8153737 Unsupported Module https://bugs.openjdk.java.net/browse/JDK-8153737 with uses of "module sun.misc.Unsafe" that still remain in the hotspot code base. Dmitry is assigned the bug and it is marked as "in progress". Dan On 4/15/16 11:26 AM, Daniel D. Daugherty wrote: > Greetings, > > The last changesets from JDK9-hs-rt have been pushed to JDK9-hs. > JDK9-hs-rt is closed. Please do not push any changes there. > > JDK9-hs is *still closed* until Jesper has the chance to do his > own sanity checks and declare it open again. Leonid is also doing > some Adhoc testing to verify the state of JDK9-hs. > > Dan > > > On 4/15/16 5:10 AM, Jesper Wilhelmsson wrote: >> Hi, >> >> Since I've gotten the question a few times today I just want to >> clarify that jdk9/hs-rt AND jdk9/hs are both closed due to the merge >> of these two repos. >> >> We will let you know once everything is done. >> >> Thanks, >> /Jesper >> > > From david.holmes at oracle.com Sun Apr 17 10:37:39 2016 From: david.holmes at oracle.com (David Holmes) Date: Sun, 17 Apr 2016 20:37:39 +1000 Subject: RFR: JDK-8152666: The new Hotspot Build System In-Reply-To: <5704D28E.1060304@oracle.com> References: <56F4F0EE.1040508@oracle.com> <5703FF7C.6010309@oracle.com> <5704D28E.1060304@oracle.com> Message-ID: <57136773.5080901@oracle.com> I missed all the fun while I was on vacation and have still not yet had a chance to look at the actual code, but have one query ... On 6/04/2016 7:10 PM, Erik Joelsson wrote: > Hello Dan and thank you for the review! I know it's a lot to chew through. > > I have incorporated your changes and published a new webrev: > http://cr.openjdk.java.net/~erikj/8152666/webrev.02/ > > On 2016-04-05 20:10, Daniel D. Daugherty wrote: >> >> >> > The new build supports the following variants: >> > >> > * server (C1+C2) >> >> The above "server" variant is the "tiered server". Does the new >> build system support the "C2 server" variant? What about the >> 32-bit server and 64-bit server build variants? For example, >> on Linux you can have: >> >> * C1/Client, 32-bit >> * C2/Server, 32-bit >> * Tiered (C1 & C2), 32-bit >> * C2/Server, 64-bit >> * Tiered (C1 + C2), 64-bit >> >> The above wide range of variants is also true for Win*. >> > There is a way to achieve this even if it's not as straight forward. > It's controlled through the new "jvm-feature" setting. To build a > completely custom set of features for a jvm, you set the > --with-jvm-variants=custom and then define the full feature set using > --with-jvm-features=compiler2,... For "server, client, core, minimal, > zero and zeroshark" there is a predefined set of features while the > custom variant has no features by default. It isn't clear to me how Tiered fits into this scheme. 
Even in the old hotspot build Tiered is managed as a variant of building C2 with another make variable controlling tiered or non-tiered. It is also unclear to me what a "jvm variant" is if compiler2 etc are all considered features - is a variant a predefined set of features? Thanks, David ----- >> >> General >> Please make sure all the copyrights are updated. >> > Done >> >> common/autoconf/basics.m4 >> No comments. >> >> common/autoconf/build-performance.m4 >> No comments. >> >> common/autoconf/buildjdk-spec.gmk.in >> No comments. >> >> common/autoconf/compare.sh.in >> No comments. >> >> common/autoconf/configure >> No comments. >> >> common/autoconf/configure.ac >> No comments. >> >> common/autoconf/flags.m4 >> L274: SHARED_LIBRARY_FLAGS="-dynamiclib >> -compatibility_version 1.0.0 -current_version 1.0.0 $PICFLAG" >> L275: JVM_CFLAGS="$JVM_CFLAGS -fPIC" >> >> L275 is new, but seeing it next to L274 makes me wonder if >> $PICFLAG should be used instead of the literal '-fPIC'? > Fixed >> >> L303: JVM_CFLAGS="$JVM_CFLAGS -fPIC" >> Same question about literal '-fPIC'. >> > Not sure, leaving for now. It seems we leave the PICFLAG empty for the > JDK build and only add it to the hotspot build. This should be addressed > in a followup where we try to align flag usage more between the > different libraries. >> For most of the changes to flags.m4, I can't see how any of it >> relates to the new HotSpot build. >> >> Update: Now I'm wondering if this is one of those files that >> we typically don't review because it is auto generated. >> Sorry, don't remember for sure. > It's a file that should be reviewed, only generated-configure.sh can be > ignored. The majority of the changes in here are related to cross > compiling in the modular world. When cross compiling now, we need to > also build a jvm for the build platform in order to run jlink and jmod > when building images. With the old hotspot build, that was simpler, just > invoke the hotspot build with some ARCH and compiler related variables > set. For the rest of the JDK build, an approximation of flags used was > enough so the problem was never fully solved. > > In the new build, we derive all the compiler options in configure so I > had to introduce a more proper solution. I did this by parameterizing > some macros in flags.m4 and platform.m4 so that we can run them twice, > once for the "target" toolchain" and one for the "build" toolchain. > These are the majority of the changes you are seeing. I also removed the > old hard coded "build" versions of certain flag and platform variables. >> common/autoconf/generated-configure.sh >> 2642 lines changed... I think this is one of those files >> you're supposed to skip in build-dev review... :-| > Yes, please do. >> >> common/autoconf/help.m4 >> L179: $PRINTF "Which are valid to use depends on the target >> platform.\n " >> L180: $PRINTF "%s " $VALID_JVM_FEATURES >> Why are there blanks after the last '\n' on L179 instead of >> at the beginning of L180? >> > If you do $PRINTF " %s " $VALID_JVM_FEATURES, it adds those spaces > between every element in VALID_JVM_FEATURES. >> common/autoconf/hotspot-spec.gmk.in >> No comments. >> >> common/autoconf/hotspot.m4 >> L46: # Check if the specified JVM features are explicitely >> enabled. To be used in >> Typo: 'explicitely' -> 'explicitly' >> >> L59: # server: normal interpreter, and a tiered C1/C2 compiler >> So no support for a C2-only server config? >> >> L77: # Have the user listed more than one variant? 
>> Typo: 'Have' -> 'Has' >> > fixed >> common/autoconf/jdk-options.m4 >> No comments other than to say thanks for keeping support >> for 'optimized' builds. >> >> common/autoconf/jdk-version.m4 >> No comments. >> >> common/autoconf/lib-std.m4 >> No comments. >> >> common/autoconf/libraries.m4 >> No comments. >> >> common/autoconf/platform.m4 >> No comments, but mind numbing amount of diffs. >> > Same explanation as for flags.m4 >> common/autoconf/spec.gmk.in >> No comments. >> >> common/autoconf/toolchain.m4 >> No comments. >> >> common/autoconf/version-numbers >> No comments. >> >> common/bin/compare.sh >> No comments. >> >> common/bin/compare_exceptions.sh.incl >> No comments. >> >> make/Jprt.gmk >> No comments. >> >> make/Main.gmk >> No comments other than the 'hotspot-ide-project' target >> looks interesting... >> > This is the replacement for the visual studio project generator. We > currently only support VS here. >> make/common/MakeBase.gmk >> No comments. >> >> make/common/NativeCompilation.gmk >> L649: else ifeq (LOW, $$($1_OPTIMIZATION)) >> L650: $1_OPT_CFLAGS := $(C_O_FLAG_NORM) >> L651: $1_OPT_CXXFLAGS := $(CXX_O_FLAG_NORM) >> Instead of "_NORM", I was expecting "_LOW". >> >> L652: else ifeq (HIGH, $$($1_OPTIMIZATION)) >> L653: $1_OPT_CFLAGS := $(C_O_FLAG_HI) >> L654: $1_OPT_CXXFLAGS := $(CXX_O_FLAG_HI) >> Instead of "_HI" I was expecting "_HIGH". >> > The names here were defined way back when we did build infra for the JDK > build. I wouldn't mind better alignment in naming the optimization levels. >> make/jprt.properties >> L136: # Don't disable precompiled headers on windows. It's simply >> too slow. >> This is a surprise. Not the slowness part, but not being >> able to do a non-PCH JPRT build on Win*. IMHO, it's a >> little too much motherhood... >> > Actually, the old hotspot build does not allow disabling of PCH for > windows at all. The flag is simply ignored. In the new build, we treat > the flag the same on all platforms, so disabling precompiled headers > works on Windows. In the current JPRT config, we disable precompiled > headers on all fastdebug builds as a way of making sure we aren't > breaking that build configuration. We noticed a major build time > regression on Windows fastdebug builds in JPRT until we figured out it > was caused by this. Since we aren't currently disabling precompiled > header on Windows, I see no reason to start now. The build time > regression for just building hotspot is around 2m->12m. >> jdk/make/Import.gmk >> No comments. >> >> jdk/make/copy/Copy-java.base.gmk >> No comments. >> >> jdk/make/lib/CoreLibraries.gmk >> No comments. >> >> hotspot/makefiles/BuildHotspot.gmk >> No comments. >> >> hotspot/makefiles/Dist.gmk >> L52: define macosx_universalize >> I thought MacOS X universal support was going away? >> >> Update: OK, I see the mention of 8069540 ahead... >> > Yeah, we need to be binary the same as the old build for now. Hopefully > we can get rid of the universal stuff soon. >> L120: # these files are identical, and just pick one arbitrarily >> to use as souce. >> Typo: 'souce' -> 'source' >> >> L139: # This might have been defined in a custom extenstion >> Typo: 'extenstion' -> 'extension' >> > fixed >> L168: # NOTE: In the old build, this file was not copied on Windows. >> L169: ifneq ($(OPENJDK_TARGET_OS), windows) >> L170: $(eval $(call SetupCopyFiles, COPY_JVMTI_HTML, \ >> I'm not quite sure why the jvmti.html work is done for >> more than a single platform. >> >> Update: Thinking about this more... 
I vaguely remember that >> JVM/TI tracing used to be disabled in Client VMs. Don't know >> if that's still the case. > The jvmti.html file is just copied into the docs bundle later. IMO, the > docs bundle should be the same regardless of platform. In practice we > only publish the bundle from one build platform anyway. > > /Erik >> >> hotspot/makefiles/HotspotCommon.gmk >> No comments. >> >> hotspot/makefiles/gensrc/GenerateSources.gmk >> No comments. >> >> hotspot/makefiles/gensrc/GensrcAdlc.gmk >> L98: # NOTE: Windows adlc flags was different in the old >> build. Is this really >> L99: # correct? >> John Rose may know the answer to this historical question. >> >> hotspot/makefiles/gensrc/GensrcDtrace.gmk >> No comments. >> >> hotspot/makefiles/gensrc/GensrcJvmti.gmk >> No comments. >> >> hotspot/makefiles/ide/CreateVSProject.gmk >> No comments. >> >> hotspot/makefiles/lib/CompileDtracePostJvm.gmk >> No comments. >> >> hotspot/makefiles/lib/CompileDtracePreJvm.gmk >> No comments. >> >> hotspot/makefiles/lib/CompileJvm.gmk >> No comments. >> >> hotspot/makefiles/lib/CompileLibjsig.gmk >> No comments. >> >> hotspot/makefiles/lib/CompileLibraries.gmk >> No comments. >> >> hotspot/makefiles/lib/JvmFeatures.gmk >> No comments. >> >> hotspot/makefiles/lib/JvmMapfile.gmk >> No comments. >> >> hotspot/makefiles/lib/JvmOverrideFiles.gmk >> No comments. >> >> hotspot/makefiles/mapfiles/libjsig/mapfile-vers-solaris >> hotspot/makefiles/mapfiles/libjvm_db/mapfile-vers >> hotspot/makefiles/mapfiles/libjvm_dtrace/mapfile-vers >> No comments on the mapfiles. >> >> hotspot/makefiles/symbols/symbols-aix >> hotspot/makefiles/symbols/symbols-aix-debug >> hotspot/makefiles/symbols/symbols-linux >> hotspot/makefiles/symbols/symbols-macosx >> hotspot/makefiles/symbols/symbols-shared >> hotspot/makefiles/symbols/symbols-solaris >> hotspot/makefiles/symbols/symbols-solaris-dtrace-compiler1 >> hotspot/makefiles/symbols/symbols-solaris-dtrace-compiler2 >> hotspot/makefiles/symbols/symbols-unix >> No comments on the symbol files. >> >> >> Thumbs up on this fix; I don't think that anything I noted >> above is a show stopper for this changeset. >> >> Dan >> >> >>> >>> /Erik >> > From stefan.karlsson at oracle.com Mon Apr 18 07:06:20 2016 From: stefan.karlsson at oracle.com (Stefan Karlsson) Date: Mon, 18 Apr 2016 09:06:20 +0200 Subject: RFR (S): JDK-8152949: Jigsaw crash when Klass in _fixup_module_field_list is unloaded In-Reply-To: <571144ED.1050902@oracle.com> References: <570FEFAB.2070809@oracle.com> <570FFC4F.7020000@oracle.com> <5710E397.3020106@oracle.com> <5710E86F.3090606@oracle.com> <57111EBC.4000308@oracle.com> <571144ED.1050902@oracle.com> Message-ID: <5714876C.2050207@oracle.com> On 2016-04-15 21:45, Alan Bateman wrote: > > On 15/04/2016 18:02, Lois Foltan wrote: >> >> Hi Stefan, >> >> In start up before module system initialization in complete I believe >> the VM is single threaded, so the increment/decrement reference >> counts do not need to be atomic. Adding it is a defensive move in >> case the reference count is ever used passed start up in the future. >> It kind of does seem a bit excessive, sounds like you agree? > There will be a number of threads running before the base module is > defined to the VM. As things stand the the java threads at this point > will be the Common-Cleaner, Finalizer, Reference Handler and Signal > Handler. So, are you saying that we need the atomics? 
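If not, the assert-only variant that Dean and Per suggested would look roughly like this (illustrative sketch; the owning class and exact spellings here are assumptions, not the proposed patch):

    // Illustrative only -- the owning class and exact names are assumptions.
    void SomeOwner::inc_keep_alive() {
      assert(!Universe::is_module_initialized(),
             "only called single-threaded, before the module system is up");
      _keep_alive++;   // plain increment, no Atomic::inc needed
    }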
The java_lang_Class::create_mirror function isn't multi-thread safe, and must already be guarded by a lock (SystemDictionary_lock AFAICT). The increment in Unsafe_DefineAnonymousClass0, will only be done once, for the single InstanceKlass instance in the CLD. And all reads of _keep_alive from the GC are done during safepoints. How does ModuleEntryTable::patch_javabase_entries guard against concurrent inserts into the _fixup_module_field_list list? thanks, StefanK > > -Alan From erik.joelsson at oracle.com Mon Apr 18 07:28:09 2016 From: erik.joelsson at oracle.com (Erik Joelsson) Date: Mon, 18 Apr 2016 09:28:09 +0200 Subject: RFR: JDK-8152666: The new Hotspot Build System In-Reply-To: <57136773.5080901@oracle.com> References: <56F4F0EE.1040508@oracle.com> <5703FF7C.6010309@oracle.com> <5704D28E.1060304@oracle.com> <57136773.5080901@oracle.com> Message-ID: <57148C89.709@oracle.com> On 2016-04-17 12:37, David Holmes wrote: > I missed all the fun while I was on vacation and have still not yet > had a chance to look at the actual code, but have one query ... Yeah, bad timing, but time was also running out. There is still time for adjustments of course. > > On 6/04/2016 7:10 PM, Erik Joelsson wrote: >> Hello Dan and thank you for the review! I know it's a lot to chew >> through. >> >> I have incorporated your changes and published a new webrev: >> http://cr.openjdk.java.net/~erikj/8152666/webrev.02/ >> >> On 2016-04-05 20:10, Daniel D. Daugherty wrote: >>> >>> >>> > The new build supports the following variants: >>> > >>> > * server (C1+C2) >>> >>> The above "server" variant is the "tiered server". Does the new >>> build system support the "C2 server" variant? What about the >>> 32-bit server and 64-bit server build variants? For example, >>> on Linux you can have: >>> >>> * C1/Client, 32-bit >>> * C2/Server, 32-bit >>> * Tiered (C1 & C2), 32-bit >>> * C2/Server, 64-bit >>> * Tiered (C1 + C2), 64-bit >>> >>> The above wide range of variants is also true for Win*. >>> >> There is a way to achieve this even if it's not as straight forward. >> It's controlled through the new "jvm-feature" setting. To build a >> completely custom set of features for a jvm, you set the >> --with-jvm-variants=custom and then define the full feature set using >> --with-jvm-features=compiler2,... For "server, client, core, minimal, >> zero and zeroshark" there is a predefined set of features while the >> custom variant has no features by default. > > It isn't clear to me how Tiered fits into this scheme. Even in the old > hotspot build Tiered is managed as a variant of building C2 with > another make variable controlling tiered or non-tiered. > In the new build, tiered is simply defined as enabling both the compiler1 and the compiler2 "features". If you look in the old build tiered.make, the only thing it does is setting both -DCOMPILER2 and -DCOMPILER1 on CFLAGS. From what I can tell, there is no other meaningful way of interpreting enabling both c1 and c2 in the same jvm. > It is also unclear to me what a "jvm variant" is if compiler2 etc are > all considered features - is a variant a predefined set of features? > Yes, that is essentially what a variant is, but it's also in part a way of naming a particular jvm, since we can sometimes build multiple variants in the same build. In theory we could enhance the configure UI for this to be able to arbitrarily define any number of variants to be built together, name them arbitrary things and define an arbitrary set of features for each one. 
The build itself would support such a scheme at this point (the internal variables are general enough), but in reality, I don't think we are really interested in supporting that. Also the source code is certainly not setup to handle any combination of features today. What configure supports now is picking certain valid combinations of variants to build. Enable or disable certain features through specialized parameters. Enabling certain other features through the --with-jvm-features argument. Note that --with-jvm-features will apply to all variants currently being built. Finally there is the custom variant, which by default has no features, where you may create your own special combination if you really want to. /Erik From Leonid.Mesnik at oracle.com Mon Apr 18 07:52:48 2016 From: Leonid.Mesnik at oracle.com (Leonid Mesnik) Date: Mon, 18 Apr 2016 10:52:48 +0300 Subject: RFR(S): 8154209: Remove client VM from default JIB profile on windows-x86 and linux-x86 In-Reply-To: <570FC852.10808@oracle.com> References: <570FC852.10808@oracle.com> Message-ID: <57149250.10008@oracle.com> Hi Shouldn't be jprt targets in jprt.properties updates to stop using client also? http://hg.openjdk.java.net/jdk9/hs/file/645c48292130/make/jprt.properties line 206 - 214 # Test target list (no fastdebug & limited c2 testing) my.test.target.set= \ solaris_sparcv9_5.11-product-c2-TESTNAME, \ solaris_x64_5.11-product-c2-TESTNAME, \ linux_i586_3.8-product-{c1|c2}-TESTNAME, \ linux_x64_3.8-product-c2-TESTNAME, \ macosx_x64_10.9-product-c2-TESTNAME, \ windows_i586_6.3-product-c1-TESTNAME, \ windows_x64_6.3-product-c2-TESTNAME and line 294-299 # JCK test targets in test/Makefile (no windows) my.test.target.set.jck= \ solaris_sparcv9_5.11-product-c2-JCK7TESTRULE, \ solaris_x64_5.11-product-c2-JCK7TESTRULE, \ linux_i586_3.8-product-c1-JCK7TESTRULE, \ linux_x64_3.8-product-c2-JCK7TESTRULE Leonid On 14.04.2016 19:41, Mikael Vidstedt wrote: > > Please review the following change which removes the "client" VM from > the default JIB build profile on windows-x86 and linux-x86: > > Bug: https://bugs.openjdk.java.net/browse/JDK-8154209 > Webrev (top): > http://cr.openjdk.java.net/~mikael/webrevs/8154209/webrev.01/ > Webrev (hotspot): > http://cr.openjdk.java.net/~mikael/webrevs/8154209/webrev.01/hotspot/webrev/ > > > When not including the client VM, the build system automatically > creates a jvm.cfg which makes -client an alias for -server. At some > point in the future we may choose to output a warning and/or refuse to > start up if -client is specified, but at least for now silently > falling back on the -server VM seems appropriate. > > The test/runtime/SharedArchiveFile/DefaultUseWithClient.java test > assumes that CDS is always compiled in and enabled in the -client VM > on windows-x86. Since -client will fall back on -server that is no > longer true, so the test needs to be updated. I added an @ignore and > filed the following issue to track fixing the test: > > https://bugs.openjdk.java.net/browse/JDK-8154204 > > > Testing: > > In addition to a standard JPRT push job, Christian Tornqvist helped me > run the runtime nightly tests and apart from the above mentioned test > all tests were successful. 
> > Cheers, > Mikael > From per.liden at oracle.com Mon Apr 18 09:28:03 2016 From: per.liden at oracle.com (Per Liden) Date: Mon, 18 Apr 2016 11:28:03 +0200 Subject: RFR: 8067785: Using AlwaysPreTouch does not always touch all pages In-Reply-To: <5710F714.5070601@oracle.com> References: <5710F714.5070601@oracle.com> Message-ID: <5714A8A3.2080109@oracle.com> Hi Stefan, On 2016-04-15 16:13, Stefan Karlsson wrote: > Hi all, > > Please review this patch that ensures that VirtualSpace::expand_by > pre-touches all committed memory. > > http://cr.openjdk.java.net/~stefank/8067785/webrev.01 > https://bugs.openjdk.java.net/browse/JDK-8067785 Looks good. cheers, Per > > 1) Previously, we pre-touched the memory between the old and new _high > pointers. Since the _high variable isn't page aligned, the pre-touch > code didn't touch all committed pages. I've moved the pre-touch code to > be done for every os::commit_memory call in VirtuaSpace::expand_by. > > 2) expand_by has segregated the VirtualSpace into three segements. > [small pages | large pages | small pages] so that we can have > VirtualSpaces that are not large page size aligned. Previously, only the > middle section called commit_memory with an alignment hint, and the > small pages sections called commit_memory without a small pages hint. On > all platforms, except Solaris, this boils down to the same code. On > Solaris we have this additional code executed: > size_t page_size = page_size_for_alignment(alignment_hint); > if (page_size > (size_t) vm_page_size()) { > (void)Solaris::setup_large_pages(addr, bytes, page_size); > } > > But since the alignment_hint is set to vm_page_size we won't try to > setup_large_pages here either. > > 3) The patch also contains a few style changes to make the > VirtualSpace::expand_by easier to read (at least for me) > > Tested with JPRT and -XX:+ExecuteInternalVMTests with AlwaysPreTouch > temporarily forced to true. > > Thanks, > StefanK From volker.simonis at gmail.com Mon Apr 18 09:34:45 2016 From: volker.simonis at gmail.com (Volker Simonis) Date: Mon, 18 Apr 2016 11:34:45 +0200 Subject: RFR (XL): 8152664 - Support non-continuous CodeBlobs in HotSpot In-Reply-To: <97c76657-4904-b1b2-c961-592a8baa44df@oracle.com> References: <20160407121221.GQ9504@rbackman> <20160411090501.GS9504@rbackman> <20160411115521.GU9504@rbackman> <20160413133128.GV9504@rbackman> <97c76657-4904-b1b2-c961-592a8baa44df@oracle.com> Message-ID: Hi Rickard, are you fine if we are fixing the issue about non-initialized nmethods independently under "8151956 : CodeBlob ctor virtual call on partially constructed subclass"? In that case I'll submit an extra RFR today with that fix only. Anyway you or Dean will have to sponsor it because it is in shared code. Or do you want to fix the issue together with your change for 8152664? Anyway, after we've fixed the problem with the partially constructed classes I think we should make is_compiled() and is_nmethod() virtual again and get rid of the "subtype()" hack: http://cr.openjdk.java.net/~simonis/webrevs/2016/8152664_addon/ (This is relatively to your webrev.) 
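Roughly, that would mean going from a hand-rolled type tag back to ordinary C++ dispatch; schematically (this is just the shape, not the add-on webrev):

    // Schematic only -- not the add-on webrev. Instead of a per-subclass
    // "subtype" tag tested by non-virtual CodeBlob::is_nmethod()/is_compiled(),
    // each class answers for itself:
    class CodeBlob {
    public:
      virtual bool is_compiled() const { return false; }
      virtual bool is_nmethod()  const { return false; }
      virtual ~CodeBlob() {}
    };

    class CompiledMethod : public CodeBlob {
    public:
      virtual bool is_compiled() const { return true; }
    };

    class nmethod : public CompiledMethod {
    public:
      virtual bool is_nmethod() const { return true; }
    };

(With the caveat already discussed in this thread: that is only safe once nothing calls these methods while a blob is still partially constructed.)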
Regards, Volker On Fri, Apr 15, 2016 at 11:19 PM, Dean Long wrote: > > On 4/15/2016 11:28 AM, Volker Simonis wrote: >> >> Hi, >> >> this one was a real puzzler :) >> But I finally found some quiet hours in the office today and came up >> with this rather simple solution: >> >> http://cr.openjdk.java.net/~simonis/webrevs/2016/8151956/ >> >> First I've tried to only move the relocation handling out of the >> CodeBlob constructor into the derived constructors but that didn't >> work very well and was quite complicated. So finally, I've just moved >> the call to CodeBuffer::copy_code_and_locs_to() from the CodeBlob >> constructor into the nmethod and RuntimeBlob constructors >> respectively. I couldn't find a reason why we shouldn't do this. The >> change is minimal and makes the whole handling more robust. I've >> compiled and smoke tested with JVM 98 on Linux/x86_64, Linux/ppc64le >> and Solaris/SPARC. >> >> I will run some more tests on Monday, but it would be great if you >> (i.e. Andrew) could verify this fix on ARM and if you (i.e. >> Rickard/Dean) could run some of your internal tests. > > > Hi Volker. This looks like what I was trying to accomplish with my patch, > but I introduced a new function CodeBlob::initialize(), and I called it too > early (in CompiledMethod instead of nmethod). > Yes, now I see. Initially I just patched your changes in and saw that they don't work so I didn't looked at the actual changes in more detail. I think I'll add the various casts/type changes from your version (they are a nice clean-up) into my patch if Rickard agrees to fix this independently of his change. >> I'd also like to ask if I should submit an extra RFR for 8151956 with >> my fix or if we should close 8151956 and fix it as part of Rickard's >> change for 8152664. I'd be happy with both solutions :) > > > Either way is fine with me. > OK, let's wait for Rickard's opinion. > dl > > >> A nice weekend everybody, >> Volker >> >> >> On Wed, Apr 13, 2016 at 3:31 PM, Rickard B?ckman >> wrote: >>> >>> Volker, >>> >>> yes, I didn't realize at first that the nmethod was casted to a >>> CompiledMethod before the call to consts_begin(). Otherwise it would >>> have used the non-virtual consts_begin of nmethod that didn't have any >>> virtual calls. >>> >>> The entire code chain and looking up itself from the CodeCache before >>> fully constructed seems quite problematic. Even before the changes I >>> made. Previous to my changes the calls would have succeeded but returned >>> header_begin() or this for all the consts_begin, consts_end, etc... ? >>> >>> /R >>> >>> On 04/11, Volker Simonis wrote: >>>> >>>> Rickard, Dean, >>>> >>>> I'm afraid all this hacks can not work. It doesn't help to make >>>> CompiledMethod::consts_begin() non-virtual and then calling a virtual >>>> function from it. 
The problem ist that at the point where you call >>>> consts_begin_v(), the vtable of 'this' is still the one of CodeBlob and >>>> this results in calling yet another arbitrary function: >>>> >>>> #0 CodeBlob::is_locked_by_vm (this=0x3fff607d0c10) at >>>> >>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/codeBlob.hpp:168 >>>> #1 0x00003fffb6e38048 in CompiledMethod::consts_begin >>>> (this=0x3fff607d0c10) at >>>> >>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/compiledMethod.hpp:255 >>>> #2 0x00003fffb758d658 in RelocIterator::initialize >>>> (this=0x3ffdfd3fc9a8, >>>> nm=0x3fff607d0c10, begin=0x3fff607d0fac "\001", limit=0x0) at >>>> >>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.cpp:142 >>>> #3 0x00003fffb6ace56c in RelocIterator::RelocIterator >>>> (this=0x3ffdfd3fc9a8, nm=0x3fff607d0c10, begin=0x3fff607d0fac "\001", >>>> limit=0x0) at >>>> >>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.hpp:1378 >>>> #4 0x00003fffb7591afc in trampoline_stub_Relocation::get_trampoline_for >>>> (call=0x3fff607d0fac "\001", code=0x3fff607d0c10) at >>>> >>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.cpp:849 >>>> #5 0x00003fffb741ba4c in NativeCall::get_trampoline >>>> (this=0x3fff607d0fac) >>>> at >>>> >>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/cpu/ppc/vm/nativeInst_ppc.cpp:146 >>>> #6 0x00003fffb7596a34 in Relocation::pd_call_destination >>>> (this=0x3ffdfd3fcd10, orig_addr=0x3fff6033482c "\001") at >>>> >>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/cpu/ppc/vm/relocInfo_ppc.cpp:87 >>>> #7 0x00003fffb758f71c in CallRelocation::fix_relocation_after_move >>>> (this=0x3ffdfd3fcd10, src=0x3ffdfd3fdbc0, dest=0x3ffdfd3fcdd8) at >>>> >>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.cpp:506 >>>> #8 0x00003fffb6c48914 in CodeBuffer::relocate_code_to >>>> (this=0x3ffdfd3fdbc0, dest=0x3ffdfd3fcdd8) at >>>> >>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/asm/codeBuffer.cpp:812 >>>> #9 0x00003fffb6c48480 in CodeBuffer::copy_code_to (this=0x3ffdfd3fdbc0, >>>> dest_blob=0x3fff607d0c10) at >>>> >>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/asm/codeBuffer.cpp:748 >>>> #10 0x00003fffb6c426ec in CodeBuffer::copy_code_and_locs_to >>>> (this=0x3ffdfd3fdbc0, blob=0x3fff607d0c10) at >>>> >>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/asm/codeBuffer.hpp:607 >>>> #11 0x00003fffb6c3f8b0 in CodeBlob::CodeBlob (this=0x3fff607d0c10, >>>> name=0x3fffb7a760f8 "nmethod", layout=..., cb=0x3ffdfd3fdbc0, >>>> frame_complete_offset=20, frame_size=14, oop_maps=0x3ffe0001ed00, >>>> caller_must_gc_arguments=false, subtype=8) at >>>> >>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/codeBlob.cpp:117 >>>> #12 0x00003fffb6ce5360 in CompiledMethod::CompiledMethod >>>> (this=0x3fff607d0c10, method=0x3ffe1ddce568, name=0x3fffb7a760f8 >>>> "nmethod", >>>> size=1768, header_size=392, cb=0x3ffdfd3fdbc0, frame_complete_offset=20, >>>> frame_size=14, oop_maps=0x3ffe0001ed00, caller_must_gc_arguments=false) >>>> at >>>> >>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/compiledMethod.cpp:42 >>>> #13 0x00003fffb7422198 in nmethod::nmethod (this=0x3fff607d0c10, >>>> method=0x3ffe1ddce568, nmethod_size=1768, compile_id=4, entry_bci=-1, >>>> offsets=0x3ffdfd3fdb98, orig_pc_offset=104, debug_info=0x3fffb03f2dc0, >>>> dependencies=0x3ffe0001ed70, code_buffer=0x3ffdfd3fdbc0, 
frame_size=14, >>>> oop_maps=0x3ffe0001ed00, handler_table=0x3ffdfd3fdb50, >>>> nul_chk_table=0x3ffdfd3fdb70, compiler=0x3fffb03d0cd0, comp_level=3) at >>>> >>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/nmethod.cpp:706 >>>> #14 0x00003fffb7421850 in nmethod::new_nmethod (method=..., >>>> compile_id=4, >>>> entry_bci=-1, offsets=0x3ffdfd3fdb98, orig_pc_offset=104, >>>> debug_info=0x3fffb03f2dc0, dependencies=0x3ffe0001ed70, >>>> code_buffer=0x3ffdfd3fdbc0, frame_size=14, oop_maps=0x3ffe0001ed00, >>>> handler_table=0x3ffdfd3fdb50, nul_chk_table=0x3ffdfd3fdb70, >>>> compiler=0x3fffb03d0cd0, comp_level=3) at >>>> >>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/nmethod.cpp:548 >>>> >>>> I think we really need to rework this as proposed by Andrew in his last >>>> mail. I'm working on such a fix. >>>> >>>> Regards, >>>> Volker >>>> >>>> >>>> On Mon, Apr 11, 2016 at 1:55 PM, Rickard B?ckman >>>> >>>> >>>>> wrote: >>>>> Volker, >>>>> >>>>> here is the patch if you want to try it. >>>>> http://cr.openjdk.java.net/~rbackman/8152664/virtual.patch >>>>> >>>>> /R >>>>> >>>>> On 04/11, Rickard B?ckman wrote: >>>>>> >>>>>> Volker, >>>>>> >>>>>> thanks for finding this issue. >>>>>> >>>>>> I think that maybe the easiest fix is as follows: >>>>>> >>>>>> create new virtual methods in CompiledMethod: >>>>>> >>>>>> virtual address stub_begin_v() = 0; >>>>>> >>>>>> make the now virtual stub_begin non-virtual like: >>>>>> >>>>>> address stub_begin() { return stub_begin_v(); } >>>>>> >>>>>> in nmethod we override the stub_begin() with the normal this + offset >>>>>> compuation and implement stub_begin_v() to call stub_begin(). >>>>>> >>>>>> That will avoid all virtual calls in the case were we are not working >>>>>> on >>>>>> a CompiledMethod. >>>>>> >>>>>> It adds a couple of methods though. What do you think? >>>>>> >>>>>> /R >>>>>> >>>>>> On 04/08, Volker Simonis wrote: >>>>>>> >>>>>>> Hi Rickard, >>>>>>> >>>>>>> I found the problem why your change crashes the VM on ppc (and I'm >>>>> >>>>> pretty >>>>>>> >>>>>>> sure it will also crash on ARM - @Andrew, maybe you can try it out?). >>>>> >>>>> It is >>>>>>> >>>>>>> caused by the following code in address NativeCall::get_trampoline() >>>>> >>>>> which >>>>>>> >>>>>>> is also present on arm64: >>>>>>> >>>>>>> address NativeCall::get_trampoline() { >>>>>>> address call_addr = addr_at(0); >>>>>>> CodeBlob *code = CodeCache::find_blob(call_addr); >>>>>>> ... >>>>>>> // If the codeBlob is not a nmethod, this is because we get here >>>>> >>>>> from the >>>>>>> >>>>>>> // CodeBlob constructor, which is called within the nmethod >>>>> >>>>> constructor. 
>>>>>>> >>>>>>> return trampoline_stub_Relocation::get_trampoline_for(call_addr, >>>>>>> (nmethod*)code); >>>>>>> } >>>>>>> >>>>>>> The comment explains the situation quite well: we're in the CodeBlob >>>>>>> constructor which was called by the CompiledMethod constructor which >>>>> >>>>> was >>>>>>> >>>>>>> called from the nmethod constructor: >>>>>>> >>>>>>> #3 0x00003fffb741b80c in NativeCall::get_trampoline >>>>> >>>>> (this=0x3fff607d0fac) >>>>>>> >>>>>>> at >>>>>>> >>>>> >>>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/cpu/ppc/vm/nativeInst_ppc.cpp:146 >>>>>>> >>>>>>> #4 0x00003fffb7596914 in Relocation::pd_call_destination >>>>>>> (this=0x3ffdfe3fcc90, orig_addr=0x3fff603b8a2c "\001") at >>>>>>> >>>>> >>>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/cpu/ppc/vm/relocInfo_ppc.cpp:87 >>>>>>> >>>>>>> #5 0x00003fffb758f5fc in CallRelocation::fix_relocation_after_move >>>>>>> (this=0x3ffdfe3fcc90, src=0x3ffdfe3fdb40, dest=0x3ffdfe3fcd58) at >>>>>>> >>>>> >>>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.cpp:506 >>>>>>> >>>>>>> #6 0x00003fffb6c48898 in CodeBuffer::relocate_code_to >>>>>>> (this=0x3ffdfe3fdb40, dest=0x3ffdfe3fcd58) at >>>>>>> >>>>> >>>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/asm/codeBuffer.cpp:812 >>>>>>> >>>>>>> #7 0x00003fffb6c48404 in CodeBuffer::copy_code_to >>>>> >>>>> (this=0x3ffdfe3fdb40, >>>>>>> >>>>>>> dest_blob=0x3fff607d0c10) at >>>>>>> >>>>> >>>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/asm/codeBuffer.cpp:748 >>>>>>> >>>>>>> #8 0x00003fffb6c42670 in CodeBuffer::copy_code_and_locs_to >>>>>>> (this=0x3ffdfe3fdb40, blob=0x3fff607d0c10) at >>>>>>> >>>>> >>>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/asm/codeBuffer.hpp:607 >>>>>>> >>>>>>> #9 0x00003fffb6c3f834 in CodeBlob::CodeBlob (this=0x3fff607d0c10, >>>>>>> name=0x3fffb7a75fd8 "nmethod", layout=..., cb=0x3ffdfe3fdb40, >>>>>>> frame_complete_offset=20, frame_size=14, oop_maps=0x3ffe00049620, >>>>>>> caller_must_gc_arguments=false, subtype=8) at >>>>>>> >>>>> >>>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/codeBlob.cpp:117 >>>>>>> >>>>>>> #10 0x00003fffb6ce52c8 in CompiledMethod::CompiledMethod >>>>>>> (this=0x3fff607d0c10, method=0x3ffe1ddce568, name=0x3fffb7a75fd8 >>>>> >>>>> "nmethod", >>>>>>> >>>>>>> size=1768, header_size=392, cb=0x3ffdfe3fdb40, >>>>> >>>>> frame_complete_offset=20, >>>>>>> >>>>>>> frame_size=14, oop_maps=0x3ffe00049620, >>>>> >>>>> caller_must_gc_arguments=false) at >>>>> >>>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/compiledMethod.cpp:42 >>>>>>> >>>>>>> #11 0x00003fffb7421f58 in nmethod::nmethod (this=0x3fff607d0c10, >>>>>>> method=0x3ffe1ddce568, nmethod_size=1768, compile_id=4, entry_bci=-1, >>>>>>> offsets=0x3ffdfe3fdb18, orig_pc_offset=104, >>>>>>> debug_info=0x3fffb03d55f0, >>>>>>> dependencies=0x3ffe00049690, code_buffer=0x3ffdfe3fdb40, >>>>>>> frame_size=14, >>>>>>> oop_maps=0x3ffe00049620, handler_table=0x3ffdfe3fdad0, >>>>>>> nul_chk_table=0x3ffdfe3fdaf0, compiler=0x3fffb03bc270, comp_level=3) >>>>>>> at >>>>>>> >>>>> >>>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/nmethod.cpp:706 >>>>>>> >>>>>>> Now we cast 'code' to 'nmethod' but at this point in time 'code' is >>>>> >>>>> still a >>>>>>> >>>>>>> CodeBlob from the C++ point of view (i.e. it still has a CodeBlob >>>>> >>>>> vtable >>>>>>> >>>>>>> (see [1] for an explanation)). 
>>>>>>> >>>>>>> Later on, in RelocIterator::initialize() we call virtual methods on >>>>>>> the >>>>>>> nmethod which still has the vtable of a "CodeBlob" and this fails >>>>> >>>>> badly: >>>>>>> >>>>>>> #0 SingletonBlob::print_on (this=0x3fff607d0c10, st=0x0) at >>>>>>> >>>>> >>>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/codeBlob.cpp:584 >>>>>>> >>>>>>> #1 0x00003fffb758d51c in RelocIterator::initialize >>>>> >>>>> (this=0x3ffdfe3fc928, >>>>>>> >>>>>>> nm=0x3fff607d0c10, begin=0x3fff607d0fac "\001", limit=0x0) at >>>>>>> >>>>> >>>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.cpp:144 >>>>>>> >>>>>>> #2 0x00003fffb6ace56c in RelocIterator::RelocIterator >>>>>>> (this=0x3ffdfe3fc928, nm=0x3fff607d0c10, begin=0x3fff607d0fac "\001", >>>>>>> limit=0x0) at >>>>>>> >>>>> >>>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.hpp:1378 >>>>>>> >>>>>>> #3 0x00003fffb75919dc in >>>>> >>>>> trampoline_stub_Relocation::get_trampoline_for >>>>>>> >>>>>>> (call=0x3fff607d0fac "\001", code=0x3fff607d0c10) at >>>>>>> >>>>> >>>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.cpp:849 >>>>>>> >>>>>>> #4 0x00003fffb741b80c in NativeCall::get_trampoline >>>>> >>>>> (this=0x3fff607d0fac) >>>>>>> >>>>>>> at >>>>>>> >>>>> >>>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/cpu/ppc/vm/nativeInst_ppc.cpp:146 >>>>>>> >>>>>>> As you can see, we actually want to call nmethod::stub_begin() at >>>>>>> relocInfo.cpp:144 >>>>>>> >>>>>>> 142 _section_start[CodeBuffer::SECT_CONSTS] = nm->consts_begin(); >>>>>>> 143 _section_start[CodeBuffer::SECT_INSTS ] = nm->insts_begin() ; >>>>>>> 144 _section_start[CodeBuffer::SECT_STUBS ] = nm->stub_begin() ; >>>>>>> >>>>>>> but we actually end up in SingletonBlob::print_on() which is a >>>>> >>>>> completely >>>>>>> >>>>>>> different method. Notice that the call to nm->consts_begin() before >>>>> >>>>> also >>>>>>> >>>>>>> fails, but it doesn't crash the VM because it happens to call >>>>>>> SingletonBlob::verify() which has no bad side effect. The call to >>>>>>> nm->insts_begin() in line 143 is non-virtual and thus works fine. >>>>>>> Here >>>>> >>>>> are >>>>>>> >>>>>>> the corresponding vtable slots in the CodeBlob vtable for >>>>> >>>>> consts_begin() >>>>>>> >>>>>>> and stub_begin() >>>>>>> >>>>>>> (gdb) p &nmethod::consts_begin >>>>>>> $76 = &virtual table offset 42 >>>>>>> (gdb) p &nmethod::stub_begin >>>>>>> $77 = &virtual table offset 44 >>>>>>> (gdb) p ((*(void ***)nm) + 1)[42] >>>>>>> $86 = (void *) 0x3fffb6c41df8 >>>>>>> (gdb) p ((*(void ***)nm) + 1)[44] >>>>>>> $87 = (void *) 0x3fffb6c41e64 >>>> >>>>> const> >>>>>>> >>>>>>> As you can see, 'nm' is indeed a "CodeBlob" at this point: >>>>>>> >>>>>>> (gdb) p *(void ***)nm >>>>>>> $91 = (void **) 0x3fffb7befa00 >>>>>>> (gdb) p nm->print() >>>>>>> [CodeBlob (0x00003fff607d1090)] >>>>>>> Framesize: 14 >>>>>>> >>>>>>> The offending calls succeeded before your change, because they where >>>>> >>>>> not >>>>>>> >>>>>>> virtual. Any idea how we can fix this with the new class hierarchy? 
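The failure mode described here is standard C++ behaviour: while a base-class constructor - or anything it calls - is running, the dynamic type of the object is the base class, so virtual dispatch uses the base class vtable. A self-contained sketch, unrelated to the HotSpot sources, that shows the effect:

    #include <cstdio>

    struct Base {
      Base() { describe(); }          // runs with Base's vtable installed
      virtual void describe() { std::printf("Base\n"); }
      virtual ~Base() {}
    };

    struct Derived : Base {
      Derived() { describe(); }       // by now the vtable is Derived's
      virtual void describe() { std::printf("Derived\n"); }
    };

    int main() {
      Derived d;                      // prints "Base", then "Derived"
      return 0;
    }

The crash above is nastier than this sketch because the half-constructed object is reached from the outside via CodeCache::find_blob and then downcast to nmethod, so the vtable slot that is hit in the CodeBlob vtable holds an entirely different method - hence ending up in SingletonBlob::print_on().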
>>>>>>> >>>>>>> Regards, >>>>>>> Volker >>>>>>> >>>>>>> [1] >>>>>>> >>>>> >>>>> http://stackoverflow.com/questions/6591859/when-does-the-vptr-pointing-to-vtable-get-initialized-for-a-polymorphic-class >>>>>>> >>>>>>> >>>>>>> >>>>>>> On Thu, Apr 7, 2016 at 5:50 PM, Volker Simonis < >>>>> >>>>> volker.simonis at gmail.com> >>>>>>> >>>>>>> wrote: >>>>>>> >>>>>>>> Hi Rickard, >>>>>>>> >>>>>>>> I'd also like to know what's the rational behind this quite large >>>>>>>> change. Do you expect some performance or memory consumption >>>>>>>> improvements or is this a prerequisite for another change which is >>>>>>>> still to come? >>>>>>>> >>>>>>>> The change itself currently doesn't work on ppc64 (neither on Linux >>>>>>>> nor on AIX). I get the following crash during the build when the >>>>> >>>>> newly >>>>>>>> >>>>>>>> built Hotspot is JIT-compiling java.lang.String::charAt on C1 : >>>>>>>> >>>>>>>> # >>>>>>>> # A fatal error has been detected by the Java Runtime Environment: >>>>>>>> # >>>>>>>> # SIGSEGV (0xb) at pc=0x00001000012a44d0, pid=35331, tid=35404 >>>>>>>> # >>>>>>>> # JRE version: OpenJDK Runtime Environment (9.0) (slowdebug build >>>>>>>> 9-internal+0-2016-04-07-162501.d046063.jdk9-hs-comp) >>>>>>>> # Java VM: OpenJDK 64-Bit Server VM (slowdebug >>>>>>>> 9-internal+0-2016-04-07-162501.d046063.jdk9-hs-comp, mixed mode, >>>>>>>> tiered, compressed oo >>>>>>>> ps, serial gc, linux-ppc64le) >>>>>>>> # Problematic frame: >>>>>>>> # V [libjvm.so+0xf744d0] outputStream::do_vsnprintf_and_write(char >>>>>>>> const*, char*, bool)+0x40 >>>>>>>> # >>>>>>>> # No core dump will be written. Core dumps have been disabled. To >>>>>>>> enable core dumping, try "ulimit -c unlimited" before starting Java >>>>>>>> again >>>>>>>> # >>>>>>>> # If you would like to submit a bug report, please visit: >>>>>>>> # http://bugreport.java.com/bugreport/crash.jsp >>>>>>>> # >>>>>>>> >>>>>>>> --------------- S U M M A R Y ------------ >>>>>>>> >>>>>>>> Command Line: >>>>>>>> >>>>> -Dapplication.home=/sapmnt/ld9510/a/d046063/output-jdk9-hs-comp-dbg/jdk >>>>>>>> >>>>>>>> -Xms8m -XX:+UseSerialGC -Xms32M -Xmx512M -Djdk. >>>>>>>> module.main=jdk.jlink jdk.jlink/jdk.tools.jmod.Main create >>>>>>>> --module-version 9-internal --os-name Linux --os-arch ppc64le >>>>>>>> --os-version >>>>>>>> 2.6 --modulepath >>>>>>>> /priv/d046063/output-jdk9-hs-comp-dbg/images/jmods >>>>>>>> --hash-dependencies .* --exclude **_the.* --libs >>>>>>>> >>>>>>>> >>>>> >>>>> /priv/d046063/output-jdk9-hs-comp-dbg/support/modules_libs-stripped/java.base >>>>>>>> >>>>>>>> --cmds >>>>>>>> >>>>> >>>>> /priv/d046063/output-jdk9-hs-comp-dbg/support/modules_cmds-stripped/java.base >>>>>>>> >>>>>>>> --config >>>>>>>> /priv/d046063/output-jdk9-hs-comp-dbg/support/modules_conf/java.base >>>>>>>> --class-path >>>>> >>>>> /priv/d046063/output-jdk9-hs-comp-dbg/jdk/modules/java.base >>>>>>>> >>>>>>>> /priv/d046063/output-jdk9-hs-comp-dbg/support/jmods/java.base.jmod >>>>>>>> >>>>>>>> Host: ld9510, POWER8E (raw), altivec supported, 48 cores, 61G, # >>>>>>>> Please check /etc/os-release for details about this release. 
>>>>>>>> Time: Thu Apr 7 16:28:55 2016 CEST elapsed time: 0 seconds (0d 0h >>>>> >>>>> 0m 0s) >>>>>>>> >>>>>>>> --------------- T H R E A D --------------- >>>>>>>> >>>>>>>> Current thread (0x000010000429c800): JavaThread "C1 >>>>> >>>>> CompilerThread10" >>>>>>>> >>>>>>>> daemon [_thread_in_vm, id=35404, >>>>>>>> stack(0x000010006a800000,0x000010006ac00000)] >>>>>>>> >>>>>>>> >>>>>>>> Current CompileTask: >>>>>>>> C1: 761 3 3 java.lang.String::charAt (25 bytes) >>>>>>>> >>>>>>>> Stack: [0x000010006a800000,0x000010006ac00000], >>>>>>>> sp=0x000010006abfc6c0, free space=4081k >>>>>>>> Native frames: (J=compiled Java code, j=interpreted, Vv=VM code, >>>>> >>>>> C=native >>>>>>>> >>>>>>>> code) >>>>>>>> V [libjvm.so+0xf744d0] outputStream::do_vsnprintf_and_write(char >>>>>>>> const*, char*, bool)+0x40 >>>>>>>> V [libjvm.so+0xf74668] outputStream::print_cr(char const*, >>>>> >>>>> ...)+0x68 >>>>>>>> >>>>>>>> V [libjvm.so+0x72189c] CodeBlob::print_on(outputStream*) >>>>>>>> const+0x50 >>>>>>>> V [libjvm.so+0x723bdc] RuntimeBlob::print_on(outputStream*) >>>>> >>>>> const+0x40 >>>>>>>> >>>>>>>> V [libjvm.so+0x721eb0] SingletonBlob::print_on(outputStream*) >>>>> >>>>> const+0x4c >>>>>>>> >>>>>>>> V [libjvm.so+0x106d51c] RelocIterator::initialize(CompiledMethod*, >>>>>>>> unsigned char*, unsigned char*)+0x170 >>>>>>>> V [libjvm.so+0x5ae56c] >>>>> >>>>> RelocIterator::RelocIterator(CompiledMethod*, >>>>>>>> >>>>>>>> unsigned char*, unsigned char*)+0x78 >>>>>>>> V [libjvm.so+0x10719dc] >>>>>>>> trampoline_stub_Relocation::get_trampoline_for(unsigned char*, >>>>>>>> nmethod*)+0x78 >>>>>>>> V [libjvm.so+0xefb80c] NativeCall::get_trampoline()+0x110 >>>>>>>> V [libjvm.so+0x1076914] Relocation::pd_call_destination(unsigned >>>>>>>> char*)+0x150 >>>>>>>> V [libjvm.so+0x106f5fc] >>>>>>>> CallRelocation::fix_relocation_after_move(CodeBuffer const*, >>>>>>>> CodeBuffer*)+0x74 >>>>>>>> V [libjvm.so+0x728898] CodeBuffer::relocate_code_to(CodeBuffer*) >>>>>>>> const+0x390 >>>>>>>> V [libjvm.so+0x728404] CodeBuffer::copy_code_to(CodeBlob*)+0x134 >>>>>>>> V [libjvm.so+0x722670] >>>>> >>>>> CodeBuffer::copy_code_and_locs_to(CodeBlob*)+0x84 >>>>>>>> >>>>>>>> V [libjvm.so+0x71f834] CodeBlob::CodeBlob(char const*, >>>>>>>> CodeBlobLayout const&, CodeBuffer*, int, int, OopMapSet*, bool, >>>>>>>> int)+0x320 >>>>>>>> V [libjvm.so+0x7c52c8] CompiledMethod::CompiledMethod(Method*, >>>>>>>> char >>>>>>>> const*, int, int, CodeBuffer*, int, int, OopMapSet*, bool)+0xd8 >>>>>>>> V [libjvm.so+0xf01f58] nmethod::nmethod(Method*, int, int, int, >>>>>>>> CodeOffsets*, int, DebugInformationRecorder*, Dependencies*, >>>>>>>> CodeBuffer*, int, OopMapSet*, ExceptionHandlerTable*, >>>>>>>> ImplicitExceptionTable*, AbstractCompiler*, int)+0xe0 >>>>>>>> V [libjvm.so+0xf01610] nmethod::new_nmethod(methodHandle const&, >>>>>>>> int, int, CodeOffsets*, int, DebugInformationRecorder*, >>>>> >>>>> Dependencies*, >>>>>>>> >>>>>>>> CodeBuffer*, int, OopMapSet*, ExceptionHandlerTable*, >>>>>>>> ImplicitExceptionTable*, AbstractCompiler*, int)+0x2c4 >>>>>>>> V [libjvm.so+0x632970] ciEnv::register_method(ciMethod*, int, >>>>>>>> CodeOffsets*, int, CodeBuffer*, int, OopMapSet*, >>>>>>>> ExceptionHandlerTable*, ImplicitExceptionTable*, AbstractCompiler*, >>>>>>>> bool, bool, RTMState)+0x560 >>>>>>>> V [libjvm.so+0x48ee00] Compilation::install_code(int)+0x264 >>>>>>>> V [libjvm.so+0x48eff8] Compilation::compile_method()+0x184 >>>>>>>> V [libjvm.so+0x48f7a8] Compilation::Compilation(AbstractCompiler*, >>>>>>>> ciEnv*, ciMethod*, int, BufferBlob*, 
DirectiveSet*)+0x288 >>>>>>>> V [libjvm.so+0x4980d0] Compiler::compile_method(ciEnv*, ciMethod*, >>>>>>>> int, DirectiveSet*)+0xc8 >>>>>>>> V [libjvm.so+0x7b188c] >>>>>>>> CompileBroker::invoke_compiler_on_method(CompileTask*)+0x590 >>>>>>>> V [libjvm.so+0x7b07bc] CompileBroker::compiler_thread_loop()+0x310 >>>>>>>> V [libjvm.so+0x11a614c] compiler_thread_entry(JavaThread*, >>>>> >>>>> Thread*)+0xa0 >>>>>>>> >>>>>>>> V [libjvm.so+0x119f3a8] JavaThread::thread_main_inner()+0x1b4 >>>>>>>> V [libjvm.so+0x119f1a4] JavaThread::run()+0x1b8 >>>>>>>> V [libjvm.so+0xf53d90] java_start(Thread*)+0x204 >>>>>>>> C [libpthread.so.0+0x8a64] start_thread+0xf4 >>>>>>>> C [libc.so.6+0x1032a0] clone+0x98 >>>>>>>> >>>>>>>> I haven't identified the exact cause (will analyze it tomorrow) but >>>>>>>> the stack trace indicates that it is indeed related to your changes. >>>>>>>> >>>>>>>> Besides that I have some comments: >>>>>>>> >>>>>>>> codeBuffer.hpp: >>>>>>>> >>>>>>>> 472 CodeSection* insts() { return &_insts; } >>>>>>>> 475 const CodeSection* insts() const { return &_insts; } >>>>>>>> >>>>>>>> - do we really need both versions? >>>>>>>> >>>>>>>> codeBlob.hpp: >>>>>>>> >>>>>>>> 135 nmethod* as_nmethod_or_null() const { return >>>>>>>> is_nmethod() ? (nmethod*) this : NULL; } >>>>>>>> 136 nmethod* as_nmethod() const { >>>>>>>> assert(is_nmethod(), "must be nmethod"); return (nmethod*) this; } >>>>>>>> 137 CompiledMethod* as_compiled_method_or_null() const { return >>>>>>>> is_compiled() ? (CompiledMethod*) this : NULL; } >>>>>>>> 138 CompiledMethod* as_compiled_method() const { >>>>>>>> assert(is_compiled(), "must be compiled"); return (CompiledMethod*) >>>>>>>> this; } >>>>>>>> 139 CodeBlob* as_codeblob_or_null() const { return >>>>>>>> (CodeBlob*) this; } >>>>>>>> >>>>>>>> - I don't like this code. You make the getters 'const' which >>>>>>>> implicitely makes 'this' a "pointer to const" but then the returned >>>>>>>> pointer is a normal pointer to a non-const object and therefore you >>>>>>>> have to statically cast away the "pointer to const" (that's why you >>>>>>>> need the cast even in the case where you return a CodeBlob*). So >>>>>>>> either remove the const qualifier from the method declarations or >>>>> >>>>> make >>>>>>>> >>>>>>>> them return "pointers to const". And by the way, >>>>> >>>>> as_codeblob_or_null() >>>>>>>> >>>>>>>> doesn't seemed to be used anywhere in the code, why do we need it at >>>>>>>> all? >>>>>>>> >>>>>>>> - Why do we need the non-virtual methods is_nmethod() and >>>>>>>> is_compiled() to manually simulate virtual behavior. Why can't we >>>>>>>> simply make them virtual and implement them accordingly in nmathod >>>>> >>>>> and >>>>>>>> >>>>>>>> CompiledMethod? >>>>>>>> >>>>>>>> Regards, >>>>>>>> Volker >>>>>>>> >>>>>>>> On Thu, Apr 7, 2016 at 2:12 PM, Rickard B?ckman >>>>>>>> wrote: >>>>>>>>> >>>>>>>>> Hi, >>>>>>>>> >>>>>>>>> can I please have review for this patch please? >>>>>>>>> >>>>>>>>> So far CodeBlobs have required all the data (metadata, oops, code, >>>>> >>>>> etc) >>>>>>>>> >>>>>>>>> to be in one continuous blob With this patch we are looking to >>>>> >>>>> change >>>>>>>>> >>>>>>>>> that. It's been done by changing offsets in CodeBlob to addresses, >>>>>>>>> making some methods virtual to allow different behavior and also >>>>>>>>> creating a couple of new classes. CompiledMethod now sits inbetween >>>>>>>>> CodeBlob and nmethod. 
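For readers following the review, the shape of the hierarchy being introduced is roughly the following. This is a simplified sketch assembled from the snippets quoted above; only the as_* accessors are copied from the quoted codeBlob.hpp lines, everything else is schematic:

    class CompiledMethod;   // forward declarations
    class nmethod;

    class CodeBlob {
     public:
      // Non-virtual in the webrev (driven by a 'subtype' field); the review
      // question above is whether these should simply be virtual.
      bool is_compiled() const;
      bool is_nmethod() const;

      CompiledMethod* as_compiled_method_or_null() const {
        return is_compiled() ? (CompiledMethod*) this : NULL;
      }
      nmethod* as_nmethod_or_null() const {
        return is_nmethod() ? (nmethod*) this : NULL;
      }
    };

    class RuntimeBlob : public CodeBlob {
      // stubs, adapters and other non-method blobs
    };

    class CompiledMethod : public CodeBlob {
      // state shared by anything holding compiled Java code
    };

    class nmethod : public CompiledMethod {
      // an ordinary JIT-compiled Java method
    };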
>>>>>>>>> >>>>>>>>> CR: https://bugs.openjdk.java.net/browse/JDK-8152664 >>>>>>>>> Webrev: http://cr.openjdk.java.net/~rbackman/8152664/ >>>>>>>>> >>>>>>>>> Thanks >>>>>>>>> /R > > From sgehwolf at redhat.com Mon Apr 18 09:40:36 2016 From: sgehwolf at redhat.com (Severin Gehwolf) Date: Mon, 18 Apr 2016 11:40:36 +0200 Subject: RFR(XS): 8153275: Zero JVM fails to initialize after JDK-8152440 In-Reply-To: <1716616804.19704221.1459969173210.JavaMail.zimbra@redhat.com> References: <1459791895.3762.14.camel@redhat.com> <57037BFB.1060606@redhat.com> <1716616804.19704221.1459969173210.JavaMail.zimbra@redhat.com> Message-ID: <1460972436.3790.20.camel@redhat.com> Hi, On Wed, 2016-04-06 at 14:59 -0400, Andrew Hughes wrote: > ----- Original Message ----- > > > > On 04/04/16 18:44, Severin Gehwolf wrote: > > > > > > Hi, > > > > > > Could somebody please sponsor and review the following Zero-only > > > fix? > > > The fix for JDK-8152440 was incorrect in that it set the value > > > for InitArrayShortSize to an illegal value (-1) failing > > > constraint > > > validation. Albeit not being used it must still pass constraint > > > validation. Otherwise, the JVM fails to initialize and all bets > > > are > > > off. Thoughts? > > > > > > Bug: https://bugs.openjdk.java.net/browse/JDK-8153275 > > > webrev: http://cr.openjdk.java.net/~sgehwolf/webrevs/JDK-8153275/ > > > webrev.01/ > > OK, but please make the comment clearer.??I didn't understand it. > > > > ?"the allowed range [ 0 ... 9223372036854775807 ]" > > > > is much clearer. > > > > Andrew. > > > > > Maybe using 2^63-1 for the max range would be even clearer? Or > 0x7FFFFFFFFFFFFFFF, which is what is used in globalDefinitions.hpp. > > It's also wrong for 32-bit architectures; the range there is 0 ... > 0x7FFFFFFF > or 0 ... 2^31-1. > > From globalDefinitions.hpp: > > typedef intptr_t??intx; > const intx??min_intx??= (intx)1 << (sizeof(intx)*BitsPerByte-1); > const intx??max_intx??= (uintx)min_intx - 1; Thanks for the review! I've avoided adding the actual range values in the comment altogether and hopefully this is better: http://cr.openjdk.java.net/~sgehwolf/webrevs/JDK-8153275/webrev.03/ Cheers, Severin From stefan.karlsson at oracle.com Mon Apr 18 09:41:51 2016 From: stefan.karlsson at oracle.com (Stefan Karlsson) Date: Mon, 18 Apr 2016 11:41:51 +0200 Subject: RFR: 8067785: Using AlwaysPreTouch does not always touch all pages In-Reply-To: <5714A8A3.2080109@oracle.com> References: <5710F714.5070601@oracle.com> <5714A8A3.2080109@oracle.com> Message-ID: <5714ABDF.2060709@oracle.com> Thanks, Per. StefanK On 2016-04-18 11:28, Per Liden wrote: > Hi Stefan, > > On 2016-04-15 16:13, Stefan Karlsson wrote: >> Hi all, >> >> Please review this patch that ensures that VirtualSpace::expand_by >> pre-touches all committed memory. >> >> http://cr.openjdk.java.net/~stefank/8067785/webrev.01 >> https://bugs.openjdk.java.net/browse/JDK-8067785 > > Looks good. > > cheers, > Per > >> >> 1) Previously, we pre-touched the memory between the old and new _high >> pointers. Since the _high variable isn't page aligned, the pre-touch >> code didn't touch all committed pages. I've moved the pre-touch code to >> be done for every os::commit_memory call in VirtuaSpace::expand_by. >> >> 2) expand_by has segregated the VirtualSpace into three segements. >> [small pages | large pages | small pages] so that we can have >> VirtualSpaces that are not large page size aligned. 
Previously, only the >> middle section called commit_memory with an alignment hint, and the >> small pages sections called commit_memory without a small pages hint. On >> all platforms, except Solaris, this boils down to the same code. On >> Solaris we have this additional code executed: >> size_t page_size = page_size_for_alignment(alignment_hint); >> if (page_size > (size_t) vm_page_size()) { >> (void)Solaris::setup_large_pages(addr, bytes, page_size); >> } >> >> But since the alignment_hint is set to vm_page_size we won't try to >> setup_large_pages here either. >> >> 3) The patch also contains a few style changes to make the >> VirtualSpace::expand_by easier to read (at least for me) >> >> Tested with JPRT and -XX:+ExecuteInternalVMTests with AlwaysPreTouch >> temporarily forced to true. >> >> Thanks, >> StefanK From sgehwolf at redhat.com Mon Apr 18 09:42:09 2016 From: sgehwolf at redhat.com (Severin Gehwolf) Date: Mon, 18 Apr 2016 11:42:09 +0200 Subject: RFR(XS): 8153275: Zero JVM fails to initialize after JDK-8152440 In-Reply-To: <571004AD.1010305@oracle.com> References: <1459791895.3762.14.camel@redhat.com> <57037BFB.1060606@redhat.com> <1459848614.4486.13.camel@redhat.com> <1459931888.3613.10.camel@redhat.com> <571004AD.1010305@oracle.com> Message-ID: <1460972529.3790.22.camel@redhat.com> On Thu, 2016-04-14 at 16:59 -0400, Coleen Phillimore wrote: > Hi,??I've hit this bug and reviewed it and will sponsor it. Thanks Coleen! An exported changeset of the latest patch is here: http://cr.openjdk.java.net/~sgehwolf/webrevs/JDK-8153275/JDK-8153275-jdk9-hotspot.export.patch Cheers, Severin > Thanks, > Coleen > > > On 4/6/16 4:38 AM, Severin Gehwolf wrote: > > > > On Tue, 2016-04-05 at 11:30 +0200, Severin Gehwolf wrote: > > > > > > On Tue, 2016-04-05 at 09:48 +0100, Andrew Haley wrote: > > > > > > > > On 04/04/16 18:44, Severin Gehwolf wrote: > > > > > > > > > > > > > > > Hi, > > > > > > > > > > Could somebody please sponsor and review the following Zero- > > > > > only > > > > > fix? > > > > > The fix for JDK-8152440 was incorrect in that it set the > > > > > value > > > > > for InitArrayShortSize to an illegal value (-1) failing > > > > > constraint > > > > > validation. Albeit not being used it must still pass > > > > > constraint > > > > > validation. Otherwise, the JVM fails to initialize and all > > > > > bets > > > > > are > > > > > off. Thoughts? > > > > > > > > > > Bug: https://bugs.openjdk.java.net/browse/JDK-8153275 > > > > > webrev: http://cr.openjdk.java.net/~sgehwolf/webrevs/JDK-8153 > > > > > 275/ > > > > > webrev.01/ > > > > OK, but please make the comment clearer.??I didn't understand > > > > it. > > > > > > > > ? "the allowed range [ 0 ... 9223372036854775807 ]" > > > > > > > > is much clearer. > > > Thanks for the review! > > > > > > Updated webrev: > > > http://cr.openjdk.java.net/~sgehwolf/webrevs/JDK-8153275/webrev.0 > > > 2/ > > Could somebody sponsor this please? > > > > Thanks, > > Severin From shafi.s.ahmad at oracle.com Mon Apr 18 09:56:47 2016 From: shafi.s.ahmad at oracle.com (Shafi Ahmad) Date: Mon, 18 Apr 2016 02:56:47 -0700 (PDT) Subject: [8u] RFR: JDK-8055530 - assert(_exits.control()->is_top() || !_gvn.type(ret_phi)->empty()) failed: return value must be well defined Message-ID: Hi, Please review the backport of bug: "JDK-8055530 - assert(_exits.control()->is_top() || !_gvn.type(ret_phi)->empty()) failed: return value must be well defined" to jdk8u. There is a single line conflict. 
Webrev: http://cr.openjdk.java.net/~csahu/8055530/webrev.00/ Jdk9 bug: https://bugs.openjdk.java.net/browse/JDK-8055530 Original patch pushed to jdk9: http://hg.openjdk.java.net/jdk9/jdk9/hotspot/rev/7dfa629d203a Test: Run jprt. Regards, Shafi From stefan.karlsson at oracle.com Mon Apr 18 10:04:47 2016 From: stefan.karlsson at oracle.com (Stefan Karlsson) Date: Mon, 18 Apr 2016 12:04:47 +0200 Subject: RFR: 8017629: G1: UseSHM in combination with a G1HeapRegionSize > os::large_page_size() falls back to use small pages In-Reply-To: References: <570B8481.8010108@oracle.com> <570BAC1B.7040806@oracle.com> <570D1747.2020508@oracle.com> <570E2C45.7090201@oracle.com> Message-ID: <5714B13F.7080307@oracle.com> Hi Thomas, I discussed the code with Per and updated the names and changed the code slightly. http://cr.openjdk.java.net/~stefank/8017629/webrev.03.delta http://cr.openjdk.java.net/~stefank/8017629/webrev.03 1) shmat_with_large_alignment was renamed to shmat_with_alignment and all references to large pages were removed. 2) shmat_with_normal_alignment was renamed to shmat_at_address and all references to pages sizes were removed. 3) shmat_with_alignment was renamed to shmat_large_pages and all large pages specific code were kept in that function. 4) shmat_large_pages was restructured to have one section for the req_addr != NULL case, and another section for req_addr == NULL. I know that you suggested to call shmat_with_alignment (previously shmat_with_normal_alignment) for both cases in the req_addr == NULL section, but I would like to only have to use shmat_with_alignment when it's really necessary. Thanks, StefanK On 2016-04-13 15:59, Thomas St?fe wrote: > Hi Stefan, > > On Wed, Apr 13, 2016 at 1:23 PM, Stefan Karlsson > > wrote: > > Hi Thomas, > > > On 2016-04-13 12:44, Thomas St?fe wrote: >> Hi Stefan, >> >> On Tue, Apr 12, 2016 at 5:41 PM, Stefan Karlsson >> > >> wrote: >> >> Hi Thomas, >> >> >> On 2016-04-12 16:23, Thomas St?fe wrote: >>> Hi Stefan, >>> >>> >>> On Mon, Apr 11, 2016 at 3:52 PM, Stefan Karlsson >>> >> > wrote: >>> >>> Hi Thomas, >>> >>> On 2016-04-11 14:39, Thomas St?fe wrote: >>>> Hi Stefan, >>>> >>>> short question, why the mmap before the shmat? Why not >>>> shmat right away at the requested address? >>> >>> If we have a requested_address we do exactly what you >>> propose. >>> >>> if (req_addr == NULL && alignment > >>> os::large_page_size()) { >>> return shmat_with_large_alignment(shmid, bytes, >>> alignment); >>> } else { >>> return shmat_with_normal_alignment(shmid, req_addr); >>> } >>> >>> ... >>> >>> static char* shmat_with_normal_alignment(int shmid, >>> char* req_addr) { >>> char* addr = (char*)shmat(shmid, req_addr, 0); >>> >>> if ((intptr_t)addr == -1) { >>> shm_warning_with_errno("Failed to attach shared memory."); >>> return NULL; >>> } >>> >>> return addr; >>> } >>> >>> >>> It's when you don't have a requested address that mmap >>> is used to find a large enough virtual memory area. >>> >>> >>> Sorry, seems I did not look at this coding thoroughly >>> enough. I understand now that you do mmap to allocate and >>> then to cut away the extra pre-/post-space, something which >>> would not be possible with shmat, which cannot be unmapped >>> page-wise. 
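For anyone who has not seen the trick before, the mmap-before-shmat approach under discussion can be sketched as below. This is an illustration of the idea only, not the code in the webrev: error handling, the SHM_HUGETLB segment setup and the exact alignment checks are left out, and SHM_REMAP is Linux-specific.

    #include <sys/mman.h>
    #include <sys/shm.h>
    #include <stddef.h>
    #include <stdint.h>

    // Reserve an 'alignment'-aligned range of virtual addresses by
    // over-reserving with an anonymous mapping and trimming the excess.
    static char* reserve_aligned_va(size_t bytes, size_t alignment) {
      size_t extra = bytes + alignment;
      char* base = (char*) mmap(NULL, extra, PROT_NONE,
                                MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
      if (base == MAP_FAILED) {
        return NULL;
      }
      char* aligned = (char*) (((uintptr_t) base + alignment - 1) & ~(uintptr_t) (alignment - 1));
      if (aligned > base) {
        munmap(base, aligned - base);                         // trim the head
      }
      char* end = base + extra;
      if (aligned + bytes < end) {
        munmap(aligned + bytes, end - (aligned + bytes));     // trim the tail
      }
      return aligned;
    }

    // Attach the SysV segment on top of the reservation. SHM_REMAP lets shmat
    // replace the placeholder mapping; the attach address must still satisfy
    // the SHMLBA (and, for huge pages, the large page size) alignment rules.
    static char* shmat_over_reservation(int shmid, size_t bytes, size_t alignment) {
      char* aligned = reserve_aligned_va(bytes, alignment);
      if (aligned == NULL) {
        return NULL;
      }
      char* addr = (char*) shmat(shmid, aligned, SHM_REMAP);
      // On failure the placeholder reservation is left behind - the leak that
      // is discussed later in this thread.
      return (addr == (char*) -1) ? NULL : addr;
    }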
>>> >>> But I am still not sure why we do it his way: >>> >>> 3429 static char* shmat_with_alignment(int shmid, size_t >>> bytes, size_t alignment, char* req_addr) { >>> 3430 // If there's no requested address, the shmat call >>> can return memory that is not >>> 3431 // 'alignment' aligned, if the given alignment is >>> larger than the large page size. >>> 3432 // Special care needs to be taken to ensure that we >>> get aligned memory back. >>> 3433 if (req_addr == NULL && alignment > >>> os::large_page_size()) { >>> 3434 return shmat_with_large_alignment(shmid, bytes, >>> alignment); >>> 3435 } else { >>> 3436 return shmat_with_normal_alignment(shmid, req_addr); >>> 3437 } >>> 3438 } >>> >>> For req_addr==0 and big alignment, we attach at the given >>> alignment ("shmat_with_large_alignment"). >>> For req_addr!=0, we attach at the given requested address >>> ("shmat_with_normal_alignment"). >>> For req_addr==0 and smaller alignment, we ignore the >>> alignment and attach anywhere? >>> >>> Maybe I am slow, but why does it matter if the alignment is >>> large or small? Why not just distinguish between: >>> >>> 1) address given (req_addr!=0): in this case we attach at >>> this req_addr and rely on the user having aligned the >>> address properly for his purposes. We specify 0 for flags, >>> so we will attach at exactly the given address or fail. In >>> this case we could simply ignore the given alignment - if >>> one was given - or just use it to counter-check the req_addr. >>> >>> 2) alignment given (req_addr==0 and alignment > 0): attach >>> at the given alignment using mmap-before-shmat. This could >>> be done for any alignment, be it large or small. >> >> What you propose doesn't work. >> >> We're allocating large pages with SHM_HUGETLB, and if we try >> to attach to an address that is not large_page_size aligned >> the shmat call returns EINVAL. >> >> >> I was aware of this. What I meant was: >> >> You have "shmat_with_large_alignment" which takes an alignment >> and does its best to shmat with that alignment using the mmap >> trick. This coding does not need to know anything about huge >> pages, and actually does not do anything huge-pagey, apart from >> the asserts - it would just as well work with small pages, >> because the only place where the code needs to know about huge >> pages is in the layer above, in reserve_memory_special - where we >> pass SHM_HUGETLB to shmget. (Btw, I always wondered about the >> "reserve_memory_special" naming.) >> >> I think my point is that by renaming this to >> "shmat_with_alignment" and removing the huge-page-related asserts >> the function would become both simpler and more versatile and >> could be reused for small alignments as well as large ones. The >> fact that it returns EINVAL for alignments instead of asserting >> would not be a problem - we would return an error instead of >> asserting because of bad alignment, and both handling this error >> and asserting for huge-page-alignment could be handled better in >> reserve_memory_special. >> >> To put it another way, I think "shmat_with_large_alignment" does >> not need to know about huge pages; this should be the >> responsibility of reserve_memory_special. >> >> About "shmat_with_normal_alignment", this is actually only a raw >> shmat call and exists for the req_addr!=NULL case and for the >> case where we do not pass neither req_addr nor alignment. 
So the >> only thing it does not handle is alignment, so it is misnamed and >> also should not be called for the >> req_addr==NULL-and-small-alignments-case. > > The reserve_memory_special_shm function and the associated helper > functions I'm adding are specifically written to support large > pages allocations. The names "normal_alignment" and > "large_alignment" are intended to refer to alignment sizes > compared to the large pages size. I grant you that it's not > obvious from the name, and we can rename them to make it more clear. > > I want to provide a small bug fix for this large pages bug, while > you are suggesting that we re-purpose the code into supporting > small page allocations as well. Your suggestions might be good, > but may I suggest that you create a patch and an RFE that > motivates why we should make this code more generic to support > small pages as well? > > Thanks, > StefanK > > > Ok, we can do that. I was just worried that the code becomes more > difficult to understand. But lets wait for some more reviews. > > Kind Regards, Thomas > > >>> >>> Functions would become simpler and also could be clearer >>> named (e.g. "shmat_at_address" and "shmat_with_alignment", >>> respectivly). >> >> Maybe I should rename the functions to make it more obvious >> that these are large pages specific functions? >> >>> >>> ---- >>> >>> This: >>> >>> 3402 if ((intptr_t)addr == -1) { >>> 3403 shm_warning_with_errno("Failed to attach shared memory."); >>> 3404 // Since we don't know if the kernel unmapped the >>> pre-reserved memory area >>> 3405 // we can't unmap it, since that would potentially >>> unmap memory that was >>> 3406 // mapped from other threads. >>> 3407 return NULL; >>> 3408 } >>> >>> seems scary. Means for every call this happens, we leak the >>> reserved (not committed) address space? >> >> Yes, that's unfortunate. >> >> An alternative would be to use this sequence: >> 1) Use anon_mmap_aligned to find a suitable VA range >> 2) Immediately unmap the VA range >> 3) Try to attach at that VA range _without_ SHM_REMAP >> >> That would remove the risk of leaking the reserved address >> space, but instead we risk failing at (3) if another thread >> manages to allocate memory inside the found VA range. This >> will cause some users to unnecessarily fail to get large >> pages, though. We've had other problems when pre-existing >> threads used mmap while we were initializing the VM. See: >> JDK-8007074. >> >> >> Yes; btw you also could do this with shmget/shmat instead of mmap. >> >> Note that similar unclean tricks are already done in other >> places, see e.g. the windows version of >> os::pd_split_reserved_memory(). Which deals with VirtualAlloc() >> being unable, like shmget, to deallocate piece-wise. >> >> >> >>> For most cases (anything but ENOMEM, actually) could we at >>> least assert?: >>> >>> EACCES - should not happen: we created the shared memory and >>> are its owner >>> EIDRM - should not happen. >>> EINVAL - should not happen. (you already check now the >>> attach address for alignment to SHMLBA, so this is covered) >> >> Sure. I'll add asserts for these. >> >>> >>> --- >>> >>> Smaller nits: >>> >>> Functions called "shmat_..." suggest shmat-like behaviour, >>> so could we have them return -1 instead of NULL in case of >>> error? >> >> That would add clutter to the reserve_memory_special_shm, and >> it might also suggest that it would be OK to check errno for >> the failure reason, which probably wouldn't work. 
I'll let >> other Reviewers chime in and help decide if we should change >> this. >> >> >> You are right. If one returns -1, one would have to preserve >> errno for the caller too. >> >> Thanks for reviewing this, >> StefanK >> >> >> You are welcome! >> >> Kind Regards, Thomas >> >> >> >>> >>> Kind Regards, Thomas >>> >>>> >>>> Also note that mmap- and shmat-allocated memory may >>>> have different alignment requirements: mmap requires a >>>> page-aligned request address, whereas shmat requires >>>> alignment to SHMLBA, which may be multiple pages (e.g. >>>> for ARM: >>>> http://lxr.free-electrons.com/source/arch/arm/include/asm/shmparam.h#L9). >>>> So, for this shat-over-mmap trick to work, request >>>> address has to be aligned to SHMLBA, not just page size. >>>> >>>> I see that you assert alignment of requ address to >>>> os::large_page_size(), which I would assume is a >>>> multiple of SHMLBA, but I am not sure of this. >>> >>> I've added some defensive code and asserts to catch this >>> if/when this assumption fails: >>> >>> http://cr.openjdk.java.net/~stefank/8017629/webrev.02.delta/ >>> >>> http://cr.openjdk.java.net/~stefank/8017629/webrev.02 >>> >>> >>> I need to verify that this works on other machines than >>> my local Linux x64 machine. >>> >>> Thanks, >>> StefanK >>> >>>> >>>> Kind Regards, Thomas >>>> >>>> >>>> >>>> On Mon, Apr 11, 2016 at 1:03 PM, Stefan Karlsson >>>> >>> > wrote: >>>> >>>> Hi all, >>>> >>>> Please review this patch to enable SHM large page >>>> allocations even when the requested alignment is >>>> larger than os::large_page_size(). >>>> >>>> http://cr.openjdk.java.net/~stefank/8017629/webrev.01 >>>> >>>> https://bugs.openjdk.java.net/browse/JDK-8017629 >>>> >>>> G1 is affected by this bug since it requires the >>>> heap to start at an address that is aligned with >>>> the heap region size. The patch fixes this by >>>> changing the UseSHM large pages allocation code. >>>> First, virtual memory with correct alignment is >>>> pre-reserved and then the large pages are attached >>>> to this memory area. >>>> >>>> Tested with vm.gc.testlist and ExecuteInternaVMTests >>>> >>>> Thanks, >>>> StefanK >>>> >>>> >>> >>> >> >> > > From adinn at redhat.com Mon Apr 18 10:25:39 2016 From: adinn at redhat.com (Andrew Dinn) Date: Mon, 18 Apr 2016 11:25:39 +0100 Subject: RFR (XL): 8152664 - Support non-continuous CodeBlobs in HotSpot In-Reply-To: References: <20160407121221.GQ9504@rbackman> <20160411090501.GS9504@rbackman> <20160411115521.GU9504@rbackman> <20160413133128.GV9504@rbackman> Message-ID: <5714B623.7010409@redhat.com> Hi Volker, Just letting you know I am currently checking this on AArch64 regards, Andrew Dinn ----------- Senior Principal Software Engineer Red Hat UK Ltd Registered in UK and Wales under Company Registration No. 3798903 Directors: Michael Cunningham (US), Michael O'Neill (Ireland), Paul Argiry (US) From shafi.s.ahmad at oracle.com Mon Apr 18 10:26:04 2016 From: shafi.s.ahmad at oracle.com (Shafi Ahmad) Date: Mon, 18 Apr 2016 03:26:04 -0700 (PDT) Subject: [8u] RFR: JDK-8141551: C2 can not handle returns with incompatible interface arrays In-Reply-To: References: Message-ID: <524f6be3-1639-4718-b099-6c873f3dbbd8@default> Hi, Please review the backport of bug: "JDK-8141551: C2 can not handle returns with incompatible interface arrays" to jdk8u. Please note the backport is not clean. 
Webrev: http://cr.openjdk.java.net/~rpatil/8141551/webrev.00/ Jdk9 bug: https://bugs.openjdk.java.net/browse/JDK-8141551 Original patch pushed to jdk9: http://hg.openjdk.java.net/jdk9/jdk9/hotspot/rev/b425a78e8512 Test: Run jprt. Regards, Shafi From lois.foltan at oracle.com Mon Apr 18 11:31:08 2016 From: lois.foltan at oracle.com (Lois Foltan) Date: Mon, 18 Apr 2016 07:31:08 -0400 Subject: RFR (S): JDK-8152949: Jigsaw crash when Klass in _fixup_module_field_list is unloaded In-Reply-To: <5714876C.2050207@oracle.com> References: <570FEFAB.2070809@oracle.com> <570FFC4F.7020000@oracle.com> <5710E397.3020106@oracle.com> <5710E86F.3090606@oracle.com> <57111EBC.4000308@oracle.com> <571144ED.1050902@oracle.com> <5714876C.2050207@oracle.com> Message-ID: <5714C57C.1000109@oracle.com> On 4/18/2016 3:06 AM, Stefan Karlsson wrote: > On 2016-04-15 21:45, Alan Bateman wrote: >> >> On 15/04/2016 18:02, Lois Foltan wrote: >>> >>> Hi Stefan, >>> >>> In start up before module system initialization in complete I >>> believe the VM is single threaded, so the increment/decrement >>> reference counts do not need to be atomic. Adding it is a defensive >>> move in case the reference count is ever used passed start up in the >>> future. It kind of does seem a bit excessive, sounds like you agree? >> There will be a number of threads running before the base module is >> defined to the VM. As things stand the the java threads at this point >> will be the Common-Cleaner, Finalizer, Reference Handler and Signal >> Handler. > > So, are you saying that we need the atomics? > > The java_lang_Class::create_mirror function isn't multi-thread safe, > and must already be guarded by a lock (SystemDictionary_lock AFAICT). > The increment in Unsafe_DefineAnonymousClass0, will only be done once, > for the single InstanceKlass instance in the CLD. And all reads of > _keep_alive from the GC are done during safepoints. The anonymous class is inserted in the fixup mirror and fixup module lists during java_lang_Class::create_mirror() before it is made public or "published" as loaded. So the two instances where the reference count is incremented, Unsafe_DefineAnonymousClass0 and in java_lang_Class::create_mirror(), are guarded by a lock as well as the decrement in Unsafe_DefineAnonymousClass0. No other thread has access to the class during this time, as it is being loaded. > > How does ModuleEntryTable::patch_javabase_entries guard against > concurrent inserts into the _fixup_module_field_list list? That leaves the decrement in ModuleEntryTable::patch_javabase_entries() as possibly unguarded. This only occurs when the VM is called to define the module java.base. I believe this should be okay but will double check. Thanks, Lois > > thanks, > StefanK > > >> >> -Alan > From david.holmes at oracle.com Mon Apr 18 12:03:51 2016 From: david.holmes at oracle.com (David Holmes) Date: Mon, 18 Apr 2016 22:03:51 +1000 Subject: RFR: JDK-8152666: The new Hotspot Build System In-Reply-To: <57148C89.709@oracle.com> References: <56F4F0EE.1040508@oracle.com> <5703FF7C.6010309@oracle.com> <5704D28E.1060304@oracle.com> <57136773.5080901@oracle.com> <57148C89.709@oracle.com> Message-ID: <5714CD27.90907@oracle.com> On 18/04/2016 5:28 PM, Erik Joelsson wrote: > > > On 2016-04-17 12:37, David Holmes wrote: >> I missed all the fun while I was on vacation and have still not yet >> had a chance to look at the actual code, but have one query ... > Yeah, bad timing, but time was also running out. There is still time for > adjustments of course. 
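Put differently, at the source level a tiered JVM is just one in which both conditional-compilation macros are defined. A trivial, purely illustrative sketch (the INCLUDE_TIERED name below is invented for the example and is not a real build macro):

    // A tiered build is compiled with both -DCOMPILER1 and -DCOMPILER2;
    // a client-only or server-only build defines just one of the two.
    #if defined(COMPILER1) && defined(COMPILER2)
      #define INCLUDE_TIERED 1   // C1 and C2 both built in
    #elif defined(COMPILER2)
      #define INCLUDE_TIERED 0   // "server": C2 only
    #elif defined(COMPILER1)
      #define INCLUDE_TIERED 0   // "client": C1 only
    #else
      #define INCLUDE_TIERED 0   // interpreter-only (e.g. core or zero)
    #endif

In the new build the same end state is reached by listing both compiler1 and compiler2 in a variant's feature set (or via --with-jvm-features for a custom variant), rather than by a dedicated tiered make variable.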
>> >> On 6/04/2016 7:10 PM, Erik Joelsson wrote: >>> Hello Dan and thank you for the review! I know it's a lot to chew >>> through. >>> >>> I have incorporated your changes and published a new webrev: >>> http://cr.openjdk.java.net/~erikj/8152666/webrev.02/ >>> >>> On 2016-04-05 20:10, Daniel D. Daugherty wrote: >>>> >>>> >>>> > The new build supports the following variants: >>>> > >>>> > * server (C1+C2) >>>> >>>> The above "server" variant is the "tiered server". Does the new >>>> build system support the "C2 server" variant? What about the >>>> 32-bit server and 64-bit server build variants? For example, >>>> on Linux you can have: >>>> >>>> * C1/Client, 32-bit >>>> * C2/Server, 32-bit >>>> * Tiered (C1 & C2), 32-bit >>>> * C2/Server, 64-bit >>>> * Tiered (C1 + C2), 64-bit >>>> >>>> The above wide range of variants is also true for Win*. >>>> >>> There is a way to achieve this even if it's not as straight forward. >>> It's controlled through the new "jvm-feature" setting. To build a >>> completely custom set of features for a jvm, you set the >>> --with-jvm-variants=custom and then define the full feature set using >>> --with-jvm-features=compiler2,... For "server, client, core, minimal, >>> zero and zeroshark" there is a predefined set of features while the >>> custom variant has no features by default. >> >> It isn't clear to me how Tiered fits into this scheme. Even in the old >> hotspot build Tiered is managed as a variant of building C2 with >> another make variable controlling tiered or non-tiered. >> > In the new build, tiered is simply defined as enabling both the > compiler1 and the compiler2 "features". If you look in the old build > tiered.make, the only thing it does is setting both -DCOMPILER2 and > -DCOMPILER1 on CFLAGS. From what I can tell, there is no other > meaningful way of interpreting enabling both c1 and c2 in the same jvm. There was also Variant=tiered and the FORCED_TIERED variable - but yes the only time COMPILER1 and COMPILER2 are defined is when building tiered. I'll need to examine this bit closer. Thanks, David >> It is also unclear to me what a "jvm variant" is if compiler2 etc are >> all considered features - is a variant a predefined set of features? >> > Yes, that is essentially what a variant is, but it's also in part a way > of naming a particular jvm, since we can sometimes build multiple > variants in the same build. In theory we could enhance the configure UI > for this to be able to arbitrarily define any number of variants to be > built together, name them arbitrary things and define an arbitrary set > of features for each one. The build itself would support such a scheme > at this point (the internal variables are general enough), but in > reality, I don't think we are really interested in supporting that. Also > the source code is certainly not setup to handle any combination of > features today. > > What configure supports now is picking certain valid combinations of > variants to build. Enable or disable certain features through > specialized parameters. Enabling certain other features through the > --with-jvm-features argument. Note that --with-jvm-features will apply > to all variants currently being built. Finally there is the custom > variant, which by default has no features, where you may create your own > special combination if you really want to. 
> > /Erik > From matthias.baesken at sap.com Mon Apr 18 12:05:19 2016 From: matthias.baesken at sap.com (Baesken, Matthias) Date: Mon, 18 Apr 2016 12:05:19 +0000 Subject: RFR: os_linux.cpp parse_os_info gives non descriptive output on current SLES releases Message-ID: Hello , the current implementation of the parse_os_info-function in os_linux.cpp gets the last line of a Linux-distro related file to provide a meaningful OS version string. However the information provided currently on SuSE Linux (SLES) is not very descriptive, it currently uses /etc/lsb-release and gives : more /etc/lsb-release LSB_VERSION="core-2.0-noarch:core-3.2-noarch:core-4.0-noarch:core-2.0-x86_64:core-3.2-x86_64:core-4.0-x86_64" So I suggest to use /etc/SuSE-release instead, which gives a good information for SLES 9 - 12 in the ***first line*** of /etc/SuSE-release : Example SLES11 : more /etc/SuSE-release SUSE Linux Enterprise Server 11 (x86_64) VERSION = 11 PATCHLEVEL = 3 (this is similar to using /etc/redhat-release on Red Hat with the difference that the ***first line*** has the relevant info). Additionally, /etc/os-release needs some special handling as well, because the meaningful OS-release description string is not always the last line of the file but in the line containing the information PRETTY_NAME=... See also : https://www.freedesktop.org/software/systemd/man/os-release.html Example from Ubuntu 14 : $ more /etc/os-release ... PRETTY_NAME="Ubuntu 14.04.3 LTS" ... It might also be a good idea to place /etc/os-release higher in the distro_files list, but I do not have access to turbolinux / gentoo to check the situation on these distros. Regards, Matthias Diff : --- a/src/os/linux/vm/os_linux.cpp Fri Apr 15 16:19:15 2016 +0100 +++ b/src/os/linux/vm/os_linux.cpp Mon Apr 18 13:54:04 2016 +0200 @@ -2013,8 +2013,8 @@ // their own specific XXX-release file as well as a redhat-release file. // Because of this the XXX-release file needs to be searched for before the // redhat-release file. -// Since Red Hat has a lsb-release file that is not very descriptive the -// search for redhat-release needs to be before lsb-release. +// Since Red Hat and SuSE have an lsb-release file that is not very descriptive the +// search for redhat-release / SuSE-release needs to be before lsb-release. // Since the lsb-release file is the new standard it needs to be searched // before the older style release files. // Searching system-release (Red Hat) and os-release (other Linuxes) are a @@ -2031,8 +2031,8 @@ "/etc/mandrake-release", "/etc/sun-release", "/etc/redhat-release", + "/etc/SuSE-release", "/etc/lsb-release", - "/etc/SuSE-release", "/etc/turbolinux-release", "/etc/gentoo-release", "/etc/ltib-release", @@ -2065,11 +2065,36 @@ static void parse_os_info(char* distro, size_t length, const char* file) { FILE* fp = fopen(file, "r"); if (fp != NULL) { + // SuSE-release : first line is interesting + // os-release : PRETTY_NAME= line is interesting + // (might be at different locations in the file) char buf[256]; - // get last line of the file. 
- while (fgets(buf, sizeof(buf), fp)) { } + int lcnt = 0; + bool is_etc_suserelease = false; + bool is_etc_osrelease = false; + if (strcmp(file, "/etc/SuSE-release") == 0) { + is_etc_suserelease = true; + } + if (strcmp(file, "/etc/os-release") == 0) { + is_etc_osrelease = true; + } + + // get last line of the file or + // other interesting line on SUSE / os-release + while (fgets(buf, sizeof(buf), fp)) { + if (lcnt == 0 && is_etc_suserelease) { + break; + } + if (is_etc_osrelease) { + if (strstr(buf, "PRETTY_NAME=") != NULL) { + break; + } + } + lcnt++; + } + // Edit out extra stuff in expected ubuntu format - if (strstr(buf, "DISTRIB_DESCRIPTION=") != NULL) { + if (strstr(buf, "DISTRIB_DESCRIPTION=") != NULL || strstr(buf, "PRETTY_NAME=") != NULL) { char* ptr = strstr(buf, "\""); // the name is in quotes if (ptr != NULL) { ptr++; // go beyond first quote From volker.simonis at gmail.com Mon Apr 18 12:08:38 2016 From: volker.simonis at gmail.com (Volker Simonis) Date: Mon, 18 Apr 2016 14:08:38 +0200 Subject: RFR (XL): 8152664 - Support non-continuous CodeBlobs in HotSpot In-Reply-To: <5714B623.7010409@redhat.com> References: <20160407121221.GQ9504@rbackman> <20160411090501.GS9504@rbackman> <20160411115521.GU9504@rbackman> <20160413133128.GV9504@rbackman> <5714B623.7010409@redhat.com> Message-ID: Thanks Andrew! Volker On Mon, Apr 18, 2016 at 12:25 PM, Andrew Dinn wrote: > Hi Volker, > > Just letting you know I am currently checking this on AArch64 > > regards, > > > Andrew Dinn > ----------- > Senior Principal Software Engineer > Red Hat UK Ltd > Registered in UK and Wales under Company Registration No. 3798903 > Directors: Michael Cunningham (US), Michael O'Neill (Ireland), Paul > Argiry (US) From volker.simonis at gmail.com Mon Apr 18 12:39:26 2016 From: volker.simonis at gmail.com (Volker Simonis) Date: Mon, 18 Apr 2016 14:39:26 +0200 Subject: [8u] RFR: JDK-8141551: C2 can not handle returns with incompatible interface arrays In-Reply-To: <524f6be3-1639-4718-b099-6c873f3dbbd8@default> References: <524f6be3-1639-4718-b099-6c873f3dbbd8@default> Message-ID: Hi Shafi, please not that I'm not a formal jdk8u reviewer. The change to the source files looks good. Where did the change not applied cleanly? I'm only a little worried about the test because it uses the '@modules' tag. But if it runs and executed fine with jdk8u it's fine. Thanks for backporting this to jdk8, Volker On Mon, Apr 18, 2016 at 12:26 PM, Shafi Ahmad wrote: > Hi, > > > > Please review the backport of bug: "JDK-8141551: C2 can not handle returns with incompatible interface arrays" to jdk8u. > > Please note the backport is not clean. > > > > Webrev: http://cr.openjdk.java.net/~rpatil/8141551/webrev.00/ > > Jdk9 bug: https://bugs.openjdk.java.net/browse/JDK-8141551 > > Original patch pushed to jdk9: http://hg.openjdk.java.net/jdk9/jdk9/hotspot/rev/b425a78e8512 > > > > Test: Run jprt. > > > > Regards, > > Shafi > > From jesper.wilhelmsson at oracle.com Mon Apr 18 13:31:39 2016 From: jesper.wilhelmsson at oracle.com (Jesper Wilhelmsson) Date: Mon, 18 Apr 2016 15:31:39 +0200 Subject: jdk9/hs is now OPEN for pushes Message-ID: <5714E1BB.8050905@oracle.com> Hi, The Friday nightlies and other adhoc testing done looks good. We are now open for business in jdk9/hs. We do not intend to pull any more changes from hs-rt to hs, so make sure you rebase and update any forests before pushing. 
/Jesper From shafi.s.ahmad at oracle.com Mon Apr 18 14:23:26 2016 From: shafi.s.ahmad at oracle.com (Shafi Ahmad) Date: Mon, 18 Apr 2016 07:23:26 -0700 (PDT) Subject: [8u] RFR: JDK-8141551: C2 can not handle returns with incompatible interface arrays In-Reply-To: References: <524f6be3-1639-4718-b099-6c873f3dbbd8@default> Message-ID: <3667a362-5f71-460c-83a7-ae394a60a66a@default> Thanks Volker for looking into it. I have run jprt test but haven't verify whether this test is executed under jprt or not. Should I verify whether this test is picked up by jprt run or not? This change is done on top of the fix of https://bugs.openjdk.java.net/browse/JDK-8055530. For this I have already sent other RFR http://mail.openjdk.java.net/pipermail/hotspot-dev/2016-April/022714.html. shafi at shafi-ahmad:~/Java/jdk8/jdk8u-dev/hotspot$ hg import y applying y patching file src/share/vm/opto/parse1.cpp Hunk #2 FAILED at 2148 1 out of 2 hunks FAILED -- saving rejects to file src/share/vm/opto/parse1.cpp.rej abort: patch failed to apply shafi at shafi-ahmad:~/Java/jdk8/jdk8u-dev/hotspot$ cat src/share/vm/opto/parse1.cpp.rej --- parse1.cpp +++ parse1.cpp @@ -2144,15 +2149,24 @@ // here. Node* phi = _exits.argument(0); const TypeInstPtr *tr = phi->bottom_type()->isa_instptr(); - if( tr && tr->klass()->is_loaded() && - tr->klass()->is_interface() ) { + if (tr && tr->klass()->is_loaded() && + tr->klass()->is_interface()) { const TypeInstPtr *tp = value->bottom_type()->isa_instptr(); if (tp && tp->klass()->is_loaded() && !tp->klass()->is_interface()) { // sharpen the type eagerly; this eases certain assert checking if (tp->higher_equal(TypeInstPtr::NOTNULL)) tr = tr->join_speculative(TypeInstPtr::NOTNULL)->is_instptr(); - value = _gvn.transform(new CheckCastPPNode(0,value,tr)); + value = _gvn.transform(new CheckCastPPNode(0, value, tr)); + } + } else { + // Also handle returns of oop-arrays to an arrays-of-interface return + const TypeInstPtr* phi_tip; + const TypeInstPtr* val_tip; + Type::get_arrays_base_elements(phi->bottom_type(), value->bottom_type(), &phi_tip, &val_tip); + if (phi_tip != NULL && phi_tip->is_loaded() && phi_tip->klass()->is_interface() && + val_tip != NULL && val_tip->is_loaded() && !val_tip->klass()->is_interface()) { + value = _gvn.transform(new CheckCastPPNode(0, value, phi->bottom_type())); } } phi->add_req(value); Also I was getting compilation error due to line " value = _gvn.transform(new CheckCastPPNode(0, value, tr));" So I have changed it to "value = _gvn.transform(new (C) CheckCastPPNode(0, value, tr));" Similar change is done for line " value = _gvn.transform(new CheckCastPPNode(0, value, phi->bottom_type()));". Regards, Shafi -----Original Message----- From: Volker Simonis [mailto:volker.simonis at gmail.com] Sent: Monday, April 18, 2016 6:09 PM To: Shafi Ahmad Cc: HotSpot Open Source Developers Subject: Re: [8u] RFR: JDK-8141551: C2 can not handle returns with incompatible interface arrays Hi Shafi, please not that I'm not a formal jdk8u reviewer. The change to the source files looks good. Where did the change not applied cleanly? I'm only a little worried about the test because it uses the '@modules' tag. But if it runs and executed fine with jdk8u it's fine. Thanks for backporting this to jdk8, Volker On Mon, Apr 18, 2016 at 12:26 PM, Shafi Ahmad wrote: > Hi, > > > > Please review the backport of bug: "JDK-8141551: C2 can not handle returns with incompatible interface arrays" to jdk8u. > > Please note the backport is not clean. 
> > > > Webrev: http://cr.openjdk.java.net/~rpatil/8141551/webrev.00/ > > Jdk9 bug: https://bugs.openjdk.java.net/browse/JDK-8141551 > > Original patch pushed to jdk9: > http://hg.openjdk.java.net/jdk9/jdk9/hotspot/rev/b425a78e8512 > > > > Test: Run jprt. > > > > Regards, > > Shafi > > From chris.hegarty at oracle.com Mon Apr 18 15:07:27 2016 From: chris.hegarty at oracle.com (Chris Hegarty) Date: Mon, 18 Apr 2016 16:07:27 +0100 Subject: RFR [9] 8153756: jdk.vm.ci should not depend on sun.misc ( jdk.unsupported module ) Message-ID: <5714F82F.7080402@oracle.com> Refactoring due to JEP 260 [1] has moved the "real" Unsafe to jdk.internal.misc. All JDK modules, if they require Unsafe, should depend on a qualified export of jdk.internal.misc from the base module, rather than Unsafe in the jdk.unsupported module. The base module already exports jdk.internal.misc to the jdk.vm.ci module. This issue updates all usages of sun.misc.Unsafe to jdk.internal.misc.Unsafe, in the jdk.vm.ci module. http://cr.openjdk.java.net/~chegar/8153756/ https://bugs.openjdk.java.net/browse/JDK-8153756 The webrev is against jdk9/dev, but I intend to push this through hs-comp. -Chris. [1] https://bugs.openjdk.java.net/browse/JDK-8132928 From adinn at redhat.com Mon Apr 18 15:15:52 2016 From: adinn at redhat.com (Andrew Dinn) Date: Mon, 18 Apr 2016 16:15:52 +0100 Subject: RFR (XL): 8152664 - Support non-continuous CodeBlobs in HotSpot In-Reply-To: References: <20160407121221.GQ9504@rbackman> <20160411090501.GS9504@rbackman> <20160411115521.GU9504@rbackman> <20160413133128.GV9504@rbackman> <5714B623.7010409@redhat.com> Message-ID: <5714FA28.8010903@redhat.com> Hi Volker, As expected your patch fixes the problems with Rickard's original patch on AArch64. regards, Andrew Dinn ----------- From shafi.s.ahmad at oracle.com Mon Apr 18 15:22:58 2016 From: shafi.s.ahmad at oracle.com (Shafi Ahmad) Date: Mon, 18 Apr 2016 08:22:58 -0700 (PDT) Subject: [8u] RFR: JDK-8055530 - assert(_exits.control()->is_top() || !_gvn.type(ret_phi)->empty()) failed: return value must be well defined In-Reply-To: References: Message-ID: <58092e5c-52f7-462d-903e-44165a4505a1@default> Hi Roland, Could you please review this backport. Regards, Shafi From: Shafi Ahmad Sent: Monday, April 18, 2016 3:27 PM To: hotspot-dev at openjdk.java.net Cc: Shafi Ahmad Subject: [8u] RFR: JDK-8055530 - assert(_exits.control()->is_top() || !_gvn.type(ret_phi)->empty()) failed: return value must be well defined Hi, Please review the backport of bug: "JDK-8055530 - assert(_exits.control()->is_top() || !_gvn.type(ret_phi)->empty()) failed: return value must be well defined" to jdk8u. There is a single line conflict. Webrev: http://cr.openjdk.java.net/~csahu/8055530/webrev.00/ Jdk9 bug: https://bugs.openjdk.java.net/browse/JDK-8055530 Original patch pushed to jdk9: http://hg.openjdk.java.net/jdk9/jdk9/hotspot/rev/7dfa629d203a Test: Run jprt. Regards, Shafi From vladimir.kozlov at oracle.com Mon Apr 18 15:46:35 2016 From: vladimir.kozlov at oracle.com (Vladimir Kozlov) Date: Mon, 18 Apr 2016 08:46:35 -0700 Subject: [8u] RFR: JDK-8055530 - assert(_exits.control()->is_top() || !_gvn.type(ret_phi)->empty()) failed: return value must be well defined In-Reply-To: References: Message-ID: <5715015B.9020604@oracle.com> Hi Shafi, I verified that changes match jdk 9 changes. They are good. Where was conflict? 
Thanks, Vladimir On 4/18/16 2:56 AM, Shafi Ahmad wrote: > Hi, > > > > Please review the backport of bug: "JDK-8055530 - assert(_exits.control()->is_top() || !_gvn.type(ret_phi)->empty()) failed: return value must be well defined" to jdk8u. > > There is a single line conflict. > > > > Webrev: http://cr.openjdk.java.net/~csahu/8055530/webrev.00/ > > Jdk9 bug: https://bugs.openjdk.java.net/browse/JDK-8055530 > > Original patch pushed to jdk9: http://hg.openjdk.java.net/jdk9/jdk9/hotspot/rev/7dfa629d203a > > > > Test: Run jprt. > > > > Regards, > > Shafi > > > From adinn at redhat.com Mon Apr 18 16:09:11 2016 From: adinn at redhat.com (Andrew Dinn) Date: Mon, 18 Apr 2016 17:09:11 +0100 Subject: RFR (XL): 8152664 - Support non-continuous CodeBlobs in HotSpot In-Reply-To: <5714FA28.8010903@redhat.com> References: <20160407121221.GQ9504@rbackman> <20160411090501.GS9504@rbackman> <20160411115521.GU9504@rbackman> <20160413133128.GV9504@rbackman> <5714B623.7010409@redhat.com> <5714FA28.8010903@redhat.com> Message-ID: <571506A7.4060704@redhat.com> On 18/04/16 16:15, Andrew Dinn wrote: > Hi Volker, > > As expected your patch fixes the problems with Rickard's original patch > on AArch64. n.b as mentioned elsewhere in this thread I had to add a missing ? to Rickard's patch in file os_linux_aarch64.cpp. @@ -389,7 +389,7 @@ // here if the underlying file has been truncated. // Do not crash the VM in such a case. CodeBlob* cb = CodeCache::find_blob_unsafe(pc); - nmethod* nm = (cb != NULL && cb->is_nmethod()) ? (nmethod*)cb : NULL; + CompiledMethod* nm = (cb != NULL) cb->as_compiled_method_or_null() : NULL; if (nm != NULL && nm->has_unsafe_access()) { stub = handle_unsafe_access(thread, pc); } As you can see there ought to be a ? on the modified line after (cb != NULL). So, this needs to be included in whatever patch finally gets pushed (really it ought to go into Rickard's patch). regards, Andrew Dinn ----------- Senior Principal Software Engineer Red Hat UK Ltd Registered in UK and Wales under Company Registration No. 3798903 Directors: Michael Cunningham (US), Michael O'Neill (Ireland), Paul Argiry (US) From vladimir.kozlov at oracle.com Mon Apr 18 16:17:37 2016 From: vladimir.kozlov at oracle.com (Vladimir Kozlov) Date: Mon, 18 Apr 2016 09:17:37 -0700 Subject: [8u] RFR: JDK-8141551: C2 can not handle returns with incompatible interface arrays In-Reply-To: References: <524f6be3-1639-4718-b099-6c873f3dbbd8@default> Message-ID: <571508A1.2070407@oracle.com> On 4/18/16 5:39 AM, Volker Simonis wrote: > Hi Shafi, > > please not that I'm not a formal jdk8u reviewer. > > The change to the source files looks good. Where did the change not > applied cleanly? Looks like it is new() vs new(Compiler) Node allocation known difference. I verified that changes are matching jdk 9 changes except new() difference. > > I'm only a little worried about the test because it uses the > '@modules' tag. But if it runs and executed fine with jdk8u it's fine. The test should not have @modules for jdk8u since it is not supported there. JPRT does not run jtreg tests when testing jdk8u sources (only one very simple ExecuteInternalVMTests.java): http://hg.openjdk.java.net/jdk8u/jdk8u-dev/hotspot/file/d025821b6b6d/test/TEST.groups#l131 You should run jtreg tests yourself using your jdk8u as test jdk. Please, verify results of your testing before pushing. 
Thanks, Vladimir > > Thanks for backporting this to jdk8, > Volker > > > On Mon, Apr 18, 2016 at 12:26 PM, Shafi Ahmad wrote: >> Hi, >> >> >> >> Please review the backport of bug: "JDK-8141551: C2 can not handle returns with incompatible interface arrays" to jdk8u. >> >> Please note the backport is not clean. >> >> >> >> Webrev: http://cr.openjdk.java.net/~rpatil/8141551/webrev.00/ >> >> Jdk9 bug: https://bugs.openjdk.java.net/browse/JDK-8141551 >> >> Original patch pushed to jdk9: http://hg.openjdk.java.net/jdk9/jdk9/hotspot/rev/b425a78e8512 >> >> >> >> Test: Run jprt. >> >> >> >> Regards, >> >> Shafi >> >> From volker.simonis at gmail.com Mon Apr 18 16:26:03 2016 From: volker.simonis at gmail.com (Volker Simonis) Date: Mon, 18 Apr 2016 18:26:03 +0200 Subject: RFR (XL): 8152664 - Support non-continuous CodeBlobs in HotSpot In-Reply-To: <571506A7.4060704@redhat.com> References: <20160407121221.GQ9504@rbackman> <20160411090501.GS9504@rbackman> <20160411115521.GU9504@rbackman> <20160413133128.GV9504@rbackman> <5714B623.7010409@redhat.com> <5714FA28.8010903@redhat.com> <571506A7.4060704@redhat.com> Message-ID: Thanks again Andrew. I'm just waiting for Rickards evaluation and I'm sure he'll incorporate your fix into his patch. Regards, Volker On Mon, Apr 18, 2016 at 6:09 PM, Andrew Dinn wrote: > On 18/04/16 16:15, Andrew Dinn wrote: >> Hi Volker, >> >> As expected your patch fixes the problems with Rickard's original patch >> on AArch64. > > n.b as mentioned elsewhere in this thread I had to add a missing ? to > Rickard's patch in file os_linux_aarch64.cpp. > > > @@ -389,7 +389,7 @@ > // here if the underlying file has been truncated. > // Do not crash the VM in such a case. > CodeBlob* cb = CodeCache::find_blob_unsafe(pc); > - nmethod* nm = (cb != NULL && cb->is_nmethod()) ? (nmethod*)cb : > NULL; > + CompiledMethod* nm = (cb != NULL) > cb->as_compiled_method_or_null() : NULL; > if (nm != NULL && nm->has_unsafe_access()) { > stub = handle_unsafe_access(thread, pc); > } > > As you can see there ought to be a ? on the modified line after (cb != > NULL). > > So, this needs to be included in whatever patch finally gets pushed > (really it ought to go into Rickard's patch). > > regards, > > > > Andrew Dinn > ----------- > Senior Principal Software Engineer > Red Hat UK Ltd > Registered in UK and Wales under Company Registration No. 3798903 > Directors: Michael Cunningham (US), Michael O'Neill (Ireland), Paul > Argiry (US) From volker.simonis at gmail.com Mon Apr 18 16:29:59 2016 From: volker.simonis at gmail.com (Volker Simonis) Date: Mon, 18 Apr 2016 18:29:59 +0200 Subject: [8u] RFR: JDK-8141551: C2 can not handle returns with incompatible interface arrays In-Reply-To: <571508A1.2070407@oracle.com> References: <524f6be3-1639-4718-b099-6c873f3dbbd8@default> <571508A1.2070407@oracle.com> Message-ID: On Mon, Apr 18, 2016 at 6:17 PM, Vladimir Kozlov wrote: > On 4/18/16 5:39 AM, Volker Simonis wrote: >> >> Hi Shafi, >> >> please not that I'm not a formal jdk8u reviewer. >> >> The change to the source files looks good. Where did the change not >> applied cleanly? > > > Looks like it is new() vs new(Compiler) Node allocation known difference. > I verified that changes are matching jdk 9 changes except new() difference. > Hi, Shavi, Vladimir, thanks for the explanation. Then I think everything looks fine if you verify that the JTREG test are passing. Regards, Volker >> >> I'm only a little worried about the test because it uses the >> '@modules' tag. But if it runs and executed fine with jdk8u it's fine. 
> > > > The test should not have @modules for jdk8u since it is not supported there. > > JPRT does not run jtreg tests when testing jdk8u sources (only one very > simple ExecuteInternalVMTests.java): > > http://hg.openjdk.java.net/jdk8u/jdk8u-dev/hotspot/file/d025821b6b6d/test/TEST.groups#l131 > > You should run jtreg tests yourself using your jdk8u as test jdk. > > Please, verify results of your testing before pushing. > > Thanks, > Vladimir > > >> >> Thanks for backporting this to jdk8, >> Volker >> >> >> On Mon, Apr 18, 2016 at 12:26 PM, Shafi Ahmad >> wrote: >>> >>> Hi, >>> >>> >>> >>> Please review the backport of bug: "JDK-8141551: C2 can not handle >>> returns with incompatible interface arrays" to jdk8u. >>> >>> Please note the backport is not clean. >>> >>> >>> >>> Webrev: http://cr.openjdk.java.net/~rpatil/8141551/webrev.00/ >>> >>> Jdk9 bug: https://bugs.openjdk.java.net/browse/JDK-8141551 >>> >>> Original patch pushed to jdk9: >>> http://hg.openjdk.java.net/jdk9/jdk9/hotspot/rev/b425a78e8512 >>> >>> >>> >>> Test: Run jprt. >>> >>> >>> >>> Regards, >>> >>> Shafi >>> >>> > From mikael.vidstedt at oracle.com Mon Apr 18 17:43:32 2016 From: mikael.vidstedt at oracle.com (Mikael Vidstedt) Date: Mon, 18 Apr 2016 10:43:32 -0700 Subject: jdk9/hs is now OPEN for pushes In-Reply-To: <5714E1BB.8050905@oracle.com> References: <5714E1BB.8050905@oracle.com> Message-ID: <57151CC4.1050508@oracle.com> FYI: jdk9/hs-rt has now been made READ-ONLY on hg.openjdk.java.net. Cheers, Mikael On 4/18/2016 6:31 AM, Jesper Wilhelmsson wrote: > Hi, > > The Friday nightlies and other adhoc testing done looks good. We are > now open for business in jdk9/hs. > > We do not intend to pull any more changes from hs-rt to hs, so make > sure you rebase and update any forests before pushing. > > /Jesper From daniel.daugherty at oracle.com Mon Apr 18 18:17:36 2016 From: daniel.daugherty at oracle.com (Daniel D. Daugherty) Date: Mon, 18 Apr 2016 12:17:36 -0600 Subject: jdk9/hs is now OPEN for pushes In-Reply-To: <57151CC4.1050508@oracle.com> References: <5714E1BB.8050905@oracle.com> <57151CC4.1050508@oracle.com> Message-ID: <571524C0.4060809@oracle.com> Woot! Dan On 4/18/16 11:43 AM, Mikael Vidstedt wrote: > > FYI: jdk9/hs-rt has now been made READ-ONLY on hg.openjdk.java.net. > > Cheers, > Mikael > > On 4/18/2016 6:31 AM, Jesper Wilhelmsson wrote: >> Hi, >> >> The Friday nightlies and other adhoc testing done looks good. We are >> now open for business in jdk9/hs. >> >> We do not intend to pull any more changes from hs-rt to hs, so make >> sure you rebase and update any forests before pushing. >> >> /Jesper > From christian.thalinger at oracle.com Mon Apr 18 19:32:31 2016 From: christian.thalinger at oracle.com (Christian Thalinger) Date: Mon, 18 Apr 2016 09:32:31 -1000 Subject: RFR [9] 8153756: jdk.vm.ci should not depend on sun.misc ( jdk.unsupported module ) In-Reply-To: <5714F82F.7080402@oracle.com> References: <5714F82F.7080402@oracle.com> Message-ID: <034B3B98-011C-4B72-90B3-30AD1E23E084@oracle.com> > On Apr 18, 2016, at 5:07 AM, Chris Hegarty wrote: > > Refactoring due to JEP 260 [1] has moved the "real" Unsafe to > jdk.internal.misc. All JDK modules, if they require Unsafe, > should depend on a qualified export of jdk.internal.misc from > the base module, rather than Unsafe in the jdk.unsupported module. > The base module already exports jdk.internal.misc to the jdk.vm.ci > module. > > This issue updates all usages of sun.misc.Unsafe to > jdk.internal.misc.Unsafe, in the jdk.vm.ci module. 
> > http://cr.openjdk.java.net/~chegar/8153756/ src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotMethodData.java -import sun.misc.Unsafe; This file and others have Unsafe references in Javadoc that's why the import is required. I assume you did run all compiler/jvmci tests? > https://bugs.openjdk.java.net/browse/JDK-8153756 > > The webrev is against jdk9/dev, but I intend to push this through > hs-comp. > > -Chris. > > [1] https://bugs.openjdk.java.net/browse/JDK-8132928 From coleen.phillimore at oracle.com Mon Apr 18 19:34:12 2016 From: coleen.phillimore at oracle.com (Coleen Phillimore) Date: Mon, 18 Apr 2016 15:34:12 -0400 Subject: RFR: os_linux.cpp parse_os_info gives non descriptive output on current SLES releases In-Reply-To: References: Message-ID: <571536B4.8080807@oracle.com> Thank you for finding this problem with SuSE and the diff. I don't know how many different distros were tested with these changes but it's easy not to get them all. Would it make more sense, rather than getting the last line then checking for DISTRIB_DESCRIPTION=, checking in the fgets() loop for both this and PRETTY_NAME. Then we could change it for Ubuntu and there's less conditionalizing on the name of the release file. Like: open webrev at http://cr.openjdk.java.net/~coleenp/suse/webrev Coleen On 4/18/16 8:05 AM, Baesken, Matthias wrote: > Hello , the current implementation of the parse_os_info-function in os_linux.cpp gets the last line of a Linux-distro related file to > provide a meaningful OS version string. > > However the information provided currently on SuSE Linux (SLES) is not very descriptive, it currently uses /etc/lsb-release and gives : > > more /etc/lsb-release > LSB_VERSION="core-2.0-noarch:core-3.2-noarch:core-4.0-noarch:core-2.0-x86_64:core-3.2-x86_64:core-4.0-x86_64" > > So I suggest to use /etc/SuSE-release instead, which gives a good information for > SLES 9 - 12 in the ***first line*** of /etc/SuSE-release : > > Example SLES11 : > > more /etc/SuSE-release > SUSE Linux Enterprise Server 11 (x86_64) > VERSION = 11 > PATCHLEVEL = 3 > > (this is similar to using /etc/redhat-release on Red Hat with the difference that the ***first line*** has the relevant info). > > > Additionally, /etc/os-release needs some special handling as well, because > the meaningful OS-release description string is not always the last line of the file but in the line > containing the information PRETTY_NAME=... > See also : > > https://www.freedesktop.org/software/systemd/man/os-release.html > > Example from Ubuntu 14 : > > $ more /etc/os-release > ... > PRETTY_NAME="Ubuntu 14.04.3 LTS" > ... > > It might also be a good idea to place /etc/os-release higher in the distro_files list, but I do not have access to > turbolinux / gentoo to check the situation on these distros. > > Regards, Matthias > > > > Diff : > --- a/src/os/linux/vm/os_linux.cpp Fri Apr 15 16:19:15 2016 +0100 > +++ b/src/os/linux/vm/os_linux.cpp Mon Apr 18 13:54:04 2016 +0200 > @@ -2013,8 +2013,8 @@ > // their own specific XXX-release file as well as a redhat-release file. > // Because of this the XXX-release file needs to be searched for before the > // redhat-release file. > -// Since Red Hat has a lsb-release file that is not very descriptive the > -// search for redhat-release needs to be before lsb-release. > +// Since Red Hat and SuSE have an lsb-release file that is not very descriptive the > +// search for redhat-release / SuSE-release needs to be before lsb-release. 
> // Since the lsb-release file is the new standard it needs to be searched > // before the older style release files. > // Searching system-release (Red Hat) and os-release (other Linuxes) are a > @@ -2031,8 +2031,8 @@ > "/etc/mandrake-release", > "/etc/sun-release", > "/etc/redhat-release", > + "/etc/SuSE-release", > "/etc/lsb-release", > - "/etc/SuSE-release", > "/etc/turbolinux-release", > "/etc/gentoo-release", > "/etc/ltib-release", > @@ -2065,11 +2065,36 @@ > static void parse_os_info(char* distro, size_t length, const char* file) { > FILE* fp = fopen(file, "r"); > if (fp != NULL) { > + // SuSE-release : first line is interesting > + // os-release : PRETTY_NAME= line is interesting > + // (might be at different locations in the file) > char buf[256]; > - // get last line of the file. > - while (fgets(buf, sizeof(buf), fp)) { } > + int lcnt = 0; > + bool is_etc_suserelease = false; > + bool is_etc_osrelease = false; > + if (strcmp(file, "/etc/SuSE-release") == 0) { > + is_etc_suserelease = true; > + } > + if (strcmp(file, "/etc/os-release") == 0) { > + is_etc_osrelease = true; > + } > + > + // get last line of the file or > + // other interesting line on SUSE / os-release > + while (fgets(buf, sizeof(buf), fp)) { > + if (lcnt == 0 && is_etc_suserelease) { > + break; > + } > + if (is_etc_osrelease) { > + if (strstr(buf, "PRETTY_NAME=") != NULL) { > + break; > + } > + } > + lcnt++; > + } > + > // Edit out extra stuff in expected ubuntu format > - if (strstr(buf, "DISTRIB_DESCRIPTION=") != NULL) { > + if (strstr(buf, "DISTRIB_DESCRIPTION=") != NULL || strstr(buf, "PRETTY_NAME=") != NULL) { > char* ptr = strstr(buf, "\""); // the name is in quotes > if (ptr != NULL) { > ptr++; // go beyond first quote From derek.white at oracle.com Mon Apr 18 19:40:40 2016 From: derek.white at oracle.com (Derek White) Date: Mon, 18 Apr 2016 15:40:40 -0400 Subject: RFR: 8067785: Using AlwaysPreTouch does not always touch all pages In-Reply-To: <5710F714.5070601@oracle.com> References: <5710F714.5070601@oracle.com> Message-ID: <57153838.8000902@oracle.com> Hi Stefan, On 4/15/16 10:13 AM, Stefan Karlsson wrote: > Hi all, > > Please review this patch that ensures that VirtualSpace::expand_by > pre-touches all committed memory. > > http://cr.openjdk.java.net/~stefank/8067785/webrev.01 > https://bugs.openjdk.java.net/browse/JDK-8067785 > > 1) Previously, we pre-touched the memory between the old and new _high > pointers. Since the _high variable isn't page aligned, the pre-touch > code didn't touch all committed pages. I've moved the pre-touch code > to be done for every os::commit_memory call in VirtuaSpace::expand_by. > > 2) expand_by has segregated the VirtualSpace into three segements. > [small pages | large pages | small pages] so that we can have > VirtualSpaces that are not large page size aligned. Previously, only > the middle section called commit_memory with an alignment hint, and > the small pages sections called commit_memory without a small pages > hint. On all platforms, except Solaris, this boils down to the same > code. On Solaris we have this additional code executed: > size_t page_size = page_size_for_alignment(alignment_hint); > if (page_size > (size_t) vm_page_size()) { > (void)Solaris::setup_large_pages(addr, bytes, page_size); > } > > But since the alignment_hint is set to vm_page_size we won't try to > setup_large_pages here either. 
> > 3) The patch also contains a few style changes to make the > VirtualSpace::expand_by easier to read (at least for me) > > Tested with JPRT and -XX:+ExecuteInternalVMTests with AlwaysPreTouch > temporarily forced to true. > > Thanks, > StefanK Looks Good(tm)! - Derek From chris.hegarty at oracle.com Mon Apr 18 19:51:36 2016 From: chris.hegarty at oracle.com (Chris Hegarty) Date: Mon, 18 Apr 2016 20:51:36 +0100 Subject: RFR [9] 8153756: jdk.vm.ci should not depend on sun.misc ( jdk.unsupported module ) In-Reply-To: <034B3B98-011C-4B72-90B3-30AD1E23E084@oracle.com> References: <5714F82F.7080402@oracle.com> <034B3B98-011C-4B72-90B3-30AD1E23E084@oracle.com> Message-ID: <558D6707-F421-4808-87F0-EC14D8E1F1A3@oracle.com> > On 18 Apr 2016, at 20:32, Christian Thalinger wrote: > > >> On Apr 18, 2016, at 5:07 AM, Chris Hegarty > wrote: >> >> Refactoring due to JEP 260 [1] has moved the "real" Unsafe to >> jdk.internal.misc. All JDK modules, if they require Unsafe, >> should depend on a qualified export of jdk.internal.misc from >> the base module, rather than Unsafe in the jdk.unsupported module. >> The base module already exports jdk.internal.misc to the jdk.vm.ci >> module. >> >> This issue updates all usages of sun.misc.Unsafe to >> jdk.internal.misc.Unsafe, in the jdk.vm.ci module. >> >> http://cr.openjdk.java.net/~chegar/8153756/ > > src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotMethodData.java > > -import sun.misc.Unsafe; > This file and others have Unsafe references in Javadoc that's why the import is required. There are two files that have imports but no static references, as you say these are for javadoc. I can reinstate an updated import, or inline the single file javadoc usage in the @link? Whichever you prefer. > I assume you did run all compiler/jvmci tests? Yes, all hotspot testset pass. -Chris H >> https://bugs.openjdk.java.net/browse/JDK-8153756 >> >> The webrev is against jdk9/dev, but I intend to push this through >> hs-comp. >> >> -Chris. >> >> [1] https://bugs.openjdk.java.net/browse/JDK-8132928 > From lois.foltan at oracle.com Mon Apr 18 20:25:38 2016 From: lois.foltan at oracle.com (Lois Foltan) Date: Mon, 18 Apr 2016 16:25:38 -0400 Subject: RFR (S): JDK-8152949: Jigsaw crash when Klass in _fixup_module_field_list is unloaded In-Reply-To: <5714C57C.1000109@oracle.com> References: <570FEFAB.2070809@oracle.com> <570FFC4F.7020000@oracle.com> <5710E397.3020106@oracle.com> <5710E86F.3090606@oracle.com> <57111EBC.4000308@oracle.com> <571144ED.1050902@oracle.com> <5714876C.2050207@oracle.com> <5714C57C.1000109@oracle.com> Message-ID: <571542C2.5060707@oracle.com> On 4/18/2016 7:31 AM, Lois Foltan wrote: > > On 4/18/2016 3:06 AM, Stefan Karlsson wrote: >> On 2016-04-15 21:45, Alan Bateman wrote: >>> >>> On 15/04/2016 18:02, Lois Foltan wrote: >>>> >>>> Hi Stefan, >>>> >>>> In start up before module system initialization in complete I >>>> believe the VM is single threaded, so the increment/decrement >>>> reference counts do not need to be atomic. Adding it is a >>>> defensive move in case the reference count is ever used passed >>>> start up in the future. It kind of does seem a bit excessive, >>>> sounds like you agree? >>> There will be a number of threads running before the base module is >>> defined to the VM. As things stand the the java threads at this >>> point will be the Common-Cleaner, Finalizer, Reference Handler and >>> Signal Handler. >> >> So, are you saying that we need the atomics? 
>> >> The java_lang_Class::create_mirror function isn't multi-thread safe, >> and must already be guarded by a lock (SystemDictionary_lock AFAICT). >> The increment in Unsafe_DefineAnonymousClass0, will only be done >> once, for the single InstanceKlass instance in the CLD. And all reads >> of _keep_alive from the GC are done during safepoints. > The anonymous class is inserted in the fixup mirror and fixup module > lists during java_lang_Class::create_mirror() before it is made public > or "published" as loaded. So the two instances where the reference > count is incremented, Unsafe_DefineAnonymousClass0 and in > java_lang_Class::create_mirror(), are guarded by a lock as well as the > decrement in Unsafe_DefineAnonymousClass0. No other thread has access > to the class during this time, as it is being loaded. >> >> How does ModuleEntryTable::patch_javabase_entries guard against >> concurrent inserts into the _fixup_module_field_list list? > That leaves the decrement in > ModuleEntryTable::patch_javabase_entries() as possibly unguarded. This > only occurs when the VM is called to define the module java.base. I > believe this should be okay but will double check. One small change in modules.cpp/define_javabase_module() to ensure that only one definition attempt of java.base will occur and thus only one call to ModuleEntryTable::patch_javabase_entries(). If a situation arises where java.base is trying to be multiply defined, according to the expected error conditions for JVM_DefineModule(), an IllegalArgumentException should be thrown. I have also added a comment in classfile/classLoaderData.hpp explaining why _keep_alive does need to be defined volatile or atomic. Please review at: http://cr.openjdk.java.net/~lfoltan/bug_jdk8152949_1/ Retesting in progress. Thanks, Lois > > Thanks, > Lois > >> >> thanks, >> StefanK >> >> >>> >>> -Alan >> > From david.holmes at oracle.com Tue Apr 19 00:12:27 2016 From: david.holmes at oracle.com (David Holmes) Date: Tue, 19 Apr 2016 10:12:27 +1000 Subject: enhancement of cmpxchg and copy_to_survivor for ppc64 In-Reply-To: References: <201604081054.u38As2K6014953@d19av07.sagamino.japan.ibm.com> <5711ED18.7000706@oracle.com> <201604180215.u3I2FUZi001650@d19av07.sagamino.japan.ibm.com> <571464DF.3070706@oracle.com> <5714E416.6030300@redhat.com> Message-ID: <571577EB.1080907@oracle.com> On 19/04/2016 2:23 AM, Volker Simonis wrote: > We've looked at the proposed changes and we are pretty sure that the > cmpxchg done during copy_to_survivor_space in the parallel GC doesn't > require the full fence/acquire semantics. But we also agree that this > should not be ifdefed PPC64 in shared code. > > Andrews suggestion of using the new C++11 atomic memory operators is > good, although in practice it may be hard to get all the different > compilers under the hood. > > But now that we've even got the corresponding cmpxchg routines with > various acquire/release semantics in Java-land in the new > jdk.internal.Unsafe package, it would be a pity if it would not be > possible to use that functionality within the Hotspot. > > I think one approach to enable an easy transition would be to do the > proposed enhancements (or something similar) to cmpxchg > unconditionally in atomic.hpp. For example instead of two extra > boolean parameters we could use an enum similar to the one in > library_call.cpp: > > typedef enum { Relaxed, Opaque, Volatile, Acquire, Release } AccessKind; > > The default value of this parameter should of course be conservative > (i.e. 
Volatile) so we don't change the current behavior. After that > individual, performance critical callers of these routines can be > examined if they really require the most conservative setting and > maybe optimized. > > What do you think? I think expanding our Atomic and OrderAccess API's to align with the C++11 atomic and memory-ordering APIs is a good thing to do - even if we don't actually switch to direct compiler support for a while yet. It may be challenging to efficiently implement all of the C++11 semantics directly in the meantime. However this is too late for JDK 9 I think, with FC (for hotspot) on May 12. Though addressing the immediate concern, without trying to generalize to full C++11 semantic support may be feasible - eg add a "relaxed cas" for use in one or two particular pieces of code. Thanks, David > Regards, > Martin and Volker > > On Mon, Apr 18, 2016 at 3:41 PM, Andrew Haley wrote: >> On 04/18/2016 02:01 PM, Carsten Varming wrote: >>> An important question is: Should the shared parts of hotspot move towards >>> weaker memory models? If yes, then everybody should review code assuming >>> the weaker semantics. If no, then there really isn't room for patches like >>> this one :(. >> >> This would surely be useful. For example, the bitmap marking uses a >> two-way acquire and release barrier at the moment, and I'm fairly sure >> we don't need that. >> >> I don't think this change should be #ifdef PPC64. That disadvantages >> other targets such as AArch64, to no advantage. I understand that >> moving this to shared code requires more work, but we should do at >> least some of it in the JDK9 timeframe. >> >> C++11 has a considerably greater variety of atomic memory operators >> than the ones in HotSpot. Over time I believe we should migrate to >> C++11-like operators in our code base. One way to do this would be to >> create new operators which map in a simple way onto the standard ones. >> The we can get rid of much of this inline assembly code. >> >> Andrew. From david.holmes at oracle.com Tue Apr 19 02:11:33 2016 From: david.holmes at oracle.com (David Holmes) Date: Tue, 19 Apr 2016 12:11:33 +1000 Subject: RFR (S): JDK-8152949: Jigsaw crash when Klass in _fixup_module_field_list is unloaded In-Reply-To: <571542C2.5060707@oracle.com> References: <570FEFAB.2070809@oracle.com> <570FFC4F.7020000@oracle.com> <5710E397.3020106@oracle.com> <5710E86F.3090606@oracle.com> <57111EBC.4000308@oracle.com> <571144ED.1050902@oracle.com> <5714876C.2050207@oracle.com> <5714C57C.1000109@oracle.com> <571542C2.5060707@oracle.com> Message-ID: <571593D5.7010504@oracle.com> Hi Lois, On 19/04/2016 6:25 AM, Lois Foltan wrote: > > On 4/18/2016 7:31 AM, Lois Foltan wrote: >> >> On 4/18/2016 3:06 AM, Stefan Karlsson wrote: >>> On 2016-04-15 21:45, Alan Bateman wrote: >>>> >>>> On 15/04/2016 18:02, Lois Foltan wrote: >>>>> >>>>> Hi Stefan, >>>>> >>>>> In start up before module system initialization in complete I >>>>> believe the VM is single threaded, so the increment/decrement >>>>> reference counts do not need to be atomic. Adding it is a >>>>> defensive move in case the reference count is ever used passed >>>>> start up in the future. It kind of does seem a bit excessive, >>>>> sounds like you agree? >>>> There will be a number of threads running before the base module is >>>> defined to the VM. As things stand the the java threads at this >>>> point will be the Common-Cleaner, Finalizer, Reference Handler and >>>> Signal Handler. >>> >>> So, are you saying that we need the atomics? 
>>> >>> The java_lang_Class::create_mirror function isn't multi-thread safe, >>> and must already be guarded by a lock (SystemDictionary_lock AFAICT). >>> The increment in Unsafe_DefineAnonymousClass0, will only be done >>> once, for the single InstanceKlass instance in the CLD. And all reads >>> of _keep_alive from the GC are done during safepoints. >> The anonymous class is inserted in the fixup mirror and fixup module >> lists during java_lang_Class::create_mirror() before it is made public >> or "published" as loaded. So the two instances where the reference >> count is incremented, Unsafe_DefineAnonymousClass0 and in >> java_lang_Class::create_mirror(), are guarded by a lock as well as the >> decrement in Unsafe_DefineAnonymousClass0. No other thread has access >> to the class during this time, as it is being loaded. >>> >>> How does ModuleEntryTable::patch_javabase_entries guard against >>> concurrent inserts into the _fixup_module_field_list list? >> That leaves the decrement in >> ModuleEntryTable::patch_javabase_entries() as possibly unguarded. This >> only occurs when the VM is called to define the module java.base. I >> believe this should be okay but will double check. > > One small change in modules.cpp/define_javabase_module() to ensure that > only one definition attempt of java.base will occur and thus only one > call to ModuleEntryTable::patch_javabase_entries(). If a situation > arises where java.base is trying to be multiply defined, according to > the expected error conditions for JVM_DefineModule(), an > IllegalArgumentException should be thrown. > > I have also added a comment in classfile/classLoaderData.hpp explaining > why _keep_alive does need to be defined volatile or atomic. Can you add assertions to check that _keep_alive is only modified under the protection of the lock (with a special case perhaps for the unguarded java.base case) ? Thanks, David > Please review at: > > http://cr.openjdk.java.net/~lfoltan/bug_jdk8152949_1/ > > Retesting in progress. > > Thanks, > Lois > >> >> Thanks, >> Lois >> >>> >>> thanks, >>> StefanK >>> >>> >>>> >>>> -Alan >>> >> > From christian.thalinger at oracle.com Tue Apr 19 04:58:56 2016 From: christian.thalinger at oracle.com (Christian Thalinger) Date: Mon, 18 Apr 2016 18:58:56 -1000 Subject: RFR [9] 8153756: jdk.vm.ci should not depend on sun.misc ( jdk.unsupported module ) In-Reply-To: <558D6707-F421-4808-87F0-EC14D8E1F1A3@oracle.com> References: <5714F82F.7080402@oracle.com> <034B3B98-011C-4B72-90B3-30AD1E23E084@oracle.com> <558D6707-F421-4808-87F0-EC14D8E1F1A3@oracle.com> Message-ID: > On Apr 18, 2016, at 9:51 AM, Chris Hegarty wrote: > > >> On 18 Apr 2016, at 20:32, Christian Thalinger wrote: >> >> >>> On Apr 18, 2016, at 5:07 AM, Chris Hegarty > wrote: >>> >>> Refactoring due to JEP 260 [1] has moved the "real" Unsafe to >>> jdk.internal.misc. All JDK modules, if they require Unsafe, >>> should depend on a qualified export of jdk.internal.misc from >>> the base module, rather than Unsafe in the jdk.unsupported module. >>> The base module already exports jdk.internal.misc to the jdk.vm.ci >>> module. >>> >>> This issue updates all usages of sun.misc.Unsafe to >>> jdk.internal.misc.Unsafe, in the jdk.vm.ci module. >>> >>> http://cr.openjdk.java.net/~chegar/8153756/ >> >> src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotMethodData.java >> >> -import sun.misc.Unsafe; >> This file and others have Unsafe references in Javadoc that's why the import is required. 
> > There are two files that have imports but no static references, as you > say these are for javadoc. I can reinstate an updated import, or inline > the single file javadoc usage in the @link? Whichever you prefer. Please put back the import. > >> I assume you did run all compiler/jvmci tests? > > Yes, all hotspot testset pass. Please remove the empty line here: module jdk.vm.ci { - // 8153756 - requires jdk.unsupported; uses jdk.vm.ci.hotspot.HotSpotVMEventListener; and then it?s good to go. > > -Chris H > >>> https://bugs.openjdk.java.net/browse/JDK-8153756 >>> >>> The webrev is against jdk9/dev, but I intend to push this through >>> hs-comp. >>> >>> -Chris. >>> >>> [1] https://bugs.openjdk.java.net/browse/JDK-8132928 >> > From rickard.backman at oracle.com Tue Apr 19 05:20:27 2016 From: rickard.backman at oracle.com (Rickard =?iso-8859-1?Q?B=E4ckman?=) Date: Tue, 19 Apr 2016 07:20:27 +0200 Subject: RFR (XL): 8152664 - Support non-continuous CodeBlobs in HotSpot In-Reply-To: References: <20160407121221.GQ9504@rbackman> <20160411090501.GS9504@rbackman> <20160411115521.GU9504@rbackman> <20160413133128.GV9504@rbackman> <97c76657-4904-b1b2-c961-592a8baa44df@oracle.com> Message-ID: <20160419052027.GA19871@rbackman> Volker, I'm fine with 8151956 going in as a separate commit. I can sponsor that change and will push it at the same time as I push this (8152664) change. Please put it out for review as soon as possible. Thanks /R On 04/18, Volker Simonis wrote: > Hi Rickard, > > are you fine if we are fixing the issue about non-initialized nmethods > independently under "8151956 : CodeBlob ctor virtual call on partially > constructed subclass"? In that case I'll submit an extra RFR today > with that fix only. Anyway you or Dean will have to sponsor it because > it is in shared code. > > Or do you want to fix the issue together with your change for 8152664? > > Anyway, after we've fixed the problem with the partially constructed > classes I think we should make is_compiled() and is_nmethod() virtual > again and get rid of the "subtype()" hack: > > http://cr.openjdk.java.net/~simonis/webrevs/2016/8152664_addon/ > > (This is relatively to your webrev.) > > Regards, > Volker > > > On Fri, Apr 15, 2016 at 11:19 PM, Dean Long wrote: > > > > On 4/15/2016 11:28 AM, Volker Simonis wrote: > >> > >> Hi, > >> > >> this one was a real puzzler :) > >> But I finally found some quiet hours in the office today and came up > >> with this rather simple solution: > >> > >> http://cr.openjdk.java.net/~simonis/webrevs/2016/8151956/ > >> > >> First I've tried to only move the relocation handling out of the > >> CodeBlob constructor into the derived constructors but that didn't > >> work very well and was quite complicated. So finally, I've just moved > >> the call to CodeBuffer::copy_code_and_locs_to() from the CodeBlob > >> constructor into the nmethod and RuntimeBlob constructors > >> respectively. I couldn't find a reason why we shouldn't do this. The > >> change is minimal and makes the whole handling more robust. I've > >> compiled and smoke tested with JVM 98 on Linux/x86_64, Linux/ppc64le > >> and Solaris/SPARC. > >> > >> I will run some more tests on Monday, but it would be great if you > >> (i.e. Andrew) could verify this fix on ARM and if you (i.e. > >> Rickard/Dean) could run some of your internal tests. > > > > > > Hi Volker. 
This looks like what I was trying to accomplish with my patch, > > but I introduced a new function CodeBlob::initialize(), and I called it too > > early (in CompiledMethod instead of nmethod). > > > > Yes, now I see. Initially I just patched your changes in and saw that > they don't work so I didn't look at the actual changes in more > detail. I think I'll add the various casts/type changes from your > version (they are a nice clean-up) into my patch if Rickard agrees to > fix this independently of his change. > > >> I'd also like to ask if I should submit an extra RFR for 8151956 with > >> my fix or if we should close 8151956 and fix it as part of Rickard's > >> change for 8152664. I'd be happy with both solutions :) > > > > Either way is fine with me. > > > > OK, let's wait for Rickard's opinion. > > > dl > > > >> A nice weekend everybody, > >> Volker > >> > >> > >> On Wed, Apr 13, 2016 at 3:31 PM, Rickard Bäckman > >> wrote: > >>> > >>> Volker, > >>> > >>> yes, I didn't realize at first that the nmethod was cast to a > >>> CompiledMethod before the call to consts_begin(). Otherwise it would > >>> have used the non-virtual consts_begin of nmethod that didn't have any > >>> virtual calls. > >>> > >>> The entire code chain, and looking itself up from the CodeCache before > >>> being fully constructed, seems quite problematic. Even before the changes I > >>> made. Previous to my changes the calls would have succeeded but returned > >>> header_begin() or this for all the consts_begin, consts_end, etc... ? > >>> > >>> /R > >>> > >>> On 04/11, Volker Simonis wrote: > >>>> > >>>> Rickard, Dean, > >>>> > >>>> I'm afraid all these hacks cannot work. It doesn't help to make > >>>> CompiledMethod::consts_begin() non-virtual and then call a virtual > >>>> function from it.
The problem ist that at the point where you call > >>>> consts_begin_v(), the vtable of 'this' is still the one of CodeBlob and > >>>> this results in calling yet another arbitrary function: > >>>> > >>>> #0 CodeBlob::is_locked_by_vm (this=0x3fff607d0c10) at > >>>> > >>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/codeBlob.hpp:168 > >>>> #1 0x00003fffb6e38048 in CompiledMethod::consts_begin > >>>> (this=0x3fff607d0c10) at > >>>> > >>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/compiledMethod.hpp:255 > >>>> #2 0x00003fffb758d658 in RelocIterator::initialize > >>>> (this=0x3ffdfd3fc9a8, > >>>> nm=0x3fff607d0c10, begin=0x3fff607d0fac "\001", limit=0x0) at > >>>> > >>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.cpp:142 > >>>> #3 0x00003fffb6ace56c in RelocIterator::RelocIterator > >>>> (this=0x3ffdfd3fc9a8, nm=0x3fff607d0c10, begin=0x3fff607d0fac "\001", > >>>> limit=0x0) at > >>>> > >>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.hpp:1378 > >>>> #4 0x00003fffb7591afc in trampoline_stub_Relocation::get_trampoline_for > >>>> (call=0x3fff607d0fac "\001", code=0x3fff607d0c10) at > >>>> > >>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.cpp:849 > >>>> #5 0x00003fffb741ba4c in NativeCall::get_trampoline > >>>> (this=0x3fff607d0fac) > >>>> at > >>>> > >>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/cpu/ppc/vm/nativeInst_ppc.cpp:146 > >>>> #6 0x00003fffb7596a34 in Relocation::pd_call_destination > >>>> (this=0x3ffdfd3fcd10, orig_addr=0x3fff6033482c "\001") at > >>>> > >>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/cpu/ppc/vm/relocInfo_ppc.cpp:87 > >>>> #7 0x00003fffb758f71c in CallRelocation::fix_relocation_after_move > >>>> (this=0x3ffdfd3fcd10, src=0x3ffdfd3fdbc0, dest=0x3ffdfd3fcdd8) at > >>>> > >>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.cpp:506 > >>>> #8 0x00003fffb6c48914 in CodeBuffer::relocate_code_to > >>>> (this=0x3ffdfd3fdbc0, dest=0x3ffdfd3fcdd8) at > >>>> > >>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/asm/codeBuffer.cpp:812 > >>>> #9 0x00003fffb6c48480 in CodeBuffer::copy_code_to (this=0x3ffdfd3fdbc0, > >>>> dest_blob=0x3fff607d0c10) at > >>>> > >>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/asm/codeBuffer.cpp:748 > >>>> #10 0x00003fffb6c426ec in CodeBuffer::copy_code_and_locs_to > >>>> (this=0x3ffdfd3fdbc0, blob=0x3fff607d0c10) at > >>>> > >>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/asm/codeBuffer.hpp:607 > >>>> #11 0x00003fffb6c3f8b0 in CodeBlob::CodeBlob (this=0x3fff607d0c10, > >>>> name=0x3fffb7a760f8 "nmethod", layout=..., cb=0x3ffdfd3fdbc0, > >>>> frame_complete_offset=20, frame_size=14, oop_maps=0x3ffe0001ed00, > >>>> caller_must_gc_arguments=false, subtype=8) at > >>>> > >>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/codeBlob.cpp:117 > >>>> #12 0x00003fffb6ce5360 in CompiledMethod::CompiledMethod > >>>> (this=0x3fff607d0c10, method=0x3ffe1ddce568, name=0x3fffb7a760f8 > >>>> "nmethod", > >>>> size=1768, header_size=392, cb=0x3ffdfd3fdbc0, frame_complete_offset=20, > >>>> frame_size=14, oop_maps=0x3ffe0001ed00, caller_must_gc_arguments=false) > >>>> at > >>>> > >>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/compiledMethod.cpp:42 > >>>> #13 0x00003fffb7422198 in nmethod::nmethod (this=0x3fff607d0c10, > >>>> method=0x3ffe1ddce568, nmethod_size=1768, compile_id=4, entry_bci=-1, > >>>> 
offsets=0x3ffdfd3fdb98, orig_pc_offset=104, debug_info=0x3fffb03f2dc0, > >>>> dependencies=0x3ffe0001ed70, code_buffer=0x3ffdfd3fdbc0, frame_size=14, > >>>> oop_maps=0x3ffe0001ed00, handler_table=0x3ffdfd3fdb50, > >>>> nul_chk_table=0x3ffdfd3fdb70, compiler=0x3fffb03d0cd0, comp_level=3) at > >>>> > >>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/nmethod.cpp:706 > >>>> #14 0x00003fffb7421850 in nmethod::new_nmethod (method=..., > >>>> compile_id=4, > >>>> entry_bci=-1, offsets=0x3ffdfd3fdb98, orig_pc_offset=104, > >>>> debug_info=0x3fffb03f2dc0, dependencies=0x3ffe0001ed70, > >>>> code_buffer=0x3ffdfd3fdbc0, frame_size=14, oop_maps=0x3ffe0001ed00, > >>>> handler_table=0x3ffdfd3fdb50, nul_chk_table=0x3ffdfd3fdb70, > >>>> compiler=0x3fffb03d0cd0, comp_level=3) at > >>>> > >>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/nmethod.cpp:548 > >>>> > >>>> I think we really need to rework this as proposed by Andrew in his last > >>>> mail. I'm working on such a fix. > >>>> > >>>> Regards, > >>>> Volker > >>>> > >>>> > >>>> On Mon, Apr 11, 2016 at 1:55 PM, Rickard B?ckman > >>>> >>>>> > >>>>> wrote: > >>>>> Volker, > >>>>> > >>>>> here is the patch if you want to try it. > >>>>> http://cr.openjdk.java.net/~rbackman/8152664/virtual.patch > >>>>> > >>>>> /R > >>>>> > >>>>> On 04/11, Rickard B?ckman wrote: > >>>>>> > >>>>>> Volker, > >>>>>> > >>>>>> thanks for finding this issue. > >>>>>> > >>>>>> I think that maybe the easiest fix is as follows: > >>>>>> > >>>>>> create new virtual methods in CompiledMethod: > >>>>>> > >>>>>> virtual address stub_begin_v() = 0; > >>>>>> > >>>>>> make the now virtual stub_begin non-virtual like: > >>>>>> > >>>>>> address stub_begin() { return stub_begin_v(); } > >>>>>> > >>>>>> in nmethod we override the stub_begin() with the normal this + offset > >>>>>> compuation and implement stub_begin_v() to call stub_begin(). > >>>>>> > >>>>>> That will avoid all virtual calls in the case were we are not working > >>>>>> on > >>>>>> a CompiledMethod. > >>>>>> > >>>>>> It adds a couple of methods though. What do you think? > >>>>>> > >>>>>> /R > >>>>>> > >>>>>> On 04/08, Volker Simonis wrote: > >>>>>>> > >>>>>>> Hi Rickard, > >>>>>>> > >>>>>>> I found the problem why your change crashes the VM on ppc (and I'm > >>>>> > >>>>> pretty > >>>>>>> > >>>>>>> sure it will also crash on ARM - @Andrew, maybe you can try it out?). > >>>>> > >>>>> It is > >>>>>>> > >>>>>>> caused by the following code in address NativeCall::get_trampoline() > >>>>> > >>>>> which > >>>>>>> > >>>>>>> is also present on arm64: > >>>>>>> > >>>>>>> address NativeCall::get_trampoline() { > >>>>>>> address call_addr = addr_at(0); > >>>>>>> CodeBlob *code = CodeCache::find_blob(call_addr); > >>>>>>> ... > >>>>>>> // If the codeBlob is not a nmethod, this is because we get here > >>>>> > >>>>> from the > >>>>>>> > >>>>>>> // CodeBlob constructor, which is called within the nmethod > >>>>> > >>>>> constructor. 
> >>>>>>> > >>>>>>> return trampoline_stub_Relocation::get_trampoline_for(call_addr, > >>>>>>> (nmethod*)code); > >>>>>>> } > >>>>>>> > >>>>>>> The comment explains the situation quite well: we're in the CodeBlob > >>>>>>> constructor which was called by the CompiledMethod constructor which > >>>>> > >>>>> was > >>>>>>> > >>>>>>> called from the nmethod constructor: > >>>>>>> > >>>>>>> #3 0x00003fffb741b80c in NativeCall::get_trampoline > >>>>> > >>>>> (this=0x3fff607d0fac) > >>>>>>> > >>>>>>> at > >>>>>>> > >>>>> > >>>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/cpu/ppc/vm/nativeInst_ppc.cpp:146 > >>>>>>> > >>>>>>> #4 0x00003fffb7596914 in Relocation::pd_call_destination > >>>>>>> (this=0x3ffdfe3fcc90, orig_addr=0x3fff603b8a2c "\001") at > >>>>>>> > >>>>> > >>>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/cpu/ppc/vm/relocInfo_ppc.cpp:87 > >>>>>>> > >>>>>>> #5 0x00003fffb758f5fc in CallRelocation::fix_relocation_after_move > >>>>>>> (this=0x3ffdfe3fcc90, src=0x3ffdfe3fdb40, dest=0x3ffdfe3fcd58) at > >>>>>>> > >>>>> > >>>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.cpp:506 > >>>>>>> > >>>>>>> #6 0x00003fffb6c48898 in CodeBuffer::relocate_code_to > >>>>>>> (this=0x3ffdfe3fdb40, dest=0x3ffdfe3fcd58) at > >>>>>>> > >>>>> > >>>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/asm/codeBuffer.cpp:812 > >>>>>>> > >>>>>>> #7 0x00003fffb6c48404 in CodeBuffer::copy_code_to > >>>>> > >>>>> (this=0x3ffdfe3fdb40, > >>>>>>> > >>>>>>> dest_blob=0x3fff607d0c10) at > >>>>>>> > >>>>> > >>>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/asm/codeBuffer.cpp:748 > >>>>>>> > >>>>>>> #8 0x00003fffb6c42670 in CodeBuffer::copy_code_and_locs_to > >>>>>>> (this=0x3ffdfe3fdb40, blob=0x3fff607d0c10) at > >>>>>>> > >>>>> > >>>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/asm/codeBuffer.hpp:607 > >>>>>>> > >>>>>>> #9 0x00003fffb6c3f834 in CodeBlob::CodeBlob (this=0x3fff607d0c10, > >>>>>>> name=0x3fffb7a75fd8 "nmethod", layout=..., cb=0x3ffdfe3fdb40, > >>>>>>> frame_complete_offset=20, frame_size=14, oop_maps=0x3ffe00049620, > >>>>>>> caller_must_gc_arguments=false, subtype=8) at > >>>>>>> > >>>>> > >>>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/codeBlob.cpp:117 > >>>>>>> > >>>>>>> #10 0x00003fffb6ce52c8 in CompiledMethod::CompiledMethod > >>>>>>> (this=0x3fff607d0c10, method=0x3ffe1ddce568, name=0x3fffb7a75fd8 > >>>>> > >>>>> "nmethod", > >>>>>>> > >>>>>>> size=1768, header_size=392, cb=0x3ffdfe3fdb40, > >>>>> > >>>>> frame_complete_offset=20, > >>>>>>> > >>>>>>> frame_size=14, oop_maps=0x3ffe00049620, > >>>>> > >>>>> caller_must_gc_arguments=false) at > >>>>> > >>>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/compiledMethod.cpp:42 > >>>>>>> > >>>>>>> #11 0x00003fffb7421f58 in nmethod::nmethod (this=0x3fff607d0c10, > >>>>>>> method=0x3ffe1ddce568, nmethod_size=1768, compile_id=4, entry_bci=-1, > >>>>>>> offsets=0x3ffdfe3fdb18, orig_pc_offset=104, > >>>>>>> debug_info=0x3fffb03d55f0, > >>>>>>> dependencies=0x3ffe00049690, code_buffer=0x3ffdfe3fdb40, > >>>>>>> frame_size=14, > >>>>>>> oop_maps=0x3ffe00049620, handler_table=0x3ffdfe3fdad0, > >>>>>>> nul_chk_table=0x3ffdfe3fdaf0, compiler=0x3fffb03bc270, comp_level=3) > >>>>>>> at > >>>>>>> > >>>>> > >>>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/nmethod.cpp:706 > >>>>>>> > >>>>>>> Now we cast 'code' to 'nmethod' but at this point in time 'code' is > >>>>> > >>>>> still a > >>>>>>> > >>>>>>> CodeBlob from the C++ 
point of view (i.e. it still has a CodeBlob > >>>>> > >>>>> vtable > >>>>>>> > >>>>>>> (see [1] for an explanation)). > >>>>>>> > >>>>>>> Later on, in RelocIterator::initialize() we call virtual methods on > >>>>>>> the > >>>>>>> nmethod which still has the vtable of a "CodeBlob" and this fails > >>>>> > >>>>> badly: > >>>>>>> > >>>>>>> #0 SingletonBlob::print_on (this=0x3fff607d0c10, st=0x0) at > >>>>>>> > >>>>> > >>>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/codeBlob.cpp:584 > >>>>>>> > >>>>>>> #1 0x00003fffb758d51c in RelocIterator::initialize > >>>>> > >>>>> (this=0x3ffdfe3fc928, > >>>>>>> > >>>>>>> nm=0x3fff607d0c10, begin=0x3fff607d0fac "\001", limit=0x0) at > >>>>>>> > >>>>> > >>>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.cpp:144 > >>>>>>> > >>>>>>> #2 0x00003fffb6ace56c in RelocIterator::RelocIterator > >>>>>>> (this=0x3ffdfe3fc928, nm=0x3fff607d0c10, begin=0x3fff607d0fac "\001", > >>>>>>> limit=0x0) at > >>>>>>> > >>>>> > >>>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.hpp:1378 > >>>>>>> > >>>>>>> #3 0x00003fffb75919dc in > >>>>> > >>>>> trampoline_stub_Relocation::get_trampoline_for > >>>>>>> > >>>>>>> (call=0x3fff607d0fac "\001", code=0x3fff607d0c10) at > >>>>>>> > >>>>> > >>>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/share/vm/code/relocInfo.cpp:849 > >>>>>>> > >>>>>>> #4 0x00003fffb741b80c in NativeCall::get_trampoline > >>>>> > >>>>> (this=0x3fff607d0fac) > >>>>>>> > >>>>>>> at > >>>>>>> > >>>>> > >>>>> /usr/work/d046063/OpenJDK/jdk9-hs-comp/hotspot/src/cpu/ppc/vm/nativeInst_ppc.cpp:146 > >>>>>>> > >>>>>>> As you can see, we actually want to call nmethod::stub_begin() at > >>>>>>> relocInfo.cpp:144 > >>>>>>> > >>>>>>> 142 _section_start[CodeBuffer::SECT_CONSTS] = nm->consts_begin(); > >>>>>>> 143 _section_start[CodeBuffer::SECT_INSTS ] = nm->insts_begin() ; > >>>>>>> 144 _section_start[CodeBuffer::SECT_STUBS ] = nm->stub_begin() ; > >>>>>>> > >>>>>>> but we actually end up in SingletonBlob::print_on() which is a > >>>>> > >>>>> completely > >>>>>>> > >>>>>>> different method. Notice that the call to nm->consts_begin() before > >>>>> > >>>>> also > >>>>>>> > >>>>>>> fails, but it doesn't crash the VM because it happens to call > >>>>>>> SingletonBlob::verify() which has no bad side effect. The call to > >>>>>>> nm->insts_begin() in line 143 is non-virtual and thus works fine. > >>>>>>> Here > >>>>> > >>>>> are > >>>>>>> > >>>>>>> the corresponding vtable slots in the CodeBlob vtable for > >>>>> > >>>>> consts_begin() > >>>>>>> > >>>>>>> and stub_begin() > >>>>>>> > >>>>>>> (gdb) p &nmethod::consts_begin > >>>>>>> $76 = &virtual table offset 42 > >>>>>>> (gdb) p &nmethod::stub_begin > >>>>>>> $77 = &virtual table offset 44 > >>>>>>> (gdb) p ((*(void ***)nm) + 1)[42] > >>>>>>> $86 = (void *) 0x3fffb6c41df8 > >>>>>>> (gdb) p ((*(void ***)nm) + 1)[44] > >>>>>>> $87 = (void *) 0x3fffb6c41e64 >>>>> > >>>>> const> > >>>>>>> > >>>>>>> As you can see, 'nm' is indeed a "CodeBlob" at this point: > >>>>>>> > >>>>>>> (gdb) p *(void ***)nm > >>>>>>> $91 = (void **) 0x3fffb7befa00 > >>>>>>> (gdb) p nm->print() > >>>>>>> [CodeBlob (0x00003fff607d1090)] > >>>>>>> Framesize: 14 > >>>>>>> > >>>>>>> The offending calls succeeded before your change, because they where > >>>>> > >>>>> not > >>>>>>> > >>>>>>> virtual. Any idea how we can fix this with the new class hierarchy? 
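(A tiny standalone illustration of the C++ rule behind this: while a base-class constructor is still running, the object's dynamic type is the base class, so any virtual call made directly or indirectly from that constructor dispatches to the base-class implementation, never to the derived override. The class names below are invented for the example and are not the real CodeBlob/nmethod types.)

  #include <cstdio>

  struct Base {
    Base() {
      // Virtual call from the constructor: the vtable pointer still
      // refers to Base here, so this prints "Base", not "Derived".
      whoami();
    }
    virtual void whoami() { std::printf("Base\n"); }
  };

  struct Derived : public Base {
    virtual void whoami() { std::printf("Derived\n"); }
  };

  int main() {
    Derived d;    // prints "Base" from inside the Base constructor
    d.whoami();   // prints "Derived" once construction has completed
    return 0;
  }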
> >>>>>>> > >>>>>>> Regards, > >>>>>>> Volker > >>>>>>> > >>>>>>> [1] > >>>>>>> > >>>>> > >>>>> http://stackoverflow.com/questions/6591859/when-does-the-vptr-pointing-to-vtable-get-initialized-for-a-polymorphic-class > >>>>>>> > >>>>>>> > >>>>>>> > >>>>>>> On Thu, Apr 7, 2016 at 5:50 PM, Volker Simonis < > >>>>> > >>>>> volker.simonis at gmail.com> > >>>>>>> > >>>>>>> wrote: > >>>>>>> > >>>>>>>> Hi Rickard, > >>>>>>>> > >>>>>>>> I'd also like to know what's the rational behind this quite large > >>>>>>>> change. Do you expect some performance or memory consumption > >>>>>>>> improvements or is this a prerequisite for another change which is > >>>>>>>> still to come? > >>>>>>>> > >>>>>>>> The change itself currently doesn't work on ppc64 (neither on Linux > >>>>>>>> nor on AIX). I get the following crash during the build when the > >>>>> > >>>>> newly > >>>>>>>> > >>>>>>>> built Hotspot is JIT-compiling java.lang.String::charAt on C1 : > >>>>>>>> > >>>>>>>> # > >>>>>>>> # A fatal error has been detected by the Java Runtime Environment: > >>>>>>>> # > >>>>>>>> # SIGSEGV (0xb) at pc=0x00001000012a44d0, pid=35331, tid=35404 > >>>>>>>> # > >>>>>>>> # JRE version: OpenJDK Runtime Environment (9.0) (slowdebug build > >>>>>>>> 9-internal+0-2016-04-07-162501.d046063.jdk9-hs-comp) > >>>>>>>> # Java VM: OpenJDK 64-Bit Server VM (slowdebug > >>>>>>>> 9-internal+0-2016-04-07-162501.d046063.jdk9-hs-comp, mixed mode, > >>>>>>>> tiered, compressed oo > >>>>>>>> ps, serial gc, linux-ppc64le) > >>>>>>>> # Problematic frame: > >>>>>>>> # V [libjvm.so+0xf744d0] outputStream::do_vsnprintf_and_write(char > >>>>>>>> const*, char*, bool)+0x40 > >>>>>>>> # > >>>>>>>> # No core dump will be written. Core dumps have been disabled. To > >>>>>>>> enable core dumping, try "ulimit -c unlimited" before starting Java > >>>>>>>> again > >>>>>>>> # > >>>>>>>> # If you would like to submit a bug report, please visit: > >>>>>>>> # http://bugreport.java.com/bugreport/crash.jsp > >>>>>>>> # > >>>>>>>> > >>>>>>>> --------------- S U M M A R Y ------------ > >>>>>>>> > >>>>>>>> Command Line: > >>>>>>>> > >>>>> -Dapplication.home=/sapmnt/ld9510/a/d046063/output-jdk9-hs-comp-dbg/jdk > >>>>>>>> > >>>>>>>> -Xms8m -XX:+UseSerialGC -Xms32M -Xmx512M -Djdk. > >>>>>>>> module.main=jdk.jlink jdk.jlink/jdk.tools.jmod.Main create > >>>>>>>> --module-version 9-internal --os-name Linux --os-arch ppc64le > >>>>>>>> --os-version > >>>>>>>> 2.6 --modulepath > >>>>>>>> /priv/d046063/output-jdk9-hs-comp-dbg/images/jmods > >>>>>>>> --hash-dependencies .* --exclude **_the.* --libs > >>>>>>>> > >>>>>>>> > >>>>> > >>>>> /priv/d046063/output-jdk9-hs-comp-dbg/support/modules_libs-stripped/java.base > >>>>>>>> > >>>>>>>> --cmds > >>>>>>>> > >>>>> > >>>>> /priv/d046063/output-jdk9-hs-comp-dbg/support/modules_cmds-stripped/java.base > >>>>>>>> > >>>>>>>> --config > >>>>>>>> /priv/d046063/output-jdk9-hs-comp-dbg/support/modules_conf/java.base > >>>>>>>> --class-path > >>>>> > >>>>> /priv/d046063/output-jdk9-hs-comp-dbg/jdk/modules/java.base > >>>>>>>> > >>>>>>>> /priv/d046063/output-jdk9-hs-comp-dbg/support/jmods/java.base.jmod > >>>>>>>> > >>>>>>>> Host: ld9510, POWER8E (raw), altivec supported, 48 cores, 61G, # > >>>>>>>> Please check /etc/os-release for details about this release. 
> >>>>>>>> Time: Thu Apr 7 16:28:55 2016 CEST elapsed time: 0 seconds (0d 0h > >>>>> > >>>>> 0m 0s) > >>>>>>>> > >>>>>>>> --------------- T H R E A D --------------- > >>>>>>>> > >>>>>>>> Current thread (0x000010000429c800): JavaThread "C1 > >>>>> > >>>>> CompilerThread10" > >>>>>>>> > >>>>>>>> daemon [_thread_in_vm, id=35404, > >>>>>>>> stack(0x000010006a800000,0x000010006ac00000)] > >>>>>>>> > >>>>>>>> > >>>>>>>> Current CompileTask: > >>>>>>>> C1: 761 3 3 java.lang.String::charAt (25 bytes) > >>>>>>>> > >>>>>>>> Stack: [0x000010006a800000,0x000010006ac00000], > >>>>>>>> sp=0x000010006abfc6c0, free space=4081k > >>>>>>>> Native frames: (J=compiled Java code, j=interpreted, Vv=VM code, > >>>>> > >>>>> C=native > >>>>>>>> > >>>>>>>> code) > >>>>>>>> V [libjvm.so+0xf744d0] outputStream::do_vsnprintf_and_write(char > >>>>>>>> const*, char*, bool)+0x40 > >>>>>>>> V [libjvm.so+0xf74668] outputStream::print_cr(char const*, > >>>>> > >>>>> ...)+0x68 > >>>>>>>> > >>>>>>>> V [libjvm.so+0x72189c] CodeBlob::print_on(outputStream*) > >>>>>>>> const+0x50 > >>>>>>>> V [libjvm.so+0x723bdc] RuntimeBlob::print_on(outputStream*) > >>>>> > >>>>> const+0x40 > >>>>>>>> > >>>>>>>> V [libjvm.so+0x721eb0] SingletonBlob::print_on(outputStream*) > >>>>> > >>>>> const+0x4c > >>>>>>>> > >>>>>>>> V [libjvm.so+0x106d51c] RelocIterator::initialize(CompiledMethod*, > >>>>>>>> unsigned char*, unsigned char*)+0x170 > >>>>>>>> V [libjvm.so+0x5ae56c] > >>>>> > >>>>> RelocIterator::RelocIterator(CompiledMethod*, > >>>>>>>> > >>>>>>>> unsigned char*, unsigned char*)+0x78 > >>>>>>>> V [libjvm.so+0x10719dc] > >>>>>>>> trampoline_stub_Relocation::get_trampoline_for(unsigned char*, > >>>>>>>> nmethod*)+0x78 > >>>>>>>> V [libjvm.so+0xefb80c] NativeCall::get_trampoline()+0x110 > >>>>>>>> V [libjvm.so+0x1076914] Relocation::pd_call_destination(unsigned > >>>>>>>> char*)+0x150 > >>>>>>>> V [libjvm.so+0x106f5fc] > >>>>>>>> CallRelocation::fix_relocation_after_move(CodeBuffer const*, > >>>>>>>> CodeBuffer*)+0x74 > >>>>>>>> V [libjvm.so+0x728898] CodeBuffer::relocate_code_to(CodeBuffer*) > >>>>>>>> const+0x390 > >>>>>>>> V [libjvm.so+0x728404] CodeBuffer::copy_code_to(CodeBlob*)+0x134 > >>>>>>>> V [libjvm.so+0x722670] > >>>>> > >>>>> CodeBuffer::copy_code_and_locs_to(CodeBlob*)+0x84 > >>>>>>>> > >>>>>>>> V [libjvm.so+0x71f834] CodeBlob::CodeBlob(char const*, > >>>>>>>> CodeBlobLayout const&, CodeBuffer*, int, int, OopMapSet*, bool, > >>>>>>>> int)+0x320 > >>>>>>>> V [libjvm.so+0x7c52c8] CompiledMethod::CompiledMethod(Method*, > >>>>>>>> char > >>>>>>>> const*, int, int, CodeBuffer*, int, int, OopMapSet*, bool)+0xd8 > >>>>>>>> V [libjvm.so+0xf01f58] nmethod::nmethod(Method*, int, int, int, > >>>>>>>> CodeOffsets*, int, DebugInformationRecorder*, Dependencies*, > >>>>>>>> CodeBuffer*, int, OopMapSet*, ExceptionHandlerTable*, > >>>>>>>> ImplicitExceptionTable*, AbstractCompiler*, int)+0xe0 > >>>>>>>> V [libjvm.so+0xf01610] nmethod::new_nmethod(methodHandle const&, > >>>>>>>> int, int, CodeOffsets*, int, DebugInformationRecorder*, > >>>>> > >>>>> Dependencies*, > >>>>>>>> > >>>>>>>> CodeBuffer*, int, OopMapSet*, ExceptionHandlerTable*, > >>>>>>>> ImplicitExceptionTable*, AbstractCompiler*, int)+0x2c4 > >>>>>>>> V [libjvm.so+0x632970] ciEnv::register_method(ciMethod*, int, > >>>>>>>> CodeOffsets*, int, CodeBuffer*, int, OopMapSet*, > >>>>>>>> ExceptionHandlerTable*, ImplicitExceptionTable*, AbstractCompiler*, > >>>>>>>> bool, bool, RTMState)+0x560 > >>>>>>>> V [libjvm.so+0x48ee00] Compilation::install_code(int)+0x264 > >>>>>>>> V 
[libjvm.so+0x48eff8] Compilation::compile_method()+0x184 > >>>>>>>> V [libjvm.so+0x48f7a8] Compilation::Compilation(AbstractCompiler*, > >>>>>>>> ciEnv*, ciMethod*, int, BufferBlob*, DirectiveSet*)+0x288 > >>>>>>>> V [libjvm.so+0x4980d0] Compiler::compile_method(ciEnv*, ciMethod*, > >>>>>>>> int, DirectiveSet*)+0xc8 > >>>>>>>> V [libjvm.so+0x7b188c] > >>>>>>>> CompileBroker::invoke_compiler_on_method(CompileTask*)+0x590 > >>>>>>>> V [libjvm.so+0x7b07bc] CompileBroker::compiler_thread_loop()+0x310 > >>>>>>>> V [libjvm.so+0x11a614c] compiler_thread_entry(JavaThread*, > >>>>> > >>>>> Thread*)+0xa0 > >>>>>>>> > >>>>>>>> V [libjvm.so+0x119f3a8] JavaThread::thread_main_inner()+0x1b4 > >>>>>>>> V [libjvm.so+0x119f1a4] JavaThread::run()+0x1b8 > >>>>>>>> V [libjvm.so+0xf53d90] java_start(Thread*)+0x204 > >>>>>>>> C [libpthread.so.0+0x8a64] start_thread+0xf4 > >>>>>>>> C [libc.so.6+0x1032a0] clone+0x98 > >>>>>>>> > >>>>>>>> I haven't identified the exact cause (will analyze it tomorrow) but > >>>>>>>> the stack trace indicates that it is indeed related to your changes. > >>>>>>>> > >>>>>>>> Besides that I have some comments: > >>>>>>>> > >>>>>>>> codeBuffer.hpp: > >>>>>>>> > >>>>>>>> 472 CodeSection* insts() { return &_insts; } > >>>>>>>> 475 const CodeSection* insts() const { return &_insts; } > >>>>>>>> > >>>>>>>> - do we really need both versions? > >>>>>>>> > >>>>>>>> codeBlob.hpp: > >>>>>>>> > >>>>>>>> 135 nmethod* as_nmethod_or_null() const { return > >>>>>>>> is_nmethod() ? (nmethod*) this : NULL; } > >>>>>>>> 136 nmethod* as_nmethod() const { > >>>>>>>> assert(is_nmethod(), "must be nmethod"); return (nmethod*) this; } > >>>>>>>> 137 CompiledMethod* as_compiled_method_or_null() const { return > >>>>>>>> is_compiled() ? (CompiledMethod*) this : NULL; } > >>>>>>>> 138 CompiledMethod* as_compiled_method() const { > >>>>>>>> assert(is_compiled(), "must be compiled"); return (CompiledMethod*) > >>>>>>>> this; } > >>>>>>>> 139 CodeBlob* as_codeblob_or_null() const { return > >>>>>>>> (CodeBlob*) this; } > >>>>>>>> > >>>>>>>> - I don't like this code. You make the getters 'const' which > >>>>>>>> implicitely makes 'this' a "pointer to const" but then the returned > >>>>>>>> pointer is a normal pointer to a non-const object and therefore you > >>>>>>>> have to statically cast away the "pointer to const" (that's why you > >>>>>>>> need the cast even in the case where you return a CodeBlob*). So > >>>>>>>> either remove the const qualifier from the method declarations or > >>>>> > >>>>> make > >>>>>>>> > >>>>>>>> them return "pointers to const". And by the way, > >>>>> > >>>>> as_codeblob_or_null() > >>>>>>>> > >>>>>>>> doesn't seemed to be used anywhere in the code, why do we need it at > >>>>>>>> all? > >>>>>>>> > >>>>>>>> - Why do we need the non-virtual methods is_nmethod() and > >>>>>>>> is_compiled() to manually simulate virtual behavior. Why can't we > >>>>>>>> simply make them virtual and implement them accordingly in nmathod > >>>>> > >>>>> and > >>>>>>>> > >>>>>>>> CompiledMethod? > >>>>>>>> > >>>>>>>> Regards, > >>>>>>>> Volker > >>>>>>>> > >>>>>>>> On Thu, Apr 7, 2016 at 2:12 PM, Rickard B?ckman > >>>>>>>> wrote: > >>>>>>>>> > >>>>>>>>> Hi, > >>>>>>>>> > >>>>>>>>> can I please have review for this patch please? > >>>>>>>>> > >>>>>>>>> So far CodeBlobs have required all the data (metadata, oops, code, > >>>>> > >>>>> etc) > >>>>>>>>> > >>>>>>>>> to be in one continuous blob With this patch we are looking to > >>>>> > >>>>> change > >>>>>>>>> > >>>>>>>>> that. 
It's been done by changing offsets in CodeBlob to addresses, > >>>>>>>>> making some methods virtual to allow different behavior and also > >>>>>>>>> creating a couple of new classes. CompiledMethod now sits inbetween > >>>>>>>>> CodeBlob and nmethod. > >>>>>>>>> > >>>>>>>>> CR: https://bugs.openjdk.java.net/browse/JDK-8152664 > >>>>>>>>> Webrev: http://cr.openjdk.java.net/~rbackman/8152664/ > >>>>>>>>> > >>>>>>>>> Thanks > >>>>>>>>> /R > > > > From rickard.backman at oracle.com Tue Apr 19 05:32:12 2016 From: rickard.backman at oracle.com (Rickard =?iso-8859-1?Q?B=E4ckman?=) Date: Tue, 19 Apr 2016 07:32:12 +0200 Subject: RFR (XL): 8152664 - Support non-continuous CodeBlobs in HotSpot In-Reply-To: <20160407121221.GQ9504@rbackman> References: <20160407121221.GQ9504@rbackman> Message-ID: <20160419053212.GB19871@rbackman> Here is the updated webrev, rebased and I think I have fixed all the comments with one exception. I've avoided making CompiledMethodIterator and NMethodIterator a template class for now. I agree we should do something to reuse the parts that are identical but for now I think there will be a few more changes to CompiledMethodIterator in an upcoming RFR. So can we hold off with that change? Webrev: http://cr.openjdk.java.net/~rbackman/8152664.3/ Thanks On 04/07, Rickard B?ckman wrote: > Hi, > > can I please have review for this patch please? > > So far CodeBlobs have required all the data (metadata, oops, code, etc) > to be in one continuous blob With this patch we are looking to change > that. It's been done by changing offsets in CodeBlob to addresses, > making some methods virtual to allow different behavior and also > creating a couple of new classes. CompiledMethod now sits inbetween > CodeBlob and nmethod. > > CR: https://bugs.openjdk.java.net/browse/JDK-8152664 > Webrev: http://cr.openjdk.java.net/~rbackman/8152664/ > > Thanks > /R /R From stefan.karlsson at oracle.com Tue Apr 19 05:58:48 2016 From: stefan.karlsson at oracle.com (Stefan Karlsson) Date: Tue, 19 Apr 2016 07:58:48 +0200 Subject: RFR: 8067785: Using AlwaysPreTouch does not always touch all pages In-Reply-To: <57153838.8000902@oracle.com> References: <5710F714.5070601@oracle.com> <57153838.8000902@oracle.com> Message-ID: <5715C918.8070504@oracle.com> Thanks, Derek! StefanK On 2016-04-18 21:40, Derek White wrote: > Hi Stefan, > > On 4/15/16 10:13 AM, Stefan Karlsson wrote: >> Hi all, >> >> Please review this patch that ensures that VirtualSpace::expand_by >> pre-touches all committed memory. >> >> http://cr.openjdk.java.net/~stefank/8067785/webrev.01 >> https://bugs.openjdk.java.net/browse/JDK-8067785 >> >> 1) Previously, we pre-touched the memory between the old and new >> _high pointers. Since the _high variable isn't page aligned, the >> pre-touch code didn't touch all committed pages. I've moved the >> pre-touch code to be done for every os::commit_memory call in >> VirtuaSpace::expand_by. >> >> 2) expand_by has segregated the VirtualSpace into three segements. >> [small pages | large pages | small pages] so that we can have >> VirtualSpaces that are not large page size aligned. Previously, only >> the middle section called commit_memory with an alignment hint, and >> the small pages sections called commit_memory without a small pages >> hint. On all platforms, except Solaris, this boils down to the same >> code. 
On Solaris we have this additional code executed: >> size_t page_size = page_size_for_alignment(alignment_hint); >> if (page_size > (size_t) vm_page_size()) { >> (void)Solaris::setup_large_pages(addr, bytes, page_size); >> } >> >> But since the alignment_hint is set to vm_page_size we won't try to >> setup_large_pages here either. >> >> 3) The patch also contains a few style changes to make the >> VirtualSpace::expand_by easier to read (at least for me) >> >> Tested with JPRT and -XX:+ExecuteInternalVMTests with AlwaysPreTouch >> temporarily forced to true. >> >> Thanks, >> StefanK > Looks Good(tm)! > > - Derek From per.liden at oracle.com Tue Apr 19 06:53:45 2016 From: per.liden at oracle.com (Per Liden) Date: Tue, 19 Apr 2016 08:53:45 +0200 Subject: RFR: 8017629: G1: UseSHM in combination with a G1HeapRegionSize > os::large_page_size() falls back to use small pages In-Reply-To: <5714B13F.7080307@oracle.com> References: <570B8481.8010108@oracle.com> <570BAC1B.7040806@oracle.com> <570D1747.2020508@oracle.com> <570E2C45.7090201@oracle.com> <5714B13F.7080307@oracle.com> Message-ID: <5715D5F9.7070109@oracle.com> Hi Stefan, On 2016-04-18 12:04, Stefan Karlsson wrote: > Hi Thomas, > > I discussed the code with Per and updated the names and changed the code > slightly. > > http://cr.openjdk.java.net/~stefank/8017629/webrev.03.delta > http://cr.openjdk.java.net/~stefank/8017629/webrev.03 Looks good to me. cheers, Per > > 1) shmat_with_large_alignment was renamed to shmat_with_alignment and > all references to large pages were removed. > > 2) shmat_with_normal_alignment was renamed to shmat_at_address and all > references to pages sizes were removed. > > 3) shmat_with_alignment was renamed to shmat_large_pages and all large > pages specific code were kept in that function. > > 4) shmat_large_pages was restructured to have one section for the > req_addr != NULL case, and another section for req_addr == NULL. I know > that you suggested to call shmat_with_alignment (previously > shmat_with_normal_alignment) for both cases in the req_addr == NULL > section, but I would like to only have to use shmat_with_alignment when > it's really necessary. > > Thanks, > StefanK > > On 2016-04-13 15:59, Thomas St?fe wrote: >> Hi Stefan, >> >> On Wed, Apr 13, 2016 at 1:23 PM, Stefan Karlsson >> > wrote: >> >> Hi Thomas, >> >> >> On 2016-04-13 12:44, Thomas St?fe wrote: >>> Hi Stefan, >>> >>> On Tue, Apr 12, 2016 at 5:41 PM, Stefan Karlsson >>> > >>> wrote: >>> >>> Hi Thomas, >>> >>> >>> On 2016-04-12 16:23, Thomas St?fe wrote: >>>> Hi Stefan, >>>> >>>> >>>> On Mon, Apr 11, 2016 at 3:52 PM, Stefan Karlsson >>>> >>> > wrote: >>>> >>>> Hi Thomas, >>>> >>>> On 2016-04-11 14:39, Thomas St?fe wrote: >>>>> Hi Stefan, >>>>> >>>>> short question, why the mmap before the shmat? Why not >>>>> shmat right away at the requested address? >>>> >>>> If we have a requested_address we do exactly what you >>>> propose. >>>> >>>> if (req_addr == NULL && alignment > >>>> os::large_page_size()) { >>>> return shmat_with_large_alignment(shmid, bytes, >>>> alignment); >>>> } else { >>>> return shmat_with_normal_alignment(shmid, req_addr); >>>> } >>>> >>>> ... 
>>>> >>>> static char* shmat_with_normal_alignment(int shmid, >>>> char* req_addr) { >>>> char* addr = (char*)shmat(shmid, req_addr, 0); >>>> >>>> if ((intptr_t)addr == -1) { >>>> shm_warning_with_errno("Failed to attach shared memory."); >>>> return NULL; >>>> } >>>> >>>> return addr; >>>> } >>>> >>>> >>>> It's when you don't have a requested address that mmap >>>> is used to find a large enough virtual memory area. >>>> >>>> >>>> Sorry, seems I did not look at this coding thoroughly >>>> enough. I understand now that you do mmap to allocate and >>>> then to cut away the extra pre-/post-space, something which >>>> would not be possible with shmat, which cannot be unmapped >>>> page-wise. >>>> >>>> But I am still not sure why we do it his way: >>>> >>>> 3429 static char* shmat_with_alignment(int shmid, size_t >>>> bytes, size_t alignment, char* req_addr) { >>>> 3430 // If there's no requested address, the shmat call >>>> can return memory that is not >>>> 3431 // 'alignment' aligned, if the given alignment is >>>> larger than the large page size. >>>> 3432 // Special care needs to be taken to ensure that we >>>> get aligned memory back. >>>> 3433 if (req_addr == NULL && alignment > >>>> os::large_page_size()) { >>>> 3434 return shmat_with_large_alignment(shmid, bytes, >>>> alignment); >>>> 3435 } else { >>>> 3436 return shmat_with_normal_alignment(shmid, req_addr); >>>> 3437 } >>>> 3438 } >>>> >>>> For req_addr==0 and big alignment, we attach at the given >>>> alignment ("shmat_with_large_alignment"). >>>> For req_addr!=0, we attach at the given requested address >>>> ("shmat_with_normal_alignment"). >>>> For req_addr==0 and smaller alignment, we ignore the >>>> alignment and attach anywhere? >>>> >>>> Maybe I am slow, but why does it matter if the alignment is >>>> large or small? Why not just distinguish between: >>>> >>>> 1) address given (req_addr!=0): in this case we attach at >>>> this req_addr and rely on the user having aligned the >>>> address properly for his purposes. We specify 0 for flags, >>>> so we will attach at exactly the given address or fail. In >>>> this case we could simply ignore the given alignment - if >>>> one was given - or just use it to counter-check the req_addr. >>>> >>>> 2) alignment given (req_addr==0 and alignment > 0): attach >>>> at the given alignment using mmap-before-shmat. This could >>>> be done for any alignment, be it large or small. >>> >>> What you propose doesn't work. >>> >>> We're allocating large pages with SHM_HUGETLB, and if we try >>> to attach to an address that is not large_page_size aligned >>> the shmat call returns EINVAL. >>> >>> >>> I was aware of this. What I meant was: >>> >>> You have "shmat_with_large_alignment" which takes an alignment >>> and does its best to shmat with that alignment using the mmap >>> trick. This coding does not need to know anything about huge >>> pages, and actually does not do anything huge-pagey, apart from >>> the asserts - it would just as well work with small pages, >>> because the only place where the code needs to know about huge >>> pages is in the layer above, in reserve_memory_special - where we >>> pass SHM_HUGETLB to shmget. (Btw, I always wondered about the >>> "reserve_memory_special" naming.) >>> >>> I think my point is that by renaming this to >>> "shmat_with_alignment" and removing the huge-page-related asserts >>> the function would become both simpler and more versatile and >>> could be reused for small alignments as well as large ones. 
The >>> fact that it returns EINVAL for alignments instead of asserting >>> would not be a problem - we would return an error instead of >>> asserting because of bad alignment, and both handling this error >>> and asserting for huge-page-alignment could be handled better in >>> reserve_memory_special. >>> >>> To put it another way, I think "shmat_with_large_alignment" does >>> not need to know about huge pages; this should be the >>> responsibility of reserve_memory_special. >>> >>> About "shmat_with_normal_alignment", this is actually only a raw >>> shmat call and exists for the req_addr!=NULL case and for the >>> case where we do not pass neither req_addr nor alignment. So the >>> only thing it does not handle is alignment, so it is misnamed and >>> also should not be called for the >>> req_addr==NULL-and-small-alignments-case. >> >> The reserve_memory_special_shm function and the associated helper >> functions I'm adding are specifically written to support large >> pages allocations. The names "normal_alignment" and >> "large_alignment" are intended to refer to alignment sizes >> compared to the large pages size. I grant you that it's not >> obvious from the name, and we can rename them to make it more clear. >> >> I want to provide a small bug fix for this large pages bug, while >> you are suggesting that we re-purpose the code into supporting >> small page allocations as well. Your suggestions might be good, >> but may I suggest that you create a patch and an RFE that >> motivates why we should make this code more generic to support >> small pages as well? >> >> Thanks, >> StefanK >> >> >> Ok, we can do that. I was just worried that the code becomes more >> difficult to understand. But lets wait for some more reviews. >> >> Kind Regards, Thomas >> >> >>>> >>>> Functions would become simpler and also could be clearer >>>> named (e.g. "shmat_at_address" and "shmat_with_alignment", >>>> respectivly). >>> >>> Maybe I should rename the functions to make it more obvious >>> that these are large pages specific functions? >>> >>>> >>>> ---- >>>> >>>> This: >>>> >>>> 3402 if ((intptr_t)addr == -1) { >>>> 3403 shm_warning_with_errno("Failed to attach shared memory."); >>>> 3404 // Since we don't know if the kernel unmapped the >>>> pre-reserved memory area >>>> 3405 // we can't unmap it, since that would potentially >>>> unmap memory that was >>>> 3406 // mapped from other threads. >>>> 3407 return NULL; >>>> 3408 } >>>> >>>> seems scary. Means for every call this happens, we leak the >>>> reserved (not committed) address space? >>> >>> Yes, that's unfortunate. >>> >>> An alternative would be to use this sequence: >>> 1) Use anon_mmap_aligned to find a suitable VA range >>> 2) Immediately unmap the VA range >>> 3) Try to attach at that VA range _without_ SHM_REMAP >>> >>> That would remove the risk of leaking the reserved address >>> space, but instead we risk failing at (3) if another thread >>> manages to allocate memory inside the found VA range. This >>> will cause some users to unnecessarily fail to get large >>> pages, though. We've had other problems when pre-existing >>> threads used mmap while we were initializing the VM. See: >>> JDK-8007074. >>> >>> >>> Yes; btw you also could do this with shmget/shmat instead of mmap. >>> >>> Note that similar unclean tricks are already done in other >>> places, see e.g. the windows version of >>> os::pd_split_reserved_memory(). Which deals with VirtualAlloc() >>> being unable, like shmget, to deallocate piece-wise. 
>>> >>> >>> >>>> For most cases (anything but ENOMEM, actually) could we at >>>> least assert?: >>>> >>>> EACCES - should not happen: we created the shared memory and >>>> are its owner >>>> EIDRM - should not happen. >>>> EINVAL - should not happen. (you already check now the >>>> attach address for alignment to SHMLBA, so this is covered) >>> >>> Sure. I'll add asserts for these. >>> >>>> >>>> --- >>>> >>>> Smaller nits: >>>> >>>> Functions called "shmat_..." suggest shmat-like behaviour, >>>> so could we have them return -1 instead of NULL in case of >>>> error? >>> >>> That would add clutter to the reserve_memory_special_shm, and >>> it might also suggest that it would be OK to check errno for >>> the failure reason, which probably wouldn't work. I'll let >>> other Reviewers chime in and help decide if we should change >>> this. >>> >>> >>> You are right. If one returns -1, one would have to preserve >>> errno for the caller too. >>> >>> Thanks for reviewing this, >>> StefanK >>> >>> >>> You are welcome! >>> >>> Kind Regards, Thomas >>> >>> >>> >>>> >>>> Kind Regards, Thomas >>>> >>>>> >>>>> Also note that mmap- and shmat-allocated memory may >>>>> have different alignment requirements: mmap requires a >>>>> page-aligned request address, whereas shmat requires >>>>> alignment to SHMLBA, which may be multiple pages (e.g. >>>>> for ARM: >>>>> >>>>> http://lxr.free-electrons.com/source/arch/arm/include/asm/shmparam.h#L9). >>>>> >>>>> So, for this shat-over-mmap trick to work, request >>>>> address has to be aligned to SHMLBA, not just page size. >>>>> >>>>> I see that you assert alignment of requ address to >>>>> os::large_page_size(), which I would assume is a >>>>> multiple of SHMLBA, but I am not sure of this. >>>> >>>> I've added some defensive code and asserts to catch this >>>> if/when this assumption fails: >>>> >>>> >>>> http://cr.openjdk.java.net/~stefank/8017629/webrev.02.delta/ >>>> >>>> >>>> http://cr.openjdk.java.net/~stefank/8017629/webrev.02 >>>> >>>> >>>> I need to verify that this works on other machines than >>>> my local Linux x64 machine. >>>> >>>> Thanks, >>>> StefanK >>>> >>>>> >>>>> Kind Regards, Thomas >>>>> >>>>> >>>>> >>>>> On Mon, Apr 11, 2016 at 1:03 PM, Stefan Karlsson >>>>> >>>> > wrote: >>>>> >>>>> Hi all, >>>>> >>>>> Please review this patch to enable SHM large page >>>>> allocations even when the requested alignment is >>>>> larger than os::large_page_size(). >>>>> >>>>> http://cr.openjdk.java.net/~stefank/8017629/webrev.01 >>>>> >>>>> >>>>> https://bugs.openjdk.java.net/browse/JDK-8017629 >>>>> >>>>> G1 is affected by this bug since it requires the >>>>> heap to start at an address that is aligned with >>>>> the heap region size. The patch fixes this by >>>>> changing the UseSHM large pages allocation code. >>>>> First, virtual memory with correct alignment is >>>>> pre-reserved and then the large pages are attached >>>>> to this memory area. 
>>>>> >>>>> Tested with vm.gc.testlist and ExecuteInternaVMTests >>>>> >>>>> Thanks, >>>>> StefanK >>>>> >>>>> >>>> >>>> >>> >>> >> >> > From stefan.karlsson at oracle.com Tue Apr 19 06:57:26 2016 From: stefan.karlsson at oracle.com (Stefan Karlsson) Date: Tue, 19 Apr 2016 08:57:26 +0200 Subject: RFR: 8017629: G1: UseSHM in combination with a G1HeapRegionSize > os::large_page_size() falls back to use small pages In-Reply-To: <5715D5F9.7070109@oracle.com> References: <570B8481.8010108@oracle.com> <570BAC1B.7040806@oracle.com> <570D1747.2020508@oracle.com> <570E2C45.7090201@oracle.com> <5714B13F.7080307@oracle.com> <5715D5F9.7070109@oracle.com> Message-ID: <5715D6D6.6080603@oracle.com> Thanks, Per! StefanK On 2016-04-19 08:53, Per Liden wrote: > Hi Stefan, > > On 2016-04-18 12:04, Stefan Karlsson wrote: >> Hi Thomas, >> >> I discussed the code with Per and updated the names and changed the code >> slightly. >> >> http://cr.openjdk.java.net/~stefank/8017629/webrev.03.delta >> http://cr.openjdk.java.net/~stefank/8017629/webrev.03 > > Looks good to me. > > cheers, > Per > >> >> 1) shmat_with_large_alignment was renamed to shmat_with_alignment and >> all references to large pages were removed. >> >> 2) shmat_with_normal_alignment was renamed to shmat_at_address and all >> references to pages sizes were removed. >> >> 3) shmat_with_alignment was renamed to shmat_large_pages and all large >> pages specific code were kept in that function. >> >> 4) shmat_large_pages was restructured to have one section for the >> req_addr != NULL case, and another section for req_addr == NULL. I know >> that you suggested to call shmat_with_alignment (previously >> shmat_with_normal_alignment) for both cases in the req_addr == NULL >> section, but I would like to only have to use shmat_with_alignment when >> it's really necessary. >> >> Thanks, >> StefanK >> >> On 2016-04-13 15:59, Thomas St?fe wrote: >>> Hi Stefan, >>> >>> On Wed, Apr 13, 2016 at 1:23 PM, Stefan Karlsson >>> > wrote: >>> >>> Hi Thomas, >>> >>> >>> On 2016-04-13 12:44, Thomas St?fe wrote: >>>> Hi Stefan, >>>> >>>> On Tue, Apr 12, 2016 at 5:41 PM, Stefan Karlsson >>>> > >>>> wrote: >>>> >>>> Hi Thomas, >>>> >>>> >>>> On 2016-04-12 16:23, Thomas St?fe wrote: >>>>> Hi Stefan, >>>>> >>>>> >>>>> On Mon, Apr 11, 2016 at 3:52 PM, Stefan Karlsson >>>>> >>>> > wrote: >>>>> >>>>> Hi Thomas, >>>>> >>>>> On 2016-04-11 14:39, Thomas St?fe wrote: >>>>>> Hi Stefan, >>>>>> >>>>>> short question, why the mmap before the shmat? Why not >>>>>> shmat right away at the requested address? >>>>> >>>>> If we have a requested_address we do exactly what you >>>>> propose. >>>>> >>>>> if (req_addr == NULL && alignment > >>>>> os::large_page_size()) { >>>>> return shmat_with_large_alignment(shmid, bytes, >>>>> alignment); >>>>> } else { >>>>> return shmat_with_normal_alignment(shmid, req_addr); >>>>> } >>>>> >>>>> ... >>>>> >>>>> static char* shmat_with_normal_alignment(int shmid, >>>>> char* req_addr) { >>>>> char* addr = (char*)shmat(shmid, req_addr, 0); >>>>> >>>>> if ((intptr_t)addr == -1) { >>>>> shm_warning_with_errno("Failed to attach shared >>>>> memory."); >>>>> return NULL; >>>>> } >>>>> >>>>> return addr; >>>>> } >>>>> >>>>> >>>>> It's when you don't have a requested address that mmap >>>>> is used to find a large enough virtual memory area. >>>>> >>>>> >>>>> Sorry, seems I did not look at this coding thoroughly >>>>> enough. 
I understand now that you do mmap to allocate and >>>>> then to cut away the extra pre-/post-space, something which >>>>> would not be possible with shmat, which cannot be unmapped >>>>> page-wise. >>>>> >>>>> But I am still not sure why we do it his way: >>>>> >>>>> 3429 static char* shmat_with_alignment(int shmid, size_t >>>>> bytes, size_t alignment, char* req_addr) { >>>>> 3430 // If there's no requested address, the shmat call >>>>> can return memory that is not >>>>> 3431 // 'alignment' aligned, if the given alignment is >>>>> larger than the large page size. >>>>> 3432 // Special care needs to be taken to ensure that we >>>>> get aligned memory back. >>>>> 3433 if (req_addr == NULL && alignment > >>>>> os::large_page_size()) { >>>>> 3434 return shmat_with_large_alignment(shmid, bytes, >>>>> alignment); >>>>> 3435 } else { >>>>> 3436 return shmat_with_normal_alignment(shmid, req_addr); >>>>> 3437 } >>>>> 3438 } >>>>> >>>>> For req_addr==0 and big alignment, we attach at the given >>>>> alignment ("shmat_with_large_alignment"). >>>>> For req_addr!=0, we attach at the given requested address >>>>> ("shmat_with_normal_alignment"). >>>>> For req_addr==0 and smaller alignment, we ignore the >>>>> alignment and attach anywhere? >>>>> >>>>> Maybe I am slow, but why does it matter if the alignment is >>>>> large or small? Why not just distinguish between: >>>>> >>>>> 1) address given (req_addr!=0): in this case we attach at >>>>> this req_addr and rely on the user having aligned the >>>>> address properly for his purposes. We specify 0 for flags, >>>>> so we will attach at exactly the given address or fail. In >>>>> this case we could simply ignore the given alignment - if >>>>> one was given - or just use it to counter-check the req_addr. >>>>> >>>>> 2) alignment given (req_addr==0 and alignment > 0): attach >>>>> at the given alignment using mmap-before-shmat. This could >>>>> be done for any alignment, be it large or small. >>>> >>>> What you propose doesn't work. >>>> >>>> We're allocating large pages with SHM_HUGETLB, and if we try >>>> to attach to an address that is not large_page_size aligned >>>> the shmat call returns EINVAL. >>>> >>>> >>>> I was aware of this. What I meant was: >>>> >>>> You have "shmat_with_large_alignment" which takes an alignment >>>> and does its best to shmat with that alignment using the mmap >>>> trick. This coding does not need to know anything about huge >>>> pages, and actually does not do anything huge-pagey, apart from >>>> the asserts - it would just as well work with small pages, >>>> because the only place where the code needs to know about huge >>>> pages is in the layer above, in reserve_memory_special - where we >>>> pass SHM_HUGETLB to shmget. (Btw, I always wondered about the >>>> "reserve_memory_special" naming.) >>>> >>>> I think my point is that by renaming this to >>>> "shmat_with_alignment" and removing the huge-page-related asserts >>>> the function would become both simpler and more versatile and >>>> could be reused for small alignments as well as large ones. The >>>> fact that it returns EINVAL for alignments instead of asserting >>>> would not be a problem - we would return an error instead of >>>> asserting because of bad alignment, and both handling this error >>>> and asserting for huge-page-alignment could be handled better in >>>> reserve_memory_special. 
>>>> >>>> To put it another way, I think "shmat_with_large_alignment" does >>>> not need to know about huge pages; this should be the >>>> responsibility of reserve_memory_special. >>>> >>>> About "shmat_with_normal_alignment", this is actually only a raw >>>> shmat call and exists for the req_addr!=NULL case and for the >>>> case where we do not pass neither req_addr nor alignment. So the >>>> only thing it does not handle is alignment, so it is misnamed and >>>> also should not be called for the >>>> req_addr==NULL-and-small-alignments-case. >>> >>> The reserve_memory_special_shm function and the associated helper >>> functions I'm adding are specifically written to support large >>> pages allocations. The names "normal_alignment" and >>> "large_alignment" are intended to refer to alignment sizes >>> compared to the large pages size. I grant you that it's not >>> obvious from the name, and we can rename them to make it more >>> clear. >>> >>> I want to provide a small bug fix for this large pages bug, while >>> you are suggesting that we re-purpose the code into supporting >>> small page allocations as well. Your suggestions might be good, >>> but may I suggest that you create a patch and an RFE that >>> motivates why we should make this code more generic to support >>> small pages as well? >>> >>> Thanks, >>> StefanK >>> >>> >>> Ok, we can do that. I was just worried that the code becomes more >>> difficult to understand. But lets wait for some more reviews. >>> >>> Kind Regards, Thomas >>> >>> >>>>> >>>>> Functions would become simpler and also could be clearer >>>>> named (e.g. "shmat_at_address" and "shmat_with_alignment", >>>>> respectivly). >>>> >>>> Maybe I should rename the functions to make it more obvious >>>> that these are large pages specific functions? >>>> >>>>> >>>>> ---- >>>>> >>>>> This: >>>>> >>>>> 3402 if ((intptr_t)addr == -1) { >>>>> 3403 shm_warning_with_errno("Failed to attach shared >>>>> memory."); >>>>> 3404 // Since we don't know if the kernel unmapped the >>>>> pre-reserved memory area >>>>> 3405 // we can't unmap it, since that would potentially >>>>> unmap memory that was >>>>> 3406 // mapped from other threads. >>>>> 3407 return NULL; >>>>> 3408 } >>>>> >>>>> seems scary. Means for every call this happens, we leak the >>>>> reserved (not committed) address space? >>>> >>>> Yes, that's unfortunate. >>>> >>>> An alternative would be to use this sequence: >>>> 1) Use anon_mmap_aligned to find a suitable VA range >>>> 2) Immediately unmap the VA range >>>> 3) Try to attach at that VA range _without_ SHM_REMAP >>>> >>>> That would remove the risk of leaking the reserved address >>>> space, but instead we risk failing at (3) if another thread >>>> manages to allocate memory inside the found VA range. This >>>> will cause some users to unnecessarily fail to get large >>>> pages, though. We've had other problems when pre-existing >>>> threads used mmap while we were initializing the VM. See: >>>> JDK-8007074. >>>> >>>> >>>> Yes; btw you also could do this with shmget/shmat instead of mmap. >>>> >>>> Note that similar unclean tricks are already done in other >>>> places, see e.g. the windows version of >>>> os::pd_split_reserved_memory(). Which deals with VirtualAlloc() >>>> being unable, like shmget, to deallocate piece-wise. >>>> >>>> >>>> >>>>> For most cases (anything but ENOMEM, actually) could we at >>>>> least assert?: >>>>> >>>>> EACCES - should not happen: we created the shared memory and >>>>> are its owner >>>>> EIDRM - should not happen. 
>>>>> EINVAL - should not happen. (you already check now the >>>>> attach address for alignment to SHMLBA, so this is covered) >>>> >>>> Sure. I'll add asserts for these. >>>> >>>>> >>>>> --- >>>>> >>>>> Smaller nits: >>>>> >>>>> Functions called "shmat_..." suggest shmat-like behaviour, >>>>> so could we have them return -1 instead of NULL in case of >>>>> error? >>>> >>>> That would add clutter to the reserve_memory_special_shm, and >>>> it might also suggest that it would be OK to check errno for >>>> the failure reason, which probably wouldn't work. I'll let >>>> other Reviewers chime in and help decide if we should change >>>> this. >>>> >>>> >>>> You are right. If one returns -1, one would have to preserve >>>> errno for the caller too. >>>> >>>> Thanks for reviewing this, >>>> StefanK >>>> >>>> >>>> You are welcome! >>>> >>>> Kind Regards, Thomas >>>> >>>> >>>> >>>>> >>>>> Kind Regards, Thomas >>>>> >>>>>> >>>>>> Also note that mmap- and shmat-allocated memory may >>>>>> have different alignment requirements: mmap requires a >>>>>> page-aligned request address, whereas shmat requires >>>>>> alignment to SHMLBA, which may be multiple pages (e.g. >>>>>> for ARM: >>>>>> >>>>>> http://lxr.free-electrons.com/source/arch/arm/include/asm/shmparam.h#L9). >>>>>> >>>>>> >>>>>> So, for this shat-over-mmap trick to work, request >>>>>> address has to be aligned to SHMLBA, not just page size. >>>>>> >>>>>> I see that you assert alignment of requ address to >>>>>> os::large_page_size(), which I would assume is a >>>>>> multiple of SHMLBA, but I am not sure of this. >>>>> >>>>> I've added some defensive code and asserts to catch this >>>>> if/when this assumption fails: >>>>> >>>>> >>>>> http://cr.openjdk.java.net/~stefank/8017629/webrev.02.delta/ >>>>> >>>>> >>>>> http://cr.openjdk.java.net/~stefank/8017629/webrev.02 >>>>> >>>>> >>>>> I need to verify that this works on other machines than >>>>> my local Linux x64 machine. >>>>> >>>>> Thanks, >>>>> StefanK >>>>> >>>>>> >>>>>> Kind Regards, Thomas >>>>>> >>>>>> >>>>>> >>>>>> On Mon, Apr 11, 2016 at 1:03 PM, Stefan Karlsson >>>>>> >>>>> > wrote: >>>>>> >>>>>> Hi all, >>>>>> >>>>>> Please review this patch to enable SHM large page >>>>>> allocations even when the requested alignment is >>>>>> larger than os::large_page_size(). >>>>>> >>>>>> http://cr.openjdk.java.net/~stefank/8017629/webrev.01 >>>>>> >>>>>> >>>>>> https://bugs.openjdk.java.net/browse/JDK-8017629 >>>>>> >>>>>> G1 is affected by this bug since it requires the >>>>>> heap to start at an address that is aligned with >>>>>> the heap region size. The patch fixes this by >>>>>> changing the UseSHM large pages allocation code. >>>>>> First, virtual memory with correct alignment is >>>>>> pre-reserved and then the large pages are attached >>>>>> to this memory area. >>>>>> >>>>>> Tested with vm.gc.testlist and ExecuteInternaVMTests >>>>>> >>>>>> Thanks, >>>>>> StefanK >>>>>> >>>>>> >>>>> >>>>> >>>> >>>> >>> >>> >> From rwestrel at redhat.com Tue Apr 19 08:03:30 2016 From: rwestrel at redhat.com (Roland Westrelin) Date: Tue, 19 Apr 2016 10:03:30 +0200 Subject: RFR 8153310: AArch64: JEP 254: Implement byte_array_inflate and char_array_compress In-Reply-To: <57037B02.6030208@redhat.com> References: <56FEB045.8000905@redhat.com> <1459790432.3233.7.camel@mint> <57037B02.6030208@redhat.com> Message-ID: <5715E652.6030106@redhat.com> >>> http://cr.openjdk.java.net/~aph/8153310/ That looks good to me. Roland. 
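A rough sketch of the reserve-then-attach idea discussed in the JDK-8017629 thread above, for readers who want the moving parts in one place. This is illustrative only and is not the code from Stefan's webrev: it assumes Linux (SHM_HUGETLB and SHM_REMAP are Linux-specific), configured huge pages, that 'bytes' is a multiple of the huge page size, and that 'alignment' is a power of two that is a multiple of both the huge page size and SHMLBA, as Thomas points out above. The function name is made up for this post and error handling is reduced to the minimum:

  #include <sys/mman.h>
  #include <sys/ipc.h>
  #include <sys/shm.h>
  #include <stdint.h>
  #include <stddef.h>

  // Reserve an aligned virtual address range with anonymous mmap, then
  // attach a huge-page SysV segment over it.  Sketch only.
  static char* attach_aligned_huge_pages(size_t bytes, size_t alignment) {
    int shmid = shmget(IPC_PRIVATE, bytes, IPC_CREAT | SHM_HUGETLB | 0600);
    if (shmid == -1) return NULL;

    // Over-reserve so that an aligned start address must exist in the range.
    size_t reserve = bytes + alignment;
    char* base = (char*) mmap(NULL, reserve, PROT_NONE,
                              MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
    if (base == (char*) MAP_FAILED) { shmctl(shmid, IPC_RMID, NULL); return NULL; }

    char* aligned = (char*) (((uintptr_t) base + alignment - 1) &
                             ~((uintptr_t) alignment - 1));

    // Trim the unused head and tail of the reservation.
    if (aligned > base) munmap(base, aligned - base);
    size_t tail = (base + reserve) - (aligned + bytes);
    if (tail > 0) munmap(aligned + bytes, tail);

    // Replace the remaining reservation with the segment (SHM_REMAP).
    char* addr = (char*) shmat(shmid, aligned, SHM_REMAP);
    shmctl(shmid, IPC_RMID, NULL);  // segment is destroyed once detached
    if (addr == (char*) -1) {
      // As noted in the thread: on failure it is not clear whether the kernel
      // already unmapped the reservation, so it cannot simply be munmap'ed.
      return NULL;
    }
    return addr;
  }

The alternative Stefan mentions (unmap the probe range first and attach without SHM_REMAP) avoids leaking the reservation on failure, at the cost of racing with other threads that may map into the hole in the meantime.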
From shafi.s.ahmad at oracle.com Tue Apr 19 09:46:53 2016 From: shafi.s.ahmad at oracle.com (Shafi Ahmad) Date: Tue, 19 Apr 2016 02:46:53 -0700 (PDT) Subject: [8u] RFR: JDK-8055530 - assert(_exits.control()->is_top() || !_gvn.type(ret_phi)->empty()) failed: return value must be well defined In-Reply-To: <5715015B.9020604@oracle.com> References: <5715015B.9020604@oracle.com> Message-ID: <816bfeba-ba47-4524-b911-736a5875d8b5@default> Thanks for review. > Where was conflict? shafi at shafi-ahmad:~/Java/jdk8/jdk8u-dev/hotspot$ hg import 7dfa629d203a applying 7dfa629d203a patching file src/share/vm/opto/parse1.cpp Hunk #1 FAILED at 26 1 out of 2 hunks FAILED -- saving rejects to file src/share/vm/opto/parse1.cpp.rej abort: patch failed to apply shafi at shafi-ahmad:~/Java/jdk8/jdk8u-dev/hotspot$ cat src/share/vm/opto/parse1.cpp.rej --- parse1.cpp +++ parse1.cpp @@ -27,6 +27,7 @@ #include "interpreter/linkResolver.hpp" #include "oops/method.hpp" #include "opto/addnode.hpp" +#include "opto/c2compiler.hpp" #include "opto/castnode.hpp" #include "opto/idealGraphPrinter.hpp" #include "opto/locknode.hpp" Regards, Shafi -----Original Message----- From: Vladimir Kozlov Sent: Monday, April 18, 2016 9:17 PM To: Shafi Ahmad; hotspot-dev at openjdk.java.net Subject: Re: [8u] RFR: JDK-8055530 - assert(_exits.control()->is_top() || !_gvn.type(ret_phi)->empty()) failed: return value must be well defined Hi Shafi, I verified that changes match jdk 9 changes. They are good. Where was conflict? Thanks, Vladimir On 4/18/16 2:56 AM, Shafi Ahmad wrote: > Hi, > > > > Please review the backport of bug: "JDK-8055530 - assert(_exits.control()->is_top() || !_gvn.type(ret_phi)->empty()) failed: return value must be well defined" to jdk8u. > > There is a single line conflict. > > > > Webrev: http://cr.openjdk.java.net/~csahu/8055530/webrev.00/ > > Jdk9 bug: https://bugs.openjdk.java.net/browse/JDK-8055530 > > Original patch pushed to jdk9: http://hg.openjdk.java.net/jdk9/jdk9/hotspot/rev/7dfa629d203a > > > > Test: Run jprt. > > > > Regards, > > Shafi > > > From shafi.s.ahmad at oracle.com Tue Apr 19 13:14:12 2016 From: shafi.s.ahmad at oracle.com (Shafi Ahmad) Date: Tue, 19 Apr 2016 06:14:12 -0700 (PDT) Subject: [8u] RFR: JDK-8141551: C2 can not handle returns with incompatible interface arrays In-Reply-To: <13f589a0-b877-4fa8-910d-c24c5375db1d@default> References: <524f6be3-1639-4718-b099-6c873f3dbbd8@default> <571508A1.2070407@oracle.com> <13f589a0-b877-4fa8-910d-c24c5375db1d@default> Message-ID: Hi All, Please review. It contains the additional change of removing @modules and below change to make the test case run in jdk8. New webrev link: http://cr.openjdk.java.net/~rpatil/8141551/webrev.01/ I run the newly added jtreg test individually and it passes after the change and also run all other hotspot test. Regards, Shafi -----Original Message----- From: Shafi Ahmad Sent: Tuesday, April 19, 2016 11:46 AM To: Vladimir Kozlov; Volker Simonis Subject: RE: [8u] RFR: JDK-8141551: C2 can not handle returns with incompatible interface arrays Thanks Vladimir for reviewing it and clarifying my doubts. In jdk9 sun/hotspot/Whitebox is inside jdk9-dev/test/lib where as in jdk8 same is inside jdk8u-dev/hotspot/test/testlibrary/whitebox. So with the below change this test is running fine. 
diff -r ac05b856c6bc test/compiler/types/TestMeetIncompatibleInterfaceArrays.java --- a/test/compiler/types/TestMeetIncompatibleInterfaceArrays.java Tue Apr 19 10:31:00 2016 +0530 +++ b/test/compiler/types/TestMeetIncompatibleInterfaceArrays.java Tue Apr 19 11:34:52 2016 +0530 @@ -27,7 +27,7 @@ * @summary C2 can not handle returns with inccompatible interface arrays * @modules java.base/jdk.internal.org.objectweb.asm * java.base/sun.misc - * @library /testlibrary /../../test/lib + * @library /testlibrary /testlibrary/whitebox/ * @build sun.hotspot.WhiteBox * @run main ClassFileInstaller sun.hotspot.WhiteBox * sun.hotspot.WhiteBox$WhiteBoxPermission shafi at shafi-ahmad:~/Java/jtreg/lib$ java -jar jtreg.jar -jdk:/home/shafi/Java/jdk8/jdk8u-dev/build/linux-x86_64-normal-server-release/jdk/ /home/shafi/Java/jdk8/jdk8u-dev/hotspot/test/compiler/types/TestMeetIncompatibleInterfaceArrays.java Test results: passed: 1 Report written to /home/shafi/Java/jtreg/lib/JTreport/html/report.html Results written to /home/shafi/Java/jtreg/lib/JTwork On top of above change I will remove the @modules and after running the jtreg tests I will send incremental change for review. Regards, Shafi -----Original Message----- From: Vladimir Kozlov Sent: Monday, April 18, 2016 9:48 PM To: Volker Simonis; Shafi Ahmad Cc: HotSpot Open Source Developers Subject: Re: [8u] RFR: JDK-8141551: C2 can not handle returns with incompatible interface arrays On 4/18/16 5:39 AM, Volker Simonis wrote: > Hi Shafi, > > please not that I'm not a formal jdk8u reviewer. > > The change to the source files looks good. Where did the change not > applied cleanly? Looks like it is new() vs new(Compiler) Node allocation known difference. I verified that changes are matching jdk 9 changes except new() difference. > > I'm only a little worried about the test because it uses the > '@modules' tag. But if it runs and executed fine with jdk8u it's fine. The test should not have @modules for jdk8u since it is not supported there. JPRT does not run jtreg tests when testing jdk8u sources (only one very simple ExecuteInternalVMTests.java): http://hg.openjdk.java.net/jdk8u/jdk8u-dev/hotspot/file/d025821b6b6d/test/TEST.groups#l131 You should run jtreg tests yourself using your jdk8u as test jdk. Please, verify results of your testing before pushing. Thanks, Vladimir > > Thanks for backporting this to jdk8, > Volker > > > On Mon, Apr 18, 2016 at 12:26 PM, Shafi Ahmad wrote: >> Hi, >> >> >> >> Please review the backport of bug: "JDK-8141551: C2 can not handle returns with incompatible interface arrays" to jdk8u. >> >> Please note the backport is not clean. >> >> >> >> Webrev: http://cr.openjdk.java.net/~rpatil/8141551/webrev.00/ >> >> Jdk9 bug: https://bugs.openjdk.java.net/browse/JDK-8141551 >> >> Original patch pushed to jdk9: >> http://hg.openjdk.java.net/jdk9/jdk9/hotspot/rev/b425a78e8512 >> >> >> >> Test: Run jprt. >> >> >> >> Regards, >> >> Shafi >> >> From volker.simonis at gmail.com Tue Apr 19 14:30:05 2016 From: volker.simonis at gmail.com (Volker Simonis) Date: Tue, 19 Apr 2016 16:30:05 +0200 Subject: RFR (XL): 8152664 - Support non-continuous CodeBlobs in HotSpot In-Reply-To: <20160419053212.GB19871@rbackman> References: <20160407121221.GQ9504@rbackman> <20160419053212.GB19871@rbackman> Message-ID: Hi Rickard, I just wanted to prepare the new webrev for 8151956 but I'm a little confused because I realized that your latest webrev already contains the changes which I had proposed for 8151956. 
But after thinking about it a little bit I think that's fine. If I prepare a patch for 8151956 which is intended to be pushed BEFORE 8152664 you'd had to adapt 8152664 to take care of the new changes introduced by 8151956. If I prepare a patch for 8151956 which is intended to be pushed AFTER 8152664 it would be hard to review it (because it will depend on 8152664) and we would get a change in the repo which would not build on PPC64 and AARCH64 which isn't nice either. So altogether I think it's fine to incorporate the fix for 8151956 into your change. Please only don't forget to close 8151956 as "fixed by 8152664" after you have pushed the changes for 8152664. I've verified that your last webrev builds and runs fine on Linux/ppc64 and AIX. You've also fixed all the issues I've addressed in my first mail to this thread and the typo in os_linux_aarch64.cpp found by Andrew - thanks! Some final nit-picking: - you still have the white-space only change in os_windows.cpp objected by Vladimir. - in codeBlob.cpp can you please update the following comments to reflect the new types: // Creates a simple CodeBlob. Sets up the size of the different regions.* CodeBlob::CodeBlob(const char* name, int header_size, int size, int frame_complete, int locs_size) {** assert(size == round_to(size, oopSize), "unaligned size");**+ RuntimeBlob::RuntimeBlob(const char* name, int header_size, int size, int frame_complete, int locs_size)* // Creates a CodeBlob from a CodeBuffer. Sets up the size of the different regions, // and copy code and relocation info.*! CodeBlob::CodeBlob(**! RuntimeBlob::RuntimeBlob(* - why do we need: *+ bool make_not_used() { return make_not_entrant(); }* it only forwards to make_not_entrant() and it is only used a single time in ciEnv.cpp: *! old->make_not_entrant();**! old->make_not_used();* - I don't understand why we need both NMethodIterator and CompiledMethodIterator - they're virtually the same and nmethod is currently the only subclass of CompiledMethod. Can you please be more specific why you've changed some instances of NMethodIterator to CompiledMethodIterator and others not. Without background information this makes no sense to me. Also, the advance method in CompiledMethodIterator isn't "inline" while the one in NMethodIterator is - don't know if this will be a performance problem. The rest looks good to me but please notice that I still haven't looked at all changes (especially not on the agent/ and dtrace/ files). So you should get at least one more reviewer for such a big change. Regards, Volker On Tue, Apr 19, 2016 at 7:32 AM, Rickard B?ckman wrote: > Here is the updated webrev, rebased and I think I have fixed all the > comments with one exception. > > I've avoided making CompiledMethodIterator and NMethodIterator a > template class for now. I agree we should do something to reuse the > parts that are identical but for now I think there will be a few more > changes to CompiledMethodIterator in an upcoming RFR. So can we hold off > with that change? > > Webrev: http://cr.openjdk.java.net/~rbackman/8152664.3/ > > Thanks > > On 04/07, Rickard B?ckman wrote: > > Hi, > > > > can I please have review for this patch please? > > > > So far CodeBlobs have required all the data (metadata, oops, code, etc) > > to be in one continuous blob With this patch we are looking to change > > that. It's been done by changing offsets in CodeBlob to addresses, > > making some methods virtual to allow different behavior and also > > creating a couple of new classes. 
CompiledMethod now sits inbetween > > CodeBlob and nmethod. > > > > CR: https://bugs.openjdk.java.net/browse/JDK-8152664 > > Webrev: http://cr.openjdk.java.net/~rbackman/8152664/ > > > > Thanks > > /R > /R > From matthias.baesken at sap.com Tue Apr 19 14:46:24 2016 From: matthias.baesken at sap.com (Baesken, Matthias) Date: Tue, 19 Apr 2016 14:46:24 +0000 Subject: RFR: os_linux.cpp parse_os_info gives non descriptive output on current SLES releases Message-ID: <4bb0db384fa24f85aca2aea88cc177e9@derote13de22.global.corp.sap> ? Thank you for finding this problem with SuSE and the diff. I don't know ? how many different distros were tested with these changes but it's easy ? not to get them all. ? Would it make more sense, rather than getting the last line then ? checking for DISTRIB_DESCRIPTION=, checking in the fgets() loop for both ? this and PRETTY_NAME. Then we could change it for Ubuntu and there's ? less conditionalizing on the name of the release file. ? Like: ? open webrev at http://cr.openjdk.java.net/~coleenp/suse/webrev ? ? Coleen Hello Coleen, thanks a lot for creating the webrev, I think it looks good (better than original diff) with the exception of the indentation at the beginning of parse_os_info_helper (buf declaration) . 2065 static void parse_os_info_helper(FILE* fp, char* distro, size_t length, bool get_first_line) { 2066 char buf[256]; 2067 while (fgets(buf, sizeof(buf), fp)) { Best regards, Matthias -------------------------------------------------------------- On 4/18/16 8:05 AM, Baesken, Matthias wrote: > Hello , the current implementation of the parse_os_info-function in os_linux.cpp gets the last line of a Linux-distro related file to > provide a meaningful OS version string. > > However the information provided currently on SuSE Linux (SLES) is not very descriptive, it currently uses /etc/lsb-release and gives : > > more /etc/lsb-release > LSB_VERSION="core-2.0-noarch:core-3.2-noarch:core-4.0-noarch:core-2.0-x86_64:core-3.2-x86_64:core-4.0-x86_64" > > So I suggest to use /etc/SuSE-release instead, which gives a good information for > SLES 9 - 12 in the ***first line*** of /etc/SuSE-release : > > Example SLES11 : > > more /etc/SuSE-release > SUSE Linux Enterprise Server 11 (x86_64) > VERSION = 11 > PATCHLEVEL = 3 > > (this is similar to using /etc/redhat-release on Red Hat with the difference that the ***first line*** has the relevant info). > > > Additionally, /etc/os-release needs some special handling as well, because > the meaningful OS-release description string is not always the last line of the file but in the line > containing the information PRETTY_NAME=... > See also : > > https://www.freedesktop.org/software/systemd/man/os-release.html > > Example from Ubuntu 14 : > > $ more /etc/os-release > ... > PRETTY_NAME="Ubuntu 14.04.3 LTS" > ... > > It might also be a good idea to place /etc/os-release higher in the distro_files list, but I do not have access to > turbolinux / gentoo to check the situation on these distros. > > Regards, Matthias > > > > Diff : > --- a/src/os/linux/vm/os_linux.cpp Fri Apr 15 16:19:15 2016 +0100 > +++ b/src/os/linux/vm/os_linux.cpp Mon Apr 18 13:54:04 2016 +0200 > @@ -2013,8 +2013,8 @@ > // their own specific XXX-release file as well as a redhat-release file. > // Because of this the XXX-release file needs to be searched for before the > // redhat-release file. > -// Since Red Hat has a lsb-release file that is not very descriptive the > -// search for redhat-release needs to be before lsb-release. 
> +// Since Red Hat and SuSE have an lsb-release file that is not very descriptive the > +// search for redhat-release / SuSE-release needs to be before lsb-release. > // Since the lsb-release file is the new standard it needs to be searched > // before the older style release files. > // Searching system-release (Red Hat) and os-release (other Linuxes) are a > @@ -2031,8 +2031,8 @@ > "/etc/mandrake-release", > "/etc/sun-release", > "/etc/redhat-release", > + "/etc/SuSE-release", > "/etc/lsb-release", > - "/etc/SuSE-release", > "/etc/turbolinux-release", > "/etc/gentoo-release", > "/etc/ltib-release", > @@ -2065,11 +2065,36 @@ > static void parse_os_info(char* distro, size_t length, const char* file) { > FILE* fp = fopen(file, "r"); > if (fp != NULL) { > + // SuSE-release : first line is interesting > + // os-release : PRETTY_NAME= line is interesting > + // (might be at different locations in the file) > char buf[256]; > - // get last line of the file. > - while (fgets(buf, sizeof(buf), fp)) { } > + int lcnt = 0; > + bool is_etc_suserelease = false; > + bool is_etc_osrelease = false; > + if (strcmp(file, "/etc/SuSE-release") == 0) { > + is_etc_suserelease = true; > + } > + if (strcmp(file, "/etc/os-release") == 0) { > + is_etc_osrelease = true; > + } > + > + // get last line of the file or > + // other interesting line on SUSE / os-release > + while (fgets(buf, sizeof(buf), fp)) { > + if (lcnt == 0 && is_etc_suserelease) { > + break; > + } > + if (is_etc_osrelease) { > + if (strstr(buf, "PRETTY_NAME=") != NULL) { > + break; > + } > + } > + lcnt++; > + } > + > // Edit out extra stuff in expected ubuntu format > - if (strstr(buf, "DISTRIB_DESCRIPTION=") != NULL) { > + if (strstr(buf, "DISTRIB_DESCRIPTION=") != NULL || strstr(buf, "PRETTY_NAME=") != NULL) { > char* ptr = strstr(buf, "\""); // the name is in quotes > if (ptr != NULL) { > ptr++; // go beyond first quote From vladimir.kozlov at oracle.com Tue Apr 19 15:44:55 2016 From: vladimir.kozlov at oracle.com (Vladimir Kozlov) Date: Tue, 19 Apr 2016 08:44:55 -0700 (PDT) Subject: [8u] RFR: JDK-8141551: C2 can not handle returns with incompatible interface arrays In-Reply-To: References: <524f6be3-1639-4718-b099-6c873f3dbbd8@default> <571508A1.2070407@oracle.com> <13f589a0-b877-4fa8-910d-c24c5375db1d@default> Message-ID: <57165277.50007@oracle.com> Looks good. Thanks, Vladimir On 4/19/16 6:14 AM, Shafi Ahmad wrote: > Hi All, > > Please review. It contains the additional change of removing @modules and below change to make the test case run in jdk8. > > New webrev link: http://cr.openjdk.java.net/~rpatil/8141551/webrev.01/ > > I run the newly added jtreg test individually and it passes after the change and also run all other hotspot test. > > Regards, > Shafi > > -----Original Message----- > From: Shafi Ahmad > Sent: Tuesday, April 19, 2016 11:46 AM > To: Vladimir Kozlov; Volker Simonis > Subject: RE: [8u] RFR: JDK-8141551: C2 can not handle returns with incompatible interface arrays > > Thanks Vladimir for reviewing it and clarifying my doubts. In jdk9 sun/hotspot/Whitebox is inside jdk9-dev/test/lib where as in jdk8 same is inside jdk8u-dev/hotspot/test/testlibrary/whitebox. So with the below change this test is running fine. 
> > diff -r ac05b856c6bc test/compiler/types/TestMeetIncompatibleInterfaceArrays.java > --- a/test/compiler/types/TestMeetIncompatibleInterfaceArrays.java Tue Apr 19 10:31:00 2016 +0530 > +++ b/test/compiler/types/TestMeetIncompatibleInterfaceArrays.java Tue Apr 19 11:34:52 2016 +0530 > @@ -27,7 +27,7 @@ > * @summary C2 can not handle returns with inccompatible interface arrays > * @modules java.base/jdk.internal.org.objectweb.asm > * java.base/sun.misc > - * @library /testlibrary /../../test/lib > + * @library /testlibrary /testlibrary/whitebox/ > * @build sun.hotspot.WhiteBox > * @run main ClassFileInstaller sun.hotspot.WhiteBox > * sun.hotspot.WhiteBox$WhiteBoxPermission > shafi at shafi-ahmad:~/Java/jtreg/lib$ java -jar jtreg.jar -jdk:/home/shafi/Java/jdk8/jdk8u-dev/build/linux-x86_64-normal-server-release/jdk/ /home/shafi/Java/jdk8/jdk8u-dev/hotspot/test/compiler/types/TestMeetIncompatibleInterfaceArrays.java > Test results: passed: 1 > Report written to /home/shafi/Java/jtreg/lib/JTreport/html/report.html > Results written to /home/shafi/Java/jtreg/lib/JTwork > > On top of above change I will remove the @modules and after running the jtreg tests I will send incremental change for review. > > Regards, > Shafi > > > -----Original Message----- > From: Vladimir Kozlov > Sent: Monday, April 18, 2016 9:48 PM > To: Volker Simonis; Shafi Ahmad > Cc: HotSpot Open Source Developers > Subject: Re: [8u] RFR: JDK-8141551: C2 can not handle returns with incompatible interface arrays > > On 4/18/16 5:39 AM, Volker Simonis wrote: >> Hi Shafi, >> >> please not that I'm not a formal jdk8u reviewer. >> >> The change to the source files looks good. Where did the change not >> applied cleanly? > > Looks like it is new() vs new(Compiler) Node allocation known difference. > I verified that changes are matching jdk 9 changes except new() difference. > >> >> I'm only a little worried about the test because it uses the >> '@modules' tag. But if it runs and executed fine with jdk8u it's fine. > > > The test should not have @modules for jdk8u since it is not supported there. > > JPRT does not run jtreg tests when testing jdk8u sources (only one very simple ExecuteInternalVMTests.java): > > http://hg.openjdk.java.net/jdk8u/jdk8u-dev/hotspot/file/d025821b6b6d/test/TEST.groups#l131 > > You should run jtreg tests yourself using your jdk8u as test jdk. > > Please, verify results of your testing before pushing. > > Thanks, > Vladimir > >> >> Thanks for backporting this to jdk8, >> Volker >> >> >> On Mon, Apr 18, 2016 at 12:26 PM, Shafi Ahmad wrote: >>> Hi, >>> >>> >>> >>> Please review the backport of bug: "JDK-8141551: C2 can not handle returns with incompatible interface arrays" to jdk8u. >>> >>> Please note the backport is not clean. >>> >>> >>> >>> Webrev: http://cr.openjdk.java.net/~rpatil/8141551/webrev.00/ >>> >>> Jdk9 bug: https://bugs.openjdk.java.net/browse/JDK-8141551 >>> >>> Original patch pushed to jdk9: >>> http://hg.openjdk.java.net/jdk9/jdk9/hotspot/rev/b425a78e8512 >>> >>> >>> >>> Test: Run jprt. 
>>> >>> >>> >>> Regards, >>> >>> Shafi >>> >>> From vladimir.kozlov at oracle.com Tue Apr 19 15:51:25 2016 From: vladimir.kozlov at oracle.com (Vladimir Kozlov) Date: Tue, 19 Apr 2016 08:51:25 -0700 (PDT) Subject: [8u] RFR: JDK-8055530 - assert(_exits.control()->is_top() || !_gvn.type(ret_phi)->empty()) failed: return value must be well defined In-Reply-To: <816bfeba-ba47-4524-b911-736a5875d8b5@default> References: <5715015B.9020604@oracle.com> <816bfeba-ba47-4524-b911-736a5875d8b5@default> Message-ID: <571653FD.7080004@oracle.com> I see, it is different #includes. Thank you. Again, changes are good. Thanks, Vladimir On 4/19/16 2:46 AM, Shafi Ahmad wrote: > Thanks for review. > >> Where was conflict? > > shafi at shafi-ahmad:~/Java/jdk8/jdk8u-dev/hotspot$ hg import 7dfa629d203a > applying 7dfa629d203a > patching file src/share/vm/opto/parse1.cpp > Hunk #1 FAILED at 26 > 1 out of 2 hunks FAILED -- saving rejects to file src/share/vm/opto/parse1.cpp.rej > abort: patch failed to apply > > shafi at shafi-ahmad:~/Java/jdk8/jdk8u-dev/hotspot$ cat src/share/vm/opto/parse1.cpp.rej > --- parse1.cpp > +++ parse1.cpp > @@ -27,6 +27,7 @@ > #include "interpreter/linkResolver.hpp" > #include "oops/method.hpp" > #include "opto/addnode.hpp" > +#include "opto/c2compiler.hpp" > #include "opto/castnode.hpp" > #include "opto/idealGraphPrinter.hpp" > #include "opto/locknode.hpp" > > Regards, > Shafi > > -----Original Message----- > From: Vladimir Kozlov > Sent: Monday, April 18, 2016 9:17 PM > To: Shafi Ahmad; hotspot-dev at openjdk.java.net > Subject: Re: [8u] RFR: JDK-8055530 - assert(_exits.control()->is_top() || !_gvn.type(ret_phi)->empty()) failed: return value must be well defined > > Hi Shafi, > > I verified that changes match jdk 9 changes. They are good. > > Where was conflict? > > Thanks, > Vladimir > > On 4/18/16 2:56 AM, Shafi Ahmad wrote: >> Hi, >> >> >> >> Please review the backport of bug: "JDK-8055530 - assert(_exits.control()->is_top() || !_gvn.type(ret_phi)->empty()) failed: return value must be well defined" to jdk8u. >> >> There is a single line conflict. >> >> >> >> Webrev: http://cr.openjdk.java.net/~csahu/8055530/webrev.00/ >> >> Jdk9 bug: https://bugs.openjdk.java.net/browse/JDK-8055530 >> >> Original patch pushed to jdk9: http://hg.openjdk.java.net/jdk9/jdk9/hotspot/rev/7dfa629d203a >> >> >> >> Test: Run jprt. >> >> >> >> Regards, >> >> Shafi >> >> >> From aph at redhat.com Tue Apr 19 16:45:58 2016 From: aph at redhat.com (Andrew Haley) Date: Tue, 19 Apr 2016 17:45:58 +0100 Subject: Testing with JTREG Message-ID: <571660C6.7070606@redhat.com> I've recently started to have problems with jtreg. This one: JAVA_HOME=/home/aph/hs/build/linux-aarch64-normal-server-release/images/jdk/ ~/jtreg/bin/jtreg -v compiler/intrinsics/string Error. Parse Exception: Bad classname provided for `build': java.base/java.lang.Helper This is my jtreg: jtreg, version 4.2.0 dev 3204 Installed in /home/aph/jtreg/lib/jtreg.jar Running on platform version 1.8.0-internal from /home/aph/jdk/jre. Built with 1.7.0_79 on 09/28/2015 06:55 AM. Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved. Use is subject to license terms. JCov 2.0-1 TestNG: version 6.8-201210030754 Is it that I need something newer? Thanks, Andrew. 
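Before Christian's reply on the 8152664 review below, a short illustration of the const-correctness point Volker raised earlier in this digest about the codeBlob.hpp accessors: a const member function that hands out a non-const pointer has to cast the constness of 'this' away. The two alternatives he suggests look roughly like this; the classes are simplified stand-ins for illustration, not the real HotSpot declarations:

  #include <cstddef>

  class nmethod;

  class CodeBlob {
   public:
    virtual bool is_nmethod() const { return false; }

    // Alternative 1: drop the const qualifier, so handing out a mutable
    // nmethod* needs no cast away of const.
    nmethod* as_nmethod_or_null();

    // Alternative 2: keep the method const, but return a pointer-to-const,
    // so 'this' never loses its constness.
    const nmethod* as_nmethod_or_null() const;
  };

  class nmethod : public CodeBlob {
   public:
    virtual bool is_nmethod() const { return true; }
  };

  inline nmethod* CodeBlob::as_nmethod_or_null() {
    return is_nmethod() ? static_cast<nmethod*>(this) : NULL;
  }

  inline const nmethod* CodeBlob::as_nmethod_or_null() const {
    return is_nmethod() ? static_cast<const nmethod*>(this) : NULL;
  }

Which alternative fits best depends on the call sites: if callers only ever hold a non-const CodeBlob*, alternative 1 alone is enough; providing both overloads is the usual compromise when const and non-const callers coexist.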
From christian.thalinger at oracle.com Tue Apr 19 16:49:20 2016 From: christian.thalinger at oracle.com (Christian Thalinger) Date: Tue, 19 Apr 2016 09:49:20 -0700 (PDT) Subject: RFR (XL): 8152664 - Support non-continuous CodeBlobs in HotSpot In-Reply-To: References: <20160407121221.GQ9504@rbackman> <20160419053212.GB19871@rbackman> Message-ID: <31054C59-BCB4-4C66-9AF9-FC92DC836821@oracle.com> > On Apr 19, 2016, at 4:30 AM, Volker Simonis wrote: > > Hi Rickard, > > I just wanted to prepare the new webrev for 8151956 but I'm a little > confused because I realized that your latest webrev already contains the > changes which I had proposed for 8151956. > > But after thinking about it a little bit I think that's fine. If I prepare > a patch for 8151956 which is intended to be pushed BEFORE 8152664 you'd had > to adapt 8152664 to take care of the new changes introduced by 8151956. If > I prepare a patch for 8151956 which is intended to be pushed AFTER 8152664 > it would be hard to review it (because it will depend on 8152664) and we > would get a change in the repo which would not build on PPC64 and AARCH64 > which isn't nice either. > > So altogether I think it's fine to incorporate the fix for 8151956 into > your change. Please only don't forget to close 8151956 as "fixed by > 8152664" after you have pushed the changes for 8152664. > > I've verified that your last webrev builds and runs fine on Linux/ppc64 and > AIX. You've also fixed all the issues I've addressed in my first mail to > this thread and the typo in os_linux_aarch64.cpp found by Andrew - thanks! > > Some final nit-picking: > > - you still have the white-space only change in os_windows.cpp objected by > Vladimir. > > - in codeBlob.cpp can you please update the following comments to reflect > the new types: > > // Creates a simple CodeBlob. Sets up the size of the different > regions.* CodeBlob::CodeBlob(const char* name, int header_size, int > size, int frame_complete, int locs_size) {** assert(size == > round_to(size, oopSize), "unaligned size");**+ > RuntimeBlob::RuntimeBlob(const char* name, int header_size, int size, > int frame_complete, int locs_size)* > > // Creates a CodeBlob from a CodeBuffer. Sets up the size of the > different regions, // and copy code and relocation info.*! > CodeBlob::CodeBlob(**! RuntimeBlob::RuntimeBlob(* > > > - why do we need: > > *+ bool make_not_used() { return make_not_entrant(); }* > > it only forwards to make_not_entrant() and it is only used a single time in > ciEnv.cpp: > > *! old->make_not_entrant();**! old->make_not_used();* I can answer this. make_not_used is virtual: virtual bool make_not_used() = 0; Can you guess why this is the case? :-) The reason is that the implementation is different for AOT compiled methods. > > > - I don't understand why we need both NMethodIterator and > CompiledMethodIterator - they're virtually the same and nmethod is > currently the only subclass of CompiledMethod. Can you please be more > specific why you've changed some instances of NMethodIterator to > CompiledMethodIterator and others not. Without background information this > makes no sense to me. Also, the advance method in CompiledMethodIterator > isn't "inline" while the one in NMethodIterator is - don't know if this > will be a performance problem. > > The rest looks good to me but please notice that I still haven't looked at > all changes (especially not on the agent/ and dtrace/ files). So you should > get at least one more reviewer for such a big change. 
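For readers who have not opened the webrev: the class structure being reviewed above boils down to roughly the following. This is a sketch reconstructed from the descriptions quoted in this thread, not the actual codeBlob.hpp -- member lists are omitted and only make_not_used() is spelled out because it came up above:

    // CodeBlob stays the common base. RuntimeBlob takes over the role the plain
    // CodeBlob constructors used to play (simple blobs and blobs built from a
    // CodeBuffer), while CompiledMethod now sits between CodeBlob and nmethod so
    // that other compiled forms (for example AOT-compiled methods) can be added
    // as siblings of nmethod later.
    class CodeBlob {
      // section boundaries are kept as addresses rather than offsets, so the
      // parts of a blob no longer have to be contiguous
    };

    class RuntimeBlob : public CodeBlob {
      // stubs, adapters and the other non-method blobs
    };

    class CompiledMethod : public CodeBlob {
     public:
      virtual bool make_not_used() = 0;   // implemented differently for AOT code
    };

    class nmethod : public CompiledMethod {
     public:
      virtual bool make_not_used() { return make_not_entrant(); }
      bool make_not_entrant();            // unchanged
    };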
> > Regards, > Volker > > > > On Tue, Apr 19, 2016 at 7:32 AM, Rickard B?ckman > wrote: > >> Here is the updated webrev, rebased and I think I have fixed all the >> comments with one exception. >> >> I've avoided making CompiledMethodIterator and NMethodIterator a >> template class for now. I agree we should do something to reuse the >> parts that are identical but for now I think there will be a few more >> changes to CompiledMethodIterator in an upcoming RFR. So can we hold off >> with that change? >> >> Webrev: http://cr.openjdk.java.net/~rbackman/8152664.3/ >> >> Thanks >> >> On 04/07, Rickard B?ckman wrote: >>> Hi, >>> >>> can I please have review for this patch please? >>> >>> So far CodeBlobs have required all the data (metadata, oops, code, etc) >>> to be in one continuous blob With this patch we are looking to change >>> that. It's been done by changing offsets in CodeBlob to addresses, >>> making some methods virtual to allow different behavior and also >>> creating a couple of new classes. CompiledMethod now sits inbetween >>> CodeBlob and nmethod. >>> >>> CR: https://bugs.openjdk.java.net/browse/JDK-8152664 >>> Webrev: http://cr.openjdk.java.net/~rbackman/8152664/ >>> >>> Thanks >>> /R >> /R >> From coleen.phillimore at oracle.com Tue Apr 19 16:53:49 2016 From: coleen.phillimore at oracle.com (Coleen Phillimore) Date: Tue, 19 Apr 2016 09:53:49 -0700 (PDT) Subject: RFR: os_linux.cpp parse_os_info gives non descriptive output on current SLES releases In-Reply-To: <4bb0db384fa24f85aca2aea88cc177e9@derote13de22.global.corp.sap> References: <4bb0db384fa24f85aca2aea88cc177e9@derote13de22.global.corp.sap> Message-ID: <5716629D.9030900@oracle.com> Hi Matthais, I captured this email in bug and will fix the indentation and will send this out as both of us as contributors. https://bugs.openjdk.java.net/browse/JDK-8154566 So this works properly on SuSE? Thanks! Coleen On 4/19/16 10:46 AM, Baesken, Matthias wrote: > > ?Thank you for finding this problem with SuSE and the diff. I don't know > > ?how many different distros were tested with these changes but it's easy > > ?not to get them all. > > ?Would it make more sense, rather than getting the last line then > > ?checking for DISTRIB_DESCRIPTION=, checking in the fgets() loop for both > > ?this and PRETTY_NAME. Then we could change it for Ubuntu and there's > > ?less conditionalizing on the name of the release file. > > ?Like: > > ?open webrev at http://cr.openjdk.java.net/~coleenp/suse/webrev > > > ? > > ?Coleen > > Hello Coleen, thanks a lot for creating the webrev, I think it looks > good (better than original diff) with the exception of > > the indentation at the beginning of parse_os_info_helper (buf > declaration) . > > 2065 static void parse_os_info_helper(FILE* fp, char* distro, size_t > length, bool get_first_line) { > > 2066 char buf[256]; > > 2067 while (fgets(buf, sizeof(buf), fp)) { > > Best regards, Matthias > > -------------------------------------------------------------- > > On 4/18/16 8:05 AM, Baesken, Matthias wrote: > > > Hello , the current implementation of the parse_os_info-function in > os_linux.cpp gets the last line of a Linux-distro related file to > > > provide a meaningful OS version string. 
> > > > > > However the information provided currently on SuSE Linux (SLES) is not very > descriptive, it currently uses /etc/lsb-release and gives : > > > > > > more /etc/lsb-release > > > LSB_VERSION="core-2.0-noarch:core-3.2-noarch:core-4.0-noarch:core-2.0-x86_64:core-3.2-x86_64:core-4.0-x86_64" > > > > > > So I suggest to use /etc/SuSE-release instead, which gives a good information for > > > SLES 9 - 12 in the ***first line*** of /etc/SuSE-release : > > > > > > Example SLES11 : > > > > > > more /etc/SuSE-release > > > SUSE Linux Enterprise Server 11 (x86_64) > > > VERSION = 11 > > > PATCHLEVEL = 3 > > > > > > (this is similar to using /etc/redhat-release on Red Hat with the difference that the > ***first line*** has the relevant info). > > > > > > > > > Additionally, /etc/os-release needs some special handling as well, because > > > the meaningful OS-release description string is not always the last line of the > file but in the line > > > containing the information PRETTY_NAME=... > > > See also : > > > > > >https://www.freedesktop.org/software/systemd/man/os-release.html > > > > > > Example from Ubuntu 14 : > > > > > > $ more /etc/os-release > > > ... > > > PRETTY_NAME="Ubuntu 14.04.3 LTS" > > > ... > > > > > > It might also be a good idea to place /etc/os-release higher in the distro_files > list, but I do not have access to > > > turbolinux / gentoo to check the situation on these distros. > > > > > > Regards, Matthias > > > > > > > > > > > > Diff : > > > --- a/src/os/linux/vm/os_linux.cpp Fri Apr 15 16:19:15 2016 +0100 > > > +++ b/src/os/linux/vm/os_linux.cpp Mon Apr 18 13:54:04 2016 +0200 > > > @@ -2013,8 +2013,8 @@ > > > // their own specific XXX-release file as well as a redhat-release file. > > > // Because of this the XXX-release file needs to be searched for before the > > > // redhat-release file. > > > -// Since Red Hat has a lsb-release file that is not very descriptive the > > > -// search for redhat-release needs to be before lsb-release. > > > +// Since Red Hat and SuSE have an lsb-release file that is not very descriptive the > > > +// search for redhat-release / SuSE-release needs to be before lsb-release. > > > // Since the lsb-release file is the new standard it needs to be searched > > > // before the older style release files. > > > // Searching system-release (Red Hat) and os-release (other Linuxes) are a > > > @@ -2031,8 +2031,8 @@ > > >"/etc/mandrake-release", > > >"/etc/sun-release", > > >"/etc/redhat-release", > > > +"/etc/SuSE-release", > > >"/etc/lsb-release", > > > -"/etc/SuSE-release", > > >"/etc/turbolinux-release", > > >"/etc/gentoo-release", > > >"/etc/ltib-release", > > > @@ -2065,11 +2065,36 @@ > > > static void parse_os_info(char* distro, size_t length, const char* file) { > > > FILE* fp = fopen(file, "r"); > > > if (fp != NULL) { > > > + // SuSE-release : first line is interesting > > > + // os-release : PRETTY_NAME= line is interesting > > > + // (might be at different locations in the file) > > > char buf[256]; > > > - // get last line of the file. 
> > > - while (fgets(buf, sizeof(buf), fp)) { } > > > + int lcnt = 0; > > > + bool is_etc_suserelease = false; > > > + bool is_etc_osrelease = false; > > > + if (strcmp(file, "/etc/SuSE-release") == 0) { > > > +is_etc_suserelease = true; > > > + } > > > + if (strcmp(file, "/etc/os-release") == 0) { > > > +is_etc_osrelease = true; > > > + } > > > + > > > + // get last line of the file or > > > + // other interesting line on SUSE / os-release > > > + while (fgets(buf, sizeof(buf), fp)) { > > > + if (lcnt == 0 && is_etc_suserelease) { > > > + break; > > > + } > > > + if (is_etc_osrelease) { > > > + if (strstr(buf, "PRETTY_NAME=") != NULL) { > > > +break; > > > + } > > > + } > > > + lcnt++; > > > + } > > > + > > > // Edit out extra stuff in expected ubuntu format > > > - if (strstr(buf, "DISTRIB_DESCRIPTION=") != NULL) { > > > + if (strstr(buf, "DISTRIB_DESCRIPTION=") != NULL || strstr(buf, > "PRETTY_NAME=") != NULL) { > > > char* ptr = strstr(buf, "\""); // the name is in quotes > > > if (ptr != NULL) { > > > ptr++; // go beyond first quote > From martinrb at google.com Tue Apr 19 18:50:23 2016 From: martinrb at google.com (Martin Buchholz) Date: Tue, 19 Apr 2016 11:50:23 -0700 Subject: Testing with JTREG In-Reply-To: <571660C6.7070606@redhat.com> References: <571660C6.7070606@redhat.com> Message-ID: See thread Status of jtreg build http://mail.openjdk.java.net/pipermail/jtreg-use/2016-April/thread.html yes, you need genuine 4.2 b01. But no one is currently providing such binaries on the Net. Some people have succeeded in building their own, but it's not easy. As jjg said, hopefully https://adopt-openjdk.ci.cloudbees.com/job/jtreg/ will soon get fixed so that it is the one-stop shopping for jtreg binaries. On Tue, Apr 19, 2016 at 9:45 AM, Andrew Haley wrote: > I've recently started to have problems with jtreg. This one: > > JAVA_HOME=/home/aph/hs/build/linux-aarch64-normal-server-release/images/jdk/ ~/jtreg/bin/jtreg -v compiler/intrinsics/string > Error. Parse Exception: Bad classname provided for `build': java.base/java.lang.Helper > > This is my jtreg: > > jtreg, version 4.2.0 dev 3204 > Installed in /home/aph/jtreg/lib/jtreg.jar > Running on platform version 1.8.0-internal from /home/aph/jdk/jre. > Built with 1.7.0_79 on 09/28/2015 06:55 AM. > Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved. > Use is subject to license terms. > JCov 2.0-1 > TestNG: version 6.8-201210030754 > > Is it that I need something newer? > > Thanks, > > Andrew. From christian.thalinger at oracle.com Tue Apr 19 18:24:20 2016 From: christian.thalinger at oracle.com (Christian Thalinger) Date: Tue, 19 Apr 2016 11:24:20 -0700 (PDT) Subject: Testing with JTREG In-Reply-To: <571660C6.7070606@redhat.com> References: <571660C6.7070606@redhat.com> Message-ID: > On Apr 19, 2016, at 6:45 AM, Andrew Haley wrote: > > I've recently started to have problems with jtreg. This one: > > JAVA_HOME=/home/aph/hs/build/linux-aarch64-normal-server-release/images/jdk/ ~/jtreg/bin/jtreg -v compiler/intrinsics/string > Error. Parse Exception: Bad classname provided for `build': java.base/java.lang.Helper > > This is my jtreg: > > jtreg, version 4.2.0 dev 3204 > Installed in /home/aph/jtreg/lib/jtreg.jar > Running on platform version 1.8.0-internal from /home/aph/jdk/jre. > Built with 1.7.0_79 on 09/28/2015 06:55 AM. > Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved. > Use is subject to license terms. > JCov 2.0-1 > TestNG: version 6.8-201210030754 > > Is it that I need something newer? 
Possibly. I have: cthaling at macbook:~$ jtreg -version jtreg, version 4.2 fcs b01 Installed in /Users/cthaling/jtreg/jtreg/lib/jtreg.jar Running on platform version 1.8.0_65 from /Library/Java/JavaVirtualMachines/jdk1.8.0_65.jdk/Contents/Home/jre. Built with Java(TM) 2 SDK, Version 1.5.0-b64 on March 03, 2016. Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved. Use is subject to license terms. JCov 2.0-b18 beta TestNG: version 6.8.5 and that one works. > > Thanks, > > Andrew. From christian.thalinger at oracle.com Tue Apr 19 18:39:57 2016 From: christian.thalinger at oracle.com (Christian Thalinger) Date: Tue, 19 Apr 2016 11:39:57 -0700 (PDT) Subject: Profiling interpreter vs. compiled time + proof of concept In-Reply-To: References: Message-ID: This is a nice project but right now I don?t see the value in this. If a method is hot it will get JIT compiled anyway so there is no real value in knowing how much time was spent interpreting it. But maybe I am missing something. One data point I?m curious about is if -Xprof which uses sampling to figure out how much time was spent in interpreted and compiled code gives about the same results as your precise approach. > On Apr 10, 2016, at 11:02 AM, Adrian wrote: > > Hello, > > I am a student at the University of Toronto, working on a research > project studying JVM performance with a focus on distributed systems. > As part of the study, we want to know the time the JVM spends > interpreting code, and the time it spends in compiled (JNI/native or > jitted) code. > For example, for a program repeating a task, you can see the > interpreter time for each iteration decrease. > A timer for interpreted and compiled code can be used to > finely-grained see the performance of the jit (warmup time + > improvement over interpreter). > > Interpreter -> native, and native -> interpreted calls are relatively > easy to track and time as there are clear boundaries. > However, it?s tricky to track transitions between interpreted and > jitted code, especially with complications such as OSR and > deoptimization. > > We have an implementation for the amd64 linux build, which is described below. > Before we solved all the issues, we often encountered crashes. > Of course, we are not JVM experts and may be missing other edge cases. > Regardless, it has been working on our workloads such as reading on > HDFS or running queries on Spark. > As these are fairly complex workloads, we are confident it is > successful at least as a proof of concept. > > We have found this instrumentation to have negligible overhead (less > than the inherent variance between multiple runs). > Furthermore, because most of the instrumentation is done in code blobs > generated by the dynamic > Assembler/MacroAssembler/InterpreterMacroAssembler, when > instrumentation is disabled, it's essentially non-existent. > > Our modifications will be open sourced, but we wanted to know if > OpenJDK developers would be interested in adding something like this > in the future. > I've attached a patch which should be up to date with the latest revision. 
> I've also uploaded the patches and a prebuilt binary (linux amd64): > http://research.dfr.moe/jvm_profiling/ > > You enable profiling with the flag -XX:+ProfileIntComp, and get the > time in nanoseconds inside a Java program from a java.lang.Thread > object: > Thread th = Thread.currentThread(); > th.resetIntCompTimes(); > long a = th.getIntTime(); > long b = th.getCompTime(); > > The times include a bit of JVM initialization (we have not found a > perfect solution for this), so for short running programs you should > call `resetIntCompTimes` at the start of `main`. > > Regarding the code, it is currently quite hacky because this is my and > my team's first time really modifying the JVM. > I was not familiar with the code structure, and tried to keep most of > my additions in 1 file (sharedRuntime.cpp) instead of worrying about > modularizing stuff. > For example, thread local data is added to the JavaThread class, and > the data is exposed in the java.lang.Thread class. > If OpenJDK developers are interested in this, we are happy to continue > working on this to make it consistent with the proper JVM structure > (would need feedback/pointers). > > We are using it for our research and fixed many problems we > encountered, but if anyone encounters anything or knows of any > potential problems, any feedback would be greatly appreciated. > > Below is a high level description of the implementation (specific to > linux 64 bit). > --- > The goal is to track transitions between interpreted, native, and > jitted (standard + OSR compilation) code so we can precisely track the > time the JVM spends in each type (currently, native and jitted code > are lumped as "compiled" code, but it would be trivial to separate > them), and the challenge is with jitted code. > > Interpreted calls pass all arguments on the stack, whereas the > compiled calling convention uses 6 registers for integer/pointer > arguments. > There are adapters - generated in `sharedRuntime_x86_64.cpp`. > Methods with the same signature share adapters, which move arguments > from the stack into registers (i2c) or from registers onto the stack > (c2i). > It is easy to add instrumentation to these adapters to track a "call > transition". > > When the callee returns, we need a "return transition" back to the > caller's state. > However, there is no existing place we can do this, as callees merely > pop their stack frame and jump to the return address. > Our solution is to save the "real return address" in the adapter, and > replace the return address for the new frame with a "return handler" > address (one for i2c, one for c2i). > We also save the location of the return address (where on the > stack/address in memory), for reasons explained later. > This data is saved in the JavaThread object. > We require a stack of return addresses as we could go i2c -> c2i -> > i2c and so on. > > Because the last thing a compiled function does is pop the return > address and jump to it, for i2c we end up in the "i2c return handler" > and the stack pointer is 1 word above where the return address was. > As a sanity check, we can verify it matches the "expected location" > (what we saved earlier). > We track the transition then jump to the "real/original return > address" which we have also saved. > > When we first did this, we encountered many crashes. 
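Concretely, the adapter and return-handler scheme described above amounts to keeping a small per-thread stack of (real return address, location of that return address) pairs. Below is a standalone C++ sketch of that bookkeeping, written purely as an illustration -- it is not the actual patch, and every name in it (SavedReturn, saved_returns, push_transition, pop_transition) is invented for the example. The crashes mentioned above, and what caused them, are explained next.

    #include <cassert>
    #include <vector>

    typedef unsigned char* address;      // stand-in for HotSpot's address type

    struct SavedReturn {
      address  real_return_addr;   // where the callee should really return to
      address* return_addr_slot;   // the stack slot that now holds the handler
    };

    // Conceptually a field of the per-thread JavaThread structure. It has to be
    // a stack because transitions nest: i2c -> c2i -> i2c -> ...
    static thread_local std::vector<SavedReturn> saved_returns;

    // Called from the instrumented i2c/c2i adapter: remember the real return
    // address and divert the return into the matching return handler.
    void push_transition(address* slot, address handler) {
      saved_returns.push_back(SavedReturn{*slot, slot});
      *slot = handler;               // the callee will now "return" into the handler
    }

    // Called from the i2c/c2i return handler: check that we are looking at the
    // slot recorded earlier, then continue at the original return address.
    address pop_transition(address* observed_slot) {
      SavedReturn sr = saved_returns.back();
      saved_returns.pop_back();
      assert(sr.return_addr_slot == observed_slot && "unexpected return location");
      return sr.real_return_addr;    // the handler jumps here after its accounting
    }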
> Many JVM operations rely on the return address to identify callers, such as: > - figuring out the caller when a function is deoptimized > (SharedRuntime::handle_wrong_method) > - getting a call trace (java_lang_Throwable::fill_in_stack_trace) > - finding ?activations? of compiled functions on the stack > (NMethodSweeper::mark_active_nmethods) > - checking permissions for AccessController (JVM_GetStackAccessControlContext) > > After identifying these "operations" which require examining the > stack, we can "undo" all our changes to return addresses before any of > it happens. > We could use the `frame` code to walk the stack, but we also saved the > return-address-locations earlier; we can replace each return address > (which should now be an i2c/c2i return handler address) with the > real/original return address. > When we do this, there is no evidence our instrumentation took place. > We redo all our changes after these operations are done so that when > the thread continues executing, we'll continue tracking transitions. > A lot of this happens in safepoints, e.g. marking active nmethods; we > unpatch/repatch at the start and end of a safepoint to handle many > scenarios. > > Java exceptions also need to be handled. > If a function does not catch an exception, it has "finished > executing"; we will pop the stack frame and see if the caller has a > handler. > It's necessary to check for a return handler address to track a > transition back to the caller's state (interpreted/native/compiled), > and also to identify the caller using the real/original return > address. > > Lastly, deoptimization also requires careful treatment. > The jit compiler does not necessarily compile all paths for a function. > If it hits an unexpected path, it can end up in a ?deopt blob? (for C1 > compiled functions) or ?uncommon trap blob? (for C2 compiled > functions) - these are generated in `sharedRuntime_x86_64.cpp`. > The two routines are the same to us - they replace the compiled frame > with an interpreted one, and continue in the interpreter. > > If the deoptee?s caller was interpreted, we must have had an i2c > transition (we should see the i2c return handler?s address on the > stack). > Since we?re executing the rest of the function as interpreted, it?s > the same as if the callee returned to the interpreted caller. > We need to replace the return address on the stack with the > original/real one, and track a transition back to interpreted code - > we simulate the "i2c return transition". > > If the deoptee?s caller was compiled, there was no transition earlier > (it was c2c). > However, we?re continuing execution as interpreted, and will > eventually return to the compiled caller. > We simulate a c2i call; we transition to interpreted code (as with an > interpreted caller), but also save the current/real return address and > replace the one on the stack with the c2i return handler's address. > When the callee returns, we will track the transition back to compiled code. > > C2i return is actually more complicated if we want to do sanity checks. > As explained above, we can check that the stack pointer is a sane > value matching our recorded location for the return address. > However, interpreter frames have both a base pointer (rbp) pointing to > the "rsp on callee entry", and a "sender sp" (usually r13) for the > "rsp on caller exit". > A simple example of why this is required is the c2i adapter itself. > Because it moves register-arguments onto the stack, it needs to > allocate space on the stack. 
> Sender sp/r13 points to the stack pointer at the start of the adapter > before doing anything. > Rbp points to the stack pointer on interpreted method entry, after the > adapter has done the shuffling. > The return address is 1 word above rbp. > > When an interpreted method returns, it does something like: > # restore callee saved registers > mov rsp <- rbp (reset frame) > pop rbp (restore base pointer) > pop r11 (return address into some temporary register - r11 is caller saved) > mov rsp <- r13 (restore stack pointer to sender sp - r13 is callee > saved and would have been restored above) > jmp r11 (jump to return address) > > Therefore, by the time you end up in the c2i return handler, you don't > know where the return address was. > To verify the return address location, we had to find all places the > interpreter implemented a method return (there are various cases), and > manually check the return address right after picking it up from the > stack, before rsp is set to the sender sp. > > --- > I found code for JVMTI where it gets events for every interpreted and > native entry. > As a sanity check, enabled by the flag -XX:+ProfileIntCompStrict, on > every interpreted method entry, it checks if the state recorded > actually is interpreted. > > There are some Java functions the JVM specifically makes calls to (a > lot of stuff related to classloading, e.g. > `ClassLoader#checkPackageAccess`). > These n2i transitions are manually tracked. > We found these locations with the sanity check above. > > We do not see this check failing in our workloads currently. > > --- > Hopefully this high level overview makes sense. > Comments in the code give more details regarding specific scenarios, > such as handling OSR (which is conceptually similar). > I?d be happy to answer any questions or explain anything. > > Any feedback is appreciated. > Thank you for your time! > > Best regards, > Adrian From coleen.phillimore at oracle.com Tue Apr 19 18:17:40 2016 From: coleen.phillimore at oracle.com (Coleen Phillimore) Date: Tue, 19 Apr 2016 11:17:40 -0700 (PDT) Subject: RFR (S) 8154566: os_linux.cpp parse_os_info gives non descriptive output on current SLES releases Message-ID: <57167644.1080604@oracle.com> Summary: For SuSE, read the first line of the /etc/xrelease file, also get PRETTY_NAME from /etc/os_release Contributed-by: matthias.baesken at sap.com, coleen.phillimore at oracle.com See discussion: http://mail.openjdk.java.net/pipermail/hotspot-dev/2016-April/022720.html open webrev at http://cr.openjdk.java.net/~coleenp/8154566.01/webrev bug link https://bugs.openjdk.java.net/browse/JDK-8154566 Tested by Matthias on SuSE and myself on OEL 6.0 and Ubuntu. Ran hotspot/test/runtime jtreg tests. 
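For anyone who wants to see the /etc/os-release part of this change in isolation: the PRETTY_NAME handling discussed in this thread boils down to scanning the file for the PRETTY_NAME= line and stripping the surrounding quotes. The function below is a minimal standalone C++ sketch of that parsing, not the code in the webrev (parse_pretty_name and its arguments are made up for the example):

    #include <stdio.h>
    #include <string.h>

    // Scan an open /etc/os-release style file for PRETTY_NAME="..." and copy the
    // unquoted value into distro; leave distro empty if the key is not present.
    static void parse_pretty_name(FILE* fp, char* distro, size_t length) {
      char buf[256];
      while (fgets(buf, sizeof(buf), fp) != NULL) {
        buf[strcspn(buf, "\n")] = '\0';              // drop the trailing newline
        if (strstr(buf, "PRETTY_NAME=") != NULL) {
          char* start = strchr(buf, '"');            // the value is usually quoted
          if (start != NULL) {
            start++;                                 // step past the opening quote
            char* end = strchr(start, '"');
            if (end != NULL) {
              *end = '\0';                           // cut off the closing quote
            }
          } else {
            start = buf + strlen("PRETTY_NAME=");    // unquoted fallback
          }
          strncpy(distro, start, length - 1);
          distro[length - 1] = '\0';
          return;
        }
      }
      distro[0] = '\0';
    }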
Thanks, Coleen From lois.foltan at oracle.com Tue Apr 19 19:48:21 2016 From: lois.foltan at oracle.com (Lois Foltan) Date: Tue, 19 Apr 2016 15:48:21 -0400 Subject: RFR (S): JDK-8152949: Jigsaw crash when Klass in _fixup_module_field_list is unloaded In-Reply-To: <571593D5.7010504@oracle.com> References: <570FEFAB.2070809@oracle.com> <570FFC4F.7020000@oracle.com> <5710E397.3020106@oracle.com> <5710E86F.3090606@oracle.com> <57111EBC.4000308@oracle.com> <571144ED.1050902@oracle.com> <5714876C.2050207@oracle.com> <5714C57C.1000109@oracle.com> <571542C2.5060707@oracle.com> <571593D5.7010504@oracle.com> Message-ID: <57168B85.9090803@oracle.com> On 4/18/2016 10:11 PM, David Holmes wrote: > Hi Lois, > > On 19/04/2016 6:25 AM, Lois Foltan wrote: >> >> On 4/18/2016 7:31 AM, Lois Foltan wrote: >>> >>> On 4/18/2016 3:06 AM, Stefan Karlsson wrote: >>>> On 2016-04-15 21:45, Alan Bateman wrote: >>>>> >>>>> On 15/04/2016 18:02, Lois Foltan wrote: >>>>>> >>>>>> Hi Stefan, >>>>>> >>>>>> In start up before module system initialization in complete I >>>>>> believe the VM is single threaded, so the increment/decrement >>>>>> reference counts do not need to be atomic. Adding it is a >>>>>> defensive move in case the reference count is ever used passed >>>>>> start up in the future. It kind of does seem a bit excessive, >>>>>> sounds like you agree? >>>>> There will be a number of threads running before the base module is >>>>> defined to the VM. As things stand the the java threads at this >>>>> point will be the Common-Cleaner, Finalizer, Reference Handler and >>>>> Signal Handler. >>>> >>>> So, are you saying that we need the atomics? >>>> >>>> The java_lang_Class::create_mirror function isn't multi-thread safe, >>>> and must already be guarded by a lock (SystemDictionary_lock AFAICT). >>>> The increment in Unsafe_DefineAnonymousClass0, will only be done >>>> once, for the single InstanceKlass instance in the CLD. And all reads >>>> of _keep_alive from the GC are done during safepoints. >>> The anonymous class is inserted in the fixup mirror and fixup module >>> lists during java_lang_Class::create_mirror() before it is made public >>> or "published" as loaded. So the two instances where the reference >>> count is incremented, Unsafe_DefineAnonymousClass0 and in >>> java_lang_Class::create_mirror(), are guarded by a lock as well as the >>> decrement in Unsafe_DefineAnonymousClass0. No other thread has access >>> to the class during this time, as it is being loaded. >>>> >>>> How does ModuleEntryTable::patch_javabase_entries guard against >>>> concurrent inserts into the _fixup_module_field_list list? >>> That leaves the decrement in >>> ModuleEntryTable::patch_javabase_entries() as possibly unguarded. This >>> only occurs when the VM is called to define the module java.base. I >>> believe this should be okay but will double check. >> >> One small change in modules.cpp/define_javabase_module() to ensure that >> only one definition attempt of java.base will occur and thus only one >> call to ModuleEntryTable::patch_javabase_entries(). If a situation >> arises where java.base is trying to be multiply defined, according to >> the expected error conditions for JVM_DefineModule(), an >> IllegalArgumentException should be thrown. >> >> I have also added a comment in classfile/classLoaderData.hpp explaining >> why _keep_alive does need to be defined volatile or atomic. 
> > Can you add assertions to check that _keep_alive is only modified > under the protection of the lock (with a special case perhaps for the > unguarded java.base case) ? Hi David, Thanks for the review. I misspoke when I indicated that the two increments and the one decrement of the reference counter that occur during a call to the Unsafe_DefineAnonymous0() method were guarded under a lock. However, due to the way anonymous classes are created only a single non-GC thread will have access to the _keep_alive field during this time. And as Stefan indicates above, all reads of _keep_alive from the GC are done during safepoints. Each anonymous class, when defined, has a dedicated ClassLoaderData created for it. No other class shares the anonymous class' name or CLD. Due to this uniqueness, no other thread has knowledge about this anonymous class while it is being defined. It is only upon return from Unsafe_DefineAnonymous0(), that the anonymous class exists and other threads, at that point, can potentially access it. Thanks, Lois > > Thanks, > David > >> Please review at: >> >> http://cr.openjdk.java.net/~lfoltan/bug_jdk8152949_1/ >> >> Retesting in progress. >> >> Thanks, >> Lois >> >>> >>> Thanks, >>> Lois >>> >>>> >>>> thanks, >>>> StefanK >>>> >>>> >>>>> >>>>> -Alan >>>> >>> >> From coleen.phillimore at oracle.com Tue Apr 19 19:50:47 2016 From: coleen.phillimore at oracle.com (Coleen Phillimore) Date: Tue, 19 Apr 2016 15:50:47 -0400 Subject: RFR (L) 8154580: Save mirror in interpreter frame to enable cleanups of CLDClosure Message-ID: <57168C17.40307@oracle.com> Summary: GC walks the mirror using OopClosure rather than using CLDClosure in oops_interpreted_do() See bug for more description and justification. The changes are large but very redundant. The main change is in TemplateInterpreterGenerator::generate_fixed_frame(). open webrev at http://cr.openjdk.java.net/~coleenp/8154580.01/webrev bug link https://bugs.openjdk.java.net/browse/JDK-8154580 Tested with hotspot-runtime-nightly and gc-nightly tests. Need testing with ppc and aarch64 open code. I implemented the changes but I can't test them. Thanks, Coleen From coleen.phillimore at oracle.com Tue Apr 19 20:24:26 2016 From: coleen.phillimore at oracle.com (Coleen Phillimore) Date: Tue, 19 Apr 2016 16:24:26 -0400 Subject: RFR (S): JDK-8152949: Jigsaw crash when Klass in _fixup_module_field_list is unloaded In-Reply-To: <57168B85.9090803@oracle.com> References: <570FEFAB.2070809@oracle.com> <570FFC4F.7020000@oracle.com> <5710E397.3020106@oracle.com> <5710E86F.3090606@oracle.com> <57111EBC.4000308@oracle.com> <571144ED.1050902@oracle.com> <5714876C.2050207@oracle.com> <5714C57C.1000109@oracle.com> <571542C2.5060707@oracle.com> <571593D5.7010504@oracle.com> <57168B85.9090803@oracle.com> Message-ID: <571693FA.3060807@oracle.com> Hi, this is getting long. On 4/19/16 3:48 PM, Lois Foltan wrote: > > On 4/18/2016 10:11 PM, David Holmes wrote: >> Hi Lois, >> >> On 19/04/2016 6:25 AM, Lois Foltan wrote: >>> >>> On 4/18/2016 7:31 AM, Lois Foltan wrote: >>>> >>>> On 4/18/2016 3:06 AM, Stefan Karlsson wrote: >>>>> On 2016-04-15 21:45, Alan Bateman wrote: >>>>>> >>>>>> On 15/04/2016 18:02, Lois Foltan wrote: >>>>>>> >>>>>>> Hi Stefan, >>>>>>> >>>>>>> In start up before module system initialization in complete I >>>>>>> believe the VM is single threaded, so the increment/decrement >>>>>>> reference counts do not need to be atomic. Adding it is a >>>>>>> defensive move in case the reference count is ever used passed >>>>>>> start up in the future. 
It kind of does seem a bit excessive, >>>>>>> sounds like you agree? >>>>>> There will be a number of threads running before the base module is >>>>>> defined to the VM. As things stand the the java threads at this >>>>>> point will be the Common-Cleaner, Finalizer, Reference Handler and >>>>>> Signal Handler. >>>>> >>>>> So, are you saying that we need the atomics? >>>>> >>>>> The java_lang_Class::create_mirror function isn't multi-thread safe, >>>>> and must already be guarded by a lock (SystemDictionary_lock AFAICT). >>>>> The increment in Unsafe_DefineAnonymousClass0, will only be done >>>>> once, for the single InstanceKlass instance in the CLD. And all reads >>>>> of _keep_alive from the GC are done during safepoints. >>>> The anonymous class is inserted in the fixup mirror and fixup module >>>> lists during java_lang_Class::create_mirror() before it is made public >>>> or "published" as loaded. So the two instances where the reference >>>> count is incremented, Unsafe_DefineAnonymousClass0 and in >>>> java_lang_Class::create_mirror(), are guarded by a lock as well as the >>>> decrement in Unsafe_DefineAnonymousClass0. No other thread has access >>>> to the class during this time, as it is being loaded. >>>>> >>>>> How does ModuleEntryTable::patch_javabase_entries guard against >>>>> concurrent inserts into the _fixup_module_field_list list? >>>> That leaves the decrement in >>>> ModuleEntryTable::patch_javabase_entries() as possibly unguarded. This >>>> only occurs when the VM is called to define the module java.base. I >>>> believe this should be okay but will double check. >>> >>> One small change in modules.cpp/define_javabase_module() to ensure that >>> only one definition attempt of java.base will occur and thus only one >>> call to ModuleEntryTable::patch_javabase_entries(). If a situation >>> arises where java.base is trying to be multiply defined, according to >>> the expected error conditions for JVM_DefineModule(), an >>> IllegalArgumentException should be thrown. >>> >>> I have also added a comment in classfile/classLoaderData.hpp explaining >>> why _keep_alive does need to be defined volatile or atomic. >> >> Can you add assertions to check that _keep_alive is only modified >> under the protection of the lock (with a special case perhaps for the >> unguarded java.base case) ? > > Hi David, > > Thanks for the review. I misspoke when I indicated that the two > increments and the one decrement of the reference counter that occur > during a call to the Unsafe_DefineAnonymous0() method were guarded > under a lock. However, due to the way anonymous classes are created > only a single non-GC thread will have access to the _keep_alive field > during this time. And as Stefan indicates above, all reads of > _keep_alive from the GC are done during safepoints. Each anonymous > class, when defined, has a dedicated ClassLoaderData created for it. > No other class shares the anonymous class' name or CLD. Due to this > uniqueness, no other thread has knowledge about this anonymous class > while it is being defined. It is only upon return from > Unsafe_DefineAnonymous0(), that the anonymous class exists and other > threads, at that point, can potentially access it. > Ah interesting. Currently, this is true and why this is safe. If we change the JVM to have *some* anonymous classes share CLD with their host_class because the lifetimes are the same, then we'll have to use atomic operations. 
http://cr.openjdk.java.net/~lfoltan/bug_jdk8152949_1/src/share/vm/classfile/classLoaderData.cpp.udiff.html Can you put one comment directly above the inc_keep_alive() and dec_keep_alive() functions to this effect here, just so we remember? // Anonymous classes have their own ClassLoaderData that is marked to keep alive while the class is being parsed, and // if the class appears on the module fixup list. // If anonymous classes are changed to share with host_class, this refcount needs to be changed to use atomic operations. *+ void ClassLoaderData::inc_keep_alive() {* *+ assert(_keep_alive >= 0, "Invalid keep alive count");* *+ _keep_alive++;* *+ }* *+ * *+ void ClassLoaderData::dec_keep_alive() {* *+ assert(_keep_alive > 0, "Invalid keep alive count");* *+ _keep_alive--;* *+ }* *+ * More below. > Thanks, > Lois > >> >> Thanks, >> David >> >>> Please review at: >>> >>> http://cr.openjdk.java.net/~lfoltan/bug_jdk8152949_1/ http://cr.openjdk.java.net/~lfoltan/bug_jdk8152949_1/src/share/vm/classfile/modules.cpp.frames.html I'm not sure how this relates to the bug. Otherwise, the change looks good. Thanks, Coleen >>> >>> Retesting in progress. >>> >>> Thanks, >>> Lois >>> >>>> >>>> Thanks, >>>> Lois >>>> >>>>> >>>>> thanks, >>>>> StefanK >>>>> >>>>> >>>>>> >>>>>> -Alan >>>>> >>>> >>> > From christian.thalinger at oracle.com Tue Apr 19 20:37:14 2016 From: christian.thalinger at oracle.com (Christian Thalinger) Date: Tue, 19 Apr 2016 13:37:14 -0700 (PDT) Subject: RFR (S): JDK-8152949: Jigsaw crash when Klass in _fixup_module_field_list is unloaded In-Reply-To: <571693FA.3060807@oracle.com> References: <570FEFAB.2070809@oracle.com> <570FFC4F.7020000@oracle.com> <5710E397.3020106@oracle.com> <5710E86F.3090606@oracle.com> <57111EBC.4000308@oracle.com> <571144ED.1050902@oracle.com> <5714876C.2050207@oracle.com> <5714C57C.1000109@oracle.com> <571542C2.5060707@oracle.com> <571593D5.7010504@oracle.com> <57168B85.9090803@oracle.com> <571693FA.3060807@oracle.com> Message-ID: > On Apr 19, 2016, at 10:24 AM, Coleen Phillimore wrote: > > > Hi, this is getting long. > > On 4/19/16 3:48 PM, Lois Foltan wrote: >> >> On 4/18/2016 10:11 PM, David Holmes wrote: >>> Hi Lois, >>> >>> On 19/04/2016 6:25 AM, Lois Foltan wrote: >>>> >>>> On 4/18/2016 7:31 AM, Lois Foltan wrote: >>>>> >>>>> On 4/18/2016 3:06 AM, Stefan Karlsson wrote: >>>>>> On 2016-04-15 21:45, Alan Bateman wrote: >>>>>>> >>>>>>> On 15/04/2016 18:02, Lois Foltan wrote: >>>>>>>> >>>>>>>> Hi Stefan, >>>>>>>> >>>>>>>> In start up before module system initialization in complete I >>>>>>>> believe the VM is single threaded, so the increment/decrement >>>>>>>> reference counts do not need to be atomic. Adding it is a >>>>>>>> defensive move in case the reference count is ever used passed >>>>>>>> start up in the future. It kind of does seem a bit excessive, >>>>>>>> sounds like you agree? >>>>>>> There will be a number of threads running before the base module is >>>>>>> defined to the VM. As things stand the the java threads at this >>>>>>> point will be the Common-Cleaner, Finalizer, Reference Handler and >>>>>>> Signal Handler. >>>>>> >>>>>> So, are you saying that we need the atomics? >>>>>> >>>>>> The java_lang_Class::create_mirror function isn't multi-thread safe, >>>>>> and must already be guarded by a lock (SystemDictionary_lock AFAICT). >>>>>> The increment in Unsafe_DefineAnonymousClass0, will only be done >>>>>> once, for the single InstanceKlass instance in the CLD. And all reads >>>>>> of _keep_alive from the GC are done during safepoints. 
>>>>> The anonymous class is inserted in the fixup mirror and fixup module >>>>> lists during java_lang_Class::create_mirror() before it is made public >>>>> or "published" as loaded. So the two instances where the reference >>>>> count is incremented, Unsafe_DefineAnonymousClass0 and in >>>>> java_lang_Class::create_mirror(), are guarded by a lock as well as the >>>>> decrement in Unsafe_DefineAnonymousClass0. No other thread has access >>>>> to the class during this time, as it is being loaded. >>>>>> >>>>>> How does ModuleEntryTable::patch_javabase_entries guard against >>>>>> concurrent inserts into the _fixup_module_field_list list? >>>>> That leaves the decrement in >>>>> ModuleEntryTable::patch_javabase_entries() as possibly unguarded. This >>>>> only occurs when the VM is called to define the module java.base. I >>>>> believe this should be okay but will double check. >>>> >>>> One small change in modules.cpp/define_javabase_module() to ensure that >>>> only one definition attempt of java.base will occur and thus only one >>>> call to ModuleEntryTable::patch_javabase_entries(). If a situation >>>> arises where java.base is trying to be multiply defined, according to >>>> the expected error conditions for JVM_DefineModule(), an >>>> IllegalArgumentException should be thrown. >>>> >>>> I have also added a comment in classfile/classLoaderData.hpp explaining >>>> why _keep_alive does need to be defined volatile or atomic. >>> >>> Can you add assertions to check that _keep_alive is only modified under the protection of the lock (with a special case perhaps for the unguarded java.base case) ? >> >> Hi David, >> >> Thanks for the review. I misspoke when I indicated that the two increments and the one decrement of the reference counter that occur during a call to the Unsafe_DefineAnonymous0() method were guarded under a lock. However, due to the way anonymous classes are created only a single non-GC thread will have access to the _keep_alive field during this time. And as Stefan indicates above, all reads of _keep_alive from the GC are done during safepoints. Each anonymous class, when defined, has a dedicated ClassLoaderData created for it. No other class shares the anonymous class' name or CLD. Due to this uniqueness, no other thread has knowledge about this anonymous class while it is being defined. It is only upon return from Unsafe_DefineAnonymous0(), that the anonymous class exists and other threads, at that point, can potentially access it. >> > > Ah interesting. Currently, this is true and why this is safe. If we change the JVM to have *some* anonymous classes share CLD with their host_class because the lifetimes are the same, then we'll have to use atomic operations. > > http://cr.openjdk.java.net/~lfoltan/bug_jdk8152949_1/src/share/vm/classfile/classLoaderData.cpp.udiff.html > > Can you put one comment directly above the inc_keep_alive() and dec_keep_alive() functions to this effect here, just so we remember? Why don?t we just use atomic operations and be done with it? > > // Anonymous classes have their own ClassLoaderData that is marked to keep alive while the class is being parsed, and > // if the class appears on the module fixup list. > // If anonymous classes are changed to share with host_class, this refcount needs to be changed to use atomic operations. 
> > *+ void ClassLoaderData::inc_keep_alive() {* > *+ assert(_keep_alive >= 0, "Invalid keep alive count");* > *+ _keep_alive++;* > *+ }* > *+ * > *+ void ClassLoaderData::dec_keep_alive() {* > *+ assert(_keep_alive > 0, "Invalid keep alive count");* > *+ _keep_alive--;* > *+ }* > *+ * > > More below. > >> Thanks, >> Lois >> >>> >>> Thanks, >>> David >>> >>>> Please review at: >>>> >>>> http://cr.openjdk.java.net/~lfoltan/bug_jdk8152949_1/ > > http://cr.openjdk.java.net/~lfoltan/bug_jdk8152949_1/src/share/vm/classfile/modules.cpp.frames.html > > I'm not sure how this relates to the bug. > > Otherwise, the change looks good. > > Thanks, > Coleen > > >>>> >>>> Retesting in progress. >>>> >>>> Thanks, >>>> Lois >>>> >>>>> >>>>> Thanks, >>>>> Lois >>>>> >>>>>> >>>>>> thanks, >>>>>> StefanK >>>>>> >>>>>> >>>>>>> >>>>>>> -Alan From christian.thalinger at oracle.com Tue Apr 19 20:35:09 2016 From: christian.thalinger at oracle.com (Christian Thalinger) Date: Tue, 19 Apr 2016 13:35:09 -0700 (PDT) Subject: RFR (L) 8154580: Save mirror in interpreter frame to enable cleanups of CLDClosure In-Reply-To: <57168C17.40307@oracle.com> References: <57168C17.40307@oracle.com> Message-ID: <159176CE-675D-41A6-A13A-EBD7580E640C@oracle.com> > On Apr 19, 2016, at 9:50 AM, Coleen Phillimore wrote: > > Summary: GC walks the mirror using OopClosure rather than using CLDClosure in oops_interpreted_do() > > See bug for more description and justification. The changes are large but very redundant. The main change is in TemplateInterpreterGenerator::generate_fixed_frame(). + // Save oop Mirror (with padding) + __ load_mirror(rscratch1, rmethod); + // get mirror and store it in the frame so that this Method* is never + // reclaimed while it's running. + Register mirror = LcpoolCache; + __ load_mirror(mirror, Method); + // Push the mirror so this method isn't collected + __ load_mirror(rdx, rbi); Please use the same comment on all platforms. > > open webrev at http://cr.openjdk.java.net/~coleenp/8154580.01/webrev > bug link https://bugs.openjdk.java.net/browse/JDK-8154580 src/share/vm/runtime/frame.cpp // The method pointer in the frame might be the only path to the method's // klass, and the klass needs to be kept alive while executing. The GCs // don't trace through method pointers, so typically in similar situations // the mirror or the class loader of the klass are installed as a GC root. - // To minimize the overhead of doing that here, we ask the GC to pass down a - // closure that knows how to keep klasses alive given a ClassLoaderData. - cld_f->do_cld(m->method_holder()->class_loader_data()); - } - - if (m->is_native() PPC32_ONLY(&& m->is_static())) { - f->do_oop(interpreter_frame_temp_oop_addr()); - } + // And it is here too. + f->do_oop(interpreter_frame_mirror_addr()); That comment is kinda funny now. It still hints at the old-way of doing things but ?it is here too?. > > Tested with hotspot-runtime-nightly and gc-nightly tests. > > Need testing with ppc and aarch64 open code. I implemented the changes but I can't test them. One obvious bug is that you copied the __ as well: +void MacroAssembler::load_mirror(Register dst, Register method) { + const int mirror_offset = in_bytes(Klass::java_mirror_offset()); + __ ldr(dst, Address(rmethod, Method::const_offset())); + __ ldr(dst, Address(dst, ConstMethod::constants_offset())); + __ ldr(dst, Address(dst, ConstantPool::pool_holder_offset_in_bytes())); + __ ldr(dst, Address(dst, mirror_offset)); +} Other than that it looks fine to me. Nothing obvious stood out. 
> > Thanks, > Coleen From gerald.thornbrugh at oracle.com Tue Apr 19 20:58:24 2016 From: gerald.thornbrugh at oracle.com (Gerald Thornbrugh) Date: Tue, 19 Apr 2016 13:58:24 -0700 (PDT) Subject: Fwd: RFR (S) 8154566: os_linux.cpp parse_os_info gives non descriptive output on current SLES releases In-Reply-To: <571685A9.4060707@oracle.com> References: <57167644.1080604@oracle.com> <571685A9.4060707@oracle.com> Message-ID: <57169BF0.4030804@oracle.com> Hi Coleen, It looks like /etc/SuSE-release is deprecated starting with SLE 12 and will be removed in the future. See: https://www.suse.com/releasenotes/x86_64/SUSE-SLED/12/ From the link: > > > 5.3.7.2 Use /etc/os-release Instead of /etc/SuSE-release > Report Bug > > # > > > /Starting with SLE 12, /etc/SuSE-release file is deprecated. It should > not be used to identify a SUSE Linux Enterprise system. This file will > be removed in a future Service Pack or release./ > > The file |/etc/os-release| now is decisive. This file is a > cross-distribution standard to identify a Linux system. For more > information about the syntax, see the os-release man page ( |man > os-release| ). > So once the /etc/SuSE-release is removed it looks like the code will find the /etc/lsb-release file again and this problem may come back unless I am not understanding this correctly. I wonder if the /etc/os-release file should be moved before the /etc/lsb-release file in the list? Understanding the implications of that change would take a significant amount of testing. Jerry > > -------- Forwarded Message -------- > Subject: RFR (S) 8154566: os_linux.cpp parse_os_info gives non > descriptive output on current SLES releases > Date: Tue, 19 Apr 2016 11:17:40 -0700 (PDT) > From: Coleen Phillimore > To: hotspot-dev developers > > > > Summary: For SuSE, read the first line of the /etc/xrelease file, also > get PRETTY_NAME from /etc/os_release > Contributed-by:matthias.baesken at sap.com,coleen.phillimore at oracle.com > > See discussion: > http://mail.openjdk.java.net/pipermail/hotspot-dev/2016-April/022720.html > > open webrev athttp://cr.openjdk.java.net/~coleenp/8154566.01/webrev > bug linkhttps://bugs.openjdk.java.net/browse/JDK-8154566 > > Tested by Matthias on SuSE and myself on OEL 6.0 and Ubuntu. Ran > hotspot/test/runtime jtreg tests. > > Thanks, > Coleen > > From stefan.karlsson at oracle.com Tue Apr 19 20:56:29 2016 From: stefan.karlsson at oracle.com (Stefan Karlsson) Date: Tue, 19 Apr 2016 13:56:29 -0700 (PDT) Subject: RFR (S): JDK-8152949: Jigsaw crash when Klass in _fixup_module_field_list is unloaded In-Reply-To: <571693FA.3060807@oracle.com> References: <570FEFAB.2070809@oracle.com> <570FFC4F.7020000@oracle.com> <5710E397.3020106@oracle.com> <5710E86F.3090606@oracle.com> <57111EBC.4000308@oracle.com> <571144ED.1050902@oracle.com> <5714876C.2050207@oracle.com> <5714C57C.1000109@oracle.com> <571542C2.5060707@oracle.com> <571593D5.7010504@oracle.com> <57168B85.9090803@oracle.com> <571693FA.3060807@oracle.com> Message-ID: <57169B7D.6040205@oracle.com> On 2016-04-19 22:24, Coleen Phillimore wrote: > > Hi, this is getting long. 
> > On 4/19/16 3:48 PM, Lois Foltan wrote: >> >> On 4/18/2016 10:11 PM, David Holmes wrote: >>> Hi Lois, >>> >>> On 19/04/2016 6:25 AM, Lois Foltan wrote: >>>> >>>> On 4/18/2016 7:31 AM, Lois Foltan wrote: >>>>> >>>>> On 4/18/2016 3:06 AM, Stefan Karlsson wrote: >>>>>> On 2016-04-15 21:45, Alan Bateman wrote: >>>>>>> >>>>>>> On 15/04/2016 18:02, Lois Foltan wrote: >>>>>>>> >>>>>>>> Hi Stefan, >>>>>>>> >>>>>>>> In start up before module system initialization in complete I >>>>>>>> believe the VM is single threaded, so the increment/decrement >>>>>>>> reference counts do not need to be atomic. Adding it is a >>>>>>>> defensive move in case the reference count is ever used passed >>>>>>>> start up in the future. It kind of does seem a bit excessive, >>>>>>>> sounds like you agree? >>>>>>> There will be a number of threads running before the base module is >>>>>>> defined to the VM. As things stand the the java threads at this >>>>>>> point will be the Common-Cleaner, Finalizer, Reference Handler and >>>>>>> Signal Handler. >>>>>> >>>>>> So, are you saying that we need the atomics? >>>>>> >>>>>> The java_lang_Class::create_mirror function isn't multi-thread safe, >>>>>> and must already be guarded by a lock (SystemDictionary_lock >>>>>> AFAICT). >>>>>> The increment in Unsafe_DefineAnonymousClass0, will only be done >>>>>> once, for the single InstanceKlass instance in the CLD. And all >>>>>> reads >>>>>> of _keep_alive from the GC are done during safepoints. >>>>> The anonymous class is inserted in the fixup mirror and fixup module >>>>> lists during java_lang_Class::create_mirror() before it is made >>>>> public >>>>> or "published" as loaded. So the two instances where the reference >>>>> count is incremented, Unsafe_DefineAnonymousClass0 and in >>>>> java_lang_Class::create_mirror(), are guarded by a lock as well as >>>>> the >>>>> decrement in Unsafe_DefineAnonymousClass0. No other thread has >>>>> access >>>>> to the class during this time, as it is being loaded. >>>>>> >>>>>> How does ModuleEntryTable::patch_javabase_entries guard against >>>>>> concurrent inserts into the _fixup_module_field_list list? >>>>> That leaves the decrement in >>>>> ModuleEntryTable::patch_javabase_entries() as possibly unguarded. >>>>> This >>>>> only occurs when the VM is called to define the module java.base. I >>>>> believe this should be okay but will double check. >>>> >>>> One small change in modules.cpp/define_javabase_module() to ensure >>>> that >>>> only one definition attempt of java.base will occur and thus only one >>>> call to ModuleEntryTable::patch_javabase_entries(). If a situation >>>> arises where java.base is trying to be multiply defined, according to >>>> the expected error conditions for JVM_DefineModule(), an >>>> IllegalArgumentException should be thrown. >>>> >>>> I have also added a comment in classfile/classLoaderData.hpp >>>> explaining >>>> why _keep_alive does need to be defined volatile or atomic. >>> >>> Can you add assertions to check that _keep_alive is only modified >>> under the protection of the lock (with a special case perhaps for >>> the unguarded java.base case) ? >> >> Hi David, >> >> Thanks for the review. I misspoke when I indicated that the two >> increments and the one decrement of the reference counter that occur >> during a call to the Unsafe_DefineAnonymous0() method were guarded >> under a lock. However, due to the way anonymous classes are created >> only a single non-GC thread will have access to the _keep_alive field >> during this time. 
And as Stefan indicates above, all reads of >> _keep_alive from the GC are done during safepoints. Each anonymous >> class, when defined, has a dedicated ClassLoaderData created for it. >> No other class shares the anonymous class' name or CLD. Due to this >> uniqueness, no other thread has knowledge about this anonymous class >> while it is being defined. It is only upon return from >> Unsafe_DefineAnonymous0(), that the anonymous class exists and other >> threads, at that point, can potentially access it. >> > > Ah interesting. Currently, this is true and why this is safe. If we > change the JVM to have *some* anonymous classes share CLD with their > host_class because the lifetimes are the same, then we'll have to use > atomic operations. Well, then we wouldn't have to use the _keep_alive field because the lifetime would be tracked by the loader of the host_class. The _keep_alive field was added to handle the short window between the creation of the InstanceKlass and the creation of the mirror (java.lang.Class instances). If we have a class loader, then the anonymous klass will be kept alive automatically, as long as the class loader is reachable. StefanK > > http://cr.openjdk.java.net/~lfoltan/bug_jdk8152949_1/src/share/vm/classfile/classLoaderData.cpp.udiff.html > > > Can you put one comment directly above the inc_keep_alive() and > dec_keep_alive() functions to this effect here, just so we remember? > > // Anonymous classes have their own ClassLoaderData that is marked to > keep alive while the class is being parsed, and > // if the class appears on the module fixup list. > // If anonymous classes are changed to share with host_class, this > refcount needs to be changed to use atomic operations. > > *+ void ClassLoaderData::inc_keep_alive() {* > *+ assert(_keep_alive >= 0, "Invalid keep alive count");* > *+ _keep_alive++;* > *+ }* > *+ * > *+ void ClassLoaderData::dec_keep_alive() {* > *+ assert(_keep_alive > 0, "Invalid keep alive count");* > *+ _keep_alive--;* > *+ }* > *+ * > > More below. > >> Thanks, >> Lois >> >>> >>> Thanks, >>> David >>> >>>> Please review at: >>>> >>>> http://cr.openjdk.java.net/~lfoltan/bug_jdk8152949_1/ > > http://cr.openjdk.java.net/~lfoltan/bug_jdk8152949_1/src/share/vm/classfile/modules.cpp.frames.html > > > I'm not sure how this relates to the bug. > > Otherwise, the change looks good. > > Thanks, > Coleen > > >>>> >>>> Retesting in progress. >>>> >>>> Thanks, >>>> Lois >>>> >>>>> >>>>> Thanks, >>>>> Lois >>>>> >>>>>> >>>>>> thanks, >>>>>> StefanK >>>>>> >>>>>> >>>>>>> >>>>>>> -Alan >>>>>> >>>>> >>>> >> > From volker.simonis at gmail.com Tue Apr 19 21:35:11 2016 From: volker.simonis at gmail.com (Volker Simonis) Date: Tue, 19 Apr 2016 23:35:11 +0200 Subject: RFR (XL): 8152664 - Support non-continuous CodeBlobs in HotSpot In-Reply-To: <31054C59-BCB4-4C66-9AF9-FC92DC836821@oracle.com> References: <20160407121221.GQ9504@rbackman> <20160419053212.GB19871@rbackman> <31054C59-BCB4-4C66-9AF9-FC92DC836821@oracle.com> Message-ID: On Tue, Apr 19, 2016 at 6:49 PM, Christian Thalinger < christian.thalinger at oracle.com> wrote: > > On Apr 19, 2016, at 4:30 AM, Volker Simonis > wrote: > > Hi Rickard, > > I just wanted to prepare the new webrev for 8151956 but I'm a little > confused because I realized that your latest webrev already contains the > changes which I had proposed for 8151956. > > But after thinking about it a little bit I think that's fine. 
If I prepare > a patch for 8151956 which is intended to be pushed BEFORE 8152664 you'd had > to adapt 8152664 to take care of the new changes introduced by 8151956. If > I prepare a patch for 8151956 which is intended to be pushed AFTER 8152664 > it would be hard to review it (because it will depend on 8152664) and we > would get a change in the repo which would not build on PPC64 and AARCH64 > which isn't nice either. > > So altogether I think it's fine to incorporate the fix for 8151956 into > your change. Please only don't forget to close 8151956 as "fixed by > 8152664" after you have pushed the changes for 8152664. > > I've verified that your last webrev builds and runs fine on Linux/ppc64 and > AIX. You've also fixed all the issues I've addressed in my first mail to > this thread and the typo in os_linux_aarch64.cpp found by Andrew - thanks! > > Some final nit-picking: > > - you still have the white-space only change in os_windows.cpp objected by > Vladimir. > > - in codeBlob.cpp can you please update the following comments to reflect > the new types: > > // Creates a simple CodeBlob. Sets up the size of the different > regions.* CodeBlob::CodeBlob(const char* name, int header_size, int > size, int frame_complete, int locs_size) {** assert(size == > round_to(size, oopSize), "unaligned size");**+ > RuntimeBlob::RuntimeBlob(const char* name, int header_size, int size, > int frame_complete, int locs_size)* > > // Creates a CodeBlob from a CodeBuffer. Sets up the size of the > different regions, // and copy code and relocation info.*! > CodeBlob::CodeBlob(**! RuntimeBlob::RuntimeBlob(* > > > - why do we need: > > *+ bool make_not_used() { return make_not_entrant(); }* > > it only forwards to make_not_entrant() and it is only used a single time in > ciEnv.cpp: > > *! old->make_not_entrant();**! > old->make_not_used();* > > > I can answer this. make_not_used is virtual: > > virtual bool make_not_used() = 0; > > Can you guess why this is the case? :-) The reason is that the > implementation is different for AOT compiled methods. > > OK, I see. Thanks for the background info but now I can not refrain from commenting :) If SAP (or anybody else outside Oracle) would submit such a kind of XL change in order to better support let's say it's closed HPUX/Itanium port I don't think it would be even considered. I don't want to reject these specific change (I came to terms with it :) but I think this should stand as bad example for changes which will not happen too often in the future. > > > - I don't understand why we need both NMethodIterator and > CompiledMethodIterator - they're virtually the same and nmethod is > currently the only subclass of CompiledMethod. Can you please be more > specific why you've changed some instances of NMethodIterator to > CompiledMethodIterator and others not. Without background information this > makes no sense to me. Also, the advance method in CompiledMethodIterator > isn't "inline" while the one in NMethodIterator is - don't know if this > will be a performance problem. > > The rest looks good to me but please notice that I still haven't looked at > all changes (especially not on the agent/ and dtrace/ files). So you should > get at least one more reviewer for such a big change. > > Regards, > Volker > > > > On Tue, Apr 19, 2016 at 7:32 AM, Rickard B?ckman < > rickard.backman at oracle.com > > wrote: > > > Here is the updated webrev, rebased and I think I have fixed all the > comments with one exception. 
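A templated iterator of the kind Volker is hinting at would collapse the two classes into a single body. Below is a rough standalone sketch, not code from the webrev; CodeCacheIteratorT and the toy types are made-up names used only to show the shape.

#include <vector>
#include <cstddef>

// Toy stand-ins for the HotSpot types (illustration only).
struct CodeBlob          { virtual ~CodeBlob() {} };
struct CompiledMethod : CodeBlob {};
struct nmethod        : CompiledMethod {};

// One templated iterator instead of two nearly identical classes:
// FILTER decides which blobs are visible (CompiledMethod or nmethod).
template <typename FILTER>
class CodeCacheIteratorT {
 public:
  explicit CodeCacheIteratorT(const std::vector<CodeBlob*>& blobs)
    : _blobs(blobs), _index(static_cast<std::size_t>(-1)) {}

  // Advance to the next blob of (at least) type FILTER; false when exhausted.
  bool next() {
    while (++_index < _blobs.size()) {
      if (dynamic_cast<FILTER*>(_blobs[_index]) != NULL) return true;
    }
    return false;
  }

  FILTER* method() const { return static_cast<FILTER*>(_blobs[_index]); }

 private:
  const std::vector<CodeBlob*>& _blobs;
  std::size_t _index;
};

// The two public names become typedefs over one implementation.
typedef CodeCacheIteratorT<CompiledMethod> CompiledMethodIterator;
typedef CodeCacheIteratorT<nmethod>        NMethodIterator;

The real code cache would use its own blob-type queries rather than RTTI; the point is only that the two iterator names could share one implementation, which is roughly what making them a template class would mean.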
> > I've avoided making CompiledMethodIterator and NMethodIterator a > template class for now. I agree we should do something to reuse the > parts that are identical but for now I think there will be a few more > changes to CompiledMethodIterator in an upcoming RFR. So can we hold off > with that change? > > Webrev: http://cr.openjdk.java.net/~rbackman/8152664.3/ > > Thanks > > On 04/07, Rickard B?ckman wrote: > > Hi, > > can I please have review for this patch please? > > So far CodeBlobs have required all the data (metadata, oops, code, etc) > to be in one continuous blob With this patch we are looking to change > that. It's been done by changing offsets in CodeBlob to addresses, > making some methods virtual to allow different behavior and also > creating a couple of new classes. CompiledMethod now sits inbetween > CodeBlob and nmethod. > > CR: https://bugs.openjdk.java.net/browse/JDK-8152664 > Webrev: http://cr.openjdk.java.net/~rbackman/8152664/ > > Thanks > /R > > /R > > > From coleen.phillimore at oracle.com Tue Apr 19 21:28:45 2016 From: coleen.phillimore at oracle.com (Coleen Phillimore) Date: Tue, 19 Apr 2016 14:28:45 -0700 (PDT) Subject: RFR (S): JDK-8152949: Jigsaw crash when Klass in _fixup_module_field_list is unloaded In-Reply-To: <57169B7D.6040205@oracle.com> References: <570FEFAB.2070809@oracle.com> <570FFC4F.7020000@oracle.com> <5710E397.3020106@oracle.com> <5710E86F.3090606@oracle.com> <57111EBC.4000308@oracle.com> <571144ED.1050902@oracle.com> <5714876C.2050207@oracle.com> <5714C57C.1000109@oracle.com> <571542C2.5060707@oracle.com> <571593D5.7010504@oracle.com> <57168B85.9090803@oracle.com> <571693FA.3060807@oracle.com> <57169B7D.6040205@oracle.com> Message-ID: <5716A30D.1080305@oracle.com> On 4/19/16 4:56 PM, Stefan Karlsson wrote: > On 2016-04-19 22:24, Coleen Phillimore wrote: >> >> Hi, this is getting long. >> >> On 4/19/16 3:48 PM, Lois Foltan wrote: >>> >>> On 4/18/2016 10:11 PM, David Holmes wrote: >>>> Hi Lois, >>>> >>>> On 19/04/2016 6:25 AM, Lois Foltan wrote: >>>>> >>>>> On 4/18/2016 7:31 AM, Lois Foltan wrote: >>>>>> >>>>>> On 4/18/2016 3:06 AM, Stefan Karlsson wrote: >>>>>>> On 2016-04-15 21:45, Alan Bateman wrote: >>>>>>>> >>>>>>>> On 15/04/2016 18:02, Lois Foltan wrote: >>>>>>>>> >>>>>>>>> Hi Stefan, >>>>>>>>> >>>>>>>>> In start up before module system initialization in complete I >>>>>>>>> believe the VM is single threaded, so the increment/decrement >>>>>>>>> reference counts do not need to be atomic. Adding it is a >>>>>>>>> defensive move in case the reference count is ever used passed >>>>>>>>> start up in the future. It kind of does seem a bit excessive, >>>>>>>>> sounds like you agree? >>>>>>>> There will be a number of threads running before the base >>>>>>>> module is >>>>>>>> defined to the VM. As things stand the the java threads at this >>>>>>>> point will be the Common-Cleaner, Finalizer, Reference Handler and >>>>>>>> Signal Handler. >>>>>>> >>>>>>> So, are you saying that we need the atomics? >>>>>>> >>>>>>> The java_lang_Class::create_mirror function isn't multi-thread >>>>>>> safe, >>>>>>> and must already be guarded by a lock (SystemDictionary_lock >>>>>>> AFAICT). >>>>>>> The increment in Unsafe_DefineAnonymousClass0, will only be done >>>>>>> once, for the single InstanceKlass instance in the CLD. And all >>>>>>> reads >>>>>>> of _keep_alive from the GC are done during safepoints. 
>>>>>> The anonymous class is inserted in the fixup mirror and fixup module >>>>>> lists during java_lang_Class::create_mirror() before it is made >>>>>> public >>>>>> or "published" as loaded. So the two instances where the reference >>>>>> count is incremented, Unsafe_DefineAnonymousClass0 and in >>>>>> java_lang_Class::create_mirror(), are guarded by a lock as well >>>>>> as the >>>>>> decrement in Unsafe_DefineAnonymousClass0. No other thread has >>>>>> access >>>>>> to the class during this time, as it is being loaded. >>>>>>> >>>>>>> How does ModuleEntryTable::patch_javabase_entries guard against >>>>>>> concurrent inserts into the _fixup_module_field_list list? >>>>>> That leaves the decrement in >>>>>> ModuleEntryTable::patch_javabase_entries() as possibly unguarded. >>>>>> This >>>>>> only occurs when the VM is called to define the module java.base. I >>>>>> believe this should be okay but will double check. >>>>> >>>>> One small change in modules.cpp/define_javabase_module() to ensure >>>>> that >>>>> only one definition attempt of java.base will occur and thus only one >>>>> call to ModuleEntryTable::patch_javabase_entries(). If a situation >>>>> arises where java.base is trying to be multiply defined, according to >>>>> the expected error conditions for JVM_DefineModule(), an >>>>> IllegalArgumentException should be thrown. >>>>> >>>>> I have also added a comment in classfile/classLoaderData.hpp >>>>> explaining >>>>> why _keep_alive does need to be defined volatile or atomic. >>>> >>>> Can you add assertions to check that _keep_alive is only modified >>>> under the protection of the lock (with a special case perhaps for >>>> the unguarded java.base case) ? >>> >>> Hi David, >>> >>> Thanks for the review. I misspoke when I indicated that the two >>> increments and the one decrement of the reference counter that occur >>> during a call to the Unsafe_DefineAnonymous0() method were guarded >>> under a lock. However, due to the way anonymous classes are created >>> only a single non-GC thread will have access to the _keep_alive >>> field during this time. And as Stefan indicates above, all reads of >>> _keep_alive from the GC are done during safepoints. Each anonymous >>> class, when defined, has a dedicated ClassLoaderData created for >>> it. No other class shares the anonymous class' name or CLD. Due to >>> this uniqueness, no other thread has knowledge about this anonymous >>> class while it is being defined. It is only upon return from >>> Unsafe_DefineAnonymous0(), that the anonymous class exists and other >>> threads, at that point, can potentially access it. >>> >> >> Ah interesting. Currently, this is true and why this is safe. If we >> change the JVM to have *some* anonymous classes share CLD with their >> host_class because the lifetimes are the same, then we'll have to use >> atomic operations. > > Well, then we wouldn't have to use the _keep_alive field because the > lifetime would be tracked by the loader of the host_class. The > _keep_alive field was added to handle the short window between the > creation of the InstanceKlass and the creation of the mirror > (java.lang.Class instances). If we have a class loader, then the > anonymous klass will be kept alive automatically, as long as the class > loader is reachable. There is no way to have the host_class unloaded while an anonymous class is loaded for that host_class. Yes, this is true. The class_loader is live because the code loading the anonymous class has the host_class holder (loader or mirror) as a reference. 
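The window that _keep_alive covers can be shown with a toy model; this is only an illustration of the ordering, not the HotSpot code, and the names mirror the patch rather than copy it.

#include <cassert>

// Toy model of the keep-alive window (illustration only; the real
// ClassLoaderData and call sequence are richer than this).
struct ClassLoaderData {
  int _keep_alive;                 // plain int: only the defining thread
                                   // touches it before the class is published
  ClassLoaderData() : _keep_alive(0) {}
  void inc_keep_alive() { assert(_keep_alive >= 0); _keep_alive++; }
  void dec_keep_alive() { assert(_keep_alive > 0);  _keep_alive--; }
  bool is_alive(bool has_mirror_root) const {
    // The GC only inspects this at a safepoint, so a plain read is enough.
    return _keep_alive > 0 || has_mirror_root;
  }
};

int main() {
  ClassLoaderData cld;             // dedicated CLD for one anonymous class
  cld.inc_keep_alive();            // klass parsed, mirror not created yet
  assert(cld.is_alive(false));     // refcount covers the mirror-less window

  // ... create_mirror() runs here and may inc/dec again for the fixup lists ...

  cld.dec_keep_alive();            // from here on the mirror is the GC root
  assert(cld.is_alive(true));
  return 0;
}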
Coleen > > StefanK >> >> http://cr.openjdk.java.net/~lfoltan/bug_jdk8152949_1/src/share/vm/classfile/classLoaderData.cpp.udiff.html >> >> >> Can you put one comment directly above the inc_keep_alive() and >> dec_keep_alive() functions to this effect here, just so we remember? >> >> // Anonymous classes have their own ClassLoaderData that is marked to >> keep alive while the class is being parsed, and >> // if the class appears on the module fixup list. >> // If anonymous classes are changed to share with host_class, this >> refcount needs to be changed to use atomic operations. >> >> *+ void ClassLoaderData::inc_keep_alive() {* >> *+ assert(_keep_alive >= 0, "Invalid keep alive count");* >> *+ _keep_alive++;* >> *+ }* >> *+ * >> *+ void ClassLoaderData::dec_keep_alive() {* >> *+ assert(_keep_alive > 0, "Invalid keep alive count");* >> *+ _keep_alive--;* >> *+ }* >> *+ * >> >> More below. >> >>> Thanks, >>> Lois >>> >>>> >>>> Thanks, >>>> David >>>> >>>>> Please review at: >>>>> >>>>> http://cr.openjdk.java.net/~lfoltan/bug_jdk8152949_1/ >> >> http://cr.openjdk.java.net/~lfoltan/bug_jdk8152949_1/src/share/vm/classfile/modules.cpp.frames.html >> >> >> I'm not sure how this relates to the bug. >> >> Otherwise, the change looks good. >> >> Thanks, >> Coleen >> >> >>>>> >>>>> Retesting in progress. >>>>> >>>>> Thanks, >>>>> Lois >>>>> >>>>>> >>>>>> Thanks, >>>>>> Lois >>>>>> >>>>>>> >>>>>>> thanks, >>>>>>> StefanK >>>>>>> >>>>>>> >>>>>>>> >>>>>>>> -Alan >>>>>>> >>>>>> >>>>> >>> >> > From christian.thalinger at oracle.com Tue Apr 19 22:10:13 2016 From: christian.thalinger at oracle.com (Christian Thalinger) Date: Tue, 19 Apr 2016 15:10:13 -0700 (PDT) Subject: RFR (XL): 8152664 - Support non-continuous CodeBlobs in HotSpot In-Reply-To: References: <20160407121221.GQ9504@rbackman> <20160419053212.GB19871@rbackman> <31054C59-BCB4-4C66-9AF9-FC92DC836821@oracle.com> Message-ID: > On Apr 19, 2016, at 11:35 AM, Volker Simonis wrote: > > > On Tue, Apr 19, 2016 at 6:49 PM, Christian Thalinger > wrote: > >> On Apr 19, 2016, at 4:30 AM, Volker Simonis > wrote: >> >> Hi Rickard, >> >> I just wanted to prepare the new webrev for 8151956 but I'm a little >> confused because I realized that your latest webrev already contains the >> changes which I had proposed for 8151956. >> >> But after thinking about it a little bit I think that's fine. If I prepare >> a patch for 8151956 which is intended to be pushed BEFORE 8152664 you'd had >> to adapt 8152664 to take care of the new changes introduced by 8151956. If >> I prepare a patch for 8151956 which is intended to be pushed AFTER 8152664 >> it would be hard to review it (because it will depend on 8152664) and we >> would get a change in the repo which would not build on PPC64 and AARCH64 >> which isn't nice either. >> >> So altogether I think it's fine to incorporate the fix for 8151956 into >> your change. Please only don't forget to close 8151956 as "fixed by >> 8152664" after you have pushed the changes for 8152664. >> >> I've verified that your last webrev builds and runs fine on Linux/ppc64 and >> AIX. You've also fixed all the issues I've addressed in my first mail to >> this thread and the typo in os_linux_aarch64.cpp found by Andrew - thanks! >> >> Some final nit-picking: >> >> - you still have the white-space only change in os_windows.cpp objected by >> Vladimir. >> >> - in codeBlob.cpp can you please update the following comments to reflect >> the new types: >> >> // Creates a simple CodeBlob. 
Sets up the size of the different >> regions.* CodeBlob::CodeBlob(const char* name, int header_size, int >> size, int frame_complete, int locs_size) {** assert(size == >> round_to(size, oopSize), "unaligned size");**+ >> RuntimeBlob::RuntimeBlob(const char* name, int header_size, int size, >> int frame_complete, int locs_size)* >> >> // Creates a CodeBlob from a CodeBuffer. Sets up the size of the >> different regions, // and copy code and relocation info.*! >> CodeBlob::CodeBlob(**! RuntimeBlob::RuntimeBlob(* >> >> >> - why do we need: >> >> *+ bool make_not_used() { return make_not_entrant(); }* >> >> it only forwards to make_not_entrant() and it is only used a single time in >> ciEnv.cpp: >> >> *! old->make_not_entrant();**! old->make_not_used();* > > I can answer this. make_not_used is virtual: > > virtual bool make_not_used() = 0; > > Can you guess why this is the case? :-) The reason is that the implementation is different for AOT compiled methods. > > > OK, I see. Thanks for the background info but now I can not refrain from commenting :) > > If SAP (or anybody else outside Oracle) would submit such a kind of XL change in order to better support let's say it's closed HPUX/Itanium port I don't think it would be even considered. I totally agree with you; it?s a double-standard and it?s frustrating. Believe me, if it were up to me the AOT implementation would be open-source. But I truly believe that the implementation will be opened up in a future JDK release. In general, it?s a nice refactoring and abstraction and could possibly help other projects like Sumatra if we ever get back to it. > > I don't want to reject these specific change (I came to terms with it :) but I think this should stand as bad example for changes which will not happen too often in the future. Conversely, if you have changes you want to get it you can use this as a precedent :-) > >> >> >> - I don't understand why we need both NMethodIterator and >> CompiledMethodIterator - they're virtually the same and nmethod is >> currently the only subclass of CompiledMethod. Can you please be more >> specific why you've changed some instances of NMethodIterator to >> CompiledMethodIterator and others not. Without background information this >> makes no sense to me. Also, the advance method in CompiledMethodIterator >> isn't "inline" while the one in NMethodIterator is - don't know if this >> will be a performance problem. >> >> The rest looks good to me but please notice that I still haven't looked at >> all changes (especially not on the agent/ and dtrace/ files). So you should >> get at least one more reviewer for such a big change. >> >> Regards, >> Volker >> >> >> >> On Tue, Apr 19, 2016 at 7:32 AM, Rickard B?ckman >>> wrote: >> >>> Here is the updated webrev, rebased and I think I have fixed all the >>> comments with one exception. >>> >>> I've avoided making CompiledMethodIterator and NMethodIterator a >>> template class for now. I agree we should do something to reuse the >>> parts that are identical but for now I think there will be a few more >>> changes to CompiledMethodIterator in an upcoming RFR. So can we hold off >>> with that change? >>> >>> Webrev: http://cr.openjdk.java.net/~rbackman/8152664.3/ >>> >>> Thanks >>> >>> On 04/07, Rickard B?ckman wrote: >>>> Hi, >>>> >>>> can I please have review for this patch please? >>>> >>>> So far CodeBlobs have required all the data (metadata, oops, code, etc) >>>> to be in one continuous blob With this patch we are looking to change >>>> that. 
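For orientation, the hierarchy being described looks roughly like the sketch below; member and method names are approximations, not copied from the webrev.

#include <cstddef>

typedef unsigned char* address;   // stand-in for HotSpot's address typedef

// Illustrative shape of the reworked hierarchy.
class CodeBlob {
 public:
  CodeBlob() : _code_begin(NULL), _code_end(NULL) {}
  virtual ~CodeBlob() {}
  // Regions are held as addresses instead of offsets from the blob start,
  // so they no longer have to live in one contiguous allocation.
  address code_begin() const { return _code_begin; }
  address code_end()   const { return _code_end; }
  virtual bool is_compiled() const { return false; }
 protected:
  address _code_begin;
  address _code_end;
};

// Stubs, adapters and other runtime-generated blobs keep the old behaviour.
class RuntimeBlob : public CodeBlob {};

// Shared base for anything that represents a compiled Java method.
class CompiledMethod : public CodeBlob {
 public:
  virtual bool is_compiled() const { return true; }
  virtual bool make_not_entrant() = 0;
  virtual bool make_not_used() = 0;     // may differ per implementation
};

// The JIT case: make_not_used() simply degrades to make_not_entrant().
class nmethod : public CompiledMethod {
 public:
  virtual bool make_not_entrant() { return true; /* real logic elided */ }
  virtual bool make_not_used()    { return make_not_entrant(); }
};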
It's been done by changing offsets in CodeBlob to addresses, >>>> making some methods virtual to allow different behavior and also >>>> creating a couple of new classes. CompiledMethod now sits inbetween >>>> CodeBlob and nmethod. >>>> >>>> CR: https://bugs.openjdk.java.net/browse/JDK-8152664 >>>> Webrev: http://cr.openjdk.java.net/~rbackman/8152664/ >>>> >>>> Thanks >>>> /R >>> /R From stuart.marks at oracle.com Tue Apr 19 22:21:02 2016 From: stuart.marks at oracle.com (Stuart Marks) Date: Tue, 19 Apr 2016 15:21:02 -0700 Subject: Fwd: RFR(s): 8153330: deprecate Runtime.traceInstructions & traceMethodCalls for removal In-Reply-To: <5716AD1A.3040902@oracle.com> References: <5716AD1A.3040902@oracle.com> Message-ID: <5716AF4E.4000103@oracle.com> I just posted this to core-libs-dev, and I figured I should run this by Hotspot-land. There isn't any double-secret-intrinsic magic that Hotspot does with these methods, is there? Anybody see any issue with deprecating and eventually removing these methods? They won't be removed immediately, but the proposal is to deprecate them in JDK 9, marked for removal in a future release. Thanks, s'marks -------- Forwarded Message -------- Subject: RFR(s): 8153330: deprecate Runtime.traceInstructions & traceMethodCalls for removal Date: Tue, 19 Apr 2016 15:11:38 -0700 From: Stuart Marks To: core-libs-dev Hi all, I missed a couple bits of cruft in the previous round of java.lang deprecations: the Runtime.traceInstructions() and traceMethodCalls() methods. Their implementations are empty. That is, they do absolutely nothing. They're only mentioned a couple times in the JDK, in CORBA (!) and in some RMI tests (!), and in a few symbol files. On grepcode.com, there are a couple uses, mainly attempting to enable these in response to some debugging flag being given. Thus, I propose these be deprecated for removal. Please review diff below. Thanks, s'marks diff -r 92280897299f src/java.base/share/classes/java/lang/Runtime.java --- a/src/java.base/share/classes/java/lang/Runtime.java Mon Apr 18 14:10:14 2016 -0700 +++ b/src/java.base/share/classes/java/lang/Runtime.java Tue Apr 19 15:04:25 2016 -0700 @@ -718,41 +718,27 @@ } /** - * Enables/Disables tracing of instructions. - * If the {@code boolean} argument is {@code true}, this - * method suggests that the Java virtual machine emit debugging - * information for each instruction in the virtual machine as it - * is executed. The format of this information, and the file or other - * output stream to which it is emitted, depends on the host environment. - * The virtual machine may ignore this request if it does not support - * this feature. The destination of the trace output is system - * dependent. - *
<p>
- * If the {@code boolean} argument is {@code false}, this - * method causes the virtual machine to stop performing the - * detailed instruction trace it is performing. - * - * @param on {@code true} to enable instruction tracing; - * {@code false} to disable this feature. + * Not implemented. + * + * @deprecated + * This method was intended to control instruction tracing. + * It has been superseded by JVM-specific tracing mechanisms. + * + * @param on ignored */ + @Deprecated(since="9", forRemoval=true) public void traceInstructions(boolean on) { } /** - * Enables/Disables tracing of method calls. - * If the {@code boolean} argument is {@code true}, this - * method suggests that the Java virtual machine emit debugging - * information for each method in the virtual machine as it is - * called. The format of this information, and the file or other output - * stream to which it is emitted, depends on the host environment. The - * virtual machine may ignore this request if it does not support - * this feature. - *
<p>
- * Calling this method with argument false suggests that the - * virtual machine cease emitting per-call debugging information. - * - * @param on {@code true} to enable instruction tracing; - * {@code false} to disable this feature. + * Not implemented. + * + * @deprecated + * This method was intended to control method call tracing. + * It has been superseded by JVM-specific tracing mechanisms. + * + * @param on ignored */ + @Deprecated(since="9", forRemoval=true) public void traceMethodCalls(boolean on) { } /** From dean.long at oracle.com Tue Apr 19 22:41:56 2016 From: dean.long at oracle.com (Dean Long) Date: Tue, 19 Apr 2016 15:41:56 -0700 (PDT) Subject: RFR (L) 8154580: Save mirror in interpreter frame to enable cleanups of CLDClosure In-Reply-To: <57168C17.40307@oracle.com> References: <57168C17.40307@oracle.com> Message-ID: Looks OK, except I didn't understand the reason for the change to InitArrayShortSize in globals_zero.hpp. dl On 4/19/2016 12:50 PM, Coleen Phillimore wrote: > Summary: GC walks the mirror using OopClosure rather than using > CLDClosure in oops_interpreted_do() > > See bug for more description and justification. The changes are large > but very redundant. The main change is in > TemplateInterpreterGenerator::generate_fixed_frame(). > > open webrev at http://cr.openjdk.java.net/~coleenp/8154580.01/webrev > bug link https://bugs.openjdk.java.net/browse/JDK-8154580 > > Tested with hotspot-runtime-nightly and gc-nightly tests. > > Need testing with ppc and aarch64 open code. I implemented the > changes but I can't test them. > > Thanks, > Coleen From david.holmes at oracle.com Tue Apr 19 23:59:34 2016 From: david.holmes at oracle.com (David Holmes) Date: Tue, 19 Apr 2016 16:59:34 -0700 (PDT) Subject: Fwd: RFR(s): 8153330: deprecate Runtime.traceInstructions & traceMethodCalls for removal In-Reply-To: <5716AF4E.4000103@oracle.com> References: <5716AD1A.3040902@oracle.com> <5716AF4E.4000103@oracle.com> Message-ID: <5716C666.5060006@oracle.com> Hi Stuart, On 20/04/2016 8:21 AM, Stuart Marks wrote: > I just posted this to core-libs-dev, and I figured I should run this by > Hotspot-land. There isn't any double-secret-intrinsic magic that Hotspot > does with these methods, is there? Anybody see any issue with > deprecating and eventually removing these methods? I can't see any references in hotspot. Dasvid > They won't be removed immediately, but the proposal is to deprecate them > in JDK 9, marked for removal in a future release. > > Thanks, > > s'marks > > > -------- Forwarded Message -------- > Subject: RFR(s): 8153330: deprecate Runtime.traceInstructions & > traceMethodCalls for removal > Date: Tue, 19 Apr 2016 15:11:38 -0700 > From: Stuart Marks > To: core-libs-dev > > Hi all, > > I missed a couple bits of cruft in the previous round of java.lang > deprecations: > the Runtime.traceInstructions() and traceMethodCalls() methods. > > Their implementations are empty. That is, they do absolutely nothing. > > They're only mentioned a couple times in the JDK, in CORBA (!) and in > some RMI > tests (!), and in a few symbol files. On grepcode.com, there are a > couple uses, > mainly attempting to enable these in response to some debugging flag > being given. > > Thus, I propose these be deprecated for removal. Please review diff below. 
> > Thanks, > > s'marks > > > > diff -r 92280897299f src/java.base/share/classes/java/lang/Runtime.java > --- a/src/java.base/share/classes/java/lang/Runtime.java Mon Apr 18 > 14:10:14 > 2016 -0700 > +++ b/src/java.base/share/classes/java/lang/Runtime.java Tue Apr 19 > 15:04:25 > 2016 -0700 > @@ -718,41 +718,27 @@ > } > > /** > - * Enables/Disables tracing of instructions. > - * If the {@code boolean} argument is {@code true}, this > - * method suggests that the Java virtual machine emit debugging > - * information for each instruction in the virtual machine as it > - * is executed. The format of this information, and the file or other > - * output stream to which it is emitted, depends on the host > environment. > - * The virtual machine may ignore this request if it does not support > - * this feature. The destination of the trace output is system > - * dependent. > - *
<p>
> - * If the {@code boolean} argument is {@code false}, this > - * method causes the virtual machine to stop performing the > - * detailed instruction trace it is performing. > - * > - * @param on {@code true} to enable instruction tracing; > - * {@code false} to disable this feature. > + * Not implemented. > + * > + * @deprecated > + * This method was intended to control instruction tracing. > + * It has been superseded by JVM-specific tracing mechanisms. > + * > + * @param on ignored > */ > + @Deprecated(since="9", forRemoval=true) > public void traceInstructions(boolean on) { } > > /** > - * Enables/Disables tracing of method calls. > - * If the {@code boolean} argument is {@code true}, this > - * method suggests that the Java virtual machine emit debugging > - * information for each method in the virtual machine as it is > - * called. The format of this information, and the file or other > output > - * stream to which it is emitted, depends on the host environment. The > - * virtual machine may ignore this request if it does not support > - * this feature. > - *
<p>
> - * Calling this method with argument false suggests that the > - * virtual machine cease emitting per-call debugging information. > - * > - * @param on {@code true} to enable instruction tracing; > - * {@code false} to disable this feature. > + * Not implemented. > + * > + * @deprecated > + * This method was intended to control method call tracing. > + * It has been superseded by JVM-specific tracing mechanisms. > + * > + * @param on ignored > */ > + @Deprecated(since="9", forRemoval=true) > public void traceMethodCalls(boolean on) { } > > /** > > > From coleen.phillimore at oracle.com Wed Apr 20 01:59:11 2016 From: coleen.phillimore at oracle.com (Coleen Phillimore) Date: Tue, 19 Apr 2016 21:59:11 -0400 Subject: Fwd: RFR (S) 8154566: os_linux.cpp parse_os_info gives non descriptive output on current SLES releases In-Reply-To: <57169BF0.4030804@oracle.com> References: <57167644.1080604@oracle.com> <571685A9.4060707@oracle.com> <57169BF0.4030804@oracle.com> Message-ID: <5716E26F.3020606@oracle.com> Jerry, Thank you for looking at this so quickly. On 4/19/16 4:58 PM, Gerald Thornbrugh wrote: > Hi Coleen, > > It looks like /etc/SuSE-release is deprecated starting with SLE 12 and > will be removed in the future. > > See: > > https://www.suse.com/releasenotes/x86_64/SUSE-SLED/12/ > > From the link: >> >> >> 5.3.7.2 Use /etc/os-release Instead of /etc/SuSE-release >> Report Bug >> >> # >> >> >> /Starting with SLE 12, /etc/SuSE-release file is deprecated. It >> should not be used to identify a SUSE Linux Enterprise system. This >> file will be removed in a future Service Pack or release./ >> >> The file |/etc/os-release| now is decisive. This file is a >> cross-distribution standard to identify a Linux system. For more >> information about the syntax, see the os-release man page ( |man >> os-release| ). >> > > So once the /etc/SuSE-release is removed it looks like the code will > find the /etc/lsb-release file again and > this problem may come back unless I am not understanding this correctly. > > I wonder if the /etc/os-release file should be moved before the > /etc/lsb-release file in the list? I think this seems reasonable. For the platforms that I have and that we support, it won't make a difference because on OEL, we get /etc/oracle-release and for Ubuntu would get PRETTY_NAME from /etc/os-release rather than the last line from lsb-release. So, yes, I'll move os-release above lsb-release. From the page and my google searching, this should be better. > > Understanding the implications of that change would take a significant > amount of testing. > I don't have access to SuSE or other distributions, but this seems like a safe thing to change. Coleen > Jerry >> >> -------- Forwarded Message -------- >> Subject: RFR (S) 8154566: os_linux.cpp parse_os_info gives non >> descriptive output on current SLES releases >> Date: Tue, 19 Apr 2016 11:17:40 -0700 (PDT) >> From: Coleen Phillimore >> To: hotspot-dev developers >> >> >> >> Summary: For SuSE, read the first line of the /etc/xrelease file, also >> get PRETTY_NAME from /etc/os_release >> Contributed-by:matthias.baesken at sap.com,coleen.phillimore at oracle.com >> >> See discussion: >> http://mail.openjdk.java.net/pipermail/hotspot-dev/2016-April/022720.html >> >> open webrev athttp://cr.openjdk.java.net/~coleenp/8154566.01/webrev >> bug linkhttps://bugs.openjdk.java.net/browse/JDK-8154566 >> >> Tested by Matthias on SuSE and myself on OEL 6.0 and Ubuntu. Ran >> hotspot/test/runtime jtreg tests. 
>> >> Thanks, >> Coleen >> >> > From coleen.phillimore at oracle.com Wed Apr 20 02:29:08 2016 From: coleen.phillimore at oracle.com (Coleen Phillimore) Date: Tue, 19 Apr 2016 19:29:08 -0700 (PDT) Subject: RFR (L) 8154580: Save mirror in interpreter frame to enable cleanups of CLDClosure In-Reply-To: <159176CE-675D-41A6-A13A-EBD7580E640C@oracle.com> References: <57168C17.40307@oracle.com> <159176CE-675D-41A6-A13A-EBD7580E640C@oracle.com> Message-ID: <5716E974.3030101@oracle.com> Chris, thank you for reviewing this. On 4/19/16 4:35 PM, Christian Thalinger wrote: >> On Apr 19, 2016, at 9:50 AM, Coleen Phillimore wrote: >> >> Summary: GC walks the mirror using OopClosure rather than using CLDClosure in oops_interpreted_do() >> >> See bug for more description and justification. The changes are large but very redundant. The main change is in TemplateInterpreterGenerator::generate_fixed_frame(). > + // Save oop Mirror (with padding) > + __ load_mirror(rscratch1, rmethod); > > + // get mirror and store it in the frame so that this Method* is never > + // reclaimed while it's running. > + Register mirror = LcpoolCache; > + __ load_mirror(mirror, Method); > > + // Push the mirror so this method isn't collected > + __ load_mirror(rdx, rbi); > > Please use the same comment on all platforms. Yes, that's inconsistent. I changed all platforms to have this comment: // Get mirror and store it in the frame as GC root for this Method* >> open webrev at http://cr.openjdk.java.net/~coleenp/8154580.01/webrev >> bug link https://bugs.openjdk.java.net/browse/JDK-8154580 > src/share/vm/runtime/frame.cpp > > // The method pointer in the frame might be the only path to the method's > // klass, and the klass needs to be kept alive while executing. The GCs > // don't trace through method pointers, so typically in similar situations > // the mirror or the class loader of the klass are installed as a GC root. > > - // To minimize the overhead of doing that here, we ask the GC to pass down a > - // closure that knows how to keep klasses alive given a ClassLoaderData. > - cld_f->do_cld(m->method_holder()->class_loader_data()); > - } > - > - if (m->is_native() PPC32_ONLY(&& m->is_static())) { > - f->do_oop(interpreter_frame_temp_oop_addr()); > - } > + // And it is here too. > + f->do_oop(interpreter_frame_mirror_addr()); > > That comment is kinda funny now. It still hints at the old-way of doing things but ?it is here too?. I reworded it as: // The method pointer in the frame might be the only path to the method's // klass, and the klass needs to be kept alive while executing. The GCs // don't trace through method pointers, so the mirror of the method's klass // is installed as a GC root. f->do_oop(interpreter_frame_mirror_addr()); >> Tested with hotspot-runtime-nightly and gc-nightly tests. >> >> Need testing with ppc and aarch64 open code. I implemented the changes but I can't test them. > One obvious bug is that you copied the __ as well: > > +void MacroAssembler::load_mirror(Register dst, Register method) { > + const int mirror_offset = in_bytes(Klass::java_mirror_offset()); > + __ ldr(dst, Address(rmethod, Method::const_offset())); > + __ ldr(dst, Address(dst, ConstMethod::constants_offset())); > + __ ldr(dst, Address(dst, ConstantPool::pool_holder_offset_in_bytes())); > + __ ldr(dst, Address(dst, mirror_offset)); > +} > > Other than that it looks fine to me. Nothing obvious stood out. Oh, yes, this was wrong on both ppc and aarch64. I hope someone that has access to this platforms can test this patch. Thank you! 
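Presumably the corrected aarch64 helper just drops the stray __ macro and uses the method parameter instead of rmethod; the reconstruction below shows the intent but is not the committed code.

// In macroAssembler_aarch64.cpp: inside a MacroAssembler member the
// instructions are emitted directly, without the __ shorthand.
void MacroAssembler::load_mirror(Register dst, Register method) {
  const int mirror_offset = in_bytes(Klass::java_mirror_offset());
  ldr(dst, Address(method, Method::const_offset()));
  ldr(dst, Address(dst, ConstMethod::constants_offset()));
  ldr(dst, Address(dst, ConstantPool::pool_holder_offset_in_bytes()));
  ldr(dst, Address(dst, mirror_offset));
}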
Coleen > >> Thanks, >> Coleen From david.holmes at oracle.com Wed Apr 20 02:32:02 2016 From: david.holmes at oracle.com (David Holmes) Date: Tue, 19 Apr 2016 19:32:02 -0700 (PDT) Subject: RFR (S): JDK-8152949: Jigsaw crash when Klass in _fixup_module_field_list is unloaded In-Reply-To: <57168B85.9090803@oracle.com> References: <570FEFAB.2070809@oracle.com> <570FFC4F.7020000@oracle.com> <5710E397.3020106@oracle.com> <5710E86F.3090606@oracle.com> <57111EBC.4000308@oracle.com> <571144ED.1050902@oracle.com> <5714876C.2050207@oracle.com> <5714C57C.1000109@oracle.com> <571542C2.5060707@oracle.com> <571593D5.7010504@oracle.com> <57168B85.9090803@oracle.com> Message-ID: <5716EA22.50400@oracle.com> Hi Lois, Please note I did not actually review this - the locking issue simply caught my attention. Thanks, David On 20/04/2016 5:48 AM, Lois Foltan wrote: > > On 4/18/2016 10:11 PM, David Holmes wrote: >> Hi Lois, >> >> On 19/04/2016 6:25 AM, Lois Foltan wrote: >>> >>> On 4/18/2016 7:31 AM, Lois Foltan wrote: >>>> >>>> On 4/18/2016 3:06 AM, Stefan Karlsson wrote: >>>>> On 2016-04-15 21:45, Alan Bateman wrote: >>>>>> >>>>>> On 15/04/2016 18:02, Lois Foltan wrote: >>>>>>> >>>>>>> Hi Stefan, >>>>>>> >>>>>>> In start up before module system initialization in complete I >>>>>>> believe the VM is single threaded, so the increment/decrement >>>>>>> reference counts do not need to be atomic. Adding it is a >>>>>>> defensive move in case the reference count is ever used passed >>>>>>> start up in the future. It kind of does seem a bit excessive, >>>>>>> sounds like you agree? >>>>>> There will be a number of threads running before the base module is >>>>>> defined to the VM. As things stand the the java threads at this >>>>>> point will be the Common-Cleaner, Finalizer, Reference Handler and >>>>>> Signal Handler. >>>>> >>>>> So, are you saying that we need the atomics? >>>>> >>>>> The java_lang_Class::create_mirror function isn't multi-thread safe, >>>>> and must already be guarded by a lock (SystemDictionary_lock AFAICT). >>>>> The increment in Unsafe_DefineAnonymousClass0, will only be done >>>>> once, for the single InstanceKlass instance in the CLD. And all reads >>>>> of _keep_alive from the GC are done during safepoints. >>>> The anonymous class is inserted in the fixup mirror and fixup module >>>> lists during java_lang_Class::create_mirror() before it is made public >>>> or "published" as loaded. So the two instances where the reference >>>> count is incremented, Unsafe_DefineAnonymousClass0 and in >>>> java_lang_Class::create_mirror(), are guarded by a lock as well as the >>>> decrement in Unsafe_DefineAnonymousClass0. No other thread has access >>>> to the class during this time, as it is being loaded. >>>>> >>>>> How does ModuleEntryTable::patch_javabase_entries guard against >>>>> concurrent inserts into the _fixup_module_field_list list? >>>> That leaves the decrement in >>>> ModuleEntryTable::patch_javabase_entries() as possibly unguarded. This >>>> only occurs when the VM is called to define the module java.base. I >>>> believe this should be okay but will double check. >>> >>> One small change in modules.cpp/define_javabase_module() to ensure that >>> only one definition attempt of java.base will occur and thus only one >>> call to ModuleEntryTable::patch_javabase_entries(). If a situation >>> arises where java.base is trying to be multiply defined, according to >>> the expected error conditions for JVM_DefineModule(), an >>> IllegalArgumentException should be thrown. 
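In outline the guard amounts to something like the sketch below; javabase_defined() is a made-up accessor name here, and the real check sits in modules.cpp, so treat this only as the shape of the change.

// Sketch of the single-definition guard for java.base (names are illustrative).
void define_javabase_module(/* module handle, version, location, packages, */ TRAPS) {
  MutexLocker ml(Module_lock);
  if (ModuleEntryTable::javabase_defined()) {   // hypothetical accessor
    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
              "Module java.base is already defined");
  }
  // ... define java.base exactly once, then patch the module fixup list ...
}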
>>> >>> I have also added a comment in classfile/classLoaderData.hpp explaining >>> why _keep_alive does need to be defined volatile or atomic. >> >> Can you add assertions to check that _keep_alive is only modified >> under the protection of the lock (with a special case perhaps for the >> unguarded java.base case) ? > > Hi David, > > Thanks for the review. I misspoke when I indicated that the two > increments and the one decrement of the reference counter that occur > during a call to the Unsafe_DefineAnonymous0() method were guarded under > a lock. However, due to the way anonymous classes are created only a > single non-GC thread will have access to the _keep_alive field during > this time. And as Stefan indicates above, all reads of _keep_alive from > the GC are done during safepoints. Each anonymous class, when defined, > has a dedicated ClassLoaderData created for it. No other class shares > the anonymous class' name or CLD. Due to this uniqueness, no other > thread has knowledge about this anonymous class while it is being > defined. It is only upon return from Unsafe_DefineAnonymous0(), that > the anonymous class exists and other threads, at that point, can > potentially access it. > > Thanks, > Lois > >> >> Thanks, >> David >> >>> Please review at: >>> >>> http://cr.openjdk.java.net/~lfoltan/bug_jdk8152949_1/ >>> >>> Retesting in progress. >>> >>> Thanks, >>> Lois >>> >>>> >>>> Thanks, >>>> Lois >>>> >>>>> >>>>> thanks, >>>>> StefanK >>>>> >>>>> >>>>>> >>>>>> -Alan >>>>> >>>> >>> > From coleen.phillimore at oracle.com Wed Apr 20 02:11:52 2016 From: coleen.phillimore at oracle.com (Coleen Phillimore) Date: Tue, 19 Apr 2016 19:11:52 -0700 (PDT) Subject: RFR (L) 8154580: Save mirror in interpreter frame to enable cleanups of CLDClosure In-Reply-To: References: <57168C17.40307@oracle.com> Message-ID: <5716E568.3080105@oracle.com> On 4/19/16 6:41 PM, Dean Long wrote: > Looks OK, except I didn't understand the reason for the change to > InitArrayShortSize in globals_zero.hpp. Thank you Dean. That change is something else I'm sponsoring which I needed to get Zero to compile. It won't be in this changeset. thanks, Coleen > > dl > > On 4/19/2016 12:50 PM, Coleen Phillimore wrote: >> Summary: GC walks the mirror using OopClosure rather than using >> CLDClosure in oops_interpreted_do() >> >> See bug for more description and justification. The changes are >> large but very redundant. The main change is in >> TemplateInterpreterGenerator::generate_fixed_frame(). >> >> open webrev at http://cr.openjdk.java.net/~coleenp/8154580.01/webrev >> bug link https://bugs.openjdk.java.net/browse/JDK-8154580 >> >> Tested with hotspot-runtime-nightly and gc-nightly tests. >> >> Need testing with ppc and aarch64 open code. I implemented the >> changes but I can't test them. >> >> Thanks, >> Coleen > From rickard.backman at oracle.com Wed Apr 20 07:43:30 2016 From: rickard.backman at oracle.com (Rickard =?iso-8859-1?Q?B=E4ckman?=) Date: Wed, 20 Apr 2016 09:43:30 +0200 Subject: RFR (XL): 8152664 - Support non-continuous CodeBlobs in HotSpot In-Reply-To: References: <20160407121221.GQ9504@rbackman> <20160419053212.GB19871@rbackman> Message-ID: <20160420074330.GG19871@rbackman> Volker, sorry about the confusion. To me the only thing that made sense was to make the push of the two commits atomically. I didn't intend to include your changes in the webrev but I forgot to pop the changes of my mq. I hope you got the answers on some of the oddities from Christian. But to add to that. 
The reason we have NMethodIterator and CompiledMethodIterator is that we sometimes want to make sure we only iterate over nmethods. Hope that works. Here is an updated webrev with the other changes you commented on: http://cr.openjdk.java.net/~rbackman/8152664.4/ Thanks /R On 04/19, Volker Simonis wrote: > Hi Rickard, > > I just wanted to prepare the new webrev for 8151956 but I'm a little > confused because I realized that your latest webrev already contains the > changes which I had proposed for 8151956. > > But after thinking about it a little bit I think that's fine. If I prepare > a patch for 8151956 which is intended to be pushed BEFORE 8152664 you'd had > to adapt 8152664 to take care of the new changes introduced by 8151956. If > I prepare a patch for 8151956 which is intended to be pushed AFTER 8152664 > it would be hard to review it (because it will depend on 8152664) and we > would get a change in the repo which would not build on PPC64 and AARCH64 > which isn't nice either. > > So altogether I think it's fine to incorporate the fix for 8151956 into > your change. Please only don't forget to close 8151956 as "fixed by > 8152664" after you have pushed the changes for 8152664. > > I've verified that your last webrev builds and runs fine on Linux/ppc64 and > AIX. You've also fixed all the issues I've addressed in my first mail to > this thread and the typo in os_linux_aarch64.cpp found by Andrew - thanks! > > Some final nit-picking: > > - you still have the white-space only change in os_windows.cpp objected by > Vladimir. > > - in codeBlob.cpp can you please update the following comments to reflect > the new types: > > // Creates a simple CodeBlob. Sets up the size of the different > regions.* CodeBlob::CodeBlob(const char* name, int header_size, int > size, int frame_complete, int locs_size) {** assert(size == > round_to(size, oopSize), "unaligned size");**+ > RuntimeBlob::RuntimeBlob(const char* name, int header_size, int size, > int frame_complete, int locs_size)* > > // Creates a CodeBlob from a CodeBuffer. Sets up the size of the > different regions, // and copy code and relocation info.*! > CodeBlob::CodeBlob(**! RuntimeBlob::RuntimeBlob(* > > > - why do we need: > > *+ bool make_not_used() { return make_not_entrant(); }* > > it only forwards to make_not_entrant() and it is only used a single time in > ciEnv.cpp: > > *! old->make_not_entrant();**! old->make_not_used();* > > > - I don't understand why we need both NMethodIterator and > CompiledMethodIterator - they're virtually the same and nmethod is > currently the only subclass of CompiledMethod. Can you please be more > specific why you've changed some instances of NMethodIterator to > CompiledMethodIterator and others not. Without background information this > makes no sense to me. Also, the advance method in CompiledMethodIterator > isn't "inline" while the one in NMethodIterator is - don't know if this > will be a performance problem. > > The rest looks good to me but please notice that I still haven't looked at > all changes (especially not on the agent/ and dtrace/ files). So you should > get at least one more reviewer for such a big change. > > Regards, > Volker > > > > On Tue, Apr 19, 2016 at 7:32 AM, Rickard B?ckman > wrote: > > > Here is the updated webrev, rebased and I think I have fixed all the > > comments with one exception. > > > > I've avoided making CompiledMethodIterator and NMethodIterator a > > template class for now. 
I agree we should do something to reuse the > > parts that are identical but for now I think there will be a few more > > changes to CompiledMethodIterator in an upcoming RFR. So can we hold off > > with that change? > > > > Webrev: http://cr.openjdk.java.net/~rbackman/8152664.3/ > > > > Thanks > > > > On 04/07, Rickard B?ckman wrote: > > > Hi, > > > > > > can I please have review for this patch please? > > > > > > So far CodeBlobs have required all the data (metadata, oops, code, etc) > > > to be in one continuous blob With this patch we are looking to change > > > that. It's been done by changing offsets in CodeBlob to addresses, > > > making some methods virtual to allow different behavior and also > > > creating a couple of new classes. CompiledMethod now sits inbetween > > > CodeBlob and nmethod. > > > > > > CR: https://bugs.openjdk.java.net/browse/JDK-8152664 > > > Webrev: http://cr.openjdk.java.net/~rbackman/8152664/ > > > > > > Thanks > > > /R > > /R > > From adinn at redhat.com Wed Apr 20 08:01:05 2016 From: adinn at redhat.com (Andrew Dinn) Date: Wed, 20 Apr 2016 09:01:05 +0100 Subject: RFR (XL): 8152664 - Support non-continuous CodeBlobs in HotSpot In-Reply-To: References: <20160407121221.GQ9504@rbackman> <20160419053212.GB19871@rbackman> <31054C59-BCB4-4C66-9AF9-FC92DC836821@oracle.com> Message-ID: <57173741.2010506@redhat.com> On 19/04/16 23:10, Christian Thalinger wrote: > >> On Apr 19, 2016, at 11:35 AM, Volker Simonis >> wrote: >> If SAP (or anybody else outside Oracle) would submit such a kind of >> XL change in order to better support let's say it's closed >> HPUX/Itanium port I don't think it would be even considered. > > I totally agree with you; it?s a double-standard and it?s > frustrating. Believe me, if it were up to me the AOT implementation > would be open-source. But I truly believe that the implementation > will be opened up in a future JDK release. Well, if not then someone might have a very nice time implementing an open implementation. Our community is very resourceful :-) > In general, it?s a nice refactoring and abstraction and could > possibly help other projects like Sumatra if we ever get back to it. Or indeed pave the way for an open implementation of AOT. >> I don't want to reject these specific change (I came to terms with >> it :) but I think this should stand as bad example for changes >> which will not happen too often in the future. > > Conversely, if you have changes you want to get it you can use this > as a precedent :-) I have already filed that point for easy retrieval . . . regards, Andrew Dinn ----------- Senior Principal Software Engineer Red Hat UK Ltd Registered in UK and Wales under Company Registration No. 3798903 Directors: Michael Cunningham (US), Michael O'Neill (Ireland), Paul Argiry (US) From aph at redhat.com Wed Apr 20 08:21:42 2016 From: aph at redhat.com (Andrew Haley) Date: Wed, 20 Apr 2016 09:21:42 +0100 Subject: RFR (XL): 8152664 - Support non-continuous CodeBlobs in HotSpot In-Reply-To: References: <20160407121221.GQ9504@rbackman> <20160419053212.GB19871@rbackman> <31054C59-BCB4-4C66-9AF9-FC92DC836821@oracle.com> Message-ID: <57173C16.2080609@redhat.com> On 19/04/16 23:10, Christian Thalinger wrote: >> > >> > I don't want to reject these specific change (I came to terms >> > with it :) but I think this should stand as bad example for >> > changes which will not happen too often in the future. > > Conversely, if you have changes you want to get it you can use this > as a precedent :-) I think that's the best way to view it. 
In general, we want to be as open to innovation as we can. If that means some stubs or hooks in the core OpenJDK for features or ports which aren't (yet) part of OpenJDK that should be OK. Of course, we don't want extra overhead or fragility. I'm going to raise this issue with the GB. Andrew. From matthias.baesken at sap.com Wed Apr 20 08:50:42 2016 From: matthias.baesken at sap.com (Baesken, Matthias) Date: Wed, 20 Apr 2016 08:50:42 +0000 Subject: Fwd: RFR (S) 8154566: os_linux.cpp parse_os_info gives non-descriptive output on current SLES releases In-Reply-To: <4e2e2d419d6641c2a680c304899cfe16@derote13de22.global.corp.sap> References: <4e2e2d419d6641c2a680c304899cfe16@derote13de22.global.corp.sap> Message-ID: Hello Coleen and Gerald, I like the idea of moving os-release above lsb-release . On SUSE 12 and 12.1 /etc/SuSE-release is still present, however it might go away in future releases . First line of /etc/SuSE-release is : SUSE Linux Enterprise Server 12 (x86_64) and SUSE Linux Enterprise Server 12 (ppc64le) on the SLES 12 / 12.1 machines I have access to , and on these 2 machines , the PRETTY_NAME line of /etc/os-release is PRETTY_NAME="SUSE Linux Enterprise Server 12" and PRETTY_NAME="SUSE Linux Enterprise Server 12 SP1" Regards, Matthias > > Jerry, Thank you for looking at this so quickly. > > On 4/19/16 4:58 PM, Gerald Thornbrugh wrote: > > Hi Coleen, > > > > It looks like /etc/SuSE-release is deprecated starting with SLE 12 and > > will be removed in the future. > > > > See: > > > > https://www.suse.com/releasenotes/x86_64/SUSE-SLED/12/ > > > > From the link: > >> > >> > >> 5.3.7.2 Use /etc/os-release Instead of /etc/SuSE-release > >> Report Bug > >> > rprise%20Desktop%2012&component=Documentation&short_desc=[doc]+& > comment=5.3.7.2%20%20Use%20%2Fetc%2Fos- > release%20Instead%20of%20%2Fetc%2FSuSE- > release%0A%0Ahttps%3A%2F%2Fwww.suse.com%2Freleasenotes%2Fx86_6 > 4%2FSUSE-SLED%2F12%2F%23fate-316268> > >> # > >> 316268> > >> > >> /Starting with SLE 12, /etc/SuSE-release file is deprecated. It > >> should not be used to identify a SUSE Linux Enterprise system. This > >> file will be removed in a future Service Pack or release./ > >> > >> The file |/etc/os-release| now is decisive. This file is a > >> cross-distribution standard to identify a Linux system. For more > >> information about the syntax, see the os-release man page ( |man > >> os-release| ). > >> > > > > So once the /etc/SuSE-release is removed it looks like the code will > > find the /etc/lsb-release file again and > > this problem may come back unless I am not understanding this correctly. > > > > I wonder if the /etc/os-release file should be moved before the > > /etc/lsb-release file in the list? > > I think this seems reasonable. For the platforms that I have and that > we support, it won't make a difference because on OEL, we get > /etc/oracle-release and for Ubuntu would get PRETTY_NAME from > /etc/os-release rather than the last line from lsb-release. > > So, yes, I'll move os-release above lsb-release. From the page and my > google searching, this should be better. > > > > > Understanding the implications of that change would take a significant > > amount of testing. > > > > I don't have access to SuSE or other distributions, but this seems like > a safe thing to change. 
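Concretely, pulling PRETTY_NAME out of /etc/os-release is only a few lines; the standalone sketch below (not the os_linux.cpp code itself) shows the kind of parsing involved and would print exactly the SLES strings quoted above.

#include <cstdio>
#include <cstring>
#include <cstddef>

// Standalone illustration: print the PRETTY_NAME value from /etc/os-release.
// The real parse_os_info() walks a list of release files and consults this
// file only when the distro-specific ones are missing (or, after the
// proposed reordering, before /etc/lsb-release).
int main() {
  FILE* fp = fopen("/etc/os-release", "r");
  if (fp == NULL) return 1;

  char line[256];
  while (fgets(line, sizeof(line), fp) != NULL) {
    if (strncmp(line, "PRETTY_NAME=", 12) == 0) {
      char* value = line + 12;                    // skip the key
      value[strcspn(value, "\n")] = '\0';         // drop the trailing newline
      size_t len = strlen(value);
      if (len >= 2 && value[0] == '"' && value[len - 1] == '"') {
        value[len - 1] = '\0';                    // strip surrounding quotes
        value++;
      }
      printf("%s\n", value);   // e.g. SUSE Linux Enterprise Server 12 SP1
      break;
    }
  }
  fclose(fp);
  return 0;
}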
> > Coleen > > > Jerry > >> > >> -------- Forwarded Message -------- > >> Subject: RFR (S) 8154566: os_linux.cpp parse_os_info gives non > >> descriptive output on current SLES releases > >> Date: Tue, 19 Apr 2016 11:17:40 -0700 (PDT) > >> From: Coleen Phillimore > >> To: hotspot-dev developers > >> > >> > >> > >> Summary: For SuSE, read the first line of the /etc/xrelease file, also > >> get PRETTY_NAME from /etc/os_release > >> Contributed- > by:matthias.baesken at sap.com,coleen.phillimore at oracle.com > >> > >> See discussion: > >> http://mail.openjdk.java.net/pipermail/hotspot-dev/2016- > April/022720.html > >> > >> open webrev athttp://cr.openjdk.java.net/~coleenp/8154566.01/webrev > >> bug linkhttps://bugs.openjdk.java.net/browse/JDK-8154566 > >> > >> Tested by Matthias on SuSE and myself on OEL 6.0 and Ubuntu. Ran > >> hotspot/test/runtime jtreg tests. > >> > >> Thanks, > >> Coleen > >> > >> > > From lois.foltan at oracle.com Wed Apr 20 10:42:19 2016 From: lois.foltan at oracle.com (Lois Foltan) Date: Wed, 20 Apr 2016 06:42:19 -0400 Subject: RFR (S): JDK-8152949: Jigsaw crash when Klass in _fixup_module_field_list is unloaded In-Reply-To: <571693FA.3060807@oracle.com> References: <570FEFAB.2070809@oracle.com> <570FFC4F.7020000@oracle.com> <5710E397.3020106@oracle.com> <5710E86F.3090606@oracle.com> <57111EBC.4000308@oracle.com> <571144ED.1050902@oracle.com> <5714876C.2050207@oracle.com> <5714C57C.1000109@oracle.com> <571542C2.5060707@oracle.com> <571593D5.7010504@oracle.com> <57168B85.9090803@oracle.com> <571693FA.3060807@oracle.com> Message-ID: <57175D0B.6050604@oracle.com> On 4/19/2016 4:24 PM, Coleen Phillimore wrote: > > Hi, this is getting long. > > On 4/19/16 3:48 PM, Lois Foltan wrote: >> >> On 4/18/2016 10:11 PM, David Holmes wrote: >>> Hi Lois, >>> >>> On 19/04/2016 6:25 AM, Lois Foltan wrote: >>>> >>>> On 4/18/2016 7:31 AM, Lois Foltan wrote: >>>>> >>>>> On 4/18/2016 3:06 AM, Stefan Karlsson wrote: >>>>>> On 2016-04-15 21:45, Alan Bateman wrote: >>>>>>> >>>>>>> On 15/04/2016 18:02, Lois Foltan wrote: >>>>>>>> >>>>>>>> Hi Stefan, >>>>>>>> >>>>>>>> In start up before module system initialization in complete I >>>>>>>> believe the VM is single threaded, so the increment/decrement >>>>>>>> reference counts do not need to be atomic. Adding it is a >>>>>>>> defensive move in case the reference count is ever used passed >>>>>>>> start up in the future. It kind of does seem a bit excessive, >>>>>>>> sounds like you agree? >>>>>>> There will be a number of threads running before the base module is >>>>>>> defined to the VM. As things stand the the java threads at this >>>>>>> point will be the Common-Cleaner, Finalizer, Reference Handler and >>>>>>> Signal Handler. >>>>>> >>>>>> So, are you saying that we need the atomics? >>>>>> >>>>>> The java_lang_Class::create_mirror function isn't multi-thread safe, >>>>>> and must already be guarded by a lock (SystemDictionary_lock >>>>>> AFAICT). >>>>>> The increment in Unsafe_DefineAnonymousClass0, will only be done >>>>>> once, for the single InstanceKlass instance in the CLD. And all >>>>>> reads >>>>>> of _keep_alive from the GC are done during safepoints. >>>>> The anonymous class is inserted in the fixup mirror and fixup module >>>>> lists during java_lang_Class::create_mirror() before it is made >>>>> public >>>>> or "published" as loaded. 
So the two instances where the reference >>>>> count is incremented, Unsafe_DefineAnonymousClass0 and in >>>>> java_lang_Class::create_mirror(), are guarded by a lock as well as >>>>> the >>>>> decrement in Unsafe_DefineAnonymousClass0. No other thread has >>>>> access >>>>> to the class during this time, as it is being loaded. >>>>>> >>>>>> How does ModuleEntryTable::patch_javabase_entries guard against >>>>>> concurrent inserts into the _fixup_module_field_list list? >>>>> That leaves the decrement in >>>>> ModuleEntryTable::patch_javabase_entries() as possibly unguarded. >>>>> This >>>>> only occurs when the VM is called to define the module java.base. I >>>>> believe this should be okay but will double check. >>>> >>>> One small change in modules.cpp/define_javabase_module() to ensure >>>> that >>>> only one definition attempt of java.base will occur and thus only one >>>> call to ModuleEntryTable::patch_javabase_entries(). If a situation >>>> arises where java.base is trying to be multiply defined, according to >>>> the expected error conditions for JVM_DefineModule(), an >>>> IllegalArgumentException should be thrown. >>>> >>>> I have also added a comment in classfile/classLoaderData.hpp >>>> explaining >>>> why _keep_alive does need to be defined volatile or atomic. >>> >>> Can you add assertions to check that _keep_alive is only modified >>> under the protection of the lock (with a special case perhaps for >>> the unguarded java.base case) ? >> >> Hi David, >> >> Thanks for the review. I misspoke when I indicated that the two >> increments and the one decrement of the reference counter that occur >> during a call to the Unsafe_DefineAnonymous0() method were guarded >> under a lock. However, due to the way anonymous classes are created >> only a single non-GC thread will have access to the _keep_alive field >> during this time. And as Stefan indicates above, all reads of >> _keep_alive from the GC are done during safepoints. Each anonymous >> class, when defined, has a dedicated ClassLoaderData created for it. >> No other class shares the anonymous class' name or CLD. Due to this >> uniqueness, no other thread has knowledge about this anonymous class >> while it is being defined. It is only upon return from >> Unsafe_DefineAnonymous0(), that the anonymous class exists and other >> threads, at that point, can potentially access it. >> > > Ah interesting. Currently, this is true and why this is safe. If we > change the JVM to have *some* anonymous classes share CLD with their > host_class because the lifetimes are the same, then we'll have to use > atomic operations. > > http://cr.openjdk.java.net/~lfoltan/bug_jdk8152949_1/src/share/vm/classfile/classLoaderData.cpp.udiff.html > > > Can you put one comment directly above the inc_keep_alive() and > dec_keep_alive() functions to this effect here, just so we remember? Hi Coleen, I will add the comment before I commit. I might expand a bit on your last sentence a bit. > > // Anonymous classes have their own ClassLoaderData that is marked to > keep alive while the class is being parsed, and > // if the class appears on the module fixup list. > // If anonymous classes are changed to share with host_class, this > refcount needs to be changed to use atomic operations. 
> > *+ void ClassLoaderData::inc_keep_alive() {* > *+ assert(_keep_alive >= 0, "Invalid keep alive count");* > *+ _keep_alive++;* > *+ }* > *+ * > *+ void ClassLoaderData::dec_keep_alive() {* > *+ assert(_keep_alive > 0, "Invalid keep alive count");* > *+ _keep_alive--;* > *+ }* > *+ * > > More below. > >> Thanks, >> Lois >> >>> >>> Thanks, >>> David >>> >>>> Please review at: >>>> >>>> http://cr.openjdk.java.net/~lfoltan/bug_jdk8152949_1/ > > http://cr.openjdk.java.net/~lfoltan/bug_jdk8152949_1/src/share/vm/classfile/modules.cpp.frames.html > > > I'm not sure how this relates to the bug. This change ensures that java.base will not be multiply defined and thus only one call to ModuleEntryTable::patch_javabase_entries() will occur. A decrement of the reference count happens when an anonymous class is on the fixup module list post patching it with java.base > > Otherwise, the change looks good. Thanks again! > > Thanks, > Coleen > > >>>> >>>> Retesting in progress. >>>> >>>> Thanks, >>>> Lois >>>> >>>>> >>>>> Thanks, >>>>> Lois >>>>> >>>>>> >>>>>> thanks, >>>>>> StefanK >>>>>> >>>>>> >>>>>>> >>>>>>> -Alan >>>>>> >>>>> >>>> >> > From lois.foltan at oracle.com Wed Apr 20 10:47:44 2016 From: lois.foltan at oracle.com (Lois Foltan) Date: Wed, 20 Apr 2016 06:47:44 -0400 Subject: RFR (S): JDK-8152949: Jigsaw crash when Klass in _fixup_module_field_list is unloaded In-Reply-To: References: <570FEFAB.2070809@oracle.com> <570FFC4F.7020000@oracle.com> <5710E397.3020106@oracle.com> <5710E86F.3090606@oracle.com> <57111EBC.4000308@oracle.com> <571144ED.1050902@oracle.com> <5714876C.2050207@oracle.com> <5714C57C.1000109@oracle.com> <571542C2.5060707@oracle.com> <571593D5.7010504@oracle.com> <57168B85.9090803@oracle.com> <571693FA.3060807@oracle.com> Message-ID: <57175E50.9050501@oracle.com> On 4/19/2016 4:37 PM, Christian Thalinger wrote: >> On Apr 19, 2016, at 10:24 AM, Coleen Phillimore wrote: >> >> >> Hi, this is getting long. >> >> On 4/19/16 3:48 PM, Lois Foltan wrote: >>> On 4/18/2016 10:11 PM, David Holmes wrote: >>>> Hi Lois, >>>> >>>> On 19/04/2016 6:25 AM, Lois Foltan wrote: >>>>> On 4/18/2016 7:31 AM, Lois Foltan wrote: >>>>>> On 4/18/2016 3:06 AM, Stefan Karlsson wrote: >>>>>>> On 2016-04-15 21:45, Alan Bateman wrote: >>>>>>>> On 15/04/2016 18:02, Lois Foltan wrote: >>>>>>>>> Hi Stefan, >>>>>>>>> >>>>>>>>> In start up before module system initialization in complete I >>>>>>>>> believe the VM is single threaded, so the increment/decrement >>>>>>>>> reference counts do not need to be atomic. Adding it is a >>>>>>>>> defensive move in case the reference count is ever used passed >>>>>>>>> start up in the future. It kind of does seem a bit excessive, >>>>>>>>> sounds like you agree? >>>>>>>> There will be a number of threads running before the base module is >>>>>>>> defined to the VM. As things stand the the java threads at this >>>>>>>> point will be the Common-Cleaner, Finalizer, Reference Handler and >>>>>>>> Signal Handler. >>>>>>> So, are you saying that we need the atomics? >>>>>>> >>>>>>> The java_lang_Class::create_mirror function isn't multi-thread safe, >>>>>>> and must already be guarded by a lock (SystemDictionary_lock AFAICT). >>>>>>> The increment in Unsafe_DefineAnonymousClass0, will only be done >>>>>>> once, for the single InstanceKlass instance in the CLD. And all reads >>>>>>> of _keep_alive from the GC are done during safepoints. 
>>>>>> The anonymous class is inserted in the fixup mirror and fixup module >>>>>> lists during java_lang_Class::create_mirror() before it is made public >>>>>> or "published" as loaded. So the two instances where the reference >>>>>> count is incremented, Unsafe_DefineAnonymousClass0 and in >>>>>> java_lang_Class::create_mirror(), are guarded by a lock as well as the >>>>>> decrement in Unsafe_DefineAnonymousClass0. No other thread has access >>>>>> to the class during this time, as it is being loaded. >>>>>>> How does ModuleEntryTable::patch_javabase_entries guard against >>>>>>> concurrent inserts into the _fixup_module_field_list list? >>>>>> That leaves the decrement in >>>>>> ModuleEntryTable::patch_javabase_entries() as possibly unguarded. This >>>>>> only occurs when the VM is called to define the module java.base. I >>>>>> believe this should be okay but will double check. >>>>> One small change in modules.cpp/define_javabase_module() to ensure that >>>>> only one definition attempt of java.base will occur and thus only one >>>>> call to ModuleEntryTable::patch_javabase_entries(). If a situation >>>>> arises where java.base is trying to be multiply defined, according to >>>>> the expected error conditions for JVM_DefineModule(), an >>>>> IllegalArgumentException should be thrown. >>>>> >>>>> I have also added a comment in classfile/classLoaderData.hpp explaining >>>>> why _keep_alive does need to be defined volatile or atomic. >>>> Can you add assertions to check that _keep_alive is only modified under the protection of the lock (with a special case perhaps for the unguarded java.base case) ? >>> Hi David, >>> >>> Thanks for the review. I misspoke when I indicated that the two increments and the one decrement of the reference counter that occur during a call to the Unsafe_DefineAnonymous0() method were guarded under a lock. However, due to the way anonymous classes are created only a single non-GC thread will have access to the _keep_alive field during this time. And as Stefan indicates above, all reads of _keep_alive from the GC are done during safepoints. Each anonymous class, when defined, has a dedicated ClassLoaderData created for it. No other class shares the anonymous class' name or CLD. Due to this uniqueness, no other thread has knowledge about this anonymous class while it is being defined. It is only upon return from Unsafe_DefineAnonymous0(), that the anonymous class exists and other threads, at that point, can potentially access it. >>> >> Ah interesting. Currently, this is true and why this is safe. If we change the JVM to have *some* anonymous classes share CLD with their host_class because the lifetimes are the same, then we'll have to use atomic operations. >> >> http://cr.openjdk.java.net/~lfoltan/bug_jdk8152949_1/src/share/vm/classfile/classLoaderData.cpp.udiff.html >> >> Can you put one comment directly above the inc_keep_alive() and dec_keep_alive() functions to this effect here, just so we remember? > Why don?t we just use atomic operations and be done with it? Hi Christian, That was a discussion point earlier on this review thread and there were concerns raised that it may be excessive and potentially confusing for someone reading the code because it indicates that the VM is in a multi-threaded context when defining the anonymous class when it is not. 
Thanks for the suggestion and review, Lois > >> // Anonymous classes have their own ClassLoaderData that is marked to keep alive while the class is being parsed, and >> // if the class appears on the module fixup list. >> // If anonymous classes are changed to share with host_class, this refcount needs to be changed to use atomic operations. >> >> *+ void ClassLoaderData::inc_keep_alive() {* >> *+ assert(_keep_alive >= 0, "Invalid keep alive count");* >> *+ _keep_alive++;* >> *+ }* >> *+ * >> *+ void ClassLoaderData::dec_keep_alive() {* >> *+ assert(_keep_alive > 0, "Invalid keep alive count");* >> *+ _keep_alive--;* >> *+ }* >> *+ * >> >> More below. >> >>> Thanks, >>> Lois >>> >>>> Thanks, >>>> David >>>> >>>>> Please review at: >>>>> >>>>> http://cr.openjdk.java.net/~lfoltan/bug_jdk8152949_1/ >> http://cr.openjdk.java.net/~lfoltan/bug_jdk8152949_1/src/share/vm/classfile/modules.cpp.frames.html >> >> I'm not sure how this relates to the bug. >> >> Otherwise, the change looks good. >> >> Thanks, >> Coleen >> >> >>>>> Retesting in progress. >>>>> >>>>> Thanks, >>>>> Lois >>>>> >>>>>> Thanks, >>>>>> Lois >>>>>> >>>>>>> thanks, >>>>>>> StefanK >>>>>>> >>>>>>> >>>>>>>> -Alan From aph at redhat.com Wed Apr 20 11:34:31 2016 From: aph at redhat.com (Andrew Haley) Date: Wed, 20 Apr 2016 12:34:31 +0100 Subject: RFR: 8154739: AArch64: TemplateTable::fast_xaccess loads in wrong mode Message-ID: <57176947.7090501@redhat.com> I've seen weird unexplained (and unrepeatable) segfaults during JDK builds for years. They're vary rare, and I thought it was to do with flaky prototype hardware -- or at least that's how I kidded myself. Yesterday I found a culprit. It's a load in one of the bytecode accelerators, the one which replaces _aload_0, _fast_igetfield. Instead of a 32-bit word load, it's a 64-bit xword load. So how can this lead to a crash? Well, if the object in question is at the very end of the heap and the integer field is at the very end of the object, you'll get a read which spills over onto the next page in memory. This requires quite a coincidence of events, but it happens. Also, I discovered that the volatile load case had only a LoadLoad: it needs a LoadStore too. Thanks, Andrew. 
# HG changeset patch # User aph # Date 1461150850 0 # Wed Apr 20 11:14:10 2016 +0000 # Node ID 0df9b5892b864f27524480a698fe2550b4f9e531 # Parent 57f9554a28f1858c009b4c4f0fdcb42079f4c447 8154739: AArch64: TemplateTable::fast_xaccess loads in wrong mode Reviewed-by: roland diff --git a/src/cpu/aarch64/vm/templateTable_aarch64.cpp b/src/cpu/aarch64/vm/templateTable_aarch64.cpp --- a/src/cpu/aarch64/vm/templateTable_aarch64.cpp +++ b/src/cpu/aarch64/vm/templateTable_aarch64.cpp @@ -2982,7 +2982,7 @@ __ null_check(r0); switch (state) { case itos: - __ ldr(r0, Address(r0, r1, Address::lsl(0))); + __ ldrw(r0, Address(r0, r1, Address::lsl(0))); break; case atos: __ load_heap_oop(r0, Address(r0, r1, Address::lsl(0))); @@ -3000,7 +3000,7 @@ __ ldrw(r3, Address(r2, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()))); __ tbz(r3, ConstantPoolCacheEntry::is_volatile_shift, notVolatile); - __ membar(MacroAssembler::LoadLoad); + __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore); __ bind(notVolatile); } From stefan.johansson at oracle.com Wed Apr 20 11:36:29 2016 From: stefan.johansson at oracle.com (Stefan Johansson) Date: Wed, 20 Apr 2016 13:36:29 +0200 Subject: RFR: 8017629: G1: UseSHM in combination with a G1HeapRegionSize > os::large_page_size() falls back to use small pages In-Reply-To: <5714B13F.7080307@oracle.com> References: <570B8481.8010108@oracle.com> <570BAC1B.7040806@oracle.com> <570D1747.2020508@oracle.com> <570E2C45.7090201@oracle.com> <5714B13F.7080307@oracle.com> Message-ID: <571769BD.6000700@oracle.com> Hi StefanK, On 2016-04-18 12:04, Stefan Karlsson wrote: > Hi Thomas, > > I discussed the code with Per and updated the names and changed the > code slightly. > > http://cr.openjdk.java.net/~stefank/8017629/webrev.03.delta > http://cr.openjdk.java.net/~stefank/8017629/webrev.03 > Looks good, StefanJ > 1) shmat_with_large_alignment was renamed to shmat_with_alignment and > all references to large pages were removed. > > 2) shmat_with_normal_alignment was renamed to shmat_at_address and all > references to pages sizes were removed. > > 3) shmat_with_alignment was renamed to shmat_large_pages and all large > pages specific code were kept in that function. > > 4) shmat_large_pages was restructured to have one section for the > req_addr != NULL case, and another section for req_addr == NULL. I > know that you suggested to call shmat_with_alignment (previously > shmat_with_normal_alignment) for both cases in the req_addr == NULL > section, but I would like to only have to use shmat_with_alignment > when it's really necessary. > > Thanks, > StefanK > > On 2016-04-13 15:59, Thomas St?fe wrote: >> Hi Stefan, >> >> On Wed, Apr 13, 2016 at 1:23 PM, Stefan Karlsson >> > wrote: >> >> Hi Thomas, >> >> >> On 2016-04-13 12:44, Thomas St?fe wrote: >>> Hi Stefan, >>> >>> On Tue, Apr 12, 2016 at 5:41 PM, Stefan Karlsson >>> > >>> wrote: >>> >>> Hi Thomas, >>> >>> >>> On 2016-04-12 16:23, Thomas St?fe wrote: >>>> Hi Stefan, >>>> >>>> >>>> On Mon, Apr 11, 2016 at 3:52 PM, Stefan Karlsson >>>> >>> > wrote: >>>> >>>> Hi Thomas, >>>> >>>> On 2016-04-11 14:39, Thomas St?fe wrote: >>>>> Hi Stefan, >>>>> >>>>> short question, why the mmap before the shmat? Why not >>>>> shmat right away at the requested address? >>>> >>>> If we have a requested_address we do exactly what you >>>> propose. 
>>>> >>>> if (req_addr == NULL && alignment > >>>> os::large_page_size()) { >>>> return shmat_with_large_alignment(shmid, bytes, >>>> alignment); >>>> } else { >>>> return shmat_with_normal_alignment(shmid, req_addr); >>>> } >>>> >>>> ... >>>> >>>> static char* shmat_with_normal_alignment(int shmid, >>>> char* req_addr) { >>>> char* addr = (char*)shmat(shmid, req_addr, 0); >>>> >>>> if ((intptr_t)addr == -1) { >>>> shm_warning_with_errno("Failed to attach shared memory."); >>>> return NULL; >>>> } >>>> >>>> return addr; >>>> } >>>> >>>> >>>> It's when you don't have a requested address that mmap >>>> is used to find a large enough virtual memory area. >>>> >>>> >>>> Sorry, seems I did not look at this coding thoroughly >>>> enough. I understand now that you do mmap to allocate and >>>> then to cut away the extra pre-/post-space, something which >>>> would not be possible with shmat, which cannot be unmapped >>>> page-wise. >>>> >>>> But I am still not sure why we do it his way: >>>> >>>> 3429 static char* shmat_with_alignment(int shmid, size_t >>>> bytes, size_t alignment, char* req_addr) { >>>> 3430 // If there's no requested address, the shmat call >>>> can return memory that is not >>>> 3431 // 'alignment' aligned, if the given alignment is >>>> larger than the large page size. >>>> 3432 // Special care needs to be taken to ensure that we >>>> get aligned memory back. >>>> 3433 if (req_addr == NULL && alignment > >>>> os::large_page_size()) { >>>> 3434 return shmat_with_large_alignment(shmid, bytes, >>>> alignment); >>>> 3435 } else { >>>> 3436 return shmat_with_normal_alignment(shmid, req_addr); >>>> 3437 } >>>> 3438 } >>>> >>>> For req_addr==0 and big alignment, we attach at the given >>>> alignment ("shmat_with_large_alignment"). >>>> For req_addr!=0, we attach at the given requested address >>>> ("shmat_with_normal_alignment"). >>>> For req_addr==0 and smaller alignment, we ignore the >>>> alignment and attach anywhere? >>>> >>>> Maybe I am slow, but why does it matter if the alignment is >>>> large or small? Why not just distinguish between: >>>> >>>> 1) address given (req_addr!=0): in this case we attach at >>>> this req_addr and rely on the user having aligned the >>>> address properly for his purposes. We specify 0 for flags, >>>> so we will attach at exactly the given address or fail. In >>>> this case we could simply ignore the given alignment - if >>>> one was given - or just use it to counter-check the req_addr. >>>> >>>> 2) alignment given (req_addr==0 and alignment > 0): attach >>>> at the given alignment using mmap-before-shmat. This could >>>> be done for any alignment, be it large or small. >>> >>> What you propose doesn't work. >>> >>> We're allocating large pages with SHM_HUGETLB, and if we try >>> to attach to an address that is not large_page_size aligned >>> the shmat call returns EINVAL. >>> >>> >>> I was aware of this. What I meant was: >>> >>> You have "shmat_with_large_alignment" which takes an alignment >>> and does its best to shmat with that alignment using the mmap >>> trick. This coding does not need to know anything about huge >>> pages, and actually does not do anything huge-pagey, apart from >>> the asserts - it would just as well work with small pages, >>> because the only place where the code needs to know about huge >>> pages is in the layer above, in reserve_memory_special - where we >>> pass SHM_HUGETLB to shmget. (Btw, I always wondered about the >>> "reserve_memory_special" naming.) 
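For readers following along, the huge-page-specific part of that upper layer essentially boils down to a shmget call of this shape (a simplified illustration, not the exact HotSpot source; SHM_HUGETLB is Linux-specific and bytes is assumed to be a multiple of the huge page size):

    #include <stddef.h>
    #include <sys/ipc.h>
    #include <sys/shm.h>

    // Create a SysV shared memory segment backed by huge pages. The attach and
    // alignment layers below this call do not need to know the page size.
    static int create_hugetlb_segment(size_t bytes) {
      return shmget(IPC_PRIVATE, bytes, SHM_HUGETLB | IPC_CREAT | SHM_R | SHM_W);
    }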
>>> >>> I think my point is that by renaming this to >>> "shmat_with_alignment" and removing the huge-page-related asserts >>> the function would become both simpler and more versatile and >>> could be reused for small alignments as well as large ones. The >>> fact that it returns EINVAL for alignments instead of asserting >>> would not be a problem - we would return an error instead of >>> asserting because of bad alignment, and both handling this error >>> and asserting for huge-page-alignment could be handled better in >>> reserve_memory_special. >>> >>> To put it another way, I think "shmat_with_large_alignment" does >>> not need to know about huge pages; this should be the >>> responsibility of reserve_memory_special. >>> >>> About "shmat_with_normal_alignment", this is actually only a raw >>> shmat call and exists for the req_addr!=NULL case and for the >>> case where we do not pass neither req_addr nor alignment. So the >>> only thing it does not handle is alignment, so it is misnamed and >>> also should not be called for the >>> req_addr==NULL-and-small-alignments-case. >> >> The reserve_memory_special_shm function and the associated helper >> functions I'm adding are specifically written to support large >> pages allocations. The names "normal_alignment" and >> "large_alignment" are intended to refer to alignment sizes >> compared to the large pages size. I grant you that it's not >> obvious from the name, and we can rename them to make it more clear. >> >> I want to provide a small bug fix for this large pages bug, while >> you are suggesting that we re-purpose the code into supporting >> small page allocations as well. Your suggestions might be good, >> but may I suggest that you create a patch and an RFE that >> motivates why we should make this code more generic to support >> small pages as well? >> >> Thanks, >> StefanK >> >> >> Ok, we can do that. I was just worried that the code becomes more >> difficult to understand. But lets wait for some more reviews. >> >> Kind Regards, Thomas >> >> >>>> >>>> Functions would become simpler and also could be clearer >>>> named (e.g. "shmat_at_address" and "shmat_with_alignment", >>>> respectivly). >>> >>> Maybe I should rename the functions to make it more obvious >>> that these are large pages specific functions? >>> >>>> >>>> ---- >>>> >>>> This: >>>> >>>> 3402 if ((intptr_t)addr == -1) { >>>> 3403 shm_warning_with_errno("Failed to attach shared >>>> memory."); >>>> 3404 // Since we don't know if the kernel unmapped the >>>> pre-reserved memory area >>>> 3405 // we can't unmap it, since that would potentially >>>> unmap memory that was >>>> 3406 // mapped from other threads. >>>> 3407 return NULL; >>>> 3408 } >>>> >>>> seems scary. Means for every call this happens, we leak the >>>> reserved (not committed) address space? >>> >>> Yes, that's unfortunate. >>> >>> An alternative would be to use this sequence: >>> 1) Use anon_mmap_aligned to find a suitable VA range >>> 2) Immediately unmap the VA range >>> 3) Try to attach at that VA range _without_ SHM_REMAP >>> >>> That would remove the risk of leaking the reserved address >>> space, but instead we risk failing at (3) if another thread >>> manages to allocate memory inside the found VA range. This >>> will cause some users to unnecessarily fail to get large >>> pages, though. We've had other problems when pre-existing >>> threads used mmap while we were initializing the VM. See: >>> JDK-8007074. >>> >>> >>> Yes; btw you also could do this with shmget/shmat instead of mmap. 
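To make that trade-off concrete, the alternative sequence described a few lines up (find an aligned range, unmap it, then re-attach without SHM_REMAP) might look roughly like this; an illustrative sketch only, not the webrev code, assuming Linux and an alignment that is a multiple of SHMLBA:

    #include <stddef.h>
    #include <stdint.h>
    #include <sys/mman.h>
    #include <sys/shm.h>

    // Sketch of the probe, unmap, then attach-without-SHM_REMAP alternative.
    static char* shmat_probe_then_attach(int shmid, size_t bytes, size_t alignment) {
      // 1) Find a suitably aligned range via an anonymous reservation.
      size_t probe_size = bytes + alignment;
      char* probe = (char*) mmap(NULL, probe_size, PROT_NONE,
                                 MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
      if (probe == MAP_FAILED) return NULL;
      char* aligned = (char*) (((uintptr_t) probe + alignment - 1) & ~((uintptr_t) alignment - 1));
      // 2) Release the reservation immediately, so nothing can be leaked ...
      munmap(probe, probe_size);
      // ... but another thread may now map something into [aligned, aligned + bytes).
      // 3) Attach without SHM_REMAP; this fails if the range has been taken.
      char* addr = (char*) shmat(shmid, aligned, 0);
      return (addr == (char*) -1) ? NULL : addr;
    }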
>>> >>> Note that similar unclean tricks are already done in other >>> places, see e.g. the windows version of >>> os::pd_split_reserved_memory(). Which deals with VirtualAlloc() >>> being unable, like shmget, to deallocate piece-wise. >>> >>> >>> >>>> For most cases (anything but ENOMEM, actually) could we at >>>> least assert?: >>>> >>>> EACCES - should not happen: we created the shared memory and >>>> are its owner >>>> EIDRM - should not happen. >>>> EINVAL - should not happen. (you already check now the >>>> attach address for alignment to SHMLBA, so this is covered) >>> >>> Sure. I'll add asserts for these. >>> >>>> >>>> --- >>>> >>>> Smaller nits: >>>> >>>> Functions called "shmat_..." suggest shmat-like behaviour, >>>> so could we have them return -1 instead of NULL in case of >>>> error? >>> >>> That would add clutter to the reserve_memory_special_shm, and >>> it might also suggest that it would be OK to check errno for >>> the failure reason, which probably wouldn't work. I'll let >>> other Reviewers chime in and help decide if we should change >>> this. >>> >>> >>> You are right. If one returns -1, one would have to preserve >>> errno for the caller too. >>> >>> Thanks for reviewing this, >>> StefanK >>> >>> >>> You are welcome! >>> >>> Kind Regards, Thomas >>> >>> >>> >>>> >>>> Kind Regards, Thomas >>>> >>>>> >>>>> Also note that mmap- and shmat-allocated memory may >>>>> have different alignment requirements: mmap requires a >>>>> page-aligned request address, whereas shmat requires >>>>> alignment to SHMLBA, which may be multiple pages (e.g. >>>>> for ARM: >>>>> http://lxr.free-electrons.com/source/arch/arm/include/asm/shmparam.h#L9). >>>>> So, for this shat-over-mmap trick to work, request >>>>> address has to be aligned to SHMLBA, not just page size. >>>>> >>>>> I see that you assert alignment of requ address to >>>>> os::large_page_size(), which I would assume is a >>>>> multiple of SHMLBA, but I am not sure of this. >>>> >>>> I've added some defensive code and asserts to catch this >>>> if/when this assumption fails: >>>> >>>> http://cr.openjdk.java.net/~stefank/8017629/webrev.02.delta/ >>>> >>>> http://cr.openjdk.java.net/~stefank/8017629/webrev.02 >>>> >>>> >>>> I need to verify that this works on other machines than >>>> my local Linux x64 machine. >>>> >>>> Thanks, >>>> StefanK >>>> >>>>> >>>>> Kind Regards, Thomas >>>>> >>>>> >>>>> >>>>> On Mon, Apr 11, 2016 at 1:03 PM, Stefan Karlsson >>>>> >>>> > wrote: >>>>> >>>>> Hi all, >>>>> >>>>> Please review this patch to enable SHM large page >>>>> allocations even when the requested alignment is >>>>> larger than os::large_page_size(). >>>>> >>>>> http://cr.openjdk.java.net/~stefank/8017629/webrev.01 >>>>> >>>>> https://bugs.openjdk.java.net/browse/JDK-8017629 >>>>> >>>>> G1 is affected by this bug since it requires the >>>>> heap to start at an address that is aligned with >>>>> the heap region size. The patch fixes this by >>>>> changing the UseSHM large pages allocation code. >>>>> First, virtual memory with correct alignment is >>>>> pre-reserved and then the large pages are attached >>>>> to this memory area. 
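Concretely, that pre-reserve-then-attach approach might look something like the sketch below. It is simplified and hypothetical, not the actual webrev code, and it assumes Linux, an alignment that is a multiple of both SHMLBA and the huge page size, and a segment created with size 'bytes':

    #include <stddef.h>
    #include <stdint.h>
    #include <sys/mman.h>
    #include <sys/shm.h>

    // Sketch of "pre-reserve an aligned VA range, then shmat the large pages on top".
    static char* shmat_over_reservation(int shmid, size_t bytes, size_t alignment) {
      // Reserve (but do not commit) enough address space to contain an aligned block.
      size_t reserved = bytes + alignment;
      char* start = (char*) mmap(NULL, reserved, PROT_NONE,
                                 MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
      if (start == MAP_FAILED) return NULL;
      char* aligned = (char*) (((uintptr_t) start + alignment - 1) & ~((uintptr_t) alignment - 1));

      // SHM_REMAP lets shmat replace the existing anonymous mapping at 'aligned'.
      char* addr = (char*) shmat(shmid, aligned, SHM_REMAP);
      if (addr == (char*) -1) {
        // As discussed in this thread, the remaining reservation cannot be
        // safely unmapped here, so it may be leaked on failure.
        return NULL;
      }

      // Trim the unused head and tail of the reservation around the attachment.
      if (addr > start) {
        munmap(start, addr - start);
      }
      if (addr + bytes < start + reserved) {
        munmap(addr + bytes, (start + reserved) - (addr + bytes));
      }
      return addr;
    }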
>>>>> >>>>> Tested with vm.gc.testlist and ExecuteInternaVMTests >>>>> >>>>> Thanks, >>>>> StefanK >>>>> >>>>> >>>> >>>> >>> >>> >> >> > From stefan.karlsson at oracle.com Wed Apr 20 11:37:12 2016 From: stefan.karlsson at oracle.com (Stefan Karlsson) Date: Wed, 20 Apr 2016 13:37:12 +0200 Subject: RFR: 8017629: G1: UseSHM in combination with a G1HeapRegionSize > os::large_page_size() falls back to use small pages In-Reply-To: <571769BD.6000700@oracle.com> References: <570B8481.8010108@oracle.com> <570BAC1B.7040806@oracle.com> <570D1747.2020508@oracle.com> <570E2C45.7090201@oracle.com> <5714B13F.7080307@oracle.com> <571769BD.6000700@oracle.com> Message-ID: <571769E8.6000201@oracle.com> Thanks, StefanJ. StefanK On 2016-04-20 13:36, Stefan Johansson wrote: > Hi StefanK, > > On 2016-04-18 12:04, Stefan Karlsson wrote: >> Hi Thomas, >> >> I discussed the code with Per and updated the names and changed the >> code slightly. >> >> http://cr.openjdk.java.net/~stefank/8017629/webrev.03.delta >> http://cr.openjdk.java.net/~stefank/8017629/webrev.03 >> > Looks good, > > StefanJ >> 1) shmat_with_large_alignment was renamed to shmat_with_alignment and >> all references to large pages were removed. >> >> 2) shmat_with_normal_alignment was renamed to shmat_at_address and >> all references to pages sizes were removed. >> >> 3) shmat_with_alignment was renamed to shmat_large_pages and all >> large pages specific code were kept in that function. >> >> 4) shmat_large_pages was restructured to have one section for the >> req_addr != NULL case, and another section for req_addr == NULL. I >> know that you suggested to call shmat_with_alignment (previously >> shmat_with_normal_alignment) for both cases in the req_addr == NULL >> section, but I would like to only have to use shmat_with_alignment >> when it's really necessary. >> >> Thanks, >> StefanK >> >> On 2016-04-13 15:59, Thomas St?fe wrote: >>> Hi Stefan, >>> >>> On Wed, Apr 13, 2016 at 1:23 PM, Stefan Karlsson >>> > wrote: >>> >>> Hi Thomas, >>> >>> >>> On 2016-04-13 12:44, Thomas St?fe wrote: >>>> Hi Stefan, >>>> >>>> On Tue, Apr 12, 2016 at 5:41 PM, Stefan Karlsson >>>> > >>>> wrote: >>>> >>>> Hi Thomas, >>>> >>>> >>>> On 2016-04-12 16:23, Thomas St?fe wrote: >>>>> Hi Stefan, >>>>> >>>>> >>>>> On Mon, Apr 11, 2016 at 3:52 PM, Stefan Karlsson >>>>> >>>> > wrote: >>>>> >>>>> Hi Thomas, >>>>> >>>>> On 2016-04-11 14:39, Thomas St?fe wrote: >>>>>> Hi Stefan, >>>>>> >>>>>> short question, why the mmap before the shmat? Why not >>>>>> shmat right away at the requested address? >>>>> >>>>> If we have a requested_address we do exactly what you >>>>> propose. >>>>> >>>>> if (req_addr == NULL && alignment > >>>>> os::large_page_size()) { >>>>> return shmat_with_large_alignment(shmid, bytes, >>>>> alignment); >>>>> } else { >>>>> return shmat_with_normal_alignment(shmid, req_addr); >>>>> } >>>>> >>>>> ... >>>>> >>>>> static char* shmat_with_normal_alignment(int shmid, >>>>> char* req_addr) { >>>>> char* addr = (char*)shmat(shmid, req_addr, 0); >>>>> >>>>> if ((intptr_t)addr == -1) { >>>>> shm_warning_with_errno("Failed to attach shared >>>>> memory."); >>>>> return NULL; >>>>> } >>>>> >>>>> return addr; >>>>> } >>>>> >>>>> >>>>> It's when you don't have a requested address that mmap >>>>> is used to find a large enough virtual memory area. >>>>> >>>>> >>>>> Sorry, seems I did not look at this coding thoroughly >>>>> enough. 
I understand now that you do mmap to allocate and >>>>> then to cut away the extra pre-/post-space, something which >>>>> would not be possible with shmat, which cannot be unmapped >>>>> page-wise. >>>>> >>>>> But I am still not sure why we do it his way: >>>>> >>>>> 3429 static char* shmat_with_alignment(int shmid, size_t >>>>> bytes, size_t alignment, char* req_addr) { >>>>> 3430 // If there's no requested address, the shmat call >>>>> can return memory that is not >>>>> 3431 // 'alignment' aligned, if the given alignment is >>>>> larger than the large page size. >>>>> 3432 // Special care needs to be taken to ensure that we >>>>> get aligned memory back. >>>>> 3433 if (req_addr == NULL && alignment > >>>>> os::large_page_size()) { >>>>> 3434 return shmat_with_large_alignment(shmid, bytes, >>>>> alignment); >>>>> 3435 } else { >>>>> 3436 return shmat_with_normal_alignment(shmid, req_addr); >>>>> 3437 } >>>>> 3438 } >>>>> >>>>> For req_addr==0 and big alignment, we attach at the given >>>>> alignment ("shmat_with_large_alignment"). >>>>> For req_addr!=0, we attach at the given requested address >>>>> ("shmat_with_normal_alignment"). >>>>> For req_addr==0 and smaller alignment, we ignore the >>>>> alignment and attach anywhere? >>>>> >>>>> Maybe I am slow, but why does it matter if the alignment is >>>>> large or small? Why not just distinguish between: >>>>> >>>>> 1) address given (req_addr!=0): in this case we attach at >>>>> this req_addr and rely on the user having aligned the >>>>> address properly for his purposes. We specify 0 for flags, >>>>> so we will attach at exactly the given address or fail. In >>>>> this case we could simply ignore the given alignment - if >>>>> one was given - or just use it to counter-check the req_addr. >>>>> >>>>> 2) alignment given (req_addr==0 and alignment > 0): attach >>>>> at the given alignment using mmap-before-shmat. This could >>>>> be done for any alignment, be it large or small. >>>> >>>> What you propose doesn't work. >>>> >>>> We're allocating large pages with SHM_HUGETLB, and if we try >>>> to attach to an address that is not large_page_size aligned >>>> the shmat call returns EINVAL. >>>> >>>> >>>> I was aware of this. What I meant was: >>>> >>>> You have "shmat_with_large_alignment" which takes an alignment >>>> and does its best to shmat with that alignment using the mmap >>>> trick. This coding does not need to know anything about huge >>>> pages, and actually does not do anything huge-pagey, apart from >>>> the asserts - it would just as well work with small pages, >>>> because the only place where the code needs to know about huge >>>> pages is in the layer above, in reserve_memory_special - where we >>>> pass SHM_HUGETLB to shmget. (Btw, I always wondered about the >>>> "reserve_memory_special" naming.) >>>> >>>> I think my point is that by renaming this to >>>> "shmat_with_alignment" and removing the huge-page-related asserts >>>> the function would become both simpler and more versatile and >>>> could be reused for small alignments as well as large ones. The >>>> fact that it returns EINVAL for alignments instead of asserting >>>> would not be a problem - we would return an error instead of >>>> asserting because of bad alignment, and both handling this error >>>> and asserting for huge-page-alignment could be handled better in >>>> reserve_memory_special. 
>>>> >>>> To put it another way, I think "shmat_with_large_alignment" does >>>> not need to know about huge pages; this should be the >>>> responsibility of reserve_memory_special. >>>> >>>> About "shmat_with_normal_alignment", this is actually only a raw >>>> shmat call and exists for the req_addr!=NULL case and for the >>>> case where we do not pass neither req_addr nor alignment. So the >>>> only thing it does not handle is alignment, so it is misnamed and >>>> also should not be called for the >>>> req_addr==NULL-and-small-alignments-case. >>> >>> The reserve_memory_special_shm function and the associated helper >>> functions I'm adding are specifically written to support large >>> pages allocations. The names "normal_alignment" and >>> "large_alignment" are intended to refer to alignment sizes >>> compared to the large pages size. I grant you that it's not >>> obvious from the name, and we can rename them to make it more >>> clear. >>> >>> I want to provide a small bug fix for this large pages bug, while >>> you are suggesting that we re-purpose the code into supporting >>> small page allocations as well. Your suggestions might be good, >>> but may I suggest that you create a patch and an RFE that >>> motivates why we should make this code more generic to support >>> small pages as well? >>> >>> Thanks, >>> StefanK >>> >>> >>> Ok, we can do that. I was just worried that the code becomes more >>> difficult to understand. But lets wait for some more reviews. >>> >>> Kind Regards, Thomas >>> >>> >>>>> >>>>> Functions would become simpler and also could be clearer >>>>> named (e.g. "shmat_at_address" and "shmat_with_alignment", >>>>> respectivly). >>>> >>>> Maybe I should rename the functions to make it more obvious >>>> that these are large pages specific functions? >>>> >>>>> >>>>> ---- >>>>> >>>>> This: >>>>> >>>>> 3402 if ((intptr_t)addr == -1) { >>>>> 3403 shm_warning_with_errno("Failed to attach shared >>>>> memory."); >>>>> 3404 // Since we don't know if the kernel unmapped the >>>>> pre-reserved memory area >>>>> 3405 // we can't unmap it, since that would potentially >>>>> unmap memory that was >>>>> 3406 // mapped from other threads. >>>>> 3407 return NULL; >>>>> 3408 } >>>>> >>>>> seems scary. Means for every call this happens, we leak the >>>>> reserved (not committed) address space? >>>> >>>> Yes, that's unfortunate. >>>> >>>> An alternative would be to use this sequence: >>>> 1) Use anon_mmap_aligned to find a suitable VA range >>>> 2) Immediately unmap the VA range >>>> 3) Try to attach at that VA range _without_ SHM_REMAP >>>> >>>> That would remove the risk of leaking the reserved address >>>> space, but instead we risk failing at (3) if another thread >>>> manages to allocate memory inside the found VA range. This >>>> will cause some users to unnecessarily fail to get large >>>> pages, though. We've had other problems when pre-existing >>>> threads used mmap while we were initializing the VM. See: >>>> JDK-8007074. >>>> >>>> >>>> Yes; btw you also could do this with shmget/shmat instead of mmap. >>>> >>>> Note that similar unclean tricks are already done in other >>>> places, see e.g. the windows version of >>>> os::pd_split_reserved_memory(). Which deals with VirtualAlloc() >>>> being unable, like shmget, to deallocate piece-wise. >>>> >>>> >>>> >>>>> For most cases (anything but ENOMEM, actually) could we at >>>>> least assert?: >>>>> >>>>> EACCES - should not happen: we created the shared memory and >>>>> are its owner >>>>> EIDRM - should not happen. 
>>>>> EINVAL - should not happen. (you already check now the >>>>> attach address for alignment to SHMLBA, so this is covered) >>>> >>>> Sure. I'll add asserts for these. >>>> >>>>> >>>>> --- >>>>> >>>>> Smaller nits: >>>>> >>>>> Functions called "shmat_..." suggest shmat-like behaviour, >>>>> so could we have them return -1 instead of NULL in case of >>>>> error? >>>> >>>> That would add clutter to the reserve_memory_special_shm, and >>>> it might also suggest that it would be OK to check errno for >>>> the failure reason, which probably wouldn't work. I'll let >>>> other Reviewers chime in and help decide if we should change >>>> this. >>>> >>>> >>>> You are right. If one returns -1, one would have to preserve >>>> errno for the caller too. >>>> >>>> Thanks for reviewing this, >>>> StefanK >>>> >>>> >>>> You are welcome! >>>> >>>> Kind Regards, Thomas >>>> >>>> >>>> >>>>> >>>>> Kind Regards, Thomas >>>>> >>>>>> >>>>>> Also note that mmap- and shmat-allocated memory may >>>>>> have different alignment requirements: mmap requires a >>>>>> page-aligned request address, whereas shmat requires >>>>>> alignment to SHMLBA, which may be multiple pages (e.g. >>>>>> for ARM: >>>>>> http://lxr.free-electrons.com/source/arch/arm/include/asm/shmparam.h#L9). >>>>>> >>>>>> So, for this shat-over-mmap trick to work, request >>>>>> address has to be aligned to SHMLBA, not just page size. >>>>>> >>>>>> I see that you assert alignment of requ address to >>>>>> os::large_page_size(), which I would assume is a >>>>>> multiple of SHMLBA, but I am not sure of this. >>>>> >>>>> I've added some defensive code and asserts to catch this >>>>> if/when this assumption fails: >>>>> >>>>> http://cr.openjdk.java.net/~stefank/8017629/webrev.02.delta/ >>>>> >>>>> http://cr.openjdk.java.net/~stefank/8017629/webrev.02 >>>>> >>>>> >>>>> I need to verify that this works on other machines than >>>>> my local Linux x64 machine. >>>>> >>>>> Thanks, >>>>> StefanK >>>>> >>>>>> >>>>>> Kind Regards, Thomas >>>>>> >>>>>> >>>>>> >>>>>> On Mon, Apr 11, 2016 at 1:03 PM, Stefan Karlsson >>>>>> >>>>> > wrote: >>>>>> >>>>>> Hi all, >>>>>> >>>>>> Please review this patch to enable SHM large page >>>>>> allocations even when the requested alignment is >>>>>> larger than os::large_page_size(). >>>>>> >>>>>> http://cr.openjdk.java.net/~stefank/8017629/webrev.01 >>>>>> >>>>>> https://bugs.openjdk.java.net/browse/JDK-8017629 >>>>>> >>>>>> G1 is affected by this bug since it requires the >>>>>> heap to start at an address that is aligned with >>>>>> the heap region size. The patch fixes this by >>>>>> changing the UseSHM large pages allocation code. >>>>>> First, virtual memory with correct alignment is >>>>>> pre-reserved and then the large pages are attached >>>>>> to this memory area. >>>>>> >>>>>> Tested with vm.gc.testlist and ExecuteInternaVMTests >>>>>> >>>>>> Thanks, >>>>>> StefanK >>>>>> >>>>>> >>>>> >>>>> >>>> >>>> >>> >>> >> > From rwestrel at redhat.com Wed Apr 20 11:51:02 2016 From: rwestrel at redhat.com (Roland Westrelin) Date: Wed, 20 Apr 2016 13:51:02 +0200 Subject: RFR: 8154739: AArch64: TemplateTable::fast_xaccess loads in wrong mode In-Reply-To: <57176947.7090501@redhat.com> References: <57176947.7090501@redhat.com> Message-ID: <57176D26.6050306@redhat.com> That looks good to me. Roland. On 04/20/2016 01:34 PM, Andrew Haley wrote: > I've seen weird unexplained (and unrepeatable) segfaults during JDK > builds for years. 
They're vary rare, and I thought it was to do with > flaky prototype hardware -- or at least that's how I kidded myself. > Yesterday I found a culprit. It's a load in one of the bytecode > accelerators, the one which replaces _aload_0, _fast_igetfield. > > Instead of a 32-bit word load, it's a 64-bit xword load. So how can > this lead to a crash? Well, if the object in question is at the very > end of the heap and the integer field is at the very end of the > object, you'll get a read which spills over onto the next page in > memory. This requires quite a coincidence of events, but it happens. > > Also, I discovered that the volatile load case had only a LoadLoad: it > needs a LoadStore too. > > Thanks, > > Andrew. > > > # HG changeset patch > # User aph > # Date 1461150850 0 > # Wed Apr 20 11:14:10 2016 +0000 > # Node ID 0df9b5892b864f27524480a698fe2550b4f9e531 > # Parent 57f9554a28f1858c009b4c4f0fdcb42079f4c447 > 8154739: AArch64: TemplateTable::fast_xaccess loads in wrong mode > Reviewed-by: roland > > diff --git a/src/cpu/aarch64/vm/templateTable_aarch64.cpp b/src/cpu/aarch64/vm/templateTable_aarch64.cpp > --- a/src/cpu/aarch64/vm/templateTable_aarch64.cpp > +++ b/src/cpu/aarch64/vm/templateTable_aarch64.cpp > @@ -2982,7 +2982,7 @@ > __ null_check(r0); > switch (state) { > case itos: > - __ ldr(r0, Address(r0, r1, Address::lsl(0))); > + __ ldrw(r0, Address(r0, r1, Address::lsl(0))); > break; > case atos: > __ load_heap_oop(r0, Address(r0, r1, Address::lsl(0))); > @@ -3000,7 +3000,7 @@ > __ ldrw(r3, Address(r2, in_bytes(ConstantPoolCache::base_offset() + > ConstantPoolCacheEntry::flags_offset()))); > __ tbz(r3, ConstantPoolCacheEntry::is_volatile_shift, notVolatile); > - __ membar(MacroAssembler::LoadLoad); > + __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore); > __ bind(notVolatile); > } > > > From coleen.phillimore at oracle.com Wed Apr 20 12:24:23 2016 From: coleen.phillimore at oracle.com (Coleen Phillimore) Date: Wed, 20 Apr 2016 08:24:23 -0400 Subject: RFR (S): JDK-8152949: Jigsaw crash when Klass in _fixup_module_field_list is unloaded In-Reply-To: <57175D0B.6050604@oracle.com> References: <570FEFAB.2070809@oracle.com> <570FFC4F.7020000@oracle.com> <5710E397.3020106@oracle.com> <5710E86F.3090606@oracle.com> <57111EBC.4000308@oracle.com> <571144ED.1050902@oracle.com> <5714876C.2050207@oracle.com> <5714C57C.1000109@oracle.com> <571542C2.5060707@oracle.com> <571593D5.7010504@oracle.com> <57168B85.9090803@oracle.com> <571693FA.3060807@oracle.com> <57175D0B.6050604@oracle.com> Message-ID: <571774F7.7000409@oracle.com> On 4/20/16 6:42 AM, Lois Foltan wrote: > > On 4/19/2016 4:24 PM, Coleen Phillimore wrote: >> >> Hi, this is getting long. >> >> On 4/19/16 3:48 PM, Lois Foltan wrote: >>> >>> On 4/18/2016 10:11 PM, David Holmes wrote: >>>> Hi Lois, >>>> >>>> On 19/04/2016 6:25 AM, Lois Foltan wrote: >>>>> >>>>> On 4/18/2016 7:31 AM, Lois Foltan wrote: >>>>>> >>>>>> On 4/18/2016 3:06 AM, Stefan Karlsson wrote: >>>>>>> On 2016-04-15 21:45, Alan Bateman wrote: >>>>>>>> >>>>>>>> On 15/04/2016 18:02, Lois Foltan wrote: >>>>>>>>> >>>>>>>>> Hi Stefan, >>>>>>>>> >>>>>>>>> In start up before module system initialization in complete I >>>>>>>>> believe the VM is single threaded, so the increment/decrement >>>>>>>>> reference counts do not need to be atomic. Adding it is a >>>>>>>>> defensive move in case the reference count is ever used passed >>>>>>>>> start up in the future. It kind of does seem a bit excessive, >>>>>>>>> sounds like you agree? 
>>>>>>>> There will be a number of threads running before the base >>>>>>>> module is >>>>>>>> defined to the VM. As things stand the the java threads at this >>>>>>>> point will be the Common-Cleaner, Finalizer, Reference Handler and >>>>>>>> Signal Handler. >>>>>>> >>>>>>> So, are you saying that we need the atomics? >>>>>>> >>>>>>> The java_lang_Class::create_mirror function isn't multi-thread >>>>>>> safe, >>>>>>> and must already be guarded by a lock (SystemDictionary_lock >>>>>>> AFAICT). >>>>>>> The increment in Unsafe_DefineAnonymousClass0, will only be done >>>>>>> once, for the single InstanceKlass instance in the CLD. And all >>>>>>> reads >>>>>>> of _keep_alive from the GC are done during safepoints. >>>>>> The anonymous class is inserted in the fixup mirror and fixup module >>>>>> lists during java_lang_Class::create_mirror() before it is made >>>>>> public >>>>>> or "published" as loaded. So the two instances where the reference >>>>>> count is incremented, Unsafe_DefineAnonymousClass0 and in >>>>>> java_lang_Class::create_mirror(), are guarded by a lock as well >>>>>> as the >>>>>> decrement in Unsafe_DefineAnonymousClass0. No other thread has >>>>>> access >>>>>> to the class during this time, as it is being loaded. >>>>>>> >>>>>>> How does ModuleEntryTable::patch_javabase_entries guard against >>>>>>> concurrent inserts into the _fixup_module_field_list list? >>>>>> That leaves the decrement in >>>>>> ModuleEntryTable::patch_javabase_entries() as possibly unguarded. >>>>>> This >>>>>> only occurs when the VM is called to define the module java.base. I >>>>>> believe this should be okay but will double check. >>>>> >>>>> One small change in modules.cpp/define_javabase_module() to ensure >>>>> that >>>>> only one definition attempt of java.base will occur and thus only one >>>>> call to ModuleEntryTable::patch_javabase_entries(). If a situation >>>>> arises where java.base is trying to be multiply defined, according to >>>>> the expected error conditions for JVM_DefineModule(), an >>>>> IllegalArgumentException should be thrown. >>>>> >>>>> I have also added a comment in classfile/classLoaderData.hpp >>>>> explaining >>>>> why _keep_alive does need to be defined volatile or atomic. >>>> >>>> Can you add assertions to check that _keep_alive is only modified >>>> under the protection of the lock (with a special case perhaps for >>>> the unguarded java.base case) ? >>> >>> Hi David, >>> >>> Thanks for the review. I misspoke when I indicated that the two >>> increments and the one decrement of the reference counter that occur >>> during a call to the Unsafe_DefineAnonymous0() method were guarded >>> under a lock. However, due to the way anonymous classes are created >>> only a single non-GC thread will have access to the _keep_alive >>> field during this time. And as Stefan indicates above, all reads of >>> _keep_alive from the GC are done during safepoints. Each anonymous >>> class, when defined, has a dedicated ClassLoaderData created for >>> it. No other class shares the anonymous class' name or CLD. Due to >>> this uniqueness, no other thread has knowledge about this anonymous >>> class while it is being defined. It is only upon return from >>> Unsafe_DefineAnonymous0(), that the anonymous class exists and other >>> threads, at that point, can potentially access it. >>> >> >> Ah interesting. Currently, this is true and why this is safe. 
If we >> change the JVM to have *some* anonymous classes share CLD with their >> host_class because the lifetimes are the same, then we'll have to use >> atomic operations. >> >> http://cr.openjdk.java.net/~lfoltan/bug_jdk8152949_1/src/share/vm/classfile/classLoaderData.cpp.udiff.html >> >> >> Can you put one comment directly above the inc_keep_alive() and >> dec_keep_alive() functions to this effect here, just so we remember? > > Hi Coleen, > > I will add the comment before I commit. I might expand a bit on your > last sentence a bit. Apparently the comment is false because we don't need the alive count for non-anonymous ClassLoaderData ... there are more replies to this thread. So maybe take out my last suggested sentence. Coleen > >> >> // Anonymous classes have their own ClassLoaderData that is marked to >> keep alive while the class is being parsed, and >> // if the class appears on the module fixup list. >> // If anonymous classes are changed to share with host_class, this >> refcount needs to be changed to use atomic operations. >> >> *+ void ClassLoaderData::inc_keep_alive() {* >> *+ assert(_keep_alive >= 0, "Invalid keep alive count");* >> *+ _keep_alive++;* >> *+ }* >> *+ * >> *+ void ClassLoaderData::dec_keep_alive() {* >> *+ assert(_keep_alive > 0, "Invalid keep alive count");* >> *+ _keep_alive--;* >> *+ }* >> *+ * >> >> More below. >> >>> Thanks, >>> Lois >>> >>>> >>>> Thanks, >>>> David >>>> >>>>> Please review at: >>>>> >>>>> http://cr.openjdk.java.net/~lfoltan/bug_jdk8152949_1/ >> >> http://cr.openjdk.java.net/~lfoltan/bug_jdk8152949_1/src/share/vm/classfile/modules.cpp.frames.html >> >> >> I'm not sure how this relates to the bug. > > This change ensures that java.base will not be multiply defined and > thus only one call to ModuleEntryTable::patch_javabase_entries() will > occur. A decrement of the reference count happens when an anonymous > class is on the fixup module list post patching it with java.base > >> >> Otherwise, the change looks good. > > Thanks again! > >> >> Thanks, >> Coleen >> >> >>>>> >>>>> Retesting in progress. >>>>> >>>>> Thanks, >>>>> Lois >>>>> >>>>>> >>>>>> Thanks, >>>>>> Lois >>>>>> >>>>>>> >>>>>>> thanks, >>>>>>> StefanK >>>>>>> >>>>>>> >>>>>>>> >>>>>>>> -Alan >>>>>>> >>>>>> >>>>> >>> >> > From sanne at redhat.com Wed Apr 20 12:28:54 2016 From: sanne at redhat.com (Sanne Grinovero) Date: Wed, 20 Apr 2016 13:28:54 +0100 Subject: JMH and JDK9 In-Reply-To: <5703C8B1.5080501@redhat.com> References: <56FBFA22.2060204@redhat.com> <56FBFB05.9050706@oracle.com> <56FBFCCD.1000704@oracle.com> <428721B0-47A4-453E-89C2-1D6CA09DEC75@oracle.com> <56FE29EE.1090801@oracle.com> <0C6C1D21-1592-426A-B9DE-33458FC90CF9@oracle.com> <57038495.40603@redhat.com> <570390AE.6050406@oracle.com> <5703C8B1.5080501@redhat.com> Message-ID: On Tue, Apr 5, 2016 at 3:16 PM, Andrew Dinn wrote: > On 05/04/16 11:17, Alan Bateman wrote: > . . . >> We recently updated JEP 261 proposing that "java.se" be the only java.* >> root module that is resolved when compiling code in the unnamed module >> or at runtime when the main class is loaded from the class path [1]. On >> the surface then it will "appear" to developers that the JDK does not >> have the EE components but from everything we've seen so far, then those >> EE components are usually on the class path anyway. > > Ah ok, so this means that the problem has been punted to the other foot > i.e. 
the only apps affected will be those which i) don't have an EE jar > on their classpath and ii) require the (partial) stub implementations > provided by Java SE. That sounds much better since such a configuration > is of almost no use to anyone and hence is very unlikely to arise. Agreed: excellent idea! I'm eager to try it out so that we can resume testing of everything else too; I just tried my luck with build 9-ea+114 but it didn't seem to work: I'm going to assume this wasn't implemented yet, or should I double check how I'm building? Did I understand correctly that I won't need to pass any switch to neither java nor javac, as long as I have the JavaEE jar as external dependencies on my classpath? (i.e. if this build is "proven" on Java8 it should work on Java9 ?) Is there an issue tracker which I could follow to watch updates on this? Slightly unrelated, but is it expected that compilation is successful, even though (in my specific case) javax.transaction.Synchronization causes a java.lang.NoClassDefFoundError at runtime? Thanks, Sanne From lois.foltan at oracle.com Wed Apr 20 12:30:09 2016 From: lois.foltan at oracle.com (Lois Foltan) Date: Wed, 20 Apr 2016 08:30:09 -0400 Subject: RFR (S): JDK-8152949: Jigsaw crash when Klass in _fixup_module_field_list is unloaded In-Reply-To: <571774F7.7000409@oracle.com> References: <570FEFAB.2070809@oracle.com> <570FFC4F.7020000@oracle.com> <5710E397.3020106@oracle.com> <5710E86F.3090606@oracle.com> <57111EBC.4000308@oracle.com> <571144ED.1050902@oracle.com> <5714876C.2050207@oracle.com> <5714C57C.1000109@oracle.com> <571542C2.5060707@oracle.com> <571593D5.7010504@oracle.com> <57168B85.9090803@oracle.com> <571693FA.3060807@oracle.com> <57175D0B.6050604@oracle.com> <571774F7.7000409@oracle.com> Message-ID: <57177651.9080001@oracle.com> On 4/20/2016 8:24 AM, Coleen Phillimore wrote: > > > On 4/20/16 6:42 AM, Lois Foltan wrote: >> >> On 4/19/2016 4:24 PM, Coleen Phillimore wrote: >>> >>> Hi, this is getting long. >>> >>> On 4/19/16 3:48 PM, Lois Foltan wrote: >>>> >>>> On 4/18/2016 10:11 PM, David Holmes wrote: >>>>> Hi Lois, >>>>> >>>>> On 19/04/2016 6:25 AM, Lois Foltan wrote: >>>>>> >>>>>> On 4/18/2016 7:31 AM, Lois Foltan wrote: >>>>>>> >>>>>>> On 4/18/2016 3:06 AM, Stefan Karlsson wrote: >>>>>>>> On 2016-04-15 21:45, Alan Bateman wrote: >>>>>>>>> >>>>>>>>> On 15/04/2016 18:02, Lois Foltan wrote: >>>>>>>>>> >>>>>>>>>> Hi Stefan, >>>>>>>>>> >>>>>>>>>> In start up before module system initialization in complete I >>>>>>>>>> believe the VM is single threaded, so the increment/decrement >>>>>>>>>> reference counts do not need to be atomic. Adding it is a >>>>>>>>>> defensive move in case the reference count is ever used passed >>>>>>>>>> start up in the future. It kind of does seem a bit excessive, >>>>>>>>>> sounds like you agree? >>>>>>>>> There will be a number of threads running before the base >>>>>>>>> module is >>>>>>>>> defined to the VM. As things stand the the java threads at this >>>>>>>>> point will be the Common-Cleaner, Finalizer, Reference Handler >>>>>>>>> and >>>>>>>>> Signal Handler. >>>>>>>> >>>>>>>> So, are you saying that we need the atomics? >>>>>>>> >>>>>>>> The java_lang_Class::create_mirror function isn't multi-thread >>>>>>>> safe, >>>>>>>> and must already be guarded by a lock (SystemDictionary_lock >>>>>>>> AFAICT). >>>>>>>> The increment in Unsafe_DefineAnonymousClass0, will only be done >>>>>>>> once, for the single InstanceKlass instance in the CLD. 
And all >>>>>>>> reads >>>>>>>> of _keep_alive from the GC are done during safepoints. >>>>>>> The anonymous class is inserted in the fixup mirror and fixup >>>>>>> module >>>>>>> lists during java_lang_Class::create_mirror() before it is made >>>>>>> public >>>>>>> or "published" as loaded. So the two instances where the reference >>>>>>> count is incremented, Unsafe_DefineAnonymousClass0 and in >>>>>>> java_lang_Class::create_mirror(), are guarded by a lock as well >>>>>>> as the >>>>>>> decrement in Unsafe_DefineAnonymousClass0. No other thread has >>>>>>> access >>>>>>> to the class during this time, as it is being loaded. >>>>>>>> >>>>>>>> How does ModuleEntryTable::patch_javabase_entries guard against >>>>>>>> concurrent inserts into the _fixup_module_field_list list? >>>>>>> That leaves the decrement in >>>>>>> ModuleEntryTable::patch_javabase_entries() as possibly >>>>>>> unguarded. This >>>>>>> only occurs when the VM is called to define the module >>>>>>> java.base. I >>>>>>> believe this should be okay but will double check. >>>>>> >>>>>> One small change in modules.cpp/define_javabase_module() to >>>>>> ensure that >>>>>> only one definition attempt of java.base will occur and thus only >>>>>> one >>>>>> call to ModuleEntryTable::patch_javabase_entries(). If a situation >>>>>> arises where java.base is trying to be multiply defined, >>>>>> according to >>>>>> the expected error conditions for JVM_DefineModule(), an >>>>>> IllegalArgumentException should be thrown. >>>>>> >>>>>> I have also added a comment in classfile/classLoaderData.hpp >>>>>> explaining >>>>>> why _keep_alive does need to be defined volatile or atomic. >>>>> >>>>> Can you add assertions to check that _keep_alive is only modified >>>>> under the protection of the lock (with a special case perhaps for >>>>> the unguarded java.base case) ? >>>> >>>> Hi David, >>>> >>>> Thanks for the review. I misspoke when I indicated that the two >>>> increments and the one decrement of the reference counter that >>>> occur during a call to the Unsafe_DefineAnonymous0() method were >>>> guarded under a lock. However, due to the way anonymous classes are >>>> created only a single non-GC thread will have access to the >>>> _keep_alive field during this time. And as Stefan indicates above, >>>> all reads of _keep_alive from the GC are done during safepoints. >>>> Each anonymous class, when defined, has a dedicated ClassLoaderData >>>> created for it. No other class shares the anonymous class' name or >>>> CLD. Due to this uniqueness, no other thread has knowledge about >>>> this anonymous class while it is being defined. It is only upon >>>> return from Unsafe_DefineAnonymous0(), that the anonymous class >>>> exists and other threads, at that point, can potentially access it. >>>> >>> >>> Ah interesting. Currently, this is true and why this is safe. If we >>> change the JVM to have *some* anonymous classes share CLD with their >>> host_class because the lifetimes are the same, then we'll have to >>> use atomic operations. >>> >>> http://cr.openjdk.java.net/~lfoltan/bug_jdk8152949_1/src/share/vm/classfile/classLoaderData.cpp.udiff.html >>> >>> >>> Can you put one comment directly above the inc_keep_alive() and >>> dec_keep_alive() functions to this effect here, just so we remember? >> >> Hi Coleen, >> >> I will add the comment before I commit. I might expand a bit on your >> last sentence a bit. > > Apparently the comment is false because we don't need the alive count > for non-anonymous ClassLoaderData ... 
there are more replies to this > thread. > > So maybe take out my last suggested sentence. Right, saw that. Will do. Thanks again. Lois > > Coleen >> >>> >>> // Anonymous classes have their own ClassLoaderData that is marked >>> to keep alive while the class is being parsed, and >>> // if the class appears on the module fixup list. >>> // If anonymous classes are changed to share with host_class, this >>> refcount needs to be changed to use atomic operations. >>> >>> *+ void ClassLoaderData::inc_keep_alive() {* >>> *+ assert(_keep_alive >= 0, "Invalid keep alive count");* >>> *+ _keep_alive++;* >>> *+ }* >>> *+ * >>> *+ void ClassLoaderData::dec_keep_alive() {* >>> *+ assert(_keep_alive > 0, "Invalid keep alive count");* >>> *+ _keep_alive--;* >>> *+ }* >>> *+ * >>> >>> More below. >>> >>>> Thanks, >>>> Lois >>>> >>>>> >>>>> Thanks, >>>>> David >>>>> >>>>>> Please review at: >>>>>> >>>>>> http://cr.openjdk.java.net/~lfoltan/bug_jdk8152949_1/ >>> >>> http://cr.openjdk.java.net/~lfoltan/bug_jdk8152949_1/src/share/vm/classfile/modules.cpp.frames.html >>> >>> >>> I'm not sure how this relates to the bug. >> >> This change ensures that java.base will not be multiply defined and >> thus only one call to ModuleEntryTable::patch_javabase_entries() will >> occur. A decrement of the reference count happens when an anonymous >> class is on the fixup module list post patching it with java.base >> >>> >>> Otherwise, the change looks good. >> >> Thanks again! >> >>> >>> Thanks, >>> Coleen >>> >>> >>>>>> >>>>>> Retesting in progress. >>>>>> >>>>>> Thanks, >>>>>> Lois >>>>>> >>>>>>> >>>>>>> Thanks, >>>>>>> Lois >>>>>>> >>>>>>>> >>>>>>>> thanks, >>>>>>>> StefanK >>>>>>>> >>>>>>>> >>>>>>>>> >>>>>>>>> -Alan >>>>>>>> >>>>>>> >>>>>> >>>> >>> >> > From adinn at redhat.com Wed Apr 20 15:00:41 2016 From: adinn at redhat.com (Andrew Dinn) Date: Wed, 20 Apr 2016 16:00:41 +0100 Subject: RFR: 8154739: AArch64: TemplateTable::fast_xaccess loads in wrong mode In-Reply-To: <57176947.7090501@redhat.com> References: <57176947.7090501@redhat.com> Message-ID: <57179999.5040206@redhat.com> Awesome debug-fu! Patch looks good. regards, Andrew Dinn ----------- Senior Principal Software Engineer Red Hat UK Ltd Registered in UK and Wales under Company Registration No. 3798903 Directors: Michael Cunningham (US), Michael O'Neill (Ireland), Paul Argiry (US) On 20/04/16 12:34, Andrew Haley wrote: > I've seen weird unexplained (and unrepeatable) segfaults during JDK > builds for years. They're vary rare, and I thought it was to do with > flaky prototype hardware -- or at least that's how I kidded myself. > Yesterday I found a culprit. It's a load in one of the bytecode > accelerators, the one which replaces _aload_0, _fast_igetfield. > > Instead of a 32-bit word load, it's a 64-bit xword load. So how can > this lead to a crash? Well, if the object in question is at the very > end of the heap and the integer field is at the very end of the > object, you'll get a read which spills over onto the next page in > memory. This requires quite a coincidence of events, but it happens. > > Also, I discovered that the volatile load case had only a LoadLoad: it > needs a LoadStore too. > > Thanks, > > Andrew. 
> > > # HG changeset patch > # User aph > # Date 1461150850 0 > # Wed Apr 20 11:14:10 2016 +0000 > # Node ID 0df9b5892b864f27524480a698fe2550b4f9e531 > # Parent 57f9554a28f1858c009b4c4f0fdcb42079f4c447 > 8154739: AArch64: TemplateTable::fast_xaccess loads in wrong mode > Reviewed-by: roland > > diff --git a/src/cpu/aarch64/vm/templateTable_aarch64.cpp b/src/cpu/aarch64/vm/templateTable_aarch64.cpp > --- a/src/cpu/aarch64/vm/templateTable_aarch64.cpp > +++ b/src/cpu/aarch64/vm/templateTable_aarch64.cpp > @@ -2982,7 +2982,7 @@ > __ null_check(r0); > switch (state) { > case itos: > - __ ldr(r0, Address(r0, r1, Address::lsl(0))); > + __ ldrw(r0, Address(r0, r1, Address::lsl(0))); > break; > case atos: > __ load_heap_oop(r0, Address(r0, r1, Address::lsl(0))); > @@ -3000,7 +3000,7 @@ > __ ldrw(r3, Address(r2, in_bytes(ConstantPoolCache::base_offset() + > ConstantPoolCacheEntry::flags_offset()))); > __ tbz(r3, ConstantPoolCacheEntry::is_volatile_shift, notVolatile); > - __ membar(MacroAssembler::LoadLoad); > + __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore); > __ bind(notVolatile); > } > > > > From erik.joelsson at oracle.com Wed Apr 20 15:27:33 2016 From: erik.joelsson at oracle.com (Erik Joelsson) Date: Wed, 20 Apr 2016 17:27:33 +0200 Subject: RFR: JDK-8150601: Remove the old Hotspot build system Message-ID: <57179FE5.4080906@oracle.com> The new Hotspot Build System has now been in place for a week and a half. I have not heard any major complaints so I think the time has come to remove the old. This patch removes all the old, now unused, makefiles in hotspot/make. It also removes the support for using these makefiles from configure and the top level make dir. Finally it moves all the new makefiles from hotspot/makefiles to hotspot/make. With this change, we will be able to conclude JEP 284. Bug: https://bugs.openjdk.java.net/browse/JDK-8150601 Webrev: http://cr.openjdk.java.net/~erikj/8150601/webrev.01/ /Erik From stephen.felts at oracle.com Wed Apr 20 14:15:30 2016 From: stephen.felts at oracle.com (Stephen Felts) Date: Wed, 20 Apr 2016 07:15:30 -0700 (PDT) Subject: JMH and JDK9 In-Reply-To: References: <56FBFA22.2060204@redhat.com> <56FBFB05.9050706@oracle.com> <56FBFCCD.1000704@oracle.com> <428721B0-47A4-453E-89C2-1D6CA09DEC75@oracle.com> <56FE29EE.1090801@oracle.com> <0C6C1D21-1592-426A-B9DE-33458FC90CF9@oracle.com> <57038495.40603@redhat.com> <570390AE.6050406@oracle.com> <5703C8B1.5080501@redhat.com> Message-ID: <10982358-95d5-40c1-8e20-c153e355d495@default> There was a bug that was just fixed yesterday such that the Java EE classes were not hidden. It should have been fixed in the last nightly 114 build. As long as the necessary Java EE API classes are on the classpath, no command line options are needed. If you have the Java EE JTA API jar in the classpath, then use of javax.transaction.Synchronization (which is not in the JDK but is in the JTA API jar) will be resolved for both javac and java. -----Original Message----- From: Sanne Grinovero [mailto:sanne at redhat.com] Sent: Wednesday, April 20, 2016 8:29 AM To: Alan Bateman Cc: jigsaw-dev; hotspot-dev Source Developers Subject: Re: JMH and JDK9 On Tue, Apr 5, 2016 at 3:16 PM, Andrew Dinn wrote: > On 05/04/16 11:17, Alan Bateman wrote: > . . . >> We recently updated JEP 261 proposing that "java.se" be the only >> java.* root module that is resolved when compiling code in the >> unnamed module or at runtime when the main class is loaded from the >> class path [1]. 
On the surface then it will "appear" to developers >> that the JDK does not have the EE components but from everything >> we've seen so far, then those EE components are usually on the class path anyway. > > Ah ok, so this means that the problem has been punted to the other > foot i.e. the only apps affected will be those which i) don't have an > EE jar on their classpath and ii) require the (partial) stub > implementations provided by Java SE. That sounds much better since > such a configuration is of almost no use to anyone and hence is very unlikely to arise. Agreed: excellent idea! I'm eager to try it out so that we can resume testing of everything else too; I just tried my luck with build 9-ea+114 but it didn't seem to work: I'm going to assume this wasn't implemented yet, or should I double check how I'm building? Did I understand correctly that I won't need to pass any switch to neither java nor javac, as long as I have the JavaEE jar as external dependencies on my classpath? (i.e. if this build is "proven" on Java8 it should work on Java9 ?) Is there an issue tracker which I could follow to watch updates on this? Slightly unrelated, but is it expected that compilation is successful, even though (in my specific case) javax.transaction.Synchronization causes a java.lang.NoClassDefFoundError at runtime? Thanks, Sanne From matthias.baesken at sap.com Wed Apr 20 16:18:44 2016 From: matthias.baesken at sap.com (Baesken, Matthias) Date: Wed, 20 Apr 2016 16:18:44 +0000 Subject: RFR: os_linux.cpp parse_os_info gives non descriptive output on current SLES releases Message-ID: <86aaa90dd3154f36b7230e1c530a97f7@derote13de22.global.corp.sap> > Hi Matthais, I captured this email in bug and will fix the indentation > and will send this out as both of us as contributors. > > https://bugs.openjdk.java.net/browse/JDK-8154566 > > So this works properly on SuSE? > > Thanks! > Coleen Hi Coleen , thanks for opening the bug . I tested the patch on SLES12, and I get : SUSE Linux Enterprise Server 12 (x86_64) Regards, Matthias From aph at redhat.com Wed Apr 20 16:48:08 2016 From: aph at redhat.com (Andrew Haley) Date: Wed, 20 Apr 2016 17:48:08 +0100 Subject: RFR: JDK-8150601: Remove the old Hotspot build system In-Reply-To: <57179FE5.4080906@oracle.com> References: <57179FE5.4080906@oracle.com> Message-ID: <5717B2C8.3020905@redhat.com> On 04/20/2016 04:27 PM, Erik Joelsson wrote: > The new Hotspot Build System has now been in place for a week and a > half. I have not heard any major complaints so I think the time has come > to remove the old. There's been a CPU for the last week. We've been far too busy to test the new build system. Andrew. From coleen.phillimore at oracle.com Wed Apr 20 17:07:23 2016 From: coleen.phillimore at oracle.com (Coleen Phillimore) Date: Wed, 20 Apr 2016 13:07:23 -0400 Subject: RFR: os_linux.cpp parse_os_info gives non descriptive output on current SLES releases In-Reply-To: <86aaa90dd3154f36b7230e1c530a97f7@derote13de22.global.corp.sap> References: <86aaa90dd3154f36b7230e1c530a97f7@derote13de22.global.corp.sap> Message-ID: <5717B74B.9020403@oracle.com> This is great. Thanks for confirming. Now I need a Reviewer for the other RFR thread. Coleen On 4/20/16 12:18 PM, Baesken, Matthias wrote: >> Hi Matthais, I captured this email in bug and will fix the indentation >> and will send this out as both of us as contributors. >> >> https://bugs.openjdk.java.net/browse/JDK-8154566 >> >> So this works properly on SuSE? >> >> Thanks! >> Coleen > Hi Coleen , thanks for opening the bug . 
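As background for where a descriptive string like the one quoted below can come from: current SLES, like most other distributions, ships the standard /etc/os-release file. The stand-alone C sketch that follows just prints its PRETTY_NAME entry; it is only an illustration, not a claim about what the webrev itself reads (older SLES releases also have /etc/SuSE-release).

  #include <stdio.h>
  #include <string.h>

  /* Print the human-readable distribution name from /etc/os-release,
     e.g. PRETTY_NAME="SUSE Linux Enterprise Server 12". */
  int main(void) {
    FILE* fp = fopen("/etc/os-release", "r");
    if (fp == NULL) {
      perror("fopen");
      return 1;
    }
    char line[256];
    while (fgets(line, sizeof(line), fp) != NULL) {
      if (strncmp(line, "PRETTY_NAME=", 12) == 0) {
        fputs(line + 12, stdout);   /* still quoted and newline-terminated */
        break;
      }
    }
    fclose(fp);
    return 0;
  }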
> I tested the patch on SLES12, and I get : > > > SUSE Linux Enterprise Server 12 (x86_64) > > > Regards, Matthias From joseph.provino at oracle.com Wed Apr 20 18:08:41 2016 From: joseph.provino at oracle.com (Joseph Provino) Date: Wed, 20 Apr 2016 14:08:41 -0400 Subject: RFR (XXS) JDK-8153578,Default NewRatio is ignored when UseConcMarkSweepGC is used as GC algorithm Message-ID: <5717C5A9.1060406@oracle.com> Please review this tiny change. It only affects ParNew. Are there any unintended consequences? Passes JPRT. CR: JDK-8153578 Default NewRatio is ignored when UseConcMarkSweepGC is used as GC algorithm Webrev: http://cr.openjdk.java.net/~jprovino/8153578/webrev.00 From jesper.wilhelmsson at oracle.com Wed Apr 20 19:35:09 2016 From: jesper.wilhelmsson at oracle.com (Jesper Wilhelmsson) Date: Wed, 20 Apr 2016 21:35:09 +0200 Subject: RFR (XXS) JDK-8153578,Default NewRatio is ignored when UseConcMarkSweepGC is used as GC algorithm In-Reply-To: <5717C5A9.1060406@oracle.com> References: <5717C5A9.1060406@oracle.com> Message-ID: <5717D9ED.2070907@oracle.com> Hi Joe, If I understand the bug description correctly the problem is that NewSize becomes too small. According to the bug the VM ignores the NewRatio setting. Your change removes the setting of MaxNewSize in the case where NewSize has the default value. It's not obvious to me how that is related to the bug. There is an if statement enclosing the code you are changing. It has a comment that I find interesting: 1755 // If either MaxNewSize or NewRatio is set on the command line, 1756 // assume the user is trying to set the size of the young gen. 1757 if (FLAG_IS_DEFAULT(MaxNewSize) && FLAG_IS_DEFAULT(NewRatio)) { The interesting part is that the comment says "MaxNewSize OR NewRatio" but the code says "MaxNewSize AND NewRatio". This could be a typo in the comment, or it could be related to your bug. I don't think the fix here is to ignore the calculated preferred_max_new_size, but rather to figure out why it has the wrong value. preferred_max_new_size is calculated a few lines up, and it is based on NewRatio. The number of threads seems to be involved as well. Should it be? Usually things based on the number of threads tend to be wrong... 1748 MIN2(max_heap/(NewRatio+1), ScaleForWordSize(young_gen_per_worker * ParallelGCThreads)); young_gen_per_worker is CMSYoungGenPerWorker which defaults to things like 16M or 64M. ParallelGCThreads is usually just a handful, 8 on my machine. Since we take the smallest number of this thread based thing and the NewRatio calculation, I would guess the threads will limit the MaxNewSize quite a lot. I wonder if this isn't the bug you are looking for. It would make more sense to me if it was MAX of the two instead of MIN. You should probably consult whoever wrote this code. /Jesper Den 20/4/16 kl. 20:08, skrev Joseph Provino: > Please review this tiny change. It only affects ParNew. Are there any > unintended consequences? > > Passes JPRT. 
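To see how that MIN2 can make NewRatio look ignored, here is a stand-alone arithmetic sketch of the quoted line 1748. ScaleForWordSize is left out, and the heap size and NewRatio below are assumed example values; the 64M and 8 come from the figures mentioned in the mail, not from the bug report.

  #include <stdio.h>
  #include <stdint.h>

  #define MIN2(a, b) ((a) < (b) ? (a) : (b))   /* same meaning as HotSpot's MIN2 */

  int main(void) {
    const uint64_t M = 1024 * 1024;
    const uint64_t G = 1024 * M;

    uint64_t max_heap             = 4 * G;   /* assumed -Xmx4g                     */
    uint64_t NewRatio             = 2;       /* default value                      */
    uint64_t young_gen_per_worker = 64 * M;  /* CMSYoungGenPerWorker: "16M or 64M" */
    uint64_t ParallelGCThreads    = 8;       /* "a handful, 8 on my machine"       */

    uint64_t by_ratio   = max_heap / (NewRatio + 1);                 /* ~1365 MB */
    uint64_t by_workers = young_gen_per_worker * ParallelGCThreads;  /*   512 MB */
    uint64_t preferred_max_new_size = MIN2(by_ratio, by_workers);

    printf("by NewRatio:            %llu MB\n", (unsigned long long)(by_ratio / M));
    printf("by GC worker scaling:   %llu MB\n", (unsigned long long)(by_workers / M));
    printf("preferred_max_new_size: %llu MB\n",
           (unsigned long long)(preferred_max_new_size / M));
    /* With these numbers the worker-scaled term wins: the young gen is capped
       at 512 MB no matter what NewRatio asks for, so the flag appears ignored. */
    return 0;
  }

Jon Masamitsu's reply further down in this thread explains why the worker-scaled bound is there at all: to keep young-collection pause times roughly constant as heaps and GC thread counts grow.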
> > CR: JDK-8153578 Default NewRatio is ignored when UseConcMarkSweepGC is used as > GC algorithm > > Webrev: http://cr.openjdk.java.net/~jprovino/8153578/webrev.00 > > From jesper.wilhelmsson at oracle.com Wed Apr 20 19:36:42 2016 From: jesper.wilhelmsson at oracle.com (Jesper Wilhelmsson) Date: Wed, 20 Apr 2016 21:36:42 +0200 Subject: RFR (XXS) JDK-8153578,Default NewRatio is ignored when UseConcMarkSweepGC is used as GC algorithm In-Reply-To: <5717D9ED.2070907@oracle.com> References: <5717C5A9.1060406@oracle.com> <5717D9ED.2070907@oracle.com> Message-ID: <5717DA4A.7060203@oracle.com> And, by the way, I object to the subject. Changes in argument parsing are never XXS. ;) /Jesper Den 20/4/16 kl. 21:35, skrev Jesper Wilhelmsson: > Hi Joe, > > If I understand the bug description correctly the problem is that NewSize > becomes too small. According to the bug the VM ignores the NewRatio setting. > > Your change removes the setting of MaxNewSize in the case where NewSize has the > default value. It's not obvious to me how that is related to the bug. > > There is an if statement enclosing the code you are changing. It has a comment > that I find interesting: > > 1755 // If either MaxNewSize or NewRatio is set on the command line, > 1756 // assume the user is trying to set the size of the young gen. > 1757 if (FLAG_IS_DEFAULT(MaxNewSize) && FLAG_IS_DEFAULT(NewRatio)) { > > The interesting part is that the comment says "MaxNewSize OR NewRatio" but the > code says "MaxNewSize AND NewRatio". This could be a typo in the comment, or it > could be related to your bug. > > I don't think the fix here is to ignore the calculated preferred_max_new_size, > but rather to figure out why it has the wrong value. preferred_max_new_size is > calculated a few lines up, and it is based on NewRatio. The number of threads > seems to be involved as well. Should it be? Usually things based on the number > of threads tend to be wrong... > > 1748 MIN2(max_heap/(NewRatio+1), ScaleForWordSize(young_gen_per_worker * > ParallelGCThreads)); > > young_gen_per_worker is CMSYoungGenPerWorker which defaults to things like 16M > or 64M. ParallelGCThreads is usually just a handful, 8 on my machine. Since we > take the smallest number of this thread based thing and the NewRatio > calculation, I would guess the threads will limit the MaxNewSize quite a lot. I > wonder if this isn't the bug you are looking for. It would make more sense to me > if it was MAX of the two instead of MIN. You should probably consult whoever > wrote this code. > > /Jesper > > > Den 20/4/16 kl. 20:08, skrev Joseph Provino: >> Please review this tiny change. It only affects ParNew. Are there any >> unintended consequences? >> >> Passes JPRT. >> >> CR: JDK-8153578 Default NewRatio is ignored when UseConcMarkSweepGC is used as >> GC algorithm >> >> Webrev: http://cr.openjdk.java.net/~jprovino/8153578/webrev.00 >> >> From joseph.provino at oracle.com Wed Apr 20 20:00:15 2016 From: joseph.provino at oracle.com (Joe Provino) Date: Wed, 20 Apr 2016 16:00:15 -0400 Subject: RFR (XXS) JDK-8153578,Default NewRatio is ignored when UseConcMarkSweepGC is used as GC algorithm In-Reply-To: <5717D9ED.2070907@oracle.com> References: <5717C5A9.1060406@oracle.com> <5717D9ED.2070907@oracle.com> Message-ID: <5717DFCF.3070808@oracle.com> Hi Jesper, I had a feeling what looked like a simple fix wouldn't be that simple. ;-) On 04/20/2016 03:35 PM, Jesper Wilhelmsson wrote: > Hi Joe, > > If I understand the bug description correctly the problem is that > NewSize becomes too small. 
According to the bug the VM ignores the > NewRatio setting. > > Your change removes the setting of MaxNewSize in the case where > NewSize has the default value. It's not obvious to me how that is > related to the bug. > > There is an if statement enclosing the code you are changing. It has a > comment that I find interesting: > > 1755 // If either MaxNewSize or NewRatio is set on the command line, > 1756 // assume the user is trying to set the size of the young gen. > 1757 if (FLAG_IS_DEFAULT(MaxNewSize) && FLAG_IS_DEFAULT(NewRatio)) { > > The interesting part is that the comment says "MaxNewSize OR NewRatio" > but the code says "MaxNewSize AND NewRatio". This could be a typo in > the comment, or it could be related to your bug. If the code inside that "if" is executed, then the bug occurs. So I think changing to OR won't fix the problem. > > I don't think the fix here is to ignore the calculated > preferred_max_new_size, but rather to figure out why it has the wrong > value. preferred_max_new_size is calculated a few lines up, and it is > based on NewRatio. The number of threads seems to be involved as well. > Should it be? Usually things based on the number of threads tend to be > wrong... > > 1748 MIN2(max_heap/(NewRatio+1), > ScaleForWordSize(young_gen_per_worker * ParallelGCThreads)); > > young_gen_per_worker is CMSYoungGenPerWorker which defaults to things > like 16M or 64M. ParallelGCThreads is usually just a handful, 8 on my > machine. Since we take the smallest number of this thread based thing > and the NewRatio calculation, I would guess the threads will limit the > MaxNewSize quite a lot. I wonder if this isn't the bug you are looking > for. It would make more sense to me if it was MAX of the two instead > of MIN. Yeah, I don't know the reasoning behind this. > You should probably consult whoever wrote this code. That sounds like a good idea. I'm not sure who that person is. thanks. joe > > /Jesper > > > Den 20/4/16 kl. 20:08, skrev Joseph Provino: >> Please review this tiny change. It only affects ParNew. Are there any >> unintended consequences? >> >> Passes JPRT. >> >> CR: JDK-8153578 Default NewRatio is ignored when UseConcMarkSweepGC >> is used as >> GC algorithm >> >> Webrev: http://cr.openjdk.java.net/~jprovino/8153578/webrev.00 >> >> From joseph.provino at oracle.com Wed Apr 20 20:00:38 2016 From: joseph.provino at oracle.com (Joe Provino) Date: Wed, 20 Apr 2016 16:00:38 -0400 Subject: RFR (XXS) JDK-8153578,Default NewRatio is ignored when UseConcMarkSweepGC is used as GC algorithm In-Reply-To: <5717DA4A.7060203@oracle.com> References: <5717C5A9.1060406@oracle.com> <5717D9ED.2070907@oracle.com> <5717DA4A.7060203@oracle.com> Message-ID: <5717DFE6.5000200@oracle.com> So it seems! On 04/20/2016 03:36 PM, Jesper Wilhelmsson wrote: > And, by the way, I object to the subject. Changes in argument parsing > are never XXS. ;) > /Jesper > > Den 20/4/16 kl. 21:35, skrev Jesper Wilhelmsson: >> Hi Joe, >> >> If I understand the bug description correctly the problem is that >> NewSize >> becomes too small. According to the bug the VM ignores the NewRatio >> setting. >> >> Your change removes the setting of MaxNewSize in the case where >> NewSize has the >> default value. It's not obvious to me how that is related to the bug. >> >> There is an if statement enclosing the code you are changing. It has >> a comment >> that I find interesting: >> >> 1755 // If either MaxNewSize or NewRatio is set on the command line, >> 1756 // assume the user is trying to set the size of the young gen. 
>> 1757 if (FLAG_IS_DEFAULT(MaxNewSize) && FLAG_IS_DEFAULT(NewRatio)) { >> >> The interesting part is that the comment says "MaxNewSize OR >> NewRatio" but the >> code says "MaxNewSize AND NewRatio". This could be a typo in the >> comment, or it >> could be related to your bug. >> >> I don't think the fix here is to ignore the calculated >> preferred_max_new_size, >> but rather to figure out why it has the wrong value. >> preferred_max_new_size is >> calculated a few lines up, and it is based on NewRatio. The number of >> threads >> seems to be involved as well. Should it be? Usually things based on >> the number >> of threads tend to be wrong... >> >> 1748 MIN2(max_heap/(NewRatio+1), >> ScaleForWordSize(young_gen_per_worker * >> ParallelGCThreads)); >> >> young_gen_per_worker is CMSYoungGenPerWorker which defaults to things >> like 16M >> or 64M. ParallelGCThreads is usually just a handful, 8 on my machine. >> Since we >> take the smallest number of this thread based thing and the NewRatio >> calculation, I would guess the threads will limit the MaxNewSize >> quite a lot. I >> wonder if this isn't the bug you are looking for. It would make more >> sense to me >> if it was MAX of the two instead of MIN. You should probably consult >> whoever >> wrote this code. >> >> /Jesper >> >> >> Den 20/4/16 kl. 20:08, skrev Joseph Provino: >>> Please review this tiny change. It only affects ParNew. Are there any >>> unintended consequences? >>> >>> Passes JPRT. >>> >>> CR: JDK-8153578 Default NewRatio is ignored when UseConcMarkSweepGC >>> is used as >>> GC algorithm >>> >>> Webrev: http://cr.openjdk.java.net/~jprovino/8153578/webrev.00 >>> >>> From jesper.wilhelmsson at oracle.com Wed Apr 20 20:22:39 2016 From: jesper.wilhelmsson at oracle.com (Jesper Wilhelmsson) Date: Wed, 20 Apr 2016 22:22:39 +0200 Subject: RFR (XXS) JDK-8153578,Default NewRatio is ignored when UseConcMarkSweepGC is used as GC algorithm In-Reply-To: <5717DFCF.3070808@oracle.com> References: <5717C5A9.1060406@oracle.com> <5717D9ED.2070907@oracle.com> <5717DFCF.3070808@oracle.com> Message-ID: <5717E50F.8090505@oracle.com> Den 20/4/16 kl. 22:00, skrev Joe Provino: > Hi Jesper, I had a feeling what looked like a simple fix wouldn't be that > simple. ;-) > > On 04/20/2016 03:35 PM, Jesper Wilhelmsson wrote: >> Hi Joe, >> >> If I understand the bug description correctly the problem is that NewSize >> becomes too small. According to the bug the VM ignores the NewRatio setting. >> >> Your change removes the setting of MaxNewSize in the case where NewSize has >> the default value. It's not obvious to me how that is related to the bug. >> >> There is an if statement enclosing the code you are changing. It has a comment >> that I find interesting: >> >> 1755 // If either MaxNewSize or NewRatio is set on the command line, >> 1756 // assume the user is trying to set the size of the young gen. >> 1757 if (FLAG_IS_DEFAULT(MaxNewSize) && FLAG_IS_DEFAULT(NewRatio)) { >> >> The interesting part is that the comment says "MaxNewSize OR NewRatio" but the >> code says "MaxNewSize AND NewRatio". This could be a typo in the comment, or >> it could be related to your bug. > If the code inside that "if" is executed, then the bug occurs. So I think > changing to OR won't fix the problem. No, it's probably not that simple. And it may be unrelated to your bug. But either the code or the comment is wrong here. >> >> I don't think the fix here is to ignore the calculated preferred_max_new_size, >> but rather to figure out why it has the wrong value. 
preferred_max_new_size is >> calculated a few lines up, and it is based on NewRatio. The number of threads >> seems to be involved as well. Should it be? Usually things based on the number >> of threads tend to be wrong... >> >> 1748 MIN2(max_heap/(NewRatio+1), ScaleForWordSize(young_gen_per_worker * >> ParallelGCThreads)); >> >> young_gen_per_worker is CMSYoungGenPerWorker which defaults to things like 16M >> or 64M. ParallelGCThreads is usually just a handful, 8 on my machine. Since we >> take the smallest number of this thread based thing and the NewRatio >> calculation, I would guess the threads will limit the MaxNewSize quite a lot. >> I wonder if this isn't the bug you are looking for. It would make more sense >> to me if it was MAX of the two instead of MIN. > Yeah, I don't know the reasoning behind this. >> You should probably consult whoever wrote this code. > That sounds like a good idea. I'm not sure who that person is. Ramki added this code in the fix for 6896099 - Integrate CMS heap ergo with default heap sizing ergo If you just want to try it out, I would suggest to see what happens if you change MIN to MAX in line 1748. /Jesper > > thanks. > > joe > >> >> /Jesper >> >> >> Den 20/4/16 kl. 20:08, skrev Joseph Provino: >>> Please review this tiny change. It only affects ParNew. Are there any >>> unintended consequences? >>> >>> Passes JPRT. >>> >>> CR: JDK-8153578 Default NewRatio is ignored when UseConcMarkSweepGC is used as >>> GC algorithm >>> >>> Webrev: http://cr.openjdk.java.net/~jprovino/8153578/webrev.00 >>> >>> > From harold.seigel at oracle.com Wed Apr 20 20:23:03 2016 From: harold.seigel at oracle.com (harold seigel) Date: Wed, 20 Apr 2016 16:23:03 -0400 Subject: RFR (S): JDK-8152949: Jigsaw crash when Klass in _fixup_module_field_list is unloaded In-Reply-To: <571774F7.7000409@oracle.com> References: <570FEFAB.2070809@oracle.com> <570FFC4F.7020000@oracle.com> <5710E397.3020106@oracle.com> <5710E86F.3090606@oracle.com> <57111EBC.4000308@oracle.com> <571144ED.1050902@oracle.com> <5714876C.2050207@oracle.com> <5714C57C.1000109@oracle.com> <571542C2.5060707@oracle.com> <571593D5.7010504@oracle.com> <57168B85.9090803@oracle.com> <571693FA.3060807@oracle.com> <57175D0B.6050604@oracle.com> <571774F7.7000409@oracle.com> Message-ID: <5717E527.1080406@oracle.com> Hi Lois, Your changes look good. Harold On 4/20/2016 8:24 AM, Coleen Phillimore wrote: > > > On 4/20/16 6:42 AM, Lois Foltan wrote: >> >> On 4/19/2016 4:24 PM, Coleen Phillimore wrote: >>> >>> Hi, this is getting long. >>> >>> On 4/19/16 3:48 PM, Lois Foltan wrote: >>>> >>>> On 4/18/2016 10:11 PM, David Holmes wrote: >>>>> Hi Lois, >>>>> >>>>> On 19/04/2016 6:25 AM, Lois Foltan wrote: >>>>>> >>>>>> On 4/18/2016 7:31 AM, Lois Foltan wrote: >>>>>>> >>>>>>> On 4/18/2016 3:06 AM, Stefan Karlsson wrote: >>>>>>>> On 2016-04-15 21:45, Alan Bateman wrote: >>>>>>>>> >>>>>>>>> On 15/04/2016 18:02, Lois Foltan wrote: >>>>>>>>>> >>>>>>>>>> Hi Stefan, >>>>>>>>>> >>>>>>>>>> In start up before module system initialization in complete I >>>>>>>>>> believe the VM is single threaded, so the increment/decrement >>>>>>>>>> reference counts do not need to be atomic. Adding it is a >>>>>>>>>> defensive move in case the reference count is ever used passed >>>>>>>>>> start up in the future. It kind of does seem a bit excessive, >>>>>>>>>> sounds like you agree? >>>>>>>>> There will be a number of threads running before the base >>>>>>>>> module is >>>>>>>>> defined to the VM. 
As things stand the the java threads at this >>>>>>>>> point will be the Common-Cleaner, Finalizer, Reference Handler >>>>>>>>> and >>>>>>>>> Signal Handler. >>>>>>>> >>>>>>>> So, are you saying that we need the atomics? >>>>>>>> >>>>>>>> The java_lang_Class::create_mirror function isn't multi-thread >>>>>>>> safe, >>>>>>>> and must already be guarded by a lock (SystemDictionary_lock >>>>>>>> AFAICT). >>>>>>>> The increment in Unsafe_DefineAnonymousClass0, will only be done >>>>>>>> once, for the single InstanceKlass instance in the CLD. And all >>>>>>>> reads >>>>>>>> of _keep_alive from the GC are done during safepoints. >>>>>>> The anonymous class is inserted in the fixup mirror and fixup >>>>>>> module >>>>>>> lists during java_lang_Class::create_mirror() before it is made >>>>>>> public >>>>>>> or "published" as loaded. So the two instances where the reference >>>>>>> count is incremented, Unsafe_DefineAnonymousClass0 and in >>>>>>> java_lang_Class::create_mirror(), are guarded by a lock as well >>>>>>> as the >>>>>>> decrement in Unsafe_DefineAnonymousClass0. No other thread has >>>>>>> access >>>>>>> to the class during this time, as it is being loaded. >>>>>>>> >>>>>>>> How does ModuleEntryTable::patch_javabase_entries guard against >>>>>>>> concurrent inserts into the _fixup_module_field_list list? >>>>>>> That leaves the decrement in >>>>>>> ModuleEntryTable::patch_javabase_entries() as possibly >>>>>>> unguarded. This >>>>>>> only occurs when the VM is called to define the module >>>>>>> java.base. I >>>>>>> believe this should be okay but will double check. >>>>>> >>>>>> One small change in modules.cpp/define_javabase_module() to >>>>>> ensure that >>>>>> only one definition attempt of java.base will occur and thus only >>>>>> one >>>>>> call to ModuleEntryTable::patch_javabase_entries(). If a situation >>>>>> arises where java.base is trying to be multiply defined, >>>>>> according to >>>>>> the expected error conditions for JVM_DefineModule(), an >>>>>> IllegalArgumentException should be thrown. >>>>>> >>>>>> I have also added a comment in classfile/classLoaderData.hpp >>>>>> explaining >>>>>> why _keep_alive does need to be defined volatile or atomic. >>>>> >>>>> Can you add assertions to check that _keep_alive is only modified >>>>> under the protection of the lock (with a special case perhaps for >>>>> the unguarded java.base case) ? >>>> >>>> Hi David, >>>> >>>> Thanks for the review. I misspoke when I indicated that the two >>>> increments and the one decrement of the reference counter that >>>> occur during a call to the Unsafe_DefineAnonymous0() method were >>>> guarded under a lock. However, due to the way anonymous classes are >>>> created only a single non-GC thread will have access to the >>>> _keep_alive field during this time. And as Stefan indicates above, >>>> all reads of _keep_alive from the GC are done during safepoints. >>>> Each anonymous class, when defined, has a dedicated ClassLoaderData >>>> created for it. No other class shares the anonymous class' name or >>>> CLD. Due to this uniqueness, no other thread has knowledge about >>>> this anonymous class while it is being defined. It is only upon >>>> return from Unsafe_DefineAnonymous0(), that the anonymous class >>>> exists and other threads, at that point, can potentially access it. >>>> >>> >>> Ah interesting. Currently, this is true and why this is safe. 
If we >>> change the JVM to have *some* anonymous classes share CLD with their >>> host_class because the lifetimes are the same, then we'll have to >>> use atomic operations. >>> >>> http://cr.openjdk.java.net/~lfoltan/bug_jdk8152949_1/src/share/vm/classfile/classLoaderData.cpp.udiff.html >>> >>> >>> Can you put one comment directly above the inc_keep_alive() and >>> dec_keep_alive() functions to this effect here, just so we remember? >> >> Hi Coleen, >> >> I will add the comment before I commit. I might expand a bit on your >> last sentence a bit. > > Apparently the comment is false because we don't need the alive count > for non-anonymous ClassLoaderData ... there are more replies to this > thread. > > So maybe take out my last suggested sentence. > > Coleen >> >>> >>> // Anonymous classes have their own ClassLoaderData that is marked >>> to keep alive while the class is being parsed, and >>> // if the class appears on the module fixup list. >>> // If anonymous classes are changed to share with host_class, this >>> refcount needs to be changed to use atomic operations. >>> >>> *+ void ClassLoaderData::inc_keep_alive() {* >>> *+ assert(_keep_alive >= 0, "Invalid keep alive count");* >>> *+ _keep_alive++;* >>> *+ }* >>> *+ * >>> *+ void ClassLoaderData::dec_keep_alive() {* >>> *+ assert(_keep_alive > 0, "Invalid keep alive count");* >>> *+ _keep_alive--;* >>> *+ }* >>> *+ * >>> >>> More below. >>> >>>> Thanks, >>>> Lois >>>> >>>>> >>>>> Thanks, >>>>> David >>>>> >>>>>> Please review at: >>>>>> >>>>>> http://cr.openjdk.java.net/~lfoltan/bug_jdk8152949_1/ >>> >>> http://cr.openjdk.java.net/~lfoltan/bug_jdk8152949_1/src/share/vm/classfile/modules.cpp.frames.html >>> >>> >>> I'm not sure how this relates to the bug. >> >> This change ensures that java.base will not be multiply defined and >> thus only one call to ModuleEntryTable::patch_javabase_entries() will >> occur. A decrement of the reference count happens when an anonymous >> class is on the fixup module list post patching it with java.base >> >>> >>> Otherwise, the change looks good. >> >> Thanks again! >> >>> >>> Thanks, >>> Coleen >>> >>> >>>>>> >>>>>> Retesting in progress. >>>>>> >>>>>> Thanks, >>>>>> Lois >>>>>> >>>>>>> >>>>>>> Thanks, >>>>>>> Lois >>>>>>> >>>>>>>> >>>>>>>> thanks, >>>>>>>> StefanK >>>>>>>> >>>>>>>> >>>>>>>>> >>>>>>>>> -Alan >>>>>>>> >>>>>>> >>>>>> >>>> >>> >> > From lois.foltan at oracle.com Wed Apr 20 21:06:03 2016 From: lois.foltan at oracle.com (Lois Foltan) Date: Wed, 20 Apr 2016 17:06:03 -0400 Subject: RFR (S): JDK-8152949: Jigsaw crash when Klass in _fixup_module_field_list is unloaded In-Reply-To: <5717E527.1080406@oracle.com> References: <570FEFAB.2070809@oracle.com> <570FFC4F.7020000@oracle.com> <5710E397.3020106@oracle.com> <5710E86F.3090606@oracle.com> <57111EBC.4000308@oracle.com> <571144ED.1050902@oracle.com> <5714876C.2050207@oracle.com> <5714C57C.1000109@oracle.com> <571542C2.5060707@oracle.com> <571593D5.7010504@oracle.com> <57168B85.9090803@oracle.com> <571693FA.3060807@oracle.com> <57175D0B.6050604@oracle.com> <571774F7.7000409@oracle.com> <5717E527.1080406@oracle.com> Message-ID: <5717EF3B.6070702@oracle.com> On 4/20/2016 4:23 PM, harold seigel wrote: > Hi Lois, > > Your changes look good. Thank you Harold for the review! Lois > > Harold > > On 4/20/2016 8:24 AM, Coleen Phillimore wrote: >> >> >> On 4/20/16 6:42 AM, Lois Foltan wrote: >>> >>> On 4/19/2016 4:24 PM, Coleen Phillimore wrote: >>>> >>>> Hi, this is getting long. 
>>>> >>>> On 4/19/16 3:48 PM, Lois Foltan wrote: >>>>> >>>>> On 4/18/2016 10:11 PM, David Holmes wrote: >>>>>> Hi Lois, >>>>>> >>>>>> On 19/04/2016 6:25 AM, Lois Foltan wrote: >>>>>>> >>>>>>> On 4/18/2016 7:31 AM, Lois Foltan wrote: >>>>>>>> >>>>>>>> On 4/18/2016 3:06 AM, Stefan Karlsson wrote: >>>>>>>>> On 2016-04-15 21:45, Alan Bateman wrote: >>>>>>>>>> >>>>>>>>>> On 15/04/2016 18:02, Lois Foltan wrote: >>>>>>>>>>> >>>>>>>>>>> Hi Stefan, >>>>>>>>>>> >>>>>>>>>>> In start up before module system initialization in complete I >>>>>>>>>>> believe the VM is single threaded, so the increment/decrement >>>>>>>>>>> reference counts do not need to be atomic. Adding it is a >>>>>>>>>>> defensive move in case the reference count is ever used passed >>>>>>>>>>> start up in the future. It kind of does seem a bit excessive, >>>>>>>>>>> sounds like you agree? >>>>>>>>>> There will be a number of threads running before the base >>>>>>>>>> module is >>>>>>>>>> defined to the VM. As things stand the the java threads at this >>>>>>>>>> point will be the Common-Cleaner, Finalizer, Reference >>>>>>>>>> Handler and >>>>>>>>>> Signal Handler. >>>>>>>>> >>>>>>>>> So, are you saying that we need the atomics? >>>>>>>>> >>>>>>>>> The java_lang_Class::create_mirror function isn't multi-thread >>>>>>>>> safe, >>>>>>>>> and must already be guarded by a lock (SystemDictionary_lock >>>>>>>>> AFAICT). >>>>>>>>> The increment in Unsafe_DefineAnonymousClass0, will only be done >>>>>>>>> once, for the single InstanceKlass instance in the CLD. And >>>>>>>>> all reads >>>>>>>>> of _keep_alive from the GC are done during safepoints. >>>>>>>> The anonymous class is inserted in the fixup mirror and fixup >>>>>>>> module >>>>>>>> lists during java_lang_Class::create_mirror() before it is made >>>>>>>> public >>>>>>>> or "published" as loaded. So the two instances where the >>>>>>>> reference >>>>>>>> count is incremented, Unsafe_DefineAnonymousClass0 and in >>>>>>>> java_lang_Class::create_mirror(), are guarded by a lock as well >>>>>>>> as the >>>>>>>> decrement in Unsafe_DefineAnonymousClass0. No other thread has >>>>>>>> access >>>>>>>> to the class during this time, as it is being loaded. >>>>>>>>> >>>>>>>>> How does ModuleEntryTable::patch_javabase_entries guard against >>>>>>>>> concurrent inserts into the _fixup_module_field_list list? >>>>>>>> That leaves the decrement in >>>>>>>> ModuleEntryTable::patch_javabase_entries() as possibly >>>>>>>> unguarded. This >>>>>>>> only occurs when the VM is called to define the module >>>>>>>> java.base. I >>>>>>>> believe this should be okay but will double check. >>>>>>> >>>>>>> One small change in modules.cpp/define_javabase_module() to >>>>>>> ensure that >>>>>>> only one definition attempt of java.base will occur and thus >>>>>>> only one >>>>>>> call to ModuleEntryTable::patch_javabase_entries(). If a situation >>>>>>> arises where java.base is trying to be multiply defined, >>>>>>> according to >>>>>>> the expected error conditions for JVM_DefineModule(), an >>>>>>> IllegalArgumentException should be thrown. >>>>>>> >>>>>>> I have also added a comment in classfile/classLoaderData.hpp >>>>>>> explaining >>>>>>> why _keep_alive does need to be defined volatile or atomic. >>>>>> >>>>>> Can you add assertions to check that _keep_alive is only modified >>>>>> under the protection of the lock (with a special case perhaps for >>>>>> the unguarded java.base case) ? >>>>> >>>>> Hi David, >>>>> >>>>> Thanks for the review. 
I misspoke when I indicated that the two >>>>> increments and the one decrement of the reference counter that >>>>> occur during a call to the Unsafe_DefineAnonymous0() method were >>>>> guarded under a lock. However, due to the way anonymous classes >>>>> are created only a single non-GC thread will have access to the >>>>> _keep_alive field during this time. And as Stefan indicates >>>>> above, all reads of _keep_alive from the GC are done during >>>>> safepoints. Each anonymous class, when defined, has a dedicated >>>>> ClassLoaderData created for it. No other class shares the >>>>> anonymous class' name or CLD. Due to this uniqueness, no other >>>>> thread has knowledge about this anonymous class while it is being >>>>> defined. It is only upon return from Unsafe_DefineAnonymous0(), >>>>> that the anonymous class exists and other threads, at that point, >>>>> can potentially access it. >>>>> >>>> >>>> Ah interesting. Currently, this is true and why this is safe. If >>>> we change the JVM to have *some* anonymous classes share CLD with >>>> their host_class because the lifetimes are the same, then we'll >>>> have to use atomic operations. >>>> >>>> http://cr.openjdk.java.net/~lfoltan/bug_jdk8152949_1/src/share/vm/classfile/classLoaderData.cpp.udiff.html >>>> >>>> >>>> Can you put one comment directly above the inc_keep_alive() and >>>> dec_keep_alive() functions to this effect here, just so we remember? >>> >>> Hi Coleen, >>> >>> I will add the comment before I commit. I might expand a bit on >>> your last sentence a bit. >> >> Apparently the comment is false because we don't need the alive count >> for non-anonymous ClassLoaderData ... there are more replies to this >> thread. >> >> So maybe take out my last suggested sentence. >> >> Coleen >>> >>>> >>>> // Anonymous classes have their own ClassLoaderData that is marked >>>> to keep alive while the class is being parsed, and >>>> // if the class appears on the module fixup list. >>>> // If anonymous classes are changed to share with host_class, this >>>> refcount needs to be changed to use atomic operations. >>>> >>>> *+ void ClassLoaderData::inc_keep_alive() {* >>>> *+ assert(_keep_alive >= 0, "Invalid keep alive count");* >>>> *+ _keep_alive++;* >>>> *+ }* >>>> *+ * >>>> *+ void ClassLoaderData::dec_keep_alive() {* >>>> *+ assert(_keep_alive > 0, "Invalid keep alive count");* >>>> *+ _keep_alive--;* >>>> *+ }* >>>> *+ * >>>> >>>> More below. >>>> >>>>> Thanks, >>>>> Lois >>>>> >>>>>> >>>>>> Thanks, >>>>>> David >>>>>> >>>>>>> Please review at: >>>>>>> >>>>>>> http://cr.openjdk.java.net/~lfoltan/bug_jdk8152949_1/ >>>> >>>> http://cr.openjdk.java.net/~lfoltan/bug_jdk8152949_1/src/share/vm/classfile/modules.cpp.frames.html >>>> >>>> >>>> I'm not sure how this relates to the bug. >>> >>> This change ensures that java.base will not be multiply defined and >>> thus only one call to ModuleEntryTable::patch_javabase_entries() >>> will occur. A decrement of the reference count happens when an >>> anonymous class is on the fixup module list post patching it with >>> java.base >>> >>>> >>>> Otherwise, the change looks good. >>> >>> Thanks again! >>> >>>> >>>> Thanks, >>>> Coleen >>>> >>>> >>>>>>> >>>>>>> Retesting in progress. 
>>>>>>> >>>>>>> Thanks, >>>>>>> Lois >>>>>>> >>>>>>>> >>>>>>>> Thanks, >>>>>>>> Lois >>>>>>>> >>>>>>>>> >>>>>>>>> thanks, >>>>>>>>> StefanK >>>>>>>>> >>>>>>>>> >>>>>>>>>> >>>>>>>>>> -Alan >>>>>>>>> >>>>>>>> >>>>>>> >>>>> >>>> >>> >> > From mikael.vidstedt at oracle.com Wed Apr 20 21:41:17 2016 From: mikael.vidstedt at oracle.com (Mikael Vidstedt) Date: Wed, 20 Apr 2016 14:41:17 -0700 Subject: RFR(S): 8154209: Remove client VM from default JIB profile on windows-x86 and linux-x86 In-Reply-To: <57149250.10008@oracle.com> References: <570FC852.10808@oracle.com> <57149250.10008@oracle.com> Message-ID: <5717F77D.6080308@oracle.com> Good catch. Updated webrevs here: top: http://cr.openjdk.java.net/~mikael/webrevs/8154209/webrev.02/webrev/ hotspot: http://cr.openjdk.java.net/~mikael/webrevs/8154209/webrev.02/hotspot/webrev/ Incremental webrevs (from webrev.01): top: http://cr.openjdk.java.net/~mikael/webrevs/8154209/webrev.02.incr/webrev/ hotspot: N/A (same as webrev.01) Cheers, Mikael On 4/18/2016 12:52 AM, Leonid Mesnik wrote: > Hi > > > Shouldn't be jprt targets in jprt.properties updates to stop using > client also? > > http://hg.openjdk.java.net/jdk9/hs/file/645c48292130/make/jprt.properties > > line 206 - 214 > # Test target list (no fastdebug & limited c2 testing) > my.test.target.set= \ > solaris_sparcv9_5.11-product-c2-TESTNAME, \ > solaris_x64_5.11-product-c2-TESTNAME, \ > linux_i586_3.8-product-{c1|c2}-TESTNAME, \ > linux_x64_3.8-product-c2-TESTNAME, \ > macosx_x64_10.9-product-c2-TESTNAME, \ > windows_i586_6.3-product-c1-TESTNAME, \ > windows_x64_6.3-product-c2-TESTNAME > > and > line 294-299 > # JCK test targets in test/Makefile (no windows) > my.test.target.set.jck= \ > solaris_sparcv9_5.11-product-c2-JCK7TESTRULE, \ > solaris_x64_5.11-product-c2-JCK7TESTRULE, \ > linux_i586_3.8-product-c1-JCK7TESTRULE, \ > linux_x64_3.8-product-c2-JCK7TESTRULE > > Leonid > > On 14.04.2016 19:41, Mikael Vidstedt wrote: >> >> Please review the following change which removes the "client" VM from >> the default JIB build profile on windows-x86 and linux-x86: >> >> Bug: https://bugs.openjdk.java.net/browse/JDK-8154209 >> Webrev (top): >> http://cr.openjdk.java.net/~mikael/webrevs/8154209/webrev.01/ >> Webrev (hotspot): >> http://cr.openjdk.java.net/~mikael/webrevs/8154209/webrev.01/hotspot/webrev/ >> >> >> When not including the client VM, the build system automatically >> creates a jvm.cfg which makes -client an alias for -server. At some >> point in the future we may choose to output a warning and/or refuse >> to start up if -client is specified, but at least for now silently >> falling back on the -server VM seems appropriate. >> >> The test/runtime/SharedArchiveFile/DefaultUseWithClient.java test >> assumes that CDS is always compiled in and enabled in the -client VM >> on windows-x86. Since -client will fall back on -server that is no >> longer true, so the test needs to be updated. I added an @ignore and >> filed the following issue to track fixing the test: >> >> https://bugs.openjdk.java.net/browse/JDK-8154204 >> >> >> Testing: >> >> In addition to a standard JPRT push job, Christian Tornqvist helped >> me run the runtime nightly tests and apart from the above mentioned >> test all tests were successful. 
>> >> Cheers, >> Mikael >> > From christian.thalinger at oracle.com Wed Apr 20 21:43:18 2016 From: christian.thalinger at oracle.com (Christian Thalinger) Date: Wed, 20 Apr 2016 11:43:18 -1000 Subject: RFR (L) 8154580: Save mirror in interpreter frame to enable cleanups of CLDClosure In-Reply-To: <5716E974.3030101@oracle.com> References: <57168C17.40307@oracle.com> <159176CE-675D-41A6-A13A-EBD7580E640C@oracle.com> <5716E974.3030101@oracle.com> Message-ID: <9A1C204A-3CFB-4AE3-BEAF-FB3ED4319E6F@oracle.com> > On Apr 19, 2016, at 4:29 PM, Coleen Phillimore wrote: > > > Chris, thank you for reviewing this. > > On 4/19/16 4:35 PM, Christian Thalinger wrote: >>> On Apr 19, 2016, at 9:50 AM, Coleen Phillimore wrote: >>> >>> Summary: GC walks the mirror using OopClosure rather than using CLDClosure in oops_interpreted_do() >>> >>> See bug for more description and justification. The changes are large but very redundant. The main change is in TemplateInterpreterGenerator::generate_fixed_frame(). >> + // Save oop Mirror (with padding) >> + __ load_mirror(rscratch1, rmethod); >> >> + // get mirror and store it in the frame so that this Method* is never >> + // reclaimed while it's running. >> + Register mirror = LcpoolCache; >> + __ load_mirror(mirror, Method); >> >> + // Push the mirror so this method isn't collected >> + __ load_mirror(rdx, rbi); >> >> Please use the same comment on all platforms. > > Yes, that's inconsistent. I changed all platforms to have this comment: > > // Get mirror and store it in the frame as GC root for this Method* > >>> open webrev at http://cr.openjdk.java.net/~coleenp/8154580.01/webrev >>> bug link https://bugs.openjdk.java.net/browse/JDK-8154580 >> src/share/vm/runtime/frame.cpp >> >> // The method pointer in the frame might be the only path to the method's >> // klass, and the klass needs to be kept alive while executing. The GCs >> // don't trace through method pointers, so typically in similar situations >> // the mirror or the class loader of the klass are installed as a GC root. >> >> - // To minimize the overhead of doing that here, we ask the GC to pass down a >> - // closure that knows how to keep klasses alive given a ClassLoaderData. >> - cld_f->do_cld(m->method_holder()->class_loader_data()); >> - } >> - >> - if (m->is_native() PPC32_ONLY(&& m->is_static())) { >> - f->do_oop(interpreter_frame_temp_oop_addr()); >> - } >> + // And it is here too. >> + f->do_oop(interpreter_frame_mirror_addr()); >> >> That comment is kinda funny now. It still hints at the old-way of doing things but ?it is here too?. > > I reworded it as: > > // The method pointer in the frame might be the only path to the method's > // klass, and the klass needs to be kept alive while executing. The GCs > // don't trace through method pointers, so the mirror of the method's klass > // is installed as a GC root. > f->do_oop(interpreter_frame_mirror_addr()); Excellent. > >>> Tested with hotspot-runtime-nightly and gc-nightly tests. >>> >>> Need testing with ppc and aarch64 open code. I implemented the changes but I can't test them. 
>> One obvious bug is that you copied the __ as well: >> >> +void MacroAssembler::load_mirror(Register dst, Register method) { >> + const int mirror_offset = in_bytes(Klass::java_mirror_offset()); >> + __ ldr(dst, Address(rmethod, Method::const_offset())); >> + __ ldr(dst, Address(dst, ConstMethod::constants_offset())); >> + __ ldr(dst, Address(dst, ConstantPool::pool_holder_offset_in_bytes())); >> + __ ldr(dst, Address(dst, mirror_offset)); >> +} >> >> Other than that it looks fine to me. Nothing obvious stood out. > > Oh, yes, this was wrong on both ppc and aarch64. I hope someone that has access to this platforms can test this patch. > > Thank you! > Coleen > >> >>> Thanks, >>> Coleen From coleen.phillimore at oracle.com Wed Apr 20 21:49:03 2016 From: coleen.phillimore at oracle.com (Coleen Phillimore) Date: Wed, 20 Apr 2016 17:49:03 -0400 Subject: RFR (L) 8154580: Save mirror in interpreter frame to enable cleanups of CLDClosure In-Reply-To: <9A1C204A-3CFB-4AE3-BEAF-FB3ED4319E6F@oracle.com> References: <57168C17.40307@oracle.com> <159176CE-675D-41A6-A13A-EBD7580E640C@oracle.com> <5716E974.3030101@oracle.com> <9A1C204A-3CFB-4AE3-BEAF-FB3ED4319E6F@oracle.com> Message-ID: <5717F94F.1000204@oracle.com> Thanks Chris! Coleen On 4/20/16 5:43 PM, Christian Thalinger wrote: > >> On Apr 19, 2016, at 4:29 PM, Coleen Phillimore >> > >> wrote: >> >> >> Chris, thank you for reviewing this. >> >> On 4/19/16 4:35 PM, Christian Thalinger wrote: >>>> On Apr 19, 2016, at 9:50 AM, Coleen Phillimore >>>> >>> > wrote: >>>> >>>> Summary: GC walks the mirror using OopClosure rather than using >>>> CLDClosure in oops_interpreted_do() >>>> >>>> See bug for more description and justification. The changes are >>>> large but very redundant. The main change is in >>>> TemplateInterpreterGenerator::generate_fixed_frame(). >>> + // Save oop Mirror (with padding) >>> + __ load_mirror(rscratch1, rmethod); >>> >>> + // get mirror and store it in the frame so that this Method* is never >>> + // reclaimed while it's running. >>> + Register mirror = LcpoolCache; >>> + __ load_mirror(mirror, Method); >>> >>> + // Push the mirror so this method isn't collected >>> + __ load_mirror(rdx, rbi); >>> >>> Please use the same comment on all platforms. >> >> Yes, that's inconsistent. I changed all platforms to have this comment: >> >> // Get mirror and store it in the frame as GC root for this Method* >> >>>> open webrev at >>>> http://cr.openjdk.java.net/~coleenp/8154580.01/webrev >>>> >>>> bug link https://bugs.openjdk.java.net/browse/JDK-8154580 >>> src/share/vm/runtime/frame.cpp >>> >>> // The method pointer in the frame might be the only path to >>> the method's >>> // klass, and the klass needs to be kept alive while executing. >>> The GCs >>> // don't trace through method pointers, so typically in similar >>> situations >>> // the mirror or the class loader of the klass are installed as >>> a GC root. >>> >>> - // To minimize the overhead of doing that here, we ask the GC >>> to pass down a >>> - // closure that knows how to keep klasses alive given a >>> ClassLoaderData. >>> - cld_f->do_cld(m->method_holder()->class_loader_data()); >>> - } >>> - >>> - if (m->is_native() PPC32_ONLY(&& m->is_static())) { >>> - f->do_oop(interpreter_frame_temp_oop_addr()); >>> - } >>> + // And it is here too. >>> + f->do_oop(interpreter_frame_mirror_addr()); >>> >>> That comment is kinda funny now. It still hints at the old-way of >>> doing things but ?it is here too?. 
>> >> I reworded it as: >> >> // The method pointer in the frame might be the only path to the >> method's >> // klass, and the klass needs to be kept alive while executing. The GCs >> // don't trace through method pointers, so the mirror of the >> method's klass >> // is installed as a GC root. >> f->do_oop(interpreter_frame_mirror_addr()); > > Excellent. > >> >>>> Tested with hotspot-runtime-nightly and gc-nightly tests. >>>> >>>> Need testing with ppc and aarch64 open code. I implemented the >>>> changes but I can't test them. >>> One obvious bug is that you copied the __ as well: >>> >>> +void MacroAssembler::load_mirror(Register dst, Register method) { >>> + const int mirror_offset = in_bytes(Klass::java_mirror_offset()); >>> + __ ldr(dst, Address(rmethod, Method::const_offset())); >>> + __ ldr(dst, Address(dst, ConstMethod::constants_offset())); >>> + __ ldr(dst, Address(dst, >>> ConstantPool::pool_holder_offset_in_bytes())); >>> + __ ldr(dst, Address(dst, mirror_offset)); >>> +} >>> >>> Other than that it looks fine to me. Nothing obvious stood out. >> >> Oh, yes, this was wrong on both ppc and aarch64. I hope someone >> that has access to this platforms can test this patch. >> >> Thank you! >> Coleen >> >>> >>>> Thanks, >>>> Coleen > From joseph.provino at oracle.com Wed Apr 20 21:56:42 2016 From: joseph.provino at oracle.com (Joe Provino) Date: Wed, 20 Apr 2016 17:56:42 -0400 Subject: RFR (XXS) JDK-8153578,Default NewRatio is ignored when UseConcMarkSweepGC is used as GC algorithm In-Reply-To: <5717E50F.8090505@oracle.com> References: <5717C5A9.1060406@oracle.com> <5717D9ED.2070907@oracle.com> <5717DFCF.3070808@oracle.com> <5717E50F.8090505@oracle.com> Message-ID: <5717FB1A.1070307@oracle.com> ok, thanks. On 04/20/2016 04:22 PM, Jesper Wilhelmsson wrote: > Den 20/4/16 kl. 22:00, skrev Joe Provino: >> Hi Jesper, I had a feeling what looked like a simple fix wouldn't be >> that >> simple. ;-) >> >> On 04/20/2016 03:35 PM, Jesper Wilhelmsson wrote: >>> Hi Joe, >>> >>> If I understand the bug description correctly the problem is that >>> NewSize >>> becomes too small. According to the bug the VM ignores the NewRatio >>> setting. >>> >>> Your change removes the setting of MaxNewSize in the case where >>> NewSize has >>> the default value. It's not obvious to me how that is related to the >>> bug. >>> >>> There is an if statement enclosing the code you are changing. It has >>> a comment >>> that I find interesting: >>> >>> 1755 // If either MaxNewSize or NewRatio is set on the command line, >>> 1756 // assume the user is trying to set the size of the young gen. >>> 1757 if (FLAG_IS_DEFAULT(MaxNewSize) && FLAG_IS_DEFAULT(NewRatio)) { >>> >>> The interesting part is that the comment says "MaxNewSize OR >>> NewRatio" but the >>> code says "MaxNewSize AND NewRatio". This could be a typo in the >>> comment, or >>> it could be related to your bug. >> If the code inside that "if" is executed, then the bug occurs. So I >> think >> changing to OR won't fix the problem. > > No, it's probably not that simple. And it may be unrelated to your > bug. But either the code or the comment is wrong here. > >>> >>> I don't think the fix here is to ignore the calculated >>> preferred_max_new_size, >>> but rather to figure out why it has the wrong value. >>> preferred_max_new_size is >>> calculated a few lines up, and it is based on NewRatio. The number >>> of threads >>> seems to be involved as well. Should it be? Usually things based on >>> the number >>> of threads tend to be wrong... 
>>> >>> 1748 MIN2(max_heap/(NewRatio+1), >>> ScaleForWordSize(young_gen_per_worker * >>> ParallelGCThreads)); >>> >>> young_gen_per_worker is CMSYoungGenPerWorker which defaults to >>> things like 16M >>> or 64M. ParallelGCThreads is usually just a handful, 8 on my >>> machine. Since we >>> take the smallest number of this thread based thing and the NewRatio >>> calculation, I would guess the threads will limit the MaxNewSize >>> quite a lot. >>> I wonder if this isn't the bug you are looking for. It would make >>> more sense >>> to me if it was MAX of the two instead of MIN. >> Yeah, I don't know the reasoning behind this. >>> You should probably consult whoever wrote this code. >> That sounds like a good idea. I'm not sure who that person is. > > Ramki added this code in the fix for 6896099 - Integrate CMS heap ergo > with default heap sizing ergo > > If you just want to try it out, I would suggest to see what happens if > you change MIN to MAX in line 1748. > /Jesper > >> >> thanks. >> >> joe >> >>> >>> /Jesper >>> >>> >>> Den 20/4/16 kl. 20:08, skrev Joseph Provino: >>>> Please review this tiny change. It only affects ParNew. Are there >>>> any >>>> unintended consequences? >>>> >>>> Passes JPRT. >>>> >>>> CR: JDK-8153578 Default NewRatio is ignored when UseConcMarkSweepGC >>>> is used as >>>> GC algorithm >>>> >>>> Webrev: http://cr.openjdk.java.net/~jprovino/8153578/webrev.00 >>>> >>>> >> From jon.masamitsu at oracle.com Wed Apr 20 22:29:35 2016 From: jon.masamitsu at oracle.com (Jon Masamitsu) Date: Wed, 20 Apr 2016 15:29:35 -0700 Subject: RFR (XXS) JDK-8153578,Default NewRatio is ignored when UseConcMarkSweepGC is used as GC algorithm In-Reply-To: <5717D9ED.2070907@oracle.com> References: <5717C5A9.1060406@oracle.com> <5717D9ED.2070907@oracle.com> Message-ID: <571802CF.7010000@oracle.com> On 04/20/2016 12:35 PM, Jesper Wilhelmsson wrote: > Hi Joe, > > If I understand the bug description correctly the problem is that > NewSize becomes too small. According to the bug the VM ignores the > NewRatio setting. > > Your change removes the setting of MaxNewSize in the case where > NewSize has the default value. It's not obvious to me how that is > related to the bug. > > There is an if statement enclosing the code you are changing. It has a > comment that I find interesting: > > 1755 // If either MaxNewSize or NewRatio is set on the command line, > 1756 // assume the user is trying to set the size of the young gen. > 1757 if (FLAG_IS_DEFAULT(MaxNewSize) && FLAG_IS_DEFAULT(NewRatio)) { > > The interesting part is that the comment says "MaxNewSize OR NewRatio" > but the code says "MaxNewSize AND NewRatio". This could be a typo in > the comment, or it could be related to your bug. > > I don't think the fix here is to ignore the calculated > preferred_max_new_size, but rather to figure out why it has the wrong > value. preferred_max_new_size is calculated a few lines up, and it is > based on NewRatio. The number of threads seems to be involved as well. > Should it be? Usually things based on the number of threads tend to be > wrong... The intent of this code was to control the young pause times by limiting the size of the young gen. The preferred size scaling with young_gen_per_worker * ParallelGCThreads was meant to take into account the fact you could have approximately the same pauses with larger heaps as the number of GC workers increases. I didn't add this code but I'm pretty sure it was added as a result of customer interaction. 
Jon > > 1748 MIN2(max_heap/(NewRatio+1), > ScaleForWordSize(young_gen_per_worker * ParallelGCThreads)); > > young_gen_per_worker is CMSYoungGenPerWorker which defaults to things > like 16M or 64M. ParallelGCThreads is usually just a handful, 8 on my > machine. Since we take the smallest number of this thread based thing > and the NewRatio calculation, I would guess the threads will limit the > MaxNewSize quite a lot. I wonder if this isn't the bug you are looking > for. It would make more sense to me if it was MAX of the two instead > of MIN. You should probably consult whoever wrote this code. > > /Jesper > > > Den 20/4/16 kl. 20:08, skrev Joseph Provino: >> Please review this tiny change. It only affects ParNew. Are there any >> unintended consequences? >> >> Passes JPRT. >> >> CR: JDK-8153578 Default NewRatio is ignored when UseConcMarkSweepGC >> is used as >> GC algorithm >> >> Webrev: http://cr.openjdk.java.net/~jprovino/8153578/webrev.00 >> >> From stefan.karlsson at oracle.com Thu Apr 21 04:59:29 2016 From: stefan.karlsson at oracle.com (Stefan Karlsson) Date: Thu, 21 Apr 2016 06:59:29 +0200 Subject: RFR (L) 8154580: Save mirror in interpreter frame to enable cleanups of CLDClosure In-Reply-To: <57168C17.40307@oracle.com> References: <57168C17.40307@oracle.com> Message-ID: <57185E31.9010705@oracle.com> GC parts look great! StefanK On 2016-04-19 21:50, Coleen Phillimore wrote: > Summary: GC walks the mirror using OopClosure rather than using > CLDClosure in oops_interpreted_do() > > See bug for more description and justification. The changes are large > but very redundant. The main change is in > TemplateInterpreterGenerator::generate_fixed_frame(). > > open webrev at http://cr.openjdk.java.net/~coleenp/8154580.01/webrev > bug link https://bugs.openjdk.java.net/browse/JDK-8154580 > > Tested with hotspot-runtime-nightly and gc-nightly tests. > > Need testing with ppc and aarch64 open code. I implemented the > changes but I can't test them. > > Thanks, > Coleen From Leonid.Mesnik at oracle.com Thu Apr 21 06:23:34 2016 From: Leonid.Mesnik at oracle.com (Leonid Mesnik) Date: Thu, 21 Apr 2016 09:23:34 +0300 Subject: RFR(S): 8154209: Remove client VM from default JIB profile on windows-x86 and linux-x86 In-Reply-To: <5717F77D.6080308@oracle.com> References: <570FC852.10808@oracle.com> <57149250.10008@oracle.com> <5717F77D.6080308@oracle.com> Message-ID: <571871E6.5050306@oracle.com> Mikael On 21.04.2016 00:41, Mikael Vidstedt wrote: > > Good catch. Updated webrevs here: > > top: http://cr.openjdk.java.net/~mikael/webrevs/8154209/webrev.02/webrev/ http://cr.openjdk.java.net/~mikael/webrevs/8154209/webrev.02/webrev/common/conf/jib-profiles.js.udiff.html Couldn't be *"--with-jvm-variants=client,server"* just completely removed now as for all 64bit profiles? http://cr.openjdk.java.net/~mikael/webrevs/8154209/webrev.02/webrev/make/jprt.properties.sdiff.html 213 windows_i586_6.3-product-c1-TESTNAME, \ I see that you just remove C1 testing. Wouldn't be better to replace it with c2? (Same for line 298) Leonid > hotspot: > http://cr.openjdk.java.net/~mikael/webrevs/8154209/webrev.02/hotspot/webrev/ > > Incremental webrevs (from webrev.01): > > top: > http://cr.openjdk.java.net/~mikael/webrevs/8154209/webrev.02.incr/webrev/ > hotspot: N/A (same as webrev.01) > > Cheers, > Mikael > > On 4/18/2016 12:52 AM, Leonid Mesnik wrote: >> Hi >> >> >> Shouldn't be jprt targets in jprt.properties updates to stop using >> client also? 
>> >> http://hg.openjdk.java.net/jdk9/hs/file/645c48292130/make/jprt.properties >> >> line 206 - 214 >> # Test target list (no fastdebug & limited c2 testing) >> my.test.target.set= \ >> solaris_sparcv9_5.11-product-c2-TESTNAME, \ >> solaris_x64_5.11-product-c2-TESTNAME, \ >> linux_i586_3.8-product-{c1|c2}-TESTNAME, \ >> linux_x64_3.8-product-c2-TESTNAME, \ >> macosx_x64_10.9-product-c2-TESTNAME, \ >> windows_i586_6.3-product-c1-TESTNAME, \ >> windows_x64_6.3-product-c2-TESTNAME >> >> and >> line 294-299 >> # JCK test targets in test/Makefile (no windows) >> my.test.target.set.jck= \ >> solaris_sparcv9_5.11-product-c2-JCK7TESTRULE, \ >> solaris_x64_5.11-product-c2-JCK7TESTRULE, \ >> linux_i586_3.8-product-c1-JCK7TESTRULE, \ >> linux_x64_3.8-product-c2-JCK7TESTRULE >> >> Leonid >> >> On 14.04.2016 19:41, Mikael Vidstedt wrote: >>> >>> Please review the following change which removes the "client" VM >>> from the default JIB build profile on windows-x86 and linux-x86: >>> >>> Bug: https://bugs.openjdk.java.net/browse/JDK-8154209 >>> Webrev (top): >>> http://cr.openjdk.java.net/~mikael/webrevs/8154209/webrev.01/ >>> Webrev (hotspot): >>> http://cr.openjdk.java.net/~mikael/webrevs/8154209/webrev.01/hotspot/webrev/ >>> >>> >>> When not including the client VM, the build system automatically >>> creates a jvm.cfg which makes -client an alias for -server. At some >>> point in the future we may choose to output a warning and/or refuse >>> to start up if -client is specified, but at least for now silently >>> falling back on the -server VM seems appropriate. >>> >>> The test/runtime/SharedArchiveFile/DefaultUseWithClient.java test >>> assumes that CDS is always compiled in and enabled in the -client VM >>> on windows-x86. Since -client will fall back on -server that is no >>> longer true, so the test needs to be updated. I added an @ignore and >>> filed the following issue to track fixing the test: >>> >>> https://bugs.openjdk.java.net/browse/JDK-8154204 >>> >>> >>> Testing: >>> >>> In addition to a standard JPRT push job, Christian Tornqvist helped >>> me run the runtime nightly tests and apart from the above mentioned >>> test all tests were successful. >>> >>> Cheers, >>> Mikael >>> >> > From david.holmes at oracle.com Thu Apr 21 06:40:47 2016 From: david.holmes at oracle.com (David Holmes) Date: Thu, 21 Apr 2016 16:40:47 +1000 Subject: RFR(S): 8154209: Remove client VM from default JIB profile on windows-x86 and linux-x86 In-Reply-To: <5717F77D.6080308@oracle.com> References: <570FC852.10808@oracle.com> <57149250.10008@oracle.com> <5717F77D.6080308@oracle.com> Message-ID: <571875EF.2090907@oracle.com> On 21/04/2016 7:41 AM, Mikael Vidstedt wrote: > > Good catch. Updated webrevs here: > > top: http://cr.openjdk.java.net/~mikael/webrevs/8154209/webrev.02/webrev/ Minor nit - jib-profiles.js: ! configure_args_32bit: ["--with-target-bits=32", "--with-jvm-variants=server"], I think server is the default for jvm_variant - hence no configure_args for 64-bit - so this can be elided. jprt.properties: I agree with Leonid that the c1 testing should be converted to c2 testing, not just removed. Of course we then need to see if those test sets run reliably with server on the 32-bit platforms. > hotspot: > http://cr.openjdk.java.net/~mikael/webrevs/8154209/webrev.02/hotspot/webrev/ Ignoring is simplest option for now. 
David > > Incremental webrevs (from webrev.01): > > top: > http://cr.openjdk.java.net/~mikael/webrevs/8154209/webrev.02.incr/webrev/ > hotspot: N/A (same as webrev.01) > > Cheers, > Mikael > > On 4/18/2016 12:52 AM, Leonid Mesnik wrote: >> Hi >> >> >> Shouldn't be jprt targets in jprt.properties updates to stop using >> client also? >> >> http://hg.openjdk.java.net/jdk9/hs/file/645c48292130/make/jprt.properties >> >> line 206 - 214 >> # Test target list (no fastdebug & limited c2 testing) >> my.test.target.set= \ >> solaris_sparcv9_5.11-product-c2-TESTNAME, \ >> solaris_x64_5.11-product-c2-TESTNAME, \ >> linux_i586_3.8-product-{c1|c2}-TESTNAME, \ >> linux_x64_3.8-product-c2-TESTNAME, \ >> macosx_x64_10.9-product-c2-TESTNAME, \ >> windows_i586_6.3-product-c1-TESTNAME, \ >> windows_x64_6.3-product-c2-TESTNAME >> >> and >> line 294-299 >> # JCK test targets in test/Makefile (no windows) >> my.test.target.set.jck= \ >> solaris_sparcv9_5.11-product-c2-JCK7TESTRULE, \ >> solaris_x64_5.11-product-c2-JCK7TESTRULE, \ >> linux_i586_3.8-product-c1-JCK7TESTRULE, \ >> linux_x64_3.8-product-c2-JCK7TESTRULE >> >> Leonid >> >> On 14.04.2016 19:41, Mikael Vidstedt wrote: >>> >>> Please review the following change which removes the "client" VM from >>> the default JIB build profile on windows-x86 and linux-x86: >>> >>> Bug: https://bugs.openjdk.java.net/browse/JDK-8154209 >>> Webrev (top): >>> http://cr.openjdk.java.net/~mikael/webrevs/8154209/webrev.01/ >>> Webrev (hotspot): >>> http://cr.openjdk.java.net/~mikael/webrevs/8154209/webrev.01/hotspot/webrev/ >>> >>> >>> >>> When not including the client VM, the build system automatically >>> creates a jvm.cfg which makes -client an alias for -server. At some >>> point in the future we may choose to output a warning and/or refuse >>> to start up if -client is specified, but at least for now silently >>> falling back on the -server VM seems appropriate. >>> >>> The test/runtime/SharedArchiveFile/DefaultUseWithClient.java test >>> assumes that CDS is always compiled in and enabled in the -client VM >>> on windows-x86. Since -client will fall back on -server that is no >>> longer true, so the test needs to be updated. I added an @ignore and >>> filed the following issue to track fixing the test: >>> >>> https://bugs.openjdk.java.net/browse/JDK-8154204 >>> >>> >>> Testing: >>> >>> In addition to a standard JPRT push job, Christian Tornqvist helped >>> me run the runtime nightly tests and apart from the above mentioned >>> test all tests were successful. >>> >>> Cheers, >>> Mikael >>> >> > From alexander.kulyakhtin at oracle.com Thu Apr 21 12:11:46 2016 From: alexander.kulyakhtin at oracle.com (Alexander Kulyakhtin) Date: Thu, 21 Apr 2016 05:11:46 -0700 (PDT) Subject: RFR:8153992:Some hotspot tests fail on compact2 due to an unnecessary test library dependency Message-ID: Hi, Could you, please, review this fix (updated to address the findings of the previous review) CR: https://bugs.openjdk.java.net/browse/JDK-8153992 "Some SVC tests fail on compact2 due to an unnecessary test library dependency" Webrev: http://cr.openjdk.java.net/~akulyakh/8153992_02/index.html Before the fix the ProcessTools.getProcessId() used the ManagementFactory.getRuntimeMXBean() API. The API is not available on compact2 and below. Therefore the tests failed. We are changing the ProcessTools.getProcessId() method to use the JDK 9 Process.getPid(). This eliminates the unnecessary dependency making the tests pass on compact2. 
Since, with this change ProcessTools.getProcessId() now returns long we are also trivially modifying all the affected tests. Best regards, Alexander ----- Original Message ----- From: mandy.chung at oracle.com To: alexander.kulyakhtin at oracle.com Cc: serviceability-dev at openjdk.java.net Sent: Thursday, April 21, 2016 12:03:14 AM GMT +03:00 Iraq Subject: Re: RFR:8153989:Some SVC tests fail on compact2 due to an unnecessary test library dependency > On Apr 20, 2016, at 9:06 AM, Alexander Kulyakhtin wrote: > > Hi, > > Could you, please, review this small tests-only fix: > > CR: https://bugs.openjdk.java.net/browse/JDK-8153992 "Some SVC tests fail on compact2 due to an unnecessary test library dependency" > Webrev: http://cr.openjdk.java.net/~akulyakh/8153992/test/testlibrary/jdk/test/lib/ProcessTools.java.udiff.html > > Before the fix the ProcessTools.getProcessId() used the ManagementFactory.getRuntimeMXBean() API. > The API is not available on compact2 and below. Therefore the tests failed. > > We are changing the ProcessTools.getProcessId() method to use the JDK 9 Process.getPid(). This eliminates the unnecessary dependency making the tests pass on compact2. > This looks okay. But I see that getVmInputArguments calls ManagementFactory.getRuntimeMXBean. So ProcessTools still has a dependency on java.management. The jdk test library ProcessTools::getProcessId has been long ago to call Process::getPid and the method is changed to return long. I thought that similar change would have been made in the hotspot test library at that time. > I am not sure how acceptable it is to cast from long to int this change. If it is not acceptable we can change the return type to long. > This however, will cause massive changes throughout the hotspot tests which presently expect getProcessId() to return int. IMO it would be good to return long or have the callsite to call ProcessHandle.current().getPid(). Mandy From alexander.kulyakhtin at oracle.com Thu Apr 21 12:23:54 2016 From: alexander.kulyakhtin at oracle.com (Alexander Kulyakhtin) Date: Thu, 21 Apr 2016 05:23:54 -0700 (PDT) Subject: RFR:8153992:Some hotspot tests fail on compact2 due to an unnecessary test library dependency Message-ID: <8dd7532c-48e9-420b-b145-e896d0e2a84f@default> Mandy, Thank you very much for your review. I have updated the fix per your comments, making ProcessTools.getProcessId() return long. The function ProcessTools.getVmInputArguments(), although does depend on the API from the java.management, is not used by any of the tests addressed by the CR. Best regards, Alexander ----- Original Message ----- From: alexander.kulyakhtin at oracle.com To: hotspot-dev at openjdk.java.net Cc: mandy.chung at oracle.com Sent: Thursday, April 21, 2016 3:11:46 PM GMT +03:00 Iraq Subject: Re: RFR:8153992:Some hotspot tests fail on compact2 due to an unnecessary test library dependency Hi, Could you, please, review this fix (updated to address the findings of the previous review) CR: https://bugs.openjdk.java.net/browse/JDK-8153992 "Some SVC tests fail on compact2 due to an unnecessary test library dependency" Webrev: http://cr.openjdk.java.net/~akulyakh/8153992_02/index.html Before the fix the ProcessTools.getProcessId() used the ManagementFactory.getRuntimeMXBean() API. The API is not available on compact2 and below. Therefore the tests failed. We are changing the ProcessTools.getProcessId() method to use the JDK 9 Process.getPid(). This eliminates the unnecessary dependency making the tests pass on compact2. 
Since, with this change ProcessTools.getProcessId() now returns long we are also trivially modifying all the affected tests. Best regards, Alexander ----- Original Message ----- From: mandy.chung at oracle.com To: alexander.kulyakhtin at oracle.com Cc: serviceability-dev at openjdk.java.net Sent: Thursday, April 21, 2016 12:03:14 AM GMT +03:00 Iraq Subject: Re: RFR:8153989:Some SVC tests fail on compact2 due to an unnecessary test library dependency > On Apr 20, 2016, at 9:06 AM, Alexander Kulyakhtin wrote: > > Hi, > > Could you, please, review this small tests-only fix: > > CR: https://bugs.openjdk.java.net/browse/JDK-8153992 "Some SVC tests fail on compact2 due to an unnecessary test library dependency" > Webrev: http://cr.openjdk.java.net/~akulyakh/8153992/test/testlibrary/jdk/test/lib/ProcessTools.java.udiff.html > > Before the fix the ProcessTools.getProcessId() used the ManagementFactory.getRuntimeMXBean() API. > The API is not available on compact2 and below. Therefore the tests failed. > > We are changing the ProcessTools.getProcessId() method to use the JDK 9 Process.getPid(). This eliminates the unnecessary dependency making the tests pass on compact2. > This looks okay. But I see that getVmInputArguments calls ManagementFactory.getRuntimeMXBean. So ProcessTools still has a dependency on java.management. The jdk test library ProcessTools::getProcessId has been long ago to call Process::getPid and the method is changed to return long. I thought that similar change would have been made in the hotspot test library at that time. > I am not sure how acceptable it is to cast from long to int this change. If it is not acceptable we can change the return type to long. > This however, will cause massive changes throughout the hotspot tests which presently expect getProcessId() to return int. IMO it would be good to return long or have the callsite to call ProcessHandle.current().getPid(). Mandy From coleen.phillimore at oracle.com Thu Apr 21 14:36:55 2016 From: coleen.phillimore at oracle.com (Coleen Phillimore) Date: Thu, 21 Apr 2016 10:36:55 -0400 Subject: RFR (L) 8154580: Save mirror in interpreter frame to enable cleanups of CLDClosure In-Reply-To: <57185E31.9010705@oracle.com> References: <57168C17.40307@oracle.com> <57185E31.9010705@oracle.com> Message-ID: <5718E587.5070108@oracle.com> Thank you Stefan! Coleen On 4/21/16 12:59 AM, Stefan Karlsson wrote: > GC parts look great! > > StefanK > > On 2016-04-19 21:50, Coleen Phillimore wrote: >> Summary: GC walks the mirror using OopClosure rather than using >> CLDClosure in oops_interpreted_do() >> >> See bug for more description and justification. The changes are >> large but very redundant. The main change is in >> TemplateInterpreterGenerator::generate_fixed_frame(). >> >> open webrev at http://cr.openjdk.java.net/~coleenp/8154580.01/webrev >> bug link https://bugs.openjdk.java.net/browse/JDK-8154580 >> >> Tested with hotspot-runtime-nightly and gc-nightly tests. >> >> Need testing with ppc and aarch64 open code. I implemented the >> changes but I can't test them. >> >> Thanks, >> Coleen > From mandy.chung at oracle.com Thu Apr 21 17:48:24 2016 From: mandy.chung at oracle.com (Mandy Chung) Date: Thu, 21 Apr 2016 10:48:24 -0700 Subject: RFR:8153992:Some hotspot tests fail on compact2 due to an unnecessary test library dependency In-Reply-To: <8dd7532c-48e9-420b-b145-e896d0e2a84f@default> References: <8dd7532c-48e9-420b-b145-e896d0e2a84f@default> Message-ID: The patch looks fine to me. 
Due to ProcessTools.getVmInputArguments() dependency, any test using ProcessTools has @modules java.management even it does not use this method. It?d be good to refactor ProcessTools.getVmInputArguments() and maybe other test library to eliminate unnecessary dependency. Shura has cleaned up jdk/test/lib/testlibrary in the jdk side: https://bugs.openjdk.java.net/browse/JDK-8139430 Can you file a separate issue to refactor the hotspot test library and similar fix to JDK-8139430 can be applied in the future? thanks Mandy > On Apr 21, 2016, at 5:23 AM, Alexander Kulyakhtin wrote: > > Mandy, > > Thank you very much for your review. > > I have updated the fix per your comments, making ProcessTools.getProcessId() return long. > > The function ProcessTools.getVmInputArguments(), although does depend on the API from the java.management, is not used by any of the tests addressed by the CR. > > Best regards, > Alexander > > ----- Original Message ----- > From: alexander.kulyakhtin at oracle.com > To: hotspot-dev at openjdk.java.net > Cc: mandy.chung at oracle.com > Sent: Thursday, April 21, 2016 3:11:46 PM GMT +03:00 Iraq > Subject: Re: RFR:8153992:Some hotspot tests fail on compact2 due to an unnecessary test library dependency > > Hi, > > Could you, please, review this fix (updated to address the findings of the previous review) > > CR: https://bugs.openjdk.java.net/browse/JDK-8153992 "Some SVC tests fail on compact2 due to an unnecessary test library dependency" > Webrev: http://cr.openjdk.java.net/~akulyakh/8153992_02/index.html > > Before the fix the ProcessTools.getProcessId() used the ManagementFactory.getRuntimeMXBean() API. > The API is not available on compact2 and below. Therefore the tests failed. > We are changing the ProcessTools.getProcessId() method to use the JDK 9 Process.getPid(). This eliminates the unnecessary dependency making the tests pass on compact2. > > Since, with this change ProcessTools.getProcessId() now returns long we are also trivially modifying all the affected tests. > > Best regards, > Alexander > > > > ----- Original Message ----- > From: mandy.chung at oracle.com > To: alexander.kulyakhtin at oracle.com > Cc: serviceability-dev at openjdk.java.net > Sent: Thursday, April 21, 2016 12:03:14 AM GMT +03:00 Iraq > Subject: Re: RFR:8153989:Some SVC tests fail on compact2 due to an unnecessary test library dependency > > >> On Apr 20, 2016, at 9:06 AM, Alexander Kulyakhtin wrote: >> >> Hi, >> >> Could you, please, review this small tests-only fix: >> >> CR: https://bugs.openjdk.java.net/browse/JDK-8153992 "Some SVC tests fail on compact2 due to an unnecessary test library dependency" >> Webrev: http://cr.openjdk.java.net/~akulyakh/8153992/test/testlibrary/jdk/test/lib/ProcessTools.java.udiff.html >> >> Before the fix the ProcessTools.getProcessId() used the ManagementFactory.getRuntimeMXBean() API. >> The API is not available on compact2 and below. Therefore the tests failed. >> >> We are changing the ProcessTools.getProcessId() method to use the JDK 9 Process.getPid(). This eliminates the unnecessary dependency making the tests pass on compact2. >> > > This looks okay. But I see that getVmInputArguments calls ManagementFactory.getRuntimeMXBean. So ProcessTools still has a dependency on java.management. > > The jdk test library ProcessTools::getProcessId has been long ago to call Process::getPid and the method is changed to return long. I thought that similar change would have been made in the hotspot test library at that time. 
> >> I am not sure how acceptable it is to cast from long to int this change. If it is not acceptable we can change the return type to long. >> This however, will cause massive changes throughout the hotspot tests which presently expect getProcessId() to return int. > > IMO it would be good to return long or have the callsite to call ProcessHandle.current().getPid(). > > Mandy > From christian.thalinger at oracle.com Thu Apr 21 18:16:42 2016 From: christian.thalinger at oracle.com (Christian Thalinger) Date: Thu, 21 Apr 2016 08:16:42 -1000 Subject: RFR: 8154739: AArch64: TemplateTable::fast_xaccess loads in wrong mode In-Reply-To: <57176947.7090501@redhat.com> References: <57176947.7090501@redhat.com> Message-ID: <2DE4DCCB-C743-4ED8-8287-8FD618307C19@oracle.com> > On Apr 20, 2016, at 1:34 AM, Andrew Haley wrote: > > I've seen weird unexplained (and unrepeatable) segfaults during JDK > builds for years. They're vary rare, and I thought it was to do with > flaky prototype hardware -- or at least that's how I kidded myself. > Yesterday I found a culprit. It's a load in one of the bytecode > accelerators, the one which replaces _aload_0, _fast_igetfield. > > Instead of a 32-bit word load, it's a 64-bit xword load. So how can > this lead to a crash? Well, if the object in question is at the very > end of the heap and the integer field is at the very end of the > object, you'll get a read which spills over onto the next page in > memory. This requires quite a coincidence of events, but it happens. Collecting FOSDEM talk material for years to come... > > Also, I discovered that the volatile load case had only a LoadLoad: it > needs a LoadStore too. > > Thanks, > > Andrew. > > > # HG changeset patch > # User aph > # Date 1461150850 0 > # Wed Apr 20 11:14:10 2016 +0000 > # Node ID 0df9b5892b864f27524480a698fe2550b4f9e531 > # Parent 57f9554a28f1858c009b4c4f0fdcb42079f4c447 > 8154739: AArch64: TemplateTable::fast_xaccess loads in wrong mode > Reviewed-by: roland > > diff --git a/src/cpu/aarch64/vm/templateTable_aarch64.cpp b/src/cpu/aarch64/vm/templateTable_aarch64.cpp > --- a/src/cpu/aarch64/vm/templateTable_aarch64.cpp > +++ b/src/cpu/aarch64/vm/templateTable_aarch64.cpp > @@ -2982,7 +2982,7 @@ > __ null_check(r0); > switch (state) { > case itos: > - __ ldr(r0, Address(r0, r1, Address::lsl(0))); > + __ ldrw(r0, Address(r0, r1, Address::lsl(0))); > break; > case atos: > __ load_heap_oop(r0, Address(r0, r1, Address::lsl(0))); > @@ -3000,7 +3000,7 @@ > __ ldrw(r3, Address(r2, in_bytes(ConstantPoolCache::base_offset() + > ConstantPoolCacheEntry::flags_offset()))); > __ tbz(r3, ConstantPoolCacheEntry::is_volatile_shift, notVolatile); > - __ membar(MacroAssembler::LoadLoad); > + __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore); > __ bind(notVolatile); > } > > > From igor.veresov at oracle.com Fri Apr 22 07:07:11 2016 From: igor.veresov at oracle.com (Igor Veresov) Date: Fri, 22 Apr 2016 00:07:11 -0700 Subject: RFR (XL): 8152664 - Support non-continuous CodeBlobs in HotSpot In-Reply-To: <20160420074330.GG19871@rbackman> References: <20160407121221.GQ9504@rbackman> <20160419053212.GB19871@rbackman> <20160420074330.GG19871@rbackman> Message-ID: <87808A47-FBE3-4F05-BCC1-0EF140086905@oracle.com> This looks good to me. igor > On Apr 20, 2016, at 12:43 AM, Rickard B?ckman wrote: > > Volker, > > sorry about the confusion. To me the only thing that made sense was to > make the push of the two commits atomically. 
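The failure mode is easy to reproduce outside the VM. Below is a small standalone POSIX/Linux sketch (not HotSpot code; the two-page layout, the field placement and all names are made up for illustration): a 4-byte field sits in the last four bytes of an accessible page, so a 32-bit read is fine while an 8-byte read from the same address would spill onto the following unmapped page and fault, just like the ldr-instead-of-ldrw case described above.

#include <cstdint>
#include <cstdio>
#include <cstring>
#include <sys/mman.h>
#include <unistd.h>

int main() {
  const size_t page = (size_t) sysconf(_SC_PAGESIZE);

  // Two anonymous pages; the second is made inaccessible and plays the
  // role of the unmapped memory just past the end of the heap.
  void* mem = mmap(NULL, 2 * page, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (mem == MAP_FAILED) { perror("mmap"); return 1; }
  char* base = (char*) mem;
  if (mprotect(base + page, page, PROT_NONE) != 0) { perror("mprotect"); return 1; }

  // A 32-bit "field" stored in the last four bytes of the accessible page.
  char* field_addr = base + page - sizeof(int32_t);
  int32_t field = 42;
  memcpy(field_addr, &field, sizeof(field));

  // 32-bit access (the ldrw case): stays inside the mapped page.
  int32_t ok;
  memcpy(&ok, field_addr, sizeof(ok));
  printf("32-bit load read %d\n", (int) ok);

  // 64-bit access (the ldr case) would touch 4 bytes of the protected page.
  // Uncommenting these two lines makes the program die with SIGSEGV:
  // int64_t boom;
  // memcpy(&boom, field_addr, sizeof(boom));

  munmap(base, 2 * page);
  return 0;
}

The over-wide read only goes wrong when the field happens to end exactly at a page boundary with nothing mapped behind it, which is why the crashes looked so rare and unreproducible.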
I didn't intend to include > your changes in the webrev but I forgot to pop the changes of my mq. > > I hope you got the answers on some of the oddities from Christian. But > to add to that. The reason we have NMethodIterator and > CompiledMethodIterator is that we sometimes want to make sure we only > iterate over nmethods. Hope that works. > > Here is an updated webrev with the other changes you commented on: > > http://cr.openjdk.java.net/~rbackman/8152664.4/ > > Thanks > /R > > On 04/19, Volker Simonis wrote: >> Hi Rickard, >> >> I just wanted to prepare the new webrev for 8151956 but I'm a little >> confused because I realized that your latest webrev already contains the >> changes which I had proposed for 8151956. >> >> But after thinking about it a little bit I think that's fine. If I prepare >> a patch for 8151956 which is intended to be pushed BEFORE 8152664 you'd had >> to adapt 8152664 to take care of the new changes introduced by 8151956. If >> I prepare a patch for 8151956 which is intended to be pushed AFTER 8152664 >> it would be hard to review it (because it will depend on 8152664) and we >> would get a change in the repo which would not build on PPC64 and AARCH64 >> which isn't nice either. >> >> So altogether I think it's fine to incorporate the fix for 8151956 into >> your change. Please only don't forget to close 8151956 as "fixed by >> 8152664" after you have pushed the changes for 8152664. >> >> I've verified that your last webrev builds and runs fine on Linux/ppc64 and >> AIX. You've also fixed all the issues I've addressed in my first mail to >> this thread and the typo in os_linux_aarch64.cpp found by Andrew - thanks! >> >> Some final nit-picking: >> >> - you still have the white-space only change in os_windows.cpp objected by >> Vladimir. >> >> - in codeBlob.cpp can you please update the following comments to reflect >> the new types: >> >> // Creates a simple CodeBlob. Sets up the size of the different >> regions.* CodeBlob::CodeBlob(const char* name, int header_size, int >> size, int frame_complete, int locs_size) {** assert(size == >> round_to(size, oopSize), "unaligned size");**+ >> RuntimeBlob::RuntimeBlob(const char* name, int header_size, int size, >> int frame_complete, int locs_size)* >> >> // Creates a CodeBlob from a CodeBuffer. Sets up the size of the >> different regions, // and copy code and relocation info.*! >> CodeBlob::CodeBlob(**! RuntimeBlob::RuntimeBlob(* >> >> >> - why do we need: >> >> *+ bool make_not_used() { return make_not_entrant(); }* >> >> it only forwards to make_not_entrant() and it is only used a single time in >> ciEnv.cpp: >> >> *! old->make_not_entrant();**! old->make_not_used();* >> >> >> - I don't understand why we need both NMethodIterator and >> CompiledMethodIterator - they're virtually the same and nmethod is >> currently the only subclass of CompiledMethod. Can you please be more >> specific why you've changed some instances of NMethodIterator to >> CompiledMethodIterator and others not. Without background information this >> makes no sense to me. Also, the advance method in CompiledMethodIterator >> isn't "inline" while the one in NMethodIterator is - don't know if this >> will be a performance problem. >> >> The rest looks good to me but please notice that I still haven't looked at >> all changes (especially not on the agent/ and dtrace/ files). So you should >> get at least one more reviewer for such a big change. 
>> >> Regards, >> Volker >> >> >> >> On Tue, Apr 19, 2016 at 7:32 AM, Rickard B?ckman >> wrote: >> >>> Here is the updated webrev, rebased and I think I have fixed all the >>> comments with one exception. >>> >>> I've avoided making CompiledMethodIterator and NMethodIterator a >>> template class for now. I agree we should do something to reuse the >>> parts that are identical but for now I think there will be a few more >>> changes to CompiledMethodIterator in an upcoming RFR. So can we hold off >>> with that change? >>> >>> Webrev: http://cr.openjdk.java.net/~rbackman/8152664.3/ >>> >>> Thanks >>> >>> On 04/07, Rickard B?ckman wrote: >>>> Hi, >>>> >>>> can I please have review for this patch please? >>>> >>>> So far CodeBlobs have required all the data (metadata, oops, code, etc) >>>> to be in one continuous blob With this patch we are looking to change >>>> that. It's been done by changing offsets in CodeBlob to addresses, >>>> making some methods virtual to allow different behavior and also >>>> creating a couple of new classes. CompiledMethod now sits inbetween >>>> CodeBlob and nmethod. >>>> >>>> CR: https://bugs.openjdk.java.net/browse/JDK-8152664 >>>> Webrev: http://cr.openjdk.java.net/~rbackman/8152664/ >>>> >>>> Thanks >>>> /R >>> /R >>> From magnus.ihse.bursie at oracle.com Fri Apr 22 08:20:57 2016 From: magnus.ihse.bursie at oracle.com (Magnus Ihse Bursie) Date: Fri, 22 Apr 2016 10:20:57 +0200 Subject: RFR: JDK-8150601: Remove the old Hotspot build system In-Reply-To: <57179FE5.4080906@oracle.com> References: <57179FE5.4080906@oracle.com> Message-ID: <016AC389-4D5B-441E-AFC0-7CAD2B8E971F@oracle.com> Looks good to me. /Magnus > 20 apr. 2016 kl. 17:27 skrev Erik Joelsson : > > The new Hotspot Build System has now been in place for a week and a half. I have not heard any major complaints so I think the time has come to remove the old. This patch removes all the old, now unused, makefiles in hotspot/make. It also removes the support for using these makefiles from configure and the top level make dir. Finally it moves all the new makefiles from hotspot/makefiles to hotspot/make. > > With this change, we will be able to conclude JEP 284. > > Bug: https://bugs.openjdk.java.net/browse/JDK-8150601 > Webrev: http://cr.openjdk.java.net/~erikj/8150601/webrev.01/ > > /Erik From sgehwolf at redhat.com Fri Apr 22 09:11:27 2016 From: sgehwolf at redhat.com (Severin Gehwolf) Date: Fri, 22 Apr 2016 11:11:27 +0200 Subject: [8u] RFR(s): 8154210: Zero: Better byte behaviour Message-ID: <1461316287.3683.11.camel@redhat.com> Hi, Could somebody please review and approve this fix for JDK8u Zero? The April CPU patches for OpenJDK broke this (bug 8132051). Please see the bug for details. The fixes are: - Fix the jump table which is used for Zero release builds, since ? JDK-8132051 introduced a new bytecode _fast_zputfield. - Make sure that results, which are a subword of a stack slot ? (possibly narrowed) are always at the lowest address. The fix was originally written by Andrew Haley. Bug:?https://bugs.openjdk.java.net/browse/JDK-8154210 webrev:?http://cr.openjdk.java.net/~sgehwolf/webrevs/JDK-8154210/webrev.01/ Once the April CPU patches hit JDK 9 trees we need to make sure this gets properly forward-ported. Unfortunately this hasn't happened yet (or maybe it didn't reach jdk9/hs yet). Thoughts? 
Thanks, Severin From tobias.hartmann at oracle.com Fri Apr 22 09:21:48 2016 From: tobias.hartmann at oracle.com (Tobias Hartmann) Date: Fri, 22 Apr 2016 11:21:48 +0200 Subject: RFR (XL): 8152664 - Support non-continuous CodeBlobs in HotSpot In-Reply-To: <20160420074330.GG19871@rbackman> References: <20160407121221.GQ9504@rbackman> <20160419053212.GB19871@rbackman> <20160420074330.GG19871@rbackman> Message-ID: <5719ED2C.1050307@oracle.com> Hi Rickard, On 20.04.2016 09:43, Rickard B?ckman wrote: > http://cr.openjdk.java.net/~rbackman/8152664.4/ Thanks for fixing the issues. This looks good to me (I haven't looked at all changes). Best regards, Tobias > > Thanks > /R > > On 04/19, Volker Simonis wrote: >> Hi Rickard, >> >> I just wanted to prepare the new webrev for 8151956 but I'm a little >> confused because I realized that your latest webrev already contains the >> changes which I had proposed for 8151956. >> >> But after thinking about it a little bit I think that's fine. If I prepare >> a patch for 8151956 which is intended to be pushed BEFORE 8152664 you'd had >> to adapt 8152664 to take care of the new changes introduced by 8151956. If >> I prepare a patch for 8151956 which is intended to be pushed AFTER 8152664 >> it would be hard to review it (because it will depend on 8152664) and we >> would get a change in the repo which would not build on PPC64 and AARCH64 >> which isn't nice either. >> >> So altogether I think it's fine to incorporate the fix for 8151956 into >> your change. Please only don't forget to close 8151956 as "fixed by >> 8152664" after you have pushed the changes for 8152664. >> >> I've verified that your last webrev builds and runs fine on Linux/ppc64 and >> AIX. You've also fixed all the issues I've addressed in my first mail to >> this thread and the typo in os_linux_aarch64.cpp found by Andrew - thanks! >> >> Some final nit-picking: >> >> - you still have the white-space only change in os_windows.cpp objected by >> Vladimir. >> >> - in codeBlob.cpp can you please update the following comments to reflect >> the new types: >> >> // Creates a simple CodeBlob. Sets up the size of the different >> regions.* CodeBlob::CodeBlob(const char* name, int header_size, int >> size, int frame_complete, int locs_size) {** assert(size == >> round_to(size, oopSize), "unaligned size");**+ >> RuntimeBlob::RuntimeBlob(const char* name, int header_size, int size, >> int frame_complete, int locs_size)* >> >> // Creates a CodeBlob from a CodeBuffer. Sets up the size of the >> different regions, // and copy code and relocation info.*! >> CodeBlob::CodeBlob(**! RuntimeBlob::RuntimeBlob(* >> >> >> - why do we need: >> >> *+ bool make_not_used() { return make_not_entrant(); }* >> >> it only forwards to make_not_entrant() and it is only used a single time in >> ciEnv.cpp: >> >> *! old->make_not_entrant();**! old->make_not_used();* >> >> >> - I don't understand why we need both NMethodIterator and >> CompiledMethodIterator - they're virtually the same and nmethod is >> currently the only subclass of CompiledMethod. Can you please be more >> specific why you've changed some instances of NMethodIterator to >> CompiledMethodIterator and others not. Without background information this >> makes no sense to me. Also, the advance method in CompiledMethodIterator >> isn't "inline" while the one in NMethodIterator is - don't know if this >> will be a performance problem. 
>> >> The rest looks good to me but please notice that I still haven't looked at >> all changes (especially not on the agent/ and dtrace/ files). So you should >> get at least one more reviewer for such a big change. >> >> Regards, >> Volker >> >> >> >> On Tue, Apr 19, 2016 at 7:32 AM, Rickard B?ckman >> wrote: >> >>> Here is the updated webrev, rebased and I think I have fixed all the >>> comments with one exception. >>> >>> I've avoided making CompiledMethodIterator and NMethodIterator a >>> template class for now. I agree we should do something to reuse the >>> parts that are identical but for now I think there will be a few more >>> changes to CompiledMethodIterator in an upcoming RFR. So can we hold off >>> with that change? >>> >>> Webrev: http://cr.openjdk.java.net/~rbackman/8152664.3/ >>> >>> Thanks >>> >>> On 04/07, Rickard B?ckman wrote: >>>> Hi, >>>> >>>> can I please have review for this patch please? >>>> >>>> So far CodeBlobs have required all the data (metadata, oops, code, etc) >>>> to be in one continuous blob With this patch we are looking to change >>>> that. It's been done by changing offsets in CodeBlob to addresses, >>>> making some methods virtual to allow different behavior and also >>>> creating a couple of new classes. CompiledMethod now sits inbetween >>>> CodeBlob and nmethod. >>>> >>>> CR: https://bugs.openjdk.java.net/browse/JDK-8152664 >>>> Webrev: http://cr.openjdk.java.net/~rbackman/8152664/ >>>> >>>> Thanks >>>> /R >>> /R >>> From sgehwolf at redhat.com Fri Apr 22 09:24:13 2016 From: sgehwolf at redhat.com (Severin Gehwolf) Date: Fri, 22 Apr 2016 11:24:13 +0200 Subject: RFR(s): 8154210: Zero: Better byte behaviour In-Reply-To: <1461316287.3683.11.camel@redhat.com> References: <1461316287.3683.11.camel@redhat.com> Message-ID: <1461317053.3683.14.camel@redhat.com> On Fri, 2016-04-22 at 11:11 +0200, Severin Gehwolf wrote: > Hi, > > Could somebody please review and approve this fix for JDK8u Zero? The > April CPU patches for OpenJDK broke this (bug 8132051). Please see > the > bug for details. > > The fixes are: > - Fix the jump table which is used for Zero release builds, since > ? JDK-8132051 introduced a new bytecode _fast_zputfield. > - Make sure that results, which are a subword of a stack slot > ? (possibly narrowed) are always at the lowest address. > > The fix was originally written by Andrew Haley. > > Bug:?https://bugs.openjdk.java.net/browse/JDK-8154210 > webrev:?http://cr.openjdk.java.net/~sgehwolf/webrevs/JDK-8154210/webr > ev.01/ > > Once the April CPU patches hit JDK 9 trees we need to make sure this > gets properly forward-ported. Unfortunately this hasn't happened yet > (or maybe it didn't reach jdk9/hs yet). NVM. It looks like it's in jdk9/dev now. The webrev should be the same, though. > Thoughts? > > Thanks, > Severin From sgehwolf at redhat.com Fri Apr 22 12:42:44 2016 From: sgehwolf at redhat.com (Severin Gehwolf) Date: Fri, 22 Apr 2016 14:42:44 +0200 Subject: RFR(s): 8154210: Zero: Better byte behaviour Message-ID: <1461328964.3683.22.camel@redhat.com> Hi, [new thread for JDK 9]. Could somebody please review this Zero-only fix for jdk9/dev? The April CPU patches for OpenJDK broke this (bug 8132051). Please see the bug for details. The fixes are: - Fix the jump table which is used for Zero release builds, since ? JDK-8132051 introduced a new bytecode _fast_zputfield. - Make sure that results, which are a subword of a stack slot ? (possibly narrowed) are always at the lowest address. The fix was originally written by Andrew Haley. 
Bug: https://bugs.openjdk.java.net/browse/JDK-8154210 webrev: http://cr.openjdk.java.net/~sgehwolf/webrevs/JDK-8154210/webrev.02/ Once reviewed and pushed, we'd need a similar fix[2] for JDK 8u Zero which is currently broken too. I think this fix needs to get pushed to jdk9/dev directly since jdk9/hs does not have the CPU fixes yet. Thoughts? Thanks, Severin [1]?http://cr.openjdk.java.net/~sgehwolf/webrevs/JDK-8154210/webrev.01/ From coleen.phillimore at oracle.com Fri Apr 22 12:49:31 2016 From: coleen.phillimore at oracle.com (Coleen Phillimore) Date: Fri, 22 Apr 2016 08:49:31 -0400 Subject: RFR(s): 8154210: Zero: Better byte behaviour In-Reply-To: <1461328964.3683.22.camel@redhat.com> References: <1461328964.3683.22.camel@redhat.com> Message-ID: <571A1DDB.2000106@oracle.com> This looks good. Sorry, I thought I'd fixed this but apparently not correctly. I'll sponsor it. Coleen On 4/22/16 8:42 AM, Severin Gehwolf wrote: > Hi, > > [new thread for JDK 9]. > > Could somebody please review this Zero-only fix for jdk9/dev? The April > CPU patches for OpenJDK broke this (bug 8132051). Please see the bug > for details. > > The fixes are: > - Fix the jump table which is used for Zero release builds, since > JDK-8132051 introduced a new bytecode _fast_zputfield. > - Make sure that results, which are a subword of a stack slot > (possibly narrowed) are always at the lowest address. > > The fix was originally written by Andrew Haley. > > Bug: https://bugs.openjdk.java.net/browse/JDK-8154210 > webrev: http://cr.openjdk.java.net/~sgehwolf/webrevs/JDK-8154210/webrev.02/ > > Once reviewed and pushed, we'd need a similar fix[2] for JDK 8u Zero > which is currently broken too. I think this fix needs to get pushed to > jdk9/dev directly since jdk9/hs does not have the CPU fixes yet. > > Thoughts? > > Thanks, > Severin > > [1] http://cr.openjdk.java.net/~sgehwolf/webrevs/JDK-8154210/webrev.01/ From sgehwolf at redhat.com Fri Apr 22 13:03:18 2016 From: sgehwolf at redhat.com (Severin Gehwolf) Date: Fri, 22 Apr 2016 15:03:18 +0200 Subject: RFR(s): 8154210: Zero: Better byte behaviour In-Reply-To: <571A1DDB.2000106@oracle.com> References: <1461328964.3683.22.camel@redhat.com> <571A1DDB.2000106@oracle.com> Message-ID: <1461330198.3683.24.camel@redhat.com> On Fri, 2016-04-22 at 08:49 -0400, Coleen Phillimore wrote: > This looks good.??Sorry, I thought I'd fixed this but apparently not? > correctly. > I'll sponsor it. Thanks for the review, Coleen! HG exported changeset is here for your convenience: http://cr.openjdk.java.net/~sgehwolf/webrevs/JDK-8154210/JDK-8154210-Zero-better-byte-behaviour.jdk9.export.patch Cheers, Severin > Coleen > > > On 4/22/16 8:42 AM, Severin Gehwolf wrote: > > > > Hi, > > > > [new thread for JDK 9]. > > > > Could somebody please review this Zero-only fix for jdk9/dev? The April > > CPU patches for OpenJDK broke this (bug 8132051). Please see the bug > > for details. > > > > The fixes are: > > - Fix the jump table which is used for Zero release builds, since > > ???JDK-8132051 introduced a new bytecode _fast_zputfield. > > - Make sure that results, which are a subword of a stack slot > > ???(possibly narrowed) are always at the lowest address. > > > > The fix was originally written by Andrew Haley. > > > > Bug: https://bugs.openjdk.java.net/browse/JDK-8154210 > > webrev: http://cr.openjdk.java.net/~sgehwolf/webrevs/JDK-8154210/webrev.02/ > > > > Once reviewed and pushed, we'd need a similar fix[2] for JDK 8u Zero > > which is currently broken too. 
I think this fix needs to get pushed to > > jdk9/dev directly since jdk9/hs does not have the CPU fixes yet. > > > > Thoughts? > > > > Thanks, > > Severin > > > > [1] http://cr.openjdk.java.net/~sgehwolf/webrevs/JDK-8154210/webrev.01/ From rickard.backman at oracle.com Fri Apr 22 13:19:28 2016 From: rickard.backman at oracle.com (Rickard =?iso-8859-1?Q?B=E4ckman?=) Date: Fri, 22 Apr 2016 15:19:28 +0200 Subject: RFR (XL): 8152664 - Support non-continuous CodeBlobs in HotSpot In-Reply-To: <5719ED2C.1050307@oracle.com> References: <20160407121221.GQ9504@rbackman> <20160419053212.GB19871@rbackman> <20160420074330.GG19871@rbackman> <5719ED2C.1050307@oracle.com> Message-ID: <20160422131928.GI19871@rbackman> Thank you for the review Tobias. /R On 04/22, Tobias Hartmann wrote: > Hi Rickard, > > On 20.04.2016 09:43, Rickard B?ckman wrote: > > http://cr.openjdk.java.net/~rbackman/8152664.4/ > > Thanks for fixing the issues. This looks good to me (I haven't looked at all changes). > > Best regards, > Tobias > > > > > Thanks > > /R > > > > On 04/19, Volker Simonis wrote: > >> Hi Rickard, > >> > >> I just wanted to prepare the new webrev for 8151956 but I'm a little > >> confused because I realized that your latest webrev already contains the > >> changes which I had proposed for 8151956. > >> > >> But after thinking about it a little bit I think that's fine. If I prepare > >> a patch for 8151956 which is intended to be pushed BEFORE 8152664 you'd had > >> to adapt 8152664 to take care of the new changes introduced by 8151956. If > >> I prepare a patch for 8151956 which is intended to be pushed AFTER 8152664 > >> it would be hard to review it (because it will depend on 8152664) and we > >> would get a change in the repo which would not build on PPC64 and AARCH64 > >> which isn't nice either. > >> > >> So altogether I think it's fine to incorporate the fix for 8151956 into > >> your change. Please only don't forget to close 8151956 as "fixed by > >> 8152664" after you have pushed the changes for 8152664. > >> > >> I've verified that your last webrev builds and runs fine on Linux/ppc64 and > >> AIX. You've also fixed all the issues I've addressed in my first mail to > >> this thread and the typo in os_linux_aarch64.cpp found by Andrew - thanks! > >> > >> Some final nit-picking: > >> > >> - you still have the white-space only change in os_windows.cpp objected by > >> Vladimir. > >> > >> - in codeBlob.cpp can you please update the following comments to reflect > >> the new types: > >> > >> // Creates a simple CodeBlob. Sets up the size of the different > >> regions.* CodeBlob::CodeBlob(const char* name, int header_size, int > >> size, int frame_complete, int locs_size) {** assert(size == > >> round_to(size, oopSize), "unaligned size");**+ > >> RuntimeBlob::RuntimeBlob(const char* name, int header_size, int size, > >> int frame_complete, int locs_size)* > >> > >> // Creates a CodeBlob from a CodeBuffer. Sets up the size of the > >> different regions, // and copy code and relocation info.*! > >> CodeBlob::CodeBlob(**! RuntimeBlob::RuntimeBlob(* > >> > >> > >> - why do we need: > >> > >> *+ bool make_not_used() { return make_not_entrant(); }* > >> > >> it only forwards to make_not_entrant() and it is only used a single time in > >> ciEnv.cpp: > >> > >> *! old->make_not_entrant();**! old->make_not_used();* > >> > >> > >> - I don't understand why we need both NMethodIterator and > >> CompiledMethodIterator - they're virtually the same and nmethod is > >> currently the only subclass of CompiledMethod. 
Can you please be more > >> specific why you've changed some instances of NMethodIterator to > >> CompiledMethodIterator and others not. Without background information this > >> makes no sense to me. Also, the advance method in CompiledMethodIterator > >> isn't "inline" while the one in NMethodIterator is - don't know if this > >> will be a performance problem. > >> > >> The rest looks good to me but please notice that I still haven't looked at > >> all changes (especially not on the agent/ and dtrace/ files). So you should > >> get at least one more reviewer for such a big change. > >> > >> Regards, > >> Volker > >> > >> > >> > >> On Tue, Apr 19, 2016 at 7:32 AM, Rickard B?ckman >>> wrote: > >> > >>> Here is the updated webrev, rebased and I think I have fixed all the > >>> comments with one exception. > >>> > >>> I've avoided making CompiledMethodIterator and NMethodIterator a > >>> template class for now. I agree we should do something to reuse the > >>> parts that are identical but for now I think there will be a few more > >>> changes to CompiledMethodIterator in an upcoming RFR. So can we hold off > >>> with that change? > >>> > >>> Webrev: http://cr.openjdk.java.net/~rbackman/8152664.3/ > >>> > >>> Thanks > >>> > >>> On 04/07, Rickard B?ckman wrote: > >>>> Hi, > >>>> > >>>> can I please have review for this patch please? > >>>> > >>>> So far CodeBlobs have required all the data (metadata, oops, code, etc) > >>>> to be in one continuous blob With this patch we are looking to change > >>>> that. It's been done by changing offsets in CodeBlob to addresses, > >>>> making some methods virtual to allow different behavior and also > >>>> creating a couple of new classes. CompiledMethod now sits inbetween > >>>> CodeBlob and nmethod. > >>>> > >>>> CR: https://bugs.openjdk.java.net/browse/JDK-8152664 > >>>> Webrev: http://cr.openjdk.java.net/~rbackman/8152664/ > >>>> > >>>> Thanks > >>>> /R > >>> /R > >>> From rickard.backman at oracle.com Fri Apr 22 13:19:46 2016 From: rickard.backman at oracle.com (Rickard =?iso-8859-1?Q?B=E4ckman?=) Date: Fri, 22 Apr 2016 15:19:46 +0200 Subject: RFR (XL): 8152664 - Support non-continuous CodeBlobs in HotSpot In-Reply-To: <87808A47-FBE3-4F05-BCC1-0EF140086905@oracle.com> References: <20160407121221.GQ9504@rbackman> <20160419053212.GB19871@rbackman> <20160420074330.GG19871@rbackman> <87808A47-FBE3-4F05-BCC1-0EF140086905@oracle.com> Message-ID: <20160422131946.GJ19871@rbackman> Thank you for the review Igor. /R On 04/22, Igor Veresov wrote: > This looks good to me. > > igor > > > > On Apr 20, 2016, at 12:43 AM, Rickard B?ckman wrote: > > > > Volker, > > > > sorry about the confusion. To me the only thing that made sense was to > > make the push of the two commits atomically. I didn't intend to include > > your changes in the webrev but I forgot to pop the changes of my mq. > > > > I hope you got the answers on some of the oddities from Christian. But > > to add to that. The reason we have NMethodIterator and > > CompiledMethodIterator is that we sometimes want to make sure we only > > iterate over nmethods. Hope that works. > > > > Here is an updated webrev with the other changes you commented on: > > > > http://cr.openjdk.java.net/~rbackman/8152664.4/ > > > > Thanks > > /R > > > > On 04/19, Volker Simonis wrote: > >> Hi Rickard, > >> > >> I just wanted to prepare the new webrev for 8151956 but I'm a little > >> confused because I realized that your latest webrev already contains the > >> changes which I had proposed for 8151956. 
> >> > >> But after thinking about it a little bit I think that's fine. If I prepare > >> a patch for 8151956 which is intended to be pushed BEFORE 8152664 you'd had > >> to adapt 8152664 to take care of the new changes introduced by 8151956. If > >> I prepare a patch for 8151956 which is intended to be pushed AFTER 8152664 > >> it would be hard to review it (because it will depend on 8152664) and we > >> would get a change in the repo which would not build on PPC64 and AARCH64 > >> which isn't nice either. > >> > >> So altogether I think it's fine to incorporate the fix for 8151956 into > >> your change. Please only don't forget to close 8151956 as "fixed by > >> 8152664" after you have pushed the changes for 8152664. > >> > >> I've verified that your last webrev builds and runs fine on Linux/ppc64 and > >> AIX. You've also fixed all the issues I've addressed in my first mail to > >> this thread and the typo in os_linux_aarch64.cpp found by Andrew - thanks! > >> > >> Some final nit-picking: > >> > >> - you still have the white-space only change in os_windows.cpp objected by > >> Vladimir. > >> > >> - in codeBlob.cpp can you please update the following comments to reflect > >> the new types: > >> > >> // Creates a simple CodeBlob. Sets up the size of the different > >> regions.* CodeBlob::CodeBlob(const char* name, int header_size, int > >> size, int frame_complete, int locs_size) {** assert(size == > >> round_to(size, oopSize), "unaligned size");**+ > >> RuntimeBlob::RuntimeBlob(const char* name, int header_size, int size, > >> int frame_complete, int locs_size)* > >> > >> // Creates a CodeBlob from a CodeBuffer. Sets up the size of the > >> different regions, // and copy code and relocation info.*! > >> CodeBlob::CodeBlob(**! RuntimeBlob::RuntimeBlob(* > >> > >> > >> - why do we need: > >> > >> *+ bool make_not_used() { return make_not_entrant(); }* > >> > >> it only forwards to make_not_entrant() and it is only used a single time in > >> ciEnv.cpp: > >> > >> *! old->make_not_entrant();**! old->make_not_used();* > >> > >> > >> - I don't understand why we need both NMethodIterator and > >> CompiledMethodIterator - they're virtually the same and nmethod is > >> currently the only subclass of CompiledMethod. Can you please be more > >> specific why you've changed some instances of NMethodIterator to > >> CompiledMethodIterator and others not. Without background information this > >> makes no sense to me. Also, the advance method in CompiledMethodIterator > >> isn't "inline" while the one in NMethodIterator is - don't know if this > >> will be a performance problem. > >> > >> The rest looks good to me but please notice that I still haven't looked at > >> all changes (especially not on the agent/ and dtrace/ files). So you should > >> get at least one more reviewer for such a big change. > >> > >> Regards, > >> Volker > >> > >> > >> > >> On Tue, Apr 19, 2016 at 7:32 AM, Rickard B?ckman >>> wrote: > >> > >>> Here is the updated webrev, rebased and I think I have fixed all the > >>> comments with one exception. > >>> > >>> I've avoided making CompiledMethodIterator and NMethodIterator a > >>> template class for now. I agree we should do something to reuse the > >>> parts that are identical but for now I think there will be a few more > >>> changes to CompiledMethodIterator in an upcoming RFR. So can we hold off > >>> with that change? 
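For readers following along, the reason for keeping both iterators is easier to see with a toy model: once the code cache can hold more than one kind of CompiledMethod, a walk over all CompiledMethods and a walk over nmethods only no longer visit the same set. The following is just an illustrative sketch with made-up stand-in classes, not the actual HotSpot iterator code:

#include <cstdio>
#include <vector>

// Stand-ins for the real code cache types; names and layout are invented.
struct CompiledMethod {
  virtual ~CompiledMethod() {}
  virtual bool is_nmethod() const = 0;
  virtual const char* kind() const = 0;
};

struct nmethod : CompiledMethod {
  bool is_nmethod() const { return true; }
  const char* kind() const { return "nmethod"; }
};

struct aot_method : CompiledMethod {   // hypothetical non-nmethod subclass
  bool is_nmethod() const { return false; }
  const char* kind() const { return "aot method"; }
};

// A CompiledMethodIterator-style walk visits everything; an
// NMethodIterator-style walk is the same loop restricted to nmethods.
static void walk(const std::vector<CompiledMethod*>& code_cache,
                 bool nmethods_only) {
  for (size_t i = 0; i < code_cache.size(); i++) {
    CompiledMethod* cm = code_cache[i];
    if (nmethods_only && !cm->is_nmethod()) continue;
    printf("visiting %s\n", cm->kind());
  }
}

int main() {
  nmethod a, b;
  aot_method c;
  std::vector<CompiledMethod*> code_cache;
  code_cache.push_back(&a);
  code_cache.push_back(&b);
  code_cache.push_back(&c);
  walk(code_cache, false);  // all compiled methods
  walk(code_cache, true);   // nmethods only
  return 0;
}

Whether the two walks end up sharing their code through a template or a common base is then mostly a question of taste, which is probably why it can wait for the follow-up RFR mentioned above.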
> >>> > >>> Webrev: http://cr.openjdk.java.net/~rbackman/8152664.3/ > >>> > >>> Thanks > >>> > >>> On 04/07, Rickard B?ckman wrote: > >>>> Hi, > >>>> > >>>> can I please have review for this patch please? > >>>> > >>>> So far CodeBlobs have required all the data (metadata, oops, code, etc) > >>>> to be in one continuous blob With this patch we are looking to change > >>>> that. It's been done by changing offsets in CodeBlob to addresses, > >>>> making some methods virtual to allow different behavior and also > >>>> creating a couple of new classes. CompiledMethod now sits inbetween > >>>> CodeBlob and nmethod. > >>>> > >>>> CR: https://bugs.openjdk.java.net/browse/JDK-8152664 > >>>> Webrev: http://cr.openjdk.java.net/~rbackman/8152664/ > >>>> > >>>> Thanks > >>>> /R > >>> /R > >>> > From aph at redhat.com Fri Apr 22 17:59:00 2016 From: aph at redhat.com (Andrew Haley) Date: Fri, 22 Apr 2016 18:59:00 +0100 Subject: RFR: 8154957: AArch64: Better byte behavior Message-ID: <571A6664.1050903@redhat.com> The fix for 8132051 is needed for AArch64. This patch also includes a fix for an earlier mismerge. http://cr.openjdk.java.net/~aph/8154957/ Andrew. From gnu.andrew at redhat.com Fri Apr 22 18:06:38 2016 From: gnu.andrew at redhat.com (Andrew Hughes) Date: Fri, 22 Apr 2016 14:06:38 -0400 (EDT) Subject: RFR(s): 8154210: Zero: Better byte behaviour In-Reply-To: <571A1DDB.2000106@oracle.com> References: <1461328964.3683.22.camel@redhat.com> <571A1DDB.2000106@oracle.com> Message-ID: <2036615111.4341454.1461348398776.JavaMail.zimbra@redhat.com> ----- Original Message ----- > > This looks good. Sorry, I thought I'd fixed this but apparently not > correctly. > I'll sponsor it. > There's a rather chequered history to this. We didn't see 8146518 before it appeared in the public push of 8u91. Andrew Haley thus developed our own fix which covered both the issues fixed in 8146518 and the ones you see in this changeset. Up on merging the upstream u91, I first tested out the upstream version without our fix. It worked fine on a debug build, but immediately crashed on a product build. Hence the need for this patch. It was quite possible to entirely miss these remaining changes: 1. The jump table is only used on product builds (USELABELS needs to be defined). We fell over this ourselves when the initial patch missed a comma in that same table. 2. The address issue only shows up on 64-bit big-endian architectures. So you'll see it on ppc64(be) if you disable the JIT and on s390x, but not on other architectures. > Coleen > > -- Andrew :) Senior Free Java Software Engineer Red Hat, Inc. (http://www.redhat.com) PGP Key: ed25519/35964222 (hkp://keys.gnupg.net) Fingerprint = 5132 579D D154 0ED2 3E04 C5A0 CFDA 0F9B 3596 4222 From stuart.marks at oracle.com Fri Apr 22 18:44:21 2016 From: stuart.marks at oracle.com (Stuart Marks) Date: Fri, 22 Apr 2016 11:44:21 -0700 Subject: Fwd: RFR(s): 8153330: deprecate Runtime.traceInstructions & traceMethodCalls for removal In-Reply-To: <5716C666.5060006@oracle.com> References: <5716AD1A.3040902@oracle.com> <5716AF4E.4000103@oracle.com> <5716C666.5060006@oracle.com> Message-ID: <571A7105.2070104@oracle.com> On 4/19/16 4:59 PM, David Holmes wrote: > On 20/04/2016 8:21 AM, Stuart Marks wrote: >> I just posted this to core-libs-dev, and I figured I should run this by >> Hotspot-land. There isn't any double-secret-intrinsic magic that Hotspot >> does with these methods, is there? Anybody see any issue with >> deprecating and eventually removing these methods? 
> > I can't see any references in hotspot. Great, thanks for checking. I had looked too but I don't know hotspot very well so I was concerned I might have missed something. s'marks From sanne at redhat.com Fri Apr 22 19:06:25 2016 From: sanne at redhat.com (Sanne Grinovero) Date: Fri, 22 Apr 2016 20:06:25 +0100 Subject: JMH and JDK9 In-Reply-To: <10982358-95d5-40c1-8e20-c153e355d495@default> References: <56FBFA22.2060204@redhat.com> <56FBFB05.9050706@oracle.com> <56FBFCCD.1000704@oracle.com> <428721B0-47A4-453E-89C2-1D6CA09DEC75@oracle.com> <56FE29EE.1090801@oracle.com> <0C6C1D21-1592-426A-B9DE-33458FC90CF9@oracle.com> <57038495.40603@redhat.com> <570390AE.6050406@oracle.com> <5703C8B1.5080501@redhat.com> <10982358-95d5-40c1-8e20-c153e355d495@default> Message-ID: On Wed, Apr 20, 2016 at 3:15 PM, Stephen Felts wrote: > There was a bug that was just fixed yesterday such that the Java EE classes were not hidden. > It should have been fixed in the last nightly 114 build. > > As long as the necessary Java EE API classes are on the classpath, no command line options are needed. > > If you have the Java EE JTA API jar in the classpath, then use of javax.transaction.Synchronization (which is not in the JDK but is in the JTA API jar) will be resolved for both javac and java. Thanks Stephen, indeed testing 9-ea+114-2016-04-19-162931.javare.4880.nc this is working great. [Sorry all for the forked email thread.. I should have changed the subject early on] Regards, Sanne > > -----Original Message----- > From: Sanne Grinovero [mailto:sanne at redhat.com] > Sent: Wednesday, April 20, 2016 8:29 AM > To: Alan Bateman > Cc: jigsaw-dev; hotspot-dev Source Developers > Subject: Re: JMH and JDK9 > > On Tue, Apr 5, 2016 at 3:16 PM, Andrew Dinn wrote: >> On 05/04/16 11:17, Alan Bateman wrote: >> . . . >>> We recently updated JEP 261 proposing that "java.se" be the only >>> java.* root module that is resolved when compiling code in the >>> unnamed module or at runtime when the main class is loaded from the >>> class path [1]. On the surface then it will "appear" to developers >>> that the JDK does not have the EE components but from everything >>> we've seen so far, then those EE components are usually on the class path anyway. >> >> Ah ok, so this means that the problem has been punted to the other >> foot i.e. the only apps affected will be those which i) don't have an >> EE jar on their classpath and ii) require the (partial) stub >> implementations provided by Java SE. That sounds much better since >> such a configuration is of almost no use to anyone and hence is very unlikely to arise. > > Agreed: excellent idea! > > I'm eager to try it out so that we can resume testing of everything else too; I just tried my luck with build 9-ea+114 but it didn't seem to work: I'm going to assume this wasn't implemented yet, or should I double check how I'm building? > Did I understand correctly that I won't need to pass any switch to neither java nor javac, as long as I have the JavaEE jar as external dependencies on my classpath? (i.e. if this build is "proven" on Java8 it should work on Java9 ?) > > Is there an issue tracker which I could follow to watch updates on this? > > Slightly unrelated, but is it expected that compilation is successful, even though (in my specific case) javax.transaction.Synchronization causes a java.lang.NoClassDefFoundError at runtime? 
> > Thanks, > Sanne From rwestrel at redhat.com Mon Apr 25 08:06:56 2016 From: rwestrel at redhat.com (Roland Westrelin) Date: Mon, 25 Apr 2016 10:06:56 +0200 Subject: RFR: 8154957: AArch64: Better byte behavior In-Reply-To: <571A6664.1050903@redhat.com> References: <571A6664.1050903@redhat.com> Message-ID: <571DD020.6040607@redhat.com> > http://cr.openjdk.java.net/~aph/8154957/ That looks good to me. Roland. From sgehwolf at redhat.com Mon Apr 25 08:45:29 2016 From: sgehwolf at redhat.com (Severin Gehwolf) Date: Mon, 25 Apr 2016 10:45:29 +0200 Subject: [8u] Request for Approval: 8154210: Zero: Better byte behaviour Message-ID: <1461573929.4324.9.camel@redhat.com> Hi, Please approve the following Zero-only change for jdk8u. The current tree for JDK 8 is broken for a Zero JVM build[1]. The failure is caused by the April CPU patches. The fix for this went into 9 already[2] and the JDK 8 is essentially the same patch. Only whitespace differences make the JDK 9 patch not apply. Bug:?https://bugs.openjdk.java.net/browse/JDK-8154210 webrev:?http://cr.openjdk.java.net/~sgehwolf/webrevs/JDK-8154210/webrev.jdk8.01/ HG exported changeset: http://cr.openjdk.java.net/~sgehwolf/webrevs/JDK-8154210/JDK-8154210-better-byte-heviour-zero.jdk8.export.patch Review thread for JDK 9: http://mail.openjdk.java.net/pipermail/hotspot-dev/2016-April/022824.html Thanks, Severin [1]?http://builder.classpath.org/jenkins/job/OpenJDK8_jdk8u_Zero/61/console [2] http://hg.openjdk.java.net/jdk9/hs/hotspot/rev/3d7d041acb59 From sean.coffey at oracle.com Mon Apr 25 08:52:52 2016 From: sean.coffey at oracle.com (=?UTF-8?Q?Se=c3=a1n_Coffey?=) Date: Mon, 25 Apr 2016 09:52:52 +0100 Subject: [8u] Request for Approval: 8154210: Zero: Better byte behaviour In-Reply-To: <1461573929.4324.9.camel@redhat.com> References: <1461573929.4324.9.camel@redhat.com> Message-ID: <571DDAE4.7000506@oracle.com> Looks like the JDK 9 master record needs a noreg- label. Please add one : http://openjdk.java.net/guide/changePlanning.html#noreg Approved. Regards, Sean. On 25/04/2016 09:45, Severin Gehwolf wrote: > Hi, > > Please approve the following Zero-only change for jdk8u. The current > tree for JDK 8 is broken for a Zero JVM build[1]. The failure is caused > by the April CPU patches. > > The fix for this went into 9 already[2] and the JDK 8 is essentially > the same patch. Only whitespace differences make the JDK 9 patch not > apply. > > Bug: https://bugs.openjdk.java.net/browse/JDK-8154210 > webrev: http://cr.openjdk.java.net/~sgehwolf/webrevs/JDK-8154210/webrev.jdk8.01/ > HG exported changeset: > http://cr.openjdk.java.net/~sgehwolf/webrevs/JDK-8154210/JDK-8154210-better-byte-heviour-zero.jdk8.export.patch > > Review thread for JDK 9: > http://mail.openjdk.java.net/pipermail/hotspot-dev/2016-April/022824.html > > Thanks, > Severin > > [1] http://builder.classpath.org/jenkins/job/OpenJDK8_jdk8u_Zero/61/console > [2] http://hg.openjdk.java.net/jdk9/hs/hotspot/rev/3d7d041acb59 From sgehwolf at redhat.com Mon Apr 25 09:02:09 2016 From: sgehwolf at redhat.com (Severin Gehwolf) Date: Mon, 25 Apr 2016 11:02:09 +0200 Subject: [8u] Request for Approval: 8154210: Zero: Better byte behaviour In-Reply-To: <571DDAE4.7000506@oracle.com> References: <1461573929.4324.9.camel@redhat.com> <571DDAE4.7000506@oracle.com> Message-ID: <1461574929.4324.12.camel@redhat.com> On Mon, 2016-04-25 at 09:52 +0100, Se?n Coffey wrote: > Looks like the JDK 9 master record needs a noreg- label. Please add one? 
> : http://openjdk.java.net/guide/changePlanning.html#noreg > > Approved. Thanks, Sean. I've added a noreq-hard label. How is the push process for this going to work? Will this be handled by the bulk-backport-hotspot-changes-to-jdk8-process or are we good to push to the jdk8u dev tree? Cheers, Severin > Regards, > Sean. > > On 25/04/2016 09:45, Severin Gehwolf wrote: > > > > Hi, > > > > Please approve the following Zero-only change for jdk8u. The current > > tree for JDK 8 is broken for a Zero JVM build[1]. The failure is caused > > by the April CPU patches. > > > > The fix for this went into 9 already[2] and the JDK 8 is essentially > > the same patch. Only whitespace differences make the JDK 9 patch not > > apply. > > > > Bug: https://bugs.openjdk.java.net/browse/JDK-8154210 > > webrev: http://cr.openjdk.java.net/~sgehwolf/webrevs/JDK-8154210/webrev.jdk8.01/ > > HG exported changeset: > > http://cr.openjdk.java.net/~sgehwolf/webrevs/JDK-8154210/JDK-8154210-better-byte-heviour-zero.jdk8.export.patch > > > > Review thread for JDK 9: > > http://mail.openjdk.java.net/pipermail/hotspot-dev/2016-April/022824.html > > > > Thanks, > > Severin > > > > [1] http://builder.classpath.org/jenkins/job/OpenJDK8_jdk8u_Zero/61/console > > [2] http://hg.openjdk.java.net/jdk9/hs/hotspot/rev/3d7d041acb59 From sean.coffey at oracle.com Mon Apr 25 09:46:53 2016 From: sean.coffey at oracle.com (=?UTF-8?Q?Se=c3=a1n_Coffey?=) Date: Mon, 25 Apr 2016 10:46:53 +0100 Subject: [8u] Request for Approval: 8154210: Zero: Better byte behaviour In-Reply-To: <1461574929.4324.12.camel@redhat.com> References: <1461573929.4324.9.camel@redhat.com> <571DDAE4.7000506@oracle.com> <1461574929.4324.12.camel@redhat.com> Message-ID: <571DE78D.8010909@oracle.com> On 25/04/16 10:02, Severin Gehwolf wrote: > On Mon, 2016-04-25 at 09:52 +0100, Se?n Coffey wrote: >> Looks like the JDK 9 master record needs a noreg- label. Please add one >> : http://openjdk.java.net/guide/changePlanning.html#noreg >> >> Approved. > Thanks, Sean. I've added a noreq-hard label. > > How is the push process for this going to work? Will this be handled by > the bulk-backport-hotspot-changes-to-jdk8-process or are we good to > push to the jdk8u dev tree? there is only one team integration forest for the JDK 8 Updates project now. That's jdk8u-dev. Please ensure all tests pass and push your changes to there. regards, Sean. > > Cheers, > Severin > >> Regards, >> Sean. >> >> On 25/04/2016 09:45, Severin Gehwolf wrote: >>> Hi, >>> >>> Please approve the following Zero-only change for jdk8u. The current >>> tree for JDK 8 is broken for a Zero JVM build[1]. The failure is caused >>> by the April CPU patches. >>> >>> The fix for this went into 9 already[2] and the JDK 8 is essentially >>> the same patch. Only whitespace differences make the JDK 9 patch not >>> apply. 
>>> >>> Bug: https://bugs.openjdk.java.net/browse/JDK-8154210 >>> webrev: http://cr.openjdk.java.net/~sgehwolf/webrevs/JDK-8154210/webrev.jdk8.01/ >>> HG exported changeset: >>> http://cr.openjdk.java.net/~sgehwolf/webrevs/JDK-8154210/JDK-8154210-better-byte-heviour-zero.jdk8.export.patch >>> >>> Review thread for JDK 9: >>> http://mail.openjdk.java.net/pipermail/hotspot-dev/2016-April/022824.html >>> >>> Thanks, >>> Severin >>> >>> [1] http://builder.classpath.org/jenkins/job/OpenJDK8_jdk8u_Zero/61/console >>> [2] http://hg.openjdk.java.net/jdk9/hs/hotspot/rev/3d7d041acb59 From dmitry.samersoff at oracle.com Mon Apr 25 11:50:26 2016 From: dmitry.samersoff at oracle.com (Dmitry Samersoff) Date: Mon, 25 Apr 2016 14:50:26 +0300 Subject: RFR(L): JDK-8154258 [TESTBUG] Various serviceability tests fail compilation In-Reply-To: <5714CCCA.7030809@oracle.com> References: <5714CCCA.7030809@oracle.com> Message-ID: <571E0482.6050000@oracle.com> Everybody, Please review the changes. http://cr.openjdk.java.net/~dsamersoff/JDK-8154258/webrev.04/ 1. Change hotspot/test/testlibrary/jdk/test/lib/Utils.java to match /test/lib/share/classes/jdk/test/lib/Utils.java i.e. replace sun.misc.Unsafe to jdk.internal.misc.Unsafe 2. Add the tag @modules java.base/jdk.internal.misc to all tests that uses testlibrary 3. Replace all occurrence of sun.misc.Unsafe to jdk.internal.misc.Unsafe Testing: local, RBT:hotspot_all -Dmitry -- Dmitry Samersoff Oracle Java development team, Saint Petersburg, Russia * I would love to change the world, but they won't give me the sources. From rickard.backman at oracle.com Mon Apr 25 12:15:24 2016 From: rickard.backman at oracle.com (Rickard =?iso-8859-1?Q?B=E4ckman?=) Date: Mon, 25 Apr 2016 14:15:24 +0200 Subject: RFR (XL): 8152664 - Support non-continuous CodeBlobs in HotSpot In-Reply-To: References: <20160407121221.GQ9504@rbackman> <20160419053212.GB19871@rbackman> <31054C59-BCB4-4C66-9AF9-FC92DC836821@oracle.com> Message-ID: <20160425121524.GK19871@rbackman> Volker, are you ok with the last changes? http://cr.openjdk.java.net/~rbackman/8152664.4/ Thanks On 04/19, Volker Simonis wrote: > On Tue, Apr 19, 2016 at 6:49 PM, Christian Thalinger < > christian.thalinger at oracle.com> wrote: > > > > > On Apr 19, 2016, at 4:30 AM, Volker Simonis > > wrote: > > > > Hi Rickard, > > > > I just wanted to prepare the new webrev for 8151956 but I'm a little > > confused because I realized that your latest webrev already contains the > > changes which I had proposed for 8151956. > > > > But after thinking about it a little bit I think that's fine. If I prepare > > a patch for 8151956 which is intended to be pushed BEFORE 8152664 you'd had > > to adapt 8152664 to take care of the new changes introduced by 8151956. If > > I prepare a patch for 8151956 which is intended to be pushed AFTER 8152664 > > it would be hard to review it (because it will depend on 8152664) and we > > would get a change in the repo which would not build on PPC64 and AARCH64 > > which isn't nice either. > > > > So altogether I think it's fine to incorporate the fix for 8151956 into > > your change. Please only don't forget to close 8151956 as "fixed by > > 8152664" after you have pushed the changes for 8152664. > > > > I've verified that your last webrev builds and runs fine on Linux/ppc64 and > > AIX. You've also fixed all the issues I've addressed in my first mail to > > this thread and the typo in os_linux_aarch64.cpp found by Andrew - thanks! 
> > > > Some final nit-picking: > > > > - you still have the white-space only change in os_windows.cpp objected by > > Vladimir. > > > > - in codeBlob.cpp can you please update the following comments to reflect > > the new types: > > > > // Creates a simple CodeBlob. Sets up the size of the different > > regions.* CodeBlob::CodeBlob(const char* name, int header_size, int > > size, int frame_complete, int locs_size) {** assert(size == > > round_to(size, oopSize), "unaligned size");**+ > > RuntimeBlob::RuntimeBlob(const char* name, int header_size, int size, > > int frame_complete, int locs_size)* > > > > // Creates a CodeBlob from a CodeBuffer. Sets up the size of the > > different regions, // and copy code and relocation info.*! > > CodeBlob::CodeBlob(**! RuntimeBlob::RuntimeBlob(* > > > > > > - why do we need: > > > > *+ bool make_not_used() { return make_not_entrant(); }* > > > > it only forwards to make_not_entrant() and it is only used a single time in > > ciEnv.cpp: > > > > *! old->make_not_entrant();**! > > old->make_not_used();* > > > > > > I can answer this. make_not_used is virtual: > > > > virtual bool make_not_used() = 0; > > > > Can you guess why this is the case? :-) The reason is that the > > implementation is different for AOT compiled methods. > > > > > OK, I see. Thanks for the background info but now I can not refrain from > commenting :) > > If SAP (or anybody else outside Oracle) would submit such a kind of XL > change in order to better support let's say it's closed HPUX/Itanium port I > don't think it would be even considered. > > I don't want to reject these specific change (I came to terms with it :) > but I think this should stand as bad example for changes which will not > happen too often in the future. > > > > > > > > - I don't understand why we need both NMethodIterator and > > CompiledMethodIterator - they're virtually the same and nmethod is > > currently the only subclass of CompiledMethod. Can you please be more > > specific why you've changed some instances of NMethodIterator to > > CompiledMethodIterator and others not. Without background information this > > makes no sense to me. Also, the advance method in CompiledMethodIterator > > isn't "inline" while the one in NMethodIterator is - don't know if this > > will be a performance problem. > > > > The rest looks good to me but please notice that I still haven't looked at > > all changes (especially not on the agent/ and dtrace/ files). So you should > > get at least one more reviewer for such a big change. > > > > Regards, > > Volker > > > > > > > > On Tue, Apr 19, 2016 at 7:32 AM, Rickard B?ckman < > > rickard.backman at oracle.com > > > > wrote: > > > > > > Here is the updated webrev, rebased and I think I have fixed all the > > comments with one exception. > > > > I've avoided making CompiledMethodIterator and NMethodIterator a > > template class for now. I agree we should do something to reuse the > > parts that are identical but for now I think there will be a few more > > changes to CompiledMethodIterator in an upcoming RFR. So can we hold off > > with that change? > > > > Webrev: http://cr.openjdk.java.net/~rbackman/8152664.3/ > > > > Thanks > > > > On 04/07, Rickard B?ckman wrote: > > > > Hi, > > > > can I please have review for this patch please? > > > > So far CodeBlobs have required all the data (metadata, oops, code, etc) > > to be in one continuous blob With this patch we are looking to change > > that. 
It's been done by changing offsets in CodeBlob to addresses, > > making some methods virtual to allow different behavior and also > > creating a couple of new classes. CompiledMethod now sits inbetween > > CodeBlob and nmethod. > > > > CR: https://bugs.openjdk.java.net/browse/JDK-8152664 > > Webrev: http://cr.openjdk.java.net/~rbackman/8152664/ > > > > Thanks > > /R > > > > /R > > > > > > From volker.simonis at gmail.com Mon Apr 25 13:34:10 2016 From: volker.simonis at gmail.com (Volker Simonis) Date: Mon, 25 Apr 2016 15:34:10 +0200 Subject: RFR (XL): 8152664 - Support non-continuous CodeBlobs in HotSpot In-Reply-To: <20160425121524.GK19871@rbackman> References: <20160407121221.GQ9504@rbackman> <20160419053212.GB19871@rbackman> <31054C59-BCB4-4C66-9AF9-FC92DC836821@oracle.com> <20160425121524.GK19871@rbackman> Message-ID: Hi Rickard, sorry for being late but I was at a conference last week. Unfortunately your patch doesn't apply cleanly to hs-comp any more. As this is probably my fault because I didn't send you my OK in time I resolved the conflicts :) The first seven can be ignored because they only touch the copyright year which has already been updated to 2016 now. Following are the changes needed to fix relocInfo.hpp, method.hpp and frame.cpp after you've applied your current patch (also attached in case the patch gets scrambled): diff -r a1d6c22335bb src/share/vm/code/relocInfo.hpp --- a/src/share/vm/code/relocInfo.hpp Mon Apr 25 14:30:01 2016 +0200 +++ b/src/share/vm/code/relocInfo.hpp Mon Apr 25 15:12:16 2016 +0200 @@ -28,6 +28,8 @@ #include "memory/allocation.hpp" #include "runtime/os.hpp" +class nmethod; +class CompiledMethod; class Metadata; class NativeMovConstReg; diff -r a1d6c22335bb src/share/vm/oops/method.hpp --- a/src/share/vm/oops/method.hpp Mon Apr 25 14:30:01 2016 +0200 +++ b/src/share/vm/oops/method.hpp Mon Apr 25 15:12:16 2016 +0200 @@ -432,9 +432,9 @@ // nmethod/verified compiler entry address verified_code_entry(); bool check_code() const; // Not inline to avoid circular ref - nmethod* volatile code() const { assert( check_code(), "" ); return (nmethod *)OrderAccess::load_ptr_acquire(&_code); } + CompiledMethod* volatile code() const { assert( check_code(), "" ); return (CompiledMethod *)OrderAccess::load_ptr_acquire(&_code); } void clear_code(); // Clear out any compiled code - static void set_code(methodHandle mh, nmethod* code); + static void set_code(methodHandle mh, CompiledMethod* code); void set_adapter_entry(AdapterHandlerEntry* adapter) { constMethod()->set_adapter_entry(adapter); } diff -r a1d6c22335bb src/share/vm/runtime/frame.cpp --- a/src/share/vm/runtime/frame.cpp Mon Apr 25 14:30:01 2016 +0200 +++ b/src/share/vm/runtime/frame.cpp Mon Apr 25 15:12:16 2016 +0200 @@ -661,13 +661,16 @@ } } else if (_cb->is_buffer_blob()) { st->print("v ~BufferBlob::%s", ((BufferBlob *)_cb)->name()); - } else if (_cb->is_nmethod()) { - nmethod* nm = (nmethod*)_cb; - Method* m = nm->method(); + } else if (_cb->is_compiled()) { + CompiledMethod* cm = (CompiledMethod*)_cb; + Method* m = cm->method(); if (m != NULL) { - st->print("J %d%s", nm->compile_id(), (nm->is_osr_method() ? "%" : "")); - if (nm->compiler() != NULL) { - st->print(" %s", nm->compiler()->name()); + if (cm->is_nmethod()) { + nmethod* nm = cm->as_nmethod(); + st->print("J %d%s", nm->compile_id(), (nm->is_osr_method() ? 
"%" : "")); + if (nm->compiler() != NULL) { + st->print(" %s", nm->compiler()->name()); + } } m->name_and_sig_as_C_string(buf, buflen); st->print(" %s", buf); Besides this, the change looks good. Thanks, Volker On Mon, Apr 25, 2016 at 2:15 PM, Rickard B?ckman wrote: > Volker, > > are you ok with the last changes? > > http://cr.openjdk.java.net/~rbackman/8152664.4/ > > Thanks > > On 04/19, Volker Simonis wrote: > > On Tue, Apr 19, 2016 at 6:49 PM, Christian Thalinger < > > christian.thalinger at oracle.com> wrote: > > > > > > > > On Apr 19, 2016, at 4:30 AM, Volker Simonis > > > wrote: > > > > > > Hi Rickard, > > > > > > I just wanted to prepare the new webrev for 8151956 but I'm a little > > > confused because I realized that your latest webrev already contains > the > > > changes which I had proposed for 8151956. > > > > > > But after thinking about it a little bit I think that's fine. If I > prepare > > > a patch for 8151956 which is intended to be pushed BEFORE 8152664 > you'd had > > > to adapt 8152664 to take care of the new changes introduced by > 8151956. If > > > I prepare a patch for 8151956 which is intended to be pushed AFTER > 8152664 > > > it would be hard to review it (because it will depend on 8152664) and > we > > > would get a change in the repo which would not build on PPC64 and > AARCH64 > > > which isn't nice either. > > > > > > So altogether I think it's fine to incorporate the fix for 8151956 into > > > your change. Please only don't forget to close 8151956 as "fixed by > > > 8152664" after you have pushed the changes for 8152664. > > > > > > I've verified that your last webrev builds and runs fine on > Linux/ppc64 and > > > AIX. You've also fixed all the issues I've addressed in my first mail > to > > > this thread and the typo in os_linux_aarch64.cpp found by Andrew - > thanks! > > > > > > Some final nit-picking: > > > > > > - you still have the white-space only change in os_windows.cpp > objected by > > > Vladimir. > > > > > > - in codeBlob.cpp can you please update the following comments to > reflect > > > the new types: > > > > > > // Creates a simple CodeBlob. Sets up the size of the different > > > regions.* CodeBlob::CodeBlob(const char* name, int header_size, int > > > size, int frame_complete, int locs_size) {** assert(size == > > > round_to(size, oopSize), "unaligned size");**+ > > > RuntimeBlob::RuntimeBlob(const char* name, int header_size, int size, > > > int frame_complete, int locs_size)* > > > > > > // Creates a CodeBlob from a CodeBuffer. Sets up the size of the > > > different regions, // and copy code and relocation info.*! > > > CodeBlob::CodeBlob(**! RuntimeBlob::RuntimeBlob(* > > > > > > > > > - why do we need: > > > > > > *+ bool make_not_used() { return make_not_entrant(); }* > > > > > > it only forwards to make_not_entrant() and it is only used a single > time in > > > ciEnv.cpp: > > > > > > *! old->make_not_entrant();**! > > > old->make_not_used();* > > > > > > > > > I can answer this. make_not_used is virtual: > > > > > > virtual bool make_not_used() = 0; > > > > > > Can you guess why this is the case? :-) The reason is that the > > > implementation is different for AOT compiled methods. > > > > > > > > OK, I see. Thanks for the background info but now I can not refrain from > > commenting :) > > > > If SAP (or anybody else outside Oracle) would submit such a kind of XL > > change in order to better support let's say it's closed HPUX/Itanium > port I > > don't think it would be even considered. 
> > > > I don't want to reject these specific change (I came to terms with it :) > > but I think this should stand as bad example for changes which will not > > happen too often in the future. > > > > > > > > > > > > > - I don't understand why we need both NMethodIterator and > > > CompiledMethodIterator - they're virtually the same and nmethod is > > > currently the only subclass of CompiledMethod. Can you please be more > > > specific why you've changed some instances of NMethodIterator to > > > CompiledMethodIterator and others not. Without background information > this > > > makes no sense to me. Also, the advance method in > CompiledMethodIterator > > > isn't "inline" while the one in NMethodIterator is - don't know if this > > > will be a performance problem. > > > > > > The rest looks good to me but please notice that I still haven't > looked at > > > all changes (especially not on the agent/ and dtrace/ files). So you > should > > > get at least one more reviewer for such a big change. > > > > > > Regards, > > > Volker > > > > > > > > > > > > On Tue, Apr 19, 2016 at 7:32 AM, Rickard B?ckman < > > > rickard.backman at oracle.com > > > > > > wrote: > > > > > > > > > Here is the updated webrev, rebased and I think I have fixed all the > > > comments with one exception. > > > > > > I've avoided making CompiledMethodIterator and NMethodIterator a > > > template class for now. I agree we should do something to reuse the > > > parts that are identical but for now I think there will be a few more > > > changes to CompiledMethodIterator in an upcoming RFR. So can we hold > off > > > with that change? > > > > > > Webrev: http://cr.openjdk.java.net/~rbackman/8152664.3/ > > > > > > Thanks > > > > > > On 04/07, Rickard B?ckman wrote: > > > > > > Hi, > > > > > > can I please have review for this patch please? > > > > > > So far CodeBlobs have required all the data (metadata, oops, code, etc) > > > to be in one continuous blob With this patch we are looking to change > > > that. It's been done by changing offsets in CodeBlob to addresses, > > > making some methods virtual to allow different behavior and also > > > creating a couple of new classes. CompiledMethod now sits inbetween > > > CodeBlob and nmethod. > > > > > > CR: https://bugs.openjdk.java.net/browse/JDK-8152664 > > > Webrev: http://cr.openjdk.java.net/~rbackman/8152664/ > > > > > > Thanks > > > /R > > > > > > /R > > > > > > > > > > -------------- next part -------------- A non-text attachment was scrubbed... Name: 8152664_v4_addon.patch Type: text/x-patch Size: 2403 bytes Desc: not available URL: From coleen.phillimore at oracle.com Mon Apr 25 19:44:42 2016 From: coleen.phillimore at oracle.com (Coleen Phillimore) Date: Mon, 25 Apr 2016 15:44:42 -0400 Subject: Result: New hotspot Group Member: Christian Tornqvist Message-ID: <303327cd-854c-115a-9a8b-c90d7bb4d197@oracle.com> The vote for Christian Tornqvist (OpenJDK user name: ctornqvi) [1] is now closed. Yes: 13 Veto: 0 Abstain: 0 According to the Bylaws definition of Three-Vote Consensus, this is sufficient to approve the nomination. Coleen Phillimore [1] http://mail.openjdk.java.net/pipermail/members/2016-April/000486.html From coleen.phillimore at oracle.com Mon Apr 25 19:46:57 2016 From: coleen.phillimore at oracle.com (Coleen Phillimore) Date: Mon, 25 Apr 2016 15:46:57 -0400 Subject: Result: New hotspot Group Member: Christian Tornqvist [correction] Message-ID: The vote for Christian Tornqvist (OpenJDK user name: ctornqvi) [1] is now closed. 
Yes: 13 Veto: 0 Abstain: 0 According to the Bylaws definition of Three-Vote Consensus, this is sufficient to approve the nomination. Coleen Phillimore [1] http://mail.openjdk.java.net/pipermail/hotspot-dev/2016-April/022589.html From gnu.andrew at redhat.com Tue Apr 26 02:06:45 2016 From: gnu.andrew at redhat.com (Andrew Hughes) Date: Mon, 25 Apr 2016 22:06:45 -0400 (EDT) Subject: [8u] Request for Approval: 8154210: Zero: Better byte behaviour In-Reply-To: <1461574929.4324.12.camel@redhat.com> References: <1461573929.4324.9.camel@redhat.com> <571DDAE4.7000506@oracle.com> <1461574929.4324.12.camel@redhat.com> Message-ID: <1767175218.210703.1461636405220.JavaMail.zimbra@redhat.com> ----- Original Message ----- > On Mon, 2016-04-25 at 09:52 +0100, Se?n Coffey wrote: > > Looks like the JDK 9 master record needs a noreg- label. Please add one > > : http://openjdk.java.net/guide/changePlanning.html#noreg > > > > Approved. > > Thanks, Sean. I've added a noreq-hard label. > > How is the push process for this going to work? Will this be handled by > the bulk-backport-hotspot-changes-to-jdk8-process or are we good to > push to the jdk8u dev tree? > Pushed: http://hg.openjdk.java.net/jdk8u/jdk8u-dev/hotspot/rev/8f58998958ca -- Andrew :) Senior Free Java Software Engineer Red Hat, Inc. (http://www.redhat.com) PGP Key: ed25519/35964222 (hkp://keys.gnupg.net) Fingerprint = 5132 579D D154 0ED2 3E04 C5A0 CFDA 0F9B 3596 4222 From max.ockner at oracle.com Tue Apr 26 05:42:49 2016 From: max.ockner at oracle.com (Max Ockner) Date: Tue, 26 Apr 2016 01:42:49 -0400 Subject: RFR: 8154110: Update class* and safepoint* logging subsystems Message-ID: <571EFFD9.9050408@oracle.com> Hello, This change factors the tags from the class and safepoint logging subsystems into smaller tags, including "class" and "safepoint" tags which are included in tag combination in their respective subsystems. classresolve -> class+resolve classload -> class+load classunload -> class+unload classpath -> class+path classloaderdata -> class+loaderdata classload+constraints -> class+loaderconstraints classinit -> class+init classload+preorder -> class+preorder safepointcleanup -> safepoint+cleanup class+loaderdata can be further factored into class+load+data. Same with class+loaderconstraints. Bug: https://bugs.openjdk.java.net/browse/JDK-8154110 webrev(hotspot): http://cr.openjdk.java.net/~mockner/8154110.hotspot/ webrev(jdk): http://cr.openjdk.java.net/~mockner/8154110.jdk/ Tested with jtreg hotspot tests. jdk tests currently running. Thanks, Max From rickard.backman at oracle.com Tue Apr 26 08:11:05 2016 From: rickard.backman at oracle.com (Rickard =?iso-8859-1?Q?B=E4ckman?=) Date: Tue, 26 Apr 2016 10:11:05 +0200 Subject: RFR (XL): 8152664 - Support non-continuous CodeBlobs in HotSpot In-Reply-To: References: <20160407121221.GQ9504@rbackman> <20160419053212.GB19871@rbackman> <31054C59-BCB4-4C66-9AF9-FC92DC836821@oracle.com> <20160425121524.GK19871@rbackman> Message-ID: <20160426081105.GM19871@rbackman> Volker, thanks for the review and the patch to rebase the change. /R On 04/25, Volker Simonis wrote: > Hi Rickard, > > sorry for being late but I was at a conference last week. > Unfortunately your patch doesn't apply cleanly to hs-comp any more. As this > is probably my fault because I didn't send you my OK in time I resolved the > conflicts :) > > The first seven can be ignored because they only touch the copyright year > which has already been updated to 2016 now. 
> > Following are the changes needed to fix relocInfo.hpp, method.hpp and > frame.cpp after you've applied your current patch (also attached in case > the patch gets scrambled): > > diff -r a1d6c22335bb src/share/vm/code/relocInfo.hpp > --- a/src/share/vm/code/relocInfo.hpp Mon Apr 25 14:30:01 2016 +0200 > +++ b/src/share/vm/code/relocInfo.hpp Mon Apr 25 15:12:16 2016 +0200 > @@ -28,6 +28,8 @@ > #include "memory/allocation.hpp" > #include "runtime/os.hpp" > > +class nmethod; > +class CompiledMethod; > class Metadata; > class NativeMovConstReg; > > diff -r a1d6c22335bb src/share/vm/oops/method.hpp > --- a/src/share/vm/oops/method.hpp Mon Apr 25 14:30:01 2016 +0200 > +++ b/src/share/vm/oops/method.hpp Mon Apr 25 15:12:16 2016 +0200 > @@ -432,9 +432,9 @@ > // nmethod/verified compiler entry > address verified_code_entry(); > bool check_code() const; // Not inline to avoid circular ref > - nmethod* volatile code() const { assert( check_code(), > "" ); return (nmethod *)OrderAccess::load_ptr_acquire(&_code); } > + CompiledMethod* volatile code() const { assert( > check_code(), "" ); return (CompiledMethod > *)OrderAccess::load_ptr_acquire(&_code); } > void clear_code(); // Clear out any compiled code > - static void set_code(methodHandle mh, nmethod* code); > + static void set_code(methodHandle mh, CompiledMethod* code); > void set_adapter_entry(AdapterHandlerEntry* adapter) { > constMethod()->set_adapter_entry(adapter); > } > diff -r a1d6c22335bb src/share/vm/runtime/frame.cpp > --- a/src/share/vm/runtime/frame.cpp Mon Apr 25 14:30:01 2016 +0200 > +++ b/src/share/vm/runtime/frame.cpp Mon Apr 25 15:12:16 2016 +0200 > @@ -661,13 +661,16 @@ > } > } else if (_cb->is_buffer_blob()) { > st->print("v ~BufferBlob::%s", ((BufferBlob *)_cb)->name()); > - } else if (_cb->is_nmethod()) { > - nmethod* nm = (nmethod*)_cb; > - Method* m = nm->method(); > + } else if (_cb->is_compiled()) { > + CompiledMethod* cm = (CompiledMethod*)_cb; > + Method* m = cm->method(); > if (m != NULL) { > - st->print("J %d%s", nm->compile_id(), (nm->is_osr_method() ? "%" : > "")); > - if (nm->compiler() != NULL) { > - st->print(" %s", nm->compiler()->name()); > + if (cm->is_nmethod()) { > + nmethod* nm = cm->as_nmethod(); > + st->print("J %d%s", nm->compile_id(), (nm->is_osr_method() ? "%" > : "")); > + if (nm->compiler() != NULL) { > + st->print(" %s", nm->compiler()->name()); > + } > } > m->name_and_sig_as_C_string(buf, buflen); > st->print(" %s", buf); > > Besides this, the change looks good. > > Thanks, > Volker > > > On Mon, Apr 25, 2016 at 2:15 PM, Rickard B?ckman > wrote: > > > Volker, > > > > are you ok with the last changes? > > > > http://cr.openjdk.java.net/~rbackman/8152664.4/ > > > > Thanks > > > > On 04/19, Volker Simonis wrote: > > > On Tue, Apr 19, 2016 at 6:49 PM, Christian Thalinger < > > > christian.thalinger at oracle.com> wrote: > > > > > > > > > > > On Apr 19, 2016, at 4:30 AM, Volker Simonis > > > > wrote: > > > > > > > > Hi Rickard, > > > > > > > > I just wanted to prepare the new webrev for 8151956 but I'm a little > > > > confused because I realized that your latest webrev already contains > > the > > > > changes which I had proposed for 8151956. > > > > > > > > But after thinking about it a little bit I think that's fine. If I > > prepare > > > > a patch for 8151956 which is intended to be pushed BEFORE 8152664 > > you'd had > > > > to adapt 8152664 to take care of the new changes introduced by > > 8151956. 
If > > > > I prepare a patch for 8151956 which is intended to be pushed AFTER > > 8152664 > > > > it would be hard to review it (because it will depend on 8152664) and > > we > > > > would get a change in the repo which would not build on PPC64 and > > AARCH64 > > > > which isn't nice either. > > > > > > > > So altogether I think it's fine to incorporate the fix for 8151956 into > > > > your change. Please only don't forget to close 8151956 as "fixed by > > > > 8152664" after you have pushed the changes for 8152664. > > > > > > > > I've verified that your last webrev builds and runs fine on > > Linux/ppc64 and > > > > AIX. You've also fixed all the issues I've addressed in my first mail > > to > > > > this thread and the typo in os_linux_aarch64.cpp found by Andrew - > > thanks! > > > > > > > > Some final nit-picking: > > > > > > > > - you still have the white-space only change in os_windows.cpp > > objected by > > > > Vladimir. > > > > > > > > - in codeBlob.cpp can you please update the following comments to > > reflect > > > > the new types: > > > > > > > > // Creates a simple CodeBlob. Sets up the size of the different > > > > regions.* CodeBlob::CodeBlob(const char* name, int header_size, int > > > > size, int frame_complete, int locs_size) {** assert(size == > > > > round_to(size, oopSize), "unaligned size");**+ > > > > RuntimeBlob::RuntimeBlob(const char* name, int header_size, int size, > > > > int frame_complete, int locs_size)* > > > > > > > > // Creates a CodeBlob from a CodeBuffer. Sets up the size of the > > > > different regions, // and copy code and relocation info.*! > > > > CodeBlob::CodeBlob(**! RuntimeBlob::RuntimeBlob(* > > > > > > > > > > > > - why do we need: > > > > > > > > *+ bool make_not_used() { return make_not_entrant(); }* > > > > > > > > it only forwards to make_not_entrant() and it is only used a single > > time in > > > > ciEnv.cpp: > > > > > > > > *! old->make_not_entrant();**! > > > > old->make_not_used();* > > > > > > > > > > > > I can answer this. make_not_used is virtual: > > > > > > > > virtual bool make_not_used() = 0; > > > > > > > > Can you guess why this is the case? :-) The reason is that the > > > > implementation is different for AOT compiled methods. > > > > > > > > > > > OK, I see. Thanks for the background info but now I can not refrain from > > > commenting :) > > > > > > If SAP (or anybody else outside Oracle) would submit such a kind of XL > > > change in order to better support let's say it's closed HPUX/Itanium > > port I > > > don't think it would be even considered. > > > > > > I don't want to reject these specific change (I came to terms with it :) > > > but I think this should stand as bad example for changes which will not > > > happen too often in the future. > > > > > > > > > > > > > > > > > > - I don't understand why we need both NMethodIterator and > > > > CompiledMethodIterator - they're virtually the same and nmethod is > > > > currently the only subclass of CompiledMethod. Can you please be more > > > > specific why you've changed some instances of NMethodIterator to > > > > CompiledMethodIterator and others not. Without background information > > this > > > > makes no sense to me. Also, the advance method in > > CompiledMethodIterator > > > > isn't "inline" while the one in NMethodIterator is - don't know if this > > > > will be a performance problem. > > > > > > > > The rest looks good to me but please notice that I still haven't > > looked at > > > > all changes (especially not on the agent/ and dtrace/ files). 
So you > > should > > > > get at least one more reviewer for such a big change. > > > > > > > > Regards, > > > > Volker > > > > > > > > > > > > > > > > On Tue, Apr 19, 2016 at 7:32 AM, Rickard B?ckman < > > > > rickard.backman at oracle.com > > > > > > > > wrote: > > > > > > > > > > > > Here is the updated webrev, rebased and I think I have fixed all the > > > > comments with one exception. > > > > > > > > I've avoided making CompiledMethodIterator and NMethodIterator a > > > > template class for now. I agree we should do something to reuse the > > > > parts that are identical but for now I think there will be a few more > > > > changes to CompiledMethodIterator in an upcoming RFR. So can we hold > > off > > > > with that change? > > > > > > > > Webrev: http://cr.openjdk.java.net/~rbackman/8152664.3/ > > > > > > > > Thanks > > > > > > > > On 04/07, Rickard B?ckman wrote: > > > > > > > > Hi, > > > > > > > > can I please have review for this patch please? > > > > > > > > So far CodeBlobs have required all the data (metadata, oops, code, etc) > > > > to be in one continuous blob With this patch we are looking to change > > > > that. It's been done by changing offsets in CodeBlob to addresses, > > > > making some methods virtual to allow different behavior and also > > > > creating a couple of new classes. CompiledMethod now sits inbetween > > > > CodeBlob and nmethod. > > > > > > > > CR: https://bugs.openjdk.java.net/browse/JDK-8152664 > > > > Webrev: http://cr.openjdk.java.net/~rbackman/8152664/ > > > > > > > > Thanks > > > > /R > > > > > > > > /R > > > > > > > > > > > > > > > diff -r a1d6c22335bb src/share/vm/code/relocInfo.hpp > --- a/src/share/vm/code/relocInfo.hpp Mon Apr 25 14:30:01 2016 +0200 > +++ b/src/share/vm/code/relocInfo.hpp Mon Apr 25 15:12:16 2016 +0200 > @@ -28,6 +28,8 @@ > #include "memory/allocation.hpp" > #include "runtime/os.hpp" > > +class nmethod; > +class CompiledMethod; > class Metadata; > class NativeMovConstReg; > > diff -r a1d6c22335bb src/share/vm/oops/method.hpp > --- a/src/share/vm/oops/method.hpp Mon Apr 25 14:30:01 2016 +0200 > +++ b/src/share/vm/oops/method.hpp Mon Apr 25 15:12:16 2016 +0200 > @@ -432,9 +432,9 @@ > // nmethod/verified compiler entry > address verified_code_entry(); > bool check_code() const; // Not inline to avoid circular ref > - nmethod* volatile code() const { assert( check_code(), "" ); return (nmethod *)OrderAccess::load_ptr_acquire(&_code); } > + CompiledMethod* volatile code() const { assert( check_code(), "" ); return (CompiledMethod *)OrderAccess::load_ptr_acquire(&_code); } > void clear_code(); // Clear out any compiled code > - static void set_code(methodHandle mh, nmethod* code); > + static void set_code(methodHandle mh, CompiledMethod* code); > void set_adapter_entry(AdapterHandlerEntry* adapter) { > constMethod()->set_adapter_entry(adapter); > } > diff -r a1d6c22335bb src/share/vm/runtime/frame.cpp > --- a/src/share/vm/runtime/frame.cpp Mon Apr 25 14:30:01 2016 +0200 > +++ b/src/share/vm/runtime/frame.cpp Mon Apr 25 15:12:16 2016 +0200 > @@ -661,13 +661,16 @@ > } > } else if (_cb->is_buffer_blob()) { > st->print("v ~BufferBlob::%s", ((BufferBlob *)_cb)->name()); > - } else if (_cb->is_nmethod()) { > - nmethod* nm = (nmethod*)_cb; > - Method* m = nm->method(); > + } else if (_cb->is_compiled()) { > + CompiledMethod* cm = (CompiledMethod*)_cb; > + Method* m = cm->method(); > if (m != NULL) { > - st->print("J %d%s", nm->compile_id(), (nm->is_osr_method() ? 
"%" : "")); > - if (nm->compiler() != NULL) { > - st->print(" %s", nm->compiler()->name()); > + if (cm->is_nmethod()) { > + nmethod* nm = cm->as_nmethod(); > + st->print("J %d%s", nm->compile_id(), (nm->is_osr_method() ? "%" : "")); > + if (nm->compiler() != NULL) { > + st->print(" %s", nm->compiler()->name()); > + } > } > m->name_and_sig_as_C_string(buf, buflen); > st->print(" %s", buf); From stefan.karlsson at oracle.com Tue Apr 26 10:43:24 2016 From: stefan.karlsson at oracle.com (Stefan Karlsson) Date: Tue, 26 Apr 2016 12:43:24 +0200 Subject: RFR: 8141496: BitMap set operations copy their other BitMap argument Message-ID: <571F464C.1030004@oracle.com> Hi all, Please review this patch to make the BitMap operations take const BitMap references instead of copying the BitMaps. http://cr.openjdk.java.net/~stefank/8141496/webrev.01 https://bugs.openjdk.java.net/browse/JDK-8141496 The patch is needed in preparation for: https://bugs.openjdk.java.net/browse/JDK-8141501 Problems with BitMap buffer management Tested in JPRT (together with prototypes for JDK-8141501). Thanks, StefanK From mikael.gerdin at oracle.com Tue Apr 26 11:38:51 2016 From: mikael.gerdin at oracle.com (Mikael Gerdin) Date: Tue, 26 Apr 2016 13:38:51 +0200 Subject: RFR: 8141496: BitMap set operations copy their other BitMap argument In-Reply-To: <571F464C.1030004@oracle.com> References: <571F464C.1030004@oracle.com> Message-ID: <571F534B.10308@oracle.com> Hi Stefan, On 2016-04-26 12:43, Stefan Karlsson wrote: > Hi all, > > Please review this patch to make the BitMap operations take const BitMap > references instead of copying the BitMaps. > > http://cr.openjdk.java.net/~stefank/8141496/webrev.01 Looks good! /Mikael > https://bugs.openjdk.java.net/browse/JDK-8141496 > > The patch is needed in preparation for: > https://bugs.openjdk.java.net/browse/JDK-8141501 > Problems with BitMap buffer management > > Tested in JPRT (together with prototypes for JDK-8141501). > > Thanks, > StefanK From stefan.karlsson at oracle.com Tue Apr 26 11:42:05 2016 From: stefan.karlsson at oracle.com (Stefan Karlsson) Date: Tue, 26 Apr 2016 13:42:05 +0200 Subject: RFR: 8141496: BitMap set operations copy their other BitMap argument In-Reply-To: <571F534B.10308@oracle.com> References: <571F464C.1030004@oracle.com> <571F534B.10308@oracle.com> Message-ID: <571F540D.20509@oracle.com> Thanks, Mikael! StefanK On 26/04/16 13:38, Mikael Gerdin wrote: > Hi Stefan, > > On 2016-04-26 12:43, Stefan Karlsson wrote: >> Hi all, >> >> Please review this patch to make the BitMap operations take const BitMap >> references instead of copying the BitMaps. >> >> http://cr.openjdk.java.net/~stefank/8141496/webrev.01 > > Looks good! > > /Mikael > >> https://bugs.openjdk.java.net/browse/JDK-8141496 >> >> The patch is needed in preparation for: >> https://bugs.openjdk.java.net/browse/JDK-8141501 >> Problems with BitMap buffer management >> >> Tested in JPRT (together with prototypes for JDK-8141501). >> >> Thanks, >> StefanK From thomas.schatzl at oracle.com Tue Apr 26 12:04:38 2016 From: thomas.schatzl at oracle.com (Thomas Schatzl) Date: Tue, 26 Apr 2016 14:04:38 +0200 Subject: RFR: 8141496: BitMap set operations copy their other BitMap argument In-Reply-To: <571F464C.1030004@oracle.com> References: <571F464C.1030004@oracle.com> Message-ID: <1461672278.2571.7.camel@oracle.com> Hi, On Tue, 2016-04-26 at 12:43 +0200, Stefan Karlsson wrote: > Hi all, > > Please review this patch to make the BitMap operations take const > BitMap? > references instead of copying the BitMaps. 
> > http://cr.openjdk.java.net/~stefank/8141496/webrev.01 > https://bugs.openjdk.java.net/browse/JDK-8141496 > > The patch is needed in preparation for: > ? https://bugs.openjdk.java.net/browse/JDK-8141501 > ? Problems with BitMap buffer management > > Tested in JPRT (together with prototypes for JDK-8141501). ? looks good. Thomas From stefan.karlsson at oracle.com Tue Apr 26 12:09:41 2016 From: stefan.karlsson at oracle.com (Stefan Karlsson) Date: Tue, 26 Apr 2016 14:09:41 +0200 Subject: RFR: 8141496: BitMap set operations copy their other BitMap argument In-Reply-To: <1461672278.2571.7.camel@oracle.com> References: <571F464C.1030004@oracle.com> <1461672278.2571.7.camel@oracle.com> Message-ID: <571F5A85.2070307@oracle.com> Thanks, Thomas! StefanK On 26/04/16 14:04, Thomas Schatzl wrote: > Hi, > > On Tue, 2016-04-26 at 12:43 +0200, Stefan Karlsson wrote: >> Hi all, >> >> Please review this patch to make the BitMap operations take const >> BitMap >> references instead of copying the BitMaps. >> >> http://cr.openjdk.java.net/~stefank/8141496/webrev.01 >> https://bugs.openjdk.java.net/browse/JDK-8141496 >> >> The patch is needed in preparation for: >> https://bugs.openjdk.java.net/browse/JDK-8141501 >> Problems with BitMap buffer management >> >> Tested in JPRT (together with prototypes for JDK-8141501). > looks good. > > Thomas > From aph at redhat.com Tue Apr 26 12:28:21 2016 From: aph at redhat.com (Andrew Haley) Date: Tue, 26 Apr 2016 13:28:21 +0100 Subject: Illegal tag name: jdk8u76-b00-aarch32-160405 Message-ID: <571F5EE5.9010809@redhat.com> Please tell us what the syntax of a jcheck'd tag name should be. Thanks, Andrew. On 05 Apr 2016, at 13:27, Edward Nevill wrote: > > OK. I tried tagging the tree with > > jdk8u76-b00-aarch32-160405 > > But I get > > (trusty)ed at localhost:/work/ed/aarch32/jdk8u/hotspot$ hg push > pushing to ssh://enevill at hg.openjdk.java.net/aarch32-port/jdk8u/hotspot > running ssh enevill at hg.openjdk.java.net 'hg -R aarch32-port/jdk8u/hotspot serve --stdio' > searching for changes > 1 changesets found > remote: adding changesets > remote: adding manifests > remote: adding file changes > remote: added 1 changesets with 1 changes to 1 files > remote: [jcheck d46eaf84b346 2015-12-01 13:15:54 -0800] > remote: > remote: Illegal tag name: jdk8u76-b00-aarch32-160405 > remote: > remote: transaction abort! > remote: rollback completed > remote: abort: pretxnchangegroup.0.jcheck hook failed > > So, it seems like there are upstream restrictions on the tags we can use. > > Does anyone know the correct syntax of a tag? From edward.nevill at gmail.com Tue Apr 26 12:54:55 2016 From: edward.nevill at gmail.com (Edward Nevill) Date: Tue, 26 Apr 2016 13:54:55 +0100 Subject: Illegal tag name: jdk8u76-b00-aarch32-160405 In-Reply-To: <571F5EE5.9010809@redhat.com> References: <571F5EE5.9010809@redhat.com> Message-ID: <1461675295.2941.67.camel@mylittlepony.linaroharston> On Tue, 2016-04-26 at 13:28 +0100, Andrew Haley wrote: > Please tell us what the syntax of a jcheck'd tag name should be. tag_re = re.compile("tip$|jdk-([1-9]([0-9]*)(\.[0-9]){0,3})\+([0-9]+)$|jdk[4-9](u\d{1,3})?-b\d{2,3}$|hs\d\d(\.\d{1,2})?-b\d\d$") Regards, Ed. 
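(A quick way to sanity-check a candidate tag against the pattern Ed quoted, before pushing: the sketch below copies the regex verbatim from the jcheck.py quote above, but how jcheck itself applies it is an assumption here -- an anchored match is used only for illustration.

  import re

  # pattern copied from the jcheck.py quote above, as a raw string
  tag_re = re.compile(r"tip$|jdk-([1-9]([0-9]*)(\.[0-9]){0,3})\+([0-9]+)$|jdk[4-9](u\d{1,3})?-b\d{2,3}$|hs\d\d(\.\d{1,2})?-b\d\d$")

  for tag in ("jdk8u76-b00-aarch32-160405",  # the tag rejected in this thread
              "jdk8u76-b00",                 # fits the jdk[4-9](u\d{1,3})?-b\d{2,3}$ alternative
              "hs25.40-b12"):                # fits the hs\d\d(\.\d{1,2})?-b\d\d$ alternative
      print(tag, bool(tag_re.match(tag)))

Running this shows that the trailing -aarch32-160405 is what falls outside the pattern, since every alternative ends with $ immediately after the build number.)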
From david.holmes at oracle.com Tue Apr 26 12:55:42 2016 From: david.holmes at oracle.com (David Holmes) Date: Tue, 26 Apr 2016 22:55:42 +1000 Subject: Illegal tag name: jdk8u76-b00-aarch32-160405 In-Reply-To: <571F5EE5.9010809@redhat.com> References: <571F5EE5.9010809@redhat.com> Message-ID: <571F654E.8070101@oracle.com> Not really a hotspot question but ... http://hg.openjdk.java.net/code-tools/jcheck/file/d46eaf84b346/jcheck.py tag_re = re.compile("tip$|jdk-([1-9]([0-9]*)(\.[0-9]){0,3})\+([0-9]+)$|jdk[4-9](u\d{1,3})?-b\d{2,3}$|hs\d\d(\.\d{1,2})?-b\d\d$") HTH David On 26/04/2016 10:28 PM, Andrew Haley wrote: > Please tell us what the syntax of a jcheck'd tag name should be. > > Thanks, > > Andrew. > > > On 05 Apr 2016, at 13:27, Edward Nevill wrote: >> >> OK. I tried tagging the tree with >> >> jdk8u76-b00-aarch32-160405 >> >> But I get >> >> (trusty)ed at localhost:/work/ed/aarch32/jdk8u/hotspot$ hg push >> pushing to ssh://enevill at hg.openjdk.java.net/aarch32-port/jdk8u/hotspot >> running ssh enevill at hg.openjdk.java.net 'hg -R aarch32-port/jdk8u/hotspot serve --stdio' >> searching for changes >> 1 changesets found >> remote: adding changesets >> remote: adding manifests >> remote: adding file changes >> remote: added 1 changesets with 1 changes to 1 files >> remote: [jcheck d46eaf84b346 2015-12-01 13:15:54 -0800] >> remote: >> remote: Illegal tag name: jdk8u76-b00-aarch32-160405 >> remote: >> remote: transaction abort! >> remote: rollback completed >> remote: abort: pretxnchangegroup.0.jcheck hook failed >> >> So, it seems like there are upstream restrictions on the tags we can use. >> >> Does anyone know the correct syntax of a tag? > > From aph at redhat.com Tue Apr 26 13:01:33 2016 From: aph at redhat.com (Andrew Haley) Date: Tue, 26 Apr 2016 14:01:33 +0100 Subject: Illegal tag name: jdk8u76-b00-aarch32-160405 In-Reply-To: <571F654E.8070101@oracle.com> References: <571F5EE5.9010809@redhat.com> <571F654E.8070101@oracle.com> Message-ID: <571F66AD.3070307@redhat.com> On 04/26/2016 01:55 PM, David Holmes wrote: > Not really a hotspot question but ... > > http://hg.openjdk.java.net/code-tools/jcheck/file/d46eaf84b346/jcheck.py > > tag_re = > re.compile("tip$|jdk-([1-9]([0-9]*)(\.[0-9]){0,3})\+([0-9]+)$|jdk[4-9](u\d{1,3})?-b\d{2,3}$|hs\d\d(\.\d{1,2})?-b\d\d$") Ah yes. :-) I was thinking of asking build-dev, but even then I was not sure it was the right list. Would it have been? Andrew. From volker.simonis at gmail.com Tue Apr 26 13:09:04 2016 From: volker.simonis at gmail.com (Volker Simonis) Date: Tue, 26 Apr 2016 15:09:04 +0200 Subject: Illegal tag name: jdk8u76-b00-aarch32-160405 In-Reply-To: <571F66AD.3070307@redhat.com> References: <571F5EE5.9010809@redhat.com> <571F654E.8070101@oracle.com> <571F66AD.3070307@redhat.com> Message-ID: jcheck belongs to code-tools so probably code-tools-dev would have been the right list :) On Tue, Apr 26, 2016 at 3:01 PM, Andrew Haley wrote: > On 04/26/2016 01:55 PM, David Holmes wrote: >> Not really a hotspot question but ... >> >> http://hg.openjdk.java.net/code-tools/jcheck/file/d46eaf84b346/jcheck.py >> >> tag_re = >> re.compile("tip$|jdk-([1-9]([0-9]*)(\.[0-9]){0,3})\+([0-9]+)$|jdk[4-9](u\d{1,3})?-b\d{2,3}$|hs\d\d(\.\d{1,2})?-b\d\d$") > > Ah yes. :-) > > I was thinking of asking build-dev, but even then I was not sure it was > the right list. Would it have been? > > Andrew. 
> From david.holmes at oracle.com Tue Apr 26 13:11:01 2016 From: david.holmes at oracle.com (David Holmes) Date: Tue, 26 Apr 2016 23:11:01 +1000 Subject: Illegal tag name: jdk8u76-b00-aarch32-160405 In-Reply-To: <571F66AD.3070307@redhat.com> References: <571F5EE5.9010809@redhat.com> <571F654E.8070101@oracle.com> <571F66AD.3070307@redhat.com> Message-ID: <571F68E5.3040402@oracle.com> On 26/04/2016 11:01 PM, Andrew Haley wrote: > On 04/26/2016 01:55 PM, David Holmes wrote: >> Not really a hotspot question but ... >> >> http://hg.openjdk.java.net/code-tools/jcheck/file/d46eaf84b346/jcheck.py >> >> tag_re = >> re.compile("tip$|jdk-([1-9]([0-9]*)(\.[0-9]){0,3})\+([0-9]+)$|jdk[4-9](u\d{1,3})?-b\d{2,3}$|hs\d\d(\.\d{1,2})?-b\d\d$") > > Ah yes. :-) > > I was thinking of asking build-dev, but even then I was not sure it was > the right list. Would it have been? Nope: hg-tools-dev at openjdk.java.net. which until today I had never heard of :) David > Andrew. > From aph at redhat.com Tue Apr 26 14:14:11 2016 From: aph at redhat.com (Andrew Haley) Date: Tue, 26 Apr 2016 15:14:11 +0100 Subject: RFR: 8155100: AArch64: Relax alignment requirement for byte_map_base Message-ID: <571F77B3.9020402@redhat.com> AArch64 expects the card table base to be page aligned, and asserts that. Unfortunately (because asserts are compiled out in release code) if this assumption is ever violated in production code we won't see it. Instead, we will see mysterious segfaults. We have seen this fail in OpenJDK 7. It makes more sense to allow the base to be unaligned. http://cr.openjdk.java.net/~aph/8155100/hotspot.changeset Andrew. From rwestrel at redhat.com Tue Apr 26 14:22:18 2016 From: rwestrel at redhat.com (Roland Westrelin) Date: Tue, 26 Apr 2016 16:22:18 +0200 Subject: RFR: 8155100: AArch64: Relax alignment requirement for byte_map_base In-Reply-To: <571F77B3.9020402@redhat.com> References: <571F77B3.9020402@redhat.com> Message-ID: <571F799A.6090707@redhat.com> > http://cr.openjdk.java.net/~aph/8155100/hotspot.changeset That looks good to me. Roland. From harold.seigel at oracle.com Tue Apr 26 14:48:19 2016 From: harold.seigel at oracle.com (harold seigel) Date: Tue, 26 Apr 2016 10:48:19 -0400 Subject: RFR: 8154110: Update class* and safepoint* logging subsystems In-Reply-To: <571EFFD9.9050408@oracle.com> References: <571EFFD9.9050408@oracle.com> Message-ID: <571F7FB3.9070209@oracle.com> Hi Max, The changes look good. Harold On 4/26/2016 1:42 AM, Max Ockner wrote: > Hello, > > This change factors the tags from the class and safepoint logging > subsystems into smaller tags, including "class" and "safepoint" tags > which are included in tag combination in their respective subsystems. > > classresolve -> class+resolve > classload -> class+load > classunload -> class+unload > classpath -> class+path > classloaderdata -> class+loaderdata > classload+constraints -> class+loaderconstraints > classinit -> class+init > classload+preorder -> class+preorder > > safepointcleanup -> safepoint+cleanup > > class+loaderdata can be further factored into class+load+data. Same > with class+loaderconstraints. > > Bug: https://bugs.openjdk.java.net/browse/JDK-8154110 > webrev(hotspot): http://cr.openjdk.java.net/~mockner/8154110.hotspot/ > webrev(jdk): http://cr.openjdk.java.net/~mockner/8154110.jdk/ > > Tested with jtreg hotspot tests. jdk tests currently running. 
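(A usage note on the renamed tags quoted above, assuming the mapping lands as listed -- the levels below are only illustrative, and no actual log output is reproduced here:

  # before this change, the old single-word tags were selected like this
  java -Xlog:classload=info,classunload=info -version

  # after it, the same logging is selected as tag combinations with '+'
  java -Xlog:class+load=info,class+unload=info -version
  java -Xlog:class+init=info,safepoint+cleanup=debug -version

  # the full set of tags accepted by a given build can be listed with
  java -Xlog:help

so existing -Xlog command lines that use the old names will need the same renaming as the code.)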
> > Thanks, > Max From chris.hegarty at oracle.com Tue Apr 26 18:56:11 2016 From: chris.hegarty at oracle.com (Chris Hegarty) Date: Tue, 26 Apr 2016 19:56:11 +0100 Subject: RFR(L): JDK-8154258 [TESTBUG] Various serviceability tests fail compilation In-Reply-To: <571E0482.6050000@oracle.com> References: <5714CCCA.7030809@oracle.com> <571E0482.6050000@oracle.com> Message-ID: <80B4971C-0833-400F-BD63-B0D1F070D180@oracle.com> On 25 Apr 2016, at 12:50, Dmitry Samersoff wrote: > Everybody, > > Please review the changes. > > http://cr.openjdk.java.net/~dsamersoff/JDK-8154258/webrev.04/ Thank you Dmitry. Your changes look good to me. As a follow up, it would be nice to refactor the current testlibrary so that the Unsafe dependency is put into its own testlibrary. I suspect that many of the @modules could then be removed from many tests that don?t actually depend on Unsafe. Anyway, that can some later. -Chris. > 1. Change hotspot/test/testlibrary/jdk/test/lib/Utils.java to match > /test/lib/share/classes/jdk/test/lib/Utils.java > i.e. replace sun.misc.Unsafe to jdk.internal.misc.Unsafe > > 2. Add the tag > @modules java.base/jdk.internal.misc > to all tests that uses testlibrary > > 3. Replace all occurrence of sun.misc.Unsafe to jdk.internal.misc.Unsafe > > Testing: local, RBT:hotspot_all > > -Dmitry > > -- > Dmitry Samersoff > Oracle Java development team, Saint Petersburg, Russia > * I would love to change the world, but they won't give me the sources. From robbin.ehn at oracle.com Tue Apr 26 19:44:29 2016 From: robbin.ehn at oracle.com (Robbin Ehn) Date: Tue, 26 Apr 2016 21:44:29 +0200 Subject: RFR: 8154110: Update class* and safepoint* logging subsystems In-Reply-To: <571EFFD9.9050408@oracle.com> References: <571EFFD9.9050408@oracle.com> Message-ID: <75e5bc77-a00f-8f12-f636-0b40c28199de@oracle.com> Hi Max, I was just looking at these tags and thought maybe they should be split, nice ! I have request for the ' constraints' tag, can make it singular (without ending 's') ? (there are a few other ones in plural which we also should fix) Otherwise I think this looks good! Thanks! /Robbin On 04/26/2016 07:42 AM, Max Ockner wrote: > Hello, > > This change factors the tags from the class and safepoint logging > subsystems into smaller tags, including "class" and "safepoint" tags > which are included in tag combination in their respective subsystems. > > classresolve -> class+resolve > classload -> class+load > classunload -> class+unload > classpath -> class+path > classloaderdata -> class+loaderdata > classload+constraints -> class+loaderconstraints > classinit -> class+init > classload+preorder -> class+preorder > > safepointcleanup -> safepoint+cleanup > > class+loaderdata can be further factored into class+load+data. Same > with class+loaderconstraints. > > Bug: https://bugs.openjdk.java.net/browse/JDK-8154110 > webrev(hotspot): http://cr.openjdk.java.net/~mockner/8154110.hotspot/ > webrev(jdk): http://cr.openjdk.java.net/~mockner/8154110.jdk/ > > Tested with jtreg hotspot tests. jdk tests currently running. 
> > Thanks, > Max From coleen.phillimore at oracle.com Tue Apr 26 20:05:29 2016 From: coleen.phillimore at oracle.com (Coleen Phillimore) Date: Tue, 26 Apr 2016 16:05:29 -0400 Subject: RFR: 8154110: Update class* and safepoint* logging subsystems In-Reply-To: <75e5bc77-a00f-8f12-f636-0b40c28199de@oracle.com> References: <571EFFD9.9050408@oracle.com> <75e5bc77-a00f-8f12-f636-0b40c28199de@oracle.com> Message-ID: <428a3bb9-d523-bfff-794a-8e1330ff8eeb@oracle.com> Hi, Yes, his looks good, but I agree that class, loaderconstraints => class, loader, constraints and class, loaderdata => class, loader, data But I think constraints should be plural, we're logging the constraint checking (more than one constraint) so plural makes more sense to me. Okay, I think I now prefer the combination of short words to do logging after seeing this change. Thanks, Coleen On 4/26/16 3:44 PM, Robbin Ehn wrote: > Hi Max, > > I was just looking at these tags and thought maybe they should be > split, nice ! > > I have request for the ' constraints' tag, can make it singular > (without ending 's') ? > > (there are a few other ones in plural which we also should fix) > > Otherwise I think this looks good! > > Thanks! > > /Robbin > > On 04/26/2016 07:42 AM, Max Ockner wrote: >> Hello, >> >> This change factors the tags from the class and safepoint logging >> subsystems into smaller tags, including "class" and "safepoint" tags >> which are included in tag combination in their respective subsystems. >> >> classresolve -> class+resolve >> classload -> class+load >> classunload -> class+unload >> classpath -> class+path >> classloaderdata -> class+loaderdata >> classload+constraints -> class+loaderconstraints >> classinit -> class+init >> classload+preorder -> class+preorder >> >> safepointcleanup -> safepoint+cleanup >> >> class+loaderdata can be further factored into class+load+data. Same >> with class+loaderconstraints. >> >> Bug: https://bugs.openjdk.java.net/browse/JDK-8154110 >> webrev(hotspot): http://cr.openjdk.java.net/~mockner/8154110.hotspot/ >> webrev(jdk): http://cr.openjdk.java.net/~mockner/8154110.jdk/ >> >> Tested with jtreg hotspot tests. jdk tests currently running. >> >> Thanks, >> Max > From max.ockner at oracle.com Tue Apr 26 21:05:09 2016 From: max.ockner at oracle.com (Max Ockner) Date: Tue, 26 Apr 2016 17:05:09 -0400 Subject: RFR: 8154110: Update class* and safepoint* logging subsystems In-Reply-To: <428a3bb9-d523-bfff-794a-8e1330ff8eeb@oracle.com> References: <571EFFD9.9050408@oracle.com> <75e5bc77-a00f-8f12-f636-0b40c28199de@oracle.com> <428a3bb9-d523-bfff-794a-8e1330ff8eeb@oracle.com> Message-ID: <571FD805.3040201@oracle.com> New webrev: http://cr.openjdk.java.net/~mockner/8154110.hotspot.02/ I have responded to these suggestions: - class+loaderdata -> class+loader+data - class+loaderconstraints -> class+loader+constraints - I did not change constraints to constraint. I agree with Coleen's reasoning, but please let me know if you think there is a good reason to change it. Thanks, Max On 4/26/2016 4:05 PM, Coleen Phillimore wrote: > > Hi, > > Yes, his looks good, but I agree that class, loaderconstraints => > class, loader, constraints > and class, loaderdata => class, loader, data > > But I think constraints should be plural, we're logging the constraint > checking (more than one constraint) so plural makes more sense to me. > > Okay, I think I now prefer the combination of short words to do > logging after seeing this change. 
> > Thanks, > Coleen > > > On 4/26/16 3:44 PM, Robbin Ehn wrote: >> Hi Max, >> >> I was just looking at these tags and thought maybe they should be >> split, nice ! >> >> I have request for the ' constraints' tag, can make it singular >> (without ending 's') ? >> >> (there are a few other ones in plural which we also should fix) >> >> Otherwise I think this looks good! >> >> Thanks! >> >> /Robbin >> >> On 04/26/2016 07:42 AM, Max Ockner wrote: >>> Hello, >>> >>> This change factors the tags from the class and safepoint logging >>> subsystems into smaller tags, including "class" and "safepoint" tags >>> which are included in tag combination in their respective subsystems. >>> >>> classresolve -> class+resolve >>> classload -> class+load >>> classunload -> class+unload >>> classpath -> class+path >>> classloaderdata -> class+loaderdata >>> classload+constraints -> class+loaderconstraints >>> classinit -> class+init >>> classload+preorder -> class+preorder >>> >>> safepointcleanup -> safepoint+cleanup >>> >>> class+loaderdata can be further factored into class+load+data. Same >>> with class+loaderconstraints. >>> >>> Bug: https://bugs.openjdk.java.net/browse/JDK-8154110 >>> webrev(hotspot): http://cr.openjdk.java.net/~mockner/8154110.hotspot/ >>> webrev(jdk): http://cr.openjdk.java.net/~mockner/8154110.jdk/ >>> >>> Tested with jtreg hotspot tests. jdk tests currently running. >>> >>> Thanks, >>> Max >> > From coleen.phillimore at oracle.com Wed Apr 27 00:02:57 2016 From: coleen.phillimore at oracle.com (Coleen Phillimore) Date: Tue, 26 Apr 2016 20:02:57 -0400 Subject: RFR: 8154110: Update class* and safepoint* logging subsystems In-Reply-To: <571FD805.3040201@oracle.com> References: <571EFFD9.9050408@oracle.com> <75e5bc77-a00f-8f12-f636-0b40c28199de@oracle.com> <428a3bb9-d523-bfff-794a-8e1330ff8eeb@oracle.com> <571FD805.3040201@oracle.com> Message-ID: <59126cff-e67e-6a95-ce2a-587227dab9f1@oracle.com> This looks good to me. Coleen On 4/26/16 5:05 PM, Max Ockner wrote: > New webrev: http://cr.openjdk.java.net/~mockner/8154110.hotspot.02/ > > I have responded to these suggestions: > - class+loaderdata -> class+loader+data > - class+loaderconstraints -> class+loader+constraints > - I did not change constraints to constraint. I agree with Coleen's > reasoning, but please let me know if you think there is a good reason > to change it. > > Thanks, > Max > > On 4/26/2016 4:05 PM, Coleen Phillimore wrote: >> >> Hi, >> >> Yes, his looks good, but I agree that class, loaderconstraints => >> class, loader, constraints >> and class, loaderdata => class, loader, data >> >> But I think constraints should be plural, we're logging the >> constraint checking (more than one constraint) so plural makes more >> sense to me. >> >> Okay, I think I now prefer the combination of short words to do >> logging after seeing this change. >> >> Thanks, >> Coleen >> >> >> On 4/26/16 3:44 PM, Robbin Ehn wrote: >>> Hi Max, >>> >>> I was just looking at these tags and thought maybe they should be >>> split, nice ! >>> >>> I have request for the ' constraints' tag, can make it singular >>> (without ending 's') ? >>> >>> (there are a few other ones in plural which we also should fix) >>> >>> Otherwise I think this looks good! >>> >>> Thanks! 
>>> >>> /Robbin >>> >>> On 04/26/2016 07:42 AM, Max Ockner wrote: >>>> Hello, >>>> >>>> This change factors the tags from the class and safepoint logging >>>> subsystems into smaller tags, including "class" and "safepoint" >>>> tags which are included in tag combination in their respective >>>> subsystems. >>>> >>>> classresolve -> class+resolve >>>> classload -> class+load >>>> classunload -> class+unload >>>> classpath -> class+path >>>> classloaderdata -> class+loaderdata >>>> classload+constraints -> class+loaderconstraints >>>> classinit -> class+init >>>> classload+preorder -> class+preorder >>>> >>>> safepointcleanup -> safepoint+cleanup >>>> >>>> class+loaderdata can be further factored into class+load+data. Same >>>> with class+loaderconstraints. >>>> >>>> Bug: https://bugs.openjdk.java.net/browse/JDK-8154110 >>>> webrev(hotspot): http://cr.openjdk.java.net/~mockner/8154110.hotspot/ >>>> webrev(jdk): http://cr.openjdk.java.net/~mockner/8154110.jdk/ >>>> >>>> Tested with jtreg hotspot tests. jdk tests currently running. >>>> >>>> Thanks, >>>> Max >>> >> > From ioi.lam at oracle.com Wed Apr 27 00:21:48 2016 From: ioi.lam at oracle.com (Ioi Lam) Date: Tue, 26 Apr 2016 17:21:48 -0700 Subject: RFR: 8154110: Update class* and safepoint* logging subsystems In-Reply-To: <571FD805.3040201@oracle.com> References: <571EFFD9.9050408@oracle.com> <75e5bc77-a00f-8f12-f636-0b40c28199de@oracle.com> <428a3bb9-d523-bfff-794a-8e1330ff8eeb@oracle.com> <571FD805.3040201@oracle.com> Message-ID: <5720061C.4070404@oracle.com> The changes look good to me. I think each line of the "constraints" prints only one item (?). If so, we should use the singular form. (Max, could you post a sample output from -Xlog?) Thanks - Ioi On 4/26/16 2:05 PM, Max Ockner wrote: > New webrev: http://cr.openjdk.java.net/~mockner/8154110.hotspot.02/ > > I have responded to these suggestions: > - class+loaderdata -> class+loader+data > - class+loaderconstraints -> class+loader+constraints > - I did not change constraints to constraint. I agree with Coleen's > reasoning, but please let me know if you think there is a good reason > to change it. > > Thanks, > Max > > On 4/26/2016 4:05 PM, Coleen Phillimore wrote: >> >> Hi, >> >> Yes, his looks good, but I agree that class, loaderconstraints => >> class, loader, constraints >> and class, loaderdata => class, loader, data >> >> But I think constraints should be plural, we're logging the >> constraint checking (more than one constraint) so plural makes more >> sense to me. >> >> Okay, I think I now prefer the combination of short words to do >> logging after seeing this change. >> >> Thanks, >> Coleen >> >> >> On 4/26/16 3:44 PM, Robbin Ehn wrote: >>> Hi Max, >>> >>> I was just looking at these tags and thought maybe they should be >>> split, nice ! >>> >>> I have request for the ' constraints' tag, can make it singular >>> (without ending 's') ? >>> >>> (there are a few other ones in plural which we also should fix) >>> >>> Otherwise I think this looks good! >>> >>> Thanks! >>> >>> /Robbin >>> >>> On 04/26/2016 07:42 AM, Max Ockner wrote: >>>> Hello, >>>> >>>> This change factors the tags from the class and safepoint logging >>>> subsystems into smaller tags, including "class" and "safepoint" >>>> tags which are included in tag combination in their respective >>>> subsystems. 
>>>> >>>> classresolve -> class+resolve >>>> classload -> class+load >>>> classunload -> class+unload >>>> classpath -> class+path >>>> classloaderdata -> class+loaderdata >>>> classload+constraints -> class+loaderconstraints >>>> classinit -> class+init >>>> classload+preorder -> class+preorder >>>> >>>> safepointcleanup -> safepoint+cleanup >>>> >>>> class+loaderdata can be further factored into class+load+data. Same >>>> with class+loaderconstraints. >>>> >>>> Bug: https://bugs.openjdk.java.net/browse/JDK-8154110 >>>> webrev(hotspot): http://cr.openjdk.java.net/~mockner/8154110.hotspot/ >>>> webrev(jdk): http://cr.openjdk.java.net/~mockner/8154110.jdk/ >>>> >>>> Tested with jtreg hotspot tests. jdk tests currently running. >>>> >>>> Thanks, >>>> Max >>> >> > From david.holmes at oracle.com Wed Apr 27 01:13:37 2016 From: david.holmes at oracle.com (David Holmes) Date: Wed, 27 Apr 2016 11:13:37 +1000 Subject: RFR: 8154110: Update class* and safepoint* logging subsystems In-Reply-To: <5720061C.4070404@oracle.com> References: <571EFFD9.9050408@oracle.com> <75e5bc77-a00f-8f12-f636-0b40c28199de@oracle.com> <428a3bb9-d523-bfff-794a-8e1330ff8eeb@oracle.com> <571FD805.3040201@oracle.com> <5720061C.4070404@oracle.com> Message-ID: <57201241.50609@oracle.com> On 27/04/2016 10:21 AM, Ioi Lam wrote: > The changes look good to me. > > I think each line of the "constraints" prints only one item (?). If so, > we should use the singular form. (Max, could you post a sample output > from -Xlog?) The point is that it shows all the constraints. David > Thanks > - Ioi > > On 4/26/16 2:05 PM, Max Ockner wrote: >> New webrev: http://cr.openjdk.java.net/~mockner/8154110.hotspot.02/ >> >> I have responded to these suggestions: >> - class+loaderdata -> class+loader+data >> - class+loaderconstraints -> class+loader+constraints >> - I did not change constraints to constraint. I agree with Coleen's >> reasoning, but please let me know if you think there is a good reason >> to change it. >> >> Thanks, >> Max >> >> On 4/26/2016 4:05 PM, Coleen Phillimore wrote: >>> >>> Hi, >>> >>> Yes, his looks good, but I agree that class, loaderconstraints => >>> class, loader, constraints >>> and class, loaderdata => class, loader, data >>> >>> But I think constraints should be plural, we're logging the >>> constraint checking (more than one constraint) so plural makes more >>> sense to me. >>> >>> Okay, I think I now prefer the combination of short words to do >>> logging after seeing this change. >>> >>> Thanks, >>> Coleen >>> >>> >>> On 4/26/16 3:44 PM, Robbin Ehn wrote: >>>> Hi Max, >>>> >>>> I was just looking at these tags and thought maybe they should be >>>> split, nice ! >>>> >>>> I have request for the ' constraints' tag, can make it singular >>>> (without ending 's') ? >>>> >>>> (there are a few other ones in plural which we also should fix) >>>> >>>> Otherwise I think this looks good! >>>> >>>> Thanks! >>>> >>>> /Robbin >>>> >>>> On 04/26/2016 07:42 AM, Max Ockner wrote: >>>>> Hello, >>>>> >>>>> This change factors the tags from the class and safepoint logging >>>>> subsystems into smaller tags, including "class" and "safepoint" >>>>> tags which are included in tag combination in their respective >>>>> subsystems. 
>>>>> >>>>> classresolve -> class+resolve >>>>> classload -> class+load >>>>> classunload -> class+unload >>>>> classpath -> class+path >>>>> classloaderdata -> class+loaderdata >>>>> classload+constraints -> class+loaderconstraints >>>>> classinit -> class+init >>>>> classload+preorder -> class+preorder >>>>> >>>>> safepointcleanup -> safepoint+cleanup >>>>> >>>>> class+loaderdata can be further factored into class+load+data. Same >>>>> with class+loaderconstraints. >>>>> >>>>> Bug: https://bugs.openjdk.java.net/browse/JDK-8154110 >>>>> webrev(hotspot): http://cr.openjdk.java.net/~mockner/8154110.hotspot/ >>>>> webrev(jdk): http://cr.openjdk.java.net/~mockner/8154110.jdk/ >>>>> >>>>> Tested with jtreg hotspot tests. jdk tests currently running. >>>>> >>>>> Thanks, >>>>> Max >>>> >>> >> > From robbin.ehn at oracle.com Wed Apr 27 07:32:46 2016 From: robbin.ehn at oracle.com (Robbin Ehn) Date: Wed, 27 Apr 2016 09:32:46 +0200 Subject: RFR: 8154110: Update class* and safepoint* logging subsystems In-Reply-To: <571FD805.3040201@oracle.com> References: <571EFFD9.9050408@oracle.com> <75e5bc77-a00f-8f12-f636-0b40c28199de@oracle.com> <428a3bb9-d523-bfff-794a-8e1330ff8eeb@oracle.com> <571FD805.3040201@oracle.com> Message-ID: <62a05651-bf62-dd46-56ec-cb029a256a8f@oracle.com> Hi Max, Coleen is not wrong in this case, but a tag can be a part of multiple tag-set. And we don't want to both a singular and a plural version of each tag. And it is much simpler to write your -Xlog command when you know all tag have the same form. Thanks! /Robbin On 04/26/2016 11:05 PM, Max Ockner wrote: > New webrev: http://cr.openjdk.java.net/~mockner/8154110.hotspot.02/ > > I have responded to these suggestions: > - class+loaderdata -> class+loader+data > - class+loaderconstraints -> class+loader+constraints > - I did not change constraints to constraint. I agree with Coleen's > reasoning, but please let me know if you think there is a good reason > to change it. > > Thanks, > Max > > On 4/26/2016 4:05 PM, Coleen Phillimore wrote: >> >> Hi, >> >> Yes, his looks good, but I agree that class, loaderconstraints => >> class, loader, constraints >> and class, loaderdata => class, loader, data >> >> But I think constraints should be plural, we're logging the >> constraint checking (more than one constraint) so plural makes more >> sense to me. >> >> Okay, I think I now prefer the combination of short words to do >> logging after seeing this change. >> >> Thanks, >> Coleen >> >> >> On 4/26/16 3:44 PM, Robbin Ehn wrote: >>> Hi Max, >>> >>> I was just looking at these tags and thought maybe they should be >>> split, nice ! >>> >>> I have request for the ' constraints' tag, can make it singular >>> (without ending 's') ? >>> >>> (there are a few other ones in plural which we also should fix) >>> >>> Otherwise I think this looks good! >>> >>> Thanks! >>> >>> /Robbin >>> >>> On 04/26/2016 07:42 AM, Max Ockner wrote: >>>> Hello, >>>> >>>> This change factors the tags from the class and safepoint logging >>>> subsystems into smaller tags, including "class" and "safepoint" >>>> tags which are included in tag combination in their respective >>>> subsystems. 
>>>> >>>> classresolve -> class+resolve >>>> classload -> class+load >>>> classunload -> class+unload >>>> classpath -> class+path >>>> classloaderdata -> class+loaderdata >>>> classload+constraints -> class+loaderconstraints >>>> classinit -> class+init >>>> classload+preorder -> class+preorder >>>> >>>> safepointcleanup -> safepoint+cleanup >>>> >>>> class+loaderdata can be further factored into class+load+data. Same >>>> with class+loaderconstraints. >>>> >>>> Bug: https://bugs.openjdk.java.net/browse/JDK-8154110 >>>> webrev(hotspot): http://cr.openjdk.java.net/~mockner/8154110.hotspot/ >>>> webrev(jdk): http://cr.openjdk.java.net/~mockner/8154110.jdk/ >>>> >>>> Tested with jtreg hotspot tests. jdk tests currently running. >>>> >>>> Thanks, >>>> Max >>> >> > From ioi.lam at oracle.com Wed Apr 27 08:32:57 2016 From: ioi.lam at oracle.com (Ioi Lam) Date: Wed, 27 Apr 2016 01:32:57 -0700 Subject: RFR: 8154110: Update class* and safepoint* logging subsystems In-Reply-To: <57201241.50609@oracle.com> References: <571EFFD9.9050408@oracle.com> <75e5bc77-a00f-8f12-f636-0b40c28199de@oracle.com> <428a3bb9-d523-bfff-794a-8e1330ff8eeb@oracle.com> <571FD805.3040201@oracle.com> <5720061C.4070404@oracle.com> <57201241.50609@oracle.com> Message-ID: <57207939.2020805@oracle.com> On 4/26/16 6:13 PM, David Holmes wrote: > On 27/04/2016 10:21 AM, Ioi Lam wrote: >> The changes look good to me. >> >> I think each line of the "constraints" prints only one item (?). If so, >> we should use the singular form. (Max, could you post a sample output >> from -Xlog?) > > The point is that it shows all the constraints. > We currently have a mix-match of singular/plural forms in the log tags: singular: LOG_TAG(alloc) LOG_TAG(region) plural: LOG_TAG(defaultmethods) Sometimes whether to use an (s) can be a personal preference. However, the poor user would need to try 2^n times to get the right combination of (s) or (no s), and that's too much. - Ioi > David > >> Thanks >> - Ioi >> >> On 4/26/16 2:05 PM, Max Ockner wrote: >>> New webrev: http://cr.openjdk.java.net/~mockner/8154110.hotspot.02/ >>> >>> I have responded to these suggestions: >>> - class+loaderdata -> class+loader+data >>> - class+loaderconstraints -> class+loader+constraints >>> - I did not change constraints to constraint. I agree with Coleen's >>> reasoning, but please let me know if you think there is a good reason >>> to change it. >>> >>> Thanks, >>> Max >>> >>> On 4/26/2016 4:05 PM, Coleen Phillimore wrote: >>>> >>>> Hi, >>>> >>>> Yes, his looks good, but I agree that class, loaderconstraints => >>>> class, loader, constraints >>>> and class, loaderdata => class, loader, data >>>> >>>> But I think constraints should be plural, we're logging the >>>> constraint checking (more than one constraint) so plural makes more >>>> sense to me. >>>> >>>> Okay, I think I now prefer the combination of short words to do >>>> logging after seeing this change. >>>> >>>> Thanks, >>>> Coleen >>>> >>>> >>>> On 4/26/16 3:44 PM, Robbin Ehn wrote: >>>>> Hi Max, >>>>> >>>>> I was just looking at these tags and thought maybe they should be >>>>> split, nice ! >>>>> >>>>> I have request for the ' constraints' tag, can make it singular >>>>> (without ending 's') ? >>>>> >>>>> (there are a few other ones in plural which we also should fix) >>>>> >>>>> Otherwise I think this looks good! >>>>> >>>>> Thanks! 
>>>>> >>>>> /Robbin >>>>> >>>>> On 04/26/2016 07:42 AM, Max Ockner wrote: >>>>>> Hello, >>>>>> >>>>>> This change factors the tags from the class and safepoint logging >>>>>> subsystems into smaller tags, including "class" and "safepoint" >>>>>> tags which are included in tag combination in their respective >>>>>> subsystems. >>>>>> >>>>>> classresolve -> class+resolve >>>>>> classload -> class+load >>>>>> classunload -> class+unload >>>>>> classpath -> class+path >>>>>> classloaderdata -> class+loaderdata >>>>>> classload+constraints -> class+loaderconstraints >>>>>> classinit -> class+init >>>>>> classload+preorder -> class+preorder >>>>>> >>>>>> safepointcleanup -> safepoint+cleanup >>>>>> >>>>>> class+loaderdata can be further factored into class+load+data. Same >>>>>> with class+loaderconstraints. >>>>>> >>>>>> Bug: https://bugs.openjdk.java.net/browse/JDK-8154110 >>>>>> webrev(hotspot): >>>>>> http://cr.openjdk.java.net/~mockner/8154110.hotspot/ >>>>>> webrev(jdk): http://cr.openjdk.java.net/~mockner/8154110.jdk/ >>>>>> >>>>>> Tested with jtreg hotspot tests. jdk tests currently running. >>>>>> >>>>>> Thanks, >>>>>> Max >>>>> >>>> >>> >> From marcus.larsson at oracle.com Wed Apr 27 08:39:41 2016 From: marcus.larsson at oracle.com (Marcus Larsson) Date: Wed, 27 Apr 2016 10:39:41 +0200 Subject: RFR: 8154110: Update class* and safepoint* logging subsystems In-Reply-To: <57207939.2020805@oracle.com> References: <571EFFD9.9050408@oracle.com> <75e5bc77-a00f-8f12-f636-0b40c28199de@oracle.com> <428a3bb9-d523-bfff-794a-8e1330ff8eeb@oracle.com> <571FD805.3040201@oracle.com> <5720061C.4070404@oracle.com> <57201241.50609@oracle.com> <57207939.2020805@oracle.com> Message-ID: <5d14b827-63b8-02cb-2cfe-73f88ce41678@oracle.com> Hi, On 04/27/2016 10:32 AM, Ioi Lam wrote: > > > On 4/26/16 6:13 PM, David Holmes wrote: >> On 27/04/2016 10:21 AM, Ioi Lam wrote: >>> The changes look good to me. >>> >>> I think each line of the "constraints" prints only one item (?). If so, >>> we should use the singular form. (Max, could you post a sample output >>> from -Xlog?) >> >> The point is that it shows all the constraints. >> > > We currently have a mix-match of singular/plural forms in the log tags: > > singular: > LOG_TAG(alloc) > LOG_TAG(region) > > plural: > LOG_TAG(defaultmethods) > > Sometimes whether to use an (s) can be a personal preference. However, > the poor user would need to try 2^n times to get the right combination > of (s) or (no s), and that's too much. Yeah, we should definitely be consistent with this. I vote for tags in singular, since it makes them ever so slightly shorter. Thanks, Marcus > > - Ioi > >> David >> >>> Thanks >>> - Ioi >>> >>> On 4/26/16 2:05 PM, Max Ockner wrote: >>>> New webrev: http://cr.openjdk.java.net/~mockner/8154110.hotspot.02/ >>>> >>>> I have responded to these suggestions: >>>> - class+loaderdata -> class+loader+data >>>> - class+loaderconstraints -> class+loader+constraints >>>> - I did not change constraints to constraint. I agree with Coleen's >>>> reasoning, but please let me know if you think there is a good reason >>>> to change it. >>>> >>>> Thanks, >>>> Max >>>> >>>> On 4/26/2016 4:05 PM, Coleen Phillimore wrote: >>>>> >>>>> Hi, >>>>> >>>>> Yes, his looks good, but I agree that class, loaderconstraints => >>>>> class, loader, constraints >>>>> and class, loaderdata => class, loader, data >>>>> >>>>> But I think constraints should be plural, we're logging the >>>>> constraint checking (more than one constraint) so plural makes more >>>>> sense to me. 
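For context, the tags being debated here are the unified logging tags declared with LOG_TAG(...) in logTag.hpp, and a call site selects a whole tag set. Roughly (an illustrative sketch, not code from the webrev; the message text and variable names are placeholders):

    log_info(class, load)("loaded %s", name);
        // enabled with -Xlog:class+load=info

    log_debug(class, loader, constraints)("adding constraint for %s", name);
        // enabled with -Xlog:class+loader+constraints=debug

Whether that last tag is spelled constraint or constraints, both at the call site and on the command line, is exactly the naming question in this thread.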
>>>>> >>>>> Okay, I think I now prefer the combination of short words to do >>>>> logging after seeing this change. >>>>> >>>>> Thanks, >>>>> Coleen >>>>> >>>>> >>>>> On 4/26/16 3:44 PM, Robbin Ehn wrote: >>>>>> Hi Max, >>>>>> >>>>>> I was just looking at these tags and thought maybe they should be >>>>>> split, nice ! >>>>>> >>>>>> I have request for the ' constraints' tag, can make it singular >>>>>> (without ending 's') ? >>>>>> >>>>>> (there are a few other ones in plural which we also should fix) >>>>>> >>>>>> Otherwise I think this looks good! >>>>>> >>>>>> Thanks! >>>>>> >>>>>> /Robbin >>>>>> >>>>>> On 04/26/2016 07:42 AM, Max Ockner wrote: >>>>>>> Hello, >>>>>>> >>>>>>> This change factors the tags from the class and safepoint logging >>>>>>> subsystems into smaller tags, including "class" and "safepoint" >>>>>>> tags which are included in tag combination in their respective >>>>>>> subsystems. >>>>>>> >>>>>>> classresolve -> class+resolve >>>>>>> classload -> class+load >>>>>>> classunload -> class+unload >>>>>>> classpath -> class+path >>>>>>> classloaderdata -> class+loaderdata >>>>>>> classload+constraints -> class+loaderconstraints >>>>>>> classinit -> class+init >>>>>>> classload+preorder -> class+preorder >>>>>>> >>>>>>> safepointcleanup -> safepoint+cleanup >>>>>>> >>>>>>> class+loaderdata can be further factored into class+load+data. Same >>>>>>> with class+loaderconstraints. >>>>>>> >>>>>>> Bug: https://bugs.openjdk.java.net/browse/JDK-8154110 >>>>>>> webrev(hotspot): >>>>>>> http://cr.openjdk.java.net/~mockner/8154110.hotspot/ >>>>>>> webrev(jdk): http://cr.openjdk.java.net/~mockner/8154110.jdk/ >>>>>>> >>>>>>> Tested with jtreg hotspot tests. jdk tests currently running. >>>>>>> >>>>>>> Thanks, >>>>>>> Max >>>>>> >>>>> >>>> >>> > From david.holmes at oracle.com Wed Apr 27 09:19:02 2016 From: david.holmes at oracle.com (David Holmes) Date: Wed, 27 Apr 2016 19:19:02 +1000 Subject: RFR: 8154110: Update class* and safepoint* logging subsystems In-Reply-To: <5d14b827-63b8-02cb-2cfe-73f88ce41678@oracle.com> References: <571EFFD9.9050408@oracle.com> <75e5bc77-a00f-8f12-f636-0b40c28199de@oracle.com> <428a3bb9-d523-bfff-794a-8e1330ff8eeb@oracle.com> <571FD805.3040201@oracle.com> <5720061C.4070404@oracle.com> <57201241.50609@oracle.com> <57207939.2020805@oracle.com> <5d14b827-63b8-02cb-2cfe-73f88ce41678@oracle.com> Message-ID: <57208406.5040605@oracle.com> On 27/04/2016 6:39 PM, Marcus Larsson wrote: > Hi, > > > On 04/27/2016 10:32 AM, Ioi Lam wrote: >> >> >> On 4/26/16 6:13 PM, David Holmes wrote: >>> On 27/04/2016 10:21 AM, Ioi Lam wrote: >>>> The changes look good to me. >>>> >>>> I think each line of the "constraints" prints only one item (?). If so, >>>> we should use the singular form. (Max, could you post a sample output >>>> from -Xlog?) >>> >>> The point is that it shows all the constraints. >>> >> >> We currently have a mix-match of singular/plural forms in the log tags: >> >> singular: >> LOG_TAG(alloc) >> LOG_TAG(region) >> >> plural: >> LOG_TAG(defaultmethods) >> >> Sometimes whether to use an (s) can be a personal preference. However, >> the poor user would need to try 2^n times to get the right combination >> of (s) or (no s), and that's too much. > > Yeah, we should definitely be consistent with this. I vote for tags in > singular, since it makes them ever so slightly shorter. Hmmm, so data becomes datum? Or is that the exception to the rule? 
;-) No matter what you choose some things will be unintuitively awkward: defaultmethods, exceptions, constraints, all suit plural forms and are awkward in singular. There are numerous plural forms in use already. David > Thanks, > Marcus > >> >> - Ioi >> >>> David >>> >>>> Thanks >>>> - Ioi >>>> >>>> On 4/26/16 2:05 PM, Max Ockner wrote: >>>>> New webrev: http://cr.openjdk.java.net/~mockner/8154110.hotspot.02/ >>>>> >>>>> I have responded to these suggestions: >>>>> - class+loaderdata -> class+loader+data >>>>> - class+loaderconstraints -> class+loader+constraints >>>>> - I did not change constraints to constraint. I agree with Coleen's >>>>> reasoning, but please let me know if you think there is a good reason >>>>> to change it. >>>>> >>>>> Thanks, >>>>> Max >>>>> >>>>> On 4/26/2016 4:05 PM, Coleen Phillimore wrote: >>>>>> >>>>>> Hi, >>>>>> >>>>>> Yes, his looks good, but I agree that class, loaderconstraints => >>>>>> class, loader, constraints >>>>>> and class, loaderdata => class, loader, data >>>>>> >>>>>> But I think constraints should be plural, we're logging the >>>>>> constraint checking (more than one constraint) so plural makes more >>>>>> sense to me. >>>>>> >>>>>> Okay, I think I now prefer the combination of short words to do >>>>>> logging after seeing this change. >>>>>> >>>>>> Thanks, >>>>>> Coleen >>>>>> >>>>>> >>>>>> On 4/26/16 3:44 PM, Robbin Ehn wrote: >>>>>>> Hi Max, >>>>>>> >>>>>>> I was just looking at these tags and thought maybe they should be >>>>>>> split, nice ! >>>>>>> >>>>>>> I have request for the ' constraints' tag, can make it singular >>>>>>> (without ending 's') ? >>>>>>> >>>>>>> (there are a few other ones in plural which we also should fix) >>>>>>> >>>>>>> Otherwise I think this looks good! >>>>>>> >>>>>>> Thanks! >>>>>>> >>>>>>> /Robbin >>>>>>> >>>>>>> On 04/26/2016 07:42 AM, Max Ockner wrote: >>>>>>>> Hello, >>>>>>>> >>>>>>>> This change factors the tags from the class and safepoint logging >>>>>>>> subsystems into smaller tags, including "class" and "safepoint" >>>>>>>> tags which are included in tag combination in their respective >>>>>>>> subsystems. >>>>>>>> >>>>>>>> classresolve -> class+resolve >>>>>>>> classload -> class+load >>>>>>>> classunload -> class+unload >>>>>>>> classpath -> class+path >>>>>>>> classloaderdata -> class+loaderdata >>>>>>>> classload+constraints -> class+loaderconstraints >>>>>>>> classinit -> class+init >>>>>>>> classload+preorder -> class+preorder >>>>>>>> >>>>>>>> safepointcleanup -> safepoint+cleanup >>>>>>>> >>>>>>>> class+loaderdata can be further factored into class+load+data. Same >>>>>>>> with class+loaderconstraints. >>>>>>>> >>>>>>>> Bug: https://bugs.openjdk.java.net/browse/JDK-8154110 >>>>>>>> webrev(hotspot): >>>>>>>> http://cr.openjdk.java.net/~mockner/8154110.hotspot/ >>>>>>>> webrev(jdk): http://cr.openjdk.java.net/~mockner/8154110.jdk/ >>>>>>>> >>>>>>>> Tested with jtreg hotspot tests. jdk tests currently running. 
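To make the effect of the split concrete, with the names proposed in this thread the command lines would look roughly like this (illustrative only; MyApp is a placeholder, and the full selector syntax is printed by java -Xlog:help):

    java -Xlog:class+load=info MyApp                  # roughly what -XX:+TraceClassLoading printed
    java -Xlog:class+unload=debug MyApp
    java -Xlog:class+loader+constraints=debug MyApp
    java -Xlog:safepoint+cleanup=info MyApp
    java -Xlog:class*=debug:file=class.log MyApp      # every tag set containing 'class', to a file

The singular/plural question matters mostly for the third line: whichever form is chosen, it has to be typed the same way everywhere.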
>>>>>>>> >>>>>>>> Thanks, >>>>>>>> Max >>>>>>> >>>>>> >>>>> >>>> >> > From marcus.larsson at oracle.com Wed Apr 27 09:41:34 2016 From: marcus.larsson at oracle.com (Marcus Larsson) Date: Wed, 27 Apr 2016 11:41:34 +0200 Subject: RFR: 8154110: Update class* and safepoint* logging subsystems In-Reply-To: <57208406.5040605@oracle.com> References: <571EFFD9.9050408@oracle.com> <75e5bc77-a00f-8f12-f636-0b40c28199de@oracle.com> <428a3bb9-d523-bfff-794a-8e1330ff8eeb@oracle.com> <571FD805.3040201@oracle.com> <5720061C.4070404@oracle.com> <57201241.50609@oracle.com> <57207939.2020805@oracle.com> <5d14b827-63b8-02cb-2cfe-73f88ce41678@oracle.com> <57208406.5040605@oracle.com> Message-ID: <38802e29-b1c8-9dea-093a-5eabe3cb94e4@oracle.com> On 04/27/2016 11:19 AM, David Holmes wrote: > On 27/04/2016 6:39 PM, Marcus Larsson wrote: >> Hi, >> >> >> On 04/27/2016 10:32 AM, Ioi Lam wrote: >>> >>> >>> On 4/26/16 6:13 PM, David Holmes wrote: >>>> On 27/04/2016 10:21 AM, Ioi Lam wrote: >>>>> The changes look good to me. >>>>> >>>>> I think each line of the "constraints" prints only one item (?). >>>>> If so, >>>>> we should use the singular form. (Max, could you post a sample output >>>>> from -Xlog?) >>>> >>>> The point is that it shows all the constraints. >>>> >>> >>> We currently have a mix-match of singular/plural forms in the log tags: >>> >>> singular: >>> LOG_TAG(alloc) >>> LOG_TAG(region) >>> >>> plural: >>> LOG_TAG(defaultmethods) >>> >>> Sometimes whether to use an (s) can be a personal preference. However, >>> the poor user would need to try 2^n times to get the right combination >>> of (s) or (no s), and that's too much. >> >> Yeah, we should definitely be consistent with this. I vote for tags in >> singular, since it makes them ever so slightly shorter. > > Hmmm, so data becomes datum? Or is that the exception to the rule? ;-) Seems like a good exception to me. :) Perhaps the rule should be to avoid trailing s if it signifies plural. > > No matter what you choose some things will be unintuitively awkward: > defaultmethods, exceptions, constraints, all suit plural forms and are > awkward in singular. There are numerous plural forms in use already. I don't see what distinguishes the tags you mention from the others. I guess I don't see what makes them awkward in singular. If I want defaultmethod logging I enable that tag. It's obvious I will get multiple log messages for it so I don't see the purpose of the 's'. Thanks, Marcus > > David > >> Thanks, >> Marcus >> >>> >>> - Ioi >>> >>>> David >>>> >>>>> Thanks >>>>> - Ioi >>>>> >>>>> On 4/26/16 2:05 PM, Max Ockner wrote: >>>>>> New webrev: http://cr.openjdk.java.net/~mockner/8154110.hotspot.02/ >>>>>> >>>>>> I have responded to these suggestions: >>>>>> - class+loaderdata -> class+loader+data >>>>>> - class+loaderconstraints -> class+loader+constraints >>>>>> - I did not change constraints to constraint. I agree with Coleen's >>>>>> reasoning, but please let me know if you think there is a good >>>>>> reason >>>>>> to change it. >>>>>> >>>>>> Thanks, >>>>>> Max >>>>>> >>>>>> On 4/26/2016 4:05 PM, Coleen Phillimore wrote: >>>>>>> >>>>>>> Hi, >>>>>>> >>>>>>> Yes, his looks good, but I agree that class, loaderconstraints => >>>>>>> class, loader, constraints >>>>>>> and class, loaderdata => class, loader, data >>>>>>> >>>>>>> But I think constraints should be plural, we're logging the >>>>>>> constraint checking (more than one constraint) so plural makes more >>>>>>> sense to me. 
>>>>>>> >>>>>>> Okay, I think I now prefer the combination of short words to do >>>>>>> logging after seeing this change. >>>>>>> >>>>>>> Thanks, >>>>>>> Coleen >>>>>>> >>>>>>> >>>>>>> On 4/26/16 3:44 PM, Robbin Ehn wrote: >>>>>>>> Hi Max, >>>>>>>> >>>>>>>> I was just looking at these tags and thought maybe they should be >>>>>>>> split, nice ! >>>>>>>> >>>>>>>> I have request for the ' constraints' tag, can make it singular >>>>>>>> (without ending 's') ? >>>>>>>> >>>>>>>> (there are a few other ones in plural which we also should fix) >>>>>>>> >>>>>>>> Otherwise I think this looks good! >>>>>>>> >>>>>>>> Thanks! >>>>>>>> >>>>>>>> /Robbin >>>>>>>> >>>>>>>> On 04/26/2016 07:42 AM, Max Ockner wrote: >>>>>>>>> Hello, >>>>>>>>> >>>>>>>>> This change factors the tags from the class and safepoint logging >>>>>>>>> subsystems into smaller tags, including "class" and "safepoint" >>>>>>>>> tags which are included in tag combination in their respective >>>>>>>>> subsystems. >>>>>>>>> >>>>>>>>> classresolve -> class+resolve >>>>>>>>> classload -> class+load >>>>>>>>> classunload -> class+unload >>>>>>>>> classpath -> class+path >>>>>>>>> classloaderdata -> class+loaderdata >>>>>>>>> classload+constraints -> class+loaderconstraints >>>>>>>>> classinit -> class+init >>>>>>>>> classload+preorder -> class+preorder >>>>>>>>> >>>>>>>>> safepointcleanup -> safepoint+cleanup >>>>>>>>> >>>>>>>>> class+loaderdata can be further factored into class+load+data. >>>>>>>>> Same >>>>>>>>> with class+loaderconstraints. >>>>>>>>> >>>>>>>>> Bug: https://bugs.openjdk.java.net/browse/JDK-8154110 >>>>>>>>> webrev(hotspot): >>>>>>>>> http://cr.openjdk.java.net/~mockner/8154110.hotspot/ >>>>>>>>> webrev(jdk): http://cr.openjdk.java.net/~mockner/8154110.jdk/ >>>>>>>>> >>>>>>>>> Tested with jtreg hotspot tests. jdk tests currently running. >>>>>>>>> >>>>>>>>> Thanks, >>>>>>>>> Max >>>>>>>> >>>>>>> >>>>>> >>>>> >>> >> From stefan.karlsson at oracle.com Wed Apr 27 11:57:14 2016 From: stefan.karlsson at oracle.com (Stefan Karlsson) Date: Wed, 27 Apr 2016 13:57:14 +0200 Subject: RFR: 8141501: Problems with BitMap buffer management Message-ID: <5720A91A.1020809@oracle.com> Hi all, Please review this patch to change how the backing storage of BitMaps are managed. http://cr.openjdk.java.net/~stefank/8141501/webrev.01 https://bugs.openjdk.java.net/browse/JDK-8141501 The patch changes BitMap into an abstract base class, with concrete sub-classes that manages the underlying bitmap backing storage. The proposed BitMap classes are: - BitMap - the abstract base class - ResourceBitMap - bitmap with resource area allocated backing storage - ArenaBitMap - bitmap with arena allocated backing storage - CHeapBitMap - bitmap with CHeap allocated backing storage - BitMapView - bitmap without the ownership of the backing storage This will hopefully make it less likely to use the BitMaps incorrectly. Previously, it was possible to write the following broken code: // CHeap allocate. BitMap map(BITMAP_SIZE / 2, false); // Resource allocate. // The CHeap memory is leaked. map.resize(BITMAP_SIZE); and: // Resource allocate. BitMap map(BITMAP_SIZE / 2); // CHeap allocate. // CHeap freeing Resource allocated memory => memory stomping map.resize(BITMAP_SIZE, false); The stricter typing of the new BitMap sub-classes prevents these classes of bugs. Further motivation for this patch can be found in: https://bugs.openjdk.java.net/browse/JDK-8141501 Tested with JPRT and ExecuteInternalVMTests. 
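To make the ownership rules concrete, usage with the new classes would look roughly like this (a sketch based on the class list above; the exact constructor and accessor signatures are assumptions on my part, the webrev has the real declarations):

    {
      ResourceMark rm;
      ResourceBitMap live(BITMAP_SIZE);     // backing storage from the current resource area
      live.resize(BITMAP_SIZE * 2);         // reallocates in the resource area only
    }                                       // storage released with the ResourceMark

    CHeapBitMap cards(BITMAP_SIZE);         // backing storage allocated on the C heap
    cards.resize(BITMAP_SIZE * 2);          // reallocates on the C heap only; freed by the destructor

    BitMapView view(cards.map(), BITMAP_SIZE * 2);
                                            // wraps storage it does not own:
                                            // never allocates, frees or resizes

With the old single BitMap class the allocator was chosen per call (the boolean argument in the broken examples above), which is what made the leak and the memory stomping possible; with the sub-classes the allocator is fixed by the type.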
Thanks Kim for providing offline feedback on different revisions of this patch. This changes code in mostly the GC and Compiler parts of the JVM, so it would be good to get reviews from those groups. Thanks, StefanK From david.holmes at oracle.com Wed Apr 27 12:17:48 2016 From: david.holmes at oracle.com (David Holmes) Date: Wed, 27 Apr 2016 22:17:48 +1000 Subject: RFR: 8154110: Update class* and safepoint* logging subsystems In-Reply-To: <38802e29-b1c8-9dea-093a-5eabe3cb94e4@oracle.com> References: <571EFFD9.9050408@oracle.com> <75e5bc77-a00f-8f12-f636-0b40c28199de@oracle.com> <428a3bb9-d523-bfff-794a-8e1330ff8eeb@oracle.com> <571FD805.3040201@oracle.com> <5720061C.4070404@oracle.com> <57201241.50609@oracle.com> <57207939.2020805@oracle.com> <5d14b827-63b8-02cb-2cfe-73f88ce41678@oracle.com> <57208406.5040605@oracle.com> <38802e29-b1c8-9dea-093a-5eabe3cb94e4@oracle.com> Message-ID: <5720ADEC.3050904@oracle.com> On 27/04/2016 7:41 PM, Marcus Larsson wrote: > > > On 04/27/2016 11:19 AM, David Holmes wrote: >> On 27/04/2016 6:39 PM, Marcus Larsson wrote: >>> Hi, >>> >>> >>> On 04/27/2016 10:32 AM, Ioi Lam wrote: >>>> >>>> >>>> On 4/26/16 6:13 PM, David Holmes wrote: >>>>> On 27/04/2016 10:21 AM, Ioi Lam wrote: >>>>>> The changes look good to me. >>>>>> >>>>>> I think each line of the "constraints" prints only one item (?). >>>>>> If so, >>>>>> we should use the singular form. (Max, could you post a sample output >>>>>> from -Xlog?) >>>>> >>>>> The point is that it shows all the constraints. >>>>> >>>> >>>> We currently have a mix-match of singular/plural forms in the log tags: >>>> >>>> singular: >>>> LOG_TAG(alloc) >>>> LOG_TAG(region) >>>> >>>> plural: >>>> LOG_TAG(defaultmethods) >>>> >>>> Sometimes whether to use an (s) can be a personal preference. However, >>>> the poor user would need to try 2^n times to get the right combination >>>> of (s) or (no s), and that's too much. >>> >>> Yeah, we should definitely be consistent with this. I vote for tags in >>> singular, since it makes them ever so slightly shorter. >> >> Hmmm, so data becomes datum? Or is that the exception to the rule? ;-) > > Seems like a good exception to me. :) Perhaps the rule should be to > avoid trailing s if it signifies plural. > >> >> No matter what you choose some things will be unintuitively awkward: >> defaultmethods, exceptions, constraints, all suit plural forms and are >> awkward in singular. There are numerous plural forms in use already. > > I don't see what distinguishes the tags you mention from the others. I > guess I don't see what makes them awkward in singular. If I want > defaultmethod logging I enable that tag. It's obvious I will get > multiple log messages for it so I don't see the purpose of the 's'. Maybe it is just familiarity with the old TraceXXX forms. I prefer the noun forms to be plural - as many of them are. Many of the ones that are not are a prefix taken from a longer form ie TraceThreadXXX became thread + xxx; whereas TraceExceptions became exceptions. It is all somewhat arbitrary in my view. David > Thanks, > Marcus > >> >> David >> >>> Thanks, >>> Marcus >>> >>>> >>>> - Ioi >>>> >>>>> David >>>>> >>>>>> Thanks >>>>>> - Ioi >>>>>> >>>>>> On 4/26/16 2:05 PM, Max Ockner wrote: >>>>>>> New webrev: http://cr.openjdk.java.net/~mockner/8154110.hotspot.02/ >>>>>>> >>>>>>> I have responded to these suggestions: >>>>>>> - class+loaderdata -> class+loader+data >>>>>>> - class+loaderconstraints -> class+loader+constraints >>>>>>> - I did not change constraints to constraint. 
I agree with Coleen's >>>>>>> reasoning, but please let me know if you think there is a good >>>>>>> reason >>>>>>> to change it. >>>>>>> >>>>>>> Thanks, >>>>>>> Max >>>>>>> >>>>>>> On 4/26/2016 4:05 PM, Coleen Phillimore wrote: >>>>>>>> >>>>>>>> Hi, >>>>>>>> >>>>>>>> Yes, his looks good, but I agree that class, loaderconstraints => >>>>>>>> class, loader, constraints >>>>>>>> and class, loaderdata => class, loader, data >>>>>>>> >>>>>>>> But I think constraints should be plural, we're logging the >>>>>>>> constraint checking (more than one constraint) so plural makes more >>>>>>>> sense to me. >>>>>>>> >>>>>>>> Okay, I think I now prefer the combination of short words to do >>>>>>>> logging after seeing this change. >>>>>>>> >>>>>>>> Thanks, >>>>>>>> Coleen >>>>>>>> >>>>>>>> >>>>>>>> On 4/26/16 3:44 PM, Robbin Ehn wrote: >>>>>>>>> Hi Max, >>>>>>>>> >>>>>>>>> I was just looking at these tags and thought maybe they should be >>>>>>>>> split, nice ! >>>>>>>>> >>>>>>>>> I have request for the ' constraints' tag, can make it singular >>>>>>>>> (without ending 's') ? >>>>>>>>> >>>>>>>>> (there are a few other ones in plural which we also should fix) >>>>>>>>> >>>>>>>>> Otherwise I think this looks good! >>>>>>>>> >>>>>>>>> Thanks! >>>>>>>>> >>>>>>>>> /Robbin >>>>>>>>> >>>>>>>>> On 04/26/2016 07:42 AM, Max Ockner wrote: >>>>>>>>>> Hello, >>>>>>>>>> >>>>>>>>>> This change factors the tags from the class and safepoint logging >>>>>>>>>> subsystems into smaller tags, including "class" and "safepoint" >>>>>>>>>> tags which are included in tag combination in their respective >>>>>>>>>> subsystems. >>>>>>>>>> >>>>>>>>>> classresolve -> class+resolve >>>>>>>>>> classload -> class+load >>>>>>>>>> classunload -> class+unload >>>>>>>>>> classpath -> class+path >>>>>>>>>> classloaderdata -> class+loaderdata >>>>>>>>>> classload+constraints -> class+loaderconstraints >>>>>>>>>> classinit -> class+init >>>>>>>>>> classload+preorder -> class+preorder >>>>>>>>>> >>>>>>>>>> safepointcleanup -> safepoint+cleanup >>>>>>>>>> >>>>>>>>>> class+loaderdata can be further factored into class+load+data. >>>>>>>>>> Same >>>>>>>>>> with class+loaderconstraints. >>>>>>>>>> >>>>>>>>>> Bug: https://bugs.openjdk.java.net/browse/JDK-8154110 >>>>>>>>>> webrev(hotspot): >>>>>>>>>> http://cr.openjdk.java.net/~mockner/8154110.hotspot/ >>>>>>>>>> webrev(jdk): http://cr.openjdk.java.net/~mockner/8154110.jdk/ >>>>>>>>>> >>>>>>>>>> Tested with jtreg hotspot tests. jdk tests currently running. >>>>>>>>>> >>>>>>>>>> Thanks, >>>>>>>>>> Max >>>>>>>>> >>>>>>>> >>>>>>> >>>>>> >>>> >>> > From coleen.phillimore at oracle.com Wed Apr 27 12:18:57 2016 From: coleen.phillimore at oracle.com (Coleen Phillimore) Date: Wed, 27 Apr 2016 08:18:57 -0400 Subject: RFR: 8154110: Update class* and safepoint* logging subsystems In-Reply-To: <38802e29-b1c8-9dea-093a-5eabe3cb94e4@oracle.com> References: <571EFFD9.9050408@oracle.com> <75e5bc77-a00f-8f12-f636-0b40c28199de@oracle.com> <428a3bb9-d523-bfff-794a-8e1330ff8eeb@oracle.com> <571FD805.3040201@oracle.com> <5720061C.4070404@oracle.com> <57201241.50609@oracle.com> <57207939.2020805@oracle.com> <5d14b827-63b8-02cb-2cfe-73f88ce41678@oracle.com> <57208406.5040605@oracle.com> <38802e29-b1c8-9dea-093a-5eabe3cb94e4@oracle.com> Message-ID: There isn't a singular "constraint". The tag is logging constraints - some arbitrary rule that no trailing 's's is just that, arbitrary. These are runtime logging flags; I vote that they stay plural because that's how I would find them if I were looking. 
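(Either way, nobody should have to guess their way through 2^n spellings: java -Xlog:help prints the list of available tags, levels and decorators, so something like

    java -Xlog:help | grep -i constraint

shows immediately which form the VM accepts.)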
Coleen On 4/27/16 5:41 AM, Marcus Larsson wrote: > > > On 04/27/2016 11:19 AM, David Holmes wrote: >> On 27/04/2016 6:39 PM, Marcus Larsson wrote: >>> Hi, >>> >>> >>> On 04/27/2016 10:32 AM, Ioi Lam wrote: >>>> >>>> >>>> On 4/26/16 6:13 PM, David Holmes wrote: >>>>> On 27/04/2016 10:21 AM, Ioi Lam wrote: >>>>>> The changes look good to me. >>>>>> >>>>>> I think each line of the "constraints" prints only one item (?). >>>>>> If so, >>>>>> we should use the singular form. (Max, could you post a sample >>>>>> output >>>>>> from -Xlog?) >>>>> >>>>> The point is that it shows all the constraints. >>>>> >>>> >>>> We currently have a mix-match of singular/plural forms in the log >>>> tags: >>>> >>>> singular: >>>> LOG_TAG(alloc) >>>> LOG_TAG(region) >>>> >>>> plural: >>>> LOG_TAG(defaultmethods) >>>> >>>> Sometimes whether to use an (s) can be a personal preference. However, >>>> the poor user would need to try 2^n times to get the right combination >>>> of (s) or (no s), and that's too much. >>> >>> Yeah, we should definitely be consistent with this. I vote for tags in >>> singular, since it makes them ever so slightly shorter. >> >> Hmmm, so data becomes datum? Or is that the exception to the rule? ;-) > > Seems like a good exception to me. :) Perhaps the rule should be to > avoid trailing s if it signifies plural. > >> >> No matter what you choose some things will be unintuitively awkward: >> defaultmethods, exceptions, constraints, all suit plural forms and >> are awkward in singular. There are numerous plural forms in use already. > > I don't see what distinguishes the tags you mention from the others. I > guess I don't see what makes them awkward in singular. If I want > defaultmethod logging I enable that tag. It's obvious I will get > multiple log messages for it so I don't see the purpose of the 's'. > > Thanks, > Marcus > >> >> David >> >>> Thanks, >>> Marcus >>> >>>> >>>> - Ioi >>>> >>>>> David >>>>> >>>>>> Thanks >>>>>> - Ioi >>>>>> >>>>>> On 4/26/16 2:05 PM, Max Ockner wrote: >>>>>>> New webrev: http://cr.openjdk.java.net/~mockner/8154110.hotspot.02/ >>>>>>> >>>>>>> I have responded to these suggestions: >>>>>>> - class+loaderdata -> class+loader+data >>>>>>> - class+loaderconstraints -> class+loader+constraints >>>>>>> - I did not change constraints to constraint. I agree with >>>>>>> Coleen's >>>>>>> reasoning, but please let me know if you think there is a good >>>>>>> reason >>>>>>> to change it. >>>>>>> >>>>>>> Thanks, >>>>>>> Max >>>>>>> >>>>>>> On 4/26/2016 4:05 PM, Coleen Phillimore wrote: >>>>>>>> >>>>>>>> Hi, >>>>>>>> >>>>>>>> Yes, his looks good, but I agree that class, loaderconstraints => >>>>>>>> class, loader, constraints >>>>>>>> and class, loaderdata => class, loader, data >>>>>>>> >>>>>>>> But I think constraints should be plural, we're logging the >>>>>>>> constraint checking (more than one constraint) so plural makes >>>>>>>> more >>>>>>>> sense to me. >>>>>>>> >>>>>>>> Okay, I think I now prefer the combination of short words to do >>>>>>>> logging after seeing this change. >>>>>>>> >>>>>>>> Thanks, >>>>>>>> Coleen >>>>>>>> >>>>>>>> >>>>>>>> On 4/26/16 3:44 PM, Robbin Ehn wrote: >>>>>>>>> Hi Max, >>>>>>>>> >>>>>>>>> I was just looking at these tags and thought maybe they should be >>>>>>>>> split, nice ! >>>>>>>>> >>>>>>>>> I have request for the ' constraints' tag, can make it singular >>>>>>>>> (without ending 's') ? >>>>>>>>> >>>>>>>>> (there are a few other ones in plural which we also should fix) >>>>>>>>> >>>>>>>>> Otherwise I think this looks good! 
>>>>>>>>> >>>>>>>>> Thanks! >>>>>>>>> >>>>>>>>> /Robbin >>>>>>>>> >>>>>>>>> On 04/26/2016 07:42 AM, Max Ockner wrote: >>>>>>>>>> Hello, >>>>>>>>>> >>>>>>>>>> This change factors the tags from the class and safepoint >>>>>>>>>> logging >>>>>>>>>> subsystems into smaller tags, including "class" and "safepoint" >>>>>>>>>> tags which are included in tag combination in their respective >>>>>>>>>> subsystems. >>>>>>>>>> >>>>>>>>>> classresolve -> class+resolve >>>>>>>>>> classload -> class+load >>>>>>>>>> classunload -> class+unload >>>>>>>>>> classpath -> class+path >>>>>>>>>> classloaderdata -> class+loaderdata >>>>>>>>>> classload+constraints -> class+loaderconstraints >>>>>>>>>> classinit -> class+init >>>>>>>>>> classload+preorder -> class+preorder >>>>>>>>>> >>>>>>>>>> safepointcleanup -> safepoint+cleanup >>>>>>>>>> >>>>>>>>>> class+loaderdata can be further factored into >>>>>>>>>> class+load+data. Same >>>>>>>>>> with class+loaderconstraints. >>>>>>>>>> >>>>>>>>>> Bug: https://bugs.openjdk.java.net/browse/JDK-8154110 >>>>>>>>>> webrev(hotspot): >>>>>>>>>> http://cr.openjdk.java.net/~mockner/8154110.hotspot/ >>>>>>>>>> webrev(jdk): http://cr.openjdk.java.net/~mockner/8154110.jdk/ >>>>>>>>>> >>>>>>>>>> Tested with jtreg hotspot tests. jdk tests currently running. >>>>>>>>>> >>>>>>>>>> Thanks, >>>>>>>>>> Max >>>>>>>>> >>>>>>>> >>>>>>> >>>>>> >>>> >>> > From coleen.phillimore at oracle.com Wed Apr 27 12:19:31 2016 From: coleen.phillimore at oracle.com (Coleen Phillimore) Date: Wed, 27 Apr 2016 08:19:31 -0400 Subject: RFR: 8154110: Update class* and safepoint* logging subsystems In-Reply-To: <5720ADEC.3050904@oracle.com> References: <571EFFD9.9050408@oracle.com> <75e5bc77-a00f-8f12-f636-0b40c28199de@oracle.com> <428a3bb9-d523-bfff-794a-8e1330ff8eeb@oracle.com> <571FD805.3040201@oracle.com> <5720061C.4070404@oracle.com> <57201241.50609@oracle.com> <57207939.2020805@oracle.com> <5d14b827-63b8-02cb-2cfe-73f88ce41678@oracle.com> <57208406.5040605@oracle.com> <38802e29-b1c8-9dea-093a-5eabe3cb94e4@oracle.com> <5720ADEC.3050904@oracle.com> Message-ID: <62138c18-ff85-4cfe-c33a-ba46b54c9a44@oracle.com> On 4/27/16 8:17 AM, David Holmes wrote: > > > On 27/04/2016 7:41 PM, Marcus Larsson wrote: >> >> >> On 04/27/2016 11:19 AM, David Holmes wrote: >>> On 27/04/2016 6:39 PM, Marcus Larsson wrote: >>>> Hi, >>>> >>>> >>>> On 04/27/2016 10:32 AM, Ioi Lam wrote: >>>>> >>>>> >>>>> On 4/26/16 6:13 PM, David Holmes wrote: >>>>>> On 27/04/2016 10:21 AM, Ioi Lam wrote: >>>>>>> The changes look good to me. >>>>>>> >>>>>>> I think each line of the "constraints" prints only one item (?). >>>>>>> If so, >>>>>>> we should use the singular form. (Max, could you post a sample >>>>>>> output >>>>>>> from -Xlog?) >>>>>> >>>>>> The point is that it shows all the constraints. >>>>>> >>>>> >>>>> We currently have a mix-match of singular/plural forms in the log >>>>> tags: >>>>> >>>>> singular: >>>>> LOG_TAG(alloc) >>>>> LOG_TAG(region) >>>>> >>>>> plural: >>>>> LOG_TAG(defaultmethods) >>>>> >>>>> Sometimes whether to use an (s) can be a personal preference. >>>>> However, >>>>> the poor user would need to try 2^n times to get the right >>>>> combination >>>>> of (s) or (no s), and that's too much. >>>> >>>> Yeah, we should definitely be consistent with this. I vote for tags in >>>> singular, since it makes them ever so slightly shorter. >>> >>> Hmmm, so data becomes datum? Or is that the exception to the rule? ;-) >> >> Seems like a good exception to me. 
:) Perhaps the rule should be to >> avoid trailing s if it signifies plural. >> >>> >>> No matter what you choose some things will be unintuitively awkward: >>> defaultmethods, exceptions, constraints, all suit plural forms and are >>> awkward in singular. There are numerous plural forms in use already. >> >> I don't see what distinguishes the tags you mention from the others. I >> guess I don't see what makes them awkward in singular. If I want >> defaultmethod logging I enable that tag. It's obvious I will get >> multiple log messages for it so I don't see the purpose of the 's'. > > Maybe it is just familiarity with the old TraceXXX forms. I prefer the > noun forms to be plural - as many of them are. Many of the ones that > are not are a prefix taken from a longer form ie TraceThreadXXX became > thread + xxx; whereas TraceExceptions became exceptions. > > It is all somewhat arbitrary in my view. LOL, we said the same thing. Coleen > > David > >> Thanks, >> Marcus >> >>> >>> David >>> >>>> Thanks, >>>> Marcus >>>> >>>>> >>>>> - Ioi >>>>> >>>>>> David >>>>>> >>>>>>> Thanks >>>>>>> - Ioi >>>>>>> >>>>>>> On 4/26/16 2:05 PM, Max Ockner wrote: >>>>>>>> New webrev: >>>>>>>> http://cr.openjdk.java.net/~mockner/8154110.hotspot.02/ >>>>>>>> >>>>>>>> I have responded to these suggestions: >>>>>>>> - class+loaderdata -> class+loader+data >>>>>>>> - class+loaderconstraints -> class+loader+constraints >>>>>>>> - I did not change constraints to constraint. I agree with >>>>>>>> Coleen's >>>>>>>> reasoning, but please let me know if you think there is a good >>>>>>>> reason >>>>>>>> to change it. >>>>>>>> >>>>>>>> Thanks, >>>>>>>> Max >>>>>>>> >>>>>>>> On 4/26/2016 4:05 PM, Coleen Phillimore wrote: >>>>>>>>> >>>>>>>>> Hi, >>>>>>>>> >>>>>>>>> Yes, his looks good, but I agree that class, loaderconstraints => >>>>>>>>> class, loader, constraints >>>>>>>>> and class, loaderdata => class, loader, data >>>>>>>>> >>>>>>>>> But I think constraints should be plural, we're logging the >>>>>>>>> constraint checking (more than one constraint) so plural makes >>>>>>>>> more >>>>>>>>> sense to me. >>>>>>>>> >>>>>>>>> Okay, I think I now prefer the combination of short words to do >>>>>>>>> logging after seeing this change. >>>>>>>>> >>>>>>>>> Thanks, >>>>>>>>> Coleen >>>>>>>>> >>>>>>>>> >>>>>>>>> On 4/26/16 3:44 PM, Robbin Ehn wrote: >>>>>>>>>> Hi Max, >>>>>>>>>> >>>>>>>>>> I was just looking at these tags and thought maybe they >>>>>>>>>> should be >>>>>>>>>> split, nice ! >>>>>>>>>> >>>>>>>>>> I have request for the ' constraints' tag, can make it singular >>>>>>>>>> (without ending 's') ? >>>>>>>>>> >>>>>>>>>> (there are a few other ones in plural which we also should fix) >>>>>>>>>> >>>>>>>>>> Otherwise I think this looks good! >>>>>>>>>> >>>>>>>>>> Thanks! >>>>>>>>>> >>>>>>>>>> /Robbin >>>>>>>>>> >>>>>>>>>> On 04/26/2016 07:42 AM, Max Ockner wrote: >>>>>>>>>>> Hello, >>>>>>>>>>> >>>>>>>>>>> This change factors the tags from the class and safepoint >>>>>>>>>>> logging >>>>>>>>>>> subsystems into smaller tags, including "class" and "safepoint" >>>>>>>>>>> tags which are included in tag combination in their respective >>>>>>>>>>> subsystems. 
>>>>>>>>>>> >>>>>>>>>>> classresolve -> class+resolve >>>>>>>>>>> classload -> class+load >>>>>>>>>>> classunload -> class+unload >>>>>>>>>>> classpath -> class+path >>>>>>>>>>> classloaderdata -> class+loaderdata >>>>>>>>>>> classload+constraints -> class+loaderconstraints >>>>>>>>>>> classinit -> class+init >>>>>>>>>>> classload+preorder -> class+preorder >>>>>>>>>>> >>>>>>>>>>> safepointcleanup -> safepoint+cleanup >>>>>>>>>>> >>>>>>>>>>> class+loaderdata can be further factored into class+load+data. >>>>>>>>>>> Same >>>>>>>>>>> with class+loaderconstraints. >>>>>>>>>>> >>>>>>>>>>> Bug: https://bugs.openjdk.java.net/browse/JDK-8154110 >>>>>>>>>>> webrev(hotspot): >>>>>>>>>>> http://cr.openjdk.java.net/~mockner/8154110.hotspot/ >>>>>>>>>>> webrev(jdk): http://cr.openjdk.java.net/~mockner/8154110.jdk/ >>>>>>>>>>> >>>>>>>>>>> Tested with jtreg hotspot tests. jdk tests currently running. >>>>>>>>>>> >>>>>>>>>>> Thanks, >>>>>>>>>>> Max >>>>>>>>>> >>>>>>>>> >>>>>>>> >>>>>>> >>>>> >>>> >> From ioi.lam at oracle.com Wed Apr 27 13:44:40 2016 From: ioi.lam at oracle.com (Ioi Lam) Date: Wed, 27 Apr 2016 06:44:40 -0700 Subject: RFR [XS] 8155239 [TESTBUG] Simple test setup for JVMTI ClassFileLoadHook Message-ID: <5720C248.6080902@oracle.com> Hi, Please review this simple test bug: https://bugs.openjdk.java.net/browse/JDK-8155239 http://cr.openjdk.java.net/~iklam/jdk9/8155239_simple_classfileloadhook.v01/ Testing ClassFileLoadHook is a pain. In many cases, we are not testing CFLH itself, but rather "how does my feature interact with CFLH. One example is the interaction between CDS and CFLH. This RFE makes it easy to write such tests. Please see SimpleClassFileLoadHookTest.java for an example. I've tested under Linux and I am now trying to test under all other platforms. Thanks - Ioi From george.triantafillou at oracle.com Wed Apr 27 14:39:20 2016 From: george.triantafillou at oracle.com (George Triantafillou) Date: Wed, 27 Apr 2016 10:39:20 -0400 Subject: RFR(XS): 8154942 Remove ProcessTools.getVmInputArguments() from the hotspot test library, as it is not used by any of the hotspot tests Message-ID: <5720CF18.6000109@oracle.com> Please review this very small change that removes the unused method getVmInputArguments() from ProcessTools. JBS: https://bugs.openjdk.java.net/browse/JDK-8154942 Webrev: http://cr.openjdk.java.net/~gtriantafill/bug-8154942/bug_8154942.patch/webrev/ Thanks, George From harold.seigel at oracle.com Wed Apr 27 15:35:36 2016 From: harold.seigel at oracle.com (harold seigel) Date: Wed, 27 Apr 2016 11:35:36 -0400 Subject: RFR(XS): 8154942 Remove ProcessTools.getVmInputArguments() from the hotspot test library, as it is not used by any of the hotspot tests In-Reply-To: <5720CF18.6000109@oracle.com> References: <5720CF18.6000109@oracle.com> Message-ID: Hi George, The change looks good. Thanks, Harold On 4/27/2016 10:39 AM, George Triantafillou wrote: > Please review this very small change that removes the unused method > getVmInputArguments() from ProcessTools. 
> > JBS: https://bugs.openjdk.java.net/browse/JDK-8154942 > Webrev: > http://cr.openjdk.java.net/~gtriantafill/bug-8154942/bug_8154942.patch/webrev/ > > > > Thanks, > George From christian.tornqvist at oracle.com Wed Apr 27 15:44:15 2016 From: christian.tornqvist at oracle.com (Christian Tornqvist) Date: Wed, 27 Apr 2016 11:44:15 -0400 Subject: RFR(XS): 8154942 Remove ProcessTools.getVmInputArguments() from the hotspot test library, as it is not used by any of the hotspot tests In-Reply-To: <5720CF18.6000109@oracle.com> References: <5720CF18.6000109@oracle.com> Message-ID: Hi George, Are there any unnecessary @module tags we can remove as a result of this? Thanks, Christian -----Original Message----- From: hotspot-dev [mailto:hotspot-dev-bounces at openjdk.java.net] On Behalf Of George Triantafillou Sent: Wednesday, April 27, 2016 10:39 AM To: hotspot-dev at openjdk.java.net Subject: RFR(XS): 8154942 Remove ProcessTools.getVmInputArguments() from the hotspot test library, as it is not used by any of the hotspot tests Please review this very small change that removes the unused method getVmInputArguments() from ProcessTools. JBS: https://bugs.openjdk.java.net/browse/JDK-8154942 Webrev: http://cr.openjdk.java.net/~gtriantafill/bug-8154942/bug_8154942.patch/webrev/ Thanks, George From george.triantafillou at oracle.com Wed Apr 27 17:13:29 2016 From: george.triantafillou at oracle.com (George Triantafillou) Date: Wed, 27 Apr 2016 13:13:29 -0400 Subject: RFR(XS): 8154942 Remove ProcessTools.getVmInputArguments() from the hotspot test library, as it is not used by any of the hotspot tests In-Reply-To: References: <5720CF18.6000109@oracle.com> Message-ID: <5720F339.5000508@oracle.com> Thanks Harold. -George On 4/27/2016 11:35 AM, harold seigel wrote: > Hi George, > > The change looks good. > > Thanks, Harold > > > On 4/27/2016 10:39 AM, George Triantafillou wrote: >> Please review this very small change that removes the unused method >> getVmInputArguments() from ProcessTools. >> >> JBS: https://bugs.openjdk.java.net/browse/JDK-8154942 >> Webrev: >> http://cr.openjdk.java.net/~gtriantafill/bug-8154942/bug_8154942.patch/webrev/ >> >> >> >> Thanks, >> George > From mikhailo.seledtsov at oracle.com Wed Apr 27 18:04:10 2016 From: mikhailo.seledtsov at oracle.com (mikhailo) Date: Wed, 27 Apr 2016 11:04:10 -0700 Subject: RFR [XS] 8155239 [TESTBUG] Simple test setup for JVMTI ClassFileLoadHook In-Reply-To: <5720C248.6080902@oracle.com> References: <5720C248.6080902@oracle.com> Message-ID: <5720FF1A.3000907@oracle.com> Hi Ioi, Great. This is very helpful for the task at hand and more jvmti tests to come. 
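For anyone following along without the webrev open, the general shape of such a CFLH agent is roughly the following (a hypothetical sketch, not the actual libSimpleClassFileLoadHook.c; it uses the C++ JVMTI bindings and leaves out the option parsing and error handling; CLASS_NAME, FROM and TO follow the option names discussed below):

    #include <string.h>
    #include <jvmti.h>

    static char* CLASS_NAME;   // e.g. "Foo" -- parsed from the agent options
    static char* FROM;         // byte string to search for in the class file
    static char* TO;           // replacement, assumed to have the same length

    extern "C" void JNICALL
    SimpleHook(jvmtiEnv* jvmti, JNIEnv* env,
               jclass class_being_redefined, jobject loader,
               const char* name, jobject protection_domain,
               jint class_data_len, const unsigned char* class_data,
               jint* new_class_data_len, unsigned char** new_class_data) {
      if (name == NULL || strcmp(name, CLASS_NAME) != 0) {
        return;                               // not the class under test
      }
      unsigned char* copy = NULL;
      if (jvmti->Allocate(class_data_len, &copy) != JVMTI_ERROR_NONE) {
        return;
      }
      memcpy(copy, class_data, class_data_len);
      size_t len = strlen(FROM);              // FROM and TO must be equally long
      for (jint i = 0; i + (jint)len <= class_data_len; i++) {
        if (memcmp(copy + i, FROM, len) == 0) {
          memcpy(copy + i, TO, len);
        }
      }
      *new_class_data_len = class_data_len;
      *new_class_data = copy;                 // the VM loads the modified bytes
    }

    extern "C" JNIEXPORT jint JNICALL
    Agent_OnLoad(JavaVM* jvm, char* options, void* reserved) {
      // ... parse options ("ClassName,from,to") into CLASS_NAME, FROM and TO ...
      jvmtiEnv* jvmti = NULL;
      if (jvm->GetEnv((void**)&jvmti, JVMTI_VERSION_1_2) != JNI_OK) {
        return JNI_ERR;
      }
      jvmtiCapabilities caps;
      memset(&caps, 0, sizeof(caps));
      caps.can_generate_all_class_hook_events = 1;
      jvmti->AddCapabilities(&caps);
      jvmtiEventCallbacks callbacks;
      memset(&callbacks, 0, sizeof(callbacks));
      callbacks.ClassFileLoadHook = SimpleHook;
      jvmti->SetEventCallbacks(&callbacks, sizeof(callbacks));
      jvmti->SetEventNotificationMode(JVMTI_ENABLE, JVMTI_EVENT_CLASS_FILE_LOAD_HOOK, NULL);
      return JNI_OK;
    }

The points below about init_options() and who owns the option strings are exactly the parts this sketch glosses over.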
Overall looks good, and I have couple of comments: - I recommend moving the libSimpleClassFileLoadHook.c to a sub-directory under hotpsot/test/testlibrary, such as hotpsot/test/testlibrary/jvmti - libSimpleClassFileLoadHook.c init_options(): - would it be safer to make a copy of "CLASS_NAME", "FROM" and "TO"; I am not sure of the convention of ownership of the arguments passed to Agent_Initialize(), and who is required to free them Just checking as a precaution ClassFileLoadHook(): - please add a brief comment, something like "this hook will match the class name to CLASS_NAME, and attempt to replace any occurrence of 'FROM' string to 'TO' string" Thank you, Misha On 04/27/2016 06:44 AM, Ioi Lam wrote: > Hi, > > Please review this simple test bug: > > https://bugs.openjdk.java.net/browse/JDK-8155239 > http://cr.openjdk.java.net/~iklam/jdk9/8155239_simple_classfileloadhook.v01/ > > > Testing ClassFileLoadHook is a pain. In many cases, we are not testing > CFLH itself, but rather "how does my feature interact with CFLH. One > example is the interaction between CDS and CFLH. This RFE makes it > easy to write such tests. > > Please see SimpleClassFileLoadHookTest.java for an example. > > I've tested under Linux and I am now trying to test under all other > platforms. > > Thanks > - Ioi From christian.thalinger at oracle.com Wed Apr 27 19:19:21 2016 From: christian.thalinger at oracle.com (Christian Thalinger) Date: Wed, 27 Apr 2016 09:19:21 -1000 Subject: RFR: 8154110: Update class* and safepoint* logging subsystems In-Reply-To: <62138c18-ff85-4cfe-c33a-ba46b54c9a44@oracle.com> References: <571EFFD9.9050408@oracle.com> <75e5bc77-a00f-8f12-f636-0b40c28199de@oracle.com> <428a3bb9-d523-bfff-794a-8e1330ff8eeb@oracle.com> <571FD805.3040201@oracle.com> <5720061C.4070404@oracle.com> <57201241.50609@oracle.com> <57207939.2020805@oracle.com> <5d14b827-63b8-02cb-2cfe-73f88ce41678@oracle.com> <57208406.5040605@oracle.com> <38802e29-b1c8-9dea-093a-5eabe3cb94e4@oracle.com> <5720ADEC.3050904@oracle.com> <62138c18-ff85-4cfe-c33a-ba46b54c9a44@oracle.com> Message-ID: > On Apr 27, 2016, at 2:19 AM, Coleen Phillimore wrote: > > > > On 4/27/16 8:17 AM, David Holmes wrote: >> >> >> On 27/04/2016 7:41 PM, Marcus Larsson wrote: >>> >>> >>> On 04/27/2016 11:19 AM, David Holmes wrote: >>>> On 27/04/2016 6:39 PM, Marcus Larsson wrote: >>>>> Hi, >>>>> >>>>> >>>>> On 04/27/2016 10:32 AM, Ioi Lam wrote: >>>>>> >>>>>> >>>>>> On 4/26/16 6:13 PM, David Holmes wrote: >>>>>>> On 27/04/2016 10:21 AM, Ioi Lam wrote: >>>>>>>> The changes look good to me. >>>>>>>> >>>>>>>> I think each line of the "constraints" prints only one item (?). >>>>>>>> If so, >>>>>>>> we should use the singular form. (Max, could you post a sample output >>>>>>>> from -Xlog?) >>>>>>> >>>>>>> The point is that it shows all the constraints. >>>>>>> >>>>>> >>>>>> We currently have a mix-match of singular/plural forms in the log tags: >>>>>> >>>>>> singular: >>>>>> LOG_TAG(alloc) >>>>>> LOG_TAG(region) >>>>>> >>>>>> plural: >>>>>> LOG_TAG(defaultmethods) >>>>>> >>>>>> Sometimes whether to use an (s) can be a personal preference. However, >>>>>> the poor user would need to try 2^n times to get the right combination >>>>>> of (s) or (no s), and that's too much. >>>>> >>>>> Yeah, we should definitely be consistent with this. I vote for tags in >>>>> singular, since it makes them ever so slightly shorter. >>>> >>>> Hmmm, so data becomes datum? Or is that the exception to the rule? ;-) >>> >>> Seems like a good exception to me. 
:) Perhaps the rule should be to >>> avoid trailing s if it signifies plural. >>> >>>> >>>> No matter what you choose some things will be unintuitively awkward: >>>> defaultmethods, exceptions, constraints, all suit plural forms and are >>>> awkward in singular. There are numerous plural forms in use already. >>> >>> I don't see what distinguishes the tags you mention from the others. I >>> guess I don't see what makes them awkward in singular. If I want >>> defaultmethod logging I enable that tag. It's obvious I will get >>> multiple log messages for it so I don't see the purpose of the 's'. >> >> Maybe it is just familiarity with the old TraceXXX forms. I prefer the noun forms to be plural - as many of them are. Many of the ones that are not are a prefix taken from a longer form ie TraceThreadXXX became thread + xxx; whereas TraceExceptions became exceptions. >> >> It is all somewhat arbitrary in my view. > > LOL, we said the same thing. I?d like to propose another approach: constraintz It?s way cooler and rulezzzz! > > Coleen > >> >> David >> >>> Thanks, >>> Marcus >>> >>>> >>>> David >>>> >>>>> Thanks, >>>>> Marcus >>>>> >>>>>> >>>>>> - Ioi >>>>>> >>>>>>> David >>>>>>> >>>>>>>> Thanks >>>>>>>> - Ioi >>>>>>>> >>>>>>>> On 4/26/16 2:05 PM, Max Ockner wrote: >>>>>>>>> New webrev: http://cr.openjdk.java.net/~mockner/8154110.hotspot.02/ >>>>>>>>> >>>>>>>>> I have responded to these suggestions: >>>>>>>>> - class+loaderdata -> class+loader+data >>>>>>>>> - class+loaderconstraints -> class+loader+constraints >>>>>>>>> - I did not change constraints to constraint. I agree with Coleen's >>>>>>>>> reasoning, but please let me know if you think there is a good >>>>>>>>> reason >>>>>>>>> to change it. >>>>>>>>> >>>>>>>>> Thanks, >>>>>>>>> Max >>>>>>>>> >>>>>>>>> On 4/26/2016 4:05 PM, Coleen Phillimore wrote: >>>>>>>>>> >>>>>>>>>> Hi, >>>>>>>>>> >>>>>>>>>> Yes, his looks good, but I agree that class, loaderconstraints => >>>>>>>>>> class, loader, constraints >>>>>>>>>> and class, loaderdata => class, loader, data >>>>>>>>>> >>>>>>>>>> But I think constraints should be plural, we're logging the >>>>>>>>>> constraint checking (more than one constraint) so plural makes more >>>>>>>>>> sense to me. >>>>>>>>>> >>>>>>>>>> Okay, I think I now prefer the combination of short words to do >>>>>>>>>> logging after seeing this change. >>>>>>>>>> >>>>>>>>>> Thanks, >>>>>>>>>> Coleen >>>>>>>>>> >>>>>>>>>> >>>>>>>>>> On 4/26/16 3:44 PM, Robbin Ehn wrote: >>>>>>>>>>> Hi Max, >>>>>>>>>>> >>>>>>>>>>> I was just looking at these tags and thought maybe they should be >>>>>>>>>>> split, nice ! >>>>>>>>>>> >>>>>>>>>>> I have request for the ' constraints' tag, can make it singular >>>>>>>>>>> (without ending 's') ? >>>>>>>>>>> >>>>>>>>>>> (there are a few other ones in plural which we also should fix) >>>>>>>>>>> >>>>>>>>>>> Otherwise I think this looks good! >>>>>>>>>>> >>>>>>>>>>> Thanks! >>>>>>>>>>> >>>>>>>>>>> /Robbin >>>>>>>>>>> >>>>>>>>>>> On 04/26/2016 07:42 AM, Max Ockner wrote: >>>>>>>>>>>> Hello, >>>>>>>>>>>> >>>>>>>>>>>> This change factors the tags from the class and safepoint logging >>>>>>>>>>>> subsystems into smaller tags, including "class" and "safepoint" >>>>>>>>>>>> tags which are included in tag combination in their respective >>>>>>>>>>>> subsystems. 
>>>>>>>>>>>> >>>>>>>>>>>> classresolve -> class+resolve >>>>>>>>>>>> classload -> class+load >>>>>>>>>>>> classunload -> class+unload >>>>>>>>>>>> classpath -> class+path >>>>>>>>>>>> classloaderdata -> class+loaderdata >>>>>>>>>>>> classload+constraints -> class+loaderconstraints >>>>>>>>>>>> classinit -> class+init >>>>>>>>>>>> classload+preorder -> class+preorder >>>>>>>>>>>> >>>>>>>>>>>> safepointcleanup -> safepoint+cleanup >>>>>>>>>>>> >>>>>>>>>>>> class+loaderdata can be further factored into class+load+data. >>>>>>>>>>>> Same >>>>>>>>>>>> with class+loaderconstraints. >>>>>>>>>>>> >>>>>>>>>>>> Bug: https://bugs.openjdk.java.net/browse/JDK-8154110 >>>>>>>>>>>> webrev(hotspot): >>>>>>>>>>>> http://cr.openjdk.java.net/~mockner/8154110.hotspot/ >>>>>>>>>>>> webrev(jdk): http://cr.openjdk.java.net/~mockner/8154110.jdk/ >>>>>>>>>>>> >>>>>>>>>>>> Tested with jtreg hotspot tests. jdk tests currently running. >>>>>>>>>>>> >>>>>>>>>>>> Thanks, >>>>>>>>>>>> Max From dean.long at oracle.com Wed Apr 27 19:33:31 2016 From: dean.long at oracle.com (Dean Long) Date: Wed, 27 Apr 2016 12:33:31 -0700 Subject: RFR: 8154110: Update class* and safepoint* logging subsystems In-Reply-To: <5720ADEC.3050904@oracle.com> References: <571EFFD9.9050408@oracle.com> <75e5bc77-a00f-8f12-f636-0b40c28199de@oracle.com> <428a3bb9-d523-bfff-794a-8e1330ff8eeb@oracle.com> <571FD805.3040201@oracle.com> <5720061C.4070404@oracle.com> <57201241.50609@oracle.com> <57207939.2020805@oracle.com> <5d14b827-63b8-02cb-2cfe-73f88ce41678@oracle.com> <57208406.5040605@oracle.com> <38802e29-b1c8-9dea-093a-5eabe3cb94e4@oracle.com> <5720ADEC.3050904@oracle.com> Message-ID: <81a2e53f-d4a7-5d38-2687-1fa1c2544f4c@oracle.com> On 4/27/2016 5:17 AM, David Holmes wrote: > > > On 27/04/2016 7:41 PM, Marcus Larsson wrote: >> >> >> On 04/27/2016 11:19 AM, David Holmes wrote: >>> On 27/04/2016 6:39 PM, Marcus Larsson wrote: >>>> Hi, >>>> >>>> >>>> On 04/27/2016 10:32 AM, Ioi Lam wrote: >>>>> >>>>> >>>>> On 4/26/16 6:13 PM, David Holmes wrote: >>>>>> On 27/04/2016 10:21 AM, Ioi Lam wrote: >>>>>>> The changes look good to me. >>>>>>> >>>>>>> I think each line of the "constraints" prints only one item (?). >>>>>>> If so, >>>>>>> we should use the singular form. (Max, could you post a sample >>>>>>> output >>>>>>> from -Xlog?) >>>>>> >>>>>> The point is that it shows all the constraints. >>>>>> >>>>> >>>>> We currently have a mix-match of singular/plural forms in the log >>>>> tags: >>>>> >>>>> singular: >>>>> LOG_TAG(alloc) >>>>> LOG_TAG(region) >>>>> >>>>> plural: >>>>> LOG_TAG(defaultmethods) >>>>> >>>>> Sometimes whether to use an (s) can be a personal preference. >>>>> However, >>>>> the poor user would need to try 2^n times to get the right >>>>> combination >>>>> of (s) or (no s), and that's too much. >>>> >>>> Yeah, we should definitely be consistent with this. I vote for tags in >>>> singular, since it makes them ever so slightly shorter. >>> >>> Hmmm, so data becomes datum? Or is that the exception to the rule? ;-) >> >> Seems like a good exception to me. :) Perhaps the rule should be to >> avoid trailing s if it signifies plural. >> >>> >>> No matter what you choose some things will be unintuitively awkward: >>> defaultmethods, exceptions, constraints, all suit plural forms and are >>> awkward in singular. There are numerous plural forms in use already. >> >> I don't see what distinguishes the tags you mention from the others. I >> guess I don't see what makes them awkward in singular. If I want >> defaultmethod logging I enable that tag. 
It's obvious I will get >> multiple log messages for it so I don't see the purpose of the 's'. > > Maybe it is just familiarity with the old TraceXXX forms. I prefer the > noun forms to be plural - as many of them are. Many of the ones that > are not are a prefix taken from a longer form ie TraceThreadXXX became > thread + xxx; whereas TraceExceptions became exceptions. > > It is all somewhat arbitrary in my view. > Do we have fuzzy matching logic for logging flags like we do for command-line flags? dl > David > >> Thanks, >> Marcus >> >>> >>> David >>> >>>> Thanks, >>>> Marcus >>>> >>>>> >>>>> - Ioi >>>>> >>>>>> David >>>>>> >>>>>>> Thanks >>>>>>> - Ioi >>>>>>> >>>>>>> On 4/26/16 2:05 PM, Max Ockner wrote: >>>>>>>> New webrev: >>>>>>>> http://cr.openjdk.java.net/~mockner/8154110.hotspot.02/ >>>>>>>> >>>>>>>> I have responded to these suggestions: >>>>>>>> - class+loaderdata -> class+loader+data >>>>>>>> - class+loaderconstraints -> class+loader+constraints >>>>>>>> - I did not change constraints to constraint. I agree with >>>>>>>> Coleen's >>>>>>>> reasoning, but please let me know if you think there is a good >>>>>>>> reason >>>>>>>> to change it. >>>>>>>> >>>>>>>> Thanks, >>>>>>>> Max >>>>>>>> >>>>>>>> On 4/26/2016 4:05 PM, Coleen Phillimore wrote: >>>>>>>>> >>>>>>>>> Hi, >>>>>>>>> >>>>>>>>> Yes, his looks good, but I agree that class, loaderconstraints => >>>>>>>>> class, loader, constraints >>>>>>>>> and class, loaderdata => class, loader, data >>>>>>>>> >>>>>>>>> But I think constraints should be plural, we're logging the >>>>>>>>> constraint checking (more than one constraint) so plural makes >>>>>>>>> more >>>>>>>>> sense to me. >>>>>>>>> >>>>>>>>> Okay, I think I now prefer the combination of short words to do >>>>>>>>> logging after seeing this change. >>>>>>>>> >>>>>>>>> Thanks, >>>>>>>>> Coleen >>>>>>>>> >>>>>>>>> >>>>>>>>> On 4/26/16 3:44 PM, Robbin Ehn wrote: >>>>>>>>>> Hi Max, >>>>>>>>>> >>>>>>>>>> I was just looking at these tags and thought maybe they >>>>>>>>>> should be >>>>>>>>>> split, nice ! >>>>>>>>>> >>>>>>>>>> I have request for the ' constraints' tag, can make it singular >>>>>>>>>> (without ending 's') ? >>>>>>>>>> >>>>>>>>>> (there are a few other ones in plural which we also should fix) >>>>>>>>>> >>>>>>>>>> Otherwise I think this looks good! >>>>>>>>>> >>>>>>>>>> Thanks! >>>>>>>>>> >>>>>>>>>> /Robbin >>>>>>>>>> >>>>>>>>>> On 04/26/2016 07:42 AM, Max Ockner wrote: >>>>>>>>>>> Hello, >>>>>>>>>>> >>>>>>>>>>> This change factors the tags from the class and safepoint >>>>>>>>>>> logging >>>>>>>>>>> subsystems into smaller tags, including "class" and "safepoint" >>>>>>>>>>> tags which are included in tag combination in their respective >>>>>>>>>>> subsystems. >>>>>>>>>>> >>>>>>>>>>> classresolve -> class+resolve >>>>>>>>>>> classload -> class+load >>>>>>>>>>> classunload -> class+unload >>>>>>>>>>> classpath -> class+path >>>>>>>>>>> classloaderdata -> class+loaderdata >>>>>>>>>>> classload+constraints -> class+loaderconstraints >>>>>>>>>>> classinit -> class+init >>>>>>>>>>> classload+preorder -> class+preorder >>>>>>>>>>> >>>>>>>>>>> safepointcleanup -> safepoint+cleanup >>>>>>>>>>> >>>>>>>>>>> class+loaderdata can be further factored into class+load+data. >>>>>>>>>>> Same >>>>>>>>>>> with class+loaderconstraints. 
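For concreteness, unified logging combines these split tags with '+' on the command line, so the factoring above translates into invocations along these lines (tag spellings follow this thread's proposal; the log levels and the MyApp placeholder are only illustrative):

   java -Xlog:class+load=info MyApp                      (roughly the old -XX:+TraceClassLoading)
   java -Xlog:class+unload=info MyApp                    (roughly the old -XX:+TraceClassUnloading)
   java -Xlog:safepoint+cleanup=debug MyApp
   java -Xlog:class+load=info,class+loader+constraints=debug MyApp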
>>>>>>>>>>> >>>>>>>>>>> Bug: https://bugs.openjdk.java.net/browse/JDK-8154110 >>>>>>>>>>> webrev(hotspot): >>>>>>>>>>> http://cr.openjdk.java.net/~mockner/8154110.hotspot/ >>>>>>>>>>> webrev(jdk): http://cr.openjdk.java.net/~mockner/8154110.jdk/ >>>>>>>>>>> >>>>>>>>>>> Tested with jtreg hotspot tests. jdk tests currently running. >>>>>>>>>>> >>>>>>>>>>> Thanks, >>>>>>>>>>> Max >>>>>>>>>> >>>>>>>>> >>>>>>>> >>>>>>> >>>>> >>>> >> From max.ockner at oracle.com Wed Apr 27 19:52:40 2016 From: max.ockner at oracle.com (Max Ockner) Date: Wed, 27 Apr 2016 15:52:40 -0400 Subject: RFR: 8154110: Update class* and safepoint* logging subsystems In-Reply-To: References: <571EFFD9.9050408@oracle.com> <75e5bc77-a00f-8f12-f636-0b40c28199de@oracle.com> <428a3bb9-d523-bfff-794a-8e1330ff8eeb@oracle.com> <571FD805.3040201@oracle.com> <5720061C.4070404@oracle.com> <57201241.50609@oracle.com> <57207939.2020805@oracle.com> <5d14b827-63b8-02cb-2cfe-73f88ce41678@oracle.com> <57208406.5040605@oracle.com> <38802e29-b1c8-9dea-093a-5eabe3cb94e4@oracle.com> <5720ADEC.3050904@oracle.com> <62138c18-ff85-4cfe-c33a-ba46b54c9a44@oracle.com> Message-ID: <57211888.8070603@oracle.com> Unless anyone feels that "constraints" is absolutely unacceptable, I think we should leave it alone. Shall we file a bug so we can relocate this discussion? Max On 4/27/2016 3:19 PM, Christian Thalinger wrote: >> On Apr 27, 2016, at 2:19 AM, Coleen Phillimore wrote: >> >> >> >> On 4/27/16 8:17 AM, David Holmes wrote: >>> >>> On 27/04/2016 7:41 PM, Marcus Larsson wrote: >>>> >>>> On 04/27/2016 11:19 AM, David Holmes wrote: >>>>> On 27/04/2016 6:39 PM, Marcus Larsson wrote: >>>>>> Hi, >>>>>> >>>>>> >>>>>> On 04/27/2016 10:32 AM, Ioi Lam wrote: >>>>>>> >>>>>>> On 4/26/16 6:13 PM, David Holmes wrote: >>>>>>>> On 27/04/2016 10:21 AM, Ioi Lam wrote: >>>>>>>>> The changes look good to me. >>>>>>>>> >>>>>>>>> I think each line of the "constraints" prints only one item (?). >>>>>>>>> If so, >>>>>>>>> we should use the singular form. (Max, could you post a sample output >>>>>>>>> from -Xlog?) >>>>>>>> The point is that it shows all the constraints. >>>>>>>> >>>>>>> We currently have a mix-match of singular/plural forms in the log tags: >>>>>>> >>>>>>> singular: >>>>>>> LOG_TAG(alloc) >>>>>>> LOG_TAG(region) >>>>>>> >>>>>>> plural: >>>>>>> LOG_TAG(defaultmethods) >>>>>>> >>>>>>> Sometimes whether to use an (s) can be a personal preference. However, >>>>>>> the poor user would need to try 2^n times to get the right combination >>>>>>> of (s) or (no s), and that's too much. >>>>>> Yeah, we should definitely be consistent with this. I vote for tags in >>>>>> singular, since it makes them ever so slightly shorter. >>>>> Hmmm, so data becomes datum? Or is that the exception to the rule? ;-) >>>> Seems like a good exception to me. :) Perhaps the rule should be to >>>> avoid trailing s if it signifies plural. >>>> >>>>> No matter what you choose some things will be unintuitively awkward: >>>>> defaultmethods, exceptions, constraints, all suit plural forms and are >>>>> awkward in singular. There are numerous plural forms in use already. >>>> I don't see what distinguishes the tags you mention from the others. I >>>> guess I don't see what makes them awkward in singular. If I want >>>> defaultmethod logging I enable that tag. It's obvious I will get >>>> multiple log messages for it so I don't see the purpose of the 's'. >>> Maybe it is just familiarity with the old TraceXXX forms. I prefer the noun forms to be plural - as many of them are. 
Many of the ones that are not are a prefix taken from a longer form ie TraceThreadXXX became thread + xxx; whereas TraceExceptions became exceptions. >>> >>> It is all somewhat arbitrary in my view. >> LOL, we said the same thing. > I?d like to propose another approach: constraintz > > It?s way cooler and rulezzzz! > >> Coleen >> >>> David >>> >>>> Thanks, >>>> Marcus >>>> >>>>> David >>>>> >>>>>> Thanks, >>>>>> Marcus >>>>>> >>>>>>> - Ioi >>>>>>> >>>>>>>> David >>>>>>>> >>>>>>>>> Thanks >>>>>>>>> - Ioi >>>>>>>>> >>>>>>>>> On 4/26/16 2:05 PM, Max Ockner wrote: >>>>>>>>>> New webrev: http://cr.openjdk.java.net/~mockner/8154110.hotspot.02/ >>>>>>>>>> >>>>>>>>>> I have responded to these suggestions: >>>>>>>>>> - class+loaderdata -> class+loader+data >>>>>>>>>> - class+loaderconstraints -> class+loader+constraints >>>>>>>>>> - I did not change constraints to constraint. I agree with Coleen's >>>>>>>>>> reasoning, but please let me know if you think there is a good >>>>>>>>>> reason >>>>>>>>>> to change it. >>>>>>>>>> >>>>>>>>>> Thanks, >>>>>>>>>> Max >>>>>>>>>> >>>>>>>>>> On 4/26/2016 4:05 PM, Coleen Phillimore wrote: >>>>>>>>>>> Hi, >>>>>>>>>>> >>>>>>>>>>> Yes, his looks good, but I agree that class, loaderconstraints => >>>>>>>>>>> class, loader, constraints >>>>>>>>>>> and class, loaderdata => class, loader, data >>>>>>>>>>> >>>>>>>>>>> But I think constraints should be plural, we're logging the >>>>>>>>>>> constraint checking (more than one constraint) so plural makes more >>>>>>>>>>> sense to me. >>>>>>>>>>> >>>>>>>>>>> Okay, I think I now prefer the combination of short words to do >>>>>>>>>>> logging after seeing this change. >>>>>>>>>>> >>>>>>>>>>> Thanks, >>>>>>>>>>> Coleen >>>>>>>>>>> >>>>>>>>>>> >>>>>>>>>>> On 4/26/16 3:44 PM, Robbin Ehn wrote: >>>>>>>>>>>> Hi Max, >>>>>>>>>>>> >>>>>>>>>>>> I was just looking at these tags and thought maybe they should be >>>>>>>>>>>> split, nice ! >>>>>>>>>>>> >>>>>>>>>>>> I have request for the ' constraints' tag, can make it singular >>>>>>>>>>>> (without ending 's') ? >>>>>>>>>>>> >>>>>>>>>>>> (there are a few other ones in plural which we also should fix) >>>>>>>>>>>> >>>>>>>>>>>> Otherwise I think this looks good! >>>>>>>>>>>> >>>>>>>>>>>> Thanks! >>>>>>>>>>>> >>>>>>>>>>>> /Robbin >>>>>>>>>>>> >>>>>>>>>>>> On 04/26/2016 07:42 AM, Max Ockner wrote: >>>>>>>>>>>>> Hello, >>>>>>>>>>>>> >>>>>>>>>>>>> This change factors the tags from the class and safepoint logging >>>>>>>>>>>>> subsystems into smaller tags, including "class" and "safepoint" >>>>>>>>>>>>> tags which are included in tag combination in their respective >>>>>>>>>>>>> subsystems. >>>>>>>>>>>>> >>>>>>>>>>>>> classresolve -> class+resolve >>>>>>>>>>>>> classload -> class+load >>>>>>>>>>>>> classunload -> class+unload >>>>>>>>>>>>> classpath -> class+path >>>>>>>>>>>>> classloaderdata -> class+loaderdata >>>>>>>>>>>>> classload+constraints -> class+loaderconstraints >>>>>>>>>>>>> classinit -> class+init >>>>>>>>>>>>> classload+preorder -> class+preorder >>>>>>>>>>>>> >>>>>>>>>>>>> safepointcleanup -> safepoint+cleanup >>>>>>>>>>>>> >>>>>>>>>>>>> class+loaderdata can be further factored into class+load+data. >>>>>>>>>>>>> Same >>>>>>>>>>>>> with class+loaderconstraints. >>>>>>>>>>>>> >>>>>>>>>>>>> Bug: https://bugs.openjdk.java.net/browse/JDK-8154110 >>>>>>>>>>>>> webrev(hotspot): >>>>>>>>>>>>> http://cr.openjdk.java.net/~mockner/8154110.hotspot/ >>>>>>>>>>>>> webrev(jdk): http://cr.openjdk.java.net/~mockner/8154110.jdk/ >>>>>>>>>>>>> >>>>>>>>>>>>> Tested with jtreg hotspot tests. 
jdk tests currently running. >>>>>>>>>>>>> >>>>>>>>>>>>> Thanks, >>>>>>>>>>>>> Max From chris.plummer at oracle.com Wed Apr 27 21:53:36 2016 From: chris.plummer at oracle.com (Chris Plummer) Date: Wed, 27 Apr 2016 14:53:36 -0700 Subject: RFR [XS] 8155239 [TESTBUG] Simple test setup for JVMTI ClassFileLoadHook In-Reply-To: <5720C248.6080902@oracle.com> References: <5720C248.6080902@oracle.com> Message-ID: <572134E0.20104@oracle.com> Hi Ioi, Looks good. Thanks for doing this. I assume this is only meant for testing the conversion of a String, and anything beyond that is use at your own risk (like renaming a field or a method). thanks, Chris On 4/27/16 6:44 AM, Ioi Lam wrote: > Hi, > > Please review this simple test bug: > > https://bugs.openjdk.java.net/browse/JDK-8155239 > http://cr.openjdk.java.net/~iklam/jdk9/8155239_simple_classfileloadhook.v01/ > > > Testing ClassFileLoadHook is a pain. In many cases, we are not testing > CFLH itself, but rather "how does my feature interact with CFLH. One > example is the interaction between CDS and CFLH. This RFE makes it > easy to write such tests. > > Please see SimpleClassFileLoadHookTest.java for an example. > > I've tested under Linux and I am now trying to test under all other > platforms. > > Thanks > - Ioi From ioi.lam at oracle.com Wed Apr 27 23:59:10 2016 From: ioi.lam at oracle.com (Ioi Lam) Date: Wed, 27 Apr 2016 16:59:10 -0700 Subject: RFR [XS] 8155239 [TESTBUG] Simple test setup for JVMTI ClassFileLoadHook In-Reply-To: <572134E0.20104@oracle.com> References: <5720C248.6080902@oracle.com> <572134E0.20104@oracle.com> Message-ID: <5721524E.7070203@oracle.com> The string replacement works on any UTF8 strings that are embedded inside the class file. Since UTF8 strings are used in the class file both for literal strings, as well as names for methods/fields/classes, renaming fields or methods should work as well. However, if you specify a very short string, like "X", it may actually overwrite any arbitrary bytes in the class file that happens to have the same value (e.g., in the bytecodes). I should add a precaution in the source code that you should pick a string that's long and unique enough to be safe. - Ioi On 4/27/16 2:53 PM, Chris Plummer wrote: > Hi Ioi, > > Looks good. Thanks for doing this. > > I assume this is only meant for testing the conversion of a String, > and anything beyond that is use at your own risk (like renaming a > field or a method). > > thanks, > > Chris > > On 4/27/16 6:44 AM, Ioi Lam wrote: >> Hi, >> >> Please review this simple test bug: >> >> https://bugs.openjdk.java.net/browse/JDK-8155239 >> http://cr.openjdk.java.net/~iklam/jdk9/8155239_simple_classfileloadhook.v01/ >> >> >> Testing ClassFileLoadHook is a pain. In many cases, we are not >> testing CFLH itself, but rather "how does my feature interact with >> CFLH. One example is the interaction between CDS and CFLH. This RFE >> makes it easy to write such tests. >> >> Please see SimpleClassFileLoadHookTest.java for an example. >> >> I've tested under Linux and I am now trying to test under all other >> platforms. >> >> Thanks >> - Ioi > From ioi.lam at oracle.com Thu Apr 28 00:00:01 2016 From: ioi.lam at oracle.com (Ioi Lam) Date: Wed, 27 Apr 2016 17:00:01 -0700 Subject: RFR [XS] 8155239 [TESTBUG] Simple test setup for JVMTI ClassFileLoadHook In-Reply-To: <5720FF1A.3000907@oracle.com> References: <5720C248.6080902@oracle.com> <5720FF1A.3000907@oracle.com> Message-ID: <57215281.1050304@oracle.com> Hi Misha, Thanks for the review. 
I'll fix the code as you suggested and post a new version. - Ioo On 4/27/16 11:04 AM, mikhailo wrote: > Hi Ioi, > > Great. This is very helpful for the task at hand and more jvmti tests > to come. > Overall looks good, and I have couple of comments: > > - I recommend moving the libSimpleClassFileLoadHook.c to a > sub-directory > under hotpsot/test/testlibrary, such as > hotpsot/test/testlibrary/jvmti > > - libSimpleClassFileLoadHook.c > init_options(): > - would it be safer to make a copy of "CLASS_NAME", "FROM" and > "TO"; I am not sure of the convention > of ownership of the arguments passed to Agent_Initialize(), > and who is required to free them > Just checking as a precaution > > ClassFileLoadHook(): > - please add a brief comment, something like "this hook will > match the class name to CLASS_NAME, and attempt > to replace any occurrence of 'FROM' string to 'TO' string" > > Thank you, > Misha > > > On 04/27/2016 06:44 AM, Ioi Lam wrote: >> Hi, >> >> Please review this simple test bug: >> >> https://bugs.openjdk.java.net/browse/JDK-8155239 >> http://cr.openjdk.java.net/~iklam/jdk9/8155239_simple_classfileloadhook.v01/ >> >> >> Testing ClassFileLoadHook is a pain. In many cases, we are not >> testing CFLH itself, but rather "how does my feature interact with >> CFLH. One example is the interaction between CDS and CFLH. This RFE >> makes it easy to write such tests. >> >> Please see SimpleClassFileLoadHookTest.java for an example. >> >> I've tested under Linux and I am now trying to test under all other >> platforms. >> >> Thanks >> - Ioi > From per.liden at oracle.com Thu Apr 28 07:53:54 2016 From: per.liden at oracle.com (Per Liden) Date: Thu, 28 Apr 2016 09:53:54 +0200 Subject: RFR: 8141501: Problems with BitMap buffer management In-Reply-To: <5720A91A.1020809@oracle.com> References: <5720A91A.1020809@oracle.com> Message-ID: <5721C192.5020806@oracle.com> Hi Stefan, On 2016-04-27 13:57, Stefan Karlsson wrote: > Hi all, > > Please review this patch to change how the backing storage of BitMaps > are managed. > > http://cr.openjdk.java.net/~stefank/8141501/webrev.01 This review only covers the GC and BitMap parts. vmStructs_cms.hpp ----------------- 36 nonstatic_field(CMSBitMap, _bm, BitMap) \ This should be BitMapView instead of BitMap. g1RegionToSpaceMapper.hpp ------------------------- 50 size_t _region_granularity; Pre-existing as far as I can tell, but this is unused and can be removed. 33 G1RegionToSpaceMapper::G1RegionToSpaceMapper(ReservedSpace rs, 34 size_t used_size, 35 size_t page_size, 36 size_t region_granularity, 37 size_t commit_factor, 38 MemoryType type) : Also pre-existing, but I'd suggest we use the name region_granularity or alloc_granularity. Currently we use both names in different contexts to refer to the same thing. bitMap.cpp ---------- 84 typedef BitMapAllocator > CHeapAllocator; 85 typedef BitMapAllocator ResourceAllocator; How about calling these CHeapBitMapAllocator/ResourceBitMapAllocator? 111 ArenaBitMap::ArenaBitMap(Arena* arena, idx_t size_in_bits) 112 : BitMap((bm_word_t*)arena->Amalloc(calc_size_in_bytes(size_in_bits)), size_in_bits) { 113 } Could we break this out into a ArenaBitMapAllocator, to align to the model used for Resource/CHeap allocators? cheers, Per > https://bugs.openjdk.java.net/browse/JDK-8141501 > > The patch changes BitMap into an abstract base class, with concrete > sub-classes that manages the underlying bitmap backing storage. 
> > The proposed BitMap classes are: > > - BitMap - the abstract base class > - ResourceBitMap - bitmap with resource area allocated backing storage > - ArenaBitMap - bitmap with arena allocated backing storage > - CHeapBitMap - bitmap with CHeap allocated backing storage > - BitMapView - bitmap without the ownership of the backing storage > > This will hopefully make it less likely to use the BitMaps > incorrectly. Previously, it was possible to write the following broken > code: > > // CHeap allocate. > BitMap map(BITMAP_SIZE / 2, false); > > // Resource allocate. > // The CHeap memory is leaked. > map.resize(BITMAP_SIZE); > > and: > > // Resource allocate. > BitMap map(BITMAP_SIZE / 2); > > // CHeap allocate. > // CHeap freeing Resource allocated memory => memory stomping > map.resize(BITMAP_SIZE, false); > > The stricter typing of the new BitMap sub-classes prevents these classes > of bugs. > > Further motivation for this patch can be found in: > https://bugs.openjdk.java.net/browse/JDK-8141501 > > Tested with JPRT and ExecuteInternalVMTests. > > Thanks Kim for providing offline feedback on different revisions of this > patch. > > This changes code in mostly the GC and Compiler parts of the JVM, so it > would be good to get reviews from those groups. > > Thanks, > StefanK From serguei.spitsyn at oracle.com Thu Apr 28 09:44:55 2016 From: serguei.spitsyn at oracle.com (serguei.spitsyn at oracle.com) Date: Thu, 28 Apr 2016 02:44:55 -0700 Subject: RFR (XS): 8153190 JVMTI GetAllModules should make it clear that it also returns unnamed module Message-ID: <5721DB97.4070903@oracle.com> Please, review the JVM TI spec one-liner fix below. The jvmti.xml patch: diff -r 888f37103953 src/share/vm/prims/jvmti.xml --- a/src/share/vm/prims/jvmti.xml Wed Apr 27 11:25:16 2016 +0200 +++ b/src/share/vm/prims/jvmti.xml Thu Apr 28 02:24:09 2016 -0700 @@ -6482,6 +6482,7 @@ Get All Modules Return an array of all modules loaded in the virtual machine. + This includes the unnamed module for each class loader. The number of modules in the array is returned via module_count_ptr, and the array itself via modules_ptr. Summary: The GetAllModules function returns all modules in the VM, this includes the unnamed module for each class loader. The JVM TI spec should make this clear. Testing: No testing is necessary. Thanks, Serguei From Alan.Bateman at oracle.com Thu Apr 28 09:48:16 2016 From: Alan.Bateman at oracle.com (Alan Bateman) Date: Thu, 28 Apr 2016 10:48:16 +0100 Subject: RFR (XS): 8153190 JVMTI GetAllModules should make it clear that it also returns unnamed module In-Reply-To: <5721DB97.4070903@oracle.com> References: <5721DB97.4070903@oracle.com> Message-ID: <5721DC60.7000303@oracle.com> On 28/04/2016 10:44, serguei.spitsyn at oracle.com wrote: > Please, review the JVM TI spec one-liner fix below. > > > The jvmti.xml patch: > > diff -r 888f37103953 src/share/vm/prims/jvmti.xml > --- a/src/share/vm/prims/jvmti.xml Wed Apr 27 11:25:16 2016 +0200 > +++ b/src/share/vm/prims/jvmti.xml Thu Apr 28 02:24:09 2016 -0700 > @@ -6482,6 +6482,7 @@ > Get All Modules > > Return an array of all modules loaded in the virtual machine. > + This includes the unnamed module for each class loader. Looks okay, alternative "The array include the unnamed module ...". -Alan. 
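To show what the clarified wording means for an agent, here is a minimal sketch of a GetAllModules call site. It is illustrative only and not part of any webrev in this thread: the function name is made up, error handling is trimmed, and the jvmtiEnv is assumed to have been obtained in Agent_OnLoad in the usual way.

#include <stdio.h>
#include <jvmti.h>

// Sketch: report how many modules GetAllModules returns. Per the spec
// clarification above, the returned array includes the unnamed module for
// each class loader in addition to all named modules, so an agent must not
// assume that every returned module has a name.
static void print_all_modules_count(jvmtiEnv* jvmti) {
  jint module_count = 0;
  jobject* modules = NULL;
  jvmtiError err = jvmti->GetAllModules(&module_count, &modules);
  if (err != JVMTI_ERROR_NONE) {
    printf("GetAllModules failed: %d\n", (int) err);
    return;
  }
  printf("GetAllModules returned %d modules\n", (int) module_count);
  // The module array is allocated by the JVMTI implementation and must be
  // released by the agent.
  jvmti->Deallocate((unsigned char*) modules);
}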
From serguei.spitsyn at oracle.com Thu Apr 28 09:53:07 2016 From: serguei.spitsyn at oracle.com (serguei.spitsyn at oracle.com) Date: Thu, 28 Apr 2016 02:53:07 -0700 Subject: RFR (XS): 8153190 JVMTI GetAllModules should make it clear that it also returns unnamed module In-Reply-To: <5721DC60.7000303@oracle.com> References: <5721DB97.4070903@oracle.com> <5721DC60.7000303@oracle.com> Message-ID: <5721DD83.8060800@oracle.com> On 4/28/16 02:48, Alan Bateman wrote: > > > On 28/04/2016 10:44, serguei.spitsyn at oracle.com wrote: >> Please, review the JVM TI spec one-liner fix below. >> >> >> The jvmti.xml patch: >> >> diff -r 888f37103953 src/share/vm/prims/jvmti.xml >> --- a/src/share/vm/prims/jvmti.xml Wed Apr 27 11:25:16 2016 +0200 >> +++ b/src/share/vm/prims/jvmti.xml Thu Apr 28 02:24:09 2016 -0700 >> @@ -6482,6 +6482,7 @@ >> Get All Modules >> >> Return an array of all modules loaded in the virtual machine. >> + This includes the unnamed module for each class loader. > Looks okay, alternative "The array include the unnamed module ...". It makes sense, thanks. The updated patch is: diff -r 888f37103953 src/share/vm/prims/jvmti.xml --- a/src/share/vm/prims/jvmti.xml Wed Apr 27 11:25:16 2016 +0200 +++ b/src/share/vm/prims/jvmti.xml Thu Apr 28 02:51:43 2016 -0700 @@ -6482,6 +6482,7 @@ Get All Modules Return an array of all modules loaded in the virtual machine. + The array includes the unnamed module for each class loader. The number of modules in the array is returned via module_count_ptr, and the array itself via modules_ptr. Thanks, Serguei > > -Alan. From tobias.hartmann at oracle.com Thu Apr 28 10:45:24 2016 From: tobias.hartmann at oracle.com (Tobias Hartmann) Date: Thu, 28 Apr 2016 12:45:24 +0200 Subject: [9] RFR(S): 8155608: String intrinsic range checks are not strict enough Message-ID: <5721E9C4.7010208@oracle.com> Hi please review the following patch: https://bugs.openjdk.java.net/browse/JDK-8155608 http://cr.openjdk.java.net/~thartmann/8155608/jdk/webrev.00/ http://cr.openjdk.java.net/~thartmann/8155608/hotspot/webrev.00/ Some String API methods use StringUTF16.putChar/getChar to read a char value from a byte array. For performance reasons, putChar/getChar is intrinsified by C1/C2 without range checks (like the Unsafe counterparts). The Java callers are responsible for adding the corresponding explicit range checks if necessary. I noticed that the Java level range checks in StringUTF16::compress(), StringUTF16::getChars() and StringLatin1::inflate() are not strong enough. Offset and length need to be multiplied by two because they index a char value in a byte array. I added a regression test that triggers the problem and also checks the other relevant intrinsics by invoking the methods with different arguments. Tested with regression test (-Xint/-Xcomp) and RBT (running). Thanks, Tobias From pavel.punegov at oracle.com Thu Apr 28 12:21:38 2016 From: pavel.punegov at oracle.com (Pavel Punegov) Date: Thu, 28 Apr 2016 15:21:38 +0300 Subject: RFR (XS): 8155034: [TESTBUG] ctw tests fail to compile: module reads package sun.reflect from both jdk.unsupported and java.base Message-ID: <168B0EBC-A08F-4527-86A6-34CDB53CB748@oracle.com> Hi, please review this small fix to ctw tests. Package sun.reflect was moved to jdk.internal.reflect, but these tests haven't been changed accordingly bug: https://bugs.openjdk.java.net/browse/JDK-8155034 webrev: http://cr.openjdk.java.net/~ppunegov/8155034/webrev.00/ --
Thanks, Pavel Punegov From dmitry.dmitriev at oracle.com Thu Apr 28 12:29:55 2016 From: dmitry.dmitriev at oracle.com (Dmitry Dmitriev) Date: Thu, 28 Apr 2016 15:29:55 +0300 Subject: RFR (XS): 8155034: [TESTBUG] ctw tests fail to compile: module reads package sun.reflect from both jdk.unsupported and java.base In-Reply-To: <168B0EBC-A08F-4527-86A6-34CDB53CB748@oracle.com> References: <168B0EBC-A08F-4527-86A6-34CDB53CB748@oracle.com> Message-ID: <5e597330-12c8-3f32-324e-d645a62efb98@oracle.com> Hi Pavel, Looks good to me, except small thing: copyright year should be updated in Compiler.java. Not need a new webrev for that. Thanks, Dmitry On 28.04.2016 15:21, Pavel Punegov wrote: > Hi, > > please review this small fix to ctw tests. > Package sun.reflect was moved to jdk.internal.reflect, but these tests haven?t been changed accordingly > > bug: https://bugs.openjdk.java.net/browse/JDK-8155034 > webrev: http://cr.openjdk.java.net/~ppunegov/8155034/webrev.00/ > > ? Thanks, > Pavel Punegov > From stefan.karlsson at oracle.com Thu Apr 28 13:23:48 2016 From: stefan.karlsson at oracle.com (Stefan Karlsson) Date: Thu, 28 Apr 2016 15:23:48 +0200 Subject: RFR: 8141501: Problems with BitMap buffer management In-Reply-To: <5720A91A.1020809@oracle.com> References: <5720A91A.1020809@oracle.com> Message-ID: <57220EE4.4080007@oracle.com> Hi all, I decided to restructure the allocation code a bit to get better separation between the bitmap allocation logic and the actual bitmap memory allocators. http://cr.openjdk.java.net/~stefank/8141501/webrev.02.delta http://cr.openjdk.java.net/~stefank/8141501/webrev.02 BitMap has now the following allocation functions: reallocate, allocate, and free. These functions take care of the high-level handling of the BitMap allocations: requesting, copying, clearing, and releasing memory. The low-level allocation and freeing of memory are handled by the ResourceBitMapAllocator, CHeapBitMapAllocator, and ArenaBitMapAllocator classes. I've also moved the implementation of resize, initialize, and reinitialize into protected functions in BitMap. The sub-classes then expose public version of these functions, and pass down the appropriate allocator instances to the protected BitMap versions. The current patch does not provide resize, initialize and reinitialize functions for the ArenaBitMaps, since that requires an extra field in all ArenaBitMaps. If we decide that that's not a real problem, we could easily introduce that as a separate patch. See: http://cr.openjdk.java.net/~stefank/8141501/webrev.02.resizableArenaBitMaps/ While implementing the above patches I realized that the ResourceBitMaps actually are cleared in the constructor, so the previous comments were wrong and I've updated the comments accordingly. But this also means that the ResourceBitMap map(size); map.clear(); pattern, which can be seen mainly in the compiler code, clears the bitmaps twice. I've created the following CR to track that: https://bugs.openjdk.java.net/browse/JDK-8155638 - Resource allocated BitMaps are often cleared twice Thanks, StefanK On 2016-04-27 13:57, Stefan Karlsson wrote: > Hi all, > > Please review this patch to change how the backing storage of BitMaps > are managed. > > http://cr.openjdk.java.net/~stefank/8141501/webrev.01 > https://bugs.openjdk.java.net/browse/JDK-8141501 > > The patch changes BitMap into an abstract base class, with concrete > sub-classes that manages the underlying bitmap backing storage. 
> > The proposed BitMap classes are: > > - BitMap - the abstract base class > - ResourceBitMap - bitmap with resource area allocated backing storage > - ArenaBitMap - bitmap with arena allocated backing storage > - CHeapBitMap - bitmap with CHeap allocated backing storage > - BitMapView - bitmap without the ownership of the backing storage > > This will hopefully make it less likely to use the BitMaps > incorrectly. Previously, it was possible to write the following broken > code: > > // CHeap allocate. > BitMap map(BITMAP_SIZE / 2, false); > > // Resource allocate. > // The CHeap memory is leaked. > map.resize(BITMAP_SIZE); > > and: > > // Resource allocate. > BitMap map(BITMAP_SIZE / 2); > > // CHeap allocate. > // CHeap freeing Resource allocated memory => memory stomping > map.resize(BITMAP_SIZE, false); > > The stricter typing of the new BitMap sub-classes prevents these > classes of bugs. > > Further motivation for this patch can be found in: > https://bugs.openjdk.java.net/browse/JDK-8141501 > > Tested with JPRT and ExecuteInternalVMTests. > > Thanks Kim for providing offline feedback on different revisions of > this patch. > > This changes code in mostly the GC and Compiler parts of the JVM, so > it would be good to get reviews from those groups. > > Thanks, > StefanK From per.liden at oracle.com Thu Apr 28 14:51:26 2016 From: per.liden at oracle.com (Per Liden) Date: Thu, 28 Apr 2016 16:51:26 +0200 Subject: RFR: 8141501: Problems with BitMap buffer management In-Reply-To: <57220EE4.4080007@oracle.com> References: <5720A91A.1020809@oracle.com> <57220EE4.4080007@oracle.com> Message-ID: <5722236E.2060602@oracle.com> Hi Stefan, On 2016-04-28 15:23, Stefan Karlsson wrote: > Hi all, > > I decided to restructure the allocation code a bit to get better > separation between the bitmap allocation logic and the actual bitmap > memory allocators. > > http://cr.openjdk.java.net/~stefank/8141501/webrev.02.delta > http://cr.openjdk.java.net/~stefank/8141501/webrev.02 Looks good. I like the new allocators model. cheers, Per > > BitMap has now the following allocation functions: reallocate, allocate, > and free. These functions take care of the high-level handling of the > BitMap allocations: requesting, copying, clearing, and releasing memory. > The low-level allocation and freeing of memory are handled by the > ResourceBitMapAllocator, CHeapBitMapAllocator, and ArenaBitMapAllocator > classes. > > I've also moved the implementation of resize, initialize, and > reinitialize into protected functions in BitMap. The sub-classes then > expose public version of these functions, and pass down the appropriate > allocator instances to the protected BitMap versions. > > The current patch does not provide resize, initialize and reinitialize > functions for the ArenaBitMaps, since that requires an extra field in > all ArenaBitMaps. If we decide that that's not a real problem, we could > easily introduce that as a separate patch. See: > > http://cr.openjdk.java.net/~stefank/8141501/webrev.02.resizableArenaBitMaps/ > > > While implementing the above patches I realized that the ResourceBitMaps > actually are cleared in the constructor, so the previous comments were > wrong and I've updated the comments accordingly. But this also means > that the ResourceBitMap map(size); map.clear(); pattern, which can be > seen mainly in the compiler code, clears the bitmaps twice. 
I've created > the following CR to track that: > > https://bugs.openjdk.java.net/browse/JDK-8155638 - Resource allocated > BitMaps are often cleared twice > > Thanks, > StefanK > > On 2016-04-27 13:57, Stefan Karlsson wrote: >> Hi all, >> >> Please review this patch to change how the backing storage of BitMaps >> are managed. >> >> http://cr.openjdk.java.net/~stefank/8141501/webrev.01 >> https://bugs.openjdk.java.net/browse/JDK-8141501 >> >> The patch changes BitMap into an abstract base class, with concrete >> sub-classes that manages the underlying bitmap backing storage. >> >> The proposed BitMap classes are: >> >> - BitMap - the abstract base class >> - ResourceBitMap - bitmap with resource area allocated backing storage >> - ArenaBitMap - bitmap with arena allocated backing storage >> - CHeapBitMap - bitmap with CHeap allocated backing storage >> - BitMapView - bitmap without the ownership of the backing storage >> >> This will hopefully make it less likely to use the BitMaps >> incorrectly. Previously, it was possible to write the following broken >> code: >> >> // CHeap allocate. >> BitMap map(BITMAP_SIZE / 2, false); >> >> // Resource allocate. >> // The CHeap memory is leaked. >> map.resize(BITMAP_SIZE); >> >> and: >> >> // Resource allocate. >> BitMap map(BITMAP_SIZE / 2); >> >> // CHeap allocate. >> // CHeap freeing Resource allocated memory => memory stomping >> map.resize(BITMAP_SIZE, false); >> >> The stricter typing of the new BitMap sub-classes prevents these >> classes of bugs. >> >> Further motivation for this patch can be found in: >> https://bugs.openjdk.java.net/browse/JDK-8141501 >> >> Tested with JPRT and ExecuteInternalVMTests. >> >> Thanks Kim for providing offline feedback on different revisions of >> this patch. >> >> This changes code in mostly the GC and Compiler parts of the JVM, so >> it would be good to get reviews from those groups. >> >> Thanks, >> StefanK > From igor.ignatyev at oracle.com Thu Apr 28 14:54:25 2016 From: igor.ignatyev at oracle.com (Igor Ignatyev) Date: Thu, 28 Apr 2016 17:54:25 +0300 Subject: RFR (XS): 8155034: [TESTBUG] ctw tests fail to compile: module reads package sun.reflect from both jdk.unsupported and java.base In-Reply-To: <5e597330-12c8-3f32-324e-d645a62efb98@oracle.com> References: <168B0EBC-A08F-4527-86A6-34CDB53CB748@oracle.com> <5e597330-12c8-3f32-324e-d645a62efb98@oracle.com> Message-ID: <892E1385-667E-44FF-A8B4-493B084CD6F1@oracle.com> Pavel, looks good to me. ? Igor > On Apr 28, 2016, at 3:29 PM, Dmitry Dmitriev wrote: > > Hi Pavel, > > Looks good to me, except small thing: copyright year should be updated in Compiler.java. Not need a new webrev for that. > > Thanks, Dmitry > > On 28.04.2016 15:21, Pavel Punegov wrote: >> Hi, >> >> please review this small fix to ctw tests. >> Package sun.reflect was moved to jdk.internal.reflect, but these tests haven?t been changed accordingly >> >> bug: https://bugs.openjdk.java.net/browse/JDK-8155034 >> webrev: http://cr.openjdk.java.net/~ppunegov/8155034/webrev.00/ >> >> ? 
Thanks, >> Pavel Punegov >> > From pavel.punegov at oracle.com Thu Apr 28 15:02:19 2016 From: pavel.punegov at oracle.com (Pavel Punegov) Date: Thu, 28 Apr 2016 18:02:19 +0300 Subject: RFR (XS): 8155034: [TESTBUG] ctw tests fail to compile: module reads package sun.reflect from both jdk.unsupported and java.base In-Reply-To: <892E1385-667E-44FF-A8B4-493B084CD6F1@oracle.com> References: <168B0EBC-A08F-4527-86A6-34CDB53CB748@oracle.com> <5e597330-12c8-3f32-324e-d645a62efb98@oracle.com> <892E1385-667E-44FF-A8B4-493B084CD6F1@oracle.com> Message-ID: <7138373A-9F8A-4CAF-B169-1360C6FA6CF2@oracle.com> Thank you for review, Igor and Dmitry ? Pavel. > On 28 Apr 2016, at 17:54, Igor Ignatyev wrote: > > Pavel, > > looks good to me. > > ? Igor > >> On Apr 28, 2016, at 3:29 PM, Dmitry Dmitriev wrote: >> >> Hi Pavel, >> >> Looks good to me, except small thing: copyright year should be updated in Compiler.java. Not need a new webrev for that. >> >> Thanks, Dmitry >> >> On 28.04.2016 15:21, Pavel Punegov wrote: >>> Hi, >>> >>> please review this small fix to ctw tests. >>> Package sun.reflect was moved to jdk.internal.reflect, but these tests haven?t been changed accordingly >>> >>> bug: https://bugs.openjdk.java.net/browse/JDK-8155034 >>> webrev: http://cr.openjdk.java.net/~ppunegov/8155034/webrev.00/ >>> >>> ? Thanks, >>> Pavel Punegov >>> >> > From stefan.karlsson at oracle.com Thu Apr 28 15:42:36 2016 From: stefan.karlsson at oracle.com (Stefan Karlsson) Date: Thu, 28 Apr 2016 17:42:36 +0200 Subject: RFR: 8141501: Problems with BitMap buffer management In-Reply-To: <5722236E.2060602@oracle.com> References: <5720A91A.1020809@oracle.com> <57220EE4.4080007@oracle.com> <5722236E.2060602@oracle.com> Message-ID: <57222F6C.6050607@oracle.com> Thanks, Per. StefanK On 28/04/16 16:51, Per Liden wrote: > Hi Stefan, > > On 2016-04-28 15:23, Stefan Karlsson wrote: >> Hi all, >> >> I decided to restructure the allocation code a bit to get better >> separation between the bitmap allocation logic and the actual bitmap >> memory allocators. >> >> http://cr.openjdk.java.net/~stefank/8141501/webrev.02.delta >> http://cr.openjdk.java.net/~stefank/8141501/webrev.02 > > Looks good. I like the new allocators model. > > cheers, > Per > >> >> BitMap has now the following allocation functions: reallocate, allocate, >> and free. These functions take care of the high-level handling of the >> BitMap allocations: requesting, copying, clearing, and releasing memory. >> The low-level allocation and freeing of memory are handled by the >> ResourceBitMapAllocator, CHeapBitMapAllocator, and ArenaBitMapAllocator >> classes. >> >> I've also moved the implementation of resize, initialize, and >> reinitialize into protected functions in BitMap. The sub-classes then >> expose public version of these functions, and pass down the appropriate >> allocator instances to the protected BitMap versions. >> >> The current patch does not provide resize, initialize and reinitialize >> functions for the ArenaBitMaps, since that requires an extra field in >> all ArenaBitMaps. If we decide that that's not a real problem, we could >> easily introduce that as a separate patch. See: >> >> http://cr.openjdk.java.net/~stefank/8141501/webrev.02.resizableArenaBitMaps/ >> >> >> >> While implementing the above patches I realized that the ResourceBitMaps >> actually are cleared in the constructor, so the previous comments were >> wrong and I've updated the comments accordingly. 
But this also means >> that the ResourceBitMap map(size); map.clear(); pattern, which can be >> seen mainly in the compiler code, clears the bitmaps twice. I've created >> the following CR to track that: >> >> https://bugs.openjdk.java.net/browse/JDK-8155638 - Resource allocated >> BitMaps are often cleared twice >> >> Thanks, >> StefanK >> >> On 2016-04-27 13:57, Stefan Karlsson wrote: >>> Hi all, >>> >>> Please review this patch to change how the backing storage of BitMaps >>> are managed. >>> >>> http://cr.openjdk.java.net/~stefank/8141501/webrev.01 >>> https://bugs.openjdk.java.net/browse/JDK-8141501 >>> >>> The patch changes BitMap into an abstract base class, with concrete >>> sub-classes that manages the underlying bitmap backing storage. >>> >>> The proposed BitMap classes are: >>> >>> - BitMap - the abstract base class >>> - ResourceBitMap - bitmap with resource area allocated backing storage >>> - ArenaBitMap - bitmap with arena allocated backing storage >>> - CHeapBitMap - bitmap with CHeap allocated backing storage >>> - BitMapView - bitmap without the ownership of the backing storage >>> >>> This will hopefully make it less likely to use the BitMaps >>> incorrectly. Previously, it was possible to write the following broken >>> code: >>> >>> // CHeap allocate. >>> BitMap map(BITMAP_SIZE / 2, false); >>> >>> // Resource allocate. >>> // The CHeap memory is leaked. >>> map.resize(BITMAP_SIZE); >>> >>> and: >>> >>> // Resource allocate. >>> BitMap map(BITMAP_SIZE / 2); >>> >>> // CHeap allocate. >>> // CHeap freeing Resource allocated memory => memory stomping >>> map.resize(BITMAP_SIZE, false); >>> >>> The stricter typing of the new BitMap sub-classes prevents these >>> classes of bugs. >>> >>> Further motivation for this patch can be found in: >>> https://bugs.openjdk.java.net/browse/JDK-8141501 >>> >>> Tested with JPRT and ExecuteInternalVMTests. >>> >>> Thanks Kim for providing offline feedback on different revisions of >>> this patch. >>> >>> This changes code in mostly the GC and Compiler parts of the JVM, so >>> it would be good to get reviews from those groups. >>> >>> Thanks, >>> StefanK >> From sgehwolf at redhat.com Thu Apr 28 16:35:23 2016 From: sgehwolf at redhat.com (Severin Gehwolf) Date: Thu, 28 Apr 2016 18:35:23 +0200 Subject: fastdebug builds broken for jdk9/hs tree? Message-ID: <1461861323.25448.42.camel@redhat.com> Hi, Recently, a fastdebug build of a server JVM fails for me on Linux x86_64 with older sys/sdt.h and/or GCC. The same failure shows up since a couple of days on the Zero builder[1]: /home/sgehwolf/openjdk9-hs-pristine/hotspot/src/share/vm/services/classLoadingService.cpp: In static member function ?static void ClassLoadingService::notify_class_loaded(InstanceKlass*, bool)?: /home/sgehwolf/openjdk9-hs-pristine/hotspot/src/share/vm/services/classLoadingService.cpp:148: error: cannot reload integer constant operand in ?asm? /home/sgehwolf/openjdk9-hs-pristine/hotspot/src/share/vm/services/classLoadingService.cpp:148: error: cannot reload integer constant operand in ?asm? gmake[4]: *** [/home/sgehwolf/openjdk9-hs-pristine/build/linux-x86_64-normal-server-fastdebug/hotspot/variant-server/libjvm/objs/classLoadingService.o] Error 1 gmake[4]: Leaving directory `/home/sgehwolf/openjdk9-hs-pristine/hotspot/make' On that line is a macro for dtrace: void ClassLoadingService::notify_class_loaded(InstanceKlass* k, bool shared_class) { ? 
DTRACE_CLASSLOAD_PROBE(loaded, k, shared_class); // <<< This fails If I compile the same file *without* -DDTRACE_ENABLE it compiles fine. A slowdebug build compiles fine too. I was able to *reproduce* this on RHEL 6. It *does not* reproduce on F23. I had a brief look at the preprocessed compilation unit for classLoadingService and the code that gets included for the dtrace macro looks different. So here the questions: 1.) Has anything related to dtrace macros changed recently? 2.) Has anybody else been seeing this? Thanks, Severin [1]?http://builder.classpath.org/jenkins/job/OpenJDK9_hs_rt_Zero/227/console From max.ockner at oracle.com Thu Apr 28 19:11:13 2016 From: max.ockner at oracle.com (Max Ockner) Date: Thu, 28 Apr 2016 15:11:13 -0400 Subject: RFR: 8154110: Update class* and safepoint* logging subsystems In-Reply-To: <57211888.8070603@oracle.com> References: <571EFFD9.9050408@oracle.com> <75e5bc77-a00f-8f12-f636-0b40c28199de@oracle.com> <428a3bb9-d523-bfff-794a-8e1330ff8eeb@oracle.com> <571FD805.3040201@oracle.com> <5720061C.4070404@oracle.com> <57201241.50609@oracle.com> <57207939.2020805@oracle.com> <5d14b827-63b8-02cb-2cfe-73f88ce41678@oracle.com> <57208406.5040605@oracle.com> <38802e29-b1c8-9dea-093a-5eabe3cb94e4@oracle.com> <5720ADEC.3050904@oracle.com> <62138c18-ff85-4cfe-c33a-ba46b54c9a44@oracle.com> <57211888.8070603@oracle.com> Message-ID: <57226051.9050505@oracle.com> Here is an RFE: https://bugs.openjdk.java.net/browse/JDK-8155666. Basically we want to reevaluate our tag choices and potentially decide on a set of conventions to follow for tag names in the future. I will commit the fix for 8154110 now. Thanks, Max On 4/27/2016 3:52 PM, Max Ockner wrote: > Unless anyone feels that "constraints" is absolutely unacceptable, I > think we should leave it alone. Shall we file a bug so we can relocate > this discussion? > > Max > > On 4/27/2016 3:19 PM, Christian Thalinger wrote: >>> On Apr 27, 2016, at 2:19 AM, Coleen Phillimore >>> wrote: >>> >>> >>> >>> On 4/27/16 8:17 AM, David Holmes wrote: >>>> >>>> On 27/04/2016 7:41 PM, Marcus Larsson wrote: >>>>> >>>>> On 04/27/2016 11:19 AM, David Holmes wrote: >>>>>> On 27/04/2016 6:39 PM, Marcus Larsson wrote: >>>>>>> Hi, >>>>>>> >>>>>>> >>>>>>> On 04/27/2016 10:32 AM, Ioi Lam wrote: >>>>>>>> >>>>>>>> On 4/26/16 6:13 PM, David Holmes wrote: >>>>>>>>> On 27/04/2016 10:21 AM, Ioi Lam wrote: >>>>>>>>>> The changes look good to me. >>>>>>>>>> >>>>>>>>>> I think each line of the "constraints" prints only one item (?). >>>>>>>>>> If so, >>>>>>>>>> we should use the singular form. (Max, could you post a >>>>>>>>>> sample output >>>>>>>>>> from -Xlog?) >>>>>>>>> The point is that it shows all the constraints. >>>>>>>>> >>>>>>>> We currently have a mix-match of singular/plural forms in the >>>>>>>> log tags: >>>>>>>> >>>>>>>> singular: >>>>>>>> LOG_TAG(alloc) >>>>>>>> LOG_TAG(region) >>>>>>>> >>>>>>>> plural: >>>>>>>> LOG_TAG(defaultmethods) >>>>>>>> >>>>>>>> Sometimes whether to use an (s) can be a personal preference. >>>>>>>> However, >>>>>>>> the poor user would need to try 2^n times to get the right >>>>>>>> combination >>>>>>>> of (s) or (no s), and that's too much. >>>>>>> Yeah, we should definitely be consistent with this. I vote for >>>>>>> tags in >>>>>>> singular, since it makes them ever so slightly shorter. >>>>>> Hmmm, so data becomes datum? Or is that the exception to the >>>>>> rule? ;-) >>>>> Seems like a good exception to me. :) Perhaps the rule should be to >>>>> avoid trailing s if it signifies plural. 
>>>>> >>>>>> No matter what you choose some things will be unintuitively awkward: >>>>>> defaultmethods, exceptions, constraints, all suit plural forms >>>>>> and are >>>>>> awkward in singular. There are numerous plural forms in use already. >>>>> I don't see what distinguishes the tags you mention from the >>>>> others. I >>>>> guess I don't see what makes them awkward in singular. If I want >>>>> defaultmethod logging I enable that tag. It's obvious I will get >>>>> multiple log messages for it so I don't see the purpose of the 's'. >>>> Maybe it is just familiarity with the old TraceXXX forms. I prefer >>>> the noun forms to be plural - as many of them are. Many of the ones >>>> that are not are a prefix taken from a longer form ie >>>> TraceThreadXXX became thread + xxx; whereas TraceExceptions became >>>> exceptions. >>>> >>>> It is all somewhat arbitrary in my view. >>> LOL, we said the same thing. >> I?d like to propose another approach: constraintz >> >> It?s way cooler and rulezzzz! >> >>> Coleen >>> >>>> David >>>> >>>>> Thanks, >>>>> Marcus >>>>> >>>>>> David >>>>>> >>>>>>> Thanks, >>>>>>> Marcus >>>>>>> >>>>>>>> - Ioi >>>>>>>> >>>>>>>>> David >>>>>>>>> >>>>>>>>>> Thanks >>>>>>>>>> - Ioi >>>>>>>>>> >>>>>>>>>> On 4/26/16 2:05 PM, Max Ockner wrote: >>>>>>>>>>> New webrev: >>>>>>>>>>> http://cr.openjdk.java.net/~mockner/8154110.hotspot.02/ >>>>>>>>>>> >>>>>>>>>>> I have responded to these suggestions: >>>>>>>>>>> - class+loaderdata -> class+loader+data >>>>>>>>>>> - class+loaderconstraints -> class+loader+constraints >>>>>>>>>>> - I did not change constraints to constraint. I agree with >>>>>>>>>>> Coleen's >>>>>>>>>>> reasoning, but please let me know if you think there is a good >>>>>>>>>>> reason >>>>>>>>>>> to change it. >>>>>>>>>>> >>>>>>>>>>> Thanks, >>>>>>>>>>> Max >>>>>>>>>>> >>>>>>>>>>> On 4/26/2016 4:05 PM, Coleen Phillimore wrote: >>>>>>>>>>>> Hi, >>>>>>>>>>>> >>>>>>>>>>>> Yes, his looks good, but I agree that class, >>>>>>>>>>>> loaderconstraints => >>>>>>>>>>>> class, loader, constraints >>>>>>>>>>>> and class, loaderdata => class, loader, data >>>>>>>>>>>> >>>>>>>>>>>> But I think constraints should be plural, we're logging the >>>>>>>>>>>> constraint checking (more than one constraint) so plural >>>>>>>>>>>> makes more >>>>>>>>>>>> sense to me. >>>>>>>>>>>> >>>>>>>>>>>> Okay, I think I now prefer the combination of short words >>>>>>>>>>>> to do >>>>>>>>>>>> logging after seeing this change. >>>>>>>>>>>> >>>>>>>>>>>> Thanks, >>>>>>>>>>>> Coleen >>>>>>>>>>>> >>>>>>>>>>>> >>>>>>>>>>>> On 4/26/16 3:44 PM, Robbin Ehn wrote: >>>>>>>>>>>>> Hi Max, >>>>>>>>>>>>> >>>>>>>>>>>>> I was just looking at these tags and thought maybe they >>>>>>>>>>>>> should be >>>>>>>>>>>>> split, nice ! >>>>>>>>>>>>> >>>>>>>>>>>>> I have request for the ' constraints' tag, can make it >>>>>>>>>>>>> singular >>>>>>>>>>>>> (without ending 's') ? >>>>>>>>>>>>> >>>>>>>>>>>>> (there are a few other ones in plural which we also should >>>>>>>>>>>>> fix) >>>>>>>>>>>>> >>>>>>>>>>>>> Otherwise I think this looks good! >>>>>>>>>>>>> >>>>>>>>>>>>> Thanks! >>>>>>>>>>>>> >>>>>>>>>>>>> /Robbin >>>>>>>>>>>>> >>>>>>>>>>>>> On 04/26/2016 07:42 AM, Max Ockner wrote: >>>>>>>>>>>>>> Hello, >>>>>>>>>>>>>> >>>>>>>>>>>>>> This change factors the tags from the class and safepoint >>>>>>>>>>>>>> logging >>>>>>>>>>>>>> subsystems into smaller tags, including "class" and >>>>>>>>>>>>>> "safepoint" >>>>>>>>>>>>>> tags which are included in tag combination in their >>>>>>>>>>>>>> respective >>>>>>>>>>>>>> subsystems. 
>>>>>>>>>>>>>> >>>>>>>>>>>>>> classresolve -> class+resolve >>>>>>>>>>>>>> classload -> class+load >>>>>>>>>>>>>> classunload -> class+unload >>>>>>>>>>>>>> classpath -> class+path >>>>>>>>>>>>>> classloaderdata -> class+loaderdata >>>>>>>>>>>>>> classload+constraints -> class+loaderconstraints >>>>>>>>>>>>>> classinit -> class+init >>>>>>>>>>>>>> classload+preorder -> class+preorder >>>>>>>>>>>>>> >>>>>>>>>>>>>> safepointcleanup -> safepoint+cleanup >>>>>>>>>>>>>> >>>>>>>>>>>>>> class+loaderdata can be further factored into >>>>>>>>>>>>>> class+load+data. >>>>>>>>>>>>>> Same >>>>>>>>>>>>>> with class+loaderconstraints. >>>>>>>>>>>>>> >>>>>>>>>>>>>> Bug: https://bugs.openjdk.java.net/browse/JDK-8154110 >>>>>>>>>>>>>> webrev(hotspot): >>>>>>>>>>>>>> http://cr.openjdk.java.net/~mockner/8154110.hotspot/ >>>>>>>>>>>>>> webrev(jdk): >>>>>>>>>>>>>> http://cr.openjdk.java.net/~mockner/8154110.jdk/ >>>>>>>>>>>>>> >>>>>>>>>>>>>> Tested with jtreg hotspot tests. jdk tests currently >>>>>>>>>>>>>> running. >>>>>>>>>>>>>> >>>>>>>>>>>>>> Thanks, >>>>>>>>>>>>>> Max > From christian.thalinger at oracle.com Thu Apr 28 20:11:11 2016 From: christian.thalinger at oracle.com (Christian Thalinger) Date: Thu, 28 Apr 2016 10:11:11 -1000 Subject: [9] RFR(S): 8155608: String intrinsic range checks are not strict enough In-Reply-To: <5721E9C4.7010208@oracle.com> References: <5721E9C4.7010208@oracle.com> Message-ID: <3E5A7A17-9D61-4C80-A19D-9F510BCAADCE@oracle.com> > On Apr 28, 2016, at 12:45 AM, Tobias Hartmann wrote: > > Hi > > please review the following patch: > > https://bugs.openjdk.java.net/browse/JDK-8155608 > http://cr.openjdk.java.net/~thartmann/8155608/jdk/webrev.00/ + checkBoundsOffCount(dstOff << 1, len << 1, dst.length); It?s funny that we still do << 1 instead of * 2 when every compiler on this planet can optimize that. Yeah, yeah, I know, it?s because of the interpreter but does it really matter? Actually, I would prefer: + checkBoundsOffCount(dstOff * Character.BYTES, len * Character.BYTES, dst.length); > http://cr.openjdk.java.net/~thartmann/8155608/hotspot/webrev.00/ > > Some String API methods use StringUTF16.putChar/getChar to read a char value from a byte array. For performance reasons, putChar/getChar is intrinsified by C1/C2 without range checks (like the Unsafe counterparts). The Java callers are responsible for adding the corresponding explicit range checks if necessary. > > I noticed that the Java level range checks in StringUTF16::compress(), StringUTF16::getChars() and StringLatin1::inflate() are not strong enough. Offset and length need to be multiplied by two because they index a char value in a byte array. I added a regression test that triggers the problem and also checks the other relevant intrinsics by invoking the methods with different arguments. > > Tested with regression test (-Xint/-Xcomp) and RBT (running). > > Thanks, > Tobias From vladimir.kozlov at oracle.com Thu Apr 28 23:07:08 2016 From: vladimir.kozlov at oracle.com (Vladimir Kozlov) Date: Thu, 28 Apr 2016 16:07:08 -0700 Subject: [9] RFR(S): 8155608: String intrinsic range checks are not strict enough In-Reply-To: <5721E9C4.7010208@oracle.com> References: <5721E9C4.7010208@oracle.com> Message-ID: <5722979C.1020904@oracle.com> In StringUTF16.java should getChars() throw when srcBegin > srcEnd? Or there is check in other place? Hotspot test changes seems fine but do you really need new helper methods? Why not allocate dst array in TestStringIntrinsicRangeChecks.java? 
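To put numbers on the missing factor of two (the values are made up for illustration): in the UTF-16 case dst is a byte[], so dst.length == 10 holds only 5 chars. A call with dstOff == 4 and len == 3, both in char units, passes an unscaled check since 4 + 3 <= 10, yet chars 4..6 occupy bytes 8..13 and the write runs past the end of the array. With both operands doubled the check becomes 8 + 6 <= 10, which fails as it should.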
Thanks, Vladimir On 4/28/16 3:45 AM, Tobias Hartmann wrote: > Hi > > please review the following patch: > > https://bugs.openjdk.java.net/browse/JDK-8155608 > http://cr.openjdk.java.net/~thartmann/8155608/jdk/webrev.00/ > http://cr.openjdk.java.net/~thartmann/8155608/hotspot/webrev.00/ > > Some String API methods use StringUTF16.putChar/getChar to read a char value from a byte array. For performance reasons, putChar/getChar is intrinsified by C1/C2 without range checks (like the Unsafe counterparts). The Java callers are responsible for adding the corresponding explicit range checks if necessary. > > I noticed that the Java level range checks in StringUTF16::compress(), StringUTF16::getChars() and StringLatin1::inflate() are not strong enough. Offset and length need to be multiplied by two because they index a char value in a byte array. I added a regression test that triggers the problem and also checks the other relevant intrinsics by invoking the methods with different arguments. > > Tested with regression test (-Xint/-Xcomp) and RBT (running). > > Thanks, > Tobias > From david.holmes at oracle.com Thu Apr 28 23:09:57 2016 From: david.holmes at oracle.com (David Holmes) Date: Fri, 29 Apr 2016 09:09:57 +1000 Subject: (S) RFR: 8154710: [Solaris] Investigate use of in-memory low-resolution timestamps for Java and internal time API's Message-ID: <6a343a65-c2bf-475d-88de-0ec5c337e296@oracle.com> bug: https://bugs.openjdk.java.net/browse/JDK-8154710 webrev: http://cr.openjdk.java.net/~dholmes/8154710/webrev/ This change is small in nature but somewhat broad in scope. It "affects" the implementation of System.currentTimeMillis() in the Java space, and os::javaTimeMillis() in the VM. But on Solaris only. I say "affects" but the change will be unobservable other than in terms of performance. As of Solaris 11.3.6 a new in-memory timestamp has been made available (not unlike what has always existed on Windows). There are actually 3 different timestamps exported but the one we are interested in is get_nsecs_fromepoch - which is of course elapsed nanoseconds since the epoch - which is exactly what javaTimeMillis() is, but expressed in milliseconds. The in-memory timestamps have an update accuracy of 1ms, so are not suitable for any other API's that want the time-of-day, but at a greater accuracy. Microbenchmark shows the in-memory access is approx 45% faster (19ns on my test system) compared to the gettimeofday call (35ns). Thanks, David From tobias.hartmann at oracle.com Fri Apr 29 07:23:59 2016 From: tobias.hartmann at oracle.com (Tobias Hartmann) Date: Fri, 29 Apr 2016 09:23:59 +0200 Subject: [9] RFR(S): 8155608: String intrinsic range checks are not strict enough In-Reply-To: <3E5A7A17-9D61-4C80-A19D-9F510BCAADCE@oracle.com> References: <5721E9C4.7010208@oracle.com> <3E5A7A17-9D61-4C80-A19D-9F510BCAADCE@oracle.com> Message-ID: <57230C0F.7060702@oracle.com> Hi Chris, On 28.04.2016 22:11, Christian Thalinger wrote: > >> On Apr 28, 2016, at 12:45 AM, Tobias Hartmann > wrote: >> >> Hi >> >> please review the following patch: >> >> https://bugs.openjdk.java.net/browse/JDK-8155608 >> http://cr.openjdk.java.net/~thartmann/8155608/jdk/webrev.00/ > + checkBoundsOffCount(dstOff << 1, len << 1, dst.length); > > It?s funny that we still do << 1 instead of * 2 when every compiler on this planet can optimize that. Yeah, yeah, I know, it?s because of the interpreter but does it really matter? 
I used it more for consistency because we use "<< 1" in all the other places in StringLatin1, StringUTF16 and String as well. I think this originated from the "value.length >> String.coder()" use case to get the length depending on the String encoding. Besides that, I'm not sure if interpreter speed really matters here but the String methods are executed a lot (especially during startup). > Actually, I would prefer: > > + checkBoundsOffCount(dstOff * Character.BYTES, len * Character.BYTES, dst.length); I agree that this is more readable but for consistency I would like to go with the "<< 1" approach. Thanks, Tobias > >> http://cr.openjdk.java.net/~thartmann/8155608/hotspot/webrev.00/ >> >> Some String API methods use StringUTF16.putChar/getChar to read a char value from a byte array. For performance reasons, putChar/getChar is intrinsified by C1/C2 without range checks (like the Unsafe counterparts). The Java callers are responsible for adding the corresponding explicit range checks if necessary. >> >> I noticed that the Java level range checks in StringUTF16::compress(), StringUTF16::getChars() and StringLatin1::inflate() are not strong enough. Offset and length need to be multiplied by two because they index a char value in a byte array. I added a regression test that triggers the problem and also checks the other relevant intrinsics by invoking the methods with different arguments. >> >> Tested with regression test (-Xint/-Xcomp) and RBT (running). >> >> Thanks, >> Tobias > From david.holmes at oracle.com Fri Apr 29 07:28:41 2016 From: david.holmes at oracle.com (David Holmes) Date: Fri, 29 Apr 2016 17:28:41 +1000 Subject: fastdebug builds broken for jdk9/hs tree? In-Reply-To: <1461861323.25448.42.camel@redhat.com> References: <1461861323.25448.42.camel@redhat.com> Message-ID: Hi Severin, On 29/04/2016 2:35 AM, Severin Gehwolf wrote: > Hi, > > Recently, a fastdebug build of a server JVM fails for me on Linux > x86_64 with older sys/sdt.h and/or GCC. The same failure shows up since > a couple of days on the Zero builder[1]: > > /home/sgehwolf/openjdk9-hs-pristine/hotspot/src/share/vm/services/classLoadingService.cpp: In static member function ?static void ClassLoadingService::notify_class_loaded(InstanceKlass*, bool)?: > /home/sgehwolf/openjdk9-hs-pristine/hotspot/src/share/vm/services/classLoadingService.cpp:148: error: cannot reload integer constant operand in ?asm? > /home/sgehwolf/openjdk9-hs-pristine/hotspot/src/share/vm/services/classLoadingService.cpp:148: error: cannot reload integer constant operand in ?asm? > gmake[4]: *** [/home/sgehwolf/openjdk9-hs-pristine/build/linux-x86_64-normal-server-fastdebug/hotspot/variant-server/libjvm/objs/classLoadingService.o] Error 1 > gmake[4]: Leaving directory `/home/sgehwolf/openjdk9-hs-pristine/hotspot/make' > > On that line is a macro for dtrace: > > void ClassLoadingService::notify_class_loaded(InstanceKlass* k, bool shared_class) { > DTRACE_CLASSLOAD_PROBE(loaded, k, shared_class); // <<< This fails > > If I compile the same file *without* -DDTRACE_ENABLE it compiles fine. > A slowdebug build compiles fine too. I was able to *reproduce* this on > RHEL 6. It *does not* reproduce on F23. > > I had a brief look at the preprocessed compilation unit for > classLoadingService and the code that gets included for the dtrace > macro looks different. > > So here the questions: > > 1.) Has anything related to dtrace macros changed recently? > 2.) Has anybody else been seeing this? 
This would seem to be an issue with the content of sdt.h and the version of gcc being used. Have either of those changed at your end? This seems external to the OpenJDK sources ... though perhaps there is a build related issue? How recently did this start - did it happen after the switch to the new hotspot build? Cheers, David ----- > Thanks, > Severin > > [1] http://builder.classpath.org/jenkins/job/OpenJDK9_hs_rt_Zero/227/console > From sgehwolf at redhat.com Fri Apr 29 07:39:43 2016 From: sgehwolf at redhat.com (Severin Gehwolf) Date: Fri, 29 Apr 2016 09:39:43 +0200 Subject: fastdebug builds broken for jdk9/hs tree? In-Reply-To: References: <1461861323.25448.42.camel@redhat.com> Message-ID: <1461915583.3712.5.camel@redhat.com> On Fri, 2016-04-29 at 17:28 +1000, David Holmes wrote: > Hi Severin, > > On 29/04/2016 2:35 AM, Severin Gehwolf wrote: > > > > Hi, > > > > Recently, a fastdebug build of a server JVM fails for me on Linux > > x86_64 with older sys/sdt.h and/or GCC. The same failure shows up since > > a couple of days on the Zero builder[1]: > > > > /home/sgehwolf/openjdk9-hs-pristine/hotspot/src/share/vm/services/classLoadingService.cpp: In static member function ?static void ClassLoadingService::notify_class_loaded(InstanceKlass*, bool)?: > > /home/sgehwolf/openjdk9-hs-pristine/hotspot/src/share/vm/services/classLoadingService.cpp:148: error: cannot reload integer constant operand in ?asm? > > /home/sgehwolf/openjdk9-hs-pristine/hotspot/src/share/vm/services/classLoadingService.cpp:148: error: cannot reload integer constant operand in ?asm? > > gmake[4]: *** [/home/sgehwolf/openjdk9-hs-pristine/build/linux-x86_64-normal-server-fastdebug/hotspot/variant-server/libjvm/objs/classLoadingService.o] Error 1 > > gmake[4]: Leaving directory `/home/sgehwolf/openjdk9-hs-pristine/hotspot/make' > > > > On that line is a macro for dtrace: > > > > void ClassLoadingService::notify_class_loaded(InstanceKlass* k, bool shared_class) { > > ? DTRACE_CLASSLOAD_PROBE(loaded, k, shared_class); // <<< This fails > > > > If I compile the same file *without* -DDTRACE_ENABLE it compiles fine. > > A slowdebug build compiles fine too. I was able to *reproduce* this on > > RHEL 6. It *does not* reproduce on F23. > > > > I had a brief look at the preprocessed compilation unit for > > classLoadingService and the code that gets included for the dtrace > > macro looks different. > > > > So here the questions: > > > > 1.) Has anything related to dtrace macros changed recently? > > 2.) Has anybody else been seeing this? > This would seem to be an issue with the content of sdt.h and the version? > of gcc being used. Have either of those changed at your end? This seems? > external to the OpenJDK sources ... though perhaps there is a build? > related issue? How recently did this start - did it happen after the? > switch to the new hotspot build? It might be. I'll investigate. I'm not aware of any gcc/systemtap changes, but I'll have a closer look. I take it that's a no to question 1 above? Cheers, Severin > Cheers, > David > ----- > > > > > Thanks, > > Severin > > > > [1] http://builder.classpath.org/jenkins/job/OpenJDK9_hs_rt_Zero/227/console > > From aph at redhat.com Fri Apr 29 07:43:12 2016 From: aph at redhat.com (Andrew Haley) Date: Fri, 29 Apr 2016 08:43:12 +0100 Subject: fastdebug builds broken for jdk9/hs tree? 
In-Reply-To: <1461915583.3712.5.camel@redhat.com> References: <1461861323.25448.42.camel@redhat.com> <1461915583.3712.5.camel@redhat.com> Message-ID: <57231090.3020704@redhat.com> On 29/04/16 08:39, Severin Gehwolf wrote: > It might be. I'll investigate. I'm not aware of any gcc/systemtap > changes, but I'll have a closer look. It's probably a systemtap bug. I'll have a look if you like.# It would be helpful to see a fully-preprocessed expansion of that asm. Andrew. From tobias.hartmann at oracle.com Fri Apr 29 07:56:01 2016 From: tobias.hartmann at oracle.com (Tobias Hartmann) Date: Fri, 29 Apr 2016 09:56:01 +0200 Subject: [9] RFR(S): 8155608: String intrinsic range checks are not strict enough In-Reply-To: <5722979C.1020904@oracle.com> References: <5721E9C4.7010208@oracle.com> <5722979C.1020904@oracle.com> Message-ID: <57231391.4010006@oracle.com> Hi Vladimir, On 29.04.2016 01:07, Vladimir Kozlov wrote: > In StringUTF16.java should getChars() throw when srcBegin > srcEnd? Or there is check in other place? Yes, there is another range check in String::getChars() that throws an exception if srcBegin > srcEnd but we don't want to throw an exception in this case if StringUTF16::getChars() is invoked directly. > Hotspot test changes seems fine but do you really need new helper methods? Why not allocate dst array in TestStringIntrinsicRangeChecks.java? StringUTF16 and StringLatin1 are package private classes in java.lang because they are part of the String internals. Therefore, the test (which is not part of the java.lang package) cannot invoke the intrinsified methods. To circumvent this, we use the "patch-library approach" to inject a wrapper class into java.lang that allows us to call the package private methods from the test: 26 package java.lang; 27 28 /** 29 * A helper class to get access to package-private members 30 */ 31 public class Helper { We use the same approach for other compiler tests as well, for example the tests in test/compiler/jsr292/. Thanks, Tobias > Thanks, > Vladimir > > On 4/28/16 3:45 AM, Tobias Hartmann wrote: >> Hi >> >> please review the following patch: >> >> https://bugs.openjdk.java.net/browse/JDK-8155608 >> http://cr.openjdk.java.net/~thartmann/8155608/jdk/webrev.00/ >> http://cr.openjdk.java.net/~thartmann/8155608/hotspot/webrev.00/ >> >> Some String API methods use StringUTF16.putChar/getChar to read a char value from a byte array. For performance reasons, putChar/getChar is intrinsified by C1/C2 without range checks (like the Unsafe counterparts). The Java callers are responsible for adding the corresponding explicit range checks if necessary. >> >> I noticed that the Java level range checks in StringUTF16::compress(), StringUTF16::getChars() and StringLatin1::inflate() are not strong enough. Offset and length need to be multiplied by two because they index a char value in a byte array. I added a regression test that triggers the problem and also checks the other relevant intrinsics by invoking the methods with different arguments. >> >> Tested with regression test (-Xint/-Xcomp) and RBT (running). >> >> Thanks, >> Tobias >> From sgehwolf at redhat.com Fri Apr 29 07:56:36 2016 From: sgehwolf at redhat.com (Severin Gehwolf) Date: Fri, 29 Apr 2016 09:56:36 +0200 Subject: fastdebug builds broken for jdk9/hs tree? 
In-Reply-To: <57231090.3020704@redhat.com> References: <1461861323.25448.42.camel@redhat.com> <1461915583.3712.5.camel@redhat.com> <57231090.3020704@redhat.com> Message-ID: <1461916596.3712.7.camel@redhat.com> On Fri, 2016-04-29 at 08:43 +0100, Andrew Haley wrote: > On 29/04/16 08:39, Severin Gehwolf wrote: > > > > It might be. I'll investigate. I'm not aware of any gcc/systemtap > > changes, but I'll have a closer look. > It's probably a systemtap bug.??I'll have a look if you like.# Sure, it would help indeed. > It would be helpful to see a fully-preprocessed expansion of that > asm. Here you go (DTRACE_CLASSLOAD_PROBE(loaded, k, shared_class); preprocessed): { char* data = __null; int len = 0; Symbol* name = (k)->name(); if (name != __null) { data = (char*)name->bytes(); len = name->utf8_length(); } do { __asm__ __volatile__ ("990: nop" "\n" ".pushsection .note.stapsdt" "," "\"\"" "," "\"note\"" "\n" ".balign 4" "\n" ".4byte 992f-991f" "," "994f-993f" "," "3" "\n" "991: .asciz \"stapsdt\"" "\n" "992: .balign 4" "\n" "993: .8byte 990b" "\n" ".8byte _.stapsdt.base" "\n" ".8byte 0" "\n" ".asciz \"hotspot\"" "\n" ".asciz \"class__loaded\"" "\n" ".asciz \"%n[_SDT_S1]@%[_SDT_A1] %n[_SDT_S2]@%[_SDT_A2] %n[_SDT_S3]@%[_SDT_A3] %n[_SDT_S4]@%[_SDT_A4]\"" "\n" "994: .balign 4" "\n" ".popsection" "\n" :: [_SDT_S1] "n" (((!(__builtin_classify_type (data) == 14 || __builtin_classify_type (data) == 5) && __sdt_type<__typeof (data)>::__sdt_signed) ? 1 : -1) * (int) ((__builtin_classify_type (data) == 14 || __builtin_classify_type (data) == 5) ? sizeof (void *) : sizeof (data))), [_SDT_A1] "nor" ((data)), [_SDT_S2] "n" (((!(__builtin_classify_type (len) == 14 || __builtin_classify_type (len) == 5) && __sdt_type<__typeof (len)>::__sdt_signed) ? 1 : -1) * (int) ((__builtin_classify_type (len) == 14 || __builtin_classify_type (len) == 5) ? sizeof (void *) : sizeof (len))), [_SDT_A2] "nor" ((len)), [_SDT_S3] "n" (((!(__builtin_classify_type ((k)->class_loader()) == 14 || __builtin_classify_type ((k)->class_loader()) == 5) && __sdt_type<__typeof ((k)->class_loader())>::__sdt_signed) ? 1 : -1) * (int) ((__builtin_classify_type ((k)->class_loader()) == 14 || __builtin_classify_type ((k)->class_loader()) == 5) ? sizeof (void *) : sizeof ((k)->class_loader()))), [_SDT_A3] "nor" (((k)->class_loader())), [_SDT_S4] "n" (((!(__builtin_classify_type ((shared_class)) == 14 || __builtin_classify_type ((shared_class)) == 5) && __sdt_type<__typeof ((shared_class))>::__sdt_signed) ? 1 : -1) * (int) ((__builtin_classify_type ((shared_class)) == 14 || __builtin_classify_type ((shared_class)) == 5) ? sizeof (void *) : sizeof ((shared_class)))), [_SDT_A4] "nor" (((shared_class)))); __asm__ __volatile__ (".ifndef _.stapsdt.base" "\n" ".pushsection .stapsdt.base" "," "\"aG\"" "," "\"progbits\"" "," ".stapsdt.base" "," "comdat" "\n" ".weak _.stapsdt.base" "\n" ".hidden _.stapsdt.base" "\n" "_.stapsdt.base: .space 1" "\n" ".size _.stapsdt.base" "," "1" "\n" ".popsection" "\n" ".endif" "\n"); } while (0); }; Cheers, Severin From david.holmes at oracle.com Fri Apr 29 08:11:56 2016 From: david.holmes at oracle.com (David Holmes) Date: Fri, 29 Apr 2016 18:11:56 +1000 Subject: fastdebug builds broken for jdk9/hs tree? 
In-Reply-To: <1461915583.3712.5.camel@redhat.com> References: <1461861323.25448.42.camel@redhat.com> <1461915583.3712.5.camel@redhat.com> Message-ID: <74f1521c-c8d8-7409-f4ed-d1f1b6ee6878@oracle.com> On 29/04/2016 5:39 PM, Severin Gehwolf wrote: > On Fri, 2016-04-29 at 17:28 +1000, David Holmes wrote: >> Hi Severin, >> >> On 29/04/2016 2:35 AM, Severin Gehwolf wrote: >>> >>> Hi, >>> >>> Recently, a fastdebug build of a server JVM fails for me on Linux >>> x86_64 with older sys/sdt.h and/or GCC. The same failure shows up since >>> a couple of days on the Zero builder[1]: >>> >>> /home/sgehwolf/openjdk9-hs-pristine/hotspot/src/share/vm/services/classLoadingService.cpp: In static member function ?static void ClassLoadingService::notify_class_loaded(InstanceKlass*, bool)?: >>> /home/sgehwolf/openjdk9-hs-pristine/hotspot/src/share/vm/services/classLoadingService.cpp:148: error: cannot reload integer constant operand in ?asm? >>> /home/sgehwolf/openjdk9-hs-pristine/hotspot/src/share/vm/services/classLoadingService.cpp:148: error: cannot reload integer constant operand in ?asm? >>> gmake[4]: *** [/home/sgehwolf/openjdk9-hs-pristine/build/linux-x86_64-normal-server-fastdebug/hotspot/variant-server/libjvm/objs/classLoadingService.o] Error 1 >>> gmake[4]: Leaving directory `/home/sgehwolf/openjdk9-hs-pristine/hotspot/make' >>> >>> On that line is a macro for dtrace: >>> >>> void ClassLoadingService::notify_class_loaded(InstanceKlass* k, bool shared_class) { >>> DTRACE_CLASSLOAD_PROBE(loaded, k, shared_class); // <<< This fails >>> >>> If I compile the same file *without* -DDTRACE_ENABLE it compiles fine. >>> A slowdebug build compiles fine too. I was able to *reproduce* this on >>> RHEL 6. It *does not* reproduce on F23. >>> >>> I had a brief look at the preprocessed compilation unit for >>> classLoadingService and the code that gets included for the dtrace >>> macro looks different. >>> >>> So here the questions: >>> >>> 1.) Has anything related to dtrace macros changed recently? >>> 2.) Has anybody else been seeing this? >> This would seem to be an issue with the content of sdt.h and the version >> of gcc being used. Have either of those changed at your end? This seems >> external to the OpenJDK sources ... though perhaps there is a build >> related issue? How recently did this start - did it happen after the >> switch to the new hotspot build? > > It might be. I'll investigate. I'm not aware of any gcc/systemtap > changes, but I'll have a closer look. > > I take it that's a no to question 1 above? Nothing on our side, and there is no asm in anything in our sources AFAICS so I assume it is something in sdt.h. 
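For context on where the asm comes from: with dtrace probes enabled, the Linux build's DTRACE_*_PROBE macros ultimately go through systemtap's <sys/sdt.h>, and it is that header which emits the .note.stapsdt extended-asm block shown in the preprocessed expansion earlier in the thread. A minimal stand-alone user of the same macro family looks roughly like this (a sketch only: it is not the hotspot macro definition, it assumes the systemtap sdt development header is installed, and it makes no claim to reproduce the reload error by itself):

#include <sys/sdt.h>

// The four probe arguments become operands of an extended asm statement with
// "nor" constraints inside sdt.h; that asm is where GCC reports
// "cannot reload integer constant operand".
void notify_class_loaded_sketch(const char* name_bytes, int name_len,
                                void* class_loader, int shared) {
  DTRACE_PROBE4(hotspot_sketch, class__loaded,
                name_bytes, name_len, class_loader, shared);
}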
Only change here was a few weeks back for jigsaw: > hg log -p ./os/posix/dtrace/hotspot_jni.d changeset: 10487:c558850fac57 tag: jdk-9+111 parent: 10484:0de4d895a5c8 user: alanb date: Thu Mar 17 19:04:01 2016 +0000 summary: 8142968: Module System implementation diff -r 0de4d895a5c8 -r c558850fac57 src/os/posix/dtrace/hotspot_jni.d --- a/src/os/posix/dtrace/hotspot_jni.d +++ b/src/os/posix/dtrace/hotspot_jni.d @@ -300,6 +300,8 @@ probe GetLongField__return(uintptr_t); probe GetMethodID__entry(void*, void*, const char*, const char*); probe GetMethodID__return(uintptr_t); + probe GetModule__entry(void*, void*); + probe GetModule__return(void*); probe GetObjectArrayElement__entry(void*, void*, uintptr_t); probe GetObjectArrayElement__return(void*); David > Cheers, > Severin > >> Cheers, >> David >> ----- >> >>> >>> Thanks, >>> Severin >>> >>> [1] http://builder.classpath.org/jenkins/job/OpenJDK9_hs_rt_Zero/227/console >>> > From aph at redhat.com Fri Apr 29 08:21:26 2016 From: aph at redhat.com (Andrew Haley) Date: Fri, 29 Apr 2016 09:21:26 +0100 Subject: fastdebug builds broken for jdk9/hs tree? In-Reply-To: <1461916596.3712.7.camel@redhat.com> References: <1461861323.25448.42.camel@redhat.com> <1461915583.3712.5.camel@redhat.com> <57231090.3020704@redhat.com> <1461916596.3712.7.camel@redhat.com> Message-ID: <57231986.9010106@redhat.com> On 29/04/16 08:56, Severin Gehwolf wrote: > Here you go (DTRACE_CLASSLOAD_PROBE(loaded, k, shared_class); OK. Now take that expansion, run indent on it, and paste it in to the program where the macro was. Then we'll be able to get much better info about the reload failure. (See appended expansion) Andrew. { char *data = __null; int len = 0; Symbol *name = (k)->name (); if (name != __null) { data = (char *) name->bytes (); len = name->utf8_length (); } do { __asm__ __volatile__ ("990: nop" "\n" ".pushsection .note.stapsdt" "," "\"\"" "," "\"note\"" "\n" ".balign 4" "\n" ".4byte 992f-991f" "," "994f-993f" "," "3" "\n" "991: .asciz \"stapsdt\"" "\n" "992: .balign 4" "\n" "993: .8byte 990b" "\n" ".8byte _.stapsdt.base" "\n" ".8byte 0" "\n" ".asciz \"hotspot\"" "\n" ".asciz \"class__loaded\"" "\n" ".asciz \"%n[_SDT_S1]@%[_SDT_A1] %n[_SDT_S2]@%[_SDT_A2] %n[_SDT_S3]@%[_SDT_A3] %n[_SDT_S4]@%[_SDT_A4]\"" "\n" "994: .balign 4" "\n" ".popsection" "\n"::[_SDT_S1] "n" (((! (__builtin_classify_type (data) == 14 || __builtin_classify_type (data) == 5) && __sdt_type < __typeof (data) >:: __sdt_signed) ? 1 : -1) * (int) ((__builtin_classify_type (data) == 14 || __builtin_classify_type (data) == 5) ? sizeof (void *) : sizeof (data))), [_SDT_A1] "nor" ((data)), [_SDT_S2] "n" (((! (__builtin_classify_type (len) == 14 || __builtin_classify_type (len) == 5) && __sdt_type < __typeof (len) >::__sdt_signed) ? 1 : -1) * (int) ((__builtin_classify_type (len) == 14 || __builtin_classify_type (len) == 5) ? sizeof (void *) : sizeof (len))), [_SDT_A2] "nor" ((len)), [_SDT_S3] "n" (((! (__builtin_classify_type ((k)->class_loader ()) == 14 || __builtin_classify_type ((k)-> class_loader ()) == 5) && __sdt_type < __typeof ((k)-> class_loader ()) >:: __sdt_signed) ? 1 : -1) * (int) ((__builtin_classify_type ((k)->class_loader ()) == 14 || __builtin_classify_type ((k)-> class_loader ()) == 5) ? sizeof (void *) : sizeof ((k)-> class_loader ()))), [_SDT_A3] "nor" (((k)->class_loader ())), [_SDT_S4] "n" (((! (__builtin_classify_type ((shared_class)) == 14 || __builtin_classify_type ((shared_class)) == 5) && __sdt_type < __typeof ((shared_class)) >:: __sdt_signed) ? 
1 : -1) * (int) ((__builtin_classify_type ((shared_class)) == 14 || __builtin_classify_type ((shared_class)) == 5) ? sizeof (void *) : sizeof ((shared_class)))),[_SDT_A4] "nor" (((shared_class)))); __asm__ __volatile__ (".ifndef _.stapsdt.base" "\n" ".pushsection .stapsdt.base" "," "\"aG\"" "," "\"progbits\"" "," ".stapsdt.base" "," "comdat" "\n" ".weak _.stapsdt.base" "\n" ".hidden _.stapsdt.base" "\n" "_.stapsdt.base: .space 1" "\n" ".size _.stapsdt.base" "," "1" "\n" ".popsection" "\n" ".endif" "\n"); } while (0); }; From sgehwolf at redhat.com Fri Apr 29 08:29:55 2016 From: sgehwolf at redhat.com (Severin Gehwolf) Date: Fri, 29 Apr 2016 10:29:55 +0200 Subject: fastdebug builds broken for jdk9/hs tree? In-Reply-To: <57231986.9010106@redhat.com> References: <1461861323.25448.42.camel@redhat.com> <1461915583.3712.5.camel@redhat.com> <57231090.3020704@redhat.com> <1461916596.3712.7.camel@redhat.com> <57231986.9010106@redhat.com> Message-ID: <1461918595.3712.11.camel@redhat.com> On Fri, 2016-04-29 at 09:21 +0100, Andrew Haley wrote: > On 29/04/16 08:56, Severin Gehwolf wrote: > > > > Here you go (DTRACE_CLASSLOAD_PROBE(loaded, k, shared_class); > OK.??Now take that expansion, run indent on it, and paste it in > to the program where the macro was.??Then we'll be able to get > much better info about the reload failure. > > (See appended expansion) > > Andrew. > > > { > ? char *data = __null; > ? int len = 0; > ? Symbol *name = (k)->name (); > ? if (name != __null) > ????{ > ??????data = (char *) name->bytes (); > ??????len = name->utf8_length (); > ????} > ? do > ????{ > ??????__asm__ __volatile__ ("990: nop" "\n" ".pushsection .note.stapsdt" "," > ????"\"\"" "," "\"note\"" "\n" ".balign 4" "\n" > ????".4byte 992f-991f" "," "994f-993f" "," "3" "\n" > ????"991: .asciz \"stapsdt\"" "\n" "992: .balign 4" > ????"\n" "993: .8byte 990b" "\n" > ????".8byte _.stapsdt.base" "\n" ".8byte 0" "\n" > ????".asciz \"hotspot\"" "\n" > ????".asciz \"class__loaded\"" "\n" > ????".asciz \"%n[_SDT_S1]@%[_SDT_A1] %n[_SDT_S2]@%[_SDT_A2] %n[_SDT_S3]@%[_SDT_A3] %n[_SDT_S4]@%[_SDT_A4]\"" > ????"\n" "994: .balign 4" "\n" ".popsection" > ????"\n"::[_SDT_S1] > ????"n" (((! > ???(__builtin_classify_type (data) == 14 > ????|| __builtin_classify_type (data) == 5) > ???&& __sdt_type < > ???__typeof (data) >:: > ???__sdt_signed) ? 1 : -1) * > ?(int) ((__builtin_classify_type (data) == 14 > ?|| __builtin_classify_type (data) == > ?5) ? sizeof (void *) : > sizeof (data))), > ????[_SDT_A1] "nor" ((data)), > ????[_SDT_S2] > ????"n" (((! > ???(__builtin_classify_type (len) == 14 > ????|| __builtin_classify_type (len) == 5) > ???&& __sdt_type < > ???__typeof (len) >::__sdt_signed) ? 1 : -1) * > ?(int) ((__builtin_classify_type (len) == 14 > ?|| __builtin_classify_type (len) == > ?5) ? sizeof (void *) : > sizeof (len))), > ????[_SDT_A2] "nor" ((len)), > ????[_SDT_S3] > ????"n" (((! > ???(__builtin_classify_type > ????((k)->class_loader ()) == 14 > ????|| __builtin_classify_type ((k)-> > class_loader > ()) == 5) > ???&& __sdt_type < > ???__typeof ((k)-> > ?????class_loader ()) >:: > ???__sdt_signed) ? 1 : -1) * > ?(int) ((__builtin_classify_type > ?((k)->class_loader ()) == 14 > ?|| __builtin_classify_type ((k)-> > ?????class_loader > ?????()) == > ?5) ? sizeof (void *) : sizeof ((k)-> > class_loader > ()))), > ????[_SDT_A3] "nor" (((k)->class_loader ())), > ????[_SDT_S4] > ????"n" (((! 
> ???(__builtin_classify_type ((shared_class)) > ????== 14 > ????|| > ????__builtin_classify_type ((shared_class)) > ????== 5) > ???&& __sdt_type < > ???__typeof ((shared_class)) >:: > ???__sdt_signed) ? 1 : -1) * > ?(int) ((__builtin_classify_type > ?((shared_class)) == 14 > ?|| > ?__builtin_classify_type ((shared_class)) == 5) ? sizeof (void *) : sizeof ((shared_class)))),[_SDT_A4] "nor" (((shared_class)))); > ??????__asm__ __volatile__ (".ifndef _.stapsdt.base" "\n" > ????".pushsection .stapsdt.base" "," "\"aG\"" "," > ????"\"progbits\"" "," ".stapsdt.base" "," "comdat" > ????"\n" ".weak _.stapsdt.base" "\n" > ????".hidden _.stapsdt.base" "\n" > ????"_.stapsdt.base: .space 1" "\n" > ????".size _.stapsdt.base" "," "1" "\n" ".popsection" > ????"\n" ".endif" "\n"); > ????} > ? while (0); > }; > Done. Error now is: /home/sgehwolf/openjdk9-hs-pristine/hotspot/src/share/vm/services/classLoadingService.cpp: In static member function ?static void ClassLoadingService::notify_class_loaded(InstanceKlass*, bool)?: /home/sgehwolf/openjdk9-hs-pristine/hotspot/src/share/vm/services/classLoadingService.cpp:226: error: cannot reload integer constant operand in ?asm? /home/sgehwolf/openjdk9-hs-pristine/hotspot/src/share/vm/services/classLoadingService.cpp:226: error: cannot reload integer constant operand in ?asm? It's in this snippet: (int) ((__builtin_classify_type ?????????????????????????????????????????((shared_class)) == 14 ?????????????????????????????????????????|| ?????????????????????????????????????????__builtin_classify_type ((shared_class)) == 5) ? sizeof (void *) : sizeof ((shared_class)))),[_SDT_A4] "nor" (((shared_class)))); // <<<< fails on this line Thanks, Severin From sgehwolf at redhat.com Fri Apr 29 09:24:46 2016 From: sgehwolf at redhat.com (Severin Gehwolf) Date: Fri, 29 Apr 2016 11:24:46 +0200 Subject: fastdebug builds broken for jdk9/hs tree? In-Reply-To: <74f1521c-c8d8-7409-f4ed-d1f1b6ee6878@oracle.com> References: <1461861323.25448.42.camel@redhat.com> <1461915583.3712.5.camel@redhat.com> <74f1521c-c8d8-7409-f4ed-d1f1b6ee6878@oracle.com> Message-ID: <1461921886.3712.21.camel@redhat.com> On Fri, 2016-04-29 at 18:11 +1000, David Holmes wrote: > On 29/04/2016 5:39 PM, Severin Gehwolf wrote: > > > > On Fri, 2016-04-29 at 17:28 +1000, David Holmes wrote: > > > > > > Hi Severin, > > > > > > On 29/04/2016 2:35 AM, Severin Gehwolf wrote: > > > > > > > > > > > > Hi, > > > > > > > > Recently, a fastdebug build of a server JVM fails for me on Linux > > > > x86_64 with older sys/sdt.h and/or GCC. The same failure shows up since > > > > a couple of days on the Zero builder[1]: > > > > > > > > /home/sgehwolf/openjdk9-hs-pristine/hotspot/src/share/vm/services/classLoadingService.cpp: In static member function ?static void ClassLoadingService::notify_class_loaded(InstanceKlass*, bool)?: > > > > /home/sgehwolf/openjdk9-hs-pristine/hotspot/src/share/vm/services/classLoadingService.cpp:148: error: cannot reload integer constant operand in ?asm? > > > > /home/sgehwolf/openjdk9-hs-pristine/hotspot/src/share/vm/services/classLoadingService.cpp:148: error: cannot reload integer constant operand in ?asm? 
> > > > gmake[4]: *** [/home/sgehwolf/openjdk9-hs-pristine/build/linux-x86_64-normal-server-fastdebug/hotspot/variant-server/libjvm/objs/classLoadingService.o] Error 1 > > > > gmake[4]: Leaving directory `/home/sgehwolf/openjdk9-hs-pristine/hotspot/make' > > > > > > > > On that line is a macro for dtrace: > > > > > > > > void ClassLoadingService::notify_class_loaded(InstanceKlass* k, bool shared_class) { > > > > ? DTRACE_CLASSLOAD_PROBE(loaded, k, shared_class); // <<< This fails > > > > > > > > If I compile the same file *without* -DDTRACE_ENABLE it compiles fine. > > > > A slowdebug build compiles fine too. I was able to *reproduce* this on > > > > RHEL 6. It *does not* reproduce on F23. > > > > > > > > I had a brief look at the preprocessed compilation unit for > > > > classLoadingService and the code that gets included for the dtrace > > > > macro looks different. > > > > > > > > So here the questions: > > > > > > > > 1.) Has anything related to dtrace macros changed recently? > > > > 2.) Has anybody else been seeing this? > > > This would seem to be an issue with the content of sdt.h and the version > > > of gcc being used. Have either of those changed at your end? This seems > > > external to the OpenJDK sources ... though perhaps there is a build > > > related issue? How recently did this start - did it happen after the > > > switch to the new hotspot build? > > It might be. I'll investigate. I'm not aware of any gcc/systemtap > > changes, but I'll have a closer look. > > > > I take it that's a no to question 1 above? > Nothing on our side, and there is no asm in anything in our sources? > AFAICS so I assume it is something in sdt.h. Only change here was a few? > weeks back for jigsaw: > > ? > hg log -p ./os/posix/dtrace/hotspot_jni.d > changeset:???10487:c558850fac57 > tag:?????????jdk-9+111 > parent:??????10484:0de4d895a5c8 > user:????????alanb > date:????????Thu Mar 17 19:04:01 2016 +0000 > summary:?????8142968: Module System implementation > > diff -r 0de4d895a5c8 -r c558850fac57 src/os/posix/dtrace/hotspot_jni.d > --- a/src/os/posix/dtrace/hotspot_jni.d > +++ b/src/os/posix/dtrace/hotspot_jni.d > @@ -300,6 +300,8 @@ > ????probe GetLongField__return(uintptr_t); > ????probe GetMethodID__entry(void*, void*, const char*, const char*); > ????probe GetMethodID__return(uintptr_t); > +??probe GetModule__entry(void*, void*); > +??probe GetModule__return(void*); > ????probe GetObjectArrayElement__entry(void*, void*, uintptr_t); > ????probe GetObjectArrayElement__return(void*); OK. Thanks, David! Cheers, Severin > David > > > > > Cheers, > > Severin > > > > > > > > Cheers, > > > David > > > ----- > > > > > > > > > > > > > > > Thanks, > > > > Severin > > > > > > > > [1] http://builder.classpath.org/jenkins/job/OpenJDK9_hs_rt_Zero/227/console > > > > From aleksey.shipilev at oracle.com Fri Apr 29 09:50:33 2016 From: aleksey.shipilev at oracle.com (Aleksey Shipilev) Date: Fri, 29 Apr 2016 12:50:33 +0300 Subject: (S) RFR: 8154710: [Solaris] Investigate use of in-memory low-resolution timestamps for Java and internal time API's In-Reply-To: <6a343a65-c2bf-475d-88de-0ec5c337e296@oracle.com> References: <6a343a65-c2bf-475d-88de-0ec5c337e296@oracle.com> Message-ID: <57232E69.20800@oracle.com> On 04/29/2016 02:09 AM, David Holmes wrote: > bug: https://bugs.openjdk.java.net/browse/JDK-8154710 > webrev: http://cr.openjdk.java.net/~dholmes/8154710/webrev/ Looks good. Is hrtime_t always integral, so you can "(hrtime_t)now / NANOSECS_PER_MILLISEC" it? 
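For readers without the webrev: hrtime_t is a signed 64-bit integer on Solaris (as confirmed in the reply below), so that expression is plain integer division. A rough sketch of the overall shape of such a change follows; the exported symbol name, the dlsym() lookup and the fallback are assumptions based on this thread, not the actual patch:

#include <dlfcn.h>
#include <sys/time.h>

// Sketch only. On Solaris 11.3.6+ an in-memory timestamp (nanoseconds since
// the epoch, updated roughly every 1ms) can be read through a libc function;
// elsewhere the pointer stays NULL and the gettimeofday() path is kept.
typedef long long (*nsec_fromepoch_func_t)();
static nsec_fromepoch_func_t _get_nsec_fromepoch = NULL;
static const long long NANOSECS_PER_MILLISEC = 1000000;

static void init_fast_millis() {
  // Resolves only where libc exports such a symbol; stays NULL elsewhere.
  _get_nsec_fromepoch =
      (nsec_fromepoch_func_t) dlsym(RTLD_DEFAULT, "get_nsec_fromepoch");
}

static long long java_time_millis_sketch() {
  if (_get_nsec_fromepoch != NULL) {
    return _get_nsec_fromepoch() / NANOSECS_PER_MILLISEC;  // 64-bit integer division
  }
  struct timeval tv;                 // fallback: the existing gettimeofday() path
  gettimeofday(&tv, NULL);
  return (long long) tv.tv_sec * 1000 + tv.tv_usec / 1000;
}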
> This change is small in nature but somewhat broad in scope. It "affects" > the implementation of System.currentTimeMillis() in the Java space, and > os::javaTimeMillis() in the VM. But on Solaris only. > > I say "affects" but the change will be unobservable other than in terms > of performance. Observable enough to me. Thanks, -Aleksey From david.holmes at oracle.com Fri Apr 29 10:05:24 2016 From: david.holmes at oracle.com (David Holmes) Date: Fri, 29 Apr 2016 20:05:24 +1000 Subject: (S) RFR: 8154710: [Solaris] Investigate use of in-memory low-resolution timestamps for Java and internal time API's In-Reply-To: <57232E69.20800@oracle.com> References: <6a343a65-c2bf-475d-88de-0ec5c337e296@oracle.com> <57232E69.20800@oracle.com> Message-ID: On 29/04/2016 7:50 PM, Aleksey Shipilev wrote: > On 04/29/2016 02:09 AM, David Holmes wrote: >> bug: https://bugs.openjdk.java.net/browse/JDK-8154710 >> webrev: http://cr.openjdk.java.net/~dholmes/8154710/webrev/ > > Looks good. > > Is hrtime_t always integral, so you can "(hrtime_t)now / > NANOSECS_PER_MILLISEC" it? Yes it is a 64-bit (long long) signed integer. >> This change is small in nature but somewhat broad in scope. It "affects" >> the implementation of System.currentTimeMillis() in the Java space, and >> os::javaTimeMillis() in the VM. But on Solaris only. >> >> I say "affects" but the change will be unobservable other than in terms >> of performance. > > Observable enough to me. :) Any apps you can think of that might show benefit from this? Thanks, David > Thanks, > -Aleksey > From aleksey.shipilev at oracle.com Fri Apr 29 10:12:46 2016 From: aleksey.shipilev at oracle.com (Aleksey Shipilev) Date: Fri, 29 Apr 2016 13:12:46 +0300 Subject: (S) RFR: 8154710: [Solaris] Investigate use of in-memory low-resolution timestamps for Java and internal time API's In-Reply-To: References: <6a343a65-c2bf-475d-88de-0ec5c337e296@oracle.com> <57232E69.20800@oracle.com> Message-ID: <5723339E.306@oracle.com> On 04/29/2016 01:05 PM, David Holmes wrote: > On 29/04/2016 7:50 PM, Aleksey Shipilev wrote: >> On 04/29/2016 02:09 AM, David Holmes wrote: >>> This change is small in nature but somewhat broad in scope. It "affects" >>> the implementation of System.currentTimeMillis() in the Java space, and >>> os::javaTimeMillis() in the VM. But on Solaris only. >>> >>> I say "affects" but the change will be unobservable other than in terms >>> of performance. >> >> Observable enough to me. > > :) Any apps you can think of that might show benefit from this? Theoretically, this might affect heavily logging apps. IIRC, SPECjbb2000 was affected by currentTimeMillis performance. But, I see no reason in trying to justify the change, apart from the targeted microbenchmark. -Aleksey From charlie.hunt at oracle.com Fri Apr 29 12:57:37 2016 From: charlie.hunt at oracle.com (charlie hunt) Date: Fri, 29 Apr 2016 07:57:37 -0500 Subject: (S) RFR: 8154710: [Solaris] Investigate use of in-memory low-resolution timestamps for Java and internal time API's In-Reply-To: <5723339E.306@oracle.com> References: <6a343a65-c2bf-475d-88de-0ec5c337e296@oracle.com> <57232E69.20800@oracle.com> <5723339E.306@oracle.com> Message-ID: > On Apr 29, 2016, at 5:12 AM, Aleksey Shipilev wrote: > >> On 04/29/2016 01:05 PM, David Holmes wrote: >>> On 29/04/2016 7:50 PM, Aleksey Shipilev wrote: >>>> On 04/29/2016 02:09 AM, David Holmes wrote: >>>> This change is small in nature but somewhat broad in scope. 
It "affects" >>>> the implementation of System.currentTimeMillis() in the Java space, and >>>> os::javaTimeMillis() in the VM. But on Solaris only. >>>> >>>> I say "affects" but the change will be unobservable other than in terms >>>> of performance. >>> >>> Observable enough to me. >> >> :) Any apps you can think of that might show benefit from this? > > Theoretically, this might affect heavily logging apps. IIRC, SPECjbb2000 > was affected by currentTimeMillis performance. But, I see no reason in > trying to justify the change, apart from the targeted microbenchmark. > > -Aleksey Fwiw, "back in the day" there was a slight gap in perf between Solaris and Windows on SPECjbb2005. That slight gap was attributed to differences in currentTimeMillis overhead. Charlie From aph at redhat.com Fri Apr 29 13:14:03 2016 From: aph at redhat.com (Andrew Haley) Date: Fri, 29 Apr 2016 14:14:03 +0100 Subject: RFR (L) 8154580: Save mirror in interpreter frame to enable cleanups of CLDClosure In-Reply-To: <57185E31.9010705@oracle.com> References: <57168C17.40307@oracle.com> <57185E31.9010705@oracle.com> Message-ID: <57235E1B.8080001@redhat.com> On 04/21/2016 05:59 AM, Stefan Karlsson wrote: >> Need testing with ppc and aarch64 open code. I implemented the >> changes but I can't test them. It seems to be fine on AArch64, thanks. It does break jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/aarch64/AARCH64Frame.java, though, which needs the following patch. I guess this applies to all platforms. Andrew. --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/aarch64/AARCH64Frame.java +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/aarch64/AARCH64Frame.java @@ -49,11 +49,12 @@ private static final int SENDER_SP_OFFSET = 2; // Interpreter frames - private static final int INTERPRETER_FRAME_MIRROR_OFFSET = 2; // for native calls only private static final int INTERPRETER_FRAME_SENDER_SP_OFFSET = -1; private static final int INTERPRETER_FRAME_LAST_SP_OFFSET = INTERPRETER_FRAME_SENDER_SP_OFFSET - 1; private static final int INTERPRETER_FRAME_METHOD_OFFSET = INTERPRETER_FRAME_LAST_SP_OFFSET - 1; private static int INTERPRETER_FRAME_MDX_OFFSET; // Non-core builds only + private static int INTERPRETER_FRAME_PADDING_OFFSET; + private static int INTERPRETER_FRAME_MIRROR_OFFSET; private static int INTERPRETER_FRAME_CACHE_OFFSET; private static int INTERPRETER_FRAME_LOCALS_OFFSET; private static int INTERPRETER_FRAME_BCX_OFFSET; @@ -79,7 +80,9 @@ private static synchronized void initialize(TypeDataBase db) { INTERPRETER_FRAME_MDX_OFFSET = INTERPRETER_FRAME_METHOD_OFFSET - 1; - INTERPRETER_FRAME_CACHE_OFFSET = INTERPRETER_FRAME_MDX_OFFSET - 1; + INTERPRETER_FRAME_PADDING_OFFSET = INTERPRETER_FRAME_MDX_OFFSET - 1; + INTERPRETER_FRAME_MIRROR_OFFSET = INTERPRETER_FRAME_PADDING_OFFSET - 1; + INTERPRETER_FRAME_CACHE_OFFSET = INTERPRETER_FRAME_MIRROR_OFFSET - 1; INTERPRETER_FRAME_LOCALS_OFFSET = INTERPRETER_FRAME_CACHE_OFFSET - 1; INTERPRETER_FRAME_BCX_OFFSET = INTERPRETER_FRAME_LOCALS_OFFSET - 1; INTERPRETER_FRAME_INITIAL_SP_OFFSET = INTERPRETER_FRAME_BCX_OFFSET - 1; From adinn at redhat.com Fri Apr 29 13:18:49 2016 From: adinn at redhat.com (Andrew Dinn) Date: Fri, 29 Apr 2016 14:18:49 +0100 Subject: RFR (L) 8154580: Save mirror in interpreter frame to enable cleanups of CLDClosure In-Reply-To: <57235E1B.8080001@redhat.com> References: <57168C17.40307@oracle.com> <57185E31.9010705@oracle.com> <57235E1B.8080001@redhat.com> Message-ID: <57235F39.5080204@redhat.com> On 29/04/16 14:14, Andrew 
Haley wrote: > On 04/21/2016 05:59 AM, Stefan Karlsson wrote: >>> Need testing with ppc and aarch64 open code. I implemented the >>> changes but I can't test them. > > It seems to be fine on AArch64, thanks. That's probably something to do with me checking the patch on behalf on Coleen last week before it went in (you have not been reading my weekly status reports :-) > It does break > jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/aarch64/AARCH64Frame.java, > though, which needs the following patch. I guess this applies to all > platforms. Ah, well, didn't check that bit though! regards, Andrew Dinn ----------- From sgehwolf at redhat.com Fri Apr 29 13:30:47 2016 From: sgehwolf at redhat.com (Severin Gehwolf) Date: Fri, 29 Apr 2016 15:30:47 +0200 Subject: fastdebug builds broken for jdk9/hs tree? In-Reply-To: <57231986.9010106@redhat.com> References: <1461861323.25448.42.camel@redhat.com> <1461915583.3712.5.camel@redhat.com> <57231090.3020704@redhat.com> <1461916596.3712.7.camel@redhat.com> <57231986.9010106@redhat.com> Message-ID: <1461936647.3712.41.camel@redhat.com> On Fri, 2016-04-29 at 09:21 +0100, Andrew Haley wrote: > On 29/04/16 08:56, Severin Gehwolf wrote: > > > > Here you go (DTRACE_CLASSLOAD_PROBE(loaded, k, shared_class); > OK.??Now take that expansion, run indent on it, and paste it in > to the program where the macro was.??Then we'll be able to get > much better info about the reload failure. For the curious, this is the bit that trips up GCC?4.4.7 and GCC?4.7.2 and seems to work fine on GCC 5.3.1: --- /home/sgehwolf/openjdk9-hs-pristine/hotspot/src/share/vm/services/classLoadingService.cpp.fail_mini 2016-04-29 14:56:30.711924152 +0200 +++ /home/sgehwolf/openjdk9-hs-pristine/hotspot/src/share/vm/services/classLoadingService.cpp 2016-04-29 14:57:00.102527106 +0200 @@ -170,7 +170,7 @@ ?????????????????????????????"\n":: ?????????????????????????????[_SDT_S3] ?????????????????????????????"n" ( 1 ), -????????????????????????????[_SDT_A3] "nor" ( ((k)->class_loader ()))); +????????????????????????????[_SDT_A3] "nor" ( 1 )); ???????__asm__ __volatile__ (".ifndef _.stapsdt.base" "\n" ?????????????????????????????".pushsection .stapsdt.base" "," "\"aG\"" "," ?????????????????????????????"\"progbits\"" "," ".stapsdt.base" "," "comdat" Affected GCC versions choke on the "(k)->class_loader ()" bit with -O3 :-( Cheers, Severin From daniel.fuchs at oracle.com Fri Apr 29 13:35:44 2016 From: daniel.fuchs at oracle.com (Daniel Fuchs) Date: Fri, 29 Apr 2016 15:35:44 +0200 Subject: (S) RFR: 8154710: [Solaris] Investigate use of in-memory low-resolution timestamps for Java and internal time API's In-Reply-To: <5723339E.306@oracle.com> References: <6a343a65-c2bf-475d-88de-0ec5c337e296@oracle.com> <57232E69.20800@oracle.com> <5723339E.306@oracle.com> Message-ID: <6f257e7e-d6b8-e489-86d3-6948af564e0f@oracle.com> Hi Aleksey, On 29/04/16 12:12, Aleksey Shipilev wrote: > On 04/29/2016 01:05 PM, David Holmes wrote: >> On 29/04/2016 7:50 PM, Aleksey Shipilev wrote: >>> On 04/29/2016 02:09 AM, David Holmes wrote: >>>> This change is small in nature but somewhat broad in scope. It "affects" >>>> the implementation of System.currentTimeMillis() in the Java space, and >>>> os::javaTimeMillis() in the VM. But on Solaris only. >>>> >>>> I say "affects" but the change will be unobservable other than in terms >>>> of performance. >>> >>> Observable enough to me. >> >> :) Any apps you can think of that might show benefit from this? > > Theoretically, this might affect heavily logging apps. 
IIRC, SPECjbb2000 > was affected by currentTimeMillis performance. But, I see no reason in > trying to justify the change, apart from the targeted microbenchmark. If by "logging" you mean java.util.logging then this should have no effect as logging now calls os::javaTimeSystemUTC (through java.time), to get more precise time stamps. best regards, -- daniel > > -Aleksey > From charlie.hunt at oracle.com Fri Apr 29 13:46:31 2016 From: charlie.hunt at oracle.com (charlie hunt) Date: Fri, 29 Apr 2016 08:46:31 -0500 Subject: (S) RFR: 8154710: [Solaris] Investigate use of in-memory low-resolution timestamps for Java and internal time API's In-Reply-To: <6f257e7e-d6b8-e489-86d3-6948af564e0f@oracle.com> References: <6a343a65-c2bf-475d-88de-0ec5c337e296@oracle.com> <57232E69.20800@oracle.com> <5723339E.306@oracle.com> <6f257e7e-d6b8-e489-86d3-6948af564e0f@oracle.com> Message-ID: <64C391E3-9FFC-4101-BD86-9F0936860677@oracle.com> > On Apr 29, 2016, at 8:35 AM, Daniel Fuchs wrote: > > Hi Aleksey, > > On 29/04/16 12:12, Aleksey Shipilev wrote: >> On 04/29/2016 01:05 PM, David Holmes wrote: >>> On 29/04/2016 7:50 PM, Aleksey Shipilev wrote: >>>> On 04/29/2016 02:09 AM, David Holmes wrote: >>>>> This change is small in nature but somewhat broad in scope. It "affects" >>>>> the implementation of System.currentTimeMillis() in the Java space, and >>>>> os::javaTimeMillis() in the VM. But on Solaris only. >>>>> >>>>> I say "affects" but the change will be unobservable other than in terms >>>>> of performance. >>>> >>>> Observable enough to me. >>> >>> :) Any apps you can think of that might show benefit from this? >> >> Theoretically, this might affect heavily logging apps. IIRC, SPECjbb2000 >> was affected by currentTimeMillis performance. But, I see no reason in >> trying to justify the change, apart from the targeted microbenchmark. > > If by "logging" you mean java.util.logging then this should have no > effect as logging now calls os::javaTimeSystemUTC (through java.time), > to get more precise time stamps. > > best regards, > > ? daniel I think Alexey means getting timestamps via System.currentTimeMillis() and internal JVM?s os::javaTimeMillis(), (which could have included logging). That was the intention with my comment wrt SPECjbb2005, (of which was of similar flavor as SPECjbb2000). The good news (to me anyway) is SPECjbb2000 and SPECjbb2005 have been retired in favor of SPECjbb2015. hths, charlie > >> >> -Aleksey From coleen.phillimore at oracle.com Fri Apr 29 14:13:01 2016 From: coleen.phillimore at oracle.com (Coleen Phillimore) Date: Fri, 29 Apr 2016 10:13:01 -0400 Subject: RFR (L) 8154580: Save mirror in interpreter frame to enable cleanups of CLDClosure In-Reply-To: <57235F39.5080204@redhat.com> References: <57168C17.40307@oracle.com> <57185E31.9010705@oracle.com> <57235E1B.8080001@redhat.com> <57235F39.5080204@redhat.com> Message-ID: Thanks for checking out this change. It turned out that this serviceability agent change didn't affect the other platforms, at least for the tests that we support, except one of our closed platform. Was the test failure serviceability/sa/TestStackTrace.java ? Your change looks good, if you want to push it. Coleen On 4/29/16 9:18 AM, Andrew Dinn wrote: > On 29/04/16 14:14, Andrew Haley wrote: >> On 04/21/2016 05:59 AM, Stefan Karlsson wrote: >>>> Need testing with ppc and aarch64 open code. I implemented the >>>> changes but I can't test them. >> It seems to be fine on AArch64, thanks. 
> That's probably something to do with me checking the patch on behalf on > Coleen last week before it went in (you have not been reading my weekly > status reports :-) > >> It does break >> jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/aarch64/AARCH64Frame.java, >> though, which needs the following patch. I guess this applies to all >> platforms. > Ah, well, didn't check that bit though! > > regards, > > > Andrew Dinn > ----------- > From coleen.phillimore at oracle.com Fri Apr 29 14:14:50 2016 From: coleen.phillimore at oracle.com (Coleen Phillimore) Date: Fri, 29 Apr 2016 10:14:50 -0400 Subject: RFR (L) 8154580: Save mirror in interpreter frame to enable cleanups of CLDClosure In-Reply-To: References: <57168C17.40307@oracle.com> <57185E31.9010705@oracle.com> <57235E1B.8080001@redhat.com> <57235F39.5080204@redhat.com> Message-ID: <101be161-cfb4-b2d4-0b35-0d497d3dfa19@oracle.com> On 4/29/16 10:13 AM, Coleen Phillimore wrote: > > Thanks for checking out this change. It turned out that this > serviceability agent change didn't affect the other platforms, at > least for the tests that we support, except one of our closed platform. > Was the test failure serviceability/sa/TestStackTrace.java ? (removed link) > Your change looks good, if you want to push it. > Coleen > > > On 4/29/16 9:18 AM, Andrew Dinn wrote: >> On 29/04/16 14:14, Andrew Haley wrote: >>> On 04/21/2016 05:59 AM, Stefan Karlsson wrote: >>>>> Need testing with ppc and aarch64 open code. I implemented the >>>>> changes but I can't test them. >>> It seems to be fine on AArch64, thanks. >> That's probably something to do with me checking the patch on behalf on >> Coleen last week before it went in (you have not been reading my weekly >> status reports :-) >> >>> It does break >>> jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/aarch64/AARCH64Frame.java, >>> >>> though, which needs the following patch. I guess this applies to all >>> platforms. >> Ah, well, didn't check that bit though! >> >> regards, >> >> >> Andrew Dinn >> ----------- >> > From aph at redhat.com Fri Apr 29 14:17:26 2016 From: aph at redhat.com (Andrew Haley) Date: Fri, 29 Apr 2016 15:17:26 +0100 Subject: RFR (L) 8154580: Save mirror in interpreter frame to enable cleanups of CLDClosure In-Reply-To: References: <57168C17.40307@oracle.com> <57185E31.9010705@oracle.com> <57235E1B.8080001@redhat.com> <57235F39.5080204@redhat.com> Message-ID: <57236CF6.1060109@redhat.com> On 04/29/2016 03:13 PM, Coleen Phillimore wrote: > Was the test failure serviceability/sa/TestStackTrace.java > > ? No, it was interactive use of HSDB. > Your change looks good, if you want to push it. OK, thanks. Andrew. From vladimir.kozlov at oracle.com Fri Apr 29 14:48:48 2016 From: vladimir.kozlov at oracle.com (Vladimir Kozlov) Date: Fri, 29 Apr 2016 07:48:48 -0700 Subject: [9] RFR(S): 8155608: String intrinsic range checks are not strict enough In-Reply-To: <57231391.4010006@oracle.com> References: <5721E9C4.7010208@oracle.com> <5722979C.1020904@oracle.com> <57231391.4010006@oracle.com> Message-ID: Thank you, Tobias, for answering my questions. Changes looks good. Vladimir On 4/29/16 12:56 AM, Tobias Hartmann wrote: > Hi Vladimir, > > On 29.04.2016 01:07, Vladimir Kozlov wrote: >> In StringUTF16.java should getChars() throw when srcBegin > srcEnd? Or there is check in other place? 
> > Yes, there is another range check in String::getChars() that throws an exception if srcBegin > srcEnd but we don't want to throw an exception in this case if StringUTF16::getChars() is invoked directly. > >> Hotspot test changes seems fine but do you really need new helper methods? Why not allocate dst array in TestStringIntrinsicRangeChecks.java? > > StringUTF16 and StringLatin1 are package private classes in java.lang because they are part of the String internals. Therefore, the test (which is not part of the java.lang package) cannot invoke the intrinsified methods. To circumvent this, we use the "patch-library approach" to inject a wrapper class into java.lang that allows us to call the package private methods from the test: > > 26 package java.lang; > 27 > 28 /** > 29 * A helper class to get access to package-private members > 30 */ > 31 public class Helper { > > We use the same approach for other compiler tests as well, for example the tests in test/compiler/jsr292/. > > Thanks, > Tobias > >> Thanks, >> Vladimir >> >> On 4/28/16 3:45 AM, Tobias Hartmann wrote: >>> Hi >>> >>> please review the following patch: >>> >>> https://bugs.openjdk.java.net/browse/JDK-8155608 >>> http://cr.openjdk.java.net/~thartmann/8155608/jdk/webrev.00/ >>> http://cr.openjdk.java.net/~thartmann/8155608/hotspot/webrev.00/ >>> >>> Some String API methods use StringUTF16.putChar/getChar to read a char value from a byte array. For performance reasons, putChar/getChar is intrinsified by C1/C2 without range checks (like the Unsafe counterparts). The Java callers are responsible for adding the corresponding explicit range checks if necessary. >>> >>> I noticed that the Java level range checks in StringUTF16::compress(), StringUTF16::getChars() and StringLatin1::inflate() are not strong enough. Offset and length need to be multiplied by two because they index a char value in a byte array. I added a regression test that triggers the problem and also checks the other relevant intrinsics by invoking the methods with different arguments. >>> >>> Tested with regression test (-Xint/-Xcomp) and RBT (running). >>> >>> Thanks, >>> Tobias >>> From vladimir.x.ivanov at oracle.com Fri Apr 29 16:42:13 2016 From: vladimir.x.ivanov at oracle.com (Vladimir Ivanov) Date: Fri, 29 Apr 2016 19:42:13 +0300 Subject: [9] RFR (S): 8155751: Some tests miss othervm for main/bootclasspath mode Message-ID: <57238EE5.9050500@oracle.com> http://cr.openjdk.java.net/~vlivanov/8155751/webrev.00/ https://bugs.openjdk.java.net/browse/JDK-8155751 Since 4.2b02 jtreg requires /othervm for bootclasspath mode when command-line flags are specified. Testing: ran affected tests with jtreg 4.2b02. Thanks! Best regards, Vladimir Ivanov PS: I'll push the fix directly into jdk9/hs once it is reviewed. From vladimir.kozlov at oracle.com Fri Apr 29 17:00:31 2016 From: vladimir.kozlov at oracle.com (Vladimir Kozlov) Date: Fri, 29 Apr 2016 10:00:31 -0700 Subject: [9] RFR (S): 8155751: Some tests miss othervm for main/bootclasspath mode In-Reply-To: <57238EE5.9050500@oracle.com> References: <57238EE5.9050500@oracle.com> Message-ID: <237d3bb0-d6ba-4d87-115c-6e4cd182cef3@oracle.com> Looks good. Thanks, Vladimir On 4/29/16 9:42 AM, Vladimir Ivanov wrote: > http://cr.openjdk.java.net/~vlivanov/8155751/webrev.00/ > https://bugs.openjdk.java.net/browse/JDK-8155751 > > Since 4.2b02 jtreg requires /othervm for bootclasspath mode when command-line flags are specified. > > Testing: ran affected tests with jtreg 4.2b02. > > Thanks! 
> > Best regards, > Vladimir Ivanov > > PS: I'll push the fix directly into jdk9/hs once it is reviewed. From vladimir.x.ivanov at oracle.com Fri Apr 29 17:01:46 2016 From: vladimir.x.ivanov at oracle.com (Vladimir Ivanov) Date: Fri, 29 Apr 2016 20:01:46 +0300 Subject: [9] RFR (S): 8155751: Some tests miss othervm for main/bootclasspath mode In-Reply-To: <237d3bb0-d6ba-4d87-115c-6e4cd182cef3@oracle.com> References: <57238EE5.9050500@oracle.com> <237d3bb0-d6ba-4d87-115c-6e4cd182cef3@oracle.com> Message-ID: <5723937A.9010309@oracle.com> Thanks, Vladimir! Best regards, Vladimir Ivanov On 4/29/16 8:00 PM, Vladimir Kozlov wrote: > Looks good. > > Thanks, > Vladimir > > On 4/29/16 9:42 AM, Vladimir Ivanov wrote: >> http://cr.openjdk.java.net/~vlivanov/8155751/webrev.00/ >> https://bugs.openjdk.java.net/browse/JDK-8155751 >> >> Since 4.2b02 jtreg requires /othervm for bootclasspath mode when >> command-line flags are specified. >> >> Testing: ran affected tests with jtreg 4.2b02. >> >> Thanks! >> >> Best regards, >> Vladimir Ivanov >> >> PS: I'll push the fix directly into jdk9/hs once it is reviewed. From christian.thalinger at oracle.com Fri Apr 29 20:00:31 2016 From: christian.thalinger at oracle.com (Christian Thalinger) Date: Fri, 29 Apr 2016 10:00:31 -1000 Subject: [9] RFR(S): 8155608: String intrinsic range checks are not strict enough In-Reply-To: <57230C0F.7060702@oracle.com> References: <5721E9C4.7010208@oracle.com> <3E5A7A17-9D61-4C80-A19D-9F510BCAADCE@oracle.com> <57230C0F.7060702@oracle.com> Message-ID: > On Apr 28, 2016, at 9:23 PM, Tobias Hartmann wrote: > > Hi Chris, > > On 28.04.2016 22:11, Christian Thalinger wrote: >> >>> On Apr 28, 2016, at 12:45 AM, Tobias Hartmann > wrote: >>> >>> Hi >>> >>> please review the following patch: >>> >>> https://bugs.openjdk.java.net/browse/JDK-8155608 >>> http://cr.openjdk.java.net/~thartmann/8155608/jdk/webrev.00/ >> + checkBoundsOffCount(dstOff << 1, len << 1, dst.length); >> >> It?s funny that we still do << 1 instead of * 2 when every compiler on this planet can optimize that. Yeah, yeah, I know, it?s because of the interpreter but does it really matter? > > I used it more for consistency because we use "<< 1" in all the other places in StringLatin1, StringUTF16 and String as well. I think this originated from the "value.length >> String.coder()" use case to get the length depending on the String encoding. Besides that, I'm not sure if interpreter speed really matters here but the String methods are executed a lot (especially during startup). > >> Actually, I would prefer: >> >> + checkBoundsOffCount(dstOff * Character.BYTES, len * Character.BYTES, dst.length); > > I agree that this is more readable but for consistency I would like to go with the "<< 1" approach. Again, a loss for maintainability versus consistency (a.k.a. ?we?ve always done it this way?). > > Thanks, > Tobias > >> >>> http://cr.openjdk.java.net/~thartmann/8155608/hotspot/webrev.00/ >>> >>> Some String API methods use StringUTF16.putChar/getChar to read a char value from a byte array. For performance reasons, putChar/getChar is intrinsified by C1/C2 without range checks (like the Unsafe counterparts). The Java callers are responsible for adding the corresponding explicit range checks if necessary. >>> >>> I noticed that the Java level range checks in StringUTF16::compress(), StringUTF16::getChars() and StringLatin1::inflate() are not strong enough. Offset and length need to be multiplied by two because they index a char value in a byte array. 
I added a regression test that triggers the problem and also checks the other relevant intrinsics by invoking the methods with different arguments. >>> >>> Tested with regression test (-Xint/-Xcomp) and RBT (running). >>> >>> Thanks, >>> Tobias From derek.white at oracle.com Fri Apr 29 21:23:17 2016 From: derek.white at oracle.com (Derek White) Date: Fri, 29 Apr 2016 17:23:17 -0400 Subject: RFR (XXS): 8155754: Quarantine serviceability/tmtools/jstat/GcTest02.java Message-ID: <5723D0C5.6000104@oracle.com> JDK-8155570 is a test bug that is failing intermittently. It is probably an integration blocker, so *this* issue (8155754) is quarantining the test for now. Bug (sub task): https://bugs.openjdk.java.net/browse/JDK-8155754 Webrev: http://cr.openjdk.java.net/~drwhite/8155754/webrev.01/ jprt in progress... - Derek From david.holmes at oracle.com Fri Apr 29 22:57:26 2016 From: david.holmes at oracle.com (David Holmes) Date: Sat, 30 Apr 2016 08:57:26 +1000 Subject: (S) RFR: 8154710: [Solaris] Investigate use of in-memory low-resolution timestamps for Java and internal time API's In-Reply-To: <45182d6e-34d0-d93b-c28a-5c99fc260cb6@Oracle.com> References: <6a343a65-c2bf-475d-88de-0ec5c337e296@oracle.com> <57232E69.20800@oracle.com> <5723339E.306@oracle.com> <6f257e7e-d6b8-e489-86d3-6948af564e0f@oracle.com> <64C391E3-9FFC-4101-BD86-9F0936860677@oracle.com> <45182d6e-34d0-d93b-c28a-5c99fc260cb6@Oracle.com> Message-ID: (adding back hotspot-dev - still need a hs/runtime reviewer) Hi Roger, On 30/04/2016 12:19 AM, Roger Riggs wrote: > Hi, > > This change seems fine to me; though barely observable only in a microcosm. Thanks for the review. > (I was going to make the same comment as Daniel, logging now uses higher > resolution timestamps). Good to know. Thanks, David > Roger > > > On 4/29/2016 9:46 AM, charlie hunt wrote: >>> On Apr 29, 2016, at 8:35 AM, Daniel Fuchs >>> wrote: >>> >>> Hi Aleksey, >>> >>> On 29/04/16 12:12, Aleksey Shipilev wrote: >>>> On 04/29/2016 01:05 PM, David Holmes wrote: >>>>> On 29/04/2016 7:50 PM, Aleksey Shipilev wrote: >>>>>> On 04/29/2016 02:09 AM, David Holmes wrote: >>>>>>> This change is small in nature but somewhat broad in scope. It >>>>>>> "affects" >>>>>>> the implementation of System.currentTimeMillis() in the Java >>>>>>> space, and >>>>>>> os::javaTimeMillis() in the VM. But on Solaris only. >>>>>>> >>>>>>> I say "affects" but the change will be unobservable other than in >>>>>>> terms >>>>>>> of performance. >>>>>> Observable enough to me. >>>>> :) Any apps you can think of that might show benefit from this? >>>> Theoretically, this might affect heavily logging apps. IIRC, >>>> SPECjbb2000 >>>> was affected by currentTimeMillis performance. But, I see no reason in >>>> trying to justify the change, apart from the targeted microbenchmark. >>> If by "logging" you mean java.util.logging then this should have no >>> effect as logging now calls os::javaTimeSystemUTC (through java.time), >>> to get more precise time stamps. >>> >>> best regards, >>> >>> -- daniel >> I think Alexey means getting timestamps via System.currentTimeMillis() >> and internal JVM's os::javaTimeMillis(), (which could have included >> logging). That was the intention with my comment wrt SPECjbb2005, (of >> which was of similar flavor as SPECjbb2000). The good news (to me >> anyway) is SPECjbb2000 and SPECjbb2005 have been retired in favor of >> SPECjbb2015.
>> >> hths, >> >> charlie >> >>>> -Aleksey > From daniel.daugherty at oracle.com Fri Apr 29 23:28:00 2016 From: daniel.daugherty at oracle.com (Daniel D. Daugherty) Date: Fri, 29 Apr 2016 17:28:00 -0600 Subject: (S) RFR: 8154710: [Solaris] Investigate use of in-memory low-resolution timestamps for Java and internal time API's In-Reply-To: <6a343a65-c2bf-475d-88de-0ec5c337e296@oracle.com> References: <6a343a65-c2bf-475d-88de-0ec5c337e296@oracle.com> Message-ID: <5723EE00.7070503@oracle.com> On 4/28/16 5:09 PM, David Holmes wrote: > bug: https://bugs.openjdk.java.net/browse/JDK-8154710 > webrev: http://cr.openjdk.java.net/~dholmes/8154710/webrev/ src/os/solaris/vm/os_solaris.cpp L1356: static _get_nsec_fromepoch_func_t _get_nsec_fromepoch = NULL; nit: two spaced between the type and the var name. Not sure why since you aren't lining up with anything. L4444: Solaris::_pthread_setname_np = // from 11.3 Thanks for documenting the release. L4450: nit: why add a blank line? Thumbs up! Nits only so feel free to fix or ignore, but don't need another webrev. Dan > > This change is small in nature but somewhat broad in scope. It > "affects" the implementation of System.currentTimeMillis() in the Java > space, and os::javaTimeMillis() in the VM. But on Solaris only. > > I say "affects" but the change will be unobservable other than in > terms of performance. > > As of Solaris 11.3.6 a new in-memory timestamp has been made available > (not unlike what has always existed on Windows). There are actually 3 > different timestamps exported but the one we are interested in is > get_nsecs_fromepoch - which is of course elapsed nanoseconds since the > epoch - which is exactly what javaTimeMillis() is, but expressed in > milliseconds. The in-memory timestamps have an update accuracy of 1ms, > so are not suitable for any other API's that want the time-of-day, but > at a greater accuracy. > > Microbenchmark shows the in-memory access is approx 45% faster (19ns > on my test system) compared to the gettimeofday call (35ns). > > Thanks, > David > From kim.barrett at oracle.com Sat Apr 30 00:46:06 2016 From: kim.barrett at oracle.com (Kim Barrett) Date: Fri, 29 Apr 2016 20:46:06 -0400 Subject: RFR (XXS): 8155754: Quarantine serviceability/tmtools/jstat/GcTest02.java In-Reply-To: <5723D0C5.6000104@oracle.com> References: <5723D0C5.6000104@oracle.com> Message-ID: <40DCAF3B-3613-4642-A6F2-2DD9684AB499@oracle.com> > On Apr 29, 2016, at 5:23 PM, Derek White wrote: > > JDK-8155570 is a test bug that is failing intermittently. It is probably an integration blocker, so *this* issue (8155754) is quarantining the test for now. > > Bug (sub task): https://bugs.openjdk.java.net/browse/JDK-8155754 > Webrev: http://cr.openjdk.java.net/~drwhite/8155754/webrev.01/ > jprt in progress... > > - Derek Looks good. 
From john.r.rose at oracle.com Sat Apr 30 00:47:26 2016 From: john.r.rose at oracle.com (John Rose) Date: Fri, 29 Apr 2016 17:47:26 -0700 Subject: [9] RFR(S): 8155608: String intrinsic range checks are not strict enough In-Reply-To: References: <5721E9C4.7010208@oracle.com> <3E5A7A17-9D61-4C80-A19D-9F510BCAADCE@oracle.com> <57230C0F.7060702@oracle.com> Message-ID: > On Apr 29, 2016, at 1:00 PM, Christian Thalinger wrote: > > >> On Apr 28, 2016, at 9:23 PM, Tobias Hartmann wrote: >> >> Hi Chris, >> >> On 28.04.2016 22:11, Christian Thalinger wrote: >>> >>>> On Apr 28, 2016, at 12:45 AM, Tobias Hartmann > wrote: >>>> >>>> Hi >>>> >>>> please review the following patch: >>>> >>>> https://bugs.openjdk.java.net/browse/JDK-8155608 >>>> http://cr.openjdk.java.net/~thartmann/8155608/jdk/webrev.00/ >>> + checkBoundsOffCount(dstOff << 1, len << 1, dst.length); >>> >>> It's funny that we still do << 1 instead of * 2 when every compiler on this planet can optimize that. Yeah, yeah, I know, it's because of the interpreter but does it really matter? >> >> I used it more for consistency because we use "<< 1" in all the other places in StringLatin1, StringUTF16 and String as well. I think this originated from the "value.length >> String.coder()" use case to get the length depending on the String encoding. Besides that, I'm not sure if interpreter speed really matters here but the String methods are executed a lot (especially during startup). >> >>> Actually, I would prefer: >>> >>> + checkBoundsOffCount(dstOff * Character.BYTES, len * Character.BYTES, dst.length); >> >> I agree that this is more readable but for consistency I would like to go with the "<< 1" approach. > > Again, a loss for maintainability versus consistency (a.k.a. "we've always done it this way"). That '1' is an anti-pattern I call "naked constant". Which "1" is it, after all? The maintainer of code needs to know which condition (of thousands of possibilities) dictates a "1" here. In HotSpot we discourage such nakedness, in favor of named constants: https://wiki.openjdk.java.net/display/HotSpot/StyleGuide#StyleGuide-NamedCons The Java code should follow this practice also. Also, independently, I'm having a hard time figuring out how to prove that, between the Java code and the HotSpot intrinsic (and interpreter/C1/C2), the range checking logic is consistently applied. My best advice for this is, always, put the range checking logic in one place, in Java code. This makes me profoundly suspicious of *any* public method that is also marked @HSIC, if it takes any array arguments. If the public method is an intrinsic, it means we are trusting C++ IR-assembly code to securely check Java array bounds. That is unnatural, and (as history proves repeatedly) subject to errors. Instead, we should have a Java routine which checks bounds, and a *private* intrinsic which is provably called *only after* the Java checker is called. -- John From igor.ignatyev at oracle.com Sat Apr 30 01:47:20 2016 From: igor.ignatyev at oracle.com (Igor Ignatyev) Date: Sat, 30 Apr 2016 04:47:20 +0300 Subject: RFR: 8149591 - Prepare hotspot for GTest In-Reply-To: <56BC0BF1.7070506@oracle.com> References: <56BB93D6.3000905@oracle.com> <56BC0BF1.7070506@oracle.com> Message-ID: <2E67D611-36FF-4485-9468-7E7A1FC7819F@oracle.com> Hi, I'd like to renew this RFR. besides updated previous changes, new webrev contains two new things: - new vm-flag -- ExecutingUnitTests, we use it in debug jvm to print a clean error message (w/o assert, line, file, etc).
This is needed to simplify comparing error messages in "death" tests (the tests which intentionally cause crashes/asserts) - if CreateCoredumpOnCrash is false, we don't determine the default core path webrev: http://cr.openjdk.java.net/~iignatyev/8149591/webrev.00/ JBS: https://bugs.openjdk.java.net/browse/JDK-8149591 Thanks, -- Igor > On Feb 11, 2016, at 7:20 AM, David Holmes wrote: > > Hi Jesper, > > On 11/02/2016 5:47 AM, Jesper Wilhelmsson wrote: >> Hi, >> >> Please review this change to prepare the Hotspot code for the Google >> unit test framework. From the RFE: >> >> A few changes are needed in the hotspot code to start using the Google >> Test framework. >> >> 1. The new() operator as defined in allocation.cpp can not be used >> together with GTest. This needs to be moved to a separate file so that >> we can avoid compiling it when building the GTest enabled JVM. > > I presume that is because GTest will use the real global operator new? > > The name of the new file, given it contains new and delete, seems one-sided. But I can't think of anything better. :) > >> 2. In management.cpp there is a local variable called err_msg. This >> variable is shadowing a global variable in debug.hpp. In the GTest work >> the global err_msg variable is used in the vmassert macro and this >> creates a conflict with the local variable in management.cpp. > > Renaming seems trivially fine. > >> 3. If SuppressFatalErrorMessage is set ALL error messages should be >> suppressed, even the ones in error_is_suppressed() in debug.cpp. > > Took me a while to think this one through. Not sure what purpose SuppressFatalErrorMessages is intended to serve. The idea that the VM can just vanish without any kind of message to the user just seems like a bad idea. I wonder if there is a SuppressFatalErrorMessages test somewhere that actually relies on the output from error_is_suppressed to determine that a crash really did happen? ;-) > > Cheers, > David > >> This is what is done by this change. >> >> RFE: https://bugs.openjdk.java.net/browse/JDK-8149591 >> Webrev: http://cr.openjdk.java.net/~jwilhelm/8149591/webrev.00/index.html >> >> Thanks, >> /Jesper From david.holmes at oracle.com Sat Apr 30 01:49:53 2016 From: david.holmes at oracle.com (David Holmes) Date: Sat, 30 Apr 2016 11:49:53 +1000 Subject: (S) RFR: 8154710: [Solaris] Investigate use of in-memory low-resolution timestamps for Java and internal time API's In-Reply-To: <5723EE00.7070503@oracle.com> References: <6a343a65-c2bf-475d-88de-0ec5c337e296@oracle.com> <5723EE00.7070503@oracle.com> Message-ID: On 30/04/2016 9:28 AM, Daniel D. Daugherty wrote: > On 4/28/16 5:09 PM, David Holmes wrote: >> bug: https://bugs.openjdk.java.net/browse/JDK-8154710 >> webrev: http://cr.openjdk.java.net/~dholmes/8154710/webrev/ > > src/os/solaris/vm/os_solaris.cpp > L1356: static _get_nsec_fromepoch_func_t _get_nsec_fromepoch = NULL; > nit: two spaces between the type and the var name. > Not sure why since you aren't lining up with anything. > > L4444: Solaris::_pthread_setname_np = // from 11.3 > Thanks for documenting the release. > > L4450: > nit: why add a blank line? > > Thumbs up! Nits only so feel free to fix or ignore, but don't > need another webrev. Thanks for the review Dan - will nit fix. :) David > Dan > > >> >> This change is small in nature but somewhat broad in scope. It >> "affects" the implementation of System.currentTimeMillis() in the Java >> space, and os::javaTimeMillis() in the VM. But on Solaris only. 
>> >> I say "affects" but the change will be unobservable other than in >> terms of performance. >> >> As of Solaris 11.3.6 a new in-memory timestamp has been made available >> (not unlike what has always existed on Windows). There are actually 3 >> different timestamps exported but the one we are interested in is >> get_nsecs_fromepoch - which is of course elapsed nanoseconds since the >> epoch - which is exactly what javaTimeMillis() is, but expressed in >> milliseconds. The in-memory timestamps have an update accuracy of 1ms, >> so are not suitable for any other API's that want the time-of-day, but >> at a greater accuracy. >> >> Microbenchmark shows the in-memory access is approx 45% faster (19ns >> on my test system) compared to the gettimeofday call (35ns). >> >> Thanks, >> David >> > From derek.white at oracle.com Sat Apr 30 02:42:12 2016 From: derek.white at oracle.com (Derek White) Date: Fri, 29 Apr 2016 22:42:12 -0400 Subject: RFR (XXS): 8155754: Quarantine serviceability/tmtools/jstat/GcTest02.java In-Reply-To: <40DCAF3B-3613-4642-A6F2-2DD9684AB499@oracle.com> References: <5723D0C5.6000104@oracle.com> <40DCAF3B-3613-4642-A6F2-2DD9684AB499@oracle.com> Message-ID: Thanks Kim! On Apr 29, 2016, at 8:46 PM, Kim Barrett wrote: >> On Apr 29, 2016, at 5:23 PM, Derek White wrote: >> >> JDK-8155570 is a test bug that is failing intermittently. It is probably an integration blocker, so *this* issue (8155754) is quarantining the test for now. >> >> Bug (sub task): https://bugs.openjdk.java.net/browse/JDK-8155754 >> Webrev: http://cr.openjdk.java.net/~drwhite/8155754/webrev.01/ >> jprt in progress... >> >> - Derek > > Looks good. > From kim.barrett at oracle.com Sat Apr 30 17:01:41 2016 From: kim.barrett at oracle.com (Kim Barrett) Date: Sat, 30 Apr 2016 13:01:41 -0400 Subject: RFR: 8141501: Problems with BitMap buffer management In-Reply-To: <57220EE4.4080007@oracle.com> References: <5720A91A.1020809@oracle.com> <57220EE4.4080007@oracle.com> Message-ID: <2CE7A7C6-24BA-4A2D-8DE8-8C01E922EBD4@oracle.com> > On Apr 28, 2016, at 9:23 AM, Stefan Karlsson wrote: > > Hi all, > > I decided to restructure the allocation code a bit to get better separation between the bitmap allocation logic and the actual bitmap memory allocators. > > http://cr.openjdk.java.net/~stefank/8141501/webrev.02.delta > http://cr.openjdk.java.net/~stefank/8141501/webrev.02 Generally looks good. Just a few mostly minor comments or suggestions. The only really important one is about ~G1RegionToSpaceMapper. ------------------------------------------------------------------------------ src/share/vm/c1/c1_GraphBuilder.cpp 355 void BlockListBuilder::mark_loops() { 356 ResourceMark rm; 357 358 _active.initialize(BlockBegin::number_of_blocks()); 359 _visited.initialize(BlockBegin::number_of_blocks()); Consider (possibly debug-only) resizing _active and _visited to zero at the end of the function, to avoid leaving dangling pointers when the ResourceMark is exited. ------------------------------------------------------------------------------ src/share/vm/c1/c1_Instruction.hpp 1721 void set_live_in (ResourceBitMap map) { _live_in = map; } 1722 void set_live_out (ResourceBitMap map) { _live_out = map; } 1723 void set_live_gen (ResourceBitMap map) { _live_gen = map; } 1724 void set_live_kill (ResourceBitMap map) { _live_kill = map; } 1725 void set_fpu_register_usage(ResourceBitMap map) { _fpu_register_usage = map; } By-value parameters seem like unnecessary (shallow) copies. Could these be passed by const-ref? 
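To make the by-value versus const-ref comment above concrete, a toy sketch with an invented BitMapHandle type; the real ResourceBitMap/BitMapView classes are not reproduced here, and all names below are made up for illustration.

#include <stdint.h>
#include <stddef.h>

struct BitMapHandle {
  uintptr_t* words;        // backing storage, not owned by the handle
  size_t     size_in_bits; // extent of the map
};

struct BlockInfo {
  BitMapHandle live_in;

  // By value: the (pointer, size) header is shallow-copied at the call site
  // and then copied again into the member.
  void set_live_in_by_value(BitMapHandle map) { live_in = map; }

  // By const reference: no copy at the call site, and the signature documents
  // that the callee neither mutates nor takes ownership of the argument.
  void set_live_in_by_ref(const BitMapHandle& map) { live_in = map; }
};

int main() {
  uintptr_t storage[4] = {0};
  BitMapHandle h = { storage, sizeof(storage) * 8 };
  BlockInfo b = {};
  b.set_live_in_by_value(h);  // works: copies the small header twice
  b.set_live_in_by_ref(h);    // same effect, one copy fewer
  return 0;
}

For a two-word handle the extra copy is cheap, so the suggestion is mostly about intent: a const reference says the callee only reads the argument and does not own the underlying bits.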
------------------------------------------------------------------------------ src/share/vm/c1/c1_LinearScan.cpp 718 block->set_live_in (ResourceBitMap(live_size)); block->live_in().clear(); 719 block->set_live_out (ResourceBitMap(live_size)); block->live_out().clear(); Consider instead block->live_in().reinitialize(live_size); block->live_out().reinitialize(live_size); ------------------------------------------------------------------------------ src/share/vm/gc/g1/g1CardLiveData.cpp 400 G1ClearCardLiveDataTask(BitMapView bitmap, size_t num_tasks) : Pass bitmap by const-ref? ------------------------------------------------------------------------------ src/share/vm/gc/g1/g1RegionToSpaceMapper.hpp [Removed] 65 virtual ~G1RegionToSpaceMapper() { 66 _commit_map.resize(0, /* in_resource_area */ false); 67 } This changes this base class's destructor from public virtual to public non-virtual, introducing risk of unintended slicing. ------------------------------------------------------------------------------ src/share/vm/utilities/bitMap.hpp 182 idx_t size_in_bytes() const { return size_in_words() * BytesPerWord; } Perhaps instead use return calc_size_in_bytes(size()); ------------------------------------------------------------------------------ src/share/vm/utilities/bitMap.hpp 163 // Protected constructor. Mention destructor too? ------------------------------------------------------------------------------
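A small self-contained illustration of the ~G1RegionToSpaceMapper concern above, using invented class names rather than the real G1 types: if instances are ever deleted through a base-class pointer, the base destructor needs to stay public and virtual; making it protected and non-virtual is the alternative when deletion through the base is not intended.

#include <stdio.h>

class MapperBase {
 public:
  // Keeping this virtual ensures the derived destructor runs when an object
  // is deleted through a MapperBase*.
  virtual ~MapperBase() { printf("base cleanup\n"); }
};

class CommittedMapper : public MapperBase {
  int* _commit_map;  // stands in for resources a derived mapper must release
 public:
  CommittedMapper() : _commit_map(new int[16]()) {}
  ~CommittedMapper() {
    delete[] _commit_map;          // runs because ~MapperBase is virtual
    printf("derived cleanup\n");
  }
};

int main() {
  MapperBase* m = new CommittedMapper();
  delete m;  // with a non-virtual public ~MapperBase this delete would skip
             // ~CommittedMapper and be undefined behavior
  return 0;
}

The sketch only shows the language-level hazard of dropping the virtual destructor; whether the G1 mappers are actually deleted through the base type is exactly the question the review raises.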