RFR: 8203929: Limit amount of data for JFR.dump
Markus Gronlund
markus.gronlund at oracle.com
Sat Jun 23 10:14:15 UTC 2018
Hi Erik,
I am reading through the code and making some annotations. I have attached a description of my first pass, and thought I'd send it your way so you have something to start with; more to come.
Do note the informal character of the review file; some sections are mainly clarifications for my own understanding, but I think you can infer from the context which parts are questions and which are suggestions.
Thanks
Markus
-----Original Message-----
From: Erik Gahlin
Sent: den 21 juni 2018 18:16
To: hotspot-jfr-dev at openjdk.java.net; Andrew Gross <andrew.gross at oracle.com>
Subject: RFR: 8203929: Limit amount of data for JFR.dump
Hi,
Could I have a review of an enhancement that will simplify the usage of JFR.dump and make it possible to copy parts of a recording to a file? For more information, see the CSR:
https://bugs.openjdk.java.net/browse/JDK-8203930
Bug:
https://bugs.openjdk.java.net/browse/JDK-8203929
Webrev:
http://cr.openjdk.java.net/~egahlin/8203929/
Testing:
Tests in test/jdk/jdk/jfr
Thanks
Erik
-------------- next part --------------
PlatformRecording.java
// To be used internally when doing dumps.
297 // Caller must have recorder lock and close recording before releasing lock
298 public PlatformRecording newSnapshotClone(String reason, Boolean pathToGcRoots) throws IOException {
299 if(!Thread.holdsLock(recorder)) {
300 throw new InternalError("Caller msut have recorder lock"); <<-- "msut" -> "must"
301 }
clone.setDestination(this.destination); <<-- is this really needed? What is this used for in relation to the clone?
clone.setToDisk(true);
323 // We purposely don't clone settings, since <<-- says we don't clone settings [just here?], but further down that is what is done
324 // a union a == a
325 if (!isToDisk()) {
326 // force memory contents to disk
327 clone.start();
328 } else {
329 // using existing chunks on disk
330 for (RepositoryChunk c : chunks) {
331 clone.add(c);
332 }
333 clone.setState(RecordingState.RUNNING);
334 clone.setStartTime(getStartTime());
335 }
336 if (pathToGcRoots == null) {
337 clone.setSettings(getSettings()); // needed for old object sample <<-- but here that is exactly what happens, i.e. the settings are cloned
338 clone.stop(reason); // dumps to destination path here
339 } else {
340 // Risk of violating lock order here, since
341 // clone.stop() will take recorder lock inside
342 // metadata lock, but OK if we already
343 // have recorder lock when we entered metadata lock
344 synchronized (MetadataRepository.getInstance()) {
345 clone.setSettings(OldObjectSample.createSettingsForSnapshot(this, pathToGcRoots)); <<-- what is this settings snapshot? Is it getSettings() + OldObjectSample?
346 clone.stop(reason);
347 }
348 }
349 return clone;
PlatformRecorder.java
112 // To be used internally when doing dumps.
113 // Caller must have recorder lock and close recording before releasing lock
114 public PlatformRecording newTemporaryRecording() {
<<-- insert the same lock assertion here as in newSnapshotClone, i.e. a Thread.holdsLock() check, see the sketch below
115 return newRecording(new HashMap<>(), 0);
116 }
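Something like this is what I have in mind, mirroring the check in newSnapshotClone (just a sketch; I am assuming the recorder lock is the PlatformRecorder instance itself, so holdsLock(this) would be the right check here):

    // To be used internally when doing dumps.
    // Caller must have recorder lock and close recording before releasing lock
    public PlatformRecording newTemporaryRecording() {
        if (!Thread.holdsLock(this)) {
            throw new InternalError("Caller must have recorder lock");
        }
        return newRecording(new HashMap<>(), 0);
    }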
// passing in the newTemporaryRecording with id 0
public synchronized void fillWithRecordedData(PlatformRecording target, Boolean pathToGcRoots) {
boolean running = false;
boolean toDisk = false;
for (PlatformRecording r : recordings) { // iterate all recordings to find out whether any is RUNNING and whether any records to disk
if (r.getState() == RecordingState.RUNNING) {
running = true;
if (r.isToDisk()) {
toDisk = true; <<-- is any recording running that records to disk?
}
}
}
// If needed, flush data from memory
if (running) {
if (toDisk) {
OldObjectSample.emit(recordings, pathToGcRoots); <<-- already recording to disk, issue OldObjectSample emit and then do a regular rotation (finalize chunk)
rotateDisk();
} else { <<--- recording but only to memory
try (PlatformRecording snapshot = newTemporaryRecording()) { // create another snapshot recording? <<-- why do we need to do this again? We have already been passed a temporary recording with id 0. Can't we just use that directly? Then the chunks would be populated automatically.
snapshot.setToDisk(true);
snapshot.setShouldWriteActiveRecordingEvent(false);
snapshot.start();
OldObjectSample.emit(recordings, pathToGcRoots);
snapshot.stop("Snapshot dump");
fillWithDisckChunks(target);
}
return;
}
}
fillWithDisckChunks(target); <<-- nothing is currently running
}
// as an alternative for the else clause (recordings running, but only to memory), could we use the target directly, something like:
} else { <<--- recording but only to memory
target.setToDisk(true);
target.setShouldWriteActiveRecordingEvent(false);
target.start();
OldObjectSample.emit(recordings, pathToGcRoots);
target.stop("Snapshot dump");
// in this case, would there even be a need to call fillWithDiskChunks(target) here? Don't we already have everything populated (all chunks added on stop)? See the consolidated sketch below.
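To make that concrete, this is roughly how I picture the tail of fillWithRecordedData with the change (only a sketch; I may well be missing a reason for the intermediate snapshot recording, and all the calls below are taken from the webrev as quoted above):

    // If needed, flush data from memory
    if (running) {
        if (toDisk) {
            // already recording to disk: emit OldObjectSample and finalize the current chunk
            OldObjectSample.emit(recordings, pathToGcRoots);
            rotateDisk();
        } else {
            // recording only to memory: let the passed-in temporary recording
            // flush the buffers to disk instead of creating a second snapshot
            target.setToDisk(true);
            target.setShouldWriteActiveRecordingEvent(false);
            target.start();
            OldObjectSample.emit(recordings, pathToGcRoots);
            target.stop("Snapshot dump");
            // open question: is fillWithDiskChunks(target) still needed at this point,
            // or does target already have all its chunks after stop()?
            return;
        }
    }
    fillWithDiskChunks(target);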
private void fillWithDisckChunks(PlatformRecording target) { <<-- "fillWithDisckChunks" -> "fillWithDiskChunks"
for (RepositoryChunk c : makeChunkList(null, null)) {
target.add(c); <<-- adding all chunks that came out of the startTime and endTime filtered list of chunks (is the chunk "in" the interval?), but since both are null, all chunks are added
}
target.setState(RecordingState.STOPPED);
Instant startTime = null;
Instant endTime = null;
for (RepositoryChunk c : makeChunkList(null, null)) { <<-- same iteration again, can't you just iterate the chunks of the target now, since they were populated above? See the sketch below.
if (startTime == null || c.getStartTime().isBefore(startTime)) {
startTime = c.getStartTime();
}
if (endTime == null || c.getEndTime().isAfter(endTime)) {
endTime = c.getEndTime();
}
}
// here startTime and endTime would reflect the first and the last chunk
Instant now = Instant.now();
if (startTime == null) {
startTime = now;
}
if (endTime == null) {
endTime = now;
}
target.setStartTime(startTime);
target.setStopTime(endTime); <<-- start and stop time set to cover the added chunks
target.setInternalDuration(Duration.between(startTime, endTime));
}
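i.e. for the second half of fillWithDiskChunks, something like this (just a sketch, assuming target.getChunks() returns exactly the chunks that were added just above):

    // derive the interval from the chunks that were just added to the target,
    // instead of building the filtered repository chunk list a second time
    Instant startTime = null;
    Instant endTime = null;
    for (RepositoryChunk c : target.getChunks()) {
        if (startTime == null || c.getStartTime().isBefore(startTime)) {
            startTime = c.getStartTime();
        }
        if (endTime == null || c.getEndTime().isAfter(endTime)) {
            endTime = c.getEndTime();
        }
    }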
private List<RepositoryChunk> makeChunkList(Instant startTime, Instant endTime) {
Set<RepositoryChunk> chunkSet = new HashSet<>();
for (PlatformRecording r : getRecordings()) {
chunkSet.addAll(r.getChunks());
}
if (chunkSet.size() > 0) {
List<RepositoryChunk> chunks = new ArrayList<>(chunkSet.size());
for (RepositoryChunk rc : chunkSet) {
if (rc.inInterval(startTime, endTime)) {
chunks.add(rc);
}
}
// n*log(n), should be able to do n*log(k) with a priority queue,
// where k = number of recordings, n = number of chunks
Collections.sort(chunks, RepositoryChunk.END_TIME_COMPARATOR); <<-- sorted on endTime (the n*log(k) variant mentioned in the comment above is sketched below, for reference only)
return chunks;
}
return Collections.emptyList();
}
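Not something I think is needed for this change, but for reference, the n*log(k) variant the comment refers to would be a k-way merge over the per-recording chunk lists. This is only a sketch under the assumption that each recording already keeps its chunks ordered by end time; Cursor and makeChunkListMerged are made-up names for illustration, and it would need java.util.PriorityQueue and java.util.Comparator in addition to the existing imports:

    // k-way merge: a priority queue holds one cursor per recording,
    // ordered by the end time of the cursor's current chunk
    private static final class Cursor {
        final List<RepositoryChunk> chunks;
        int index;
        Cursor(List<RepositoryChunk> chunks) {
            this.chunks = chunks;
        }
        RepositoryChunk current() {
            return chunks.get(index);
        }
    }

    private List<RepositoryChunk> makeChunkListMerged(Instant startTime, Instant endTime) {
        Comparator<Cursor> byEndTime = Comparator.comparing(Cursor::current, RepositoryChunk.END_TIME_COMPARATOR);
        PriorityQueue<Cursor> queue = new PriorityQueue<>(byEndTime);
        for (PlatformRecording r : getRecordings()) {
            List<RepositoryChunk> chunks = new ArrayList<>(r.getChunks());
            if (!chunks.isEmpty()) {
                queue.add(new Cursor(chunks));
            }
        }
        List<RepositoryChunk> result = new ArrayList<>();
        Set<RepositoryChunk> seen = new HashSet<>(); // a chunk can be shared between recordings
        while (!queue.isEmpty()) {
            Cursor c = queue.poll();
            RepositoryChunk rc = c.current();
            if (seen.add(rc) && rc.inInterval(startTime, endTime)) {
                result.add(rc);
            }
            if (++c.index < c.chunks.size()) {
                queue.add(c); // reinsert with the cursor advanced to its next chunk
            }
        }
        return result;
    }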
Utils.java
502 public static String makeFilename(Recording recording) {
503 String pid = JVM.getJVM().getPid();
504 String date = Repository.REPO_DATE_FORMAT.format(LocalDateTime.now());
505 String id = recording == null ? "x" : Long.toString(recording.getId()); <<-- "x"? What is x? Just a placeholder that signifies "all"? Maybe use "all" or "0" instead, see the sketch below.
506 return "hotspot-" + "pid-" + pid + "-id-" + id + "-" + date + ".jfr";
507 }
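i.e. something like this (sketch):

    // "all" instead of "x" when the dump is not tied to a single recording
    String id = recording == null ? "all" : Long.toString(recording.getId());
    return "hotspot-" + "pid-" + pid + "-id-" + id + "-" + date + ".jfr";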
DCMDDump.java
* @param name name or id of the recording to dump, or <code>null</code> <<-- "or null" for what? To dump everything, I guess? Can we state that explicitly here?
* @param maxSzie how far back in size to dump, may be null <<<-- maxSzie --> maxSize
* @param begin point in time to dump data from, may be null <<-- some descriptions say "dump data", others just "dump". Make them consistent? I suggest using only "dump".
* @param time point in time from where the dump should occur from, may be null <<-- this is obsolete, right? There is no "time" parameter.
85 if (FlightRecorder.getFlightRecorder().getRecordings().isEmpty()) {
86 throw new DCmdException("No recodings to dump from. Use JFR.start to start a recording."); <<-- "no recodings" -> "no recordings"
87 }
"No recodings to dump from. " -> "No recordings to dump. Use..."
102 if (maxSize!= null) -> if (maxSize != null) {
public void dump(PlatformRecorder recorder, Recording recording, String name, String filename, Long maxSize, Boolean pathToGcRoots, Instant beginTime, Instant endTime) throws DCmdException {
137 try (PlatformRecording r = newSnapShot(recorder, recording, pathToGcRoots)) {
138 r.filter(beginTime, endTime, maxSize);
139 if (r.getChunks().isEmpty()) {
140 throw new DCmdException("Failed to dump. No data found in the specified interval.");
141 }
142 SafePath dumpFile = resolvePath(recording, filename);
143
144 // Needed for JVM
145 Utils.touch(dumpFile.toPath());
146 r.dumpStopped(new WriteableUserPath(dumpFile.toPath()));
147 reportOperationComplete("Dumped", name, dumpFile);
148 } catch (IOException | InvalidPathException e) {
149 throw new DCmdException("Failed to dump. Could not copy recording data. %s", e.getMessage());
150 }
151 }
private PlatformRecording newSnapShot(PlatformRecorder recorder, Recording recording, Boolean pathToGcRoots) throws DCmdException, IOException {
if (recording == null) {
// Operate on all recordings
PlatformRecording snapshot = recorder.newTemporaryRecording();
recorder.fillWithRecordedData(snapshot, pathToGcRoots);
return snapshot;
}
PlatformRecording pr = PrivateAccess.getInstance().getPlatformRecording(recording);
return pr.newSnapshotClone("Dumped by user", pathToGcRoots);
}