Unverified commit 29c88081 authored by Yegor, committed by GitHub

Collect chrome://tracing data in Web benchmarks (#53879)

Collect chrome://tracing data in Web benchmarks
parent 58acf4e7
...@@ -191,31 +191,54 @@ class BenchTextCachedLayout extends RawRecorder { ...@@ -191,31 +191,54 @@ class BenchTextCachedLayout extends RawRecorder {
/// build are unique. /// build are unique.
int _counter = 0; int _counter = 0;
/// Measures how expensive it is to construct material checkboxes. /// Which mode to run [BenchBuildColorsGrid] in.
enum _TestMode {
/// Uses the HTML rendering backend with the canvas 2D text layout.
useCanvasTextLayout,
/// Uses the HTML rendering backend with the DOM text layout.
useDomTextLayout,
/// Uses CanvasKit for everything.
useCanvasKit,
}
/// Measures how expensive it is to construct a realistic text-heavy piece of UI.
/// ///
/// Creates a 10x10 grid of tristate checkboxes. /// The benchmark constructs a tabbed view, where each tab displays a list of
/// colors. Each color's description is made of several [Text] nodes.
class BenchBuildColorsGrid extends WidgetBuildRecorder { class BenchBuildColorsGrid extends WidgetBuildRecorder {
BenchBuildColorsGrid({@required this.useCanvas}) BenchBuildColorsGrid.canvas()
: super(name: useCanvas ? canvasBenchmarkName : domBenchmarkName); : mode = _TestMode.useCanvasTextLayout, super(name: canvasBenchmarkName);
BenchBuildColorsGrid.dom()
: mode = _TestMode.useDomTextLayout, super(name: domBenchmarkName);
BenchBuildColorsGrid.canvasKit()
: mode = _TestMode.useCanvasKit, super(name: canvasKitBenchmarkName);
static const String domBenchmarkName = 'text_dom_color_grid'; static const String domBenchmarkName = 'text_dom_color_grid';
static const String canvasBenchmarkName = 'text_canvas_color_grid'; static const String canvasBenchmarkName = 'text_canvas_color_grid';
static const String canvasKitBenchmarkName = 'text_canvas_kit_color_grid';
/// Whether to use the new canvas-based text measurement implementation. /// Whether to use the new canvas-based text measurement implementation.
final bool useCanvas; final _TestMode mode;
num _textLayoutMicros = 0; num _textLayoutMicros = 0;
@override @override
void setUpAll() { Future<void> setUpAll() async {
_useCanvasText(useCanvas); if (mode == _TestMode.useCanvasTextLayout) {
_useCanvasText(true);
}
if (mode == _TestMode.useDomTextLayout) {
_useCanvasText(false);
}
_onBenchmark((String name, num value) { _onBenchmark((String name, num value) {
_textLayoutMicros += value; _textLayoutMicros += value;
}); });
} }
@override @override
void tearDownAll() { Future<void> tearDownAll() async {
_useCanvasText(null); _useCanvasText(null);
_onBenchmark(null); _onBenchmark(null);
} }
...@@ -230,7 +253,8 @@ class BenchBuildColorsGrid extends WidgetBuildRecorder { ...@@ -230,7 +253,8 @@ class BenchBuildColorsGrid extends WidgetBuildRecorder {
void frameDidDraw() { void frameDidDraw() {
// We need to do this before calling [super.frameDidDraw] because the latter // We need to do this before calling [super.frameDidDraw] because the latter
// updates the value of [showWidget] in preparation for the next frame. // updates the value of [showWidget] in preparation for the next frame.
if (showWidget) { // TODO(yjbanov): https://github.com/flutter/flutter/issues/53877
if (showWidget && mode != _TestMode.useCanvasKit) {
profile.addDataPoint( profile.addDataPoint(
'text_layout', 'text_layout',
Duration(microseconds: _textLayoutMicros.toInt()), Duration(microseconds: _textLayoutMicros.toInt()),
......
...@@ -3,6 +3,7 @@ ...@@ -3,6 +3,7 @@
// found in the LICENSE file. // found in the LICENSE file.
import 'dart:async'; import 'dart:async';
import 'dart:html' as html;
import 'dart:math' as math; import 'dart:math' as math;
import 'dart:ui'; import 'dart:ui';
...@@ -28,6 +29,8 @@ const int kMaxSampleCount = 10 * kMinSampleCount; ...@@ -28,6 +29,8 @@ const int kMaxSampleCount = 10 * kMinSampleCount;
/// The number of samples used to extract metrics, such as noise, means, /// The number of samples used to extract metrics, such as noise, means,
/// max/min values. /// max/min values.
///
/// Keep this constant in sync with the same constant defined in `dev/devicelab/lib/framework/browser.dart`.
const int _kMeasuredSampleCount = 10; const int _kMeasuredSampleCount = 10;
/// Maximum tolerated noise level. /// Maximum tolerated noise level.
...@@ -44,6 +47,50 @@ Duration timeAction(VoidCallback action) { ...@@ -44,6 +47,50 @@ Duration timeAction(VoidCallback action) {
return stopwatch.elapsed; return stopwatch.elapsed;
} }
/// A function that performs asynchronous work.
typedef AsyncVoidCallback = Future<void> Function();
/// Runs the benchmark using the given [recorder].
///
/// Notifies about "set up" and "tear down" events via the [setUpAllDidRun]
/// and [tearDownAllWillRun] callbacks.
@sealed
class Runner {
/// Creates a runner for the [recorder].
///
/// All arguments must not be null.
Runner({
@required this.recorder,
@required this.setUpAllDidRun,
@required this.tearDownAllWillRun,
});
/// The recorder that will run and record the benchmark.
final Recorder recorder;
/// Called immediately after the [Recorder.setUpAll] future is resolved.
///
/// This is useful, for example, to kick off a profiler or a tracer such that
/// the "set up" computations are not included in the metrics.
final AsyncVoidCallback setUpAllDidRun;
/// Called just before calling [Recorder.tearDownAll].
///
/// This is useful, for example, to stop a profiler or a tracer such that
/// the "tear down" computations are not included in the metrics.
final AsyncVoidCallback tearDownAllWillRun;
/// Runs the benchmark and reports the results.
Future<Profile> run() async {
await recorder.setUpAll();
await setUpAllDidRun();
final Profile profile = await recorder.run();
await tearDownAllWillRun();
await recorder.tearDownAll();
return profile;
}
}
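// Illustrative only: a minimal sketch of how a Runner is wired up, mirroring
// the usage in main() later in this change. The tracing calls assume the
// LocalBenchmarkServerClient API added below; skipping them when the local
// server is unavailable is handled in the real call site.
Future<Profile> runRecorderWithTracing(
  Recorder recorder,
  LocalBenchmarkServerClient client,
) {
  final Runner runner = Runner(
    recorder: recorder,
    // Start the external tracer only after setUpAll, so set-up work is not
    // included in the recorded trace.
    setUpAllDidRun: () => client.startPerformanceTracing(recorder.name),
    // Stop the tracer before tearDownAll, so tear-down work is excluded too.
    tearDownAllWillRun: () => client.stopPerformanceTracing(),
  );
  return runner.run();
}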
/// Base class for benchmark recorders. /// Base class for benchmark recorders.
/// ///
/// Each benchmark recorder has a [name] and a [run] method at a minimum. /// Each benchmark recorder has a [name] and a [run] method at a minimum.
...@@ -56,8 +103,20 @@ abstract class Recorder { ...@@ -56,8 +103,20 @@ abstract class Recorder {
/// prefix. /// prefix.
final String name; final String name;
/// Called once before all runs of this benchmark recorder.
///
/// This is useful for doing one-time setup work that's needed for the
/// benchmark.
Future<void> setUpAll() async {}
/// The implementation of the benchmark that will produce a [Profile]. /// The implementation of the benchmark that will produce a [Profile].
Future<Profile> run(); Future<Profile> run();
/// Called once after all runs of this benchmark recorder.
///
/// This is useful for doing one-time clean up work after the benchmark is
/// complete.
Future<void> tearDownAll() async {}
} }
/// A recorder for benchmarking raw execution of Dart code. /// A recorder for benchmarking raw execution of Dart code.
...@@ -86,18 +145,6 @@ abstract class Recorder { ...@@ -86,18 +145,6 @@ abstract class Recorder {
abstract class RawRecorder extends Recorder { abstract class RawRecorder extends Recorder {
RawRecorder({@required String name}) : super._(name); RawRecorder({@required String name}) : super._(name);
/// Called once before all runs of this benchmark recorder.
///
/// This is useful for doing one-time setup work that's needed for the
/// benchmark.
void setUpAll() {}
/// Called once after all runs of this benchmark recorder.
///
/// This is useful for doing one-time clean up work after the benchmark is
/// complete.
void tearDownAll() {}
/// The body of the benchmark. /// The body of the benchmark.
/// ///
/// This is the part that records measurements of the benchmark. /// This is the part that records measurements of the benchmark.
...@@ -107,12 +154,10 @@ abstract class RawRecorder extends Recorder { ...@@ -107,12 +154,10 @@ abstract class RawRecorder extends Recorder {
@nonVirtual @nonVirtual
Future<Profile> run() async { Future<Profile> run() async {
final Profile profile = Profile(name: name); final Profile profile = Profile(name: name);
setUpAll();
do { do {
await Future<void>.delayed(Duration.zero); await Future<void>.delayed(Duration.zero);
body(profile); body(profile);
} while (profile.shouldContinue()); } while (profile.shouldContinue());
tearDownAll();
return profile; return profile;
} }
} }
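// Illustrative only: a minimal RawRecorder subclass sketch. The benchmark
// body is invoked repeatedly until Profile.shouldContinue() returns false,
// and records its own measurements directly into the profile.
class BenchExample extends RawRecorder {
  BenchExample() : super(name: 'example_raw_benchmark');

  @override
  void body(Profile profile) {
    profile.record('exampleDuration', () {
      // ... the code being measured goes here ...
    });
  }
}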
...@@ -162,24 +207,36 @@ abstract class SceneBuilderRecorder extends Recorder { ...@@ -162,24 +207,36 @@ abstract class SceneBuilderRecorder extends Recorder {
final Profile profile = Profile(name: name); final Profile profile = Profile(name: name);
window.onBeginFrame = (_) { window.onBeginFrame = (_) {
onBeginFrame(); try {
startMeasureFrame();
onBeginFrame();
} catch (error, stackTrace) {
profileCompleter.completeError(error, stackTrace);
rethrow;
}
}; };
window.onDrawFrame = () { window.onDrawFrame = () {
profile.record('drawFrameDuration', () { try {
final SceneBuilder sceneBuilder = SceneBuilder(); profile.record('drawFrameDuration', () {
onDrawFrame(sceneBuilder); final SceneBuilder sceneBuilder = SceneBuilder();
profile.record('sceneBuildDuration', () { onDrawFrame(sceneBuilder);
final Scene scene = sceneBuilder.build(); profile.record('sceneBuildDuration', () {
profile.record('windowRenderDuration', () { final Scene scene = sceneBuilder.build();
window.render(scene); profile.record('windowRenderDuration', () {
window.render(scene);
});
}); });
}); });
}); endMeasureFrame();
if (profile.shouldContinue()) { if (profile.shouldContinue()) {
window.scheduleFrame(); window.scheduleFrame();
} else { } else {
profileCompleter.complete(profile); profileCompleter.complete(profile);
}
} catch (error, stackTrace) {
profileCompleter.completeError(error, stackTrace);
rethrow;
} }
}; };
window.scheduleFrame(); window.scheduleFrame();
...@@ -270,12 +327,14 @@ abstract class WidgetRecorder extends Recorder ...@@ -270,12 +327,14 @@ abstract class WidgetRecorder extends Recorder
@override @override
@mustCallSuper @mustCallSuper
void frameWillDraw() { void frameWillDraw() {
startMeasureFrame();
_drawFrameStopwatch = Stopwatch()..start(); _drawFrameStopwatch = Stopwatch()..start();
} }
@override @override
@mustCallSuper @mustCallSuper
void frameDidDraw() { void frameDidDraw() {
endMeasureFrame();
profile.addDataPoint('drawFrameDuration', _drawFrameStopwatch.elapsed); profile.addDataPoint('drawFrameDuration', _drawFrameStopwatch.elapsed);
if (profile.shouldContinue()) { if (profile.shouldContinue()) {
...@@ -323,18 +382,6 @@ abstract class WidgetBuildRecorder extends Recorder ...@@ -323,18 +382,6 @@ abstract class WidgetBuildRecorder extends Recorder
/// consider using [WidgetRecorder]. /// consider using [WidgetRecorder].
Widget createWidget(); Widget createWidget();
/// Called once before all runs of this benchmark recorder.
///
/// This is useful for doing one-time setup work that's needed for the
/// benchmark.
void setUpAll() {}
/// Called once after all runs of this benchmark recorder.
///
/// This is useful for doing one-time clean up work after the benchmark is
/// complete.
void tearDownAll() {}
@override @override
Profile profile; Profile profile;
...@@ -361,7 +408,10 @@ abstract class WidgetBuildRecorder extends Recorder ...@@ -361,7 +408,10 @@ abstract class WidgetBuildRecorder extends Recorder
@override @override
@mustCallSuper @mustCallSuper
void frameWillDraw() { void frameWillDraw() {
_drawFrameStopwatch = Stopwatch()..start(); if (showWidget) {
startMeasureFrame();
_drawFrameStopwatch = Stopwatch()..start();
}
} }
@override @override
...@@ -369,6 +419,7 @@ abstract class WidgetBuildRecorder extends Recorder ...@@ -369,6 +419,7 @@ abstract class WidgetBuildRecorder extends Recorder
void frameDidDraw() { void frameDidDraw() {
// Only record frames that show the widget. // Only record frames that show the widget.
if (showWidget) { if (showWidget) {
endMeasureFrame();
profile.addDataPoint('drawFrameDuration', _drawFrameStopwatch.elapsed); profile.addDataPoint('drawFrameDuration', _drawFrameStopwatch.elapsed);
} }
...@@ -388,13 +439,11 @@ abstract class WidgetBuildRecorder extends Recorder ...@@ -388,13 +439,11 @@ abstract class WidgetBuildRecorder extends Recorder
@override @override
Future<Profile> run() { Future<Profile> run() {
profile = Profile(name: name); profile = Profile(name: name);
setUpAll();
final _RecordingWidgetsBinding binding = final _RecordingWidgetsBinding binding =
_RecordingWidgetsBinding.ensureInitialized(); _RecordingWidgetsBinding.ensureInitialized();
binding._beginRecording(this, _WidgetBuildRecorderHost(this)); binding._beginRecording(this, _WidgetBuildRecorderHost(this));
_profileCompleter.future.whenComplete(() { _profileCompleter.future.whenComplete(() {
tearDownAll();
profile = null; profile = null;
}); });
return _profileCompleter.future; return _profileCompleter.future;
...@@ -433,7 +482,9 @@ class _WidgetBuildRecorderHostState extends State<_WidgetBuildRecorderHost> { ...@@ -433,7 +482,9 @@ class _WidgetBuildRecorderHostState extends State<_WidgetBuildRecorderHost> {
/// calculations will only apply to the latest [_kMeasuredSampleCount] data /// calculations will only apply to the latest [_kMeasuredSampleCount] data
/// points. /// points.
class Timeseries { class Timeseries {
Timeseries(); Timeseries(this.name);
final String name;
/// List of all the values that have been recorded. /// List of all the values that have been recorded.
/// ///
...@@ -450,14 +501,28 @@ class Timeseries { ...@@ -450,14 +501,28 @@ class Timeseries {
/// because of the sample size limit. /// because of the sample size limit.
int get count => _allValues.length; int get count => _allValues.length;
double get average => _computeMean(_measuredValues); /// Computes the average value of the measured values.
double get average => _computeAverage(name, _measuredValues);
/// Computes the standard deviation of the measured values.
double get standardDeviation => double get standardDeviation =>
_computeStandardDeviationForPopulation(_measuredValues); _computeStandardDeviationForPopulation(name, _measuredValues);
double get noise => standardDeviation / average; /// Computes noise as a multiple of the [average] value.
///
/// This value can be multiplied by 100.0 to get noise as a percentage of
/// the average.
///
  /// If [average] is zero, treats the result as a perfect score and returns zero.
double get noise => average > 0.0 ? standardDeviation / average : 0.0;
/// Adds a value to this timeseries.
void add(num value) { void add(num value) {
if (value < 0.0) {
throw StateError(
'Timeseries $name: negative metric values are not supported. Got: $value',
);
}
_measuredValues.add(value); _measuredValues.add(value);
_allValues.add(value); _allValues.add(value);
// Don't let the [_measuredValues] list grow beyond [_kMeasuredSampleCount]. // Don't let the [_measuredValues] list grow beyond [_kMeasuredSampleCount].
...@@ -488,7 +553,7 @@ class Profile { ...@@ -488,7 +553,7 @@ class Profile {
} }
void addDataPoint(String key, Duration duration) { void addDataPoint(String key, Duration duration) {
scoreData.putIfAbsent(key, () => Timeseries()).add(duration.inMicroseconds); scoreData.putIfAbsent(key, () => Timeseries(key)).add(duration.inMicroseconds);
} }
/// Decides whether the data collected so far is sufficient to stop, or /// Decides whether the data collected so far is sufficient to stop, or
...@@ -599,7 +664,11 @@ class Profile { ...@@ -599,7 +664,11 @@ class Profile {
} }
/// Computes the arithmetic mean (or average) of given [values]. /// Computes the arithmetic mean (or average) of given [values].
double _computeMean(Iterable<num> values) { double _computeAverage(String label, Iterable<num> values) {
if (values.isEmpty) {
throw StateError('$label: attempted to compute an average of an empty value list.');
}
final num sum = values.reduce((num a, num b) => a + b); final num sum = values.reduce((num a, num b) => a + b);
return sum / values.length; return sum / values.length;
} }
...@@ -611,8 +680,11 @@ double _computeMean(Iterable<num> values) { ...@@ -611,8 +680,11 @@ double _computeMean(Iterable<num> values) {
/// See also: /// See also:
/// ///
/// * https://en.wikipedia.org/wiki/Standard_deviation /// * https://en.wikipedia.org/wiki/Standard_deviation
double _computeStandardDeviationForPopulation(Iterable<num> population) { double _computeStandardDeviationForPopulation(String label, Iterable<num> population) {
final double mean = _computeMean(population); if (population.isEmpty) {
    throw StateError('$label: attempted to compute the standard deviation of an empty population.');
}
final double mean = _computeAverage(label, population);
final double sumOfSquaredDeltas = population.fold<double>( final double sumOfSquaredDeltas = population.fold<double>(
0.0, 0.0,
(double previous, num value) => previous += math.pow(value - mean, 2), (double previous, num value) => previous += math.pow(value - mean, 2),
...@@ -676,17 +748,21 @@ class _RecordingWidgetsBinding extends BindingBase ...@@ -676,17 +748,21 @@ class _RecordingWidgetsBinding extends BindingBase
// Fail hard and fast on errors. Benchmarks should not have any errors. // Fail hard and fast on errors. Benchmarks should not have any errors.
FlutterError.onError = (FlutterErrorDetails details) { FlutterError.onError = (FlutterErrorDetails details) {
if (_hasErrored) { _haltBenchmarkWithError(details.exception, details.stack);
return;
}
_listener._onError(details.exception, details.stack);
_hasErrored = true;
originalOnError(details); originalOnError(details);
}; };
_listener = recorder; _listener = recorder;
runApp(widget); runApp(widget);
} }
void _haltBenchmarkWithError(dynamic error, StackTrace stackTrace) {
if (_hasErrored) {
return;
}
_listener._onError(error, stackTrace);
_hasErrored = true;
}
/// To avoid calling [Profile.shouldContinue] every time [scheduleFrame] is /// To avoid calling [Profile.shouldContinue] every time [scheduleFrame] is
/// called, we cache this value at the beginning of the frame. /// called, we cache this value at the beginning of the frame.
bool _benchmarkStopped = false; bool _benchmarkStopped = false;
...@@ -697,8 +773,13 @@ class _RecordingWidgetsBinding extends BindingBase ...@@ -697,8 +773,13 @@ class _RecordingWidgetsBinding extends BindingBase
if (_hasErrored) { if (_hasErrored) {
return; return;
} }
_benchmarkStopped = !_listener.profile.shouldContinue(); try {
super.handleBeginFrame(rawTimeStamp); _benchmarkStopped = !_listener.profile.shouldContinue();
super.handleBeginFrame(rawTimeStamp);
} catch (error, stackTrace) {
_haltBenchmarkWithError(error, stackTrace);
rethrow;
}
} }
@override @override
...@@ -715,8 +796,40 @@ class _RecordingWidgetsBinding extends BindingBase ...@@ -715,8 +796,40 @@ class _RecordingWidgetsBinding extends BindingBase
if (_hasErrored) { if (_hasErrored) {
return; return;
} }
_listener.frameWillDraw(); try {
super.handleDrawFrame(); _listener.frameWillDraw();
_listener.frameDidDraw(); super.handleDrawFrame();
_listener.frameDidDraw();
} catch (error, stackTrace) {
_haltBenchmarkWithError(error, stackTrace);
rethrow;
}
} }
} }
int _currentFrameNumber = 1;
/// Adds a marker indicating the beginning of frame rendering.
///
/// This adds an event to the performance trace used to find measured frames in
/// Chrome tracing data. The tracing data contains all frames, but some
/// benchmarks are only interested in a subset of frames. For example,
/// [WidgetBuildRecorder] only measures frames that build widgets, and ignores
/// frames that clear the screen.
void startMeasureFrame() {
html.window.performance.mark('measured_frame_start#$_currentFrameNumber');
}
/// Signals the end of a measured frame.
///
/// See [startMeasureFrame] for details on what this instrumentation is used
/// for.
void endMeasureFrame() {
html.window.performance.mark('measured_frame_end#$_currentFrameNumber');
html.window.performance.measure(
'measured_frame',
'measured_frame_start#$_currentFrameNumber',
'measured_frame_end#$_currentFrameNumber',
);
_currentFrameNumber += 1;
}
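// Illustrative only: roughly how the marks above are expected to appear in
// the collected Chrome trace, based on the blink.user_timing category and the
// isBeginMeasuredFrame/isEndMeasuredFrame predicates defined on
// BlinkTraceEvent later in this change. Field values are made up.
const Map<String, dynamic> exampleMeasuredFrameBeginEvent = <String, dynamic>{
  'name': 'measured_frame',
  'cat': 'blink.user_timing',
  'ph': 'b', // async "begin"; the matching end event uses 'e'
  'pid': 42,
  'tid': 1,
  'ts': 1000000,
};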
...@@ -32,6 +32,8 @@ final Map<String, RecorderFactory> benchmarks = <String, RecorderFactory>{ ...@@ -32,6 +32,8 @@ final Map<String, RecorderFactory> benchmarks = <String, RecorderFactory>{
BenchSimpleLazyTextScroll.benchmarkName: () => BenchSimpleLazyTextScroll(), BenchSimpleLazyTextScroll.benchmarkName: () => BenchSimpleLazyTextScroll(),
BenchBuildMaterialCheckbox.benchmarkName: () => BenchBuildMaterialCheckbox(), BenchBuildMaterialCheckbox.benchmarkName: () => BenchBuildMaterialCheckbox(),
BenchDynamicClipOnStaticPicture.benchmarkName: () => BenchDynamicClipOnStaticPicture(), BenchDynamicClipOnStaticPicture.benchmarkName: () => BenchDynamicClipOnStaticPicture(),
if (isCanvasKit)
BenchBuildColorsGrid.canvasKitBenchmarkName: () => BenchBuildColorsGrid.canvasKit(),
// Benchmarks that we don't want to run using CanvasKit. // Benchmarks that we don't want to run using CanvasKit.
if (!isCanvasKit) ...<String, RecorderFactory>{ if (!isCanvasKit) ...<String, RecorderFactory>{
...@@ -39,37 +41,23 @@ final Map<String, RecorderFactory> benchmarks = <String, RecorderFactory>{ ...@@ -39,37 +41,23 @@ final Map<String, RecorderFactory> benchmarks = <String, RecorderFactory>{
BenchTextLayout.canvasBenchmarkName: () => BenchTextLayout(useCanvas: true), BenchTextLayout.canvasBenchmarkName: () => BenchTextLayout(useCanvas: true),
BenchTextCachedLayout.domBenchmarkName: () => BenchTextCachedLayout(useCanvas: false), BenchTextCachedLayout.domBenchmarkName: () => BenchTextCachedLayout(useCanvas: false),
BenchTextCachedLayout.canvasBenchmarkName: () => BenchTextCachedLayout(useCanvas: true), BenchTextCachedLayout.canvasBenchmarkName: () => BenchTextCachedLayout(useCanvas: true),
BenchBuildColorsGrid.domBenchmarkName: () => BenchBuildColorsGrid(useCanvas: false), BenchBuildColorsGrid.domBenchmarkName: () => BenchBuildColorsGrid.dom(),
BenchBuildColorsGrid.canvasBenchmarkName: () => BenchBuildColorsGrid(useCanvas: true), BenchBuildColorsGrid.canvasBenchmarkName: () => BenchBuildColorsGrid.canvas(),
} }
}; };
/// Whether we fell back to manual mode. final LocalBenchmarkServerClient _client = LocalBenchmarkServerClient();
///
/// This happens when you run benchmarks using plain `flutter run` rather than
/// devicelab test harness. The test harness spins up a special server that
/// provides API for automatically picking the next benchmark to run.
bool isInManualMode = false;
Future<void> main() async { Future<void> main() async {
// Check if the benchmark server wants us to run a specific benchmark. // Check if the benchmark server wants us to run a specific benchmark.
final html.HttpRequest request = await requestXhr( final String nextBenchmark = await _client.requestNextBenchmark();
'/next-benchmark',
method: 'POST', if (nextBenchmark == LocalBenchmarkServerClient.kManualFallback) {
mimeType: 'application/json',
sendData: json.encode(benchmarks.keys.toList()),
);
// 404 is expected in the following cases:
  // - The benchmark is run using plain `flutter run`, which does not provide a "next-benchmark" handler.
  // - We ran all benchmarks and the benchmark server is telling us there are no more benchmarks to run.
if (request.status == 404) {
_fallbackToManual('The server did not tell us which benchmark to run next.'); _fallbackToManual('The server did not tell us which benchmark to run next.');
return; return;
} }
final String benchmarkName = request.responseText; await _runBenchmark(nextBenchmark);
await _runBenchmark(benchmarkName);
html.window.location.reload(); html.window.location.reload();
} }
...@@ -81,44 +69,36 @@ Future<void> _runBenchmark(String benchmarkName) async { ...@@ -81,44 +69,36 @@ Future<void> _runBenchmark(String benchmarkName) async {
return; return;
} }
final Recorder recorder = recorderFactory();
try { try {
final Profile profile = await recorder.run(); final Runner runner = Runner(
if (!isInManualMode) { recorder: recorderFactory(),
final html.HttpRequest request = await html.HttpRequest.request( setUpAllDidRun: () async {
'/profile-data', if (!_client.isInManualMode) {
method: 'POST', await _client.startPerformanceTracing(benchmarkName);
mimeType: 'application/json', }
sendData: json.encode(profile.toJson()), },
); tearDownAllWillRun: () async {
if (request.status != 200) { if (!_client.isInManualMode) {
throw Exception( await _client.stopPerformanceTracing();
'Failed to report profile data to benchmark server. ' }
'The server responded with status code ${request.status}.' },
); );
}
final Profile profile = await runner.run();
if (!_client.isInManualMode) {
await _client.sendProfileData(profile);
} else { } else {
print(profile); print(profile);
} }
} catch (error, stackTrace) { } catch (error, stackTrace) {
if (isInManualMode) { if (_client.isInManualMode) {
rethrow; rethrow;
} }
await html.HttpRequest.request( await _client.reportError(error, stackTrace);
'/on-error',
method: 'POST',
mimeType: 'application/json',
sendData: json.encode(<String, dynamic>{
'error': '$error',
'stackTrace': '$stackTrace',
}),
);
} }
} }
void _fallbackToManual(String error) { void _fallbackToManual(String error) {
isInManualMode = true;
html.document.body.appendHtml(''' html.document.body.appendHtml('''
<div id="manual-panel"> <div id="manual-panel">
<h3>$error</h3> <h3>$error</h3>
...@@ -146,50 +126,157 @@ void _fallbackToManual(String error) { ...@@ -146,50 +126,157 @@ void _fallbackToManual(String error) {
} }
} }
Future<html.HttpRequest> requestXhr( /// Implements the client REST API for the local benchmark server.
String url, { ///
String method, /// The local server is optional. If it is not available the benchmark UI must
bool withCredentials, /// implement a manual fallback. This allows debugging benchmarks using plain
String responseType, /// `flutter run`.
String mimeType, class LocalBenchmarkServerClient {
Map<String, String> requestHeaders, /// This value is returned by [requestNextBenchmark].
dynamic sendData, static const String kManualFallback = '__manual_fallback__';
}) {
final Completer<html.HttpRequest> completer = Completer<html.HttpRequest>(); /// Whether we fell back to manual mode.
final html.HttpRequest xhr = html.HttpRequest(); ///
/// This happens when you run benchmarks using plain `flutter run` rather than
method ??= 'GET'; /// devicelab test harness. The test harness spins up a special server that
xhr.open(method, url, async: true); /// provides API for automatically picking the next benchmark to run.
bool isInManualMode;
if (withCredentials != null) {
xhr.withCredentials = withCredentials; /// Asks the local server for the name of the next benchmark to run.
} ///
/// Returns [kManualFallback] if local server is not available (uses 404 as a
/// signal).
Future<String> requestNextBenchmark() async {
final html.HttpRequest request = await _requestXhr(
'/next-benchmark',
method: 'POST',
mimeType: 'application/json',
sendData: json.encode(benchmarks.keys.toList()),
);
// 404 is expected in the following cases:
    // - The benchmark is run using plain `flutter run`, which does not provide a "next-benchmark" handler.
    // - We ran all benchmarks and the benchmark server is telling us there are no more benchmarks to run.
if (request.status == 404) {
isInManualMode = true;
return kManualFallback;
}
if (responseType != null) { isInManualMode = false;
xhr.responseType = responseType; return request.responseText;
} }
if (mimeType != null) { void _checkNotManualMode() {
xhr.overrideMimeType(mimeType); if (isInManualMode) {
throw StateError('Operation not supported in manual fallback mode.');
}
} }
if (requestHeaders != null) { /// Asks the local server to begin tracing performance.
requestHeaders.forEach((String header, String value) { ///
xhr.setRequestHeader(header, value); /// This uses the chrome://tracing tracer, which is not available from within
}); /// the page itself, and therefore must be controlled from outside using the
/// DevTools Protocol.
Future<void> startPerformanceTracing(String benchmarkName) async {
_checkNotManualMode();
await html.HttpRequest.request(
'/start-performance-tracing?label=$benchmarkName',
method: 'POST',
mimeType: 'application/json',
);
} }
xhr.onLoad.listen((html.ProgressEvent e) { /// Stops the performance tracing session started by [startPerformanceTracing].
completer.complete(xhr); Future<void> stopPerformanceTracing() async {
}); _checkNotManualMode();
await html.HttpRequest.request(
'/stop-performance-tracing',
method: 'POST',
mimeType: 'application/json',
);
}
xhr.onError.listen(completer.completeError); /// Sends the profile data collected by the benchmark to the local benchmark
/// server.
Future<void> sendProfileData(Profile profile) async {
_checkNotManualMode();
final html.HttpRequest request = await html.HttpRequest.request(
'/profile-data',
method: 'POST',
mimeType: 'application/json',
sendData: json.encode(profile.toJson()),
);
if (request.status != 200) {
throw Exception(
'Failed to report profile data to benchmark server. '
'The server responded with status code ${request.status}.'
);
}
}
if (sendData != null) { /// Reports an error to the benchmark server.
xhr.send(sendData); ///
} else { /// The server will halt the devicelab task and log the error.
xhr.send(); Future<void> reportError(dynamic error, StackTrace stackTrace) async {
_checkNotManualMode();
await html.HttpRequest.request(
'/on-error',
method: 'POST',
mimeType: 'application/json',
sendData: json.encode(<String, dynamic>{
'error': '$error',
'stackTrace': '$stackTrace',
}),
);
} }
return completer.future; /// This is the same as calling [html.HttpRequest.request] but it doesn't
/// crash on 404, which we use to detect `flutter run`.
Future<html.HttpRequest> _requestXhr(
String url, {
String method,
bool withCredentials,
String responseType,
String mimeType,
Map<String, String> requestHeaders,
dynamic sendData,
}) {
final Completer<html.HttpRequest> completer = Completer<html.HttpRequest>();
final html.HttpRequest xhr = html.HttpRequest();
method ??= 'GET';
xhr.open(method, url, async: true);
if (withCredentials != null) {
xhr.withCredentials = withCredentials;
}
if (responseType != null) {
xhr.responseType = responseType;
}
if (mimeType != null) {
xhr.overrideMimeType(mimeType);
}
if (requestHeaders != null) {
requestHeaders.forEach((String header, String value) {
xhr.setRequestHeader(header, value);
});
}
xhr.onLoad.listen((html.ProgressEvent e) {
completer.complete(xhr);
});
xhr.onError.listen(completer.completeError);
if (sendData != null) {
xhr.send(sendData);
} else {
xhr.send();
}
return completer.future;
}
} }
...@@ -2,11 +2,19 @@ ...@@ -2,11 +2,19 @@
// Use of this source code is governed by a BSD-style license that can be // Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. // found in the LICENSE file.
import 'dart:async';
import 'dart:convert' show json, utf8, LineSplitter, JsonEncoder;
import 'dart:io' as io; import 'dart:io' as io;
import 'dart:math' as math;
import 'package:meta/meta.dart'; import 'package:meta/meta.dart';
import 'package:webkit_inspection_protocol/webkit_inspection_protocol.dart';
import 'utils.dart' show forwardStandardStreams; /// The number of samples used to extract metrics, such as noise, means,
/// max/min values.
///
/// Keep this constant in sync with the same constant defined in `dev/benchmarks/macrobenchmarks/lib/src/web/recorder.dart`.
const int _kMeasuredSampleCount = 10;
/// Options passed to Chrome when launching it. /// Options passed to Chrome when launching it.
class ChromeOptions { class ChromeOptions {
...@@ -52,7 +60,7 @@ typedef ChromeErrorCallback = void Function(String); ...@@ -52,7 +60,7 @@ typedef ChromeErrorCallback = void Function(String);
/// Manages a single Chrome process. /// Manages a single Chrome process.
class Chrome { class Chrome {
Chrome._(this._chromeProcess, this._onError) { Chrome._(this._chromeProcess, this._onError, this._debugConnection) {
// If the Chrome process quits before it was asked to quit, notify the // If the Chrome process quits before it was asked to quit, notify the
// error listener. // error listener.
_chromeProcess.exitCode.then((int exitCode) { _chromeProcess.exitCode.then((int exitCode) {
...@@ -71,6 +79,7 @@ class Chrome { ...@@ -71,6 +79,7 @@ class Chrome {
final io.ProcessResult versionResult = io.Process.runSync(_findSystemChromeExecutable(), const <String>['--version']); final io.ProcessResult versionResult = io.Process.runSync(_findSystemChromeExecutable(), const <String>['--version']);
print('Launching ${versionResult.stdout}'); print('Launching ${versionResult.stdout}');
final bool withDebugging = options.debugPort != null;
final List<String> args = <String>[ final List<String> args = <String>[
if (options.userDataDirectory != null) if (options.userDataDirectory != null)
'--user-data-dir=${options.userDataDirectory}', '--user-data-dir=${options.userDataDirectory}',
...@@ -80,7 +89,7 @@ class Chrome { ...@@ -80,7 +89,7 @@ class Chrome {
'--no-sandbox', '--no-sandbox',
if (options.headless) if (options.headless)
'--headless', '--headless',
if (options.debugPort != null) if (withDebugging)
'--remote-debugging-port=${options.debugPort}', '--remote-debugging-port=${options.debugPort}',
'--window-size=${options.windowWidth},${options.windowHeight}', '--window-size=${options.windowWidth},${options.windowHeight}',
'--disable-extensions', '--disable-extensions',
...@@ -97,14 +106,89 @@ class Chrome { ...@@ -97,14 +106,89 @@ class Chrome {
args, args,
workingDirectory: workingDirectory, workingDirectory: workingDirectory,
); );
forwardStandardStreams(chromeProcess);
return Chrome._(chromeProcess, onError); WipConnection debugConnection;
if (withDebugging) {
debugConnection = await _connectToChromeDebugPort(chromeProcess, options.debugPort);
}
return Chrome._(chromeProcess, onError, debugConnection);
} }
final io.Process _chromeProcess; final io.Process _chromeProcess;
final ChromeErrorCallback _onError; final ChromeErrorCallback _onError;
final WipConnection _debugConnection;
bool _isStopped = false; bool _isStopped = false;
Completer<void> _tracingCompleter;
StreamSubscription<WipEvent> _tracingSubscription;
List<Map<String, dynamic>> _tracingData;
/// Starts recording a performance trace.
///
/// If there is already a tracing session in progress, throws an error. Call
/// [endRecordingPerformance] before starting a new tracing session.
///
/// The [label] is for debugging convenience.
Future<void> beginRecordingPerformance(String label) async {
if (_tracingCompleter != null) {
throw StateError(
'Cannot start a new performance trace. A tracing session labeled '
'"$label" is already in progress.'
);
}
_tracingCompleter = Completer<void>();
_tracingData = <Map<String, dynamic>>[];
// Subscribe to tracing events prior to calling "Tracing.start". Otherwise,
// we'll miss tracing data.
_tracingSubscription = _debugConnection.onNotification.listen((WipEvent event) {
// We receive data as a sequence of "Tracing.dataCollected" followed by
// "Tracing.tracingComplete" at the end. Until "Tracing.tracingComplete"
// is received, the data may be incomplete.
if (event.method == 'Tracing.tracingComplete') {
_tracingCompleter.complete();
_tracingSubscription.cancel();
_tracingSubscription = null;
} else if (event.method == 'Tracing.dataCollected') {
final dynamic value = event.params['value'];
if (value is! List) {
throw FormatException('"Tracing.dataCollected" returned malformed data. '
'Expected a List but got: ${value.runtimeType}');
}
_tracingData.addAll((event.params['value'] as List<dynamic>).cast<Map<String, dynamic>>());
}
});
await _debugConnection.sendCommand('Tracing.start', <String, dynamic>{
// The choice of categories is as follows:
//
// blink:
// provides everything on the UI thread, including scripting,
// style recalculations, layout, painting, and some compositor
// work.
// blink.user_timing:
// provides marks recorded using window.performance. We use marks
// to find frames that the benchmark cares to measure.
// gpu:
      //    provides tracing data from the GPU.
// TODO(yjbanov): extract useful GPU data
'categories': 'blink,blink.user_timing,gpu',
'transferMode': 'SendAsStream',
});
}
/// Stops a performance tracing session started by [beginRecordingPerformance].
///
/// Returns all the collected tracing data unfiltered.
Future<List<Map<String, dynamic>>> endRecordingPerformance() async {
await _debugConnection.sendCommand('Tracing.end');
await _tracingCompleter.future;
final List<Map<String, dynamic>> data = _tracingData;
_tracingCompleter = null;
_tracingData = null;
return data;
}
/// Stops the Chrome process. /// Stops the Chrome process.
void stop() { void stop() {
_isStopped = true; _isStopped = true;
...@@ -136,3 +220,324 @@ String _findSystemChromeExecutable() { ...@@ -136,3 +220,324 @@ String _findSystemChromeExecutable() {
throw Exception('Web benchmarks cannot run on ${io.Platform.operatingSystem} yet.'); throw Exception('Web benchmarks cannot run on ${io.Platform.operatingSystem} yet.');
} }
} }
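// Illustrative only: the tracing round trip offered by the Chrome class above,
// as driven by the local benchmark server later in this change. The step that
// runs the benchmark in the browser is a placeholder.
Future<BlinkTraceSummary> traceOneBenchmark(Chrome chrome, String benchmarkName) async {
  await chrome.beginRecordingPerformance(benchmarkName);
  // ... let the browser run the benchmark to completion here ...
  final List<Map<String, dynamic>> traceJson = await chrome.endRecordingPerformance();
  return BlinkTraceSummary.fromJson(traceJson);
}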
/// Waits for Chrome to print DevTools URI and connects to it.
Future<WipConnection> _connectToChromeDebugPort(io.Process chromeProcess, int port) async {
chromeProcess.stdout
.transform(utf8.decoder)
.transform(const LineSplitter())
.listen((String line) {
print('[CHROME]: $line');
});
await chromeProcess.stderr
.transform(utf8.decoder)
.transform(const LineSplitter())
.map((String line) {
print('[CHROME]: $line');
return line;
})
.firstWhere((String line) => line.startsWith('DevTools listening'), orElse: () {
throw Exception('Expected Chrome to print "DevTools listening" string '
'with DevTools URL, but the string was never printed.');
});
final Uri devtoolsUri = await _getRemoteDebuggerUrl(Uri.parse('http://localhost:$port'));
print('Connecting to DevTools: $devtoolsUri');
final ChromeConnection chromeConnection = ChromeConnection('localhost', port);
final Iterable<ChromeTab> tabs = (await chromeConnection.getTabs()).where((ChromeTab tab) {
return tab.url.startsWith('http://localhost');
});
final ChromeTab tab = tabs.single;
final WipConnection debugConnection = await tab.connect();
print('Connected to Chrome tab: ${tab.title} (${tab.url})');
return debugConnection;
}
/// Gets the Chrome debugger URL for the web page being benchmarked.
Future<Uri> _getRemoteDebuggerUrl(Uri base) async {
final io.HttpClient client = io.HttpClient();
final io.HttpClientRequest request = await client.getUrl(base.resolve('/json/list'));
final io.HttpClientResponse response = await request.close();
final List<dynamic> jsonObject = await json.fuse(utf8).decoder.bind(response).single as List<dynamic>;
if (jsonObject == null || jsonObject.isEmpty) {
return base;
}
return base.resolve(jsonObject.first['webSocketDebuggerUrl'] as String);
}
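// Illustrative only: the approximate shape of the /json/list response read
// above. Only webSocketDebuggerUrl is relied upon; the other fields (and all
// values) are examples and vary between Chrome versions.
const String exampleJsonListResponse = '''
[
  {
    "type": "page",
    "title": "macrobenchmarks",
    "url": "http://localhost:9999/index.html",
    "webSocketDebuggerUrl": "ws://localhost:10000/devtools/page/0000"
  }
]
''';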
/// Summarizes a Blink trace down to a few interesting values.
class BlinkTraceSummary {
BlinkTraceSummary._({
@required this.averageBeginFrameTime,
@required this.averageUpdateLifecyclePhasesTime,
}) : averageTotalUIFrameTime = averageBeginFrameTime + averageUpdateLifecyclePhasesTime;
static BlinkTraceSummary fromJson(List<Map<String, dynamic>> traceJson) {
try {
// Convert raw JSON data to BlinkTraceEvent objects sorted by timestamp.
List<BlinkTraceEvent> events = traceJson
.map<BlinkTraceEvent>(BlinkTraceEvent.fromJson)
.toList()
..sort((BlinkTraceEvent a, BlinkTraceEvent b) => a.ts - b.ts);
// Filter out data from unrelated processes
final BlinkTraceEvent processLabel = events
.where((BlinkTraceEvent event) => event.isCrRendererMain)
.single;
final int tabPid = processLabel.pid;
events = events.where((BlinkTraceEvent element) => element.pid == tabPid).toList();
// Extract frame data.
final List<BlinkFrame> frames = <BlinkFrame>[];
int skipCount = 0;
BlinkFrame frame = BlinkFrame();
for (final BlinkTraceEvent event in events) {
if (event.isBeginFrame) {
frame.beginFrame = event;
} else if (event.isUpdateAllLifecyclePhases) {
frame.updateAllLifecyclePhases = event;
if (frame.endMeasuredFrame != null) {
frames.add(frame);
} else {
skipCount += 1;
}
frame = BlinkFrame();
} else if (event.isBeginMeasuredFrame) {
frame.beginMeasuredFrame = event;
} else if (event.isEndMeasuredFrame) {
frame.endMeasuredFrame = event;
}
}
print('Extracted ${frames.length} measured frames.');
print('Skipped $skipCount non-measured frames.');
if (frames.isEmpty) {
// The benchmark is not measuring frames.
return null;
}
// Compute averages and summarize.
return BlinkTraceSummary._(
averageBeginFrameTime: _computeAverageDuration(frames.map((BlinkFrame frame) => frame.beginFrame).toList()),
averageUpdateLifecyclePhasesTime: _computeAverageDuration(frames.map((BlinkFrame frame) => frame.updateAllLifecyclePhases).toList()),
);
} catch (_, __) {
final io.File traceFile = io.File('./chrome-trace.json');
io.stderr.writeln('Failed to interpret the Chrome trace contents. The trace was saved in ${traceFile.path}');
traceFile.writeAsStringSync(const JsonEncoder.withIndent(' ').convert(traceJson));
rethrow;
}
}
/// The average duration of "WebViewImpl::beginFrame" events.
///
  /// This event contains all of the scripting time of an animation frame, plus a
  /// small, unknown amount of work the browser does before and after scripting.
final Duration averageBeginFrameTime;
/// The average duration of "WebViewImpl::updateAllLifecyclePhases" events.
///
/// This event contains style, layout, painting, and compositor computations,
/// which are not included in the scripting time. This event does not
/// include GPU time, which happens on a separate thread.
final Duration averageUpdateLifecyclePhasesTime;
/// The average sum of [averageBeginFrameTime] and
/// [averageUpdateLifecyclePhasesTime].
///
/// This value contains the vast majority of work the UI thread performs in
/// any given animation frame.
final Duration averageTotalUIFrameTime;
@override
String toString() => '$BlinkTraceSummary('
'averageBeginFrameTime: ${averageBeginFrameTime.inMicroseconds / 1000}ms, '
'averageUpdateLifecyclePhasesTime: ${averageUpdateLifecyclePhasesTime.inMicroseconds / 1000}ms)';
}
/// Contains events pertaining to a single frame in the Blink trace data.
class BlinkFrame {
/// Corresponds to 'WebViewImpl::beginFrame' event.
BlinkTraceEvent beginFrame;
/// Corresponds to 'WebViewImpl::updateAllLifecyclePhases' event.
BlinkTraceEvent updateAllLifecyclePhases;
/// Corresponds to 'measured_frame' begin event.
BlinkTraceEvent beginMeasuredFrame;
/// Corresponds to 'measured_frame' end event.
BlinkTraceEvent endMeasuredFrame;
}
/// Takes a list of events that have non-null [BlinkTraceEvent.tdur] and computes
/// their average as a [Duration] value.
Duration _computeAverageDuration(List<BlinkTraceEvent> events) {
// Compute the sum of "tdur" fields of the last _kMeasuredSampleCount events.
final double sum = events
.skip(math.max(events.length - _kMeasuredSampleCount, 0))
.fold(0.0, (double previousValue, BlinkTraceEvent event) {
if (event.tdur == null) {
throw FormatException('Trace event lacks "tdur" field: $event');
}
return previousValue + event.tdur;
});
final int sampleCount = math.min(events.length, _kMeasuredSampleCount);
return Duration(microseconds: sum ~/ sampleCount);
}
/// An event collected by the Blink tracer (accessible in Chrome via chrome://tracing).
///
/// See also:
/// * https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU/preview
class BlinkTraceEvent {
BlinkTraceEvent._({
@required this.args,
@required this.cat,
@required this.name,
@required this.ph,
@required this.pid,
@required this.tid,
@required this.ts,
@required this.tts,
@required this.tdur,
});
/// Parses an event from its JSON representation.
///
/// Sample event encoded as JSON (the data is bogus, this just shows the format):
///
/// ```
/// {
/// "name": "myName",
/// "cat": "category,list",
/// "ph": "B",
/// "ts": 12345,
/// "pid": 123,
/// "tid": 456,
/// "args": {
/// "someArg": 1,
/// "anotherArg": {
/// "value": "my value"
/// }
/// }
/// }
/// ```
///
/// For detailed documentation of the format see:
///
/// https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU/preview
static BlinkTraceEvent fromJson(Map<String, dynamic> json) {
return BlinkTraceEvent._(
args: json['args'] as Map<String, dynamic>,
cat: json['cat'] as String,
name: json['name'] as String,
ph: json['ph'] as String,
pid: _readInt(json, 'pid'),
tid: _readInt(json, 'tid'),
ts: _readInt(json, 'ts'),
tts: _readInt(json, 'tts'),
tdur: _readInt(json, 'tdur'),
);
}
/// Event-specific data.
final Map<String, dynamic> args;
/// Event category.
final String cat;
/// Event name.
final String name;
/// Event "phase".
final String ph;
/// Process ID of the process that emitted the event.
final int pid;
/// Thread ID of the thread that emitted the event.
final int tid;
/// Timestamp in microseconds using tracer clock.
final int ts;
/// Timestamp in microseconds using thread clock.
final int tts;
/// Event duration in microseconds.
final int tdur;
/// A "begin frame" event contains all of the scripting time of an animation
/// frame (JavaScript, WebAssembly), plus a negligible amount of internal
/// browser overhead.
///
/// This event does not include non-UI thread scripting, such as web workers,
/// service workers, and CSS Paint paintlets.
///
/// This event is a duration event that has its `tdur` populated.
bool get isBeginFrame => ph == 'X' && name == 'WebViewImpl::beginFrame';
/// An "update all lifecycle phases" event contains UI thread computations
/// related to an animation frame that's outside the scripting phase.
///
/// This event includes style recalculation, layer tree update, layout,
/// painting, and parts of compositing work.
///
/// This event is a duration event that has its `tdur` populated.
bool get isUpdateAllLifecyclePhases => ph == 'X' && name == 'WebViewImpl::updateAllLifecyclePhases';
/// A "CrRendererMain" event contains information about the browser's UI
/// thread.
///
/// This event's [pid] field identifies the process that performs web page
/// rendering. The [isBeginFrame] and [isUpdateAllLifecyclePhases] events
/// with the same [pid] as this event all belong to the same web page.
bool get isCrRendererMain => name == 'thread_name' && args['name'] == 'CrRendererMain';
/// Whether this is the beginning of a "measured_frame" event.
///
/// This event is a custom event emitted by our benchmark test harness.
///
/// See also:
/// * `recorder.dart`, which emits this event.
bool get isBeginMeasuredFrame => ph == 'b' && name == 'measured_frame';
/// Whether this is the end of a "measured_frame" event.
///
/// This event is a custom event emitted by our benchmark test harness.
///
/// See also:
/// * `recorder.dart`, which emits this event.
bool get isEndMeasuredFrame => ph == 'e' && name == 'measured_frame';
@override
String toString() => '$BlinkTraceEvent('
'args: ${json.encode(args)}, '
'cat: $cat, '
'name: $name, '
'ph: $ph, '
'pid: $pid, '
'tid: $tid, '
'ts: $ts, '
'tts: $tts, '
'tdur: $tdur)';
}
/// Read an integer out of [json] stored under [key].
///
/// Since JSON does not distinguish between `int` and `double`, extra
/// validation and conversion is needed.
///
/// Returns null if the value is null.
int _readInt(Map<String, dynamic> json, String key) {
final num jsonValue = json[key] as num;
if (jsonValue == null) {
return null;
}
return jsonValue.toInt();
}
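// Illustrative only: decoding a single raw trace entry into a BlinkTraceEvent
// and classifying it with the predicates above. The payload format follows
// the sample shown in the fromJson doc comment.
BlinkTraceEvent decodeTraceEntry(Map<String, dynamic> rawEvent) {
  final BlinkTraceEvent event = BlinkTraceEvent.fromJson(rawEvent);
  if (event.isBeginFrame || event.isUpdateAllLifecyclePhases) {
    print('UI-thread frame work: ${event.name} took ${event.tdur} microseconds');
  }
  return event;
}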
...@@ -6,6 +6,7 @@ import 'dart:async'; ...@@ -6,6 +6,7 @@ import 'dart:async';
import 'dart:convert' show json; import 'dart:convert' show json;
import 'dart:io' as io; import 'dart:io' as io;
import 'package:logging/logging.dart';
import 'package:meta/meta.dart'; import 'package:meta/meta.dart';
import 'package:path/path.dart' as path; import 'package:path/path.dart' as path;
import 'package:shelf/shelf.dart'; import 'package:shelf/shelf.dart';
...@@ -18,8 +19,11 @@ import 'package:flutter_devicelab/framework/utils.dart'; ...@@ -18,8 +19,11 @@ import 'package:flutter_devicelab/framework/utils.dart';
/// The port number used by the local benchmark server. /// The port number used by the local benchmark server.
const int benchmarkServerPort = 9999; const int benchmarkServerPort = 9999;
const int chromeDebugPort = 10000;
Future<TaskResult> runWebBenchmark({ @required bool useCanvasKit }) async { Future<TaskResult> runWebBenchmark({ @required bool useCanvasKit }) async {
// Reduce logging level. Otherwise, package:webkit_inspection_protocol is way too spammy.
Logger.root.level = Level.INFO;
final String macrobenchmarksDirectory = path.join(flutterDirectory.path, 'dev', 'benchmarks', 'macrobenchmarks'); final String macrobenchmarksDirectory = path.join(flutterDirectory.path, 'dev', 'benchmarks', 'macrobenchmarks');
return await inDirectory(macrobenchmarksDirectory, () async { return await inDirectory(macrobenchmarksDirectory, () async {
await evalFlutter('build', options: <String>[ await evalFlutter('build', options: <String>[
...@@ -38,51 +42,79 @@ Future<TaskResult> runWebBenchmark({ @required bool useCanvasKit }) async { ...@@ -38,51 +42,79 @@ Future<TaskResult> runWebBenchmark({ @required bool useCanvasKit }) async {
List<String> benchmarks; List<String> benchmarks;
Iterator<String> benchmarkIterator; Iterator<String> benchmarkIterator;
// This future fixes a race condition between the web-page loading and
// asking to run a benchmark, and us connecting to Chrome's DevTools port.
    // Sometimes one wins; other times, the other wins.
Future<Chrome> whenChromeIsReady;
Chrome chrome;
io.HttpServer server; io.HttpServer server;
Cascade cascade = Cascade(); Cascade cascade = Cascade();
List<Map<String, dynamic>> latestPerformanceTrace;
cascade = cascade.add((Request request) async { cascade = cascade.add((Request request) async {
if (request.requestedUri.path.endsWith('/profile-data')) { try {
final Map<String, dynamic> profile = json.decode(await request.readAsString()) as Map<String, dynamic>; chrome ??= await whenChromeIsReady;
final String benchmarkName = profile['name'] as String; if (request.requestedUri.path.endsWith('/profile-data')) {
if (benchmarkName != benchmarkIterator.current) { final Map<String, dynamic> profile = json.decode(await request.readAsString()) as Map<String, dynamic>;
profileData.completeError(Exception( final String benchmarkName = profile['name'] as String;
'Browser returned benchmark results from a wrong benchmark.\n' if (benchmarkName != benchmarkIterator.current) {
    'Requested to run benchmark ${benchmarkIterator.current}, but ' profileData.completeError(Exception(
'got results for $benchmarkName.', 'Browser returned benchmark results from a wrong benchmark.\n'
    )); 'Requested to run benchmark ${benchmarkIterator.current}, but '
'got results for $benchmarkName.',
));
server.close();
}
final BlinkTraceSummary traceSummary = BlinkTraceSummary.fromJson(latestPerformanceTrace);
// Trace summary can be null if the benchmark is not frame-based, such as RawRecorder.
if (traceSummary != null) {
profile['totalUiFrame.average'] = traceSummary.averageTotalUIFrameTime.inMicroseconds;
profile['scoreKeys'] ??= <dynamic>[]; // using dynamic for consistency with JSON
profile['scoreKeys'].add('totalUiFrame.average');
}
latestPerformanceTrace = null;
collectedProfiles.add(profile);
return Response.ok('Profile received');
} else if (request.requestedUri.path.endsWith('/start-performance-tracing')) {
latestPerformanceTrace = null;
await chrome.beginRecordingPerformance(request.requestedUri.queryParameters['label']);
return Response.ok('Started performance tracing');
} else if (request.requestedUri.path.endsWith('/stop-performance-tracing')) {
latestPerformanceTrace = await chrome.endRecordingPerformance();
return Response.ok('Stopped performance tracing');
} else if (request.requestedUri.path.endsWith('/on-error')) {
final Map<String, dynamic> errorDetails = json.decode(await request.readAsString()) as Map<String, dynamic>;
server.close(); server.close();
} // Keep the stack trace as a string. It's thrown in the browser, not this Dart VM.
collectedProfiles.add(profile); profileData.completeError('${errorDetails['error']}\n${errorDetails['stackTrace']}');
return Response.ok('Profile received'); return Response.ok('');
} else if (request.requestedUri.path.endsWith('/on-error')) { } else if (request.requestedUri.path.endsWith('/next-benchmark')) {
final Map<String, dynamic> errorDetails = json.decode(await request.readAsString()) as Map<String, dynamic>; if (benchmarks == null) {
server.close(); benchmarks = (json.decode(await request.readAsString()) as List<dynamic>).cast<String>();
// Keep the stack trace as a string. It's thrown in the browser, not this Dart VM. benchmarkIterator = benchmarks.iterator;
profileData.completeError('${errorDetails['error']}\n${errorDetails['stackTrace']}'); }
return Response.ok(''); if (benchmarkIterator.moveNext()) {
} else if (request.requestedUri.path.endsWith('/next-benchmark')) { final String nextBenchmark = benchmarkIterator.current;
if (benchmarks == null) { print('Launching benchmark "$nextBenchmark"');
benchmarks = (json.decode(await request.readAsString()) as List<dynamic>).cast<String>(); return Response.ok(nextBenchmark);
benchmarkIterator = benchmarks.iterator; } else {
} profileData.complete(collectedProfiles);
if (benchmarkIterator.moveNext()) { return Response.notFound('Finished running benchmarks.');
final String nextBenchmark = benchmarkIterator.current; }
print('Launching benchmark "$nextBenchmark"');
return Response.ok(nextBenchmark);
} else { } else {
profileData.complete(collectedProfiles); return Response.notFound(
return Response.notFound('Finished running benchmarks.'); 'This request is not handled by the profile-data handler.');
} }
} else { } catch (error, stackTrace) {
return Response.notFound( profileData.completeError(error, stackTrace);
'This request is not handled by the profile-data handler.'); return Response.internalServerError(body: '$error');
} }
}).add(createStaticHandler( }).add(createStaticHandler(
path.join(macrobenchmarksDirectory, 'build', 'web'), path.join(macrobenchmarksDirectory, 'build', 'web'),
)); ));
server = await io.HttpServer.bind('localhost', benchmarkServerPort); server = await io.HttpServer.bind('localhost', benchmarkServerPort);
Chrome chrome;
try { try {
shelf_io.serveRequests(server, cascade.handler); shelf_io.serveRequests(server, cascade.handler);
...@@ -102,13 +134,11 @@ Future<TaskResult> runWebBenchmark({ @required bool useCanvasKit }) async { ...@@ -102,13 +134,11 @@ Future<TaskResult> runWebBenchmark({ @required bool useCanvasKit }) async {
windowHeight: 1024, windowHeight: 1024,
windowWidth: 1024, windowWidth: 1024,
headless: isUncalibratedSmokeTest, headless: isUncalibratedSmokeTest,
// When running in headless mode Chrome exits immediately unless debugPort: chromeDebugPort,
// a debug port is specified.
debugPort: isUncalibratedSmokeTest ? benchmarkServerPort + 1 : null,
); );
print('Launching Chrome.'); print('Launching Chrome.');
chrome = await Chrome.launch( whenChromeIsReady = Chrome.launch(
options, options,
onError: (String error) { onError: (String error) {
profileData.completeError(Exception(error)); profileData.completeError(Exception(error));
...@@ -151,8 +181,8 @@ Future<TaskResult> runWebBenchmark({ @required bool useCanvasKit }) async { ...@@ -151,8 +181,8 @@ Future<TaskResult> runWebBenchmark({ @required bool useCanvasKit }) async {
} }
return TaskResult.success(taskResult, benchmarkScoreKeys: benchmarkScoreKeys); return TaskResult.success(taskResult, benchmarkScoreKeys: benchmarkScoreKeys);
} finally { } finally {
server.close(); server?.close();
chrome.stop(); chrome?.stop();
} }
}); });
} }