Unverified Commit 29c88081 authored by Yegor, committed by GitHub

Collect chrome://tracing data in Web benchmarks (#53879)

parent 58acf4e7
@@ -191,31 +191,54 @@ class BenchTextCachedLayout extends RawRecorder {
   /// build are unique.
   int _counter = 0;
 }

-/// Measures how expensive it is to construct material checkboxes.
+/// Which mode to run [BenchBuildColorsGrid] in.
+enum _TestMode {
+  /// Uses the HTML rendering backend with the canvas 2D text layout.
+  useCanvasTextLayout,
+
+  /// Uses the HTML rendering backend with the DOM text layout.
+  useDomTextLayout,
+
+  /// Uses CanvasKit for everything.
+  useCanvasKit,
+}
+
+/// Measures how expensive it is to construct a realistic text-heavy piece of UI.
 ///
-/// Creates a 10x10 grid of tristate checkboxes.
+/// The benchmark constructs a tabbed view, where each tab displays a list of
+/// colors. Each color's description is made of several [Text] nodes.
 class BenchBuildColorsGrid extends WidgetBuildRecorder {
-  BenchBuildColorsGrid({@required this.useCanvas})
-      : super(name: useCanvas ? canvasBenchmarkName : domBenchmarkName);
+  BenchBuildColorsGrid.canvas()
+      : mode = _TestMode.useCanvasTextLayout, super(name: canvasBenchmarkName);
+  BenchBuildColorsGrid.dom()
+      : mode = _TestMode.useDomTextLayout, super(name: domBenchmarkName);
+  BenchBuildColorsGrid.canvasKit()
+      : mode = _TestMode.useCanvasKit, super(name: canvasKitBenchmarkName);

   static const String domBenchmarkName = 'text_dom_color_grid';
   static const String canvasBenchmarkName = 'text_canvas_color_grid';
+  static const String canvasKitBenchmarkName = 'text_canvas_kit_color_grid';

   /// Whether to use the new canvas-based text measurement implementation.
-  final bool useCanvas;
+  final _TestMode mode;

   num _textLayoutMicros = 0;

   @override
-  void setUpAll() {
-    _useCanvasText(useCanvas);
+  Future<void> setUpAll() async {
+    if (mode == _TestMode.useCanvasTextLayout) {
+      _useCanvasText(true);
+    }
+    if (mode == _TestMode.useDomTextLayout) {
+      _useCanvasText(false);
+    }
     _onBenchmark((String name, num value) {
       _textLayoutMicros += value;
     });
   }

   @override
-  void tearDownAll() {
+  Future<void> tearDownAll() async {
     _useCanvasText(null);
     _onBenchmark(null);
   }
@@ -230,7 +253,8 @@ class BenchBuildColorsGrid extends WidgetBuildRecorder {
   void frameDidDraw() {
     // We need to do this before calling [super.frameDidDraw] because the latter
     // updates the value of [showWidget] in preparation for the next frame.
-    if (showWidget) {
+    // TODO(yjbanov): https://github.com/flutter/flutter/issues/53877
+    if (showWidget && mode != _TestMode.useCanvasKit) {
       profile.addDataPoint(
         'text_layout',
         Duration(microseconds: _textLayoutMicros.toInt()),
...
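The first file swaps the single `useCanvas` flag for a three-way `_TestMode` enum with one named constructor per rendering backend. Below is a standalone sketch of that pattern; all names in it are illustrative, not the benchmark's real API:

```dart
// Sketch of the enum-plus-named-constructor pattern used above: a bool
// cannot express a third backend, an enum can. Names are illustrative.
enum RenderMode { canvasText, domText, canvasKit }

class GridBenchmark {
  GridBenchmark.canvas() : mode = RenderMode.canvasText;
  GridBenchmark.dom() : mode = RenderMode.domText;
  GridBenchmark.canvasKit() : mode = RenderMode.canvasKit;

  final RenderMode mode;

  Future<void> setUpAll() async {
    // Only the HTML backend modes need the text-layout toggle; CanvasKit
    // does its own text layout, so that mode leaves the engine default alone.
    switch (mode) {
      case RenderMode.canvasText:
        print('use canvas 2D text measurement');
        break;
      case RenderMode.domText:
        print('use DOM text measurement');
        break;
      case RenderMode.canvasKit:
        break;
    }
  }
}

Future<void> main() async => GridBenchmark.canvasKit().setUpAll();
```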
@@ -32,6 +32,8 @@ final Map<String, RecorderFactory> benchmarks = <String, RecorderFactory>{
   BenchSimpleLazyTextScroll.benchmarkName: () => BenchSimpleLazyTextScroll(),
   BenchBuildMaterialCheckbox.benchmarkName: () => BenchBuildMaterialCheckbox(),
   BenchDynamicClipOnStaticPicture.benchmarkName: () => BenchDynamicClipOnStaticPicture(),
+  if (isCanvasKit)
+    BenchBuildColorsGrid.canvasKitBenchmarkName: () => BenchBuildColorsGrid.canvasKit(),

   // Benchmarks that we don't want to run using CanvasKit.
   if (!isCanvasKit) ...<String, RecorderFactory>{
@@ -39,37 +41,23 @@ final Map<String, RecorderFactory> benchmarks = <String, RecorderFactory>{
     BenchTextLayout.canvasBenchmarkName: () => BenchTextLayout(useCanvas: true),
     BenchTextCachedLayout.domBenchmarkName: () => BenchTextCachedLayout(useCanvas: false),
     BenchTextCachedLayout.canvasBenchmarkName: () => BenchTextCachedLayout(useCanvas: true),
-    BenchBuildColorsGrid.domBenchmarkName: () => BenchBuildColorsGrid(useCanvas: false),
-    BenchBuildColorsGrid.canvasBenchmarkName: () => BenchBuildColorsGrid(useCanvas: true),
+    BenchBuildColorsGrid.domBenchmarkName: () => BenchBuildColorsGrid.dom(),
+    BenchBuildColorsGrid.canvasBenchmarkName: () => BenchBuildColorsGrid.canvas(),
   }
 };

-/// Whether we fell back to manual mode.
-///
-/// This happens when you run benchmarks using plain `flutter run` rather than
-/// devicelab test harness. The test harness spins up a special server that
-/// provides API for automatically picking the next benchmark to run.
-bool isInManualMode = false;
+final LocalBenchmarkServerClient _client = LocalBenchmarkServerClient();

 Future<void> main() async {
   // Check if the benchmark server wants us to run a specific benchmark.
-  final html.HttpRequest request = await requestXhr(
-    '/next-benchmark',
-    method: 'POST',
-    mimeType: 'application/json',
-    sendData: json.encode(benchmarks.keys.toList()),
-  );
-
-  // 404 is expected in the following cases:
-  // - The benchmark is ran using plain `flutter run`, which does not provide "next-benchmark" handler.
-  // - We ran all benchmarks and the benchmark is telling us there are no more benchmarks to run.
-  if (request.status == 404) {
+  final String nextBenchmark = await _client.requestNextBenchmark();
+
+  if (nextBenchmark == LocalBenchmarkServerClient.kManualFallback) {
     _fallbackToManual('The server did not tell us which benchmark to run next.');
     return;
   }

-  final String benchmarkName = request.responseText;
-  await _runBenchmark(benchmarkName);
+  await _runBenchmark(nextBenchmark);
   html.window.location.reload();
 }
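After this change, `main()` never sees raw HTTP status codes: the client maps a 404 from `/next-benchmark` to a sentinel string. A minimal sketch of that control flow, with the server call replaced by a stub of our own (the sentinel name matches the diff; the queue is a stand-in):

```dart
// Sketch of main()'s control flow with the HTTP call stubbed out: the
// client returns a sentinel instead of exposing status codes.
const String kManualFallback = '__manual_fallback__';

Future<String> requestNextBenchmark(List<String> queue) async =>
    queue.isEmpty ? kManualFallback : queue.removeAt(0);

Future<void> main() async {
  final List<String> queue = <String>['text_dom_color_grid'];
  while (true) {
    final String next = await requestNextBenchmark(queue);
    if (next == kManualFallback) {
      print('Falling back to manual mode.'); // the real app shows a picker UI
      break;
    }
    print('Running $next');
    // The real app reloads the page here so each benchmark starts fresh.
  }
}
```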
@@ -81,44 +69,36 @@ Future<void> _runBenchmark(String benchmarkName) async {
     return;
   }

-  final Recorder recorder = recorderFactory();
-
   try {
-    final Profile profile = await recorder.run();
-    if (!isInManualMode) {
-      final html.HttpRequest request = await html.HttpRequest.request(
-        '/profile-data',
-        method: 'POST',
-        mimeType: 'application/json',
-        sendData: json.encode(profile.toJson()),
-      );
-      if (request.status != 200) {
-        throw Exception(
-          'Failed to report profile data to benchmark server. '
-          'The server responded with status code ${request.status}.'
-        );
-      }
+    final Runner runner = Runner(
+      recorder: recorderFactory(),
+      setUpAllDidRun: () async {
+        if (!_client.isInManualMode) {
+          await _client.startPerformanceTracing(benchmarkName);
+        }
+      },
+      tearDownAllWillRun: () async {
+        if (!_client.isInManualMode) {
+          await _client.stopPerformanceTracing();
+        }
+      },
+    );
+
+    final Profile profile = await runner.run();
+
+    if (!_client.isInManualMode) {
+      await _client.sendProfileData(profile);
     } else {
       print(profile);
     }
   } catch (error, stackTrace) {
-    if (isInManualMode) {
+    if (_client.isInManualMode) {
       rethrow;
     }
-    await html.HttpRequest.request(
-      '/on-error',
-      method: 'POST',
-      mimeType: 'application/json',
-      sendData: json.encode(<String, dynamic>{
-        'error': '$error',
-        'stackTrace': '$stackTrace',
-      }),
-    );
+    await _client.reportError(error, stackTrace);
   }
 }

 void _fallbackToManual(String error) {
-  isInManualMode = true;
   html.document.body.appendHtml('''
     <div id="manual-panel">
       <h3>$error</h3>
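`_runBenchmark` now hands the recorder to a `Runner` with two lifecycle hooks, so tracing starts only after set-up completes and stops before tear-down begins; the chrome://tracing trace therefore covers only measured frames. A stub of that ordering guarantee (the real `Runner` is part of the benchmark recorder library and is not shown in this diff; the stub below is ours):

```dart
// Stub of the hook-based Runner used above, demonstrating only the
// sequencing the hooks provide. The real class does much more.
class Runner {
  Runner({
    required this.recorder,
    required this.setUpAllDidRun,
    required this.tearDownAllWillRun,
  });

  final Future<void> Function() recorder; // stand-in for Recorder.run()
  final Future<void> Function() setUpAllDidRun;
  final Future<void> Function() tearDownAllWillRun;

  Future<void> run() async {
    // (recorder set-up would run here)
    await setUpAllDidRun();     // harness starts chrome://tracing
    await recorder();           // measured frames only
    await tearDownAllWillRun(); // harness stops chrome://tracing
    // (recorder tear-down would run here)
  }
}

Future<void> main() async {
  await Runner(
    recorder: () async => print('run benchmark frames'),
    setUpAllDidRun: () async => print('start tracing'),
    tearDownAllWillRun: () async => print('stop tracing'),
  ).run();
}
```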
@@ -146,7 +126,113 @@ void _fallbackToManual(String error) {
   }
 }

-Future<html.HttpRequest> requestXhr(
+/// Implements the client REST API for the local benchmark server.
+///
+/// The local server is optional. If it is not available the benchmark UI must
+/// implement a manual fallback. This allows debugging benchmarks using plain
+/// `flutter run`.
+class LocalBenchmarkServerClient {
+  /// This value is returned by [requestNextBenchmark].
+  static const String kManualFallback = '__manual_fallback__';
+
+  /// Whether we fell back to manual mode.
+  ///
+  /// This happens when you run benchmarks using plain `flutter run` rather than
+  /// devicelab test harness. The test harness spins up a special server that
+  /// provides API for automatically picking the next benchmark to run.
+  bool isInManualMode;
+
+  /// Asks the local server for the name of the next benchmark to run.
+  ///
+  /// Returns [kManualFallback] if local server is not available (uses 404 as a
+  /// signal).
+  Future<String> requestNextBenchmark() async {
+    final html.HttpRequest request = await _requestXhr(
+      '/next-benchmark',
+      method: 'POST',
+      mimeType: 'application/json',
+      sendData: json.encode(benchmarks.keys.toList()),
+    );
+
+    // 404 is expected in the following cases:
+    // - The benchmark is ran using plain `flutter run`, which does not provide "next-benchmark" handler.
+    // - We ran all benchmarks and the benchmark is telling us there are no more benchmarks to run.
+    if (request.status == 404) {
+      isInManualMode = true;
+      return kManualFallback;
+    }
+
+    isInManualMode = false;
+    return request.responseText;
+  }
+
+  void _checkNotManualMode() {
+    if (isInManualMode) {
+      throw StateError('Operation not supported in manual fallback mode.');
+    }
+  }
+
+  /// Asks the local server to begin tracing performance.
+  ///
+  /// This uses the chrome://tracing tracer, which is not available from within
+  /// the page itself, and therefore must be controlled from outside using the
+  /// DevTools Protocol.
+  Future<void> startPerformanceTracing(String benchmarkName) async {
+    _checkNotManualMode();
+    await html.HttpRequest.request(
+      '/start-performance-tracing?label=$benchmarkName',
+      method: 'POST',
+      mimeType: 'application/json',
+    );
+  }
+
+  /// Stops the performance tracing session started by [startPerformanceTracing].
+  Future<void> stopPerformanceTracing() async {
+    _checkNotManualMode();
+    await html.HttpRequest.request(
+      '/stop-performance-tracing',
+      method: 'POST',
+      mimeType: 'application/json',
+    );
+  }
+
+  /// Sends the profile data collected by the benchmark to the local benchmark
+  /// server.
+  Future<void> sendProfileData(Profile profile) async {
+    _checkNotManualMode();
+    final html.HttpRequest request = await html.HttpRequest.request(
+      '/profile-data',
+      method: 'POST',
+      mimeType: 'application/json',
+      sendData: json.encode(profile.toJson()),
+    );
+    if (request.status != 200) {
+      throw Exception(
+        'Failed to report profile data to benchmark server. '
+        'The server responded with status code ${request.status}.'
+      );
+    }
+  }
+
+  /// Reports an error to the benchmark server.
+  ///
+  /// The server will halt the devicelab task and log the error.
+  Future<void> reportError(dynamic error, StackTrace stackTrace) async {
+    _checkNotManualMode();
+    await html.HttpRequest.request(
+      '/on-error',
+      method: 'POST',
+      mimeType: 'application/json',
+      sendData: json.encode(<String, dynamic>{
+        'error': '$error',
+        'stackTrace': '$stackTrace',
+      }),
+    );
+  }
+
+  /// This is the same as calling [html.HttpRequest.request] but it doesn't
+  /// crash on 404, which we use to detect `flutter run`.
+  Future<html.HttpRequest> _requestXhr(
   String url, {
   String method,
   bool withCredentials,
@@ -154,7 +240,7 @@ Future<html.HttpRequest> requestXhr(
   String mimeType,
   Map<String, String> requestHeaders,
   dynamic sendData,
 }) {
   final Completer<html.HttpRequest> completer = Completer<html.HttpRequest>();
   final html.HttpRequest xhr = html.HttpRequest();
@@ -192,4 +278,5 @@ Future<html.HttpRequest> requestXhr(
   }

   return completer.future;
+  }
 }
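Taken together, the new class gives the page a small protocol against the local server: `/next-benchmark`, `/start-performance-tracing`, `/stop-performance-tracing`, `/profile-data`, and `/on-error`. A hypothetical driver for one benchmark cycle, assuming the `LocalBenchmarkServerClient` defined above is in scope:

```dart
// Hypothetical usage of the client class above; runOneBenchmarkCycle is
// our name, not part of the diff. Error reporting is elided (see
// reportError above).
Future<void> runOneBenchmarkCycle(LocalBenchmarkServerClient client) async {
  final String name = await client.requestNextBenchmark();
  if (name == LocalBenchmarkServerClient.kManualFallback) {
    return; // plain `flutter run`: no server, fall back to the manual UI
  }
  await client.startPerformanceTracing(name);
  // ... run the benchmark and collect a Profile here ...
  await client.stopPerformanceTracing();
  // await client.sendProfileData(profile);
}
```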
@@ -6,6 +6,7 @@ import 'dart:async';
 import 'dart:convert' show json;
 import 'dart:io' as io;

+import 'package:logging/logging.dart';
 import 'package:meta/meta.dart';
 import 'package:path/path.dart' as path;
 import 'package:shelf/shelf.dart';
@@ -18,8 +19,11 @@ import 'package:flutter_devicelab/framework/utils.dart';
 /// The port number used by the local benchmark server.
 const int benchmarkServerPort = 9999;
+const int chromeDebugPort = 10000;

 Future<TaskResult> runWebBenchmark({ @required bool useCanvasKit }) async {
+  // Reduce logging level. Otherwise, package:webkit_inspection_protocol is way too spammy.
+  Logger.root.level = Level.INFO;
   final String macrobenchmarksDirectory = path.join(flutterDirectory.path, 'dev', 'benchmarks', 'macrobenchmarks');
   return await inDirectory(macrobenchmarksDirectory, () async {
     await evalFlutter('build', options: <String>[
@@ -38,9 +42,17 @@ Future<TaskResult> runWebBenchmark({ @required bool useCanvasKit }) async {
     List<String> benchmarks;
     Iterator<String> benchmarkIterator;

+    // This future fixes a race condition between the web-page loading and
+    // asking to run a benchmark, and us connecting to Chrome's DevTools port.
+    // Sometime one wins. Other times, the other wins.
+    Future<Chrome> whenChromeIsReady;
+    Chrome chrome;
+
     io.HttpServer server;
     Cascade cascade = Cascade();
+    List<Map<String, dynamic>> latestPerformanceTrace;
     cascade = cascade.add((Request request) async {
+      try {
+        chrome ??= await whenChromeIsReady;
       if (request.requestedUri.path.endsWith('/profile-data')) {
         final Map<String, dynamic> profile = json.decode(await request.readAsString()) as Map<String, dynamic>;
         final String benchmarkName = profile['name'] as String;
@@ -52,8 +64,25 @@ Future<TaskResult> runWebBenchmark({ @required bool useCanvasKit }) async {
         ));
         server.close();
       }
+
+      final BlinkTraceSummary traceSummary = BlinkTraceSummary.fromJson(latestPerformanceTrace);
+
+      // Trace summary can be null if the benchmark is not frame-based, such as RawRecorder.
+      if (traceSummary != null) {
+        profile['totalUiFrame.average'] = traceSummary.averageTotalUIFrameTime.inMicroseconds;
+        profile['scoreKeys'] ??= <dynamic>[]; // using dynamic for consistency with JSON
+        profile['scoreKeys'].add('totalUiFrame.average');
+      }
+
+      latestPerformanceTrace = null;
       collectedProfiles.add(profile);
       return Response.ok('Profile received');
+    } else if (request.requestedUri.path.endsWith('/start-performance-tracing')) {
+      latestPerformanceTrace = null;
+      await chrome.beginRecordingPerformance(request.requestedUri.queryParameters['label']);
+      return Response.ok('Started performance tracing');
+    } else if (request.requestedUri.path.endsWith('/stop-performance-tracing')) {
+      latestPerformanceTrace = await chrome.endRecordingPerformance();
+      return Response.ok('Stopped performance tracing');
     } else if (request.requestedUri.path.endsWith('/on-error')) {
       final Map<String, dynamic> errorDetails = json.decode(await request.readAsString()) as Map<String, dynamic>;
       server.close();
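Before storing a profile, the `/profile-data` handler folds the Blink trace summary into it: a `totalUiFrame.average` metric is appended and registered as a score key so the devicelab picks it up. A standalone sketch of just that merge; the profile contents and the 5210-microsecond value are made up, only the field names match the diff:

```dart
// Standalone sketch of the merge the /profile-data handler performs.
void main() {
  final Map<String, dynamic> profile = <String, dynamic>{
    'name': 'text_canvas_color_grid', // hypothetical incoming profile
    'scoreKeys': <dynamic>['drawFrameDuration.average'],
  };

  const int averageTotalUiFrameMicros = 5210; // hypothetical trace result

  profile['totalUiFrame.average'] = averageTotalUiFrameMicros;
  profile['scoreKeys'] ??= <dynamic>[];
  (profile['scoreKeys'] as List<dynamic>).add('totalUiFrame.average');

  // {name: ..., scoreKeys: [..., totalUiFrame.average], totalUiFrame.average: 5210}
  print(profile);
}
```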
@@ -77,12 +106,15 @@ Future<TaskResult> runWebBenchmark({ @required bool useCanvasKit }) async {
         return Response.notFound(
             'This request is not handled by the profile-data handler.');
       }
+      } catch (error, stackTrace) {
+        profileData.completeError(error, stackTrace);
+        return Response.internalServerError(body: '$error');
+      }
     }).add(createStaticHandler(
       path.join(macrobenchmarksDirectory, 'build', 'web'),
     ));

     server = await io.HttpServer.bind('localhost', benchmarkServerPort);
-    Chrome chrome;
     try {
       shelf_io.serveRequests(server, cascade.handler);
@@ -102,13 +134,11 @@ Future<TaskResult> runWebBenchmark({ @required bool useCanvasKit }) async {
         windowHeight: 1024,
         windowWidth: 1024,
         headless: isUncalibratedSmokeTest,
-        // When running in headless mode Chrome exits immediately unless
-        // a debug port is specified.
-        debugPort: isUncalibratedSmokeTest ? benchmarkServerPort + 1 : null,
+        debugPort: chromeDebugPort,
       );

       print('Launching Chrome.');
-      chrome = await Chrome.launch(
+      whenChromeIsReady = Chrome.launch(
         options,
         onError: (String error) {
           profileData.completeError(Exception(error));
@@ -151,8 +181,8 @@ Future<TaskResult> runWebBenchmark({ @required bool useCanvasKit }) async {
       }
       return TaskResult.success(taskResult, benchmarkScoreKeys: benchmarkScoreKeys);
     } finally {
-      server?.close();
-      chrome?.stop();
+      server?.close();
+      chrome?.stop();
     }
   });
 }
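The race fix above is the memoized-future pattern: `Chrome.launch` is started but not awaited up front, and the first request handler lazily awaits it via `chrome ??= await whenChromeIsReady;`, so it no longer matters whether the page or the DevTools connection comes up first. A self-contained sketch, with `FakeChrome` standing in for the real Chrome wrapper:

```dart
// Self-contained sketch of the memoized-future pattern used to fix the
// page-load vs. DevTools-connection race. FakeChrome is a stand-in.
class FakeChrome {
  Future<void> beginRecordingPerformance(String label) async =>
      print('tracing "$label" started');
}

Future<void> main() async {
  // Started immediately, awaited lazily: requests that arrive before the
  // browser is up simply wait on this future instead of failing.
  final Future<FakeChrome> whenChromeIsReady = Future<FakeChrome>.delayed(
    const Duration(milliseconds: 100),
    () => FakeChrome(),
  );
  FakeChrome? chrome;

  Future<void> handleRequest(String label) async {
    chrome ??= await whenChromeIsReady; // first caller waits, later calls reuse
    await chrome!.beginRecordingPerformance(label);
  }

  await handleRequest('text_dom_color_grid'); // an "early" request still works
}
```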