Unverified commit 29c88081, authored by Yegor, committed by GitHub

Collect chrome://tracing data in Web benchmarks (#53879)

parent 58acf4e7
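
The core of this change is a small HTTP protocol between the benchmark web page and the local benchmark server: the page asks the server to start and stop a chrome://tracing recording around each measured run, and the server folds the resulting trace summary into the profile it receives afterwards. Below is a minimal client-side sketch of that flow; the helper name and the use of dart:html's HttpRequest are illustrative assumptions (the actual client code is in the collapsed part of this diff), but the endpoint paths match the handlers added further down.

import 'dart:html' as html;

/// Hypothetical helper showing how a benchmark might bracket its measured
/// frames with the tracing endpoints added in this commit.
Future<void> traceBenchmark(
    String benchmarkName, Future<void> Function() runMeasuredFrames) async {
  // Ask the local benchmark server to begin recording a Chrome performance
  // trace labeled with this benchmark's name.
  await html.HttpRequest.request(
    '/start-performance-tracing?label=$benchmarkName',
    method: 'POST',
    sendData: '',
  );

  await runMeasuredFrames();

  // Stop the recording; the server keeps the trace events and attaches a
  // totalUiFrame.average score to the next profile it receives.
  await html.HttpRequest.request(
    '/stop-performance-tracing',
    method: 'POST',
    sendData: '',
  );
}
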
@@ -191,31 +191,54 @@ class BenchTextCachedLayout extends RawRecorder {
/// build are unique.
int _counter = 0;
/// Measures how expensive it is to construct material checkboxes.
/// Which mode to run [BenchBuildColorsGrid] in.
enum _TestMode {
/// Uses the HTML rendering backend with the canvas 2D text layout.
useCanvasTextLayout,
/// Uses the HTML rendering backend with the DOM text layout.
useDomTextLayout,
/// Uses CanvasKit for everything.
useCanvasKit,
}
/// Measures how expensive it is to construct a realistic text-heavy piece of UI.
///
/// Creates a 10x10 grid of tristate checkboxes.
/// The benchmark constructs a tabbed view, where each tab displays a list of
/// colors. Each color's description is made of several [Text] nodes.
class BenchBuildColorsGrid extends WidgetBuildRecorder {
BenchBuildColorsGrid({@required this.useCanvas})
: super(name: useCanvas ? canvasBenchmarkName : domBenchmarkName);
BenchBuildColorsGrid.canvas()
: mode = _TestMode.useCanvasTextLayout, super(name: canvasBenchmarkName);
BenchBuildColorsGrid.dom()
: mode = _TestMode.useDomTextLayout, super(name: domBenchmarkName);
BenchBuildColorsGrid.canvasKit()
: mode = _TestMode.useCanvasKit, super(name: canvasKitBenchmarkName);
static const String domBenchmarkName = 'text_dom_color_grid';
static const String canvasBenchmarkName = 'text_canvas_color_grid';
static const String canvasKitBenchmarkName = 'text_canvas_kit_color_grid';
/// Whether to use the new canvas-based text measurement implementation.
final bool useCanvas;
final _TestMode mode;
num _textLayoutMicros = 0;
@override
void setUpAll() {
_useCanvasText(useCanvas);
Future<void> setUpAll() async {
if (mode == _TestMode.useCanvasTextLayout) {
_useCanvasText(true);
}
if (mode == _TestMode.useDomTextLayout) {
_useCanvasText(false);
}
_onBenchmark((String name, num value) {
_textLayoutMicros += value;
});
}
@override
void tearDownAll() {
Future<void> tearDownAll() async {
_useCanvasText(null);
_onBenchmark(null);
}
@@ -230,7 +253,8 @@ class BenchBuildColorsGrid extends WidgetBuildRecorder {
void frameDidDraw() {
// We need to do this before calling [super.frameDidDraw] because the latter
// updates the value of [showWidget] in preparation for the next frame.
if (showWidget) {
// TODO(yjbanov): https://github.com/flutter/flutter/issues/53877
if (showWidget && mode != _TestMode.useCanvasKit) {
profile.addDataPoint(
'text_layout',
Duration(microseconds: _textLayoutMicros.toInt()),
......
This diff is collapsed.
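
For reference, the /profile-data handler in the next file reads a JSON body produced by the benchmark page. A rough sketch of its shape is below; only 'name' and 'scoreKeys' are read directly by the handler, which appends 'totalUiFrame.average' to both the profile and its score keys when a trace summary is available. The metric names other than those two keys are made-up placeholders.

// Hypothetical example payload; field names other than 'name' and
// 'scoreKeys' are illustrative, not taken from the real benchmarks.
final Map<String, dynamic> exampleProfile = <String, dynamic>{
  'name': 'text_canvas_color_grid',
  'scoreKeys': <String>['drawFrameDuration.average'],
  'drawFrameDuration.average': 1234.5,
};
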
@@ -6,6 +6,7 @@ import 'dart:async';
import 'dart:convert' show json;
import 'dart:io' as io;
import 'package:logging/logging.dart';
import 'package:meta/meta.dart';
import 'package:path/path.dart' as path;
import 'package:shelf/shelf.dart';
@@ -18,8 +19,11 @@ import 'package:flutter_devicelab/framework/utils.dart';
/// The port number used by the local benchmark server.
const int benchmarkServerPort = 9999;
const int chromeDebugPort = 10000;
Future<TaskResult> runWebBenchmark({ @required bool useCanvasKit }) async {
// Reduce logging level. Otherwise, package:webkit_inspection_protocol is way too spammy.
Logger.root.level = Level.INFO;
final String macrobenchmarksDirectory = path.join(flutterDirectory.path, 'dev', 'benchmarks', 'macrobenchmarks');
return await inDirectory(macrobenchmarksDirectory, () async {
await evalFlutter('build', options: <String>[
@@ -38,51 +42,79 @@ Future<TaskResult> runWebBenchmark({ @required bool useCanvasKit }) async {
List<String> benchmarks;
Iterator<String> benchmarkIterator;
// This future fixes a race condition between the web-page loading and
// asking to run a benchmark, and us connecting to Chrome's DevTools port.
// Sometimes one wins; other times, the other does.
Future<Chrome> whenChromeIsReady;
Chrome chrome;
io.HttpServer server;
Cascade cascade = Cascade();
List<Map<String, dynamic>> latestPerformanceTrace;
cascade = cascade.add((Request request) async {
if (request.requestedUri.path.endsWith('/profile-data')) {
final Map<String, dynamic> profile = json.decode(await request.readAsString()) as Map<String, dynamic>;
final String benchmarkName = profile['name'] as String;
if (benchmarkName != benchmarkIterator.current) {
profileData.completeError(Exception(
'Browser returned benchmark results from a wrong benchmark.\n'
'Requested to run benchmark ${benchmarkIterator.current}, but '
'got results for $benchmarkName.',
));
try {
chrome ??= await whenChromeIsReady;
if (request.requestedUri.path.endsWith('/profile-data')) {
final Map<String, dynamic> profile = json.decode(await request.readAsString()) as Map<String, dynamic>;
final String benchmarkName = profile['name'] as String;
if (benchmarkName != benchmarkIterator.current) {
profileData.completeError(Exception(
'Browser returned benchmark results from a wrong benchmark.\n'
'Requested to run benchmark ${benchmarkIterator.current}, but '
'got results for $benchmarkName.',
));
server.close();
}
final BlinkTraceSummary traceSummary = BlinkTraceSummary.fromJson(latestPerformanceTrace);
// Trace summary can be null if the benchmark is not frame-based, such as RawRecorder.
if (traceSummary != null) {
profile['totalUiFrame.average'] = traceSummary.averageTotalUIFrameTime.inMicroseconds;
profile['scoreKeys'] ??= <dynamic>[]; // using dynamic for consistency with JSON
profile['scoreKeys'].add('totalUiFrame.average');
}
latestPerformanceTrace = null;
collectedProfiles.add(profile);
return Response.ok('Profile received');
} else if (request.requestedUri.path.endsWith('/start-performance-tracing')) {
latestPerformanceTrace = null;
await chrome.beginRecordingPerformance(request.requestedUri.queryParameters['label']);
return Response.ok('Started performance tracing');
} else if (request.requestedUri.path.endsWith('/stop-performance-tracing')) {
latestPerformanceTrace = await chrome.endRecordingPerformance();
return Response.ok('Stopped performance tracing');
} else if (request.requestedUri.path.endsWith('/on-error')) {
final Map<String, dynamic> errorDetails = json.decode(await request.readAsString()) as Map<String, dynamic>;
server.close();
}
collectedProfiles.add(profile);
return Response.ok('Profile received');
} else if (request.requestedUri.path.endsWith('/on-error')) {
final Map<String, dynamic> errorDetails = json.decode(await request.readAsString()) as Map<String, dynamic>;
server.close();
// Keep the stack trace as a string. It's thrown in the browser, not this Dart VM.
profileData.completeError('${errorDetails['error']}\n${errorDetails['stackTrace']}');
return Response.ok('');
} else if (request.requestedUri.path.endsWith('/next-benchmark')) {
if (benchmarks == null) {
benchmarks = (json.decode(await request.readAsString()) as List<dynamic>).cast<String>();
benchmarkIterator = benchmarks.iterator;
}
if (benchmarkIterator.moveNext()) {
final String nextBenchmark = benchmarkIterator.current;
print('Launching benchmark "$nextBenchmark"');
return Response.ok(nextBenchmark);
// Keep the stack trace as a string. It's thrown in the browser, not this Dart VM.
profileData.completeError('${errorDetails['error']}\n${errorDetails['stackTrace']}');
return Response.ok('');
} else if (request.requestedUri.path.endsWith('/next-benchmark')) {
if (benchmarks == null) {
benchmarks = (json.decode(await request.readAsString()) as List<dynamic>).cast<String>();
benchmarkIterator = benchmarks.iterator;
}
if (benchmarkIterator.moveNext()) {
final String nextBenchmark = benchmarkIterator.current;
print('Launching benchmark "$nextBenchmark"');
return Response.ok(nextBenchmark);
} else {
profileData.complete(collectedProfiles);
return Response.notFound('Finished running benchmarks.');
}
} else {
profileData.complete(collectedProfiles);
return Response.notFound('Finished running benchmarks.');
return Response.notFound(
'This request is not handled by the profile-data handler.');
}
} else {
return Response.notFound(
'This request is not handled by the profile-data handler.');
} catch (error, stackTrace) {
profileData.completeError(error, stackTrace);
return Response.internalServerError(body: '$error');
}
}).add(createStaticHandler(
path.join(macrobenchmarksDirectory, 'build', 'web'),
));
server = await io.HttpServer.bind('localhost', benchmarkServerPort);
Chrome chrome;
try {
shelf_io.serveRequests(server, cascade.handler);
@@ -102,13 +134,11 @@ Future<TaskResult> runWebBenchmark({ @required bool useCanvasKit }) async {
windowHeight: 1024,
windowWidth: 1024,
headless: isUncalibratedSmokeTest,
// When running in headless mode Chrome exits immediately unless
// a debug port is specified.
debugPort: isUncalibratedSmokeTest ? benchmarkServerPort + 1 : null,
debugPort: chromeDebugPort,
);
print('Launching Chrome.');
chrome = await Chrome.launch(
whenChromeIsReady = Chrome.launch(
options,
onError: (String error) {
profileData.completeError(Exception(error));
@@ -151,8 +181,8 @@ Future<TaskResult> runWebBenchmark({ @required bool useCanvasKit }) async {
}
return TaskResult.success(taskResult, benchmarkScoreKeys: benchmarkScoreKeys);
} finally {
server.close();
chrome.stop();
server?.close();
chrome?.stop();
}
});
}
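
For context, runWebBenchmark is meant to be called from a devicelab task entry point. A minimal sketch follows, assuming the usual flutter_devicelab framework imports; the import paths are illustrative.

import 'package:flutter_devicelab/framework/framework.dart';
import 'package:flutter_devicelab/tasks/web_benchmarks.dart';

Future<void> main() async {
  // Run the HTML-renderer variant of the web benchmarks; pass true to
  // benchmark the CanvasKit backend instead.
  await task(() async => runWebBenchmark(useCanvasKit: false));
}
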