Commit fde985b3 authored by Yegor, committed by GitHub

resurrect analyzer benchmarks (#10668)

* resurrect analyzer benchmarks

* move analyzer_benchmark to the more stable linux/android

* report average rather than best result
parent 8bf17192
...@@ -6,15 +6,7 @@ import 'dart:async'; ...@@ -6,15 +6,7 @@ import 'dart:async';
import 'package:flutter_devicelab/tasks/analysis.dart'; import 'package:flutter_devicelab/tasks/analysis.dart';
import 'package:flutter_devicelab/framework/framework.dart'; import 'package:flutter_devicelab/framework/framework.dart';
import 'package:flutter_devicelab/framework/utils.dart';
Future<Null> main() async { Future<Null> main() async {
final String revision = await getCurrentFlutterRepoCommit(); await task(analyzerBenchmarkTask);
final DateTime revisionTimestamp = await getFlutterRepoCommitTimestamp(revision);
final String dartSdkVersion = await getDartVersion();
await task(createAnalyzerCliTest(
sdk: dartSdkVersion,
commit: revision,
timestamp: revisionTimestamp,
));
} }
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
import 'dart:async';
import 'package:flutter_devicelab/tasks/analysis.dart';
import 'package:flutter_devicelab/framework/framework.dart';
import 'package:flutter_devicelab/framework/utils.dart';
Future<Null> main() async {
  // Gather metadata about the current checkout and toolchain so the
  // benchmark results can be attributed to a specific commit and SDK.
  final String flutterCommit = await getCurrentFlutterRepoCommit();
  final DateTime commitTimestamp = await getFlutterRepoCommitTimestamp(flutterCommit);
  final String dartVersion = await getDartVersion();
  await task(createAnalyzerServerTest(
    sdk: dartVersion,
    commit: flutterCommit,
    timestamp: commitTimestamp,
  ));
}
// Copyright (c) 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
import 'dart:async';
import 'framework.dart';
/// A benchmark harness used to run a benchmark multiple times and report the
/// best result.
abstract class Benchmark {
  Benchmark(this.name);
  /// Human-readable name of this benchmark, also used as [toString].
  final String name;
  /// The task result captured during the fastest run observed so far;
  /// written by [runBenchmark] whenever a run beats the previous minimum.
  TaskResult bestResult;
  /// One-time setup performed before any run; the default is a no-op.
  /// Subclasses override this to prepare fixtures.
  Future<Null> init() => new Future<Null>.value();
  /// Performs a single benchmark run and returns a lower-is-better score.
  Future<num> run();
  /// The task result produced by the most recent run, as reported by the
  /// subclass (presumably updated inside [run] — confirm in implementors).
  TaskResult get lastResult;
  @override
  String toString() => name;
}
/// Runs a [benchmark] [iterations] times and reports the best (lowest)
/// numeric result.
///
/// Use [warmUpBenchmark] to run the benchmark once beforehand and discard
/// the cold result.
///
/// Individual failed runs are logged and skipped rather than aborting the
/// whole series; returns null if every run failed (or [iterations] is zero).
Future<num> runBenchmark(Benchmark benchmark, {
  int iterations: 1,
  bool warmUpBenchmark: false
}) async {
  await benchmark.init();
  num minValue;
  if (warmUpBenchmark)
    await benchmark.run();
  while (iterations > 0) {
    iterations--;
    print('');
    try {
      final num result = await benchmark.run();
      if (minValue == null || result < minValue) {
        // Remember the task result that corresponds to the fastest run so
        // far so it can be reported alongside the minimum score.
        benchmark.bestResult = benchmark.lastResult;
        minValue = result;
      }
    } catch (error) {
      // Best-effort: a single failed run should not sink the benchmark.
      print('benchmark failed with error: $error');
    }
  }
  return minValue;
}
...@@ -380,35 +380,6 @@ void checkNotNull(Object o1, ...@@ -380,35 +380,6 @@ void checkNotNull(Object o1,
throw 'o10 is null'; throw 'o10 is null';
} }
/// Add benchmark values to a JSON results file.
///
/// Merges [expected], [sdk], [commit], and [timestamp] (stored as
/// milliseconds since epoch) into [jsonFile], creating the file if it does
/// not exist yet. Null arguments leave the corresponding field untouched.
///
/// If the file contains information about how long the benchmark took to run
/// (a `time` field), then return that info; otherwise returns null.
// TODO(yjbanov): move this data to __metadata__
num addBuildInfo(File jsonFile,
    {num expected, String sdk, String commit, DateTime timestamp}) {
  Map<String, dynamic> json;
  // Start from the existing contents, if any, so repeated calls accumulate
  // fields instead of clobbering the file.
  if (jsonFile.existsSync())
    // Use jsonDecode to match jsonEncode below (the old JSON.decode constant
    // was inconsistent with it and does not exist alongside jsonEncode).
    json = jsonDecode(jsonFile.readAsStringSync());
  else
    json = <String, dynamic>{};
  if (expected != null)
    json['expected'] = expected;
  if (sdk != null)
    json['sdk'] = sdk;
  if (commit != null)
    json['commit'] = commit;
  if (timestamp != null)
    json['timestamp'] = timestamp.millisecondsSinceEpoch;
  jsonFile.writeAsStringSync(jsonEncode(json));
  // Return the elapsed time of the benchmark (if any).
  return json['time'];
}
/// Splits [from] into lines and selects those that contain [pattern]. /// Splits [from] into lines and selects those that contain [pattern].
Iterable<String> grep(Pattern pattern, {@required String from}) { Iterable<String> grep(Pattern pattern, {@required String from}) {
return from.split('\n').where((String line) { return from.split('\n').where((String line) {
......
...@@ -5,112 +5,98 @@ ...@@ -5,112 +5,98 @@
import 'dart:async'; import 'dart:async';
import 'dart:io'; import 'dart:io';
import 'package:meta/meta.dart';
import 'package:path/path.dart' as path; import 'package:path/path.dart' as path;
import '../framework/benchmarks.dart';
import '../framework/framework.dart'; import '../framework/framework.dart';
import '../framework/utils.dart'; import '../framework/utils.dart';
TaskFunction createAnalyzerCliTest({ /// Run each benchmark this many times and compute average.
@required String sdk, const int _kRunsPerBenchmark = 3;
@required String commit,
@required DateTime timestamp,
}) {
return new AnalyzerCliTask(sdk, commit, timestamp);
}
TaskFunction createAnalyzerServerTest({ /// Runs a benchmark once and reports the result as a lower-is-better numeric
@required String sdk, /// value.
@required String commit, typedef Future<double> _Benchmark();
@required DateTime timestamp,
}) {
return new AnalyzerServerTask(sdk, commit, timestamp);
}
abstract class AnalyzerTask { /// Path to the generated "mega gallery" app.
Benchmark benchmark; Directory get _megaGalleryDirectory => dir(path.join(Directory.systemTemp.path, 'mega_gallery'));
Future<TaskResult> call() async { Future<TaskResult> analyzerBenchmarkTask() async {
section(benchmark.name); await inDirectory(flutterDirectory, () async {
await runBenchmark(benchmark, iterations: 3, warmUpBenchmark: true); rmTree(_megaGalleryDirectory);
return benchmark.bestResult; mkdirs(_megaGalleryDirectory);
} await dart(<String>['dev/tools/mega_gallery.dart', '--out=${_megaGalleryDirectory.path}']);
} });
class AnalyzerCliTask extends AnalyzerTask { final Map<String, dynamic> data = <String, dynamic>{
AnalyzerCliTask(String sdk, String commit, DateTime timestamp) { 'flutter_repo_batch': await _run(new _FlutterRepoBenchmark()),
benchmark = new FlutterAnalyzeBenchmark(sdk, commit, timestamp); 'flutter_repo_watch': await _run(new _FlutterRepoBenchmark(watch: true)),
} 'mega_gallery_batch': await _run(new _MegaGalleryBenchmark()),
} 'mega_gallery_watch': await _run(new _MegaGalleryBenchmark(watch: true)),
};
class AnalyzerServerTask extends AnalyzerTask { return new TaskResult.success(data, benchmarkScoreKeys: data.keys.toList());
AnalyzerServerTask(String sdk, String commit, DateTime timestamp) {
benchmark = new FlutterAnalyzeAppBenchmark(sdk, commit, timestamp);
}
} }
class FlutterAnalyzeBenchmark extends Benchmark { /// Times how long it takes to analyze the Flutter repository.
FlutterAnalyzeBenchmark(this.sdk, this.commit, this.timestamp) class _FlutterRepoBenchmark {
: super('flutter analyze --flutter-repo'); _FlutterRepoBenchmark({ this.watch = false });
final String sdk;
final String commit;
final DateTime timestamp;
File get benchmarkFile => final bool watch;
file(path.join(flutterDirectory.path, 'analysis_benchmark.json'));
@override Future<double> call() async {
TaskResult get lastResult => new TaskResult.successFromFile(benchmarkFile); section('Analyze Flutter repo ${watch ? 'with watcher' : ''}');
final Stopwatch stopwatch = new Stopwatch();
@override
Future<num> run() async {
rm(benchmarkFile);
await inDirectory(flutterDirectory, () async { await inDirectory(flutterDirectory, () async {
await flutter('analyze', options: <String>[ final List<String> options = <String>[
'--flutter-repo', '--flutter-repo',
'--benchmark', '--benchmark',
]); ];
if (watch)
options.add('--watch');
stopwatch.start();
await flutter('analyze', options: options);
stopwatch.stop();
}); });
return addBuildInfo(benchmarkFile, return stopwatch.elapsedMilliseconds / 1000;
timestamp: timestamp, expected: 25.0, sdk: sdk, commit: commit);
} }
} }
class FlutterAnalyzeAppBenchmark extends Benchmark { /// Times how long it takes to analyze the generated "mega_gallery" app.
FlutterAnalyzeAppBenchmark(this.sdk, this.commit, this.timestamp) class _MegaGalleryBenchmark {
: super('analysis server mega_gallery'); _MegaGalleryBenchmark({ this.watch = false });
final String sdk; final bool watch;
final String commit;
final DateTime timestamp;
@override Future<double> call() async {
TaskResult get lastResult => new TaskResult.successFromFile(benchmarkFile); section('Analyze mega gallery ${watch ? 'with watcher' : ''}');
final Stopwatch stopwatch = new Stopwatch();
await inDirectory(_megaGalleryDirectory, () async {
final List<String> options = <String>[
'--benchmark',
];
Directory get megaDir => dir( if (watch)
path.join(flutterDirectory.path, 'dev/benchmarks/mega_gallery')); options.add('--watch');
File get benchmarkFile =>
file(path.join(megaDir.path, 'analysis_benchmark.json'));
@override stopwatch.start();
Future<Null> init() { await flutter('analyze', options: options);
return inDirectory(flutterDirectory, () async { stopwatch.stop();
await dart(<String>['dev/tools/mega_gallery.dart']);
}); });
return stopwatch.elapsedMilliseconds / 1000;
} }
}
@override /// Runs a [benchmark] several times and reports the average result.
Future<num> run() async { Future<double> _run(_Benchmark benchmark) async {
rm(benchmarkFile); double total = 0.0;
await inDirectory(megaDir, () async { for (int i = 0; i < _kRunsPerBenchmark; i++) {
await flutter('analyze', options: <String>[ // Delete cached analysis results.
'--watch', rmTree(dir('${Platform.environment['HOME']}/.dartServer'));
'--benchmark',
]); total += await benchmark();
});
return addBuildInfo(benchmarkFile,
timestamp: timestamp, expected: 10.0, sdk: sdk, commit: commit);
} }
final double average = total / _kRunsPerBenchmark;
return average;
} }
...@@ -50,18 +50,6 @@ tasks: ...@@ -50,18 +50,6 @@ tasks:
stage: devicelab stage: devicelab
required_agent_capabilities: ["has-android-device"] required_agent_capabilities: ["has-android-device"]
analyzer_cli__analysis_time:
description: >
Measures the speed of analyzing Flutter itself in batch mode.
stage: devicelab
required_agent_capabilities: ["has-android-device"]
analyzer_server__analysis_time:
description: >
Measures the speed of analyzing Flutter itself in server mode.
stage: devicelab
required_agent_capabilities: ["has-android-device"]
# Android on-device tests # Android on-device tests
complex_layout_scroll_perf__timeline_summary: complex_layout_scroll_perf__timeline_summary:
...@@ -297,3 +285,9 @@ tasks: ...@@ -297,3 +285,9 @@ tasks:
Measures memory usage after Android app suspend and resume. Measures memory usage after Android app suspend and resume.
stage: devicelab stage: devicelab
required_agent_capabilities: ["linux/android"] required_agent_capabilities: ["linux/android"]
analyzer_benchmark:
description: >
Measures the speed of Dart analyzer.
stage: devicelab
required_agent_capabilities: ["linux/android"]
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment