Kate Lovett 9d96df2364
Modernize framework lints (#179089)
WIP

Commits separated as follows:
- Update lints in analysis_options files
- Run `dart fix --apply` (a sketch of the kind of change this produces is below)
- Clean up leftover analysis issues
- Run `dart format .` in the right places.

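For illustration, the `dart fix --apply` step makes mechanical edits driven by whichever lints the analysis_options files now enable. Below is a hypothetical before/after, using `prefer_final_locals` purely as an example of a lint with an automated fix; it is not necessarily one of the lints changed in this PR.

```dart
// Before: `result` is never reassigned, so a lint such as
// prefer_final_locals flags it and `dart fix` offers an automated fix.
String describe(double average, double noise) {
  var result = '${average.toStringAsFixed(2)} (${(noise * 100).toStringAsFixed(2)}%)';
  return result;
}

// After `dart fix --apply`: the local becomes `final`.
String describeFixed(double average, double noise) {
  final result = '${average.toStringAsFixed(2)} (${(noise * 100).toStringAsFixed(2)}%)';
  return result;
}
```
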
Local analysis and testing pass. Checking CI now.

Part of https://github.com/flutter/flutter/issues/178827
- Adoption of flutter_lints in examples/api is coming in a separate change (cc @loic-sharma)

## Pre-launch Checklist

- [ ] I read the [Contributor Guide] and followed the process outlined
there for submitting PRs.
- [ ] I read the [Tree Hygiene] wiki page, which explains my
responsibilities.
- [ ] I read and followed the [Flutter Style Guide], including [Features
we expect every widget to implement].
- [ ] I signed the [CLA].
- [ ] I listed at least one issue that this PR fixes in the description
above.
- [ ] I updated/added relevant documentation (doc comments with `///`).
- [ ] I added new tests to check the change I am making, or this PR is
[test-exempt].
- [ ] I followed the [breaking change policy] and added [Data Driven
Fixes] where supported.
- [ ] All existing and new tests are passing.

If you need help, consider asking for advice on the #hackers-new channel
on [Discord].

**Note**: The Flutter team is currently trialing the use of [Gemini Code
Assist for
GitHub](https://developers.google.com/gemini-code-assist/docs/review-github-code).
Comments from the `gemini-code-assist` bot should not be taken as
authoritative feedback from the Flutter team. If you find its comments
useful you can update your code accordingly, but if you are unsure or
disagree with the feedback, please feel free to wait for a Flutter team
member's review for guidance on which automated comments should be
addressed.

<!-- Links -->
[Contributor Guide]:
https://github.com/flutter/flutter/blob/main/docs/contributing/Tree-hygiene.md#overview
[Tree Hygiene]:
https://github.com/flutter/flutter/blob/main/docs/contributing/Tree-hygiene.md
[test-exempt]:
https://github.com/flutter/flutter/blob/main/docs/contributing/Tree-hygiene.md#tests
[Flutter Style Guide]:
https://github.com/flutter/flutter/blob/main/docs/contributing/Style-guide-for-Flutter-repo.md
[Features we expect every widget to implement]:
https://github.com/flutter/flutter/blob/main/docs/contributing/Style-guide-for-Flutter-repo.md#features-we-expect-every-widget-to-implement
[CLA]: https://cla.developers.google.com/
[flutter/tests]: https://github.com/flutter/tests
[breaking change policy]:
https://github.com/flutter/flutter/blob/main/docs/contributing/Tree-hygiene.md#handling-breaking-changes
[Discord]:
https://github.com/flutter/flutter/blob/main/docs/contributing/Chat.md
[Data Driven Fixes]:
https://github.com/flutter/flutter/blob/main/docs/contributing/Data-driven-Fixes.md

// Copyright 2014 The Flutter Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

import 'dart:math' as math;

import 'task_result.dart';

const String kBenchmarkTypeKeyName = 'benchmark_type';
const String kBenchmarkVersionKeyName = 'version';
const String kLocalEngineKeyName = 'local_engine';
const String kLocalEngineHostKeyName = 'local_engine_host';
const String kTaskNameKeyName = 'task_name';
const String kRunStartKeyName = 'run_start';
const String kRunEndKeyName = 'run_end';
const String kAResultsKeyName = 'default_results';
const String kBResultsKeyName = 'local_engine_results';
const String kBenchmarkResultsType = 'A/B summaries';
const String kBenchmarkABVersion = '1.0';

enum FieldJustification { LEFT, RIGHT, CENTER }

/// Collects data from an A/B test and produces a summary for human evaluation.
///
/// See [printSummary] for more.
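///
/// A minimal usage sketch (the [TaskResult] values passed to [addAResult] and
/// [addBResult] below are placeholders for results produced by real benchmark
/// runs, and the engine names are example values):
///
/// ```dart
/// final ABTest abTest = ABTest(
///   localEngine: 'host_debug',
///   localEngineHost: 'host_debug',
///   taskName: 'example_benchmark',
/// );
/// abTest.addAResult(defaultEngineResult);
/// abTest.addBResult(localEngineResult);
/// abTest.finalize();
/// print(abTest.printSummary());
/// ```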
class ABTest {
ABTest({required this.localEngine, required this.localEngineHost, required this.taskName})
: runStart = DateTime.now(),
_aResults = <String, List<double>>{},
_bResults = <String, List<double>>{};
ABTest.fromJsonMap(Map<String, dynamic> jsonResults)
: localEngine = jsonResults[kLocalEngineKeyName] as String,
localEngineHost = jsonResults[kLocalEngineHostKeyName] as String?,
taskName = jsonResults[kTaskNameKeyName] as String,
runStart = DateTime.parse(jsonResults[kRunStartKeyName] as String),
_runEnd = DateTime.parse(jsonResults[kRunEndKeyName] as String),
_aResults = _convertFrom(jsonResults[kAResultsKeyName] as Map<String, dynamic>),
_bResults = _convertFrom(jsonResults[kBResultsKeyName] as Map<String, dynamic>);
final String localEngine;
final String? localEngineHost;
final String taskName;
final DateTime runStart;
DateTime? _runEnd;
DateTime? get runEnd => _runEnd;
final Map<String, List<double>> _aResults;
final Map<String, List<double>> _bResults;
static Map<String, List<double>> _convertFrom(dynamic results) {
final resultMap = results as Map<String, dynamic>;
return <String, List<double>>{
for (final String key in resultMap.keys)
key: (resultMap[key] as List<dynamic>).cast<double>(),
};
}
/// Adds the result of a single A run of the benchmark.
///
/// The result may contain multiple score keys.
///
/// [result] is expected to be a serialization of [TaskResult].
void addAResult(TaskResult result) {
if (_runEnd != null) {
throw StateError('Cannot add results to ABTest after it is finalized');
}
_addResult(result, _aResults);
}
/// Adds the result of a single B run of the benchmark.
///
/// The result may contain multiple score keys.
///
/// [result] is expected to be a serialization of [TaskResult].
void addBResult(TaskResult result) {
if (_runEnd != null) {
throw StateError('Cannot add results to ABTest after it is finalized');
}
_addResult(result, _bResults);
}
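  /// Marks the A/B test as complete and records the current time as [runEnd].
  ///
  /// After calling this, [addAResult] and [addBResult] throw a [StateError].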
void finalize() {
_runEnd = DateTime.now();
}
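  /// Serializes the test configuration and all collected results into a
  /// JSON-encodable map.
  ///
  /// The test must be finalized (see [finalize]) first, since [runEnd] is
  /// required.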
Map<String, dynamic> get jsonMap => <String, dynamic>{
kBenchmarkTypeKeyName: kBenchmarkResultsType,
kBenchmarkVersionKeyName: kBenchmarkABVersion,
kLocalEngineKeyName: localEngine,
kLocalEngineHostKeyName: ?localEngineHost,
kTaskNameKeyName: taskName,
kRunStartKeyName: runStart.toIso8601String(),
kRunEndKeyName: runEnd!.toIso8601String(),
kAResultsKeyName: _aResults,
kBResultsKeyName: _bResults,
};
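  /// Widens each entry of [lengths] as needed so that the corresponding
  /// non-null entry of [results] fits in that column.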
static void updateColumnLengths(List<int> lengths, List<String?> results) {
for (var column = 0; column < lengths.length; column++) {
if (results[column] != null) {
lengths[column] = math.max(lengths[column], results[column]?.length ?? 0);
}
}
}
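  /// Writes a single table row of [values] to [buffer], padding each value to
  /// the column width in [lengths] according to its justification in [aligns],
  /// and ends the row with a newline.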
static void formatResult(
StringBuffer buffer,
List<int> lengths,
List<FieldJustification> aligns,
List<String?> values,
) {
for (var column = 0; column < lengths.length; column++) {
final int len = lengths[column];
String? value = values[column];
if (value == null) {
value = ''.padRight(len);
} else {
value = switch (aligns[column]) {
FieldJustification.LEFT => value.padRight(len),
FieldJustification.RIGHT => value.padLeft(len),
FieldJustification.CENTER => value.padLeft((len + value.length) ~/ 2).padRight(len),
};
}
if (column > 0) {
value = value.padLeft(len + 1);
}
buffer.write(value);
}
buffer.writeln();
}
  /// Returns the summary formatted as a fixed-width ASCII table, suitable for
  /// printing directly to a terminal.
String asciiSummary() {
final Map<String, _ScoreSummary> summariesA = _summarize(_aResults);
final Map<String, _ScoreSummary> summariesB = _summarize(_bResults);
final tableRows = <List<String?>>[
for (final String scoreKey in <String>{...summariesA.keys, ...summariesB.keys})
<String?>[
scoreKey,
summariesA[scoreKey]?.averageString,
summariesA[scoreKey]?.noiseString,
summariesB[scoreKey]?.averageString,
summariesB[scoreKey]?.noiseString,
summariesA[scoreKey]?.improvementOver(summariesB[scoreKey]),
],
];
final titles = <String>['Score', 'Average A', '(noise)', 'Average B', '(noise)', 'Speed-up'];
final alignments = <FieldJustification>[
FieldJustification.LEFT,
FieldJustification.RIGHT,
FieldJustification.LEFT,
FieldJustification.RIGHT,
FieldJustification.LEFT,
FieldJustification.CENTER,
];
final lengths = List<int>.filled(6, 0);
updateColumnLengths(lengths, titles);
for (final row in tableRows) {
updateColumnLengths(lengths, row);
}
final buffer = StringBuffer();
formatResult(buffer, lengths, <FieldJustification>[
FieldJustification.CENTER,
...alignments.skip(1),
], titles);
for (final row in tableRows) {
formatResult(buffer, lengths, alignments, row);
}
return buffer.toString();
}
/// Returns unprocessed data collected by the A/B test formatted as
/// a tab-separated spreadsheet.
String rawResults() {
final buffer = StringBuffer();
for (final String scoreKey in _allScoreKeys) {
buffer.writeln('$scoreKey:');
buffer.write(' A:\t');
if (_aResults.containsKey(scoreKey)) {
for (final double score in _aResults[scoreKey]!) {
buffer.write('${score.toStringAsFixed(2)}\t');
}
} else {
buffer.write('N/A');
}
buffer.writeln();
buffer.write(' B:\t');
if (_bResults.containsKey(scoreKey)) {
for (final double score in _bResults[scoreKey]!) {
buffer.write('${score.toStringAsFixed(2)}\t');
}
} else {
buffer.write('N/A');
}
buffer.writeln();
}
return buffer.toString();
}
Set<String> get _allScoreKeys => <String>{..._aResults.keys, ..._bResults.keys};
/// Returns the summary as a tab-separated spreadsheet.
///
/// This value can be copied straight to a Google Spreadsheet for further analysis.
String printSummary() {
final Map<String, _ScoreSummary> summariesA = _summarize(_aResults);
final Map<String, _ScoreSummary> summariesB = _summarize(_bResults);
final buffer = StringBuffer('Score\tAverage A (noise)\tAverage B (noise)\tSpeed-up\n');
for (final String scoreKey in _allScoreKeys) {
final _ScoreSummary? summaryA = summariesA[scoreKey];
final _ScoreSummary? summaryB = summariesB[scoreKey];
buffer.write('$scoreKey\t');
if (summaryA != null) {
buffer.write('${summaryA.averageString} ${summaryA.noiseString}\t');
} else {
buffer.write('\t');
}
if (summaryB != null) {
buffer.write('${summaryB.averageString} ${summaryB.noiseString}\t');
} else {
buffer.write('\t');
}
if (summaryA != null && summaryB != null) {
buffer.write('${summaryA.improvementOver(summaryB)}\t');
}
buffer.writeln();
}
return buffer.toString();
}
}

/// The average and noise of a single series of benchmark scores.
class _ScoreSummary {
_ScoreSummary({required this.average, required this.noise});
/// Average (arithmetic mean) of a series of values collected by a benchmark.
final double average;
/// The noise (standard deviation divided by [average]) in the collected
/// values.
final double noise;
String get averageString => average.toStringAsFixed(2);
String get noiseString => '(${_ratioToPercent(noise)})';
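  /// Formats the ratio of this [average] to [other]'s average as a speed-up
  /// factor such as '1.23x', or returns an empty string if [other] is null.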
String improvementOver(_ScoreSummary? other) {
return other == null ? '' : '${(average / other.average).toStringAsFixed(2)}x';
}
}

/// Extracts the scores named by [TaskResult.benchmarkScoreKeys] from [result]
/// and appends them to the corresponding series in [results].
void _addResult(TaskResult result, Map<String, List<double>> results) {
for (final String scoreKey in result.benchmarkScoreKeys ?? <String>[]) {
final double score = (result.data![scoreKey] as num).toDouble();
results.putIfAbsent(scoreKey, () => <double>[]).add(score);
}
}

/// Computes the average and noise (relative standard deviation) of each score
/// series in [results].
Map<String, _ScoreSummary> _summarize(Map<String, List<double>> results) {
return results.map<String, _ScoreSummary>((String scoreKey, List<double> values) {
final double average = _computeAverage(values);
return MapEntry<String, _ScoreSummary>(
scoreKey,
_ScoreSummary(
average: average,
// If the average is zero, the benchmark got the perfect score with no noise.
noise: average > 0 ? _computeStandardDeviationForPopulation(values) / average : 0.0,
),
);
});
}

/// Computes the arithmetic mean (or average) of given [values].
double _computeAverage(Iterable<double> values) {
final double sum = values.reduce((double a, double b) => a + b);
return sum / values.length;
}

/// Computes population standard deviation.
///
/// Unlike sample standard deviation, which divides by N - 1, this divides by N.
///
/// See also:
///
/// * https://en.wikipedia.org/wiki/Standard_deviation
double _computeStandardDeviationForPopulation(Iterable<double> population) {
final double mean = _computeAverage(population);
final double sumOfSquaredDeltas = population.fold<double>(
0.0,
(double previous, num value) => previous += math.pow(value - mean, 2),
);
return math.sqrt(sumOfSquaredDeltas / population.length);
}

/// Formats [value], a ratio, as a percentage string (for example, 0.1234
/// becomes '12.34%').
String _ratioToPercent(double value) {
return '${(value * 100).toStringAsFixed(2)}%';
}