// Copyright 2014 The Flutter Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
import 'dart:math' as math;
import 'dart:typed_data';

import 'package:flutter/foundation.dart';
// TODO(abarth): Consider using vector_math.
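// A simple vector of doubles that either owns its backing storage or aliases
// a slice (offset and length) of an existing list, such as a matrix row.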
class _Vector {
_Vector(int size)
: _offset = 0,
_length = size,
_elements = Float64List(size);
_Vector.fromVOL(List<double> values, int offset, int length)
: _offset = offset,
_length = length,
_elements = values;
final int _offset;
final int _length;
final List<double> _elements;
double operator [](int i) => _elements[i + _offset];
void operator []=(int i, double value) {
_elements[i + _offset] = value;
}
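  // The dot product of this vector with another vector of the same logical
  // length.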
double operator *(_Vector a) {
double result = 0.0;
for (int i = 0; i < _length; i += 1)
result += this[i] * a[i];
return result;
}
double norm() => math.sqrt(this * this);
}
// TODO(abarth): Consider using vector_math.
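// A dense matrix of doubles stored row-major in a single flat list.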
class _Matrix {
_Matrix(int rows, int cols)
: _columns = cols,
_elements = Float64List(rows * cols);
final int _columns;
final List<double> _elements;
double get(int row, int col) => _elements[row * _columns + col];
void set(int row, int col, double value) {
_elements[row * _columns + col] = value;
}
_Vector getRow(int row) => _Vector.fromVOL(
_elements,
row * _columns,
_columns,
);
}
/// An nth degree polynomial fit to a dataset.
class PolynomialFit {
/// Creates a polynomial fit of the given degree.
///
/// There are n + 1 coefficients in a fit of degree n.
PolynomialFit(int degree) : coefficients = Float64List(degree + 1);
/// The polynomial coefficients of the fit.
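  ///
  /// The value at index `i` is the coefficient of the `x^i` term, so
  /// `coefficients[0]` is the constant term.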
final List<double> coefficients;
/// An indicator of the quality of the fit.
///
/// Larger values indicate greater quality.
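  ///
  /// Computed by [LeastSquaresSolver.solve] as the coefficient of
  /// determination (R-squared) of the weighted fit.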
late double confidence;
}
/// Uses the least-squares algorithm to fit a polynomial to a set of data.
class LeastSquaresSolver {
/// Creates a least-squares solver.
///
  /// The [x], [y], and [w] arguments must not be null and must all contain the
  /// same number of elements.
LeastSquaresSolver(this.x, this.y, this.w)
: assert(x.length == y.length),
assert(y.length == w.length);
/// The x-coordinates of each data point.
final List<double> x;
/// The y-coordinates of each data point.
final List<double> y;
/// The weight to use for each data point.
final List<double> w;
/// Fits a polynomial of the given degree to the data points.
///
  /// When there is not enough data to fit a curve, null is returned.
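  ///
  /// A minimal usage sketch with illustrative data (not taken from the
  /// framework): fitting a straight line, i.e. a degree-1 polynomial.
  ///
  /// ```dart
  /// final LeastSquaresSolver solver = LeastSquaresSolver(
  ///   <double>[0.0, 1.0, 2.0, 3.0], // x
  ///   <double>[1.0, 3.0, 5.0, 7.0], // y
  ///   <double>[1.0, 1.0, 1.0, 1.0], // w
  /// );
  /// final PolynomialFit? fit = solver.solve(1);
  /// // The data is exactly linear, so the coefficients come out approximately
  /// // as [1.0, 2.0] (y = 1 + 2x) and the confidence is approximately 1.0.
  /// ```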
PolynomialFit? solve(int degree) {
if (degree > x.length) // Not enough data to fit a curve.
return null;
final PolynomialFit result = PolynomialFit(degree);
// Shorthands for the purpose of notation equivalence to original C++ code.
final int m = x.length;
final int n = degree + 1;
// Expand the X vector to a matrix A, pre-multiplied by the weights.
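    // Row i of A holds w[h] * pow(x[h], i) in column h, making A the transpose
    // of the weighted Vandermonde design matrix.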
final _Matrix a = _Matrix(n, m);
for (int h = 0; h < m; h += 1) {
a.set(0, h, w[h]);
for (int i = 1; i < n; i += 1)
a.set(i, h, a.get(i - 1, h) * x[h]);
}
// Apply the Gram-Schmidt process to A to obtain its QR decomposition.
    // Orthonormal basis, column-major order.
final _Matrix q = _Matrix(n, m);
// Upper triangular matrix, row-major order.
final _Matrix r = _Matrix(n, n);
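    // Classical Gram-Schmidt over the rows of A: afterwards the rows of Q are
    // orthonormal and a.getRow(i) equals the sum of r.get(j, i) * q.getRow(j)
    // over j <= i.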
for (int j = 0; j < n; j += 1) {
for (int h = 0; h < m; h += 1)
q.set(j, h, a.get(j, h));
for (int i = 0; i < j; i += 1) {
final double dot = q.getRow(j) * q.getRow(i);
for (int h = 0; h < m; h += 1)
q.set(j, h, q.get(j, h) - dot * q.get(i, h));
}
final double norm = q.getRow(j).norm();
if (norm < precisionErrorTolerance) {
        // Vectors are linearly dependent or zero, so there is no solution.
return null;
}
final double inverseNorm = 1.0 / norm;
for (int h = 0; h < m; h += 1)
q.set(j, h, q.get(j, h) * inverseNorm);
for (int i = 0; i < n; i += 1)
r.set(j, i, i < j ? 0.0 : q.getRow(j) * a.getRow(i));
}
// Solve R B = Qt W Y to find B. This is easy because R is upper triangular.
// We just work from bottom-right to top-left calculating B's coefficients.
final _Vector wy = _Vector(m);
for (int h = 0; h < m; h += 1)
wy[h] = y[h] * w[h];
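    // Back substitution: q.getRow(i) * wy is the i-th entry of Q * W * y; the
    // contributions of the already-solved higher-order coefficients are then
    // subtracted before dividing by the diagonal entry of R.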
for (int i = n - 1; i >= 0; i -= 1) {
result.coefficients[i] = q.getRow(i) * wy;
for (int j = n - 1; j > i; j -= 1)
result.coefficients[i] -= r.get(i, j) * result.coefficients[j];
result.coefficients[i] /= r.get(i, i);
}
// Calculate the coefficient of determination (confidence) as:
// 1 - (sumSquaredError / sumSquaredTotal)
// ...where sumSquaredError is the residual sum of squares (variance of the
// error), and sumSquaredTotal is the total sum of squares (variance of the
// data) where each has been weighted.
double yMean = 0.0;
for (int h = 0; h < m; h += 1)
yMean += y[h];
yMean /= m;
double sumSquaredError = 0.0;
double sumSquaredTotal = 0.0;
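    // Evaluate the fitted polynomial at each x[h] by accumulating successive
    // powers of x[h], then accumulate the weighted squared residual and the
    // weighted squared deviation from the mean.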
for (int h = 0; h < m; h += 1) {
double term = 1.0;
double err = y[h] - result.coefficients[0];
for (int i = 1; i < n; i += 1) {
term *= x[h];
err -= term * result.coefficients[i];
}
sumSquaredError += w[h] * w[h] * err * err;
final double v = y[h] - yMean;
sumSquaredTotal += w[h] * w[h] * v * v;
}
result.confidence = sumSquaredTotal <= precisionErrorTolerance ? 1.0 :
1.0 - (sumSquaredError / sumSquaredTotal);
return result;
}
}