Skip to content
Projects
Groups
Snippets
Help
Loading...
Help
Submit feedback
Sign in
Toggle navigation
F
Front-End
Project
Project
Details
Activity
Releases
Cycle Analytics
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Charts
Issues
0
Issues
0
List
Board
Labels
Milestones
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Charts
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Charts
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
abdullh.alsoleman
Front-End
Commits
6a9ac450
Unverified
Commit
6a9ac450
authored
Feb 23, 2022
by
Lau Ching Jun
Committed by
GitHub
Feb 23, 2022
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
Add option in ProxiedDevice to only transfer the delta when deploying. (#97462)
parent
c74a646b
Changes
4
Hide whitespace changes
Inline
Side-by-side
Showing
4 changed files
with
733 additions
and
8 deletions
+733
-8
daemon.dart
packages/flutter_tools/lib/src/commands/daemon.dart
+30
-3
devices.dart
packages/flutter_tools/lib/src/proxied_devices/devices.dart
+49
-5
file_transfer.dart
.../flutter_tools/lib/src/proxied_devices/file_transfer.dart
+452
-0
file_transfer_test.dart
...est/general.shard/proxied_devices/file_transfer_test.dart
+202
-0
No files found.
packages/flutter_tools/lib/src/commands/daemon.dart
View file @
6a9ac450
...
...
@@ -27,6 +27,7 @@ import '../emulator.dart';
import
'../features.dart'
;
import
'../globals.dart'
as
globals
;
import
'../project.dart'
;
import
'../proxied_devices/file_transfer.dart'
;
import
'../resident_runner.dart'
;
import
'../run_cold.dart'
;
import
'../run_hot.dart'
;
...
...
@@ -1320,6 +1321,8 @@ class EmulatorDomain extends Domain {
class
ProxyDomain
extends
Domain
{
ProxyDomain
(
Daemon
daemon
)
:
super
(
daemon
,
'proxy'
)
{
registerHandlerWithBinary
(
'writeTempFile'
,
writeTempFile
);
registerHandler
(
'calculateFileHashes'
,
calculateFileHashes
);
registerHandlerWithBinary
(
'updateFile'
,
updateFile
);
registerHandler
(
'connect'
,
connect
);
registerHandler
(
'disconnect'
,
disconnect
);
registerHandlerWithBinary
(
'write'
,
write
);
...
...
@@ -1336,6 +1339,29 @@ class ProxyDomain extends Domain {
await
file
.
openWrite
().
addStream
(
binary
);
}
/// Calculate rolling hashes for a file in the local temporary directory.
Future
<
Map
<
String
,
dynamic
>>
calculateFileHashes
(
Map
<
String
,
dynamic
>
args
)
async
{
final
String
path
=
_getStringArg
(
args
,
'path'
,
required:
true
);
final
File
file
=
tempDirectory
.
childFile
(
path
);
if
(!
await
file
.
exists
())
{
return
null
;
}
final
BlockHashes
result
=
await
FileTransfer
().
calculateBlockHashesOfFile
(
file
);
return
result
.
toJson
();
}
Future
<
bool
>
updateFile
(
Map
<
String
,
dynamic
>
args
,
Stream
<
List
<
int
>>
binary
)
async
{
final
String
path
=
_getStringArg
(
args
,
'path'
,
required:
true
);
final
File
file
=
tempDirectory
.
childFile
(
path
);
if
(!
await
file
.
exists
())
{
return
null
;
}
final
List
<
Map
<
String
,
dynamic
>>
deltaJson
=
(
args
[
'delta'
]
as
List
<
dynamic
>).
cast
<
Map
<
String
,
dynamic
>>();
final
List
<
FileDeltaBlock
>
delta
=
FileDeltaBlock
.
fromJsonList
(
deltaJson
);
final
bool
result
=
await
FileTransfer
().
rebuildFile
(
file
,
delta
,
binary
);
return
result
;
}
/// Opens a connection to a local port, and returns the connection id.
Future
<
String
>
connect
(
Map
<
String
,
dynamic
>
args
)
async
{
final
int
targetPort
=
_getIntArg
(
args
,
'port'
,
required:
true
);
...
...
@@ -1404,12 +1430,13 @@ class ProxyDomain extends Domain {
for
(
final
Socket
connection
in
_forwardedConnections
.
values
)
{
connection
.
destroy
();
}
await
_tempDirectory
?.
delete
(
recursive:
true
);
// We deliberately not clean up the tempDirectory here. The application package files that
// are transferred into this directory through ProxiedDevices are left in the directory
// to be reused on any subsequent runs.
}
Directory
_tempDirectory
;
Directory
get
tempDirectory
=>
_tempDirectory
??=
globals
.
fs
.
systemTempDirectory
.
c
reateTempSync
(
'flutter_tool_daemon.'
);
Directory
get
tempDirectory
=>
_tempDirectory
??=
globals
.
fs
.
systemTempDirectory
.
c
hildDirectory
(
'flutter_tool_daemon'
)..
createSync
(
);
}
/// A [Logger] which sends log messages to a listening daemon client.
...
...
packages/flutter_tools/lib/src/proxied_devices/devices.dart
View file @
6a9ac450
...
...
@@ -16,6 +16,7 @@ import '../daemon.dart';
import
'../device.dart'
;
import
'../device_port_forwarder.dart'
;
import
'../project.dart'
;
import
'file_transfer.dart'
;
bool
_isNullable
<
T
>()
=>
null
is
T
;
...
...
@@ -29,16 +30,23 @@ T _cast<T>(Object? object) {
/// A [DeviceDiscovery] that will connect to a flutter daemon and connects to
/// the devices remotely.
///
/// If [deltaFileTransfer] is true, the proxy will use an rsync-like algorithm that
/// only transfers the changed part of the application package for deployment.
class
ProxiedDevices
extends
DeviceDiscovery
{
ProxiedDevices
(
this
.
connection
,
{
bool
deltaFileTransfer
=
true
,
required
Logger
logger
,
})
:
_logger
=
logger
;
})
:
_deltaFileTransfer
=
deltaFileTransfer
,
_logger
=
logger
;
/// [DaemonConnection] used to communicate with the daemon.
final
DaemonConnection
connection
;
final
Logger
_logger
;
final
bool
_deltaFileTransfer
;
@override
bool
get
supportsPlatform
=>
true
;
...
...
@@ -70,6 +78,7 @@ class ProxiedDevices extends DeviceDiscovery {
final
Map
<
String
,
Object
?>
capabilities
=
_cast
<
Map
<
String
,
Object
?>>(
device
[
'capabilities'
]);
return
ProxiedDevice
(
connection
,
_cast
<
String
>(
device
[
'id'
]),
deltaFileTransfer:
_deltaFileTransfer
,
category:
Category
.
fromString
(
_cast
<
String
>(
device
[
'category'
])),
platformType:
PlatformType
.
fromString
(
_cast
<
String
>(
device
[
'platformType'
])),
targetPlatform:
getTargetPlatformForName
(
_cast
<
String
>(
device
[
'platform'
])),
...
...
@@ -92,8 +101,12 @@ class ProxiedDevices extends DeviceDiscovery {
/// A [Device] that acts as a proxy to remotely connected device.
///
/// The communication happens via a flutter daemon.
///
/// If [deltaFileTransfer] is true, the proxy will use an rsync-like algorithm that
/// only transfers the changed part of the application package for deployment.
class
ProxiedDevice
extends
Device
{
ProxiedDevice
(
this
.
connection
,
String
id
,
{
bool
deltaFileTransfer
=
true
,
required
Category
?
category
,
required
PlatformType
?
platformType
,
required
TargetPlatform
targetPlatform
,
...
...
@@ -109,7 +122,8 @@ class ProxiedDevice extends Device {
required
this
.
supportsFastStart
,
required
bool
supportsHardwareRendering
,
required
Logger
logger
,
}):
_isLocalEmulator
=
isLocalEmulator
,
}):
_deltaFileTransfer
=
deltaFileTransfer
,
_isLocalEmulator
=
isLocalEmulator
,
_emulatorId
=
emulatorId
,
_sdkNameAndVersion
=
sdkNameAndVersion
,
_supportsHardwareRendering
=
supportsHardwareRendering
,
...
...
@@ -125,6 +139,8 @@ class ProxiedDevice extends Device {
final
Logger
_logger
;
final
bool
_deltaFileTransfer
;
@override
final
String
name
;
...
...
@@ -288,9 +304,37 @@ class ProxiedDevice extends Device {
final
String
fileName
=
binary
.
basename
;
final
Completer
<
String
>
idCompleter
=
Completer
<
String
>();
_applicationPackageMap
[
path
]
=
idCompleter
.
future
;
await
connection
.
sendRequest
(
'proxy.writeTempFile'
,
<
String
,
Object
>{
'path'
:
fileName
,
},
await
binary
.
readAsBytes
());
final
Map
<
String
,
Object
>
args
=
<
String
,
Object
>{
'path'
:
fileName
};
Map
<
String
,
Object
?>?
rollingHashResultJson
;
if
(
_deltaFileTransfer
)
{
rollingHashResultJson
=
_cast
<
Map
<
String
,
Object
?>?>(
await
connection
.
sendRequest
(
'proxy.calculateFileHashes'
,
args
));
}
if
(
rollingHashResultJson
==
null
)
{
// Either file not found on the remote end, or deltaFileTransfer is set to false, transfer the file directly.
if
(
_deltaFileTransfer
)
{
_logger
.
printTrace
(
'Delta file transfer is enabled but file is not found on the remote end, do a full transfer.'
);
}
await
connection
.
sendRequest
(
'proxy.writeTempFile'
,
args
,
await
binary
.
readAsBytes
());
}
else
{
final
BlockHashes
rollingHashResult
=
BlockHashes
.
fromJson
(
rollingHashResultJson
);
final
List
<
FileDeltaBlock
>
delta
=
await
FileTransfer
().
computeDelta
(
binary
,
rollingHashResult
);
// Delta is empty if the file does not need to be updated
if
(
delta
.
isNotEmpty
)
{
final
List
<
Map
<
String
,
Object
>>
deltaJson
=
delta
.
map
((
FileDeltaBlock
block
)
=>
block
.
toJson
()).
toList
();
final
Uint8List
buffer
=
await
FileTransfer
().
binaryForRebuilding
(
binary
,
delta
);
await
connection
.
sendRequest
(
'proxy.updateFile'
,
<
String
,
Object
>{
'path'
:
fileName
,
'delta'
:
deltaJson
,
},
buffer
);
}
}
final
String
id
=
_cast
<
String
>(
await
connection
.
sendRequest
(
'device.uploadApplicationPackage'
,
<
String
,
Object
>{
'targetPlatform'
:
getNameForTargetPlatform
(
_targetPlatform
),
'applicationBinary'
:
fileName
,
...
...
packages/flutter_tools/lib/src/proxied_devices/file_transfer.dart
0 → 100644
View file @
6a9ac450
// Copyright 2014 The Flutter Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
import
'dart:async'
;
import
'dart:math'
;
import
'dart:typed_data'
;
import
'package:crypto/crypto.dart'
;
import
'package:meta/meta.dart'
;
import
'../base/file_system.dart'
;
import
'../base/io.dart'
;
import
'../build_system/hash.dart'
;
import
'../convert.dart'
;
/// Adler-32 and MD5 hashes of blocks in files.
class BlockHashes {
  BlockHashes({
    required this.blockSize,
    required this.totalSize,
    required this.adler32,
    required this.md5,
    required this.fileMd5,
  });

  /// The block size used to generate the hashes.
  final int blockSize;

  /// Total size of the file.
  final int totalSize;

  /// List of adler32 hashes of each block in the file.
  final List<int> adler32;

  /// List of MD5 hashes of each block in the file.
  final List<String> md5;

  /// MD5 hash of the whole file.
  final String fileMd5;

  /// Serializes the hashes into a JSON-compatible map.
  ///
  /// The adler32 values are packed into 32-bit words and base64-encoded to
  /// keep the payload compact.
  Map<String, Object> toJson() {
    final Uint8List adler32Bytes = Uint8List.view(Uint32List.fromList(adler32).buffer);
    return <String, Object>{
      'blockSize': blockSize,
      'totalSize': totalSize,
      'adler32': base64.encode(adler32Bytes),
      'md5': md5,
      'fileMd5': fileMd5,
    };
  }

  /// Deserializes a map produced by [toJson].
  static BlockHashes fromJson(Map<String, Object?> obj) {
    final Uint8List adler32Bytes = base64.decode(obj['adler32']! as String);
    return BlockHashes(
      blockSize: obj['blockSize']! as int,
      totalSize: obj['totalSize']! as int,
      adler32: Uint32List.view(adler32Bytes.buffer),
      md5: (obj['md5']! as List<Object>).cast<String>(),
      fileMd5: obj['fileMd5']! as String,
    );
  }
}
/// Converts a stream of bytes, into a stream of bytes of fixed chunk size.
@visibleForTesting
Stream<Uint8List> convertToChunks(Stream<Uint8List> source, int chunkSize) {
  // Bytes carried over between incoming events until a full chunk is formed.
  final BytesBuilder pending = BytesBuilder(copy: false);
  final StreamController<Uint8List> controller = StreamController<Uint8List>();

  final StreamSubscription<Uint8List> subscription = source.listen(
    (Uint8List chunk) {
      int offset = 0;
      while (offset < chunk.length) {
        // Take whichever is smaller: what is left of this event, or what is
        // still needed to complete the current chunk.
        final int take = min(chunkSize - pending.length, chunk.length - offset);
        assert(take > 0);
        assert(take <= chunkSize);
        final Uint8List piece = chunk.sublist(offset, offset + take);
        offset += take;
        if (pending.isEmpty && take == chunkSize) {
          // A whole chunk is available without buffering.
          controller.add(piece);
        } else {
          pending.add(piece);
          assert(pending.length <= chunkSize);
          if (pending.length == chunkSize) {
            controller.add(pending.takeBytes());
          }
        }
      }
    },
    onDone: () {
      if (controller.hasListener && !controller.isClosed) {
        // Flush any trailing partial chunk before closing.
        if (pending.isNotEmpty) {
          controller.add(pending.takeBytes());
        }
        controller.close();
      }
    },
    onError: (Object error, StackTrace stackTrace) {
      controller.addError(error, stackTrace);
    },
  );

  // Propagate downstream flow control to the upstream subscription.
  controller.onCancel = subscription.cancel;
  controller.onPause = subscription.pause;
  controller.onResume = subscription.resume;

  return controller.stream;
}
const int _adler32Prime = 65521;

/// Helper function to calculate Adler32 hash of a binary.
@visibleForTesting
int adler32Hash(List<int> binary) {
  // The maximum integer that can be stored in the `int` data type.
  const int maxInt = 0x1fffffffffffff;
  // The largest run of bytes that can be summed without taking the modulus
  // while still being guaranteed not to overflow:
  //   n * (n + 1) / 2 * 255 < maxInt  =>  n < sqrt(maxInt / 255) - 1
  final int maxChunkSize = sqrt(maxInt / 255).floor() - 1;

  int a = 1;
  int b = 0;
  final int length = binary.length;
  for (int offset = 0; offset < length; offset += maxChunkSize) {
    final int end = min(offset + maxChunkSize, length);
    for (final int byte in binary.getRange(offset, end)) {
      a += byte;
      b += a;
    }
    // Defer the modulus to once per run for speed.
    a %= _adler32Prime;
    b %= _adler32Prime;
  }
  return ((b & 0xffff) << 16) | (a & 0xffff);
}
/// Helper to calculate rolling Adler32 hash of a file.
@visibleForTesting
class RollingAdler32 {
  RollingAdler32(this.blockSize) : _window = Uint8List(blockSize);

  /// Block size of the rolling hash calculation.
  final int blockSize;

  /// Number of bytes pushed so far (across the whole stream, not the window).
  int processedBytes = 0;

  // Circular buffer holding the most recent [blockSize] bytes.
  final Uint8List _window;
  // Next write position within [_window].
  int _index = 0;

  int _a = 1;
  int _b = 0;

  /// The current rolling hash value.
  int get hash => ((_b & 0xffff) << 16) | (_a & 0xffff);

  /// Push a new character into the rolling chunk window, and returns the
  /// current hash value.
  int push(int char) {
    processedBytes++;

    if (processedBytes > blockSize) {
      // Window is full: subtract the contribution of the byte falling out.
      final int outgoing = _window[_index];
      _b -= outgoing * blockSize + 1;
      _a -= outgoing;
    }

    _a += char;
    _b += _a;

    _window[_index] = char;
    _index++;
    if (_index == blockSize) {
      _index = 0;
    }

    _a %= _adler32Prime;
    _b %= _adler32Prime;
    return hash;
  }

  /// Returns a [Uint8List] of size [blockSize] that was used to calculate the
  /// current Adler32 hash.
  Uint8List currentBlock() {
    if (processedBytes < blockSize) {
      // Window not yet full: only the first [processedBytes] entries are valid.
      return Uint8List.sublistView(_window, 0, processedBytes);
    }
    if (_index == 0) {
      // Window exactly aligned: no reassembly needed.
      return _window;
    }
    // Unwrap the circular buffer: oldest bytes start at [_index].
    return Uint8List.fromList(<int>[
      ..._window.getRange(_index, blockSize),
      ..._window.getRange(0, _index),
    ]);
  }

  void reset() {
    _a = 1;
    _b = 0;
    processedBytes = 0;
  }
}
/// Helper for rsync-like file transfer.
///
/// The algorithm works as follows.
///
/// First, in the destination device, calculate hashes of the every block of
/// the same size. Two hashes are used, Adler-32 for the rolling hash, and MD5
/// as a hash with a lower chance of collision.
///
/// The block size is chosen to balance between the amount of data required in
/// the initial transmission, and the amount of data needed for rebuilding the
/// file.
///
/// Next, on the machine that contains the source file, we calculate the
/// rolling hash of the source file, for every possible position. If the hash
/// is found on the block hashes, we then compare the MD5 of the block. If both
/// the Adler-32 and MD5 hash match, we consider that the block is identical.
///
/// For each block that can be found, we will generate the instruction asking
/// the destination machine to read block from the destination block. For
/// blocks that can't be found, we will transfer the content of the blocks.
///
/// On the receiving end, it will build a copy of the source file from the
/// given instructions.
class FileTransfer {
  /// Calculate hashes of blocks in the file.
  ///
  /// If [blockSize] is not given, it defaults to `max(sqrt(fileSize), 2560)`,
  /// balancing the size of the hash list against the granularity of the delta.
  Future<BlockHashes> calculateBlockHashesOfFile(File file, { int? blockSize }) async {
    final int totalSize = await file.length();
    blockSize ??= max(sqrt(totalSize).ceil(), 2560);

    final Stream<Uint8List> fileContentStream = file
        .openRead()
        .map((List<int> chunk) => Uint8List.fromList(chunk));

    final List<int> adler32Results = <int>[];
    final List<String> md5Results = <String>[];

    await for (final Uint8List chunk in convertToChunks(fileContentStream, blockSize)) {
      adler32Results.add(adler32Hash(chunk));
      md5Results.add(base64.encode(md5.convert(chunk).bytes));
    }

    // Handle whole file md5 separately. Md5Hash requires the chunk size to be a multiple of 64.
    final String fileMd5 = await _md5OfFile(file);

    return BlockHashes(
      blockSize: blockSize,
      totalSize: totalSize,
      adler32: adler32Results,
      md5: md5Results,
      fileMd5: fileMd5,
    );
  }

  /// Compute the instructions to rebuild the source [file] with the block
  /// hashes of the destination file.
  ///
  /// Returns an empty list if the destination file is exactly the same as the
  /// source file.
  Future<List<FileDeltaBlock>> computeDelta(File file, BlockHashes hashes) async {
    // Skip computing delta if the destination file matches the source file.
    if (await file.length() == hashes.totalSize && await _md5OfFile(file) == hashes.fileMd5) {
      return <FileDeltaBlock>[];
    }

    final Stream<List<int>> fileContentStream = file.openRead();
    final int blockSize = hashes.blockSize;

    // Generate a lookup for adler32 hash to block index.
    final Map<int, List<int>> adler32ToBlockIndex = <int, List<int>>{};
    for (int i = 0; i < hashes.adler32.length; i++) {
      (adler32ToBlockIndex[hashes.adler32[i]] ??= <int>[]).add(i);
    }

    final RollingAdler32 adler32 = RollingAdler32(blockSize);

    // Number of bytes read.
    int size = 0;
    // Offset of the beginning of the current block.
    int start = 0;
    final List<FileDeltaBlock> blocks = <FileDeltaBlock>[];

    await for (final List<int> chunk in fileContentStream) {
      for (final int c in chunk) {
        final int hash = adler32.push(c);
        size++;
        if (size - start < blockSize) {
          // Ignore if we have not processed enough bytes.
          continue;
        }
        if (!adler32ToBlockIndex.containsKey(hash)) {
          // Adler32 hash of the current block does not match the destination file.
          continue;
        }
        // The indices of possible matching blocks.
        final List<int> blockIndices = adler32ToBlockIndex[hash]!;
        final String md5Hash = base64.encode(md5.convert(adler32.currentBlock()).bytes);

        // Verify if any of our findings actually matches the destination block by comparing its MD5.
        for (final int blockIndex in blockIndices) {
          if (hashes.md5[blockIndex] != md5Hash) {
            // Adler-32 hash collision. This is not an actual match.
            continue;
          }
          // Found matching entry, generate instruction for reconstructing the file.
          // Copy the previously unmatched data from the source file.
          if (size - start > blockSize) {
            blocks.add(FileDeltaBlock.fromSource(start: start, size: size - start - blockSize));
          }
          start = size;
          // Try to extend the previous entry.
          if (blocks.isNotEmpty && blocks.last.copyFromDestination) {
            final int lastBlockIndex = (blocks.last.start + blocks.last.size) ~/ blockSize;
            if (hashes.md5[lastBlockIndex] == md5Hash) {
              // We can extend the previous entry.
              final FileDeltaBlock last = blocks.removeLast();
              blocks.add(FileDeltaBlock.fromDestination(start: last.start, size: last.size + blockSize));
              break;
            }
          }
          blocks.add(FileDeltaBlock.fromDestination(start: blockIndex * blockSize, size: blockSize));
          break;
        }
      }
    }

    // For the remaining content that is not matched, copy from the source.
    if (start < size) {
      blocks.add(FileDeltaBlock.fromSource(start: start, size: size - start));
    }

    return blocks;
  }

  /// Generates the binary blocks that need to be transferred to the remote
  /// end to regenerate the file.
  Future<Uint8List> binaryForRebuilding(File file, List<FileDeltaBlock> delta) async {
    final RandomAccessFile binaryView = await file.open();
    try {
      final Iterable<FileDeltaBlock> toTransfer =
          delta.where((FileDeltaBlock block) => !block.copyFromDestination);
      // `fold` (not `reduce`) so that a delta consisting entirely of
      // copy-from-destination blocks yields an empty buffer instead of
      // throwing a StateError on the empty iterable.
      final int totalSize = toTransfer.fold<int>(0, (int sum, FileDeltaBlock block) => sum + block.size);
      final Uint8List buffer = Uint8List(totalSize);
      int start = 0;
      for (final FileDeltaBlock current in toTransfer) {
        await binaryView.setPosition(current.start);
        await binaryView.readInto(buffer, start, start + current.size);
        start += current.size;
      }
      assert(start == buffer.length);
      return buffer;
    } finally {
      // Always release the file handle, even if a read fails.
      await binaryView.close();
    }
  }

  /// Generate the new destination file from the source file, with the
  /// [delta] and [binary] stream given.
  ///
  /// Returns false if the [binary] stream ended before supplying all the
  /// bytes the delta requires; the file is left unmodified in that case.
  Future<bool> rebuildFile(File file, List<FileDeltaBlock> delta, Stream<List<int>> binary) async {
    final RandomAccessFile fileView = await file.open();
    // Buffer used to hold the file content in memory.
    final BytesBuilder buffer = BytesBuilder(copy: false);
    final StreamIterator<List<int>> iterator = StreamIterator<List<int>>(binary);
    int currentIteratorStart = -1;
    bool iteratorMoveNextReturnValue = true;
    try {
      for (final FileDeltaBlock current in delta) {
        if (current.copyFromDestination) {
          await fileView.setPosition(current.start);
          buffer.add(await fileView.read(current.size));
        } else {
          int toRead = current.size;
          while (toRead > 0) {
            if (currentIteratorStart >= 0 && currentIteratorStart < iterator.current.length) {
              final int size = iterator.current.length - currentIteratorStart;
              final int sizeToRead = min(toRead, size);
              buffer.add(iterator.current.sublist(currentIteratorStart, currentIteratorStart + sizeToRead));
              currentIteratorStart += sizeToRead;
              toRead -= sizeToRead;
            } else {
              currentIteratorStart = 0;
              iteratorMoveNextReturnValue = await iterator.moveNext();
              if (!iteratorMoveNextReturnValue) {
                // The binary stream ended before supplying the bytes the
                // delta requires; bail out instead of looping forever on an
                // exhausted iterator.
                return false;
              }
            }
          }
        }
      }
    } finally {
      await fileView.close();
    }
    await file.writeAsBytes(buffer.takeBytes(), flush: true);
    // Drain the stream iterator if needed.
    while (iteratorMoveNextReturnValue) {
      iteratorMoveNextReturnValue = await iterator.moveNext();
    }
    return true;
  }

  /// Returns the base64-encoded MD5 digest of the whole [file].
  Future<String> _md5OfFile(File file) async {
    final Md5Hash fileMd5Hash = Md5Hash();
    await file.openRead().forEach((List<int> chunk) => fileMd5Hash.addChunk(Uint8List.fromList(chunk)));
    return base64.encode(fileMd5Hash.finalize().buffer.asUint8List());
  }
}
/// Represents a single line of instruction on how to generate the target file.
@immutable
class FileDeltaBlock {
  const FileDeltaBlock.fromSource({required this.start, required this.size})
      : copyFromDestination = false;

  const FileDeltaBlock.fromDestination({required this.start, required this.size})
      : copyFromDestination = true;

  /// If true, this block should be read from the destination file.
  final bool copyFromDestination;

  /// The size of the current block.
  final int size;

  /// Byte offset in the destination file from which the block should be read.
  final int start;

  /// Serializes this instruction. Source blocks carry only their size; the
  /// `start` key is present only for copy-from-destination blocks.
  Map<String, Object> toJson() => <String, Object>{
        if (copyFromDestination) 'start': start,
        'size': size,
      };

  /// Deserializes a list of instructions produced by [toJson].
  static List<FileDeltaBlock> fromJsonList(List<Map<String, Object?>> jsonList) {
    return <FileDeltaBlock>[
      for (final Map<String, Object?> json in jsonList)
        if (json.containsKey('start'))
          FileDeltaBlock.fromDestination(start: json['start']! as int, size: json['size']! as int)
        else
          // The start position does not matter on the destination machine.
          FileDeltaBlock.fromSource(start: 0, size: json['size']! as int),
    ];
  }

  @override
  bool operator ==(Object other) =>
      other is FileDeltaBlock &&
      other.copyFromDestination == copyFromDestination &&
      other.size == size &&
      other.start == start;

  @override
  int get hashCode => Object.hash(copyFromDestination, size, start);
}
packages/flutter_tools/test/general.shard/proxied_devices/file_transfer_test.dart
0 → 100644
View file @
6a9ac450
// Copyright 2014 The Flutter Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
import
'dart:async'
;
import
'dart:typed_data'
;
import
'package:file/memory.dart'
;
import
'package:flutter_tools/src/base/file_system.dart'
;
import
'package:flutter_tools/src/convert.dart'
;
import
'package:flutter_tools/src/proxied_devices/file_transfer.dart'
;
import
'../../src/common.dart'
;
void main() {
  group('convertToChunks', () {
    test('works correctly', () async {
      final StreamController<Uint8List> controller = StreamController<Uint8List>();
      final Stream<Uint8List> chunked = convertToChunks(controller.stream, 4);
      final Future<List<Uint8List>> chunkedListFuture = chunked.toList();

      // Full chunk.
      controller.add(Uint8List.fromList(<int>[1, 2, 3, 4]));
      // Multiple of full chunks, on chunk boundaries.
      controller.add(Uint8List.fromList(<int>[5, 6, 7, 8, 9, 10, 11, 12]));
      // Larger than one chunk, starts on chunk boundary, ends not on chunk boundary.
      controller.add(Uint8List.fromList(<int>[13, 14, 15, 16, 17, 18]));
      // Larger than one chunk, starts not on chunk boundary, ends not on chunk boundary.
      controller.add(Uint8List.fromList(<int>[19, 20, 21, 22, 23]));
      // Larger than one chunk, starts not on chunk boundary, ends on chunk boundary.
      controller.add(Uint8List.fromList(<int>[24, 25, 26, 27, 28]));
      // Smaller than one chunk, starts on chunk boundary, ends not on chunk boundary.
      controller.add(Uint8List.fromList(<int>[29, 30]));
      // Smaller than one chunk, starts not on chunk boundary, ends not on chunk boundary.
      controller.add(Uint8List.fromList(<int>[31, 32, 33]));
      // Full chunk, not on chunk boundary.
      controller.add(Uint8List.fromList(<int>[34, 35, 36, 37]));
      // Smaller than one chunk, starts not on chunk boundary, ends on chunk boundary.
      controller.add(Uint8List.fromList(<int>[38, 39, 40]));
      // Empty chunk.
      controller.add(Uint8List.fromList(<int>[]));
      // Extra chunk.
      controller.add(Uint8List.fromList(<int>[41, 42]));
      await controller.close();

      final List<Uint8List> chunkedList = await chunkedListFuture;
      expect(chunkedList, hasLength(11));
      expect(chunkedList[0], <int>[1, 2, 3, 4]);
      expect(chunkedList[1], <int>[5, 6, 7, 8]);
      expect(chunkedList[2], <int>[9, 10, 11, 12]);
      expect(chunkedList[3], <int>[13, 14, 15, 16]);
      expect(chunkedList[4], <int>[17, 18, 19, 20]);
      expect(chunkedList[5], <int>[21, 22, 23, 24]);
      expect(chunkedList[6], <int>[25, 26, 27, 28]);
      expect(chunkedList[7], <int>[29, 30, 31, 32]);
      expect(chunkedList[8], <int>[33, 34, 35, 36]);
      expect(chunkedList[9], <int>[37, 38, 39, 40]);
      expect(chunkedList[10], <int>[41, 42]);
    });
  });

  group('adler32Hash', () {
    test('works correctly', () {
      final int hash = adler32Hash(utf8.encode('abcdefg'));
      expect(hash, 0x0adb02bd);
    });
  });

  group('RollingAdler32', () {
    test('works correctly without rolling', () {
      final RollingAdler32 adler32 = RollingAdler32(7);
      utf8.encode('abcdefg').forEach(adler32.push);
      expect(adler32.hash, adler32Hash(utf8.encode('abcdefg')));
    });

    test('works correctly after rolling once', () {
      final RollingAdler32 adler32 = RollingAdler32(7);
      utf8.encode('12abcdefg').forEach(adler32.push);
      expect(adler32.hash, adler32Hash(utf8.encode('abcdefg')));
    });

    test('works correctly after rolling multiple cycles', () {
      final RollingAdler32 adler32 = RollingAdler32(7);
      utf8.encode('1234567890123456789abcdefg').forEach(adler32.push);
      expect(adler32.hash, adler32Hash(utf8.encode('abcdefg')));
    });

    test('works correctly after reset', () {
      final RollingAdler32 adler32 = RollingAdler32(7);
      utf8.encode('1234567890123456789abcdefg').forEach(adler32.push);
      adler32.reset();
      utf8.encode('abcdefg').forEach(adler32.push);
      expect(adler32.hash, adler32Hash(utf8.encode('abcdefg')));
    });

    test('currentBlock returns the correct entry when read less than one block', () {
      final RollingAdler32 adler32 = RollingAdler32(7);
      utf8.encode('abcd').forEach(adler32.push);
      expect(adler32.currentBlock(), utf8.encode('abcd'));
    });

    test('currentBlock returns the correct entry when read exactly one block', () {
      final RollingAdler32 adler32 = RollingAdler32(7);
      utf8.encode('abcdefg').forEach(adler32.push);
      expect(adler32.currentBlock(), utf8.encode('abcdefg'));
    });

    test('currentBlock returns the correct entry when read more than one block', () {
      final RollingAdler32 adler32 = RollingAdler32(7);
      utf8.encode('123456789abcdefg').forEach(adler32.push);
      expect(adler32.currentBlock(), utf8.encode('abcdefg'));
    });
  });

  group('FileTransfer', () {
    const String content1 = 'a...b...c...d...e.';
    const String content2 = 'b...c...d...a...f...g...b...h..';
    const List<FileDeltaBlock> expectedDelta = <FileDeltaBlock>[
      FileDeltaBlock.fromDestination(start: 4, size: 12),
      FileDeltaBlock.fromDestination(start: 0, size: 4),
      FileDeltaBlock.fromSource(start: 16, size: 8),
      FileDeltaBlock.fromDestination(start: 4, size: 4),
      FileDeltaBlock.fromSource(start: 28, size: 3),
    ];
    const String expectedBinaryForRebuilding = 'f...g...h..';

    late MemoryFileSystem fileSystem;

    setUp(() {
      fileSystem = MemoryFileSystem();
    });

    test('calculateBlockHashesOfFile works normally', () async {
      final File file = fileSystem.file('test')..writeAsStringSync(content1);
      final BlockHashes hashes = await FileTransfer().calculateBlockHashesOfFile(file, blockSize: 4);
      expect(hashes.blockSize, 4);
      expect(hashes.totalSize, content1.length);
      expect(hashes.adler32, hasLength(5));
      expect(hashes.adler32, <int>[
        0x029c00ec,
        0x02a000ed,
        0x02a400ee,
        0x02a800ef,
        0x00fa0094,
      ]);
      expect(hashes.md5, hasLength(5));
      expect(hashes.md5, <String>[
        'zB0S8R/fGt05GcI5v8AjIQ==',
        'uZCZ4i/LUGFYAD+K1ZD0Wg==',
        '6kbZGS8T1NJl/naWODQcNw==',
        'kKh/aA2XAhR/r0HdZa3Bxg==',
        '34eF7Bs/OhfoJ5+sAw0zyw==',
      ]);
      expect(hashes.fileMd5, 'VT/gkSEdctzUEUJCxclxuQ==');
    });

    test('computeDelta returns empty list if file is identical', () async {
      final File file1 = fileSystem.file('file1')..writeAsStringSync(content1);
      final File file2 = fileSystem.file('file1')..writeAsStringSync(content1);
      final BlockHashes hashes = await FileTransfer().calculateBlockHashesOfFile(file1, blockSize: 4);
      final List<FileDeltaBlock> delta = await FileTransfer().computeDelta(file2, hashes);
      expect(delta, isEmpty);
    });

    test('computeDelta returns the correct delta', () async {
      final File file1 = fileSystem.file('file1')..writeAsStringSync(content1);
      final File file2 = fileSystem.file('file2')..writeAsStringSync(content2);
      final BlockHashes hashes = await FileTransfer().calculateBlockHashesOfFile(file1, blockSize: 4);
      final List<FileDeltaBlock> delta = await FileTransfer().computeDelta(file2, hashes);
      expect(delta, expectedDelta);
    });

    test('binaryForRebuilding returns the correct binary', () async {
      final File file = fileSystem.file('file')..writeAsStringSync(content2);
      final List<int> binaryForRebuilding = await FileTransfer().binaryForRebuilding(file, expectedDelta);
      expect(binaryForRebuilding, utf8.encode(expectedBinaryForRebuilding));
    });

    test('rebuildFile can rebuild the correct file', () async {
      final File file = fileSystem.file('file')..writeAsStringSync(content1);
      await FileTransfer().rebuildFile(
        file,
        expectedDelta,
        Stream<List<int>>.fromIterable(<List<int>>[utf8.encode(expectedBinaryForRebuilding)]),
      );
      expect(file.readAsStringSync(), content2);
    });
  });
}
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment