diff --git a/.editorconfig b/.editorconfig
new file mode 100644
index 0000000..dedd3c7
--- /dev/null
+++ b/.editorconfig
@@ -0,0 +1,16 @@
+root = true
+
+[*]
+charset = utf-8
+end_of_line = lf
+indent_size = 4
+indent_style = space
+insert_final_newline = false
+max_line_length = 120
+tab_width = 4
+
+[{*.yaml,*.yml}]
+indent_size = 2
+
+[{*.kt,*.kts}]
+ij_kotlin_packages_to_use_import_on_demand = org.junit.jupiter.api,aws.sdk.kotlin.services.s3,kotlinx.coroutines,java.io,ziputils
\ No newline at end of file
diff --git a/.idea/uiDesigner.xml b/.idea/uiDesigner.xml
new file mode 100644
index 0000000..2b63946
--- /dev/null
+++ b/.idea/uiDesigner.xml
@@ -0,0 +1,124 @@
+<!-- IntelliJ IDEA GUI Designer palette configuration (standard IDE-generated Palette2 XML; content elided) -->
\ No newline at end of file
diff --git a/README.md b/README.md
index 77764dd..6109775 100644
--- a/README.md
+++ b/README.md
@@ -6,6 +6,64 @@ an AWS S3 bucket.
This tool is released as a JAR in the [release page](https://git.koval.net/cyclane/teamcity-executors-test-task/releases).
Use `java -jar <jarfile>.jar --help` for more detailed usage instructions.
+### --help
+```
+Usage: s3backup-tool [<options>] <command> [<args>]...
+
+ A simple AWS S3 backup tool. This tool assumes credentials are properly configured using aws-cli.
+
+Options:
+ -h, --help Show this message and exit
+
+Commands:
+ create Create a backup of a file or directory.
+ restore Restore a backup from AWS S3.
+ restore-file Restore a single file from a backup from AWS S3.
+```
+
+#### Subcommands
+```
+Usage: s3backup-tool create [<options>] <source> <bucket>
+
+ Create a backup of a file or directory.
+
+Options:
+ -h, --help Show this message and exit
+
+Arguments:
+  <source>  File or directory to backup
+  <bucket>  Name of S3 bucket to backup to
+```
+
+```
+Usage: s3backup-tool restore [<options>] <bucket> <backupkey> <destination>
+
+ Restore a backup from AWS S3.
+
+Options:
+ -h, --help Show this message and exit
+
+Arguments:
+  <bucket>       Name of S3 bucket to restore the backup from
+  <backupkey>    The S3 key of the backup to restore
+  <destination>  Directory to restore to
+```
+
+```
+Usage: s3backup-tool restore-file [<options>] <bucket> <backupkey> <filepath> <destination>
+
+ Restore a single file from a backup from AWS S3.
+
+Options:
+ -h, --help Show this message and exit
+
+Arguments:
+  <bucket>       Name of S3 bucket to restore the backup from
+  <backupkey>    The S3 key of the backup to restore
+  <filepath>     File path within the backup
+  <destination>  Directory to restore to
+```
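+
+#### Example
+A typical session might look like the following (the jar name, bucket, and paths are illustrative):
+```
+$ java -jar s3backup-tool.jar create ./documents my-backup-bucket
+Successfully created backup with key 'documents/2023-12-28T12:00:00.123456Z.zip'
+$ java -jar s3backup-tool.jar restore-file my-backup-bucket 'documents/2023-12-28T12:00:00.123456Z.zip' documents/report.pdf ./restored
+Successfully restored 'documents/report.pdf' from backup 'documents/2023-12-28T12:00:00.123456Z.zip' to './restored'
+```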
+
## Assumptions
1. This test task is not interested in re-implementations of common libraries (AWS SDK, Clikt, Gradle Shadow, ...)
2. The last part (restoration of a single file) should be optimised so that only the part of the blob required for this
diff --git a/build.gradle.kts b/build.gradle.kts
index f2853e3..82c035d 100644
--- a/build.gradle.kts
+++ b/build.gradle.kts
@@ -1,7 +1,7 @@
-import com.github.jengelman.gradle.plugins.shadow.tasks.ShadowJar
-
plugins {
+ application
kotlin("jvm") version "1.9.21"
+ id("org.jlleitschuh.gradle.ktlint") version "12.0.3"
id("com.github.johnrengelman.shadow") version "8.1.1"
}
@@ -16,6 +16,7 @@ dependencies {
implementation("aws.sdk.kotlin:s3:1.0.25")
implementation("org.slf4j:slf4j-simple:2.0.9")
implementation("org.jetbrains.kotlinx:kotlinx-coroutines-core:1.7.3")
+ implementation("com.github.ajalt.clikt:clikt:4.2.1")
testImplementation("org.jetbrains.kotlin:kotlin-test")
}
@@ -25,8 +26,6 @@ tasks.test {
kotlin {
jvmToolchain(17)
}
-tasks.jar {
- manifest {
- attributes("Main-Class" to "backup.MainKt")
- }
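+// The Shadow plugin reads the fat JAR's Main-Class from the application block below.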
+application {
+ mainClass.set("MainKt")
}
\ No newline at end of file
diff --git a/settings.gradle.kts b/settings.gradle.kts
index b206040..fe5c626 100644
--- a/settings.gradle.kts
+++ b/settings.gradle.kts
@@ -1,5 +1,4 @@
plugins {
id("org.gradle.toolchains.foojay-resolver-convention") version "0.5.0"
}
-rootProject.name = "teamcity-executors-test-task"
-
+rootProject.name = "teamcity-executors-test-task"
\ No newline at end of file
diff --git a/src/main/kotlin/Main.kt b/src/main/kotlin/Main.kt
new file mode 100644
index 0000000..44b8f62
--- /dev/null
+++ b/src/main/kotlin/Main.kt
@@ -0,0 +1,80 @@
+import aws.sdk.kotlin.services.s3.S3Client
+import backup.BackupClient
+import com.github.ajalt.clikt.core.CliktCommand
+import com.github.ajalt.clikt.core.subcommands
+import com.github.ajalt.clikt.parameters.arguments.argument
+import com.github.ajalt.clikt.parameters.arguments.help
+import com.github.ajalt.clikt.parameters.types.file
+import kotlinx.coroutines.runBlocking
+import kotlin.system.exitProcess
+
+fun main(args: Array<String>) =
+ runBlocking {
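+        // fromEnvironment() resolves credentials and region the same way aws-cli does
+        // (environment variables, shared config/credentials profiles, etc.).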
+ S3Client.fromEnvironment().use { s3 ->
+ S3BackupTool()
+ .subcommands(
+ Create(s3),
+ Restore(s3),
+ RestoreFile(s3),
+ )
+ .main(args)
+ }
+ }
+
+class S3BackupTool : CliktCommand(
+ help = "A simple AWS S3 backup tool. This tool assumes credentials are properly configured using aws-cli.",
+) {
+ override fun run() {
+ shortHelp(currentContext)
+ }
+}
+
+class Create(val s3: S3Client) : CliktCommand(
+ help = "Create a backup of a file or directory.",
+) {
+ val source by argument().file(mustExist = true).help("File or directory to backup")
+ val bucket by argument().help("Name of S3 bucket to backup to")
+
+ override fun run() =
+ runBlocking {
+ val backupKey = BackupClient(s3, bucket).upload(source)
+ echo("Successfully created backup with key '$backupKey'")
+ }
+}
+
+class Restore(val s3: S3Client) : CliktCommand(
+ help = "Restore a backup from AWS S3.",
+) {
+ val bucket by argument().help("Name of S3 bucket to restore the backup from")
+ val backupKey by argument().help("The S3 key of the backup to restore")
+ val destination by argument().file(mustExist = true).help("Directory to restore to")
+
+ override fun run() =
+ runBlocking {
+ if (!destination.isDirectory) {
+ echo("Destination must be an existing directory", err = true)
+ exitProcess(1)
+ }
+ BackupClient(s3, bucket).restore(destination.toPath(), backupKey)
+ echo("Successfully restored backup '$backupKey' to '$destination'")
+ }
+}
+
+class RestoreFile(val s3: S3Client) : CliktCommand(
+ help = "Restore a single file from a backup from AWS S3.",
+) {
+ val bucket by argument().help("Name of S3 bucket to restore the backup from")
+ val backupKey by argument().help("The S3 key of the backup to restore")
+ val filePath by argument().help("File path within the backup")
+ val destination by argument().file(mustExist = true).help("Directory to restore to")
+
+ override fun run() =
+ runBlocking {
+ if (!destination.isDirectory) {
+ echo("Destination must be an existing directory", err = true)
+ exitProcess(1)
+ }
+ BackupClient(s3, bucket).restoreFile(destination.toPath(), backupKey, filePath)
+ echo("Successfully restored '$filePath' from backup '$backupKey' to '$destination'")
+ }
+}
\ No newline at end of file
diff --git a/src/main/kotlin/backup/BackupClient.kt b/src/main/kotlin/backup/BackupClient.kt
index 4eaf2c7..2cb9b74 100644
--- a/src/main/kotlin/backup/BackupClient.kt
+++ b/src/main/kotlin/backup/BackupClient.kt
@@ -30,89 +30,98 @@ import kotlin.io.path.createDirectory
class BackupClient(
private val s3: S3Client,
private val bucketName: String,
- private val bufSize: Int = 1024 * 1024 * 100
+ private val bufSize: Int = 1024 * 1024 * 32,
) {
/**
* Upload a file/directory backup to AWS S3.
* @param file The File object for the file or directory.
*/
- suspend fun upload(file: File) = coroutineScope {
- val backupKey = "${file.name}/${Instant.now()}.zip"
- PipedInputStream().use { inputStream ->
- val outputStream = PipedOutputStream(inputStream)
- val zipper = launch(Dispatchers.IO) {
- file.compressToZip(outputStream)
- }
+ suspend fun upload(file: File) =
+ coroutineScope {
+ val backupKey = "${file.canonicalFile.name}/${Instant.now()}.zip"
+ PipedInputStream().use { inputStream ->
+ val outputStream = PipedOutputStream(inputStream)
+ val zipper =
+ launch(Dispatchers.IO) {
+ file.compressToZip(outputStream)
+ }
- val data = ByteArray(bufSize)
- val initialRead = inputStream.readNBytes(data, 0, bufSize)
- if (initialRead == bufSize) {
- // Large upload, use multipart
- // TODO: multipart uploads can be asynchronous, which would improve
- // performance a little bit for big uploads.
- val upload = s3.createMultipartUpload {
- bucket = bucketName
- key = backupKey
- }
- try {
-                    val uploadParts = mutableListOf<CompletedPart>()
- var number = 1
- var bytesRead = initialRead
- while (bytesRead > 0) {
- val part = s3.uploadPart {
+ val data = ByteArray(bufSize)
+ val initialRead = inputStream.readNBytes(data, 0, bufSize)
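+                // readNBytes blocks until bufSize bytes arrive or the zipper closes the pipe; a full
+                // buffer means more data may follow, so switch to a multipart upload. (S3 requires
+                // every part except the last to be at least 5 MiB; bufSize is well above that.)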
+ if (initialRead == bufSize) {
+ // Large upload, use multipart
+ // TODO: multipart uploads can be asynchronous, which would improve
+ // performance a little bit for big uploads.
+ val upload =
+ s3.createMultipartUpload {
bucket = bucketName
key = backupKey
- partNumber = number
- uploadId = upload.uploadId
- body = ByteStream.fromBytes(data.take(bytesRead))
- }.toCompletedPart(number)
- uploadParts.add(part)
- number++
- bytesRead = inputStream.readNBytes(data, 0, bufSize)
- }
- s3.completeMultipartUpload {
- bucket = bucketName
- key = backupKey
- uploadId = upload.uploadId
- multipartUpload = CompletedMultipartUpload {
- parts = uploadParts
}
+ try {
+                    val uploadParts = mutableListOf<CompletedPart>()
+ var number = 1
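+                    // S3 part numbers are 1-based (valid range 1-10,000).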
+ var bytesRead = initialRead
+ while (bytesRead > 0) {
+ val part =
+ s3.uploadPart {
+ bucket = bucketName
+ key = backupKey
+ partNumber = number
+ uploadId = upload.uploadId
+ body = ByteStream.fromBytes(data.take(bytesRead))
+ }.toCompletedPart(number)
+ uploadParts.add(part)
+ number++
+ bytesRead = inputStream.readNBytes(data, 0, bufSize)
+ }
+ s3.completeMultipartUpload {
+ bucket = bucketName
+ key = backupKey
+ uploadId = upload.uploadId
+ multipartUpload =
+ CompletedMultipartUpload {
+ parts = uploadParts
+ }
+ }
+ } catch (e: Exception) {
+ s3.abortMultipartUpload {
+ bucket = bucketName
+ key = backupKey
+ uploadId = upload.uploadId
+ }
+ throw e
}
- } catch (e: Exception) {
- s3.abortMultipartUpload {
+ } else {
+ // Small upload, use single request
+ s3.putObject {
bucket = bucketName
key = backupKey
- uploadId = upload.uploadId
+ body = ByteStream.fromBytes(data.take(initialRead))
}
- throw e
- }
- } else {
- // Small upload, use single request
- s3.putObject {
- bucket = bucketName
- key = backupKey
- body = ByteStream.fromBytes(data.take(initialRead))
}
+ zipper.join() // Should be instant
}
- zipper.join() // Should be instant
+ backupKey
}
- backupKey
- }
/**
* Restore a backup from AWS S3.
* @param destination The destination directory path for the backup contents.
* @param backupKey The S3 key of the backup.
*/
- suspend fun restore(destination: Path, backupKey: String) = coroutineScope {
- val req = GetObjectRequest {
- bucket = bucketName
- key = backupKey
- }
+ suspend fun restore(
+ destination: Path,
+ backupKey: String,
+ ) = coroutineScope {
+ val req =
+ GetObjectRequest {
+ bucket = bucketName
+ key = backupKey
+ }
s3.getObject(req) { resp ->
ZipInputStream(
resp.body?.toInputStream()
- ?: throw IOException("S3 response is missing body")
+ ?: throw IOException("S3 response is missing body"),
).use { zipStream ->
zipStream.decompress { destination.resolve(it) }
}
@@ -125,81 +134,99 @@ class BackupClient(
* @param backupKey The S3 key of the backup.
* @param fileName The full name of the file to restore (including directories if it was under a subdirectory).
*/
- suspend fun restoreFile(destination: Path, backupKey: String, fileName: String) = coroutineScope {
+ suspend fun restoreFile(
+ destination: Path,
+ backupKey: String,
+ fileName: String,
+ ) = coroutineScope {
// For byte ranges refer to https://pkware.cachefly.net/webdocs/APPNOTE/APPNOTE-6.3.9.TXT
- val eocdReq = GetObjectRequest {
- bucket = bucketName
- key = backupKey
- // Assumption: EOCD has an empty comment
- // Assumption: Backups are at least 22 + 20 (= 42) bytes. Only COMPLETELY empty backups can be smaller,
- // in which case this function would error anyway, so it should be fine to have this edge-case.
- range = "bytes=-${EndOfCentralDirectoryRecord.SIZE + EndOfCentralDirectoryLocator.SIZE}"
- }
- val eocdBytes = s3.getObject(eocdReq) { resp ->
- val bytes = resp.body?.toByteArray() ?: throw IOException("S3 response is missing body")
- bytes
- }
- val eocd = EndOfCentralDirectoryRecord.fromByteArray(eocdBytes, EndOfCentralDirectoryLocator.SIZE)
- val eocd64 = if (eocd.eocd64Required()) {
- val locator = EndOfCentralDirectoryLocator.fromByteArray(eocdBytes, 0)
- val eocd64Req = GetObjectRequest {
+ val eocdReq =
+ GetObjectRequest {
bucket = bucketName
key = backupKey
- range = "bytes=${locator.endOfCentralDirectory64Offset}-"
+ // Assumption: EOCD has an empty comment
+ // Assumption: Backups are at least 22 + 20 (= 42) bytes. Only COMPLETELY empty backups can be smaller,
+ // in which case this function would error anyway, so it should be fine to have this edge-case.
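+                // A suffix range ("bytes=-N") asks S3 for the last N bytes of the object.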
+ range = "bytes=-${EndOfCentralDirectoryRecord.SIZE + EndOfCentralDirectoryLocator.SIZE}"
}
- s3.getObject(eocd64Req) { resp ->
+ val eocdBytes =
+ s3.getObject(eocdReq) { resp ->
val bytes = resp.body?.toByteArray() ?: throw IOException("S3 response is missing body")
- EndOfCentralDirectoryRecord64.fromByteArray(bytes, 0)
+ bytes
}
- } else null
- val cenOffset = if (eocd.centralDirectoryOffset == 0xffffffffU && eocd64 != null) {
- eocd64.centralDirectoryOffset
- } else eocd.centralDirectoryOffset.toULong()
- val censReq = GetObjectRequest {
- bucket = bucketName
- key = backupKey
- // We only know where to fetch until if we've also fetched EOCD64 (which isn't always the case).
- // So just over-fetch a little bit, these headers aren't that big anyway.
- range = "bytes=${cenOffset}-"
- }
- val cen = s3.getObject(censReq) { resp ->
- val bytes = resp.body?.toByteArray() ?: throw IOException("S3 response is missing body")
- var p = 0
- while (p < bytes.size) {
- try {
- val cen = CentralDirectoryFileHeader.fromByteArray(bytes, p)
- p += cen.size
- if (cen.fileName == fileName) return@getObject cen
- } catch (_: InvalidSignatureException) {
- return@getObject null
+ val eocd = EndOfCentralDirectoryRecord.fromByteArray(eocdBytes, EndOfCentralDirectoryLocator.SIZE)
+ val eocd64 =
+ if (eocd.eocd64Required()) {
+ val locator = EndOfCentralDirectoryLocator.fromByteArray(eocdBytes, 0)
+ val eocd64Req =
+ GetObjectRequest {
+ bucket = bucketName
+ key = backupKey
+ range = "bytes=${locator.endOfCentralDirectory64Offset}-"
+ }
+ s3.getObject(eocd64Req) { resp ->
+ val bytes = resp.body?.toByteArray() ?: throw IOException("S3 response is missing body")
+ EndOfCentralDirectoryRecord64.fromByteArray(bytes, 0)
}
+ } else {
+ null
}
- null
- } ?: throw FileNotFoundException("File '${fileName}' not found in backup")
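+        // 0xffffffff is the ZIP sentinel for "value too large for this field": the real offset lives in the ZIP64 EOCD.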
+ val cenOffset =
+ if (eocd.centralDirectoryOffset == 0xffffffffU && eocd64 != null) {
+ eocd64.centralDirectoryOffset
+ } else {
+ eocd.centralDirectoryOffset.toULong()
+ }
+ val censReq =
+ GetObjectRequest {
+ bucket = bucketName
+ key = backupKey
+                // We only know the exact end of the central directory if we've also fetched the EOCD64
+                // (which isn't always the case), so just over-fetch a little; these headers aren't big anyway.
+ range = "bytes=$cenOffset-"
+ }
+ val cen =
+ s3.getObject(censReq) { resp ->
+ val bytes = resp.body?.toByteArray() ?: throw IOException("S3 response is missing body")
+ var p = 0
+ while (p < bytes.size) {
+ try {
+ val cen = CentralDirectoryFileHeader.fromByteArray(bytes, p)
+ p += cen.size
+ if (cen.fileName == fileName) return@getObject cen
+ } catch (_: InvalidDataException) {
+ return@getObject null
+ }
+ }
+ null
+ } ?: throw FileNotFoundException("File '$fileName' not found in backup")
- val localHeaderOffset = cen.extraFieldRecords.firstNotNullOfOrNull {
- if (it is Zip64ExtraFieldRecord && it.localHeaderOffset != null) it else null
- }?.localHeaderOffset ?: cen.localHeaderOffset.toULong()
- val compressedSize = cen.extraFieldRecords.firstNotNullOfOrNull {
- if (it is Zip64ExtraFieldRecord && it.compressedSize != null) it else null
- }?.compressedSize ?: cen.compressedSize.toULong()
- val req = GetObjectRequest {
- bucket = bucketName
- key = backupKey
- range = "bytes=${localHeaderOffset}-${
- // Add CEN min size (46 bytes) so that the next CEN / LOC header is seen by the ZipInputStream
- // and so it can see the current entry has stopped.
- // Note: yes ZipInputStream should know the exact content length from the LOC, but it was still sending
- // EOF errors. Perhaps due to fetching multiples of a power of two, or something else. But this helps.
- localHeaderOffset + cen.size.toULong() + compressedSize + CentralDirectoryFileHeader.SIZE.toULong()
- }"
- }
+ val localHeaderOffset =
+ cen.extraFieldRecords.firstNotNullOfOrNull {
+ if (it is Zip64ExtraFieldRecord && it.localHeaderOffset != null) it else null
+ }?.localHeaderOffset ?: cen.localHeaderOffset.toULong()
+ val compressedSize =
+ cen.extraFieldRecords.firstNotNullOfOrNull {
+ if (it is Zip64ExtraFieldRecord && it.compressedSize != null) it else null
+ }?.compressedSize ?: cen.compressedSize.toULong()
+ val req =
+ GetObjectRequest {
+ bucket = bucketName
+ key = backupKey
+ range = "bytes=$localHeaderOffset-${
+ // Add CEN min size (46 bytes) so that the next CEN / LOC header is seen by the ZipInputStream
+ // and so it can see the current entry has stopped.
+                    // Note: ZipInputStream should know the exact content length from the LOC, but it was still
+                    // hitting EOF errors, perhaps due to fetching multiples of a power of two. Over-fetching helps.
+ localHeaderOffset + cen.size.toULong() + compressedSize + CentralDirectoryFileHeader.SIZE.toULong()
+ }"
+ }
s3.getObject(req) { resp ->
ZipInputStream(
resp.body?.toInputStream()
- ?: throw IOException("S3 response is missing body")
+ ?: throw IOException("S3 response is missing body"),
).use { zipStream ->
- zipStream.decompress { name -> destination.resolve(name.takeLastWhile { it != '/' }) }
+ zipStream.decompress(limit = 1) { name -> destination.resolve(name.takeLastWhile { it != '/' }) }
}
}
}
@@ -228,35 +255,39 @@ private fun UploadPartResponse.toCompletedPart(number: Int): CompletedPart {
* @return A ByteArray of the first `n` items.
*/
private fun ByteArray.take(n: Int) =
- if (n == size) this // No copy
- else asList().subList(0, n).toByteArray() // TODO: One copy (toByteArray()), not sure how to do 0 copies here
+ if (n == size) {
+ this // No copy
+ } else {
+ asList().subList(0, n).toByteArray() // TODO: One copy (toByteArray()), not sure how to do 0 copies here
+ }
/**
* Compress a file or directory as a ZIP file to an `OutputStream`.
* @param outputStream The `OutputStream` to write the ZIP file contents to.
*/
-private fun File.compressToZip(outputStream: OutputStream) = ZipOutputStream(outputStream).use { zipStream ->
- val parentDir = this.absoluteFile.parent + "/"
-    val fileQueue = ArrayDeque<File>()
- fileQueue.add(this)
- fileQueue.forEach { subFile ->
- val path = subFile.absolutePath.removePrefix(parentDir)
- val subFiles = subFile.listFiles()
- if (subFiles != null) { // Is a directory
- val entry = ZipEntry("$path/")
- setZipAttributes(entry, subFile.toPath())
- zipStream.putNextEntry(entry)
- fileQueue.addAll(subFiles)
- } else { // Otherwise, treat it as a file
- BufferedInputStream(subFile.inputStream()).use { origin ->
- val entry = ZipEntry(path)
+private fun File.compressToZip(outputStream: OutputStream) =
+ ZipOutputStream(outputStream).use { zipStream ->
+ val parentDir = this.canonicalFile.parent + "/"
+        val fileQueue = ArrayDeque<File>()
+ fileQueue.add(this)
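+        // Breadth-first traversal: each directory visited appends its children to the queue.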
+ fileQueue.forEach { subFile ->
+ val path = subFile.canonicalPath.removePrefix(parentDir)
+ val subFiles = subFile.listFiles()
+ if (subFiles != null) { // Is a directory
+ val entry = ZipEntry("$path/")
setZipAttributes(entry, subFile.toPath())
zipStream.putNextEntry(entry)
- origin.copyTo(zipStream)
+ fileQueue.addAll(subFiles)
+ } else { // Otherwise, treat it as a file
+ BufferedInputStream(subFile.inputStream()).use { origin ->
+ val entry = ZipEntry(path)
+ setZipAttributes(entry, subFile.toPath())
+ zipStream.putNextEntry(entry)
+ origin.copyTo(zipStream)
+ }
}
}
}
-}
/**
* Decompress `ZipInputStream` contents to specified destination paths.
@@ -265,9 +296,11 @@ private fun File.compressToZip(outputStream: OutputStream) = ZipOutputStream(out
*/
private fun ZipInputStream.decompress(
bufSize: Int = 1024 * 1024,
- entryNameToPath: (String) -> Path
+ limit: Int? = null,
+ entryNameToPath: (String) -> Path,
) {
var entry = this.nextEntry
+ var count = 1
while (entry != null) {
val path = entryNameToPath(entry.name)
if (entry.isDirectory) {
@@ -283,6 +316,9 @@ private fun ZipInputStream.decompress(
}
}
applyZipAttributes(entry, path)
+        // The limit check is here rather than in the while condition because we must not read more from
+        // the input stream once the limit is reached; fetching this.nextEntry would consume it.
+ if (limit != null && count++ >= limit) return
entry = this.nextEntry
}
}
@@ -292,7 +328,10 @@ private fun ZipInputStream.decompress(
* @param entry The `ZipEntry` to set attributes of.
* @param path The `Path` of the file to get the attributes from.
*/
-private fun setZipAttributes(entry: ZipEntry, path: Path) {
+private fun setZipAttributes(
+ entry: ZipEntry,
+ path: Path,
+) {
try {
val attrs = Files.getFileAttributeView(path, BasicFileAttributeView::class.java).readAttributes()
entry.setCreationTime(attrs.creationTime())
@@ -307,7 +346,10 @@ private fun setZipAttributes(entry: ZipEntry, path: Path) {
* @param entry The `ZipEntry` to get the attributes from.
* @param path The `Path` of the file to set the attributes of.
*/
-private fun applyZipAttributes(entry: ZipEntry, path: Path) {
+private fun applyZipAttributes(
+ entry: ZipEntry,
+ path: Path,
+) {
try {
val attrs = Files.getFileAttributeView(path, BasicFileAttributeView::class.java)
attrs.setTimes(entry.lastModifiedTime, entry.lastAccessTime, entry.creationTime)
diff --git a/src/main/kotlin/backup/main.kt b/src/main/kotlin/backup/main.kt
deleted file mode 100644
index e5cf2a4..0000000
--- a/src/main/kotlin/backup/main.kt
+++ /dev/null
@@ -1,10 +0,0 @@
-package backup
-
-import aws.sdk.kotlin.services.s3.S3Client
-import kotlinx.coroutines.runBlocking
-
-fun main() = runBlocking {
- S3Client.fromEnvironment().use { s3 ->
- val backupClient = BackupClient(s3, "teamcity-executors-test-task", 1024 * 1024 * 10)
- }
-}
\ No newline at end of file
diff --git a/src/main/kotlin/ziputils/CentralDirectoryFileHeader.kt b/src/main/kotlin/ziputils/CentralDirectoryFileHeader.kt
index b54754c..5c981b0 100644
--- a/src/main/kotlin/ziputils/CentralDirectoryFileHeader.kt
+++ b/src/main/kotlin/ziputils/CentralDirectoryFileHeader.kt
@@ -15,7 +15,7 @@ internal class CentralDirectoryFileHeader(
val disk: UShort,
val localHeaderOffset: UInt,
val fileName: String,
-    val extraFieldRecords: List<ExtraFieldRecord>
+    val extraFieldRecords: List<ExtraFieldRecord>,
) {
val size: Int
get() = SIZE + nameLength.toInt() + extraFieldLength.toInt() + commentLength.toInt()
@@ -32,65 +32,85 @@ internal class CentralDirectoryFileHeader(
* @return A `CentralDirectoryFileHeader`.
*/
@Throws(InvalidDataException::class)
- fun fromByteArray(data: ByteArray, offset: Int): CentralDirectoryFileHeader {
+ fun fromByteArray(
+ data: ByteArray,
+ offset: Int,
+ ): CentralDirectoryFileHeader {
if (data.size - offset < SIZE) {
throw InvalidDataException("CEN must be at least 46 bytes")
}
val buf = ByteBuffer.wrap(data, offset, 46).order(ByteOrder.LITTLE_ENDIAN)
if (buf.getInt().toUInt() != SIGNATURE) {
- throw InvalidSignatureException("Invalid signature")
+ throw InvalidDataException("Invalid signature")
}
        val extraFieldRecords = mutableListOf<ExtraFieldRecord>()
val nameLength = buf.getShort(offset + 28).toUShort()
buf.position(offset + 20)
- val cen = CentralDirectoryFileHeader(
- compressedSize = buf.getInt().toUInt(),
- uncompressedSize = buf.getInt().toUInt(),
- nameLength = nameLength
- .also { buf.position(offset + 30) },
- extraFieldLength = buf.getShort().toUShort(),
- commentLength = buf.getShort().toUShort(),
- disk = buf.getShort().toUShort()
- .also { buf.position(offset + 42) },
- localHeaderOffset = buf.getInt().toUInt(),
-            fileName = String(data.sliceArray(offset + SIZE..<offset + SIZE + nameLength.toInt())),
-            extraFieldRecords = extraFieldRecords
-        )
-        val extraFieldsBuf = ByteBuffer.wrap(
-            data,
-            offset + SIZE + nameLength.toInt(),
-            cen.extraFieldLength.toInt()
-        ).order(ByteOrder.LITTLE_ENDIAN)
-        while (extraFieldsBuf.remaining() > 0) {
+        val cen =
+            CentralDirectoryFileHeader(
+                compressedSize = buf.getInt().toUInt(),
+                uncompressedSize = buf.getInt().toUInt(),
+                nameLength = nameLength.also { buf.position(offset + 30) },
+                extraFieldLength = buf.getShort().toUShort(),
+                commentLength = buf.getShort().toUShort(),
+                disk = buf.getShort().toUShort().also { buf.position(offset + 42) },
+                localHeaderOffset = buf.getInt().toUInt(),
+                fileName = String(data.sliceArray(offset + SIZE..<offset + SIZE + nameLength.toInt())),
+                extraFieldRecords = extraFieldRecords,
+            )
+        val extraFieldsBuf =
+            ByteBuffer.wrap(
+                data,
+                offset + SIZE + nameLength.toInt(),
+                cen.extraFieldLength.toInt(),
+            ).order(ByteOrder.LITTLE_ENDIAN)
+        while (extraFieldsBuf.remaining() > 0) {
val id = extraFieldsBuf.getShort().toUShort()
val size = extraFieldsBuf.getShort().toUShort()
- extraFieldRecords.add(when (id) {
- Zip64ExtraFieldRecord.ID -> {
- Zip64ExtraFieldRecord(
- size,
- if (cen.uncompressedSize == 0xffffffffU) {
- extraFieldsBuf.getLong().toULong()
- } else null,
- if (cen.compressedSize == 0xffffffffU) {
- extraFieldsBuf.getLong().toULong()
- } else null,
- if (cen.localHeaderOffset == 0xffffffffU) {
- extraFieldsBuf.getLong().toULong()
- } else null,
- if (cen.disk == 0xffffU.toUShort()) {
- extraFieldsBuf.getInt().toUInt()
- } else null
- )
- }
- else -> {
- extraFieldsBuf.position(extraFieldsBuf.position() + size.toInt())
- ExtraFieldRecord(id, size)
- }
- })
+ extraFieldRecords.add(
+ when (id) {
+ Zip64ExtraFieldRecord.ID -> {
+ Zip64ExtraFieldRecord(
+ size,
+ if (cen.uncompressedSize == 0xffffffffU) {
+ extraFieldsBuf.getLong().toULong()
+ } else {
+ null
+ },
+ if (cen.compressedSize == 0xffffffffU) {
+ extraFieldsBuf.getLong().toULong()
+ } else {
+ null
+ },
+ if (cen.localHeaderOffset == 0xffffffffU) {
+ extraFieldsBuf.getLong().toULong()
+ } else {
+ null
+ },
+ if (cen.disk == 0xffffU.toUShort()) {
+ extraFieldsBuf.getInt().toUInt()
+ } else {
+ null
+ },
+ )
+ }
+
+ else -> {
+ extraFieldsBuf.position(extraFieldsBuf.position() + size.toInt())
+ ExtraFieldRecord(id, size)
+ }
+ },
+ )
}
return cen
diff --git a/src/main/kotlin/ziputils/EndOfCentralDirectoryLocator.kt b/src/main/kotlin/ziputils/EndOfCentralDirectoryLocator.kt
index c73e314..70346fd 100644
--- a/src/main/kotlin/ziputils/EndOfCentralDirectoryLocator.kt
+++ b/src/main/kotlin/ziputils/EndOfCentralDirectoryLocator.kt
@@ -7,11 +7,12 @@ import java.nio.ByteOrder
* Represents a partial ZIP64 end of central directory locator.
*/
internal class EndOfCentralDirectoryLocator(
- val endOfCentralDirectory64Offset: ULong
+ val endOfCentralDirectory64Offset: ULong,
) {
companion object {
const val SIGNATURE = 0x07064b50U
const val SIZE = 20
+
/**
* Create `EndOfCentralDirectoryLocator` from raw byte data.
* @throws InvalidDataException Provided `ByteArray` is not a supported EOCD locator.
@@ -20,13 +21,16 @@ internal class EndOfCentralDirectoryLocator(
* @return A `EndOfCentralDirectoryLocator`.
*/
@Throws(InvalidDataException::class)
- fun fromByteArray(data: ByteArray, offset: Int): EndOfCentralDirectoryLocator {
+ fun fromByteArray(
+ data: ByteArray,
+ offset: Int,
+ ): EndOfCentralDirectoryLocator {
if (data.size - offset < SIZE) {
throw InvalidDataException("EOCD64 locator must be at least 20 bytes")
}
val buf = ByteBuffer.wrap(data, offset, SIZE).order(ByteOrder.LITTLE_ENDIAN)
if (buf.getInt().toUInt() != SIGNATURE) {
- throw InvalidSignatureException("Invalid signature")
+ throw InvalidDataException("Invalid signature")
}
buf.position(offset + 8)
return EndOfCentralDirectoryLocator(buf.getLong().toULong())
diff --git a/src/main/kotlin/ziputils/EndOfCentralDirectoryRecord.kt b/src/main/kotlin/ziputils/EndOfCentralDirectoryRecord.kt
index 4e1a132..f4065f8 100644
--- a/src/main/kotlin/ziputils/EndOfCentralDirectoryRecord.kt
+++ b/src/main/kotlin/ziputils/EndOfCentralDirectoryRecord.kt
@@ -7,14 +7,14 @@ import java.nio.ByteOrder
* Represents a partial ZIP end of central directory record.
*/
internal class EndOfCentralDirectoryRecord(
- val centralDirectoryOffset: UInt
+ val centralDirectoryOffset: UInt,
) {
- fun eocd64Required(): Boolean =
- centralDirectoryOffset == 0xffffffffU
+ fun eocd64Required(): Boolean = centralDirectoryOffset == 0xffffffffU
companion object {
const val SIGNATURE = 0x06054b50U
const val SIZE = 22
+
/**
* Create `EndOfCentralDirectoryRecord` from raw byte data.
* @throws InvalidDataException Provided `ByteArray` is not a supported EOCD64.
@@ -23,17 +23,20 @@ internal class EndOfCentralDirectoryRecord(
* @return A `EndOfCentralDirectoryRecord`.
*/
@Throws(InvalidDataException::class)
- fun fromByteArray(data: ByteArray, offset: Int): EndOfCentralDirectoryRecord {
+ fun fromByteArray(
+ data: ByteArray,
+ offset: Int,
+ ): EndOfCentralDirectoryRecord {
if (data.size - offset < SIZE) {
throw InvalidDataException("EOCD must be at least 22 bytes")
}
val buf = ByteBuffer.wrap(data, offset, SIZE).order(ByteOrder.LITTLE_ENDIAN)
if (buf.getInt().toUInt() != SIGNATURE) {
- throw InvalidSignatureException("Invalid signature")
+ throw InvalidDataException("Invalid signature")
}
buf.position(offset + 16)
return EndOfCentralDirectoryRecord(
- centralDirectoryOffset = buf.getInt().toUInt()
+ centralDirectoryOffset = buf.getInt().toUInt(),
)
}
}
diff --git a/src/main/kotlin/ziputils/EndOfCentralDirectoryRecord64.kt b/src/main/kotlin/ziputils/EndOfCentralDirectoryRecord64.kt
index 6c63889..6067019 100644
--- a/src/main/kotlin/ziputils/EndOfCentralDirectoryRecord64.kt
+++ b/src/main/kotlin/ziputils/EndOfCentralDirectoryRecord64.kt
@@ -7,11 +7,12 @@ import java.nio.ByteOrder
* Represents a partial ZIP64 end of central directory record.
*/
internal class EndOfCentralDirectoryRecord64(
- val centralDirectoryOffset: ULong
+ val centralDirectoryOffset: ULong,
) {
companion object {
const val SIGNATURE = 0x06064b50U
const val SIZE = 56
+
/**
* Create `EndOfCentralDirectoryRecord64` from raw byte data.
* @throws InvalidDataException Provided `ByteArray` is not a supported EOCD.
@@ -20,17 +21,20 @@ internal class EndOfCentralDirectoryRecord64(
* @return A `EndOfCentralDirectoryRecord64`.
*/
@Throws(InvalidDataException::class)
- fun fromByteArray(data: ByteArray, offset: Int): EndOfCentralDirectoryRecord64 {
+ fun fromByteArray(
+ data: ByteArray,
+ offset: Int,
+ ): EndOfCentralDirectoryRecord64 {
if (data.size - offset < SIZE) {
throw InvalidDataException("EOCD64 must be at least 56 bytes")
}
val buf = ByteBuffer.wrap(data, offset, SIZE).order(ByteOrder.LITTLE_ENDIAN)
if (buf.getInt().toUInt() != SIGNATURE) {
- throw InvalidSignatureException("Invalid signature")
+ throw InvalidDataException("Invalid signature")
}
buf.position(offset + 48)
return EndOfCentralDirectoryRecord64(
- centralDirectoryOffset = buf.getLong().toULong()
+ centralDirectoryOffset = buf.getLong().toULong(),
)
}
}
diff --git a/src/main/kotlin/ziputils/Exceptions.kt b/src/main/kotlin/ziputils/Exceptions.kt
deleted file mode 100644
index a0b8581..0000000
--- a/src/main/kotlin/ziputils/Exceptions.kt
+++ /dev/null
@@ -1,11 +0,0 @@
-package ziputils
-
-/**
- * Represents an invalid raw byte data exception.
- */
-class InvalidDataException(message: String): Exception(message)
-
-/**
- * Represents an invalid raw byte signature exception.
- */
-class InvalidSignatureException(message: String): Exception(message)
diff --git a/src/main/kotlin/ziputils/ExtraFieldRecord.kt b/src/main/kotlin/ziputils/ExtraFieldRecord.kt
index c5f67de..f577386 100644
--- a/src/main/kotlin/ziputils/ExtraFieldRecord.kt
+++ b/src/main/kotlin/ziputils/ExtraFieldRecord.kt
@@ -5,5 +5,5 @@ package ziputils
*/
internal open class ExtraFieldRecord(
val id: UShort,
- val size: UShort
+ val size: UShort,
)
\ No newline at end of file
diff --git a/src/main/kotlin/ziputils/InvalidDataException.kt b/src/main/kotlin/ziputils/InvalidDataException.kt
new file mode 100644
index 0000000..21b50a1
--- /dev/null
+++ b/src/main/kotlin/ziputils/InvalidDataException.kt
@@ -0,0 +1,6 @@
+package ziputils
+
+/**
+ * Represents an invalid raw byte data exception.
+ */
+class InvalidDataException(message: String) : Exception(message)
\ No newline at end of file
diff --git a/src/main/kotlin/ziputils/Zip64ExtraFieldRecord.kt b/src/main/kotlin/ziputils/Zip64ExtraFieldRecord.kt
index 0f6a827..70b1d81 100644
--- a/src/main/kotlin/ziputils/Zip64ExtraFieldRecord.kt
+++ b/src/main/kotlin/ziputils/Zip64ExtraFieldRecord.kt
@@ -8,8 +8,8 @@ internal class Zip64ExtraFieldRecord(
val uncompressedSize: ULong?,
val compressedSize: ULong?,
val localHeaderOffset: ULong?,
- val disk: UInt?
-): ExtraFieldRecord(ID, size) {
+ val disk: UInt?,
+) : ExtraFieldRecord(ID, size) {
companion object {
const val ID: UShort = 0x0001U
}