Compare commits
10 Commits
899faba255
...
2ed1b06f5e
Author | SHA1 | Date | |
---|---|---|---|
2ed1b06f5e | |||
b3bbd2e5d1 | |||
6507518cd7 | |||
9238d80d89 | |||
33f00e4489 | |||
8d724457e1 | |||
f260ee1c9f | |||
8abb1408f0 | |||
0f889a68ab | |||
cf9173b7ee |
2
.gitignore
vendored
2
.gitignore
vendored
@ -1 +1,3 @@
|
||||
forever
|
||||
forever-files
|
||||
/forever.exe
|
||||
|
12
.idea/dataSources.xml
generated
Normal file
12
.idea/dataSources.xml
generated
Normal file
@ -0,0 +1,12 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<project version="4">
|
||||
<component name="DataSourceManagerImpl" format="xml" multifile-model="true">
|
||||
<data-source source="LOCAL" name="ForeverFiles.db" uuid="e957d09a-a7fb-406a-a58c-3019b7380d7d">
|
||||
<driver-ref>sqlite.xerial</driver-ref>
|
||||
<synchronize>true</synchronize>
|
||||
<jdbc-driver>org.sqlite.JDBC</jdbc-driver>
|
||||
<jdbc-url>jdbc:sqlite:$USER_HOME$/AppData/Roaming/ForeverFiles/db/ForeverFiles.db</jdbc-url>
|
||||
<working-dir>$ProjectFileDir$</working-dir>
|
||||
</data-source>
|
||||
</component>
|
||||
</project>
|
4
.idea/forever-files.iml
generated
4
.idea/forever-files.iml
generated
@ -2,7 +2,9 @@
|
||||
<module type="WEB_MODULE" version="4">
|
||||
<component name="Go" enabled="true" />
|
||||
<component name="NewModuleRootManager">
|
||||
<content url="file://$MODULE_DIR$" />
|
||||
<content url="file://$MODULE_DIR$">
|
||||
<excludeFolder url="file://$MODULE_DIR$/.idea/dataSources" />
|
||||
</content>
|
||||
<orderEntry type="inheritedJdk" />
|
||||
<orderEntry type="sourceFolder" forTests="false" />
|
||||
</component>
|
||||
|
6
.idea/sqldialects.xml
generated
Normal file
6
.idea/sqldialects.xml
generated
Normal file
@ -0,0 +1,6 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<project version="4">
|
||||
<component name="SqlDialectMappings">
|
||||
<file url="PROJECT" dialect="SQLite" />
|
||||
</component>
|
||||
</project>
|
@ -1,31 +1,20 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"github.com/urfave/cli/v2"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/credentials/insecure"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
func main() {
|
||||
commands := []*cli.Command{
|
||||
&cli.Command{
|
||||
Name: "start",
|
||||
Aliases: []string{"s"},
|
||||
Usage: "Start the application service",
|
||||
Action: func(c *cli.Context) error {
|
||||
//d := daemon.NewDaemon()
|
||||
//d.Start()
|
||||
return nil
|
||||
},
|
||||
},
|
||||
gather(),
|
||||
plan(),
|
||||
}
|
||||
|
||||
app := &cli.App{
|
||||
Name: "ForeverFiles",
|
||||
Name: "forever",
|
||||
Usage: "Create backups designed to last forever",
|
||||
Version: "v1.0.0",
|
||||
Description: "ForeverFiles is a system for storing files forever.",
|
||||
@ -37,7 +26,7 @@ func main() {
|
||||
Email: "mason@masonitestudios.com",
|
||||
},
|
||||
},
|
||||
Copyright: fmt.Sprintf("%v Masonite Studios LLC", time.Now().Year()),
|
||||
Copyright: "2024 Masonite Studios LLC",
|
||||
UseShortOptionHandling: true,
|
||||
}
|
||||
err := app.Run(os.Args)
|
||||
@ -46,16 +35,3 @@ func main() {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func getConn(dialAddr string) (*grpc.ClientConn, error) {
|
||||
conn, err := grpc.DialContext(
|
||||
context.Background(),
|
||||
dialAddr,
|
||||
//grpc.WithBlock(),
|
||||
grpc.WithTransportCredentials(insecure.NewCredentials()),
|
||||
)
|
||||
if err != nil {
|
||||
return &grpc.ClientConn{}, fmt.Errorf("failed to dial server: %w", err)
|
||||
}
|
||||
return conn, nil
|
||||
}
|
||||
|
12
cmd/cli/flags.go
Normal file
12
cmd/cli/flags.go
Normal file
@ -0,0 +1,12 @@
|
||||
package main
|
||||
|
||||
import "github.com/urfave/cli/v2"
|
||||
|
||||
func baseDirFlag() *cli.StringFlag {
|
||||
return &cli.StringFlag{
|
||||
Name: "baseDir",
|
||||
Usage: "The base directory to gather info from",
|
||||
Aliases: []string{"b"},
|
||||
Value: ".",
|
||||
}
|
||||
}
|
69
cmd/cli/gather.go
Normal file
69
cmd/cli/gather.go
Normal file
@ -0,0 +1,69 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/dustin/go-humanize"
|
||||
|
||||
"forever-files/db"
|
||||
"forever-files/source"
|
||||
"forever-files/types"
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
func gather() *cli.Command {
|
||||
return &cli.Command{
|
||||
Name: "gather",
|
||||
Aliases: []string{"g"},
|
||||
Usage: "Collects the files to be backed up and stores their info in the database",
|
||||
Flags: []cli.Flag{
|
||||
&cli.BoolFlag{
|
||||
Name: "reset",
|
||||
Usage: "Reset the database before gathering info",
|
||||
Aliases: []string{"r"},
|
||||
Action: func(c *cli.Context, reset bool) error {
|
||||
if reset {
|
||||
err := db.DeleteDB(types.AppName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error deleting db: %w", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
},
|
||||
},
|
||||
baseDirFlag(),
|
||||
},
|
||||
Action: func(c *cli.Context) error {
|
||||
|
||||
store, err := db.NewDB(types.AppName)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("error creating db: %w", err))
|
||||
}
|
||||
defer store.Close()
|
||||
|
||||
err = store.Migrate()
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("error migrating db: %w", err))
|
||||
}
|
||||
|
||||
baseDir := c.String("baseDir")
|
||||
|
||||
err = source.GatherInfo(baseDir, store)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error gathering info: %w", err)
|
||||
}
|
||||
|
||||
fileCount, err := store.GetFileCount()
|
||||
if err != nil {
|
||||
return fmt.Errorf("error getting file count: %w", err)
|
||||
}
|
||||
totalSize, err := store.GetTotalSize()
|
||||
if err != nil {
|
||||
return fmt.Errorf("error getting total size: %w", err)
|
||||
}
|
||||
fmt.Printf("Total Files: %v\n", fileCount)
|
||||
fmt.Printf("Total Size: %v\n", humanize.Bytes(uint64(totalSize)))
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
}
|
137
cmd/cli/plan.go
Normal file
137
cmd/cli/plan.go
Normal file
@ -0,0 +1,137 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"forever-files/db"
|
||||
"forever-files/partitioner"
|
||||
"forever-files/types"
|
||||
"github.com/dustin/go-humanize"
|
||||
"github.com/urfave/cli/v2"
|
||||
"math"
|
||||
)
|
||||
|
||||
func plan() *cli.Command {
|
||||
return &cli.Command{
|
||||
Name: "plan",
|
||||
Aliases: []string{"p"},
|
||||
Usage: "Reads the database and plans the partitions for the backup, stores the partitions in the database",
|
||||
Flags: []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "targetSize",
|
||||
Usage: "The target size for each partition, valid options are DVD-SL, DVD-DL, BD-SL, BD-DL, BD-TL, BD-QL\nor you can provide any specific size e.g. 4.6GB, 8.1GB, 25GB, 50GB, 100GB, 128GB or 5MB!",
|
||||
Aliases: []string{"s"},
|
||||
Value: "DVD-SL",
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "verbose",
|
||||
Usage: "Print the files in each partition",
|
||||
Aliases: []string{"v"},
|
||||
Value: false,
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "left-pack",
|
||||
Usage: "Use the left-pack algorithm to calculate partitions",
|
||||
Aliases: []string{"l"},
|
||||
Value: false,
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "reset",
|
||||
Usage: "Reset the database before planning partitions",
|
||||
Aliases: []string{"r"},
|
||||
Value: false,
|
||||
},
|
||||
},
|
||||
Action: func(c *cli.Context) error {
|
||||
|
||||
store, err := db.NewDB(types.AppName)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("error creating db: %w", err))
|
||||
}
|
||||
defer store.Close()
|
||||
|
||||
err = store.Migrate()
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("error migrating db: %w", err))
|
||||
}
|
||||
|
||||
if c.Bool("reset") {
|
||||
err = store.RemovePartitionAssignment()
|
||||
if err != nil {
|
||||
return fmt.Errorf("error resetting partitions in the db: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
size := int64(4600000000) // size of a single layer DVD
|
||||
targetSize := c.String("targetSize")
|
||||
switch targetSize {
|
||||
case "DVD-SL":
|
||||
size = 4600000000
|
||||
case "DVD-DL":
|
||||
size = 8100000000
|
||||
case "BD-SL":
|
||||
size = 25000000000
|
||||
case "BD-DL":
|
||||
size = 50000000000
|
||||
case "BD-TL":
|
||||
size = 100000000000
|
||||
case "BD-QL":
|
||||
size = 128000000000
|
||||
default:
|
||||
// try to parse the size from human-readable format
|
||||
usize, err := humanize.ParseBytes(targetSize)
|
||||
if err != nil {
|
||||
fmt.Printf("invalid target size: %v\n", err)
|
||||
fmt.Println("valid options are DVD-SL, DVD-DL, BD-SL, BD-DL, BD-TL, BD-QL")
|
||||
fmt.Println("or you can provide any specific size e.g. 4.6GB, 8.1GB, 25GB, 50GB, 100GB, 128GB or 5MB!")
|
||||
size = 4600000000
|
||||
}
|
||||
if usize > math.MaxInt64 {
|
||||
fmt.Println("size is too large")
|
||||
size = 4600000000
|
||||
}
|
||||
size = int64(usize)
|
||||
}
|
||||
|
||||
fmt.Printf("Target Size: %v\n", humanize.Bytes(uint64(size)))
|
||||
fmt.Println("Calculating partitions...")
|
||||
|
||||
var partitions [][]types.FileMetadata
|
||||
|
||||
if !c.Bool("left-pack") {
|
||||
partitions, err = partitioner.CalculatePartitions(store, size)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error calculating partitions: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if c.Bool("left-pack") {
|
||||
partitions, err = partitioner.CalculatePartitionsLeftPack(store, size)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error calculating partitions: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
for i, partition := range partitions {
|
||||
partSize := int64(0)
|
||||
if c.Bool("verbose") {
|
||||
fmt.Printf("Partition %v:\n", i)
|
||||
}
|
||||
for _, file := range partition {
|
||||
if c.Bool("verbose") {
|
||||
fmt.Printf("%v/%v %v\n", file.Path, file.Name, humanize.Bytes(uint64(file.Size)))
|
||||
}
|
||||
partSize += file.Size
|
||||
// save the planned partitions
|
||||
file.PartitionId = fmt.Sprintf("%d", i)
|
||||
err = store.StoreFilePartition(file)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error storing file's partition: %w", err)
|
||||
}
|
||||
}
|
||||
fmt.Printf("Partition %v Size: %v files %v\n", i, len(partition), humanize.Bytes(uint64(partSize)))
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
}
|
242
db/db.go
Normal file
242
db/db.go
Normal file
@ -0,0 +1,242 @@
|
||||
package db
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
|
||||
"forever-files/types"
|
||||
|
||||
"github.com/kirsle/configdir"
|
||||
_ "github.com/mattn/go-sqlite3"
|
||||
)
|
||||
|
||||
type DB interface {
|
||||
Close() error
|
||||
Migrate() error
|
||||
StoreFile(fileMetadata types.FileMetadata) error
|
||||
RemoveFile(fileMetadata types.FileMetadata) error
|
||||
StoreFilePartition(fileMetadata types.FileMetadata) error
|
||||
GetTotalSize() (int64, error)
|
||||
RemovePartitionAssignment() error
|
||||
GetFileCount() (int64, error)
|
||||
GetFiles() ([]types.FileMetadata, error)
|
||||
}
|
||||
|
||||
type store struct {
|
||||
db *sql.DB
|
||||
}
|
||||
|
||||
type Migrations struct {
|
||||
name string
|
||||
query string
|
||||
}
|
||||
|
||||
var migrations = []Migrations{
|
||||
{
|
||||
name: "001-sourceFiles",
|
||||
query: `CREATE TABLE IF NOT EXISTS files (
|
||||
id INTEGER PRIMARY KEY,
|
||||
name TEXT NOT NULL,
|
||||
path TEXT NOT NULL,
|
||||
size INTEGER NOT NULL,
|
||||
hash TEXT NOT NULL,
|
||||
modifiedDate TIMESTAMP NOT NULL,
|
||||
backedUp BOOLEAN NOT NULL,
|
||||
partitionId TEXT DEFAULT ''
|
||||
)`,
|
||||
},
|
||||
{
|
||||
name: "002-fileUniqueConstraint",
|
||||
query: `CREATE UNIQUE INDEX IF NOT EXISTS file_unique ON files (name, path, hash)`,
|
||||
},
|
||||
}
|
||||
|
||||
func NewDB(appName string) (DB, error) {
|
||||
dbPath, err := createDBFileIfNotExist(appName)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error creating db file %w", err)
|
||||
}
|
||||
dbSQL, err := sql.Open("sqlite3", dbPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error opening db | %w", err)
|
||||
}
|
||||
return &store{
|
||||
db: dbSQL,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func DeleteDB(appName string) error {
|
||||
configPath := configdir.LocalConfig(appName)
|
||||
dbPath := path.Join(configPath, "db", fmt.Sprintf("%v.db", appName))
|
||||
err := os.Remove(dbPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error deleting db | %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *store) StoreFile(fileMetadata types.FileMetadata) error {
|
||||
query := `INSERT INTO files (name, path, size, hash, modifiedDate, backedUp) VALUES (?, ?, ?, ?, ?, ?) ON CONFLICT (name, path, hash) DO UPDATE SET size = ?, modifiedDate = ?, backedUp = ?`
|
||||
_, err := d.db.Exec(
|
||||
query,
|
||||
fileMetadata.Name,
|
||||
fileMetadata.Path,
|
||||
fileMetadata.Size,
|
||||
fileMetadata.Hash,
|
||||
fileMetadata.ModifiedDate,
|
||||
fileMetadata.BackedUp,
|
||||
fileMetadata.Size,
|
||||
fileMetadata.ModifiedDate,
|
||||
fileMetadata.BackedUp,
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error storing file metadata | %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *store) RemoveFile(fileMetadata types.FileMetadata) error {
|
||||
query := `DELETE FROM files WHERE name = ? AND path = ? AND hash = ?`
|
||||
_, err := d.db.Exec(query, fileMetadata.Name, fileMetadata.Path, fileMetadata.Hash)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error removing file metadata | %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *store) StoreFilePartition(fileMetadata types.FileMetadata) error {
|
||||
query := `UPDATE files SET partitionId = ? WHERE name = ? AND path = ? AND hash = ?`
|
||||
_, err := d.db.Exec(
|
||||
query,
|
||||
fileMetadata.PartitionId,
|
||||
fileMetadata.Name,
|
||||
fileMetadata.Path,
|
||||
fileMetadata.Hash,
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error storing file's partiition | %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *store) RemovePartitionAssignment() error {
|
||||
query := `UPDATE files SET partitionId = ''`
|
||||
_, err := d.db.Exec(query)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error removing partition assignment | %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *store) GetTotalSize() (int64, error) {
|
||||
var size int64
|
||||
query := `SELECT SUM(size) FROM files`
|
||||
err := d.db.QueryRow(query).Scan(&size)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("error getting size | %w", err)
|
||||
}
|
||||
return size, nil
|
||||
}
|
||||
|
||||
func (d *store) GetFileCount() (int64, error) {
|
||||
var count int64
|
||||
query := `SELECT COUNT(*) FROM files`
|
||||
err := d.db.QueryRow(query).Scan(&count)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("error getting count | %w", err)
|
||||
}
|
||||
return count, nil
|
||||
}
|
||||
|
||||
func (d *store) GetFiles() ([]types.FileMetadata, error) {
|
||||
var files []types.FileMetadata
|
||||
query := `SELECT name, path, size, hash, modifiedDate, backedUp, partitionId FROM files order by path, name`
|
||||
rows, err := d.db.Query(query)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error getting files | %w", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
for rows.Next() {
|
||||
var file types.FileMetadata
|
||||
err := rows.Scan(&file.Name, &file.Path, &file.Size, &file.Hash, &file.ModifiedDate, &file.BackedUp, &file.PartitionId)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error scanning file | %w", err)
|
||||
}
|
||||
files = append(files, file)
|
||||
}
|
||||
return files, nil
|
||||
}
|
||||
|
||||
func (d *store) Close() error {
|
||||
return d.db.Close()
|
||||
}
|
||||
|
||||
func (d *store) Migrate() error {
|
||||
// check if migration table exists
|
||||
var migrationsCheck string
|
||||
//goland:noinspection SqlResolve
|
||||
err := d.db.QueryRow("SELECT name FROM sqlite_master WHERE type='table' AND name='migrations'").Scan(&migrationsCheck)
|
||||
if err != nil {
|
||||
if err == sql.ErrNoRows {
|
||||
_, err := d.db.Exec("CREATE TABLE migrations (name TEXT NOT NULL)")
|
||||
if err != nil {
|
||||
return fmt.Errorf("error creating migrations table | %w", err)
|
||||
}
|
||||
} else {
|
||||
return fmt.Errorf("error checking if migrations table exists | %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
for _, migration := range migrations {
|
||||
var migrationInHistory string
|
||||
err = d.db.QueryRow("SELECT name FROM migrations WHERE name = ?", migration.name).Scan(&migrationInHistory)
|
||||
if err != nil {
|
||||
if err == sql.ErrNoRows {
|
||||
_, err := d.db.Exec(migration.query)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error running migration: %s | %w", migration.name, err)
|
||||
}
|
||||
_, err = d.db.Exec("INSERT INTO migrations (name) VALUES (?)", migration.name)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error inserting migration: %s into migrations table | %w", migration.name, err)
|
||||
}
|
||||
} else {
|
||||
return fmt.Errorf("error checking if migration: %s has been run | %w", migration.name, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func createDBFileIfNotExist(appName string) (string, error) {
|
||||
configPath := configdir.LocalConfig(appName)
|
||||
|
||||
// set up the config directory
|
||||
err := configdir.MakePath(configPath)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error creating config directory: %w", err)
|
||||
}
|
||||
|
||||
dbDirectoryPath := path.Join(configPath, "db")
|
||||
dbPath := path.Join(configPath, "db", fmt.Sprintf("%v.db", appName))
|
||||
// Set up the database
|
||||
err = configdir.MakePath(dbDirectoryPath)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error creating db directory: %w", err)
|
||||
}
|
||||
|
||||
// If the file doesn't exist, create it, or append to the file
|
||||
f, err := os.OpenFile(dbPath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error opening file: %v", err)
|
||||
}
|
||||
defer func(f *os.File) {
|
||||
err := f.Close()
|
||||
if err != nil {
|
||||
fmt.Println("error closing file")
|
||||
}
|
||||
}(f)
|
||||
return dbPath, nil
|
||||
}
|
23
fileUtilities/hash.go
Normal file
23
fileUtilities/hash.go
Normal file
@ -0,0 +1,23 @@
|
||||
package fileUtilities
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
)
|
||||
|
||||
func HashFile(filePath string) ([]byte, error) {
|
||||
file, err := os.Open(filePath)
|
||||
if err != nil {
|
||||
return []byte{}, fmt.Errorf("error opening file for hashing: %w", err)
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
h := sha256.New()
|
||||
if _, err := io.Copy(h, file); err != nil {
|
||||
return []byte{}, fmt.Errorf("error hashing file: %w", err)
|
||||
}
|
||||
|
||||
return h.Sum(nil), nil
|
||||
}
|
55
fileUtilities/parity.go
Normal file
55
fileUtilities/parity.go
Normal file
@ -0,0 +1,55 @@
|
||||
package fileUtilities
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
func parityStream(in1, in2 io.Reader, out io.Writer) error {
|
||||
var err error
|
||||
byteSize := 1024
|
||||
done1 := false
|
||||
done2 := false
|
||||
for !done1 && !done2 {
|
||||
// get bytes from in1 and in2 and write the parity to buf
|
||||
// if either in1 or in2 is done, write the remaining bytes from the other to buf
|
||||
in1Bytes := make([]byte, byteSize)
|
||||
in2Bytes := make([]byte, byteSize)
|
||||
read1 := 0
|
||||
read2 := 0
|
||||
if !done1 {
|
||||
read1, err = in1.Read(in1Bytes)
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
done1 = true
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
if !done2 {
|
||||
read2, err = in2.Read(in2Bytes)
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
done2 = true
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
maxRead := read1
|
||||
if read2 > maxRead {
|
||||
maxRead = read2
|
||||
}
|
||||
|
||||
parityBytes := make([]byte, maxRead)
|
||||
for i := 0; i < maxRead; i++ {
|
||||
parityBytes[i] = in1Bytes[i] ^ in2Bytes[i]
|
||||
}
|
||||
_, err := out.Write(parityBytes)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error writing to buffer: %w", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
70
fileUtilities/parity_test.go
Normal file
70
fileUtilities/parity_test.go
Normal file
@ -0,0 +1,70 @@
|
||||
package fileUtilities
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func Test_createParityFile(t *testing.T) {
|
||||
type args struct {
|
||||
in1 io.Reader
|
||||
in2 io.Reader
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
wantOut string
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "create parity file",
|
||||
args: args{
|
||||
in1: bytes.NewBuffer([]byte{0, 1, 2, 3, 4, 5, 6, 7}),
|
||||
in2: bytes.NewBuffer([]byte{7, 6, 5, 4, 3, 2, 1, 0}),
|
||||
},
|
||||
wantOut: string([]byte{7, 7, 7, 7, 7, 7, 7, 7}),
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "in1 is longer than in2",
|
||||
args: args{
|
||||
in1: bytes.NewBuffer([]byte{0, 1, 2, 3, 4, 5, 6, 7, 8}),
|
||||
in2: bytes.NewBuffer([]byte{7, 6, 5, 4, 3, 2, 1, 0}),
|
||||
},
|
||||
wantOut: string([]byte{7, 7, 7, 7, 7, 7, 7, 7, 8}),
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "in2 is longer than in1",
|
||||
args: args{
|
||||
in1: bytes.NewBuffer([]byte{0, 1, 2, 3, 4, 5, 6, 7}),
|
||||
in2: bytes.NewBuffer([]byte{7, 6, 5, 4, 3, 2, 1, 0, 54}),
|
||||
},
|
||||
wantOut: string([]byte{7, 7, 7, 7, 7, 7, 7, 7, 54}),
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "parity recreates original file",
|
||||
args: args{
|
||||
in1: bytes.NewBuffer([]byte{0, 1, 2, 3, 4, 5, 6, 7}),
|
||||
in2: bytes.NewBuffer([]byte{7, 7, 7, 7, 7, 7, 7, 7, 54}),
|
||||
},
|
||||
wantOut: string([]byte{7, 6, 5, 4, 3, 2, 1, 0, 54}),
|
||||
wantErr: false,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
out := &bytes.Buffer{}
|
||||
err := parityStream(tt.args.in1, tt.args.in2, out)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("parityStream() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if gotOut := out.String(); gotOut != tt.wantOut {
|
||||
t.Errorf("parityStream() gotOut = %v, want %v", gotOut, tt.wantOut)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
86
fileUtilities/zip.go
Normal file
86
fileUtilities/zip.go
Normal file
@ -0,0 +1,86 @@
|
||||
package fileUtilities
|
||||
|
||||
import (
|
||||
"archive/zip"
|
||||
"fmt"
|
||||
"forever-files/types"
|
||||
"io"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func CreateZip(fileName, outDir, baseDir string, partition []types.FileMetadata) error {
|
||||
// Create a buffer to write our archive to.
|
||||
outFile := path.Join(outDir, fileName+".zip")
|
||||
zipFile, err := os.OpenFile(outFile, os.O_CREATE|os.O_WRONLY, 0644)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error opening/creating zip file: %w", err)
|
||||
}
|
||||
|
||||
// Create a new zip archive.
|
||||
w := zip.NewWriter(zipFile)
|
||||
|
||||
files := prepFiles(baseDir, partition)
|
||||
|
||||
for _, file := range files {
|
||||
zf, err := w.Create(file.Name)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error creating zip file: %w", err)
|
||||
}
|
||||
f, err := os.Open(file.Path)
|
||||
if err != nil {
|
||||
fmt.Println(fmt.Sprintf("error opening file: %v", err))
|
||||
fmt.Println(fmt.Sprintf("skipping file: %v", file.Path))
|
||||
continue
|
||||
}
|
||||
if _, err := io.Copy(zf, f); err != nil {
|
||||
return fmt.Errorf("error copying file to zip file: %w", err)
|
||||
}
|
||||
err = f.Close()
|
||||
if err != nil {
|
||||
return fmt.Errorf("error closing file: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Make sure to check the error on Close.
|
||||
err = w.Close()
|
||||
if err != nil {
|
||||
return fmt.Errorf("error closing zip file: %w", err)
|
||||
}
|
||||
|
||||
err = zipFile.Close()
|
||||
if err != nil {
|
||||
return fmt.Errorf("error closing zip file: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func prepFiles(baseDir string, partition []types.FileMetadata) []struct {
|
||||
Name, Path string
|
||||
} {
|
||||
var files []struct {
|
||||
Name, Path string
|
||||
}
|
||||
for _, file := range partition {
|
||||
filePath := path.Join(file.Path, file.Name)
|
||||
|
||||
// from zip.Create documentation:
|
||||
// ...The name must be a relative path: it must not start with a
|
||||
// drive letter (e.g. C:) or leading slash, and only forward slashes
|
||||
// are allowed...
|
||||
fileName := strings.Replace(replaceBackslashes(strings.Replace(filePath, baseDir, "", 1)), "/", "", 1)
|
||||
files = append(files, struct {
|
||||
Name, Path string
|
||||
}{
|
||||
Name: fileName,
|
||||
Path: filePath,
|
||||
})
|
||||
}
|
||||
return files
|
||||
}
|
||||
|
||||
// replaceBackslashes normalizes Windows-style path separators to the
// forward slashes required inside zip archive entry names.
func replaceBackslashes(input string) string {
	segments := strings.Split(input, "\\")
	return strings.Join(segments, "/")
}
|
36
fileUtilities/zip_test.go
Normal file
36
fileUtilities/zip_test.go
Normal file
@ -0,0 +1,36 @@
|
||||
package fileUtilities
|
||||
|
||||
import "testing"
|
||||
|
||||
func Test_replaceBackslashes(t *testing.T) {
|
||||
type args struct {
|
||||
input string
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want string
|
||||
}{
|
||||
{
|
||||
name: "replace backslashes",
|
||||
args: args{
|
||||
input: "C:\\Users\\james\\Documents\\test",
|
||||
},
|
||||
want: "C:/Users/james/Documents/test",
|
||||
},
|
||||
{
|
||||
name: "no backslashes",
|
||||
args: args{
|
||||
input: "C:/Users/james/Documents/test",
|
||||
},
|
||||
want: "C:/Users/james/Documents/test",
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := replaceBackslashes(tt.args.input); got != tt.want {
|
||||
t.Errorf("replaceBackslashes() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
3
go.mod
3
go.mod
@ -3,6 +3,9 @@ module forever-files
|
||||
go 1.19
|
||||
|
||||
require (
|
||||
github.com/dustin/go-humanize v1.0.1
|
||||
github.com/kirsle/configdir v0.0.0-20170128060238-e45d2f54772f
|
||||
github.com/mattn/go-sqlite3 v1.14.17
|
||||
github.com/urfave/cli/v2 v2.25.5
|
||||
google.golang.org/grpc v1.55.0
|
||||
)
|
||||
|
6
go.sum
6
go.sum
@ -1,10 +1,16 @@
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
||||
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
|
||||
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
|
||||
github.com/kirsle/configdir v0.0.0-20170128060238-e45d2f54772f h1:dKccXx7xA56UNqOcFIbuqFjAWPVtP688j5QMgmo6OHU=
|
||||
github.com/kirsle/configdir v0.0.0-20170128060238-e45d2f54772f/go.mod h1:4rEELDSfUAlBSyUjPG0JnaNGjf13JySHFeRdD/3dLP0=
|
||||
github.com/mattn/go-sqlite3 v1.14.17 h1:mCRHCLDUBXgpKAqIKsaAaAsrAlbkeomtRFKXh2L6YIM=
|
||||
github.com/mattn/go-sqlite3 v1.14.17/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
|
||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/urfave/cli/v2 v2.25.5 h1:d0NIAyhh5shGscroL7ek/Ya9QYQE0KNabJgiUinIQkc=
|
||||
|
57
main.go
Normal file
57
main.go
Normal file
@ -0,0 +1,57 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"forever-files/db"
|
||||
"forever-files/fileUtilities"
|
||||
"forever-files/partitioner"
|
||||
"forever-files/source"
|
||||
"forever-files/types"
|
||||
)
|
||||
|
||||
func main() {
|
||||
|
||||
fmt.Printf("%v\n", types.AppName)
|
||||
baseDir := "C:\\Users\\gomas\\Nextcloud"
|
||||
|
||||
store, err := db.NewDB(types.AppName)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("error creating db: %w", err))
|
||||
}
|
||||
defer store.Close()
|
||||
|
||||
err = store.Migrate()
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("error migrating db: %w", err))
|
||||
}
|
||||
|
||||
source.GatherInfo(baseDir, store)
|
||||
oneDVDSize := int64(4600000000)
|
||||
//oneBRSize := int64(25000000000) // size of a small Blu-ray disc
|
||||
partitions, err := partitioner.CalculatePartitions(store, oneDVDSize)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("error calculating partitions: %w", err))
|
||||
}
|
||||
|
||||
// zip up the files in each partition
|
||||
partitionCount := len(partitions)
|
||||
for i, partition := range partitions {
|
||||
fileName := fmt.Sprintf("partition%0*d", getZeroPadAmount(partitionCount), i)
|
||||
fmt.Printf("Creating zip file: %v\n", fileName)
|
||||
err = fileUtilities.CreateZip(fileName, "C:\\tmp\\", baseDir, partition)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("error creating zip: %w", err))
|
||||
}
|
||||
}
|
||||
|
||||
// create parities for each zip file pair, figure out how to store the length of each zip file with the parity
|
||||
|
||||
// create a folder for each DVD add the scripts and zip files
|
||||
|
||||
// copy the zip files to the DVD
|
||||
}
|
||||
|
||||
// getZeroPadAmount reports how many characters are needed to print n in
// decimal — the width used to zero-pad partition numbers so archive file
// names sort correctly.
func getZeroPadAmount(n int) int {
	return len(fmt.Sprintf("%d", n))
}
|
138
partitioner/partitioner.go
Normal file
138
partitioner/partitioner.go
Normal file
@ -0,0 +1,138 @@
|
||||
package partitioner
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"forever-files/db"
|
||||
"forever-files/types"
|
||||
"github.com/dustin/go-humanize"
|
||||
"sort"
|
||||
)
|
||||
|
||||
func CalculatePartitions(store db.DB, targetSize int64) (partitions [][]types.FileMetadata, err error) {
|
||||
totalSize, err := store.GetTotalSize()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error getting total size: %w", err)
|
||||
}
|
||||
if targetSize <= 0 {
|
||||
targetSize = totalSize / 2
|
||||
}
|
||||
fmt.Printf("Total Size: %v\n", totalSize)
|
||||
fmt.Printf("Target Size: %v\n", targetSize)
|
||||
|
||||
files, err := store.GetFiles()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error getting files: %w", err)
|
||||
}
|
||||
partitions = make([][]types.FileMetadata, 0)
|
||||
partitionSize := int64(0)
|
||||
partitionFiles := make([]types.FileMetadata, 0)
|
||||
overSizedFiles := make([]types.FileMetadata, 0)
|
||||
overSizedSize := int64(0)
|
||||
for _, file := range files {
|
||||
if partitionSize+file.Size > targetSize {
|
||||
//fmt.Printf("Partition Size: %v\n", humanize.Bytes(uint64(partitionSize)))
|
||||
partitions = append(partitions, partitionFiles)
|
||||
partitionFiles = make([]types.FileMetadata, 0)
|
||||
partitionSize = 0
|
||||
}
|
||||
if partitionSize < targetSize && partitionSize+file.Size < targetSize {
|
||||
partitionFiles = append(partitionFiles, file)
|
||||
partitionSize += file.Size
|
||||
} else {
|
||||
overSizedFiles = append(overSizedFiles, file)
|
||||
overSizedSize += file.Size
|
||||
}
|
||||
}
|
||||
if len(partitionFiles) > 0 {
|
||||
partitions = append(partitions, partitionFiles)
|
||||
}
|
||||
|
||||
//for _, partition := range partitions {
|
||||
// fmt.Printf("Partition File Count: %v\n", len(partition))
|
||||
//}
|
||||
fmt.Printf("Over Sized File Count: %v\n", len(overSizedFiles))
|
||||
fmt.Printf("Total Over Sized Size: %v\n", humanize.Bytes(uint64(overSizedSize)))
|
||||
|
||||
return partitions, nil
|
||||
}
|
||||
|
||||
// CalculatePartitionsLeftPack calculates the partitions efficiently by searching for files that fit the remaining space in each partition
|
||||
func CalculatePartitionsLeftPack(store db.DB, targetSize int64) (partitions [][]types.FileMetadata, err error) {
|
||||
totalSize, err := store.GetTotalSize()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error getting total size: %w", err)
|
||||
}
|
||||
if targetSize <= 0 {
|
||||
targetSize = totalSize / 2
|
||||
}
|
||||
fmt.Printf("Total Size: %v\n", totalSize)
|
||||
fmt.Printf("Target Size: %v\n", targetSize)
|
||||
|
||||
files, err := store.GetFiles()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error getting files: %w", err)
|
||||
}
|
||||
partitions = make([][]types.FileMetadata, 0)
|
||||
partitionSize := int64(0)
|
||||
partitionFiles := make([]types.FileMetadata, 0)
|
||||
overSizedFiles := make([]types.FileMetadata, 0)
|
||||
overSizedSize := int64(0)
|
||||
|
||||
// sort files by size
|
||||
sort.SliceStable(files, func(i, j int) bool {
|
||||
return files[i].Size > files[j].Size
|
||||
})
|
||||
|
||||
for _, file := range files {
|
||||
if file.Size > targetSize {
|
||||
overSizedFiles = append(overSizedFiles, file)
|
||||
overSizedSize += file.Size
|
||||
file.PartitionId = "-1"
|
||||
} else {
|
||||
// you've hit files that are smaller than the target size
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
partitionIndex := int64(0)
|
||||
// pick the largest file that fits in the partition's remaining space using sort.Search()
|
||||
// for loop counting down for the size of the files slice
|
||||
for i := len(files) - 1; i >= 0; i-- {
|
||||
index := indexOfLargestFittingFile(files, targetSize-partitionSize)
|
||||
if index == -1 {
|
||||
// no files fit in the partition so move on to the next partition
|
||||
partitions = append(partitions, partitionFiles)
|
||||
partitionFiles = make([]types.FileMetadata, 0)
|
||||
partitionSize = 0
|
||||
partitionIndex++
|
||||
index = indexOfLargestFittingFile(files, targetSize-partitionSize)
|
||||
if index == -1 {
|
||||
// no files fit in the new partition so break out of the loop
|
||||
break
|
||||
}
|
||||
}
|
||||
partitionFiles = append(partitionFiles, files[index])
|
||||
partitionSize += files[index].Size
|
||||
files[index].PartitionId = fmt.Sprintf("%d", partitionIndex)
|
||||
// remove the file from the slice
|
||||
files = append(files[:index], files[index+1:]...)
|
||||
}
|
||||
if len(partitionFiles) > 0 {
|
||||
partitions = append(partitions, partitionFiles)
|
||||
}
|
||||
fmt.Printf("Over Sized File Count: %v\n", len(overSizedFiles))
|
||||
fmt.Printf("Total Over Sized Size: %v\n", humanize.Bytes(uint64(overSizedSize)))
|
||||
return partitions, nil
|
||||
}
|
||||
|
||||
func indexOfLargestFittingFile(files []types.FileMetadata, remainingSize int64) int {
|
||||
// find the index of the largest file that fits in the remaining space
|
||||
index := sort.Search(len(files), func(i int) bool {
|
||||
return files[i].Size < remainingSize
|
||||
})
|
||||
// if index is == len(files) then there are no files that fit
|
||||
if index == len(files) {
|
||||
return -1
|
||||
}
|
||||
return index
|
||||
}
|
71
source/source.go
Normal file
71
source/source.go
Normal file
@ -0,0 +1,71 @@
|
||||
package source
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"forever-files/db"
|
||||
"forever-files/types"
|
||||
"log"
|
||||
"os"
|
||||
"path"
|
||||
)
|
||||
|
||||
// the purpose of this package is to gather information about the source files for the backup
|
||||
// it will store the information in a database
|
||||
// information to gather:
|
||||
// - file name
|
||||
// - file path
|
||||
// - file size
|
||||
// - file hash
|
||||
// - modified date
|
||||
|
||||
func GatherInfo(path string, db db.DB) error {
|
||||
err := walkDir(path, db)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error walking directory: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func walkDir(dirPath string, db db.DB) error {
|
||||
// get list of files in directory
|
||||
directoryEntries, err := os.ReadDir(dirPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error reading directory: %w", err)
|
||||
}
|
||||
for _, entry := range directoryEntries {
|
||||
if entry.IsDir() {
|
||||
err = walkDir(path.Join(dirPath, entry.Name()), db)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error walking directory: %w", err)
|
||||
}
|
||||
} else {
|
||||
// gather info
|
||||
fileInfo, err := entry.Info()
|
||||
if err != nil {
|
||||
log.Default().Printf("error getting file info: %v", err)
|
||||
continue
|
||||
}
|
||||
//hash, err := fileUtilities.HashFile(path.Join(dirPath, entry.Name()))
|
||||
//if err != nil {
|
||||
// log.Default().Printf("error hashing file: %v", err)
|
||||
// continue
|
||||
//}
|
||||
// store info
|
||||
//fmt.Printf("Name: %v, Size: %v, Modified Date: %v, Hash: %v\n", fileInfo.Name(), fileInfo.Size(), fileInfo.ModTime(), hash)
|
||||
err = db.StoreFile(types.FileMetadata{
|
||||
Name: fileInfo.Name(),
|
||||
Path: dirPath,
|
||||
Size: fileInfo.Size(),
|
||||
Hash: []byte("test"),
|
||||
//Hash: hash,
|
||||
ModifiedDate: fileInfo.ModTime(),
|
||||
BackedUp: false,
|
||||
})
|
||||
if err != nil {
|
||||
log.Default().Printf("error storing file metadata: %v", err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
17
types/types.go
Normal file
17
types/types.go
Normal file
@ -0,0 +1,17 @@
|
||||
package types
|
||||
|
||||
import "time"
|
||||
|
||||
const (
	// AppName is the name of the application.
	AppName = "ForeverFiles"
)
|
||||
|
||||
// FileMetadata describes a single file tracked for backup.
type FileMetadata struct {
	Name string // base file name
	Path string // directory containing the file (not the full path to the file)
	Size int64 // file size in bytes
	Hash []byte // content hash — currently a placeholder during scanning; TODO confirm final format
	ModifiedDate time.Time // last modification time reported by the filesystem
	PartitionId string // backup partition assignment; "-1" marks files too large to pack
	BackedUp bool // whether the file has already been backed up
}
|
Reference in New Issue
Block a user