mirror of
https://github.com/eftechcombr/bacula.git
synced 2025-10-11 03:36:19 +00:00
Build 13.0.3
This commit is contained in:
72
docker/etc/samples/bacula-dir-cloud-aws.conf.sample
Executable file
72
docker/etc/samples/bacula-dir-cloud-aws.conf.sample
Executable file
@@ -0,0 +1,72 @@
# bacula-dir-cloud-aws.conf
#
# JobDefs
# Job
# Restore
# Pool
# Autochanger
#

# Template to store in cloud (AWS S3)
JobDefs {
  Name = "DefaultJobToCloudAWS"
  Type = Backup
  Level = Incremental
  Client = bacula-fd
  FileSet = "Full Set"
  Schedule = "WeeklyCycle"
  Storage = "CloudS3AWS"
  Messages = Standard
  Pool = CloudAWS
  SpoolAttributes = yes
  Priority = 10
  Write Bootstrap = "/opt/bacula/working/%c.bsr"   # %c = client name
}
# Jobs
Job {
  Name = "BackupClient1ToCloudAWS"
  JobDefs = "DefaultJobToCloudAWS"   # everything else inherited from the JobDefs
}
# Restore
Job {
  Name = "RestoreFromCloudAWS"
  Type = Restore
  Client = bacula-fd
  Storage = CloudS3AWS
  FileSet = "Full Set"
  Pool = CloudAWS
  Messages = Standard
  Where = /tmp/bacula-restores   # restore under this prefix, not in place
}
# Cloud Pool definition
Pool {
  Name = CloudAWS
  Pool Type = Backup
  Recycle = no                          # purged Volumes are NOT reused automatically
  AutoPrune = yes                       # prune expired volumes
  Volume Retention = 365 days           # one year
  Maximum Volume Jobs = 1               # one job per Volume
  # Maximum Volume Bytes = 100M         # limit Volume size to something reasonable
  Label Format = "Vol-JobId-${JobId}"   # auto label
}
# Autochanger definition (Director-side Storage resource)
Autochanger {
  Name = "CloudS3AWS"
  # Do not use "localhost" here
  Address = bacula-sd              # N.B. use a fully qualified name here
  SDPort = 9103
  Password = "TS8EQJ99iLFSK39oJy33YqkZ98v4ZapjRcA+j1N6ED1n"
  Device = "CloudAutoChangerS3"
  Media Type = "CloudType"
  Maximum Concurrent Jobs = 10     # run up to 10 jobs at the same time
}
96
docker/etc/samples/bacula-dir-cloud.conf.sample
Executable file
96
docker/etc/samples/bacula-dir-cloud.conf.sample
Executable file
@@ -0,0 +1,96 @@
# bacula-dir-cloud.conf
#
# JobDefs
# Job
# Restore
# Pool
# Autochanger
#

# Template to store in cloud
JobDefs {
  Name = "DefaultJobToCloud"
  Type = Backup
  Level = Incremental
  Client = bacula-fd
  FileSet = "Full Set"
  Schedule = "WeeklyCycle"
  Storage = "CloudS3"
  Messages = Standard
  Pool = Cloud
  SpoolAttributes = yes
  Priority = 10
  Write Bootstrap = "/opt/bacula/working/%c.bsr"   # %c = client name
}
# Jobs
Job {
  Name = "BackupClient1ToCloud"
  JobDefs = "DefaultJobToCloud"   # everything else inherited from the JobDefs
}
# Restore
Job {
  Name = "RestoreFromCloud"
  Type = Restore
  Client = bacula-fd
  Storage = CloudS3
  FileSet = "Full Set"
  Pool = Cloud
  Messages = Standard
  Where = /tmp/bacula-restores   # restore under this prefix, not in place
}
# Cloud Pool definition
Pool {
  Name = Cloud
  Pool Type = Backup
  Recycle = no                          # purged Volumes are NOT reused automatically
  AutoPrune = yes                       # prune expired volumes
  Volume Retention = 365 days           # one year
  Maximum Volume Jobs = 1               # one job per Volume
  # Maximum Volume Bytes = 100M         # limit Volume size to something reasonable
  Label Format = "Vol-JobId-${JobId}"   # auto label
}
# Autochanger definition (Director-side Storage resource)
Autochanger {
  Name = "CloudS3"
  # Do not use "localhost" here
  Address = bacula-sd              # N.B. use a fully qualified name here
  SDPort = 9103
  Password = "TS8EQJ99iLFSK39oJy33YqkZ98v4ZapjRcA+j1N6ED1n"
  Device = "CloudAutoChanger1"
  Media Type = "CloudType"
  Maximum Concurrent Jobs = 10     # run up to 10 jobs at the same time
}

#
#Autochanger {
#  Name = "CloudS3-2"
## Do not use "localhost" here
#  Address = bacula-sd              # N.B. use a fully qualified name here
#  SDPort = 9103
#  Password = "TS8EQJ99iLFSK39oJy33YqkZ98v4ZapjRcA+j1N6ED1n"
#  Device = "CloudAutoChanger2"
#  Media Type = "CloudType"
#  Maximum Concurrent Jobs = 10     # run up to 10 jobs at the same time
#}
#
#Autochanger {
#  Name = "CloudS3-3"
## Do not use "localhost" here
#  Address = bacula-sd              # N.B. use a fully qualified name here
#  SDPort = 9103
#  Password = "TS8EQJ99iLFSK39oJy33YqkZ98v4ZapjRcA+j1N6ED1n"
#  Device = "CloudAutoChanger3"
#  Media Type = "CloudType"
#  Maximum Concurrent Jobs = 10     # run up to 10 jobs at the same time
#}
#
50
docker/etc/samples/bacula-sd-s3.conf.sample
Executable file
50
docker/etc/samples/bacula-sd-s3.conf.sample
Executable file
@@ -0,0 +1,50 @@
# Define a virtual autochanger for AWS S3
#
# Change AccessKey and SecretKey on Cloud resource
#

# Autochangers

Autochanger {
  Name = "CloudAutoChangerS3"
  Device = CloudStorageS3
  Changer Command = ""          # virtual changer: no changer program
  Changer Device = /dev/null    # virtual changer: no physical device
}
# Devices

Device {
  Name = "CloudStorageS3"
  Device Type = "Cloud"
  Cloud = "S3-cloud-us-west-2"   # must match a Cloud resource below
  Maximum Part Size = 2M
  Maximum File Size = 2M
  Media Type = "CloudType"
  Archive Device = "/tmp"        # local cache directory for cloud parts
  LabelMedia = yes
  Random Access = yes
  AutomaticMount = yes
  RemovableMedia = no
  AlwaysOpen = no
}
# Cloud providers
# Hostname see https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region

Cloud {
  Name = "S3-cloud-us-west-2"
  Driver = "S3"
  HostName = "s3.us-west-2.amazonaws.com"
  BucketName = "eftechcombr-backup"
  AccessKey = "ABC"                  # placeholder -- replace with a real access key
  SecretKey = "DEF"                  # placeholder -- replace with a real secret key
  Protocol = HTTPS
  UriStyle = "VirtualHost"
  Truncate Cache = "AfterUpload"     # free local cache once parts are uploaded
  Upload = "EachPart"
  Region = "us-west-2"
  MaximumUploadBandwidth = 10MB/s
}
129
docker/etc/samples/bacula-sd-wasabi.conf.sample
Executable file
129
docker/etc/samples/bacula-sd-wasabi.conf.sample
Executable file
@@ -0,0 +1,129 @@
# Define a virtual autochanger for Wasabi Cloud
#
# CloudStorage1 => us-east-2
# CloudStorage2 => eu-central-1
# CloudStorage3 => us-west-1
#
# Change AccessKey and SecretKey on Cloud resource
#

# Autochangers

Autochanger {
  Name = "CloudAutoChanger1"
  Device = CloudStorage1
  Changer Command = ""          # virtual changer: no changer program
  Changer Device = /dev/null    # virtual changer: no physical device
}
#
#Autochanger {
#  Name = "CloudAutoChanger2"
#  Device = CloudStorage2
#  Changer Command = ""
#  Changer Device = /dev/null
#}
#
#Autochanger {
#  Name = "CloudAutoChanger3"
#  Device = CloudStorage3
#  Changer Command = ""
#  Changer Device = /dev/null
#}
#

# Devices

Device {
  Name = "CloudStorage1"
  Device Type = "Cloud"
  Cloud = "WasabiS3-cloud-us-east-2"   # must match a Cloud resource below
  Maximum Part Size = 2M
  Maximum File Size = 2M
  Media Type = "CloudType"
  Archive Device = "/tmp"              # local cache directory for cloud parts
  LabelMedia = yes
  Random Access = yes
  AutomaticMount = yes
  RemovableMedia = no
  AlwaysOpen = no
}
#
#Device {
#  Name = "CloudStorage2"
#  Device Type = "Cloud"
#  Cloud = "WasabiS3-cloud-eu-central-1"
#  Maximum Part Size = 2M
#  Maximum File Size = 2M
#  Media Type = "CloudType"
#  Archive Device = "/tmp"
#  LabelMedia = yes
#  Random Access = yes
#  AutomaticMount = yes
#  RemovableMedia = no
#  AlwaysOpen = no
#}
#
#
#Device {
#  Name = "CloudStorage3"
#  Device Type = "Cloud"
#  Cloud = "WasabiS3-cloud-us-west-1"
#  Maximum Part Size = 2M
#  Maximum File Size = 2M
#  Media Type = "CloudType"
#  Archive Device = "/tmp"
#  LabelMedia = yes
#  Random Access = yes
#  AutomaticMount = yes
#  RemovableMedia = no
#  AlwaysOpen = no
#}
#

# Cloud providers

Cloud {
  Name = "WasabiS3-cloud-us-east-2"
  Driver = "S3"
  HostName = "s3.us-east-2.wasabisys.com"
  BucketName = "eftechcombr-backup"
  AccessKey = "ABC"                  # placeholder -- replace with a real access key
  SecretKey = "DEF"                  # placeholder -- replace with a real secret key
  Protocol = HTTPS
  UriStyle = "VirtualHost"
  Truncate Cache = "AfterUpload"     # free local cache once parts are uploaded
  Upload = "EachPart"
  MaximumUploadBandwidth = 10MB/s
}
#
#Cloud {
#  Name = "WasabiS3-cloud-eu-central-1"
#  Driver = "S3"
#  HostName = "s3.eu-central-1.wasabisys.com"
#  BucketName = "eftechcombr-backup2"
#  AccessKey = "ABC"
#  SecretKey = "DEF"
#  Protocol = HTTPS
#  UriStyle = "VirtualHost"
#  Truncate Cache = "AfterUpload"
#  Upload = "EachPart"
#  MaximumUploadBandwidth = 10MB/s
#}
#
#Cloud {
#  Name = "WasabiS3-cloud-us-west-1"
#  Driver = "S3"
#  HostName = "s3.us-west-1.wasabisys.com"
#  BucketName = "eftechcombr-backup3"
#  AccessKey = "ABC"
#  SecretKey = "DEF"
#  Protocol = HTTPS
#  UriStyle = "VirtualHost"
#  Truncate Cache = "AfterUpload"
#  Upload = "EachPart"
#  MaximumUploadBandwidth = 10MB/s
#}
#
32
docker/etc/samples/clientdefs/ubuntu.conf.sample
Executable file
32
docker/etc/samples/clientdefs/ubuntu.conf.sample
Executable file
@@ -0,0 +1,32 @@
|
||||
# Backup job for the ubuntu client, using the AWS cloud template
Job {
  Name = "BackupUbuntu"
  JobDefs = "DefaultJobToCloudAWS"
  FileSet = "ubuntu-fs"
  Client = "ubuntu-fd"
}

Client {
  Name = ubuntu-fd
  Address = 128.199.45.192
  FDPort = 9102
  Catalog = MyCatalog
  Password = "lE-6z_tYeiRRnNLt_5L4w8KplM9Qb43z7"
  File Retention = 60 days    # how long file records stay in the catalog
  Job Retention = 6 months    # how long job records stay in the catalog
  AutoPrune = yes             # prune expired jobs/files automatically
}

FileSet {
  Name = "ubuntu-fs"
  Include {
    Options {
      signature = MD5
      compression = GZIP
    }
    File = /home
    File = /var/lib/mysql
    File = /root
    File = /share
    File = /etc
  }
}
38
docker/etc/samples/clientdefs/w2k16.conf.sample
Executable file
38
docker/etc/samples/clientdefs/w2k16.conf.sample
Executable file
@@ -0,0 +1,38 @@
|
||||
# Backup job for the Windows 2016 client, using the AWS cloud template
Job {
  Name = "BackupW2k16ToAWS"
  JobDefs = "DefaultJobToCloudAWS"
  Client = "w2k16-fd"
  FileSet = "w2k16-fs"
}

FileSet {
  Name = "w2k16-fs"
  Include {
    Options {
      signature = MD5
      compression = GZIP
      IgnoreCase = yes       # Windows filesystems are case-insensitive
    }
    File = "C:/documents and settings"
    File = "C:/Users"
    File = "C:/share"
  }
  # NOTE(review): bare wildcard patterns in Exclude depend on shell-style
  # matching being enabled -- verify these actually exclude as intended.
  Exclude {
    File = "*.mp3"
    File = "*.mp4"
    File = "*.dll"
    File = "*.exe"
    File = "*.bin"
  }
}

Client {
  Name = w2k16-fd
  Address = 40.71.101.166
  FDPort = 9102
  Catalog = MyCatalog
  Password = "abc123cde456fgh789"
  File Retention = 60 days    # how long file records stay in the catalog
  Job Retention = 6 months    # how long job records stay in the catalog
  AutoPrune = yes             # prune expired jobs/files automatically
}
Reference in New Issue
Block a user