minor changes

root 2023-10-14 17:46:08 +02:00
parent e1d7932ddc
commit c28ee64a95
16 changed files with 19 additions and 2280 deletions

View File

@@ -5,13 +5,13 @@ Director {
WorkingDirectory = "/var/lib/bacula"
PidDirectory = "/run/bacula"
MaximumConcurrentJobs = 20
-Password = "AqBDdsQIToKs5mLh4szqSk99gqLtGTWhy"
+Password = "AqBDdsQIToKs5mLh4szqSk99gqLtGTWhB"
}
Client {
Name = "bacula-fd"
Address = "127.0.0.1"
FdPort = 9102
-Password = "PBhBm7HlheYWqVaYv6zDCLpSAzVjSc_OC"
+Password = "PBhBm7HlheYWqVaYv6zDCLpSAzVjSc_OB"
Catalog = "MyCatalog"
FileRetention = 5184000
JobRetention = 15552000
@@ -47,28 +47,18 @@ Job {
}
Priority = 11
}
Job {
Name = "NAS01_CIFS_CriticalRole_COMPLETE"
Type = "Backup"
Level = "Incremental"
Messages = "Daemon"
Storage = "vchanger-1"
Pool = "File"
Client = "bacula-fd"
Fileset = "NAS01-CriticalRole-Complete"
Schedule = "WeeklyCycle"
JobDefs = "DefaultJob"
}
Storage {
Name = "vchanger-1"
SdPort = 9103
Address = "127.0.0.1"
-Password = "0FbHFYHuylTaTUB9p4ruim8RmBe8u9lmp"
+Password = "0FbHFYHuylTaTUB9p4ruim8RmBe8u9lmB"
Device = "usb-vchanger-1"
MediaType = "File"
Autochanger = "vchanger-1"
MaximumConcurrentJobs = 20
}
Catalog {
Name = "MyCatalog"
Address = "127.0.0.1"
@@ -115,22 +105,6 @@ Fileset {
File = "/var/spool/vchanger"
}
}
Fileset {
Name = "NAS01-CriticalRole-Complete"
Description = "Alles von Critical Role"
EnableVss = no
Include {
File = "/mnt/cifs/nas01/video2/publik/Critcal Role/"
}
}
Fileset {
Name = "NAS01: Wichtige Dateien"
EnableVss = no
Include {
File = "/mnt/cifs/nas01/safe/"
File = "/mnt/cifs/nas01/daten/dokumente/"
}
}
Pool {
Name = "Default"
PoolType = "Backup"
@@ -143,8 +117,8 @@ Pool {
Pool {
Name = "File"
PoolType = "Backup"
-LabelFormat = "Volvchanger-1_0000_"
-MaximumVolumes = 100
+LabelFormat = "Vchanger-1_"
+MaximumVolumes = 20
MaximumVolumeBytes = 53687091200
VolumeRetention = 31536000
AutoPrune = yes
@@ -173,7 +147,7 @@ Messages {
}
Console {
Name = "bacula-mon"
-Password = "_7qJzdGPz2h3_BGeXg_qfTI-mGmOdIP4j"
+Password = "_7qJzdGPz2h3_BGeXg_qfTI-mGmOdIP4B"
CommandAcl = "status"
CommandAcl = ".status"
}
@@ -188,15 +162,3 @@ JobDefs {
WriteBootstrap = "/var/lib/bacula/%c.bsr"
Priority = 10
}
Job {
Type = "Backup"
Level = "Incremental"
Client = "bacula-fd"
Fileset = "NAS01: Wichtige Dateien"
Pool = "File"
Messages = "Daemon"
Schedule = "WeeklyCycle"
JobDefs = "DefaultJob"
Storage = "vchanger-1"
Name = "NAS01_CIFS_Wichtige_Dateien"
}

View File

@@ -16,7 +16,7 @@
#
Director {
Name = bacula-dir
-Password = "PBhBm7HlheYWqVaYv6zDCLpSAzVjSc_OC"
+Password = "PBhBm7HlheYWqVaYv6zDCLpSAzVjSc_OB"
}
#
@@ -25,7 +25,7 @@ Director {
#
Director {
Name = bacula-mon
-Password = "MxQb81ils2APPrjjzln2jj085hPhWI-Je"
+Password = "MxQb81ils2APPrjjzln2jj085hPhWI-JB"
Monitor = yes
}

View File

@@ -29,7 +29,7 @@ Storage { # definition of myself
#
Director {
Name = bacula-dir
-Password = "0FbHFYHuylTaTUB9p4ruim8RmBe8u9lmp"
+Password = "0FbHFYHuylTaTUB9p4ruim8RmBe8u9lmB"
}
#
@@ -38,7 +38,7 @@ Director {
#
Director {
Name = bacula-mon
-Password = "8918u3WssJA-Dw_5suLTOJxKLq4W-vdam"
+Password = "8918u3WssJA-Dw_5suLTOJxKLq4W-vdaB"
Monitor = yes
}

View File

@@ -9,5 +9,5 @@ Director {
Name = bacula-dir
DIRport = 9101
address = localhost
-Password = "AqBDdsQIToKs5mLh4szqSk99gqLtGTWhy"
+Password = "AqBDdsQIToKs5mLh4szqSk99gqLtGTWhB"
}

View File

@@ -4,9 +4,9 @@
# is used only when you install a new Bacula package, and can be
# safely removed at any time.
-DIRPASSWD=AqBDdsQIToKs5mLh4szqSk99gqLtGTWhy
-DIRMPASSWD=_7qJzdGPz2h3_BGeXg_qfTI-mGmOdIP4j
-SDPASSWD=0FbHFYHuylTaTUB9p4ruim8RmBe8u9lmp
-SDMPASSWD=8918u3WssJA-Dw_5suLTOJxKLq4W-vdam
-FDPASSWD=PBhBm7HlheYWqVaYv6zDCLpSAzVjSc_OC
-FDMPASSWD=MxQb81ils2APPrjjzln2jj085hPhWI-Je
+DIRPASSWD=AqBDdsQIToKs5mLh4szqSk99gqLtGTWhB
+DIRMPASSWD=_7qJzdGPz2h3_BGeXg_qfTI-mGmOdIP4B
+SDPASSWD=0FbHFYHuylTaTUB9p4ruim8RmBe8u9lmB
+SDMPASSWD=8918u3WssJA-Dw_5suLTOJxKLq4W-vdaB
+FDPASSWD=PBhBm7HlheYWqVaYv6zDCLpSAzVjSc_OB
+FDMPASSWD=MxQb81ils2APPrjjzln2jj085hPhWI-JB

View File

@@ -1,858 +0,0 @@
#!/bin/sh
#
# baculabackupreport.sh
#
# ------------------------------------------------------------------------------
#
# waa - 20130428 - Initial release.
# Generate basic Bacula backup report.
#
# waa - 20170501 - Change Log moved to bottom of script.
#
# ------------------------------------------------------------------------------
#
# Copyright (c) 2013-2017, William A. Arlofski waa-at-revpol-dot-com
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# ------------------------------------------------------------------------------
# System variables
# ----------------
server="localhost"
admin="root"
bcbin="/usr/sbin/bconsole"
sendmail="/usr/sbin/sendmail"
bcconfig="/etc/bacula/bconsole.conf"
# Database variables
# ------------------
dbtype="postgresql" # Supported options are pgsql/postgresql, mysql, mariadb
db="XXX_DBNAME_XXX"
dbuser="XXX_DBUSER_XXX"
dbbindir="/usr/bin"
# dbpass="-pXXX_DBPASSWORD_XXX" # Uncomment and set db password if one is used
# Formatting variables
# --------------------
html="yes" # Generate HTML emails instead of plain text emails?
boldstatus="yes" # Set <b> tag on Status field (only if html="yes")
colorstatusbg="yes" # Colorize the Status cell's background? (only if html="yes")
jobtableheadercolor="#b0b0b0" # Background color for the HTML table's header
jobtablejobcolor="#f4f4f4" # Background color for the job rows in the HTML table
runningjobcolor="#4d79ff" # Background color of the Status cell for "Running" jobs
goodjobcolor="#00f000" # Background color of the Status cell for "OK" jobs
warnjobcolor="#ffff00" # Background color of the Status cell for "OK" jobs (with warnings - well, actually with 'joberrors')
badjobcolor="#cc3300" # Background color of the Status cell for "bad" jobs
goodjobwitherrcolor="#cccc00" # Background color of the Status cell for "OK" jobs (with errors) - Not implemented due to request
fontfamily="Verdana, Arial, Helvetica, sans-serif" # Set the font family to use for HTML emails
fontsize="16px" # Set the font size to use for email title and print summaries
fontsizejobinfo="12px" # Set the font size to use for job information inside of table
fontsizesumlog="10px" # Set the font size of bad logs and job summaries
printsummary="yes" # Print a short summary after the job list table? (Total Jobs, Files & Bytes)
emailsummaries="no" # Email all job summaries. Be careful with this, it can generate very large emails
emailbadlogs="yes" # Email logs of bad jobs or jobs with JobErrors -ne 0. Be careful, this can generate very large emails.
addsubjecticon="yes" # Prepend the email Subject with UTF-8 icons (a 'checkmark', 'circle with slash', or a bold 'x')
nojobsicon="=?utf-8?Q?=E2=8A=98?=" # utf-8 subject icon when no jobs have been run
goodjobsicon="=?utf-8?Q?=E2=9C=94?=" # utf-8 subject icon when all jobs were "OK"
badjobsicon="=?utf-8?Q?=E2=9C=96?=" # utf-8 subject icon when there are jobs with errors etc
starbadjobids="yes" # Prepend an asterisk "*" to jobids of "bad" jobs
sortfield="EndTime" # Which catalog db field to sort on? Multiple,fields,work,here
sortorder="DESC" # Which direction to sort?
emailtitle="Jobs Run On ${server} in the Past ${1} Hours" # This is prepended at the top of the email, before the jobs table
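# A minimal sketch of how this report might be scheduled, assuming an
# /etc/cron.d entry and this install path (both are illustrative, not part
# of the original script); the single argument is the report window in hours:
#   0 7 * * * root /etc/bacula/scripts/baculabackupreport.sh 24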
# --------------------------------------------------
# Nothing should need to be modified below this line
# --------------------------------------------------
hist=${1}
if [ -z ${hist} ]; then
echo -e "\nUSE:\n$0 <history in hours>\n"
exit 1
fi
if [ ! -e ${bcconfig} ]; then
echo -e "\nThe bconsole configuration file does not seem to be '${bcconfig}'."
echo -e "Please check the setting for the variable 'bcconfig'.\n"
exit 1
fi
if [ ! -x ${bcbin} ]; then
echo -e "\nThe bconsole binary does not seem to be '${bcbin}', or it is not executable."
echo -e "Please check the setting for the variable 'bcbin'.\n"
exit 1
fi
if [ ! -x ${dbbin} ]; then
echo -e "\nThe database client binary does not seem to be '${dbbin}', or it is not executable."
echo -e "Please check the setting for the variable 'dbbin'.\n"
exit 1
fi
if [ ! -x ${sendmail} ]; then
echo -e "\nThe sendmail binary does not seem to be '${sendmail}', or it is not executable."
echo -e "Please check the setting for the variable 'sendmail'.\n"
exit 1
fi
# Build query based on dbtype. Good thing we have "standards" Sigh...
# -------------------------------------------------------------------
case ${dbtype} in
mysql )
queryresult=$(echo "SELECT JobId, Name, StartTime, EndTime, Type, Level, JobStatus, JobFiles, JobBytes, \
TIMEDIFF (EndTime,StartTime) as RunTime, JobErrors \
FROM Job \
WHERE (RealEndTime >= DATE_ADD(NOW(), INTERVAL -${hist} HOUR) OR JobStatus='R') \
ORDER BY ${sortfield} ${sortorder};" \
| ${dbbindir}/mysql -u ${dbuser} ${dbpass} ${db} \
| sed '/^JobId/d' )
;;
pgsql|postgresql )
queryresult=$(echo "SELECT JobId, Name, StartTime, EndTime, Type, Level, JobStatus, JobFiles, JobBytes, \
AGE(EndTime, StartTime) as RunTime, JobErrors \
FROM Job \
WHERE (RealEndTime >= CURRENT_TIMESTAMP(2) - cast('${hist} HOUR' as INTERVAL) OR JobStatus='R') \
ORDER BY ${sortfield} ${sortorder};" \
| ${dbbindir}/psql -U ${dbuser} ${dbpass} ${db} -0t \
| sed -e 's/|//g' -e '/^$/d' )
;;
mariadb )
queryresult=$(echo "SELECT JobId, Name, StartTime, EndTime, Type, Level, JobStatus, JobFiles, JobBytes, \
TIMEDIFF (EndTime,StartTime) as RunTime, JobErrors \
FROM Job \
WHERE (RealEndTime >= DATE_ADD(NOW(), INTERVAL -${hist} HOUR) OR JobStatus='R') \
ORDER BY ${sortfield} ${sortorder};" \
| ${dbbindir}/mysql -u ${dbuser} -p${dbpass} ${db} -s -N )
;;
* )
echo "dbtype of '${dbtype}' is invalid. Please set dbtype variable to 'mysql', 'pgsql', or 'mariadb'"
exit 1
;;
esac
# If we have no jobs to report on, then
# we need to skip the entire awk script
# and some bash stuff and jump all the
# way to about line 673
# -------------------------------------
if [ -z "${queryresult}" ]; then
results="0"
else
results="1"
# Now for some fun with awk
# -------------------------
IFS=" "
msg=$(echo ${queryresult} | \
LC_ALL=en_US.UTF-8 \
awk \
-v html="${html}" \
-v boldstatus="${boldstatus}" \
-v colorstatusbg="${colorstatusbg}" \
-v jobtableheadercolor="${jobtableheadercolor}" \
-v jobtablejobcolor="${jobtablejobcolor}" \
-v runningjobcolor="${runningjobcolor}" \
-v goodjobcolor="${goodjobcolor}" \
-v goodjobwitherrcolor="${goodjobwitherrcolor}" \
-v warnjobcolor="${warnjobcolor}" \
-v badjobcolor="${badjobcolor}" \
-v printsummary="${printsummary}" \
-v starbadjobids="${starbadjobids}" \
'BEGIN { awkerr = 0 }
{star = " " }
# List of possible jobstatus codes
# --------------------------------
# Enter SQL query: SELECT * FROM status;
# +-----------+---------------------------------+----------+
# | jobstatus | jobstatuslong | severity |
# +-----------+---------------------------------+----------+
# | C | Created, not yet running | 15 |
# | R | Running | 15 |
# | B | Blocked | 15 |
# | T | Completed successfully | 10 |
# | E | Terminated with errors | 25 |
# | e | Non-fatal error | 20 |
# | f | Fatal error | 100 |
# | D | Verify found differences | 15 |
# | A | Canceled by user | 90 |
# | F | Waiting for Client | 15 |
# | S | Waiting for Storage daemon | 15 |
# | m | Waiting for new media | |
# | M | Waiting for media mount | 15 |
# | s | Waiting for storage resource | 15 |
# | j | Waiting for job resource | 15 |
# | c | Waiting for client resource | 15 |
# | d | Waiting on maximum jobs | 15 |
# | t | Waiting on start time | 15 |
# | p | Waiting on higher priority jobs | 15 |
# | a | SD despooling attributes | 15 |
# | i | Doing batch insert file records | 15 |
# | I | Incomplete Job | 25 |
# +-----------+---------------------------------+----------+
# Is this job still running?
# If a job is still running, then there will be no "Stop Time"
# fields, so $9 (jobstatus) will be shifted left two columns
# to $7, and we will need to test and then reassign these variables
# Note, this seems to be required for PostgreSQL, but MariaDB and
# MySQL return all zeros for the date and time for running jobs
# -----------------------------------------------------------------
{ if ($7 == "R" && $8 ~ /^[0-9]+/)
{
$13 = $10
$11 = $9
$10 = $8
$9 = $7
$8 = $6
$7 = $5
$5 = "--=Still Running=--"
$6 = ""
}
}
# Assign words to job status code characters
# ------------------------------------------
# First, check to see if we need to generate an HTML email
{ if (html == "yes")
{
# Set default opening and closing tags for status cell
# ----------------------------------------------------
tdo = "<td align=\"center\">"
tdc = "</td>"
# Check to see if the job is "OK" then assign
# the "goodjobcolor" to the cell background
# -------------------------------------------
if ($9 ~ /[T]/ && $13 == 0)
{
if (colorstatusbg == "yes")
# Assign jobs that are OK or Running the goodjobcolor
# ---------------------------------------------------
{
tdo = "<td align=\"center\" bgcolor=\"" goodjobcolor "\">"
}
# Should the status be bolded?
# ----------------------------
if (boldstatus == "yes")
{
tdo=tdo"<b>"
tdc="</b>"tdc
}
status["T"]=tdo"-OK-"tdc
# If it is a good job, but with errors or warnings
# then we will assign the warnjobcolor
# ------------------------------------------------
} else if ($9 == "T" && $13 != 0)
{
if (colorstatusbg == "yes")
# Assign OK jobs with errors the warnjobcolor
# -------------------------------------------
{
tdo = "<td align=\"center\" bgcolor=\"" warnjobcolor "\">"
}
# Should the status be bolded?
# ----------------------------
if (boldstatus == "yes")
{
tdo=tdo"<b>"
tdc="</b>"tdc
}
# Since the "W" jobstatus never appears in the DB, we manually
# assign it here so it can be recognized later on in the script
# -------------------------------------------------------------
$9 = "W"
status["W"]=tdo"OK/Warnings"tdc
# If the job is still running we will
# assign it the runningjobcolor
# -----------------------------------
} else if ($9 == "R")
{
if (colorstatusbg == "yes")
# Assign running jobs the runningjobcolor
# ---------------------------------------
{
tdo = "<td align=\"center\" bgcolor=\"" runningjobcolor "\">"
}
# Should the status be bolded?
# ----------------------------
if (boldstatus == "yes")
{
tdo=tdo"<b>"
tdc="</b>"tdc
}
status["R"]=tdo"Running"tdc
# If it is a bad job, then
# we assign the badjobcolor
# -------------------------
} else if ($9 ~ /[ABDef]/)
{
if (colorstatusbg == "yes")
# Assign bad jobs the badjobcolor
# -------------------------------
{
tdo = "<td align=\"center\" bgcolor=\"" badjobcolor "\">"
}
# Should the status be bolded?
# ----------------------------
if (boldstatus == "yes")
{
tdo=tdo"<b>"
tdc="</b>"tdc
}
status["A"]=tdo"Aborted"tdc
status["D"]=tdo"Verify Diffs"tdc
status["f"]=tdo"Failed"tdc
# If it is a job with warnings or errors, assign the job the warnjobcolor
# I have never seen a "W" status in the db. Jobs that are "OK -- with warnings"
# still have a "T" jobstatus, but the joberrors field is incremented in the db
# -----------------------------------------------------------------------------
} else if ($9 ~ /[EI]/)
{
if (colorstatusbg == "yes")
# Assign job the warnjobcolor
# ---------------------------
{
tdo = "<td align=\"center\" bgcolor=\"" warnjobcolor "\">"
}
# Should the status be bolded?
# ----------------------------
if (boldstatus == "yes")
{
tdo=tdo"<b>"
tdc="</b>"tdc
}
status["E"]=tdo"OK, w/Errors"tdc
status["I"]=tdo"Incomplete"tdc
}
} else
# $html is not "yes" so statuses will be normal text
# --------------------------------------------------
{
status["A"]=" Aborted "
status["D"]=" Verify Diffs "
status["E"]=" OK, w/Errors "
status["f"]=" Failed "
status["I"]=" Incomplete "
status["R"]=" Running "
status["T"]=" -OK- "
# Since the "W" jobstatus never appears in the DB, we manually
# assign it here so it can be recognized later on in the script
# -------------------------------------------------------------
if ($9 == "T" && $13 != 0)
{ $9 = "W"
status["W"]=" OK/Warnings "
}
}
}
# These status characters seem to only
# be Director "in memory" statuses. They
# do not get entered into the DB ever so we
# cannot catch them with the db query we use
# I might have to query the DIR as well as
# the DB to be able to capture these
# ------------------------------------------
{
status["C"]=" Created "
status["B"]=" Blocked "
status["F"]=" Wait FD "
status["S"]=" Wait SD "
status["m"]=" Wait New Media"
status["M"]=" Wait Mount "
status["s"]=" Wait Storage"
status["j"]=" Wait Job "
status["c"]=" Wait Client "
status["d"]=" Wait Max Jobs"
status["t"]="Wait Start Time"
status["p"]=" Wait Priority"
status["a"]=" Despool Attrs"
status["i"]=" Batch Insert "
status["L"]="Spool Last Data"
}
# Assign words to job type code characters
# ----------------------------------------
{
jobtype["D"]="Admin"
jobtype["B"]="Backup"
jobtype["C"]="Copy"
jobtype["c"]="Control"
jobtype["R"]="Restore"
jobtype["V"]="Verify"
}
# Assign words to job level code characters
# -----------------------------------------
{
level["F"]="Full"
level["I"]="Incr"
level["D"]="Diff"
level["f"]="VFul"
level["-"]="----"
}
# Assign words to Verify job level code characters
# ------------------------------------------------
{
level["A"]="VVol"
level["C"]="VCat"
level["V"]="Init"
level["O"]="VV2C"
level["d"]="VD2C"
}
# Check to see if the job did not "T"erminate OK then increment $awkerr,
# and prepend the JobId with an asterisk for quick visual identification
# of problem jobs.
# Need to choose between a positive or negative test of the job status code
# -------------------------------------------------------------------------
# Negative check - testing for non existence of all "good" status codes
# $9 !~ /[TRCFSMmsjcdtpai]/ { awkerr++; $1 = "* "$1 }
# Positive check - testing the existence of all "bad" status codes
# good { if ($9 ~ /[ABDEIWef]/ || $13 != 0) { awkerr++; if (starbadjobids == "yes") { star = "*" } } }
{ if ($9 ~ /[ABDEIef]/) { awkerr++; if (starbadjobids == "yes") { star = "*" } } }
# If the job is an Admin, Copy, Control,
# Restore, or Migration job it will have
# no real "Level", so we set it to "----"
# ---------------------------------------
{ if ($7 ~ /[CcDRm]/) { $8 = "-" } }
# Print out each job, formatted with the following fields:
# JobId Name Status Errors Type Level Files Bytes StartTime EndTime RunTime
# -------------------------------------------------------------------------
{ if (html == "yes")
{ printf("<tr bgcolor=\"%s\"> \
<td align=\"center\">%s%s%s</td> \
<td>%s</td> \
%s \
<td align=\"right\">%'"'"'d</td> \
<td align=\"center\">%s</td> \
<td align=\"center\">%s</td> \
<td align=\"right\">%'"'"'d</td> \
<td align=\"right\">%'"'"'9.2f GB</td> \
<td align=\"center\">%s %s</td> \
<td align=\"center\">%s %s</td> \
<td align=\"center\">%s</td> \
</tr>\n", \
jobtablejobcolor, star, $1, star, $2, status[$9], $13, jobtype[$7], level[$8], $10, $11/(1024*1024*1024), $3, $4, $5, $6, $12);
} else
{ printf("%s %-7s %-14s %16s %'"'"'12d %8s %6s %'"'"'9d %'"'"'9.2f GB %11s %-9s %-10s %-9s %-9s\n", \
star, $1, $2, status[$9], $13, jobtype[$7], level[$8], $10, $11/(1024*1024*1024), $3, $4, $5, $6, $12);
}
}
# Count the number of jobs
# ------------------------
{ totaljobs++ }
# Count the number of files and bytes from all jobs
# -------------------------------------------------
{ files += $10 }
{ bytes += $11 }
# Finally, print out the summaries
# --------------------------------
END {
if (printsummary == "yes")
{ if (html == "yes")
{
printf("</table>")
printf("<br>\
<hr align=\"left\" width=\"25%\">\
<table width=\"25%\">\
<tr><td><b>Total Jobs</b></td><td align=\"center\"><b>:</b></td> <td align=\"right\"><b>%'"'"'15d</b></td></tr>\
<tr><td><b>Total Files</b></td><td align=\"center\"><b>:</b></td> <td align=\"right\"><b>%'"'"'15d</b></td></tr>\
<tr><td><b>Total Bytes</b></td><td align=\"center\"><b>:</b></td> <td align=\"right\"><b>%'"'"'15.2f GB</b></td></tr>\
</table>\
<hr align=\"left\" width=\"25%\">",\
totaljobs, files, bytes/(1024*1024*1024));
} else
printf("\
=================================\n\
Total Jobs : %'"'"'15d\n\
Total Files : %'"'"'15d\n\
Total Bytes : %'"'"'15.2f GB\n\
=================================\n",\
totaljobs, files, bytes/(1024*1024*1024));
} exit awkerr }
')
# Any failed jobs, or jobs with errors?
# -------------------------------------
numbadjobs=$?
# Do we email the job summaries?
# ------------------------------
if [ ${emailsummaries} == "yes" ]; then
# Get all of the jobids from the query results, but
# skip any running jobs because they will not have
# a summary in the DB until the job has terminated
# -------------------------------------------------
alljobids=$(echo "${queryresult}" \
| awk '{ if ($7 != "R") printf("%s ", $1) }')
# If no jobids were returned, skip creating
# the header and looping through zero records
# -------------------------------------------
if [ ! -z "${alljobids}" ]; then
# Generate the header
# -------------------
msg="${msg}"$(
if [ ${html} == "yes" ]; then
echo "<pre>====================================="
else
echo -e "\n\n\n====================================="
fi
echo "Job Summaries of All Terminated Jobs:"
echo "====================================="
)
# Get the job logs from all jobs and just grep for the summary
# ------------------------------------------------------------
for jobid in ${alljobids}; do
msg="${msg}"$(
echo -e "\n--------------"
echo "JobId: ${jobid}"
echo "--------------"
echo "llist joblog jobid=${jobid}" | ${bcbin} -c ${bcconfig} | grep -A31 "^ Build OS:"
echo "======================================================================"
)
done
if [ ${html} == "yes" ]; then
msg=${msg}$(echo "</pre>")
fi
fi
fi
# Do we email the bad job logs with the report?
# ---------------------------------------------
if [ ${emailbadlogs} == "yes" ]; then
# Get the badjobs, or the good jobs with
# JobErrors != 0 from the query results
# --------------------------------------
badjobids=$(echo "${queryresult}" \
| awk '{ if ($9 ~ /[ABDEIef]/ || ($9 == "T" && $13 != 0)) printf("%s ", $1) }')
# If no jobids were returned, skip creating
# the header and looping through zero records
# -------------------------------------------
if [ ! -z "${badjobids}" ]; then
# Generate the header
# -------------------
msg="${msg}"$(
if [ ${html} == "yes" ]; then
echo "<pre>=========================================================="
else
echo -e "\n\n\n=========================================================="
fi
echo "Job logs of failed jobs, or good jobs with JobErrors != 0:"
echo "=========================================================="
)
# Get the bad job's log from the Director via bconsole
# ----------------------------------------------------
for jobid in ${badjobids}; do
msg="${msg}"$(
echo -e "\n--------------"
echo "JobId: ${jobid}"
echo "--------------"
echo "llist joblog jobid=${jobid}" | ${bcbin} -c ${bcconfig}
echo "======================================================================"
)
done
if [ ${html} == "yes" ]; then
msg=${msg}$(echo "</pre>")
fi
fi
fi
# Prepend the header to the $msg output
# -------------------------------------
if [ ${html} == "yes" ]; then
msg="<html>
<head><meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\">
<style>
body {font-family:$fontfamily; font-size:$fontsize;} td {font-size:$fontsizejobinfo;} pre {font-size:$fontsizesumlog;}
</style>
</head>
<body>
<p><u><b>${emailtitle}</b></u></p>
<table width=\"98%\" align=\"center\" border=\"1\" cellpadding=\"2\" cellspacing=\"0\">
<tr bgcolor=\"${jobtableheadercolor}\">
<td align=\"center\"><b>Job ID</b></td>
<td align=\"center\"><b>Job Name</b></td>
<td align=\"center\"><b>Status</b></td>
<td align=\"center\"><b>Errors</b></td>
<td align=\"center\"><b>Type</b></td>
<td align=\"center\"><b>Level</b></td>
<td align=\"center\"><b>Files</b></td>
<td align=\"center\"><b>Bytes</b></td>
<td align=\"center\"><b>Start Time</b></td>
<td align=\"center\"><b>End Time</b></td>
<td align=\"center\"><b>Run Time</b></td>
</tr>
${msg}
</body></html>"
else
msg="
${emailtitle}
------------------------------------------
JobId Job Name Status Errors Type Level Files Bytes Start Time End Time Run Time
----- -------------- --------------- ---------- ------- ----- -------- ----------- ------------------- ------------------- --------
${msg}"
fi
fi # If there were zero results returned from the
# SQL the query, we skip the entire awk script,
# and a lot of other bash stuff that generates
# the email body and we end up here
# -------------------------------------------------
if [ ${results} -eq 0 ]; then
status="No Jobs Have Been Run"
subjecticon="${nojobsicon}"
msg="Nothing to see here..."
else
# Totally unnecessary, but, well... OCD... :)
# --------------------------------------------
if [ ${numbadjobs} -ne 0 ]; then
if [ ${numbadjobs} -eq 1 ]; then
job="Job"
else
job="Jobs"
fi
status="(${numbadjobs}) ${job} with Errors"
subjecticon="${badjobsicon}"
else
status="All Jobs OK"
subjecticon="${goodjobsicon}"
fi
fi
# More silliness
# --------------
if [ ${hist} -eq 1 ]; then
hour="Hour"
else
hour="Hours"
fi
# Email the report
# ----------------
(
echo "To: ${admin}"
echo "From: ${admin}"
if [ ${addsubjecticon} == "yes" ]; then
echo "Subject: ${subjecticon} ${server} - ${status} in the Past ${hist} ${hour}"
else
echo "Subject: ${server} - ${status} in the Past ${hist} ${hour}"
fi
if [ ${html} == "yes" ] && [ ${results} -ne 0 ]; then
echo "Content-Type: text/html"
echo "MIME-Version: 1.0"
fi
echo ""
echo "${msg}"
) | /usr/sbin/sendmail -t
# -------------
# End of script
# -------------
# ----------
# Change Log
# ----------
# ----------------------------
# William A. Arlofski
# Reverse Polarity, LLC
# helpdesk@revpol.com
# http://www.revpol.com/bacula
# ----------------------------
#
#
# 20130428 - Initial release
# Generate and email a basic Bacula backup report
# 1st command line parameter is expected to be a
# number of hours. No real error checking is done
#
# 20131224 - Removed "AND JobStatus='T'" to get all backup jobs
# whether running, or completed with errors etc.
# - Added Several fields "StartTime", "EndTime",
# "JobFiles"
# - Removed "JobType" because we are only selecting
# jobs of type "Backup" (AND Type='B')
# - Modified header lines and printf lines for better
# formatting
#
# 20140107 - Modified script to include more information and cleaned
# up the output formatting
#
# 20150704 - Added ability to work with MySQL or Postgresql
#
# 20150723 - Modified query, removed "Type='B'" clause to catch all jobs,
# including Copy jobs, Admin jobs etc. Modified header, and
# output string to match new query and include job's "Type"
# column.
#
# 20170225 - Rewrote awk script so that a status/summary could be set in
# the email report's subject. eg:
# Subject: "serverName - All Jobs OK in the past x hours"
# Subject: "serverName - x Jobs FAILED in the past y hours"
#
# 20170303 - Fixed output in cases where there are jobs running and there
# is no "Stop Time" for a job.
#
# 20170406 - Some major modifications:
# - Added feature to spell out words instead of using the
# single character codes for Job type, Job Status, and
# Job Level - Including the different levels for Verify
# jobs
# - If a job terminates with an error or warning, then the
# job's line in the output is prepended with an asterisk
# "*" for quick visual identification
# - Modified the outputs of the files and bytes fields to
# include commas when the number is > 999
# - Added totals to the end of the report for Jobs, Files,
# and Bytes
# - Added $sortfield and $sortorder variables to allow output
# to be sorted as desired
# - Set the level of a job to "----" when the level is not
# applicable as in Restore jobs, Admin jobs etc.
#
# 20170408 - Some minor cleanup, and moving things around
# - Added $emailsummaries variable to append the job summaries
# to the end of the report.
# - Added $emailbadlogs variable to append full joblogs of jobs
# which have failed or jobs with errors to the end of the report
# for quick access to investigate failed jobs.
#
# 20170417 - Added some tests for binaries and the bconsole config file
#
# 20170429 - Thanks to Chris Couture for contacting me and submitting a
# working query for MariaDB. I have added 'mariadb' as a new
# dbtype option.
# - Thanks to Chris Couture for the ideas and some code examples
# to create an HTML email.
# - Added $html variable to enable HTML emails.
# - Added $boldstatus variable to make the Status <b>bold</b>
# in HTML emails.
# - Added $colorstatusbg variable to color the background of
# the Status cell in HTML emails.
# - Thanks to Chris Couture for the idea of adding RunTime
# to the email output.
# - Thanks to Chris Couture for the idea of using some unicode
# characters (a 'checkmark' or a bold 'x') in the Subject:
# to quickly see if everything ran OK.
# - Added $addsubjecticon variable to enable/disable the
# prepending of this icon to the Subject.
# - Added $printsummary variable to give the option to print the
# total Jobs, Files, and Bytes after the job listing table.
# - Added $starbadjobids variable to enable/disable prepending
# the bad jobids with an asterisk "*".
# - Modified the way the email is built at the end. Thanks to
# Chris Couture again for this nice idea.
# - Added $jobtableheadercolor, $jobtablejobcolor, $goodjobcolor,
# $goodjobwitherrcolor, $runningjobcolor, $warnjobcolor, and
# $badjobcolor variables to colorize HTML emails
# - Added $emailtitle variable for the title at the top
# - Added $fontfamily, $fontsize, $fontsizejobinfo, and $fontsizesumlog
# variables to allow styling of the HTML output (Thanks again Chris)
# - Added $nojobsicon, $goodjobsicon, and $badjobsicon variables to
# allow setting the prepended utf-8 subject icon character
# - Reformatted things so that if there are no jobs returned by the
# SQL query, the email message sent is nice and short
# - Modified the license to allow for inclusion into Bacula Community,
# and possibly the Enterprise Edition releases
#
# 20170430 - Modified the order of the fields to make more sense
# - Re-aligned the text email so that when an asterisk is pre-pended it
# does not shift the whole line
#
# 20170508 - Re-worked some of the logic so that good jobs (JobStatus="T") which
# have errors will have their status listed as "OK/Warnings", and it
# will not trigger as a "bad job" on the JobErrors, so it will not
# have an asterisk prepended to the JobId in the job listing. I think
# this fix is more of a temporary change in the hopes that a "W"
# status to represent "good jobs with warnings" is implemented in the
# db in the future.
# - Added an "Errors" column to the table to show "JobErrors" from the
# db.
# - Some minor variable name changes and other minor changes
#
# 20170511 - Minor adjustments to the alignment formatting of the text email
# - Minor 'case' changes to a couple levels (Init & VCat)
#
# ------------------------------------------------------------------------------
# I like small tabs. Use :set list in vim to see tabbing etc
# vim: set tabstop=2:softtabstop=2:shiftwidth=2 #

View File

@@ -1,33 +0,0 @@
print fail_time
print my_name
print exename
print exepath
print assert_msg
print db_engine_name
print version
print host_os
print distname
print distver
print host_name
print dist_name
show env TestName
bt
thread apply all bt
f 0
info locals
f 1
info locals
f 2
info locals
f 3
info locals
f 4
info locals
f 5
info locals
f 6
info locals
f 7
info locals
detach
quit

View File

@@ -1,25 +0,0 @@
#!/bin/sh
#
# Copyright (C) 2000-2020 Kern Sibbald
# License: BSD 2-Clause; see file LICENSE-FOSS
#
# This script deletes a catalog dump
#
db_name=${db_name:-bacula}
rm -f /var/lib/bacula/${db_name}.sql
#
# We recommend that you email a copy of the bsr file that was
# made for the Job that runs this script to yourself.
# You just need to put the bsr file in /opt/bacula/bsr/catalog.bsr
# or adjust the script below. Please replace all %xxx% with what
# is appropriate at your site.
#
#${exec_prefix}/bin/bsmtp -h %smtp-server% -s "catalog.bsr" \
# %your-name@company.org% </opt/bacula/bsr/catalog.bsr
#
# The following script will email a summary of the backup jobs that
# were completed in the last 24 hours
#/etc/bacula/scripts/baculabackupreport 24
#

View File

@@ -1,397 +0,0 @@
#!/bin/sh
#
# Bacula interface to virtual autoloader using disk storage
#
# Written by Kern Sibbald
#
# Bacula(R) - The Network Backup Solution
#
# Copyright (C) 2000-2016 Kern Sibbald
#
# The original author of Bacula is Kern Sibbald, with contributions
# from many others, a complete list can be found in the file AUTHORS.
#
# You may use this file and others of this release according to the
# license defined in the LICENSE file, which includes the Affero General
# Public License, v3.0 ("AGPLv3") and some additional permissions and
# terms pursuant to its AGPLv3 Section 7.
#
# This notice must be preserved when any source code is
# conveyed and/or propagated.
#
# Bacula(R) is a registered trademark of Kern Sibbald.
# If you set in your Device resource
#
# Changer Command = "path-to-this-script/disk-changer %c %o %S %a %d"
# you will have the following input to this script:
#
# So Bacula will always call with all the following arguments, even though
# in come cases, not all are used. Note, the Volume name is not always
# included.
#
# disk-changer "changer-device" "command" "slot" "archive-device" "drive-index" "volume"
# $1 $2 $3 $4 $5 $6
#
# By default the autochanger has 10 Volumes and 1 Drive.
#
# Note: For this script to work, you *must* specify
# Device Type = File
# in each of the Devices associated with your AutoChanger resource.
#
# changer-device is the name of a file that overrides the default
# volumes and drives. It may have:
# maxslot=n where n is one based (default 10)
# maxdrive=m where m is zero based (default 1 -- i.e. 2 drives)
#
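# A minimal sketch of such an override file (the values are illustrative;
# the file named as changer-device is simply sourced by this script):
# maxslot=20
# maxdrive=0
#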
# This code can also simulate barcodes. You simply put
# a list of the slots and barcodes in the "base" directory/barcodes.
# See below for the base directory definition. Example of a
# barcodes file:
# /var/bacula/barcodes
# 1:Vol001
# 2:Vol002
# ...
#
# archive-device is the name of the base directory where you want the
# Volumes stored appended with /drive0 for the first drive; /drive1
# for the second drive, ... For example, you might use
# /var/bacula/drive0 Note: you must not have a trailing slash, and
# the string (e.g. /drive0) must be unique, and it must not match
# any other part of the directory name. These restrictions could be
# easily removed by any clever script jockey.
#
# Full example: disk-changer /var/bacula/conf load 1 /var/bacula/drive0 0 TestVol001
#
# The Volumes will be created with names slot1, slot2, slot3, ... maxslot in the
# base directory. In the above example the base directory is /var/bacula.
# However, as with tapes, their Bacula Volume names will be stored inside the
# Volume label. In addition to the Volumes (e.g. /var/bacula/slot1,
# /var/bacula/slot3, ...) this script will create a /var/bacula/loadedn
# file to keep track of what Slot is loaded. You should not change this file.
#
# Modified 8 June 2010 to accept Volume names from the calling program as arg 6.
# In this case, rather than storing the data in slotn, it is stored in the
# Volume name. Note: for this to work, Volume names may not include spaces.
#
wd=/var/lib/bacula
#
# log whats done
#
# to turn on logging, uncomment the following line
#touch $wd/disk-changer.log
#
dbgfile="$wd/disk-changer.log"
debug() {
if test -f $dbgfile; then
echo "`date +\"%Y%m%d-%H:%M:%S\"` $*" >> $dbgfile
fi
}
#
# Create a temporary file
#
make_temp_file() {
TMPFILE=`mktemp -t mtx.XXXXXXXXXX`
if test x${TMPFILE} = x; then
TMPFILE="$wd/disk-changer.$$"
if test -f ${TMPFILE}; then
echo "Temp file security problem on: ${TMPFILE}"
exit 1
fi
fi
}
# check parameter count on commandline
#
check_parm_count() {
pCount=$1
pCountNeed=$2
if test $pCount -lt $pCountNeed; then
echo "usage: disk-changer ctl-device command [slot archive-device drive-index]"
echo " Insufficient number of arguments arguments given."
if test $pCount -lt 2; then
echo " Mimimum usage is first two arguments ..."
else
echo " Command expected $pCountNeed arguments"
fi
exit 1
fi
}
#
# Strip off the final name in order to get the Directory ($dir)
# that we are dealing with.
#
get_dir() {
bn=`basename $device`
dir=`echo "$device" | sed -e s%/$bn%%g`
if [ ! -d $dir ]; then
echo "ERROR: Autochanger directory \"$dir\" does not exist."
echo " You must create it."
exit 1
fi
}
#
# Get the Volume name from the call line, or directly from
# the volslotn information.
#
get_vol() {
havevol=0
debug "vol=$volume"
if test "x$volume" != x && test "x$volume" != "x*NONE*" ; then
debug "touching $dir/$volume"
touch $dir/$volume
echo "$volume" >$dir/volslot${slot}
havevol=1
elif [ -f $dir/volslot${slot} ]; then
volume=`cat $dir/volslot${slot}`
havevol=1
fi
}
# Setup arguments
ctl=$1
cmd="$2"
slot=$3
device=$4
drive=$5
volume=$6
# set defaults
maxdrive=1
maxslot=10
# Pull in conf file
if [ -f $ctl ]; then
. $ctl
fi
# Check for special cases where only 2 arguments are needed,
# all others are a minimum of 5
#
case $2 in
list|listall)
check_parm_count $# 2
;;
slots)
check_parm_count $# 2
;;
transfer)
check_parm_count $# 4
if [ $slot -gt $maxslot ]; then
echo "Slot ($slot) out of range (1-$maxslot)"
debug "Error: Slot ($slot) out of range (1-$maxslot)"
exit 1
fi
;;
*)
check_parm_count $# 5
if [ $drive -gt $maxdrive ]; then
echo "Drive ($drive) out of range (0-$maxdrive)"
debug "Error: Drive ($drive) out of range (0-$maxdrive)"
exit 1
fi
if [ $slot -gt $maxslot ]; then
echo "Slot ($slot) out of range (1-$maxslot)"
debug "Error: Slot ($slot) out of range (1-$maxslot)"
exit 1
fi
;;
esac
debug "Parms: $ctl $cmd $slot $device $drive $volume $havevol"
case $cmd in
unload)
debug "Doing disk -f $ctl unload $slot $device $drive $volume"
get_dir
if [ -f $dir/loaded${drive} ]; then
ld=`cat $dir/loaded${drive}`
else
echo "Storage Element $slot is Already Full"
debug "Unload error: $dir/loaded${drive} is already unloaded"
exit 1
fi
if [ $slot -eq $ld ]; then
echo "0" >$dir/loaded${drive}
unlink $device 2>/dev/null >/dev/null
unlink ${device}.add 2>/dev/null >/dev/null
rm -f ${device} ${device}.add
else
echo "Storage Element $slot is Already Full"
debug "Unload error: $dir/loaded${drive} slot=$ld is already unloaded"
exit 1
fi
;;
load)
debug "Doing disk $ctl load $slot $device $drive $volume"
get_dir
i=0
# Check if slot already in a drive
while [ $i -le $maxdrive ]; do
if [ -f $dir/loaded${i} ]; then
ld=`cat $dir/loaded${i}`
else
ld=0
fi
if [ $ld -eq $slot ]; then
echo "Drive ${i} Full (Storage element ${ld} loaded)"
debug "Load error: Cannot load Slot=${ld} in drive=$drive. Already in drive=${i}"
exit 1
fi
i=`expr $i + 1`
done
# Check if we have a Volume name
get_vol
if [ $havevol -eq 0 ]; then
# check if slot exists
if [ ! -f $dir/slot${slot} ] ; then
echo "source Element Address $slot is Empty"
debug "Load error: source Element Address $slot is Empty"
exit 1
fi
fi
if [ -f $dir/loaded${drive} ]; then
ld=`cat $dir/loaded${drive}`
else
ld=0
fi
if [ $ld -ne 0 ]; then
echo "Drive ${drive} Full (Storage element ${ld} loaded)"
echo "Load error: Drive ${drive} Full (Storage element ${ld} loaded)"
exit 1
fi
echo "0" >$dir/loaded${drive}
unlink $device 2>/dev/null >/dev/null
unlink ${device}.add 2>/dev/null >/dev/null
rm -f ${device} ${device}.add
if [ $havevol -ne 0 ]; then
ln -s $dir/$volume $device
ln -s $dir/${volume}.add ${device}.add
rtn=$?
else
ln -s $dir/slot${slot} $device
ln -s $dir/slot${slot}.add ${device}.add
rtn=$?
fi
if [ $rtn -eq 0 ]; then
echo $slot >$dir/loaded${drive}
fi
exit $rtn
;;
list)
debug "Doing disk -f $ctl -- to list volumes"
get_dir
if [ -f $dir/barcodes ]; then
cat $dir/barcodes
else
i=1
while [ $i -le $maxslot ]; do
slot=$i
volume=
get_vol
if [ $havevol -eq 0 ]; then
echo "$i:"
else
echo "$i:$volume"
fi
i=`expr $i + 1`
done
fi
exit 0
;;
listall)
# ***FIXME*** must add new Volume stuff
make_temp_file
debug "Doing disk -f $ctl -- to list volumes"
get_dir
if [ ! -f $dir/barcodes ]; then
exit 0
fi
# we print drive content seen by autochanger
# and we also remove loaded media from the barcode list
i=0
while [ $i -le $maxdrive ]; do
if [ -f $dir/loaded${i} ]; then
ld=`cat $dir/loaded${i}`
v=`awk -F: "/^$ld:/"' { print $2 }' $dir/barcodes`
echo "D:$i:F:$ld:$v"
echo "^$ld:" >> $TMPFILE
fi
i=`expr $i + 1`
done
# Empty slots are not in barcodes file
# When we detect a gap, we print missing rows as empty
# At the end, we fill the gap between the last entry and maxslot
grep -v -f $TMPFILE $dir/barcodes | sort -n | \
perl -ne 'BEGIN { $cur=1 }
if (/(\d+):(.+)?/) {
if ($cur == $1) {
print "S:$1:F:$2\n"
} else {
while ($cur < $1) {
print "S:$cur:E\n";
$cur++;
}
}
$cur++;
}
END { while ($cur < '"$maxslot"') { print "S:$cur:E\n"; $cur++; } } '
rm -f $TMPFILE
exit 0
;;
transfer)
# ***FIXME*** must add new Volume stuff
get_dir
make_temp_file
slotdest=$device
if [ -f $dir/slot${slotdest} ]; then
echo "destination Element Address $slotdest is Full"
exit 1
fi
if [ ! -f $dir/slot${slot} ] ; then
echo "source Element Address $slot is Empty"
exit 1
fi
echo "Transfering $slot to $slotdest"
mv $dir/slot${slot} $dir/slot{$slotdest}
mv $dir/slot${slot}.add $dir/slot{$slotdest}.add
if [ -f $dir/barcodes ]; then
sed "s/^$slot:/$slotdest:/" > $TMPFILE
sort -n $TMPFILE > $dir/barcodes
fi
exit 0
;;
loaded)
debug "Doing disk -f $ctl $drive -- to find what is loaded"
get_dir
if [ -f $dir/loaded${drive} ]; then
a=`cat $dir/loaded${drive}`
else
a="0"
fi
debug "Loaded: drive=$drive is $a"
echo $a
exit
;;
slots)
debug "Doing disk -f $ctl -- to get count of slots"
echo $maxslot
;;
esac

View File

@@ -1,84 +0,0 @@
#!/bin/sh
#
# Copyright (C) 2000-2018 Kern Sibbald
# License: BSD 2-Clause; see file LICENSE-FOSS
#
# Bacula interface to get worm status of tape
#
# isworm %l (control device name)
#
# Typical output:
# sdparm --page=0x1D -f /dev/sg0
# /dev/st0: HP Ultrium 5-SCSI I5AW [tape]
# Medium configuration (SSC) mode page:
# WORMM 1 [cha: n, def: 1, sav: 1]
# WMLR 1 [cha: n, def: 1, sav: 1]
# WMFR 2 [cha: n, def: 2, sav: 2]
#
# Where WORMM is worm mode
# WMLR is worm mode label restrictions
# 0 - No blocks can be overwritten
# 1 - Some types of format labels may not be overwritten
# 2 - All format labels can be overwritten
# WMFR is worm mode filemark restrictions
# 0-1 - Reserved
# 2 - Any number of filemarks immediately preceding EOD can be
# overwritten except file mark closest to BOP (beginning of
# partition).
# 3 - Any number of filemarks immediately preceding the EOD
# can be overwritten
# 4-FF - Reserved
#
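# A hypothetical manual test, reusing the control device from the sample
# output above (the device name is illustrative):
# ./isworm /dev/sg0
#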
if [ x$1 = x ] ; then
echo "First argument missing. Must be device control name."
exit 1
fi
sdparm=`which sdparm`
if [ x${sdparm} = x ] ; then
echo "sdparm program not found, but is required."
exit 0
fi
#
# This should be the correct way to determine if the tape is WORM
# but it does not work for mhvtl. Comment out the next 5 lines
# and the code that follows will detect correctly on mhvtl.
#
worm=`$sdparm --page=0x1D -f $1 |grep " *WORMM"|cut -b12-16|sed "s:^ *::"`
if [ $? = 0 ] ; then
echo $worm
exit 0
fi
tapeinfo=`which tapeinfo`
if [ x${tapeinfo} = x ] ; then
echo "tapeinfo program not found, but is required."
exit 1
fi
#
# Unfortunately IBM and HP handle the Medium Type differently,
# so we detect the vendor and get the appropriate Worm flag.
#
vendor=`$tapeinfo -f $1|grep "^Vendor ID:"|cut -b13-15`
if [ x$vendor = xHP ] ; then
worm=`$tapeinfo -f $1|grep "^Medium Type: 0x"|cut -b16-16`
echo $worm
exit 0
fi
if [ x$vendor = xIBM ] ; then
worm=`$tapeinfo -f $1|grep "^Medium Type: 0x"|cut -b17-17`
if [ x$worm = xc ]; then
echo "1"
exit 0
fi
if [ x$worm = xC ]; then
echo "1"
exit 0
fi
fi
echo "0"
exit 0

View File

@@ -1,111 +0,0 @@
#!/bin/sh
#
# Copyright (C) 2000-2020 Kern Sibbald
# License: BSD 2-Clause; see file LICENSE-FOSS
#
# This script dumps your Bacula catalog in ASCII format
# It works for MySQL, SQLite, and PostgreSQL
#
# $1 is the name of the database to be backed up and the name
# of the output file (default = bacula).
# $2 is the user name with which to access the database
# (default = bacula).
# $3 is the password with which to access the database or "" if no password
# (default ""). WARNING!!! Passing the password via the command line is
# insecure and should not be used since any user can display the command
# line arguments and the environment using ps. Please consult your
# MySQL or PostgreSQL manual for secure methods of specifying the
# password.
# $4 is the host on which the database is located
# (default "")
# $5 is the type of database
#
#
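# A hypothetical invocation relying on the defaults described above
# (the database and user names are illustrative):
# make_catalog_backup bacula bacula
#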
default_db_type=postgresql
user=${2:-bacula}
#
# See if the fifth argument is a valid backend name.
# If so the user overrides the default database backend.
#
if [ $# -ge 5 ]; then
case $5 in
sqlite3)
db_type=$5
;;
mysql)
db_type=$5
;;
postgresql)
db_type=$5
;;
ingres)
db_type=$5
;;
*)
;;
esac
fi
#
# If no new db_type is given, use the default db_type.
#
if [ -z "${db_type}" ]; then
db_type="${default_db_type}"
fi
cd /var/lib/bacula
rm -f $1.sql
case ${db_type} in
sqlite3)
BINDIR=/usr/bin
echo ".dump" | ${BINDIR}/sqlite3 $1.db >$1.sql
;;
mysql)
BINDIR=/usr/bin
if test $# -gt 2; then
MYSQLPASSWORD=" --password=$3"
else
MYSQLPASSWORD=""
fi
if test $# -gt 3; then
MYSQLHOST=" --host=$4"
else
MYSQLHOST=""
fi
${BINDIR}/mysqldump -u ${user}${MYSQLPASSWORD}${MYSQLHOST} -f --opt $1 >$1.sql
;;
postgresql)
BINDIR=/usr/bin
if test $# -gt 2; then
PGPASSWORD=$3
export PGPASSWORD
fi
if test $# -gt 3; then
PGHOST=" --host=$4"
else
PGHOST=""
fi
# you could also add --compress for compression. See man pg_dump
exec ${BINDIR}/pg_dump -c $PGHOST -U $user $1 >$1.sql
;;
esac
#
# To read back a MySQL database use:
# cd /var/lib/bacula
# rm -f ${BINDIR}/../var/bacula/*
# mysql <bacula.sql
#
# To read back a SQLite database use:
# cd /var/lib/bacula
# rm -f bacula.db
# sqlite bacula.db <bacula.sql
#
# To read back a PostgreSQL database use:
# cd /var/lib/bacula
# dropdb bacula
# createdb bacula -T template0 -E SQL_ASCII
# psql bacula <bacula.sql
#

View File

@@ -1,198 +0,0 @@
#!/usr/bin/perl
#
# Author: Eric Bollengier, Copyright, 2006-2017
# License: BSD 2-Clause; see file LICENSE-FOSS
use strict;
=head1 SCRIPT
This script dumps your Bacula catalog in ASCII format
It works for MySQL, SQLite, and PostgreSQL
=head1 USAGE
make_catalog_backup.pl [-m] MyCatalog
=head1 LICENSE
Author: Eric Bollengier, 2010
License: BSD 2-Clause; see file LICENSE-FOSS
=cut
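# Hypothetical command lines matching the USAGE above (the catalog name is
# illustrative; -m runs an analyse pass instead of a dump, see below):
#   make_catalog_backup.pl MyCatalog
#   make_catalog_backup.pl -m MyCatalog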
my $cat = shift or die "Usage: $0 [-m] catalogname";
my $mode = "dump";
if ($cat eq '-m') {
$mode = "analyse";
$cat = shift or die "Usage: $0 [-m] catalogname";
}
my $dir_conf='/usr/sbin/dbcheck -B -c /etc/bacula/bacula-dir.conf';
my $wd = "/var/lib/bacula";
sub dump_sqlite3
{
my %args = @_;
exec("echo .dump | sqlite3 '$wd/$args{db_name}.db' > '$wd/$args{db_name}.sql'");
print "Error while executing sqlite dump $!\n";
return 1;
}
# TODO: use just ENV and drop the pg_service.conf file
sub setup_env_pgsql
{
my %args = @_;
my $username = getpwuid $ENV{'UID'};
umask(0077);
if ($args{db_address}) {
$ENV{PGHOST}=$args{db_address};
}
if ($args{db_socket}) {
$ENV{PGHOST}=$args{db_socket};
}
if ($args{db_port}) {
$ENV{PGPORT}=$args{db_port};
}
if ($args{db_user}) {
$ENV{PGUSER}=$args{db_user};
}
if ($args{db_password}) {
$ENV{PGPASSWORD}=$args{db_password};
}
$ENV{PGDATABASE}=$args{db_name};
system("echo '\\q' | HOME='$wd' psql") == 0 or die "$username doesn't have access to the catalog database\n";
}
sub dump_pgsql
{
my %args = @_;
setup_env_pgsql(%args);
exec("HOME='$wd' pg_dump -c > '$wd/$args{db_name}.sql'");
print "Error while executing postgres dump $!\n";
return 1; # in case of error
}
sub analyse_pgsql
{
my %args = @_;
setup_env_pgsql(%args);
my @output =`LANG=C HOME='$wd' vacuumdb -z 2>&1`;
my $exitcode = $? >> 8;
print grep { !/^WARNING:\s+skipping\s\"(pg_|sql_)/ } @output;
if ($exitcode != 0) {
print "Error while executing postgres analyse. Exitcode=$exitcode\n";
}
return $exitcode;
}
sub setup_env_mysql
{
my %args = @_;
umask(0077);
unlink("$wd/.my.cnf");
open(MY, ">$wd/.my.cnf")
or die "Can't open $wd/.my.cnf for writing $@";
$args{db_address} = $args{db_address} || "localhost";
my $addr = "host=$args{db_address}";
if ($args{db_socket}) { # a unix socket is faster than a net socket
$addr = "socket=\"$args{db_socket}\"";
}
my $mode = $args{mode} || 'client';
print MY "[$mode]
$addr
user=\"$args{db_user}\"
password=\"$args{db_password}\"
";
if ($args{db_port}) {
print MY "port=$args{db_port}\n";
}
close(MY);
}
sub dump_mysql
{
my %args = @_;
setup_env_mysql(%args);
exec("HOME='$wd' mysqldump -f --opt $args{db_name} > '$wd/$args{db_name}.sql'");
print "Error while executing mysql dump $!\n";
return 1;
}
sub analyse_mysql
{
my %args = @_;
$args{mode} = 'mysqlcheck';
setup_env_mysql(%args);
exec("HOME='$wd' mysqlcheck -a $args{db_name}");
print "Error while executing mysql analyse $!\n";
return 1;
}
sub handle_catalog
{
my ($mode, %args) = @_;
if (exists $args{working_dir} and $wd ne $args{working_dir}) {
$wd = $args{working_dir};
}
if ($args{db_type} eq 'SQLite3') {
$ENV{PATH}="/usr/bin:$ENV{PATH}";
if ($mode eq 'dump') {
dump_sqlite3(%args);
}
} elsif ($args{db_type} eq 'PostgreSQL') {
$ENV{PATH}="/usr/bin:$ENV{PATH}";
if ($mode eq 'dump') {
dump_pgsql(%args);
} else {
analyse_pgsql(%args);
}
} elsif ($args{db_type} eq 'MySQL') {
$ENV{PATH}="/usr/bin:$ENV{PATH}";
if ($mode eq 'dump') {
dump_mysql(%args);
} else {
analyse_mysql(%args);
}
} else {
die "This database type isn't supported";
}
}
open(FP, "$dir_conf -C '$cat'|") or die "Can't get catalog information $@";
# catalog=MyCatalog
# db_type=SQLite
# db_name=regress
# db_driver=
# db_user=regress
# db_password=
# db_address=
# db_port=0
# db_socket=
my %cfg;
while(my $l = <FP>)
{
if ($l =~ /catalog=(.+)/) {
if (exists $cfg{catalog} and $cfg{catalog} eq $cat) {
exit handle_catalog($mode, %cfg);
}
%cfg = (); # reset
}
if ($l =~ /(\w+)=(.+)/) {
$cfg{$1}=$2;
}
}
if (exists $cfg{catalog} and $cfg{catalog} eq $cat) {
exit handle_catalog($mode, %cfg);
}
print "Can't find your catalog ($cat) in director configuration\n";
exit 1;

View File

@@ -1,353 +0,0 @@
#!/bin/sh
#
# Bacula(R) - The Network Backup Solution
#
# Copyright (C) 2000-2016 Kern Sibbald
#
# The original author of Bacula is Kern Sibbald, with contributions
# from many others, a complete list can be found in the file AUTHORS.
#
# You may use this file and others of this release according to the
# license defined in the LICENSE file, which includes the Affero General
# Public License, v3.0 ("AGPLv3") and some additional permissions and
# terms pursuant to its AGPLv3 Section 7.
#
# This notice must be preserved when any source code is
# conveyed and/or propagated.
#
# Bacula(R) is a registered trademark of Kern Sibbald.
#
# If you set in your Device resource
#
# Changer Command = "path-to-this-script/mtx-changer %c %o %S %a %d"
# you will have the following input to this script:
#
# So Bacula will always call with all the following arguments, even though
# in some cases, not all are used.
#
# mtx-changer "changer-device" "command" "slot" "archive-device" "drive-index"
# $1 $2 $3 $4 $5
#
# for example:
#
# mtx-changer /dev/sg0 load 1 /dev/nst0 0 (on a Linux system)
#
# will request to load the first cartridge into drive 0, where
# the SCSI control channel is /dev/sg0, and the read/write device
# is /dev/nst0.
#
# The commands are:
# Command Function
# unload unload a given slot
# load load a given slot
# loaded which slot is loaded?
# list list Volume names (requires barcode reader)
# slots how many slots total?
# listall list all info
# transfer
#
# Slots are numbered from 1 ...
# Drives are numbered from 0 ...
#
#
# If you need to do an offline, refer to the drive as $4
# e.g. mt -f $4 offline
#
# Many changers need an offline after the unload. Also many
# changers need a sleep 60 after the mtx load.
#
# N.B. If you change the script, take care to return either
# the mtx exit code or a 0. If the script exits with a non-zero
# exit code, Bacula will assume the request failed.
#
# myversion must be the same as version in mtx-changer.conf
myversion=2
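# A minimal, illustrative sketch of what mtx-changer.conf might contain for
# this script (the list is not complete and the values are examples; only
# "version" has to match myversion above):
# version=2
# debug_log=0
# debug_level=10
# offline=0
# offline_sleep=0
# load_sleep=0
# inventory=0
# vxa_packetloader=0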
# source our conf file
if test ! -f /etc/bacula/scripts/mtx-changer.conf ; then
echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
echo "ERROR: /etc/bacula/scripts/mtx-changer.conf file not found!!!!"
echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
exit 1
fi
. /etc/bacula/scripts/mtx-changer.conf
if test "${version}" != "${myversion}" ; then
echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
echo "ERROR: /etc/bacula/scripts/mtx-changer.conf has wrong version. Wanted ${myversion}, got ${version} !!!"
echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
exit 1
fi
MTX=/usr/sbin/mtx
if test ${debug_log} -ne 0 ; then
touch /var/lib/bacula/mtx.log
fi
dbgfile="/var/lib/bacula/mtx.log"
debug() {
if test -f $dbgfile -a ${debug_level} -ge $1; then
echo "`date +%m%d-%H:%M:%S.%N|cut -c1-16` ${chgr_id} $2" >> $dbgfile
fi
}
#
# Create a temporary file
#
make_temp_file() {
TMPFILE=`mktemp /var/lib/bacula/mtx.XXXXXXXXXX`
if test x${TMPFILE} = x; then
TMPFILE="/var/lib/bacula/mtx.$$"
if test -f ${TMPFILE}; then
echo "ERROR: Temp file security problem on: ${TMPFILE}"
exit 1
fi
fi
}
#
# Create a temporary file for stderr
#
# Note, this file is used because sometime mtx emits
# unexpected error messages followed by the output
# expected during success.
# So we separate STDOUT and STDERR in
# certain of the mtx commands. The contents of STDERR
# is then printed after the STDOUT produced by mtx
# thus we sometimes get better changer results.
#
make_err_file() {
ERRFILE=`mktemp /var/lib/bacula/mtx.err.XXXXXXXXXX`
if test x${ERRFILE} = x; then
ERRFILE="/var/lib/bacula/mtx.err.$$"
if test -f ${ERRFILE}; then
echo "ERROR: Temp file security problem on: ${ERRFILE}"
exit 1
fi
fi
}
#
# The purpose of this function is to wait a maximum
# time for the drive. It will
# return as soon as the drive is ready, or after
# waiting a maximum of 300 seconds.
# Note, this is very system dependent, so if you are
# not running on Linux, you will probably need to
# re-write it, or at least change the grep target.
# We've attempted to get the appropriate OS grep targets
# in the code at the top of this script.
#
wait_for_drive() {
i=0
while [ $i -le 300 ]; do # Wait max 300 seconds
if mt -f $1 status 2>&1 | grep "${ready}" >/dev/null 2>&1; then
break
fi
debug $dbglvl "Device $1 - not ready, retrying..."
sleep 1
i=`expr $i + 1`
done
}
# check parameter count on commandline
#
check_parm_count() {
pCount=$1
pCountNeed=$2
if test $pCount -lt $pCountNeed; then
echo "ERROR: usage: mtx-changer ctl-device command [slot archive-device drive-index]"
echo " Insufficient number of arguments given."
if test $pCount -lt 2; then
echo " Mimimum usage is first two arguments ..."
else
echo " Command expected $pCountNeed arguments"
fi
exit 1
fi
}
# Check for special cases where only 2 arguments are needed,
# all others are a minimum of 5
#
case $2 in
list|listall)
check_parm_count $# 2
;;
slots)
check_parm_count $# 2
;;
transfer)
check_parm_count $# 4
;;
*)
check_parm_count $# 5
;;
esac
# Setup arguments
ctl=$1
cmd="$2"
slot=$3
device=$4
drive=$5
debug $dbglvl "Parms: $ctl $cmd $slot $device $drive"
case $cmd in
unload)
if test ${offline} -eq 1 ; then
mt -f $device offline
fi
if test ${offline_sleep} -ne 0 ; then
sleep ${offline_sleep}
fi
make_err_file
for i in 1 2 3 4 5 ; do
debug $idbglvl "Doing mtx -f $ctl unload slot=$slot drv=$drive"
${MTX} -f $ctl unload $slot $drive 2>${ERRFILE}
rtn=$?
if test $rtn -eq 0 ; then
break
fi
grep "Error Code=" ${ERRFILE} 2>/dev/null 1>/dev/null
if test $? -ne 0 ; then
break
fi
sleep $i
done
cat ${ERRFILE}
rm -f ${ERRFILE} >/dev/null 2>&1
if test $rtn -ne 0 ; then
debug $idbglvl "FAIL: mtx -f $ctl unload slot=$slot drv=$drive"
fi
exit $rtn
;;
load)
make_err_file
for i in 1 2 3 4 5 ; do
debug $idbglvl "Doing mtx -f $ctl load slot=$slot drv=$drive"
${MTX} -f $ctl load $slot $drive 2>${ERRFILE}
rtn=$?
if test $rtn -eq 0 ; then
break
fi
grep "Error Code=" ${ERRFILE} 2>/dev/null 1>/dev/null
if test $? -ne 0 ; then
break
fi
sleep $i
done
if test ${load_sleep} -ne 0 ; then
sleep ${load_sleep}
fi
wait_for_drive $device
cat ${ERRFILE}
rm -f ${ERRFILE} >/dev/null 2>&1
if test $rtn -ne 0 ; then
debug $idbglvl "FAIL: mtx -f $ctl load slot=$slot drv=$drive"
fi
exit $rtn
;;
list)
make_temp_file
if test ${inventory} -ne 0 ; then
${MTX} -f $ctl inventory
fi
debug $dbglvl "Doing mtx -f $ctl list"
${MTX} -f $ctl status >${TMPFILE}
rtn=$?
if test ${vxa_packetloader} -ne 0 ; then
cat ${TMPFILE} | grep " *Storage Element [0-9]*:.*Full" | sed "s/ Storage Element //" | sed "s/Full :VolumeTag=//"
else
cat ${TMPFILE} | grep " Storage Element [0-9]*:.*Full" | awk "{print \$3 \$4}" | sed "s/Full *\(:VolumeTag=\)*//"
fi
cat ${TMPFILE} | grep "^Data Transfer Element [0-9]*:Full (Storage Element [0-9]" | awk '{printf "%s:%s\n",$7,$10}'
rm -f ${TMPFILE} >/dev/null 2>&1
if test $rtn -ne 0 ; then
debug $idbglvl "FAIL: mtx -f $ctl list"
fi
exit $rtn
;;
listall)
# Drive content: D:Drive num:F:Slot loaded:Volume Name
# D:0:F:2:vol2 or D:Drive num:E
# D:1:F:42:vol42
# D:3:E
#
# Slot content:
# S:1:F:vol1 S:Slot num:F:Volume Name
# S:2:E or S:Slot num:E
# S:3:F:vol4
#
# Import/Export tray slots:
# I:10:F:vol10 I:Slot num:F:Volume Name
# I:11:E or I:Slot num:E
# I:12:F:vol40
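#
# For example, to check by hand which volume sits in slot 3, one can
# filter the listall output (the changer device below is only an
# example):
#
#   ./mtx-changer /dev/sg0 listall | grep "^S:3:"
#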
make_temp_file
if test ${inventory} -ne 0 ; then
${MTX} -f $ctl inventory
fi
debug $dbglvl "Doing mtx -f $ctl -- to list all"
${MTX} -f $ctl status >${TMPFILE}
rtn=$?
# This can be converted to awk+sed+cut; see the commented commands below
perl -ne '
/Data Transfer Element (\d+):Empty/ && print "D:$1:E\n";
/Data Transfer Element (\d+):Full \(Storage Element (\d+) Loaded\)(:VolumeTag =\s*(.+))?/ && print "D:$1:F:$2:$4\n";
/Storage Element (\d+):Empty/ && print "S:$1:E\n";
/Storage Element (\d+):Full( :VolumeTag=(.+))?/ && print "S:$1:F:$3\n";
/Storage Element (\d+) IMPORT.EXPORT:Empty/ && print "I:$1:E\n";
/Storage Element (\d+) IMPORT.EXPORT:Full( :VolumeTag=(.+))?/ && print "I:$1:F:$3\n";' ${TMPFILE}
# If perl isn't installed, you can use these commands instead
#cat ${TMPFILE} | grep "Data Transfer Element" | awk "{print \"D:\"\$4 \$7 \$9 \$10}" | sed "s/=/:/" | sed "s/Full/F:/" | sed "s/Empty/E/"
#cat ${TMPFILE} | grep -v "Data Transfer Element" | grep "Storage Element" | grep -v "IMPORT/EXPORT" | awk "{print \"S:\"\$3 \$4 \$5}" | sed "s/IMPORT\/EXPORT//" | sed "s/Full *:VolumeTag=/F:/" | sed "s/Empty/E/"
#cat ${TMPFILE} | grep -v "Data Transfer Element" | grep "Storage Element" | grep "IMPORT/EXPORT" | awk "{print \"I:\"\$3 \$4 \$5}" | sed "s/IMPORT\/EXPORT//" | sed "s/Full *:VolumeTag=/F:/" | sed "s/Empty/E/"
rm -f ${TMPFILE} >/dev/null 2>&1
exit $rtn
;;
transfer)
slotdest=$device
debug $dbglvl "Doing transfer from $slot to $slotdest"
${MTX} -f $ctl transfer $slot $slotdest
rtn=$?
if test $rtn -ne 0 ; then
debug $idbglvl "FAIL: mtx -f $ctl transfer from=$slot to=$slotdest"
fi
exit $rtn
;;
loaded)
make_temp_file
debug $idbglvl "Doing mtx -f $ctl $drive -- to find what is loaded"
${MTX} -f $ctl status >${TMPFILE}
rtn=$?
cat ${TMPFILE} | grep "^Data Transfer Element $drive:Full" | awk "{print \$7}"
cat ${TMPFILE} | grep "^Data Transfer Element $drive:Empty" | awk "{print 0}"
rm -f ${TMPFILE} >/dev/null 2>&1
if test $rtn -ne 0 ; then
debug $idbglvl "FAIL: mtx -f $ctl loaded drv=$drive"
fi
exit $rtn
;;
slots)
debug $dbglvl "Doing mtx -f $ctl -- to get count of slots"
${MTX} -f $ctl status | grep " *Storage Changer" | awk "{print \$5}"
rtn=$?
if test $rtn -ne 0 ; then
debug $idbglvl "FAIL: mtx -f $ctl slots"
fi
;;
esac


@ -1,89 +0,0 @@
#
# Copyright (C) 2000-2015 Kern Sibbald
# License: BSD 2-Clause; see file LICENSE-FOSS
#
#
# This file is sourced by the mtx-changer script every time it runs.
# You can put your site customization here, and when you do an
# upgrade, the process should not modify this file. Thus you
# preserve your mtx-changer configuration.
#
# We update the version when an incompatible change
# to mtx-changer or this conf file is made, such as
# adding a new required variable.
version=2
# Set to 1 if you want to do offline before unload
offline=0
# Set to amount of time in seconds to wait after an offline
offline_sleep=0
# Set to amount of time in seconds to wait after a load
load_sleep=0
# Set to 1 to do an inventory before a status. Not normally needed.
inventory=0
# If you have a VXA PacketLoader, it might display a different
# Storage Element line, so try setting the following to 1
vxa_packetloader=0
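#
# For example, a changer that needs an explicit offline and a short
# settle time after each unload might be configured with (values are
# illustrative only -- tune them to your hardware):
#   offline=1
#   offline_sleep=15
#   load_sleep=60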
#
# Debug logging
#
# If you have multiple SDs, set this differently for each one
# so you know which message comes from which one. This can
# be any string, and will appear in each debug message just
# after the time stamp.
chgr_id=0
# Set to 1 if you want debug info written to a log
debug_log=0
# Set to debug level you want to see
# 0 is off
# 10 is important events (load, unload, loaded)
# 100 is everything
# Note debug_log must be set to 1 for anything to be generated
#
debug_level=10
# Debug levels by importance
# Normally you do not need to change this
dbglvl=100
# More important messages
idbglvl=10
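#
# For example, to trace load/unload problems on a host that runs two
# Storage Daemons, a debugging setup might look like this (example
# values only):
#   chgr_id=sd-lto1
#   debug_log=1
#   debug_level=100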
#
# mt status output
# SunOS No Additional Sense
# FreeBSD Current Driver State: at rest.
# Linux ONLINE
# Note Debian has a different mt than the standard Linux version.
# When no tape is in the drive it waits 2 minutes.
# When a tape is in the drive, it prints user-unfriendly output.
# Note, with Ubuntu Gutsy (8.04), there are two versions of mt,
# so we attempt to figure out which one.
#
OS=`uname`
case ${OS} in
SunOS)
ready="No Additional Sense"
;;
FreeBSD)
ready="Current Driver State: at rest."
;;
Linux)
ready="ONLINE"
if test -f /etc/debian_version ; then
mt --version|grep "mt-st" >/dev/null 2>&1
if test $? -eq 1 ; then
ready="drive status"
fi
fi
;;
esac
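#
# To verify the string your mt reports when the drive is ready, load a
# tape and run (the device name is only an example):
#   mt -f /dev/nst0 status
# then make sure the "ready" value above matches a line that appears
# only when the drive is ready.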


@ -1,7 +0,0 @@
#
# See the file <bacula-source>/examples/sample-query.sql
# for some sample queries.
#
# 1
:The default file is empty, see sample-query.sql (in /opt/bacula/scripts or <bacula-source>/examples) for samples
SELECT 'See sample-query.sql (in /opt/bacula/scripts or <bacula-source>/examples) for samples' AS Info;


@ -1,68 +0,0 @@
#!/bin/sh
#
# Copyright (C) 2000-2016 Kern Sibbald
# License: BSD 2-Clause; see file LICENSE-FOSS
#
# Bacula interface to tapeinfo to get tape alerts
#
# tapealert %l (control device name)
#
# Note: you must have in your SD Device resource:
# Alert Command = /full-path/tapealert %l
# Control Device = /dev/sg0n (where this is the scsi control
# device for the device you are using).
#
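# For example, a bacula-sd.conf Device resource using this script
# might contain (names and device paths are placeholders -- adjust to
# your setup):
#
#   Device {
#     Name = "LTO6-Drive-0"
#     Archive Device = /dev/nst0
#     Control Device = /dev/sg0
#     Alert Command = "/etc/bacula/scripts/tapealert %l"
#     # ... plus your other Device directives (Media Type, etc.)
#   }
#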
# Note: to test
# 1. uncomment the DEBUG=1 line below
# 2. Possibly remove or add the TapeAlert[nn]: lines that you want to test.
# Note, the message following the : is not used.
# 3. Run Bacula
#
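# With DEBUG=1 and the sample data below, the script prints only the
# TapeAlert prefixes, i.e.:
#   TapeAlert[3]:
#   TapeAlert[5]:
#   TapeAlert[39]
#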
#DEBUG=1
tapeinfo=`which tapeinfo`
if [ x${tapeinfo} = x ] ; then
echo "tapeinfo program not found, but is required."
exit 1
fi
if [ x$1 = x ] ; then
echo "First argument missing. Must be device control name."
exit 1
fi
if [ x$DEBUG = x ] ; then
$tapeinfo -f $1 |grep "^TapeAlert" - |cut -b1-13
exit $?
else
# For testing only
cat <<EOF |grep "^TapeAlert" - |cut -b1-13
Product Type: Tape Drive
Vendor ID: 'IBM '
Product ID: 'ULTRIUM-TD6 '
Revision: 'G350'
Attached Changer API: No
SerialNumber: 'F3A2930090'
TapeAlert[3]: Hard Error: Uncorrectable read/write error.
TapeAlert[5]: Read Failure: Tape faulty or tape drive broken.
TapeAlert[39]: Undefined.
MinBlock: 1
MaxBlock: 8388608
SCSI ID: 9
SCSI LUN: 0
Ready: yes
BufferedMode: yes
Medium Type: 0x58
Density Code: 0x58
BlockSize: 0
DataCompEnabled: yes
DataCompCapable: yes
DataDeCompEnabled: yes
CompType: 0xff
DeCompType: 0xff
EOF
fi