Driver now works when the database system is local, remote, or on docker

parent 8ee52a64bf
commit 6283186d76
@@ -30,4 +30,5 @@ lib/
 # controller configuration files
 config/*
 !config/sample_*_config.json
+*.pid
@@ -43,6 +43,9 @@ DB_PORT = '5432'
 # Path to the configuration file on the database server
 DB_CONF = '/etc/postgresql/9.6/main/postgresql.conf'
 
+# Path to the directory for storing database dump files
+DB_DUMP_DIR = '/var/lib/postgresql/9.6/main/dumpfiles'
+
 # Base config settings to always include when installing new configurations
 BASE_DB_CONF = {
     'track_counts': 'on',
@@ -69,8 +72,7 @@ ORACLE_AWR_ENABLED = False
 #==========================================================
 
 # Path to this driver
-DRIVER_HOME = os.path.realpath(__file__)
-print('DRIVER HOME: {}'.format(DRIVER_HOME))
+DRIVER_HOME = os.path.dirname(os.path.realpath(__file__))
 
 # Path to the directory for storing results
 RESULT_DIR = os.path.join(DRIVER_HOME, 'results')
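The new code paths key off a HOST_CONN setting in the driver config, together with a few related entries: CONTAINER_NAME for docker mode, DB_HOST and DB_PORT for the JDBC URL and the psql/pg_dump commands, and TEMP_DIR for staging config files fetched from the database host. A minimal sketch of those entries follows; the names are taken from this diff, the values are hypothetical:

    # Sketch only -- values are hypothetical, not part of this commit.
    HOST_CONN = 'docker'            # one of 'local', 'remote', 'docker'
    CONTAINER_NAME = 'postgres96'   # used only when HOST_CONN == 'docker'
    DB_HOST = '127.0.0.1'
    DB_PORT = '5432'
    DB_NAME = 'tpcc'
    DB_USER = 'dbuser'
    DB_PASSWORD = 'dbpass'
    TEMP_DIR = '/tmp/driver'        # local staging area for fetched config files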
@@ -20,10 +20,18 @@ import logging
 from logging.handlers import RotatingFileHandler
 
 import requests
-from fabric.api import env, local, task, lcd
+from fabric.api import env, lcd, local, settings, show, task
 from fabric.state import output as fabric_output
 
-import driver_config as dconf
+from utils import file_exists, get, load_driver_conf, parse_bool, put, run, sudo
+
+fabric_output.update({
+    'running': True,
+    'stdout': True,
+})
+
+# Loads the driver config file (default: driver_config.py)
+dconf = load_driver_conf()
 
 
 def _setup():
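With the config no longer imported directly, the Fabric task file resolves it at runtime through load_driver_conf(), which honors a DRIVER_CONFIG environment variable (see the new utils module later in this diff). Roughly, and leaving out the module-level caching, the resolution works like this:

    import importlib
    import os

    name = os.environ.get('DRIVER_CONFIG', 'driver_config')  # e.g. 'my_driver_config'
    if name.endswith('.py'):
        name = name[:-len('.py')]
    dconf = importlib.import_module(name)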
@@ -53,22 +61,22 @@ def _setup():
         raise ValueError(("Invalid HOST_CONN: {}. Valid values are "
                           "'local', 'remote', or 'docker'.").format(dconf.HOST_CONN))
 
-    # Create all output directories
-    for d in (dconf.RESULT_DIR, dconf.LOG_DIR, dconf.DB_DUMP_DIR):
+    # Update Fabric's host list
+    env.hosts = [LOGIN]
+
+    # Create local directories
+    for d in (dconf.RESULT_DIR, dconf.LOG_DIR, dconf.TEMP_DIR):
         os.makedirs(d, exist_ok=True)
 
+    # Copy Oracle scripts
+    if dconf.DB_TYPE == 'oracle':
+        put('./oracleScripts', '/home/oracle')
+        sudo('chown -R oracle:oinstall /home/oracle/oracleScripts')
+
 
 _setup()
 
-
-# Fabric environment settings
-env.hosts = [LOGIN]
-fabric_output.update({
-    'running': True,
-    'stdout': True,
-})
-
 
 # Configure logging
 LOG = logging.getLogger(__name__)
 LOG.setLevel(getattr(logging, dconf.LOG_LEVEL, logging.DEBUG))
@@ -84,19 +92,13 @@ FileHandler.setFormatter(Formatter)
 LOG.addHandler(FileHandler)
 
 
-def _parse_bool(value):
-    if not isinstance(value, bool):
-        value = str(value).lower() == 'true'
-    return value
-
-
 @task
 def check_disk_usage():
     partition = dconf.DATABASE_DISK
     disk_use = 0
     if partition:
         cmd = "df -h {}".format(partition)
-        out = local(cmd, capture=True).splitlines()[1]
+        out = run(cmd).splitlines()[1]
         m = re.search(r'\d+(?=%)', out)
         if m:
             disk_use = int(m.group(0))
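check_disk_usage() now sends the df command through the new run() helper, so it measures the disk wherever the database actually lives. For reference, a small standalone sketch of the parsing step; the df output line is hypothetical:

    import re

    out = '/dev/sda1        50G   21G   27G  44% /'   # hypothetical `df -h` data line
    m = re.search(r'\d+(?=%)', out)                   # grab the usage percentage
    disk_use = int(m.group(0)) if m else 0            # -> 44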
@@ -106,22 +108,21 @@ def check_disk_usage():
 
 @task
 def check_memory_usage():
-    cmd = 'free -m -h'
-    local(cmd)
+    run('free -m -h')
 
 
 @task
 def create_controller_config():
     if dconf.DB_TYPE == 'postgres':
-        dburl_fmt = 'jdbc:postgresql://localhost:5432/{db}'.format
+        dburl_fmt = 'jdbc:postgresql://{host}:{port}/{db}'.format
     elif dconf.DB_TYPE == 'oracle':
-        dburl_fmt = 'jdbc:oracle:thin:@localhost:1521:{db}'.format
+        dburl_fmt = 'jdbc:oracle:thin:@{host}:{port}:{db}'.format
     else:
         raise Exception("Database Type {} Not Implemented !".format(dconf.DB_TYPE))
 
     config = dict(
         database_type=dconf.DB_TYPE,
-        database_url=dburl_fmt(db=dconf.DB_NAME),
+        database_url=dburl_fmt(host=dconf.DB_HOST, port=dconf.DB_PORT, db=dconf.DB_NAME),
         username=dconf.DB_USER,
         password=dconf.DB_PASSWORD,
         upload_code='DEPRECATED',
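The controller's JDBC URL is no longer hardcoded to localhost; it is built from DB_HOST and DB_PORT, so a remote or containerized database can be targeted. With hypothetical values:

    dburl_fmt = 'jdbc:postgresql://{host}:{port}/{db}'.format
    dburl_fmt(host='192.168.10.5', port='5432', db='tpcc')
    # -> 'jdbc:postgresql://192.168.10.5:5432/tpcc'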
@@ -136,34 +137,60 @@ def create_controller_config():
 @task
 def restart_database():
     if dconf.DB_TYPE == 'postgres':
-        cmd = 'sudo -u postgres pg_ctl -D {} -w -t 600 restart -m fast'.format(dconf.PG_DATADIR)
+        if dconf.HOST_CONN == 'docker':
+            # Restarting the docker container here is the cleanest way to do it
+            # because there's no init system running and the only process running
+            # in the container is postgres itself
+            local('docker restart {}'.format(dconf.CONTAINER_NAME))
+        else:
+            sudo('pg_ctl -D {} -w -t 600 restart -m fast'.format(dconf.PG_DATADIR), user=dconf.DB_USER)
     elif dconf.DB_TYPE == 'oracle':
-        cmd = 'sh oracleScripts/shutdownOracle.sh && sh oracleScripts/startupOracle.sh'
+        run('sh oracleScripts/shutdownOracle.sh && sh oracleScripts/startupOracle.sh')
     else:
         raise Exception("Database Type {} Not Implemented !".format(dconf.DB_TYPE))
-    local(cmd)
 
 
 @task
 def drop_database():
     if dconf.DB_TYPE == 'postgres':
-        cmd = "PGPASSWORD={} dropdb -e --if-exists {} -U {}".\
-            format(dconf.DB_PASSWORD, dconf.DB_NAME, dconf.DB_USER)
+        run("PGPASSWORD={} dropdb -e --if-exists {} -U {} -h {}".format(
+            dconf.DB_PASSWORD, dconf.DB_NAME, dconf.DB_USER, dconf.DB_HOST))
     else:
         raise Exception("Database Type {} Not Implemented !".format(dconf.DB_TYPE))
-    local(cmd)
 
 
 @task
 def create_database():
     if dconf.DB_TYPE == 'postgres':
-        cmd = "PGPASSWORD={} createdb -e {} -U {}".\
-            format(dconf.DB_PASSWORD, dconf.DB_NAME, dconf.DB_USER)
+        run("PGPASSWORD={} createdb -e {} -U {} -h {}".format(
+            dconf.DB_PASSWORD, dconf.DB_NAME, dconf.DB_USER, dconf.DB_HOST))
     else:
         raise Exception("Database Type {} Not Implemented !".format(dconf.DB_TYPE))
-    local(cmd)
+
+
+@task
+def create_user():
+    if dconf.DB_TYPE == 'postgres':
+        sql = "CREATE USER {} SUPERUSER PASSWORD '{}';".format(dconf.DB_USER, dconf.DB_PASSWORD)
+        run("PGPASSWORD={} psql -c \\\"{}\\\" -U postgres -h {}".format(
+            dconf.DB_PASSWORD, sql, dconf.DB_HOST))
+    elif dconf.DB_TYPE == 'oracle':
+        run('sh oracleScripts/createUser.sh {} {}'.format(dconf.DB_USER, dconf.DB_PASSWORD))
+    else:
+        raise Exception("Database Type {} Not Implemented !".format(dconf.DB_TYPE))
+
+
+@task
+def drop_user():
+    if dconf.DB_TYPE == 'postgres':
+        sql = "DROP USER IF EXISTS {};".format(dconf.DB_USER)
+        run("PGPASSWORD={} psql -c \\\"{}\\\" -U postgres -h {}".format(
+            dconf.DB_PASSWORD, sql, dconf.DB_HOST))
+    elif dconf.DB_TYPE == 'oracle':
+        run('sh oracleScripts/dropUser.sh {}'.format(dconf.DB_USER))
+    else:
+        raise Exception("Database Type {} Not Implemented !".format(dconf.DB_TYPE))
 
 
 @task
 def reset_conf():
     change_conf()
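The new create_user/drop_user tasks pass the SQL to psql with escaped quotes so the statement survives the extra layer of shell quoting that run() may add (for example docker exec ... /bin/bash -c "..."). With hypothetical credentials, the assembled command looks like:

    sql = "CREATE USER {} SUPERUSER PASSWORD '{}';".format('dbuser', 'secret')
    cmd = "PGPASSWORD={} psql -c \\\"{}\\\" -U postgres -h {}".format('secret', sql, 'db.example.com')
    print(cmd)
    # PGPASSWORD=secret psql -c \"CREATE USER dbuser SUPERUSER PASSWORD 'secret';\" -U postgres -h db.example.com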
@@ -174,7 +201,9 @@ def change_conf(next_conf=None):
     signal = "# configurations recommended by ottertune:\n"
     next_conf = next_conf or {}
 
-    with open(dconf.DB_CONF, 'r') as f:
+    tmp_conf_in = os.path.join(dconf.TEMP_DIR, os.path.basename(dconf.DB_CONF) + '.in')
+    get(dconf.DB_CONF, tmp_conf_in)
+    with open(tmp_conf_in, 'r') as f:
         lines = f.readlines()
 
     if signal not in lines:
@@ -183,7 +212,8 @@ def change_conf(next_conf=None):
     signal_idx = lines.index(signal)
     lines = lines[0:signal_idx + 1]
     if dconf.BASE_DB_CONF:
-        assert isinstance(dconf.BASE_DB_CONF, dict)
+        assert isinstance(dconf.BASE_DB_CONF, dict), \
+            (type(dconf.BASE_DB_CONF), dconf.BASE_DB_CONF)
         base_conf = ['{} = {}\n'.format(*c) for c in sorted(dconf.BASE_DB_CONF.items())]
         lines.extend(base_conf)
@@ -202,12 +232,13 @@ def change_conf(next_conf=None):
         lines.append('{} = {}\n'.format(name, value))
     lines.append('\n')
 
-    tmpconf = 'tmp_' + os.path.basename(dconf.DB_CONF)
-    with open(tmpconf, 'w') as f:
+    tmp_conf_out = os.path.join(dconf.TEMP_DIR, os.path.basename(dconf.DB_CONF) + '.out')
+    with open(tmp_conf_out, 'w') as f:
         f.write(''.join(lines))
 
-    local('sudo cp {0} {0}.ottertune.bak'.format(dconf.DB_CONF))
-    local('sudo mv {} {}'.format(tmpconf, dconf.DB_CONF))
+    run('cp {0} {0}.ottertune.bak'.format(dconf.DB_CONF))
+    put(tmp_conf_out, dconf.DB_CONF, use_sudo=False)
+    local('rm -f {} {}'.format(tmp_conf_in, tmp_conf_out))
 
 
 @task
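change_conf() no longer edits the DBMS config file in place: it fetches it into TEMP_DIR with get(), rewrites it locally, and pushes it back with put(), which works whether the file lives on this machine, a remote host, or inside a container. The staging paths it builds look like this (the TEMP_DIR value is hypothetical):

    import os

    TEMP_DIR = '/tmp/driver'                              # hypothetical
    DB_CONF = '/etc/postgresql/9.6/main/postgresql.conf'  # from the driver config
    tmp_conf_in = os.path.join(TEMP_DIR, os.path.basename(DB_CONF) + '.in')
    tmp_conf_out = os.path.join(TEMP_DIR, os.path.basename(DB_CONF) + '.out')
    # -> /tmp/driver/postgresql.conf.in and /tmp/driver/postgresql.conf.out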
@@ -249,7 +280,7 @@ def signal_controller():
     pidfile = os.path.join(dconf.CONTROLLER_HOME, 'pid.txt')
     with open(pidfile, 'r') as f:
         pid = int(f.read())
-    cmd = 'sudo kill -2 {}'.format(pid)
+    cmd = 'kill -2 {}'.format(pid)
     with lcd(dconf.CONTROLLER_HOME):  # pylint: disable=not-context-manager
         local(cmd)
@@ -276,8 +307,11 @@ def save_next_config(next_config, t=None):
 
 @task
 def free_cache():
-    cmd = 'sync; sudo bash -c "echo 1 > /proc/sys/vm/drop_caches"'
-    local(cmd)
+    if dconf.HOST_CONN != 'docker':  # Read-only file system
+        with show('everything'), settings(warn_only=True):
+            res = sudo("sync && echo 3 | tee /proc/sys/vm/drop_caches")
+            if res.failed:
+                LOG.error('%s (return code %s)', res.stderr.strip(), res.return_code)
 
 
 @task
@@ -369,7 +403,7 @@ def get_result(max_time_sec=180, interval_sec=5, upload_code=None):
 
 @task
 def download_debug_info(pprint=False):
-    pprint = _parse_bool(pprint)
+    pprint = parse_bool(pprint)
     url = '{}/dump/{}'.format(dconf.WEBSITE_URL, dconf.UPLOAD_CODE)
     params = {'pp': int(True)} if pprint else {}
     rsp = requests.get(url, params=params)
@@ -391,14 +425,13 @@ def download_debug_info(pprint=False):
 
 @task
 def add_udf():
-    cmd = 'sudo python3 ./LatencyUDF.py ../controller/output/'
-    local(cmd)
+    local('python3 ./LatencyUDF.py ../controller/output/')
 
 
 @task
 def upload_batch(result_dir=None, sort=True, upload_code=None):
     result_dir = result_dir or dconf.RESULT_DIR
-    sort = _parse_bool(sort)
+    sort = parse_bool(sort)
     results = glob.glob(os.path.join(result_dir, '*__summary.json'))
     if sort:
         results = sorted(results)
@@ -415,44 +448,50 @@ def upload_batch(result_dir=None, sort=True, upload_code=None):
 
 @task
 def dump_database():
-    db_file_path = os.path.join(dconf.DB_DUMP_DIR, dconf.DB_NAME + '.dump')
-    if os.path.exists(db_file_path):
-        LOG.info('%s already exists ! ', db_file_path)
+    dumpfile = os.path.join(dconf.DB_DUMP_DIR, dconf.DB_NAME + '.dump')
+    if file_exists(dumpfile):
+        LOG.info('%s already exists ! ', dumpfile)
         return False
     else:
-        LOG.info('Dump database %s to %s', dconf.DB_NAME, db_file_path)
-        # You must create a directory named dpdata through sqlplus in your Oracle database
+        LOG.info('Dump database %s to %s', dconf.DB_NAME, dumpfile)
         if dconf.DB_TYPE == 'oracle':
-            cmd = 'expdp {}/{}@{} schemas={} dumpfile={}.dump DIRECTORY=dpdata'.format(
-                'c##tpcc', 'oracle', 'orcldb', 'c##tpcc', 'orcldb')
+            cmd = 'sh oracleScripts/dumpOracle.sh {} {} {} {}'.format(
+                dconf.DB_USER, dconf.DB_PASSWORD, dconf.DB_NAME, dconf.DB_DUMP_DIR)
+
         elif dconf.DB_TYPE == 'postgres':
-            cmd = 'PGPASSWORD={} pg_dump -U {} -F c -d {} > {}'.format(dconf.DB_PASSWORD,
-                                                                       dconf.DB_USER,
-                                                                       dconf.DB_NAME,
-                                                                       db_file_path)
+            cmd = 'PGPASSWORD={} pg_dump -U {} -h {} -F c -d {} > {}'.format(
+                dconf.DB_PASSWORD, dconf.DB_USER, dconf.DB_HOST, dconf.DB_NAME,
+                dumpfile)
         else:
             raise Exception("Database Type {} Not Implemented !".format(dconf.DB_TYPE))
-        local(cmd)
+        run(cmd)
         return True
 
 
 @task
 def restore_database():
+    dumpfile = os.path.join(dconf.DB_DUMP_DIR, dconf.DB_NAME + '.dump')
+    if not file_exists(dumpfile):
+        raise FileNotFoundError("Database dumpfile '{}' does not exist!".format(dumpfile))
+
     if dconf.DB_TYPE == 'oracle':
         # You must create a directory named dpdata through sqlplus in your Oracle database
         # The following script assumes such directory exists.
-        # You may want to modify the username, password, and dump file name in the script
-        cmd = 'sh oracleScripts/restoreOracle.sh'
+        drop_user()
+        create_user()
+        cmd = 'sh oracleScripts/restoreOracle.sh {} {}'.format(dconf.DB_USER, dconf.DB_NAME)
+
     elif dconf.DB_TYPE == 'postgres':
-        db_file_path = '{}/{}.dump'.format(dconf.DB_DUMP_DIR, dconf.DB_NAME)
         drop_database()
         create_database()
-        cmd = 'PGPASSWORD={} pg_restore -U {} -n public -j 8 -F c -d {} {}'.\
-            format(dconf.DB_PASSWORD, dconf.DB_USER, dconf.DB_NAME, db_file_path)
+        cmd = 'PGPASSWORD={} pg_restore -U {} -h {} -n public -j 8 -F c -d {} {}'.format(
+            dconf.DB_PASSWORD, dconf.DB_USER, dconf.DB_HOST, dconf.DB_NAME, dumpfile)
     else:
         raise Exception("Database Type {} Not Implemented !".format(dconf.DB_TYPE))
 
     LOG.info('Start restoring database')
-    local(cmd)
+    run(cmd)
     LOG.info('Finish restoring database')
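dump_database() and restore_database() now build host-aware commands and execute them through run(), and they check for the dump file with file_exists() on the database host rather than os.path.exists() on the driver machine. With hypothetical values, the postgres dump command comes out as:

    cmd = 'PGPASSWORD={} pg_dump -U {} -h {} -F c -d {} > {}'.format(
        'secret', 'dbuser', 'db.example.com', 'tpcc',
        '/var/lib/postgresql/9.6/main/dumpfiles/tpcc.dump')
    # PGPASSWORD=secret pg_dump -U dbuser -h db.example.com -F c -d tpcc \
    #     > /var/lib/postgresql/9.6/main/dumpfiles/tpcc.dump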
@@ -485,17 +524,14 @@ def _ready_to_shut_down_controller():
 
 
 def clean_logs():
-    # remove oltpbench log
-    cmd = 'rm -f {}'.format(dconf.OLTPBENCH_LOG)
-    local(cmd)
-
-    # remove controller log
-    cmd = 'rm -f {}'.format(dconf.CONTROLLER_LOG)
-    local(cmd)
+    # remove oltpbench and controller log files
+    local('rm -f {} {}'.format(dconf.OLTPBENCH_LOG, dconf.CONTROLLER_LOG))
 
 
 @task
 def loop(i):
+    i = int(i)
 
     # free cache
     free_cache()
@@ -571,7 +607,7 @@ def run_loops(max_iter=1):
             restore_database()
 
         LOG.info('The %s-th Loop Starts / Total Loops %s', i + 1, max_iter)
-        loop(i % dconf.RELOAD_INTERVAL)
+        loop(i % dconf.RELOAD_INTERVAL if dconf.RELOAD_INTERVAL > 0 else i)
        LOG.info('The %s-th Loop Ends / Total Loops %s', i + 1, max_iter)
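The guard on RELOAD_INTERVAL avoids a modulo-by-zero error (and meaningless wrapping) when periodic database reloads are disabled. A small sketch of the behavior, with a hypothetical interval:

    RELOAD_INTERVAL = 10   # hypothetical; a value <= 0 means "never reload"
    for i in range(12):
        idx = i % RELOAD_INTERVAL if RELOAD_INTERVAL > 0 else i
        # RELOAD_INTERVAL = 10 -> idx wraps: 0..9, 0, 1
        # RELOAD_INTERVAL = 0  -> idx == i, and no ZeroDivisionError from i % 0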
@@ -0,0 +1,17 @@
+#!/bin/sh
+
+USERNAME="$1"
+PASSWORD="$2"
+
+sqlplus / as sysdba <<EOF
+CREATE USER $USERNAME IDENTIFIED BY $PASSWORD;
+GRANT EXECUTE ON DBMS_WORKLOAD_CAPTURE TO $USERNAME;
+GRANT EXECUTE ON DBMS_WORKLOAD_REPLAY TO $USERNAME;
+GRANT CREATE SESSION TO $USERNAME;
+GRANT CREATE ANY DIRECTORY TO $USERNAME;
+GRANT SELECT_CATALOG_ROLE TO $USERNAME;
+GRANT BECOME USER TO $USERNAME;
+GRANT CONNECT, RESOURCE, DBA TO $USERNAME;
+quit
+EOF
+
@@ -0,0 +1,9 @@
+#!/bin/sh
+
+USERNAME="$1"
+
+sqlplus / as sysdba <<EOF
+drop user $USERNAME cascade;
+quit
+EOF
+
@@ -0,0 +1,24 @@
+#!/bin/sh
+
+USERNAME="$1"
+PASSWORD="$2"
+DB_NAME="$3"
+DP_PATH="$4"
+DP_DIR="DPDATA"
+DP_FILE="${DB_NAME}.dump"
+
+# Make sure the physical directory exists
+mkdir -p "$DP_PATH"
+
+# Make sure the DB directory object exists
+sqlplus / as sysdba <<EOF
+CREATE OR REPLACE DIRECTORY $DP_DIR AS '$DP_PATH';
+quit
+EOF
+
+# Export the data
+expdp $USERNAME/$PASSWORD@$DB_NAME \
+    schemas=$USERNAME \
+    dumpfile=$DP_FILE \
+    DIRECTORY=$DP_DIR
+
@@ -1,23 +1,19 @@
 #!/bin/sh
-su - oracle <<EON
-oracle #system password
 
-sqlplus / as sysdba <<EOF
-drop user c##tpcc cascade;
-# username
-create user c##tpcc identified by oracle;
-# username password
-quit
-EOF
+USERNAME="$1"
+DP_FILE="${2}.dump"
+DP_DIR="DPDATA"
 
-impdp 'userid="/ as sysdba"' schemas=c##tpcc dumpfile=orcldb.dump DIRECTORY=dpdata
-# username database_name db_directory
+# Import the data
+impdp 'userid="/ as sysdba"' \
+    schemas=$USERNAME \
+    dumpfile=$DP_FILE \
+    DIRECTORY=$DP_DIR
 
-sqlplus / as sysdba <<EOF #restart the database
+# Restart the database
+sqlplus / as sysdba <<EOF
 shutdown immediate
 startup
 quit
 EOF
-
-exit
-EON
@@ -1,11 +1,7 @@
 #!/bin/sh
-su - oracle <<EON
-oracle
 
 sqlplus / as sysdba <<EOF
 shutdown immediate
 exit
 EOF
-
-exit
-EON
@@ -1,11 +1,7 @@
 #!/bin/sh
-su - oracle <<EON
-oracle
 
 sqlplus / as sysdba <<EOF
 exec dbms_workload_repository.create_snapshot;
 quit
 EOF
-
-exit
-EON
@@ -1,11 +1,7 @@
 #!/bin/sh
-su - oracle <<EON
-oracle
 
 sqlplus / as sysdba <<EOF
 startup
 quit
 EOF
-
-exit
-EON
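The Oracle helper scripts no longer hardcode the c##tpcc user, the system password, or the dump file name; the driver passes those in as positional arguments, and the scripts run as the oracle user on the database host instead of wrapping everything in su - oracle. The task invocations earlier in this diff show the shape of each call; with hypothetical values they expand to something like:

    # Values are hypothetical; the argument order matches the task calls above.
    run('sh oracleScripts/createUser.sh {} {}'.format('c##tpcc', 'oracle'))
    run('sh oracleScripts/dumpOracle.sh {} {} {} {}'.format(
        'c##tpcc', 'oracle', 'orcldb', '/home/oracle/dumpfiles'))
    run('sh oracleScripts/restoreOracle.sh {} {}'.format('c##tpcc', 'orcldb'))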
@@ -0,0 +1,110 @@
+import importlib
+import os
+
+from fabric.api import hide, local, settings, task
+from fabric.api import get as _get, put as _put, run as _run, sudo as _sudo
+
+dconf = None
+
+
+def load_driver_conf():
+    driver_conf = os.environ.get('DRIVER_CONFIG', 'driver_config')
+    if driver_conf.endswith('.py'):
+        driver_conf = driver_conf[:-len('.py')]
+    dmod = importlib.import_module(driver_conf)
+    global dconf
+    if not dconf:
+        dconf = dmod
+    return dmod
+
+
+def parse_bool(value):
+    if not isinstance(value, bool):
+        value = str(value).lower() == 'true'
+    return value
+
+
+@task
+def run(cmd, **kwargs):
+    try:
+        if dconf.HOST_CONN == 'remote':
+            res = _run(cmd, **kwargs)
+        elif dconf.HOST_CONN == 'local':
+            res = local(cmd, capture=True, **kwargs)
+        else:  # docker
+            opts = ''
+            cmdd = cmd
+            if cmd.endswith('&'):
+                cmdd = cmd[:-1].strip()
+                opts = '-d '
+            res = local('docker exec {} -ti {} /bin/bash -c "{}"'.format(
+                opts, dconf.CONTAINER_NAME, cmdd), capture=True, **kwargs)
+    except TypeError as e:
+        err = str(e).strip()
+        if 'unexpected keyword argument' in err:
+            offender = err.rsplit(' ', 1)[-1][1:-1]
+            kwargs.pop(offender)
+            res = run(cmd, **kwargs)
+        else:
+            raise e
+    return res
+
+
+@task
+def sudo(cmd, user=None, **kwargs):
+    if dconf.HOST_CONN == 'remote':
+        res = _sudo(cmd, user=user, **kwargs)
+
+    elif dconf.HOST_CONN == 'local':
+        pre_cmd = 'sudo '
+        if user:
+            pre_cmd += '-u {} '.format(user)
+        res = local(pre_cmd + cmd, capture=True, **kwargs)
+
+    else:  # docker
+        user = user or 'root'
+        opts = '-ti -u {}'.format(user or 'root')
+        if user == 'root':
+            opts += ' -w /'
+        res = local('docker exec {} {} /bin/bash -c "{}"'.format(
+            opts, dconf.CONTAINER_NAME, cmd), capture=True)
+
+    return res
+
+
+@task
+def get(remote_path, local_path, use_sudo=False):
+    use_sudo = parse_bool(use_sudo)
+
+    if dconf.HOST_CONN == 'remote':
+        res = _get(remote_path, local_path, use_sudo=use_sudo)
+    elif dconf.HOST_CONN == 'local':
+        pre_cmd = 'sudo ' if use_sudo else ''
+        opts = '-r' if os.path.isdir(remote_path) else ''
+        res = local('{}cp {} {} {}'.format(pre_cmd, opts, remote_path, local_path))
+    else:  # docker
+        res = local('docker cp {}:{} {}'.format(dconf.CONTAINER_NAME, remote_path, local_path))
+    return res
+
+
+@task
+def put(local_path, remote_path, use_sudo=False):
+    use_sudo = parse_bool(use_sudo)
+
+    if dconf.HOST_CONN == 'remote':
+        res = _put(local_path, remote_path, use_sudo=use_sudo)
+    elif dconf.HOST_CONN == 'local':
+        pre_cmd = 'sudo ' if use_sudo else ''
+        opts = '-r' if os.path.isdir(local_path) else ''
+        res = local('{}cp {} {} {}'.format(pre_cmd, opts, local_path, remote_path))
+    else:  # docker
+        res = local('docker cp {} {}:{}'.format(local_path, dconf.CONTAINER_NAME, remote_path))
+    return res
+
+
+@task
+def file_exists(filename):
+    with settings(warn_only=True), hide('warnings'):
+        res = run('[ -f {} ]'.format(filename))
+    return res.return_code == 0
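The new utils module is the core of the change: every task that used to shell out with local() or a hardcoded sudo now goes through run()/sudo()/get()/put(), which dispatch on dconf.HOST_CONN. A rough usage sketch (the container name and disk device are hypothetical):

    # HOST_CONN = 'local'  -> local('df -h /dev/sda1', capture=True)
    # HOST_CONN = 'remote' -> Fabric's run('df -h /dev/sda1') over SSH to env.hosts
    # HOST_CONN = 'docker' -> local('docker exec -ti postgres96 /bin/bash -c "df -h /dev/sda1"')
    out = run('df -h {}'.format('/dev/sda1'))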
@@ -6,7 +6,7 @@ local_settings.py
 
 # celery/celerybeat #
 #####################
-celerybeat-schedule
+celerybeat-schedule*
 *.pid
 
 # Raw data files #