support mysql
parent 5b697b9c56
commit 5c422dd010
@@ -45,7 +45,7 @@ dependencies {
     compile group: 'commons-cli', name: 'commons-cli', version: '1.2'

     // https://mvnrepository.com/artifact/mysql/mysql-connector-java
-    compile group: 'mysql', name: 'mysql-connector-java', version: '5.1.6'
+    compile group: 'mysql', name: 'mysql-connector-java', version: '8.0.12'

     // https://mvnrepository.com/artifact/org.postgresql/postgresql
     compile group: 'org.postgresql', name: 'postgresql', version: '9.4-1201-jdbc41'
@@ -16,6 +16,7 @@ import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.sql.Statement;
 import java.util.Map;
+import java.util.HashMap;
 import org.apache.log4j.Logger;

 /** */
@@ -28,6 +29,10 @@ public class MySQLCollector extends DBCollector {

   private static final String METRICS_SQL = "SHOW STATUS";

+  private static final String METRICS_SQL2 =
+      "SELECT name, count FROM INFORMATION_SCHEMA.INNODB_METRICS where subsystem = 'transaction';";
+
+  private HashMap<String, String> innodbMetrics = new HashMap<>();

   public MySQLCollector(String oriDBUrl, String username, String password) {
     try {
       Connection conn = DriverManager.getConnection(oriDBUrl, username, password);
@@ -50,6 +55,12 @@ public class MySQLCollector extends DBCollector {
       while (out.next()) {
         dbMetrics.put(out.getString(1).toLowerCase(), out.getString(2));
       }
+
+      out = s.executeQuery(METRICS_SQL2);
+      while (out.next()) {
+        innodbMetrics.put(out.getString(1).toLowerCase(), out.getString(2));
+      }

       conn.close();
     } catch (SQLException e) {
       LOG.error("Error while collecting DB parameters: " + e.getMessage());
@@ -93,6 +104,13 @@ public class MySQLCollector extends DBCollector {
     }
     // "global" is a placeholder
     jobGlobal.put("global", job);

+    JSONObject job2 = new JSONObject();
+    for (Map.Entry<String, String> entry : innodbMetrics.entrySet()) {
+      job2.put(entry.getKey(), entry.getValue());
+    }
+    jobGlobal.put("innodb_metrics", job2);

     stringer.value(jobGlobal);
     stringer.key(JSON_LOCAL_KEY);
     stringer.value(null);
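Note: the collector now issues two queries per observation: SHOW STATUS for the global status counters, and a second query against INFORMATION_SCHEMA.INNODB_METRICS for the transaction-subsystem counters (these are only populated once innodb_monitor_enable is switched on, which the driver config below takes care of). A minimal standalone sketch of the same collection logic, assuming the pymysql package and placeholder credentials:

    import pymysql

    conn = pymysql.connect(host='127.0.0.1', user='root', password='', database='mysql')
    with conn.cursor() as cur:
        # Global status counters, mirroring METRICS_SQL
        cur.execute("SHOW STATUS")
        db_metrics = {name.lower(): value for name, value in cur.fetchall()}
        # InnoDB transaction counters, mirroring METRICS_SQL2
        cur.execute("SELECT name, count FROM INFORMATION_SCHEMA.INNODB_METRICS "
                    "WHERE subsystem = 'transaction'")
        innodb_metrics = {name.lower(): str(count) for name, count in cur.fetchall()}
    conn.close()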
@@ -5,7 +5,7 @@ import os
 #==========================================================

 # Location of the database host relative to this driver
-# Valid values: local, remote, or docker
+# Valid values: local, remote, docker or remote_docker
 HOST_CONN = 'local'

 # The name of the Docker container for the target database
@@ -15,6 +15,7 @@ CONTAINER_NAME = None  # e.g., 'postgres_container'
 # Host SSH login credentials (only required if HOST_CONN=remote)
 LOGIN_NAME = None
 LOGIN_HOST = None
+LOGIN_PASSWORD = None
 LOGIN_PORT = None  # Set when using a port other than the SSH default

@@ -22,7 +23,7 @@ LOGIN_PORT = None  # Set when using a port other than the SSH default
 # DATABASE OPTIONS
 #==========================================================

-# Either Postgres or Oracle
+# Postgres, Oracle or Mysql
 DB_TYPE = 'postgres'

 # Name of the database
@@ -50,12 +51,19 @@ DB_CONF = '/etc/postgresql/9.6/main/postgresql.conf'
 DB_DUMP_DIR = '/var/lib/postgresql/9.6/main/dumpfiles'

 # Base config settings to always include when installing new configurations
-BASE_DB_CONF = {
-    'track_counts': 'on',
-    'track_functions': 'all',
-    'track_io_timing': 'on',
-    'autovacuum': 'off',
-}
+if DB_TYPE == 'mysql':
+    BASE_DB_CONF = {
+        'innodb_monitor_enable': 'all',
+    }
+elif DB_TYPE == 'postgres':
+    BASE_DB_CONF = {
+        'track_counts': 'on',
+        'track_functions': 'all',
+        'track_io_timing': 'on',
+        'autovacuum': 'off',
+    }
+else:
+    BASE_DB_CONF = None

 # Name of the device on the database server to monitor the disk usage, or None to disable
 DATABASE_DISK = None
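Note: innodb_monitor_enable = all is what populates the INFORMATION_SCHEMA.INNODB_METRICS counters that the collector's second query reads, so it is the one base setting MySQL needs. A small sketch (editor's addition, not part of the commit) of the config tail change_conf() renders from this BASE_DB_CONF:

    base_db_conf = {'innodb_monitor_enable': 'all'}
    lines = ['[mysqld]\n']
    for name, value in sorted(base_db_conf.items()):
        lines.append('{} = {}\n'.format(name, value))
    # lines -> ['[mysqld]\n', 'innodb_monitor_enable = all\n']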
@@ -88,7 +96,11 @@ RESULT_DIR = os.path.join(DRIVER_HOME, 'results')
 TEMP_DIR = '/tmp/driver'

 # Path to the directory for storing database dump files
-DB_DUMP_DIR = os.path.join(DRIVER_HOME, 'dumpfiles')
+if DB_DUMP_DIR is None:
+    DB_DUMP_DIR = os.path.join(DRIVER_HOME, 'dumpfiles')
+    if not os.path.exists(DB_DUMP_DIR):
+        os.mkdir(DB_DUMP_DIR)
+

 # Reload the database after running this many iterations
 RELOAD_INTERVAL = 10
@@ -102,7 +114,7 @@ MAX_DISK_USAGE = 90
 WARMUP_ITERATIONS = 0

 # Let the database initialize for this many seconds after it restarts
-RESTART_SLEEP_SEC = 300
+RESTART_SLEEP_SEC = 30

 #==========================================================
 # OLTPBENCHMARK OPTIONS
@@ -123,7 +135,7 @@ OLTPBENCH_BENCH = 'tpcc'
 #==========================================================

 # Path to the controller directory
-CONTROLLER_HOME = os.path.expanduser('~/ottertune/client/controller')
+CONTROLLER_HOME = DRIVER_HOME + '/../controller'

 # Path to the controller configuration file
 CONTROLLER_CONFIG = os.path.join(CONTROLLER_HOME, 'config/postgres_config.json')
@@ -36,6 +36,7 @@ fabric_output.update({
 })
 env.abort_exception = FabricException
 env.hosts = [dconf.LOGIN]
+env.password = dconf.LOGIN_PASSWORD

 # Create local directories
 for _d in (dconf.RESULT_DIR, dconf.LOG_DIR, dconf.TEMP_DIR):
@@ -82,6 +83,8 @@ def create_controller_config():
         dburl_fmt = 'jdbc:postgresql://{host}:{port}/{db}'.format
     elif dconf.DB_TYPE == 'oracle':
         dburl_fmt = 'jdbc:oracle:thin:@{host}:{port}:{db}'.format
+    elif dconf.DB_TYPE == 'mysql':
+        dburl_fmt = 'jdbc:mysql://{host}:{port}/{db}?useSSL=false'.format
     else:
         raise Exception("Database Type {} Not Implemented !".format(dconf.DB_TYPE))
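Note: the new MySQL branch gives the controller a JDBC URL with SSL disabled explicitly; for example (host, port, and database name are placeholders):

    url = 'jdbc:mysql://{host}:{port}/{db}?useSSL=false'.format(
        host='127.0.0.1', port=3306, db='tpcc')
    # -> 'jdbc:mysql://127.0.0.1:3306/tpcc?useSSL=false'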
@@ -107,9 +110,18 @@ def restart_database():
             # because there's no init system running and the only process running
             # in the container is postgres itself
             local('docker restart {}'.format(dconf.CONTAINER_NAME))
+        elif dconf.HOST_CONN == 'remote_docker':
+            run('docker restart {}'.format(dconf.CONTAINER_NAME), remote_only=True)
         else:
             sudo('pg_ctl -D {} -w -t 600 restart -m fast'.format(
                 dconf.PG_DATADIR), user=dconf.ADMIN_USER, capture=False)
+    elif dconf.DB_TYPE == 'mysql':
+        if dconf.HOST_CONN == 'docker':
+            local('docker restart {}'.format(dconf.CONTAINER_NAME))
+        elif dconf.HOST_CONN == 'remote_docker':
+            run('docker restart {}'.format(dconf.CONTAINER_NAME), remote_only=True)
+        else:
+            sudo('service mysql restart')
     elif dconf.DB_TYPE == 'oracle':
         db_log_path = os.path.join(os.path.split(dconf.DB_CONF)[0], 'startup.log')
         local_log_path = os.path.join(dconf.LOG_DIR, 'startup.log')
@@ -135,6 +147,9 @@ def drop_database():
     if dconf.DB_TYPE == 'postgres':
         run("PGPASSWORD={} dropdb -e --if-exists {} -U {} -h {}".format(
             dconf.DB_PASSWORD, dconf.DB_NAME, dconf.DB_USER, dconf.DB_HOST))
+    elif dconf.DB_TYPE == 'mysql':
+        run("mysql --user={} --password={} -e 'drop database if exists {}'".format(
+            dconf.DB_USER, dconf.DB_PASSWORD, dconf.DB_NAME))
     else:
         raise Exception("Database Type {} Not Implemented !".format(dconf.DB_TYPE))

@@ -144,6 +159,9 @@ def create_database():
     if dconf.DB_TYPE == 'postgres':
         run("PGPASSWORD={} createdb -e {} -U {} -h {}".format(
             dconf.DB_PASSWORD, dconf.DB_NAME, dconf.DB_USER, dconf.DB_HOST))
+    elif dconf.DB_TYPE == 'mysql':
+        run("mysql --user={} --password={} -e 'create database {}'".format(
+            dconf.DB_USER, dconf.DB_PASSWORD, dconf.DB_NAME))
     else:
         raise Exception("Database Type {} Not Implemented !".format(dconf.DB_TYPE))
@@ -172,9 +190,20 @@ def drop_user():
         raise Exception("Database Type {} Not Implemented !".format(dconf.DB_TYPE))

 @task
-def reset_conf():
-    change_conf()
+def reset_conf(always=True):
+    if always:
+        change_conf()
+        return
+
+    # reset the config only if it has not been changed by OtterTune,
+    # i.e. the OtterTune signal line is not in the config file.
+    signal = "# configurations recommended by ottertune:\n"
+    tmp_conf_in = os.path.join(dconf.TEMP_DIR, os.path.basename(dconf.DB_CONF) + '.in')
+    get(dconf.DB_CONF, tmp_conf_in)
+    with open(tmp_conf_in, 'r') as f:
+        lines = f.readlines()
+    if signal not in lines:
+        change_conf()

 @task
 def change_conf(next_conf=None):
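Note: the new always flag makes reset_conf() safe to call unconditionally at startup. A usage sketch based on how run_loops() below invokes it:

    reset_conf()       # always rewrites the config (old behavior)
    reset_conf(False)  # rewrites only if the OtterTune signal line is absent,
                       # so a config already tuned by OtterTune is left alone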
@@ -191,11 +220,15 @@ def change_conf(next_conf=None):

     signal_idx = lines.index(signal)
     lines = lines[0:signal_idx + 1]

+    if dconf.DB_TYPE == 'mysql':
+        lines.append('[mysqld]\n')
+
     if dconf.BASE_DB_CONF:
         assert isinstance(dconf.BASE_DB_CONF, dict), \
             (type(dconf.BASE_DB_CONF), dconf.BASE_DB_CONF)
-        base_conf = ['{} = {}\n'.format(*c) for c in sorted(dconf.BASE_DB_CONF.items())]
-        lines.extend(base_conf)
+        for name, value in sorted(dconf.BASE_DB_CONF.items()):
+            lines.append('{} = {}\n'.format(name, value))

     if isinstance(next_conf, str):
         with open(next_conf, 'r') as f:
@@ -209,6 +242,10 @@ def change_conf(next_conf=None):
     for name, value in recommendation.items():
         if dconf.DB_TYPE == 'oracle' and isinstance(value, str):
             value = value.strip('B')
+        # If innodb_flush_method is set to NULL on a Unix-like system,
+        # the fsync option is used by default.
+        if name == 'innodb_flush_method' and value == '':
+            value = "fsync"
         lines.append('{} = {}\n'.format(name, value))
     lines.append('\n')
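Note on the innodb_flush_method rewrite above: MySQL reports the knob as an empty string when it is unset, but an empty value written back into my.cnf is invalid, so the driver pins it to fsync, the documented default on Unix-like systems. A worked example of the loop body:

    recommendation = {'innodb_flush_method': ''}
    for name, value in recommendation.items():
        if name == 'innodb_flush_method' and value == '':
            value = 'fsync'
        print('{} = {}'.format(name, value))  # -> innodb_flush_method = fsync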
@@ -223,6 +260,10 @@ def change_conf(next_conf=None):

 @task
 def load_oltpbench():
+    if os.path.exists(dconf.OLTPBENCH_CONFIG) is False:
+        msg = 'oltpbench config {} does not exist, '.format(dconf.OLTPBENCH_CONFIG)
+        msg += 'please double check the option in driver_config.py'
+        raise Exception(msg)
     cmd = "./oltpbenchmark -b {} -c {} --create=true --load=true".\
         format(dconf.OLTPBENCH_BENCH, dconf.OLTPBENCH_CONFIG)
     with lcd(dconf.OLTPBENCH_HOME):  # pylint: disable=not-context-manager
@@ -231,6 +272,10 @@ def load_oltpbench():

 @task
 def run_oltpbench():
+    if os.path.exists(dconf.OLTPBENCH_CONFIG) is False:
+        msg = 'oltpbench config {} does not exist, '.format(dconf.OLTPBENCH_CONFIG)
+        msg += 'please double check the option in driver_config.py'
+        raise Exception(msg)
     cmd = "./oltpbenchmark -b {} -c {} --execute=true -s 5 -o outputfile".\
         format(dconf.OLTPBENCH_BENCH, dconf.OLTPBENCH_CONFIG)
     with lcd(dconf.OLTPBENCH_HOME):  # pylint: disable=not-context-manager
@@ -239,6 +284,10 @@ def run_oltpbench():

 @task
 def run_oltpbench_bg():
+    if os.path.exists(dconf.OLTPBENCH_CONFIG) is False:
+        msg = 'oltpbench config {} does not exist, '.format(dconf.OLTPBENCH_CONFIG)
+        msg += 'please double check the option in driver_config.py'
+        raise Exception(msg)
     cmd = "./oltpbenchmark -b {} -c {} --execute=true -s 5 -o outputfile > {} 2>&1 &".\
         format(dconf.OLTPBENCH_BENCH, dconf.OLTPBENCH_CONFIG, dconf.OLTPBENCH_LOG)
     with lcd(dconf.OLTPBENCH_HOME):  # pylint: disable=not-context-manager
@@ -247,8 +296,8 @@ def run_oltpbench_bg():

 @task
 def run_controller():
-    if not os.path.exists(dconf.CONTROLLER_CONFIG):
-        create_controller_config()
+    LOG.info('Controller config path: %s', dconf.CONTROLLER_CONFIG)
+    create_controller_config()
     cmd = 'gradle run -PappArgs="-c {} -d output/" --no-daemon > {}'.\
         format(dconf.CONTROLLER_CONFIG, dconf.CONTROLLER_LOG)
     with lcd(dconf.CONTROLLER_HOME):  # pylint: disable=not-context-manager
@@ -287,9 +336,9 @@ def save_next_config(next_config, t=None):

 @task
 def free_cache():
-    if dconf.HOST_CONN != 'docker':
+    if dconf.HOST_CONN not in ['docker', 'remote_docker']:
         with show('everything'), settings(warn_only=True):  # pylint: disable=not-context-manager
-            res = sudo("sh -c \"echo 3 > /proc/sys/vm/drop_caches\"")
+            res = sudo("sh -c \"echo 3 > /proc/sys/vm/drop_caches\"", remote_only=True)
         if res.failed:
             LOG.error('%s (return code %s)', res.stderr.strip(), res.return_code)
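Note: with the widened check, free_cache() is skipped entirely for the docker and remote_docker modes; when it does run, remote_only=True keeps the patched sudo() below from wrapping the command in docker exec, so the write to /proc/sys/vm/drop_caches always happens on a host kernel rather than inside a container.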
@@ -299,7 +348,6 @@ def upload_result(result_dir=None, prefix=None, upload_code=None):
     result_dir = result_dir or os.path.join(dconf.CONTROLLER_HOME, 'output')
     prefix = prefix or ''
     upload_code = upload_code or dconf.UPLOAD_CODE
-
     files = {}
     for base in ('summary', 'knobs', 'metrics_before', 'metrics_after'):
         fpath = os.path.join(result_dir, prefix + base + '.json')
@@ -451,7 +499,7 @@ def dump_database():
         LOG.info('%s already exists ! ', dumpfile)
         return False

-    if dconf.ORACLE_FLASH_BACK:
+    if dconf.DB_TYPE == 'oracle' and dconf.ORACLE_FLASH_BACK:
         LOG.info('create restore point %s for database %s in %s', dconf.RESTORE_POINT,
                  dconf.DB_NAME, dconf.RECOVERY_FILE_DEST)
     else:
@@ -469,6 +517,9 @@ def dump_database():
         run('PGPASSWORD={} pg_dump -U {} -h {} -F c -d {} > {}'.format(
             dconf.DB_PASSWORD, dconf.DB_USER, dconf.DB_HOST, dconf.DB_NAME,
             dumpfile))
+    elif dconf.DB_TYPE == 'mysql':
+        sudo('mysqldump --user={} --password={} --databases {} > {}'.format(
+            dconf.DB_USER, dconf.DB_PASSWORD, dconf.DB_NAME, dumpfile))
     else:
         raise Exception("Database Type {} Not Implemented !".format(dconf.DB_TYPE))
     return True
@@ -501,6 +552,8 @@ def restore_database():
         create_database()
         run('PGPASSWORD={} pg_restore -U {} -h {} -n public -j 8 -F c -d {} {}'.format(
             dconf.DB_PASSWORD, dconf.DB_USER, dconf.DB_HOST, dconf.DB_NAME, dumpfile))
+    elif dconf.DB_TYPE == 'mysql':
+        run('mysql --user={} --password={} < {}'.format(dconf.DB_USER, dconf.DB_PASSWORD, dumpfile))
     else:
         raise Exception("Database Type {} Not Implemented !".format(dconf.DB_TYPE))
     LOG.info('Finish restoring database')
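Note: the MySQL dump/restore pair is asymmetric with the Postgres one by design: mysqldump --databases emits CREATE DATABASE and USE statements into the dump, so the restore branch can pipe the file straight back through the mysql client without a separate create_database() call.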
@@ -603,7 +656,9 @@ def loop(i):
 def run_loops(max_iter=10):
     # dump database if it's not done before.
     dump = dump_database()
+    # put the BASE_DB_CONF in the config file
+    # e.g., mysql needs to set innodb_monitor_enable to track innodb metrics
+    reset_conf(False)
     for i in range(int(max_iter)):
         # restart database
         restart_succeeded = restart_database()
@@ -622,12 +677,14 @@ def run_loops(max_iter=10):

         # reload database periodically
         if dconf.RELOAD_INTERVAL > 0:
+            # wait 5 secs after restarting databases
+            time.sleep(5)
             if i % dconf.RELOAD_INTERVAL == 0:
                 if i == 0 and dump is False:
                     restore_database()
                 elif i > 0:
                     restore_database()

+        LOG.info('Wait %s seconds after restarting database', dconf.RESTART_SLEEP_SEC)
         time.sleep(dconf.RESTART_SLEEP_SEC)
         LOG.info('The %s-th Loop Starts / Total Loops %s', i + 1, max_iter)
         loop(i % dconf.RELOAD_INTERVAL if dconf.RELOAD_INTERVAL > 0 else i)
@@ -23,7 +23,7 @@ def load_driver_conf():
     if dconf.HOST_CONN == 'local':
         login_str = 'localhost'

-    elif dconf.HOST_CONN == 'remote':
+    elif dconf.HOST_CONN in ['remote', 'remote_docker']:
         if not dconf.LOGIN_HOST:
             raise ValueError("LOGIN_HOST must be set if HOST_CONN=remote")
@@ -65,7 +65,7 @@ def get_content(response):

 @task
-def run(cmd, capture=True, **kwargs):
+def run(cmd, capture=True, remote_only=False, **kwargs):
     capture = parse_bool(capture)

     try:
@@ -73,14 +73,23 @@ def run(cmd, capture=True, **kwargs):
             res = _run(cmd, **kwargs)
         elif dconf.HOST_CONN == 'local':
             res = local(cmd, capture=capture, **kwargs)
-        else:  # docker
+        else:  # docker or remote_docker
             opts = ''
             cmdd = cmd
             if cmd.endswith('&'):
                 cmdd = cmd[:-1].strip()
                 opts = '-d '
-            res = local('docker exec {} -ti {} /bin/bash -c "{}"'.format(
-                opts, dconf.CONTAINER_NAME, cmdd), capture=capture, **kwargs)
+            if remote_only:
+                docker_cmd = cmdd
+            else:
+                docker_cmd = 'docker exec {} -ti {} /bin/bash -c "{}"'.format(
+                    opts, dconf.CONTAINER_NAME, cmdd)
+            if dconf.HOST_CONN == 'docker':
+                res = local(docker_cmd, capture=capture, **kwargs)
+            elif dconf.HOST_CONN == 'remote_docker':
+                res = _run(docker_cmd, **kwargs)
+            else:
+                raise Exception('wrong HOST_CONN type {}'.format(dconf.HOST_CONN))
     except TypeError as e:
         err = str(e).strip()
         if 'unexpected keyword argument' in err:
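Note: a compact sketch (editor's addition, not part of the commit) of the dispatch the patched run() now implements; the same pattern is repeated in sudo() below:

    def wrap_for_host(cmd, host_conn, container, remote_only=False):
        # Wrap in docker exec unless the command targets the Docker host itself.
        if host_conn in ('docker', 'remote_docker') and not remote_only:
            cmd = 'docker exec -ti {} /bin/bash -c "{}"'.format(container, cmd)
        # 'remote' and 'remote_docker' execute over SSH; the rest run locally.
        where = 'ssh' if host_conn in ('remote', 'remote_docker') else 'local'
        return where, cmd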
@@ -93,7 +102,7 @@ def run(cmd, capture=True, remote_only=False, **kwargs):

 @task
-def sudo(cmd, user=None, capture=True, **kwargs):
+def sudo(cmd, user=None, capture=True, remote_only=False, **kwargs):
     capture = parse_bool(capture)

     if dconf.HOST_CONN == 'remote':
@@ -105,14 +114,22 @@ def sudo(cmd, user=None, capture=True, remote_only=False, **kwargs):
             pre_cmd += '-u {} '.format(user)
         res = local(pre_cmd + cmd, capture=capture, **kwargs)

-    else:  # docker
+    else:  # docker or remote_docker
         user = user or 'root'
         opts = '-ti -u {}'.format(user or 'root')
         if user == 'root':
             opts += ' -w /'
-        res = local('docker exec {} {} /bin/bash -c "{}"'.format(
-            opts, dconf.CONTAINER_NAME, cmd), capture=capture)
+        if remote_only:
+            docker_cmd = cmd
+        else:
+            docker_cmd = 'docker exec {} {} /bin/bash -c "{}"'.format(
+                opts, dconf.CONTAINER_NAME, cmd)
+        if dconf.HOST_CONN == 'docker':
+            res = local(docker_cmd, capture=capture, **kwargs)
+        elif dconf.HOST_CONN == 'remote_docker':
+            res = _sudo(docker_cmd, **kwargs)
+        else:
+            raise Exception('wrong HOST_CONN type {}'.format(dconf.HOST_CONN))
     return res
@@ -126,8 +143,15 @@ def get(remote_path, local_path, use_sudo=False):
         pre_cmd = 'sudo ' if use_sudo else ''
         opts = '-r' if os.path.isdir(remote_path) else ''
         res = local('{}cp {} {} {}'.format(pre_cmd, opts, remote_path, local_path))
-    else:  # docker
-        res = local('docker cp {}:{} {}'.format(dconf.CONTAINER_NAME, remote_path, local_path))
+    else:  # docker or remote_docker
+        docker_cmd = 'docker cp -L {}:{} {}'.format(dconf.CONTAINER_NAME, remote_path, local_path)
+        if dconf.HOST_CONN == 'docker':
+            res = local(docker_cmd)
+        elif dconf.HOST_CONN == 'remote_docker':
+            res = sudo(docker_cmd, remote_only=True)
+            res = _get(local_path, local_path, use_sudo)
+        else:
+            raise Exception('wrong HOST_CONN type {}'.format(dconf.HOST_CONN))
     return res

@@ -141,8 +165,15 @@ def put(local_path, remote_path, use_sudo=False):
         pre_cmd = 'sudo ' if use_sudo else ''
         opts = '-r' if os.path.isdir(local_path) else ''
         res = local('{}cp {} {} {}'.format(pre_cmd, opts, local_path, remote_path))
-    else:  # docker
-        res = local('docker cp {} {}:{}'.format(local_path, dconf.CONTAINER_NAME, remote_path))
+    else:  # docker or remote_docker
+        docker_cmd = 'docker cp -L {} {}:{}'.format(local_path, dconf.CONTAINER_NAME, remote_path)
+        if dconf.HOST_CONN == 'docker':
+            res = local(docker_cmd)
+        elif dconf.HOST_CONN == 'remote_docker':
+            res = _put(local_path, local_path, use_sudo=True)
+            res = sudo(docker_cmd, remote_only=True)
+        else:
+            raise Exception('wrong HOST_CONN type {}'.format(dconf.HOST_CONN))
     return res
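Note: for remote_docker, file transfer becomes a two-hop operation: get() first runs docker cp on the remote host (via sudo(..., remote_only=True)) to move the file out of the container, then fetches it from the host with fabric's get; put() mirrors the same two hops in reverse. The new -L flag makes docker cp follow symlinks inside the container.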
@@ -89,8 +89,11 @@ class BaseParser:
         return converted

     def convert_real(self, real_value, metadata):
-        return float(real_value)
+        try:
+            return float(real_value)
+        except ValueError:
+            raise Exception('Cannot convert knob {} from {} to float'.format(
+                metadata.name, real_value))

     def convert_string(self, string_value, metadata):
         return string_value
@@ -124,10 +127,10 @@ class BaseParser:

         if metadata.vartype == VarType.BOOL:
             if not self._check_knob_bool_val(value):
-                raise Exception('Knob boolean value not valid! '
+                raise Exception('Knob {} boolean value not valid! '
                                 'Boolean values should be one of: {}, '
                                 'but the actual value is: {}'
-                                .format(self.valid_boolean_val_to_string(),
+                                .format(name, self.valid_boolean_val_to_string(),
                                         str(value)))
             conv_value = self.convert_bool(value, metadata)

@@ -137,17 +140,17 @@ class BaseParser:
         elif metadata.vartype == VarType.INTEGER:
             conv_value = self.convert_integer(value, metadata)
             if not self._check_knob_num_in_range(conv_value, metadata):
-                raise Exception('Knob integer num value not in range! '
+                raise Exception('Knob {} integer num value not in range! '
                                 'min: {}, max: {}, actual: {}'
-                                .format(metadata.minval,
+                                .format(name, metadata.minval,
                                         metadata.maxval, str(conv_value)))

         elif metadata.vartype == VarType.REAL:
             conv_value = self.convert_real(value, metadata)
             if not self._check_knob_num_in_range(conv_value, metadata):
-                raise Exception('Knob real num value not in range! '
+                raise Exception('Knob {} real num value not in range! '
                                 'min: {}, max: {}, actual: {}'
-                                .format(metadata.minval,
+                                .format(name, metadata.minval,
                                         metadata.maxval, str(conv_value)))

         elif metadata.vartype == VarType.STRING:
|
||||||
'Invalid metric type: {}'.format(metric.metric_type))
|
'Invalid metric type: {}'.format(metric.metric_type))
|
||||||
return valid_metrics, diffs
|
return valid_metrics, diffs
|
||||||
|
|
||||||
def calculate_change_in_metrics(self, metrics_start, metrics_end, fix_metric_type=True):
|
def calculate_change_in_metrics(self, metrics_start, metrics_end,
|
||||||
|
fix_metric_type=True, allow_negative=True):
|
||||||
metric_catalog = {m.name: m for m in MetricCatalog.objects.filter(dbms__id=self.dbms_id)}
|
metric_catalog = {m.name: m for m in MetricCatalog.objects.filter(dbms__id=self.dbms_id)}
|
||||||
adjusted_metrics = {}
|
adjusted_metrics = {}
|
||||||
|
|
||||||
|
@ -350,13 +354,18 @@ class BaseParser:
|
||||||
if fix_metric_type:
|
if fix_metric_type:
|
||||||
if adj_val < 0:
|
if adj_val < 0:
|
||||||
adj_val = end_val
|
adj_val = end_val
|
||||||
LOG.debug("Changing metric %s from COUNTER to STATISTICS", met_name)
|
LOG.warning("Changing metric %s from COUNTER to STATISTICS", met_name)
|
||||||
met_info.metric_type = MetricType.STATISTICS
|
met_info.metric_type = MetricType.STATISTICS
|
||||||
met_info.save()
|
met_info.save()
|
||||||
assert adj_val >= 0, \
|
if allow_negative:
|
||||||
'{} wrong metric type: {} (start={}, end={}, diff={})'.format(
|
LOG.warning('%s metric type %s value is negative (start=%s, end=%s, diff=%s)',
|
||||||
met_name, MetricType.name(met_info.metric_type), start_val,
|
met_name, MetricType.name(met_info.metric_type), start_val, end_val,
|
||||||
end_val, end_val - start_val)
|
end_val - start_val)
|
||||||
|
else:
|
||||||
|
assert adj_val >= 0, \
|
||||||
|
'{} wrong metric type: {} (start={}, end={}, diff={})'.format(
|
||||||
|
met_name, MetricType.name(met_info.metric_type), start_val,
|
||||||
|
end_val, end_val - start_val)
|
||||||
|
|
||||||
adjusted_metrics[met_name] = adj_val
|
adjusted_metrics[met_name] = adj_val
|
||||||
else:
|
else:
|
||||||
|
|
|
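Note: allow_negative exists because cumulative COUNTER metrics can legitimately move backwards, e.g. when the server restarts between the two observations. A small illustration (editor's sketch, simplified from the code above):

    metrics_start = {'innodb_metrics.trx_rw_commits': 100000}
    metrics_end = {'innodb_metrics.trx_rw_commits': 250}  # counter reset by a restart
    diff = (metrics_end['innodb_metrics.trx_rw_commits']
            - metrics_start['innodb_metrics.trx_rw_commits'])
    # diff == -99750: with allow_negative=True this is logged as a warning
    # instead of tripping the assert and failing the whole result upload.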
@@ -87,10 +87,11 @@ class TargetObjectives:
         from ..myrocks.target_objective import target_objective_list as _myrocks_list  # pylint: disable=import-outside-toplevel
         from ..oracle.target_objective import target_objective_list as _oracle_list  # pylint: disable=import-outside-toplevel
         from ..postgres.target_objective import target_objective_list as _postgres_list  # pylint: disable=import-outside-toplevel
+        from ..mysql.target_objective import target_objective_list as _mysql_list  # pylint: disable=import-outside-toplevel

         if not self.registered():
             LOG.info('Registering target objectives...')
-            full_list = _myrocks_list + _oracle_list + _postgres_list
+            full_list = _myrocks_list + _oracle_list + _postgres_list + _mysql_list
             for dbms_type, target_objective_instance in full_list:
                 dbmss = models.DBMSCatalog.objects.filter(type=dbms_type)
                 name = target_objective_instance.name
@@ -0,0 +1,53 @@
+#
+# OtterTune - parser.py
+#
+# Copyright (c) 2017-18, Carnegie Mellon University Database Group
+#
+
+from website.types import KnobUnitType
+from website.utils import ConversionUtil
+from ..base.parser import BaseParser  # pylint: disable=relative-beyond-top-level
+
+
+# pylint: disable=no-self-use
+class MysqlParser(BaseParser):
+
+    def __init__(self, dbms_obj):
+        super().__init__(dbms_obj)
+        self.bytes_system = (
+            (1024 ** 4, 'T'),
+            (1024 ** 3, 'G'),
+            (1024 ** 2, 'M'),
+            (1024 ** 1, 'k'),
+        )
+        self.time_system = None
+        self.min_bytes_unit = 'k'
+        self.valid_true_val = ("on", "true", "yes", '1', 'enabled')
+        self.valid_false_val = ("off", "false", "no", '0', 'disabled')
+
+    def convert_integer(self, int_value, metadata):
+        # Collected knobs/metrics do not show unit, convert to int directly
+        if len(str(int_value)) == 0:
+            # The value collected from the database is empty
+            return 0
+        try:
+            try:
+                converted = int(int_value)
+            except ValueError:
+                converted = int(float(int_value))
+        except ValueError:
+            raise Exception('Invalid integer format for {}: {}'.format(
+                metadata.name, int_value))
+        return converted
+
+    def format_integer(self, int_value, metadata):
+        int_value = int(round(int_value))
+        if int_value > 0 and metadata.unit == KnobUnitType.BYTES:
+            int_value = ConversionUtil.get_human_readable2(
+                int_value, self.bytes_system, self.min_bytes_unit)
+        return int_value
+
+    def parse_version_string(self, version_string):
+        s = version_string.split('.')[0] + '.' + version_string.split('.')[1]
+        return s
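Note: the bytes_system table drives how byte-valued knob recommendations are rendered back into MySQL syntax. A worked example in the spirit of ConversionUtil.get_human_readable2 (whose exact behavior is assumed here; this is only a sketch):

    def to_mysql_bytes(value):
        # Pick the largest unit the value reaches, down to the 'k' minimum.
        for factor, suffix in ((1024 ** 4, 'T'), (1024 ** 3, 'G'),
                               (1024 ** 2, 'M'), (1024, 'k')):
            if value >= factor:
                return '{:g}{}'.format(value / factor, suffix)
        return str(value)

    assert to_mysql_bytes(134217728) == '128M'  # e.g. an innodb_buffer_pool_size value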
@@ -0,0 +1,14 @@
+#
+# OtterTune - target_objective.py
+#
+# Copyright (c) 2017-18, Carnegie Mellon University Database Group
+#
+
+from website.types import DBMSType
+from ..base.target_objective import BaseThroughput  # pylint: disable=relative-beyond-top-level
+
+target_objective_list = tuple((DBMSType.MYSQL, target_obj) for target_obj in [  # pylint: disable=invalid-name
+    BaseThroughput(transactions_counter=('innodb_metrics.trx_rw_commits',
+                                         'innodb_metrics.trx_ro_commits',
+                                         'innodb_metrics.trx_nl_ro_commits'))
+])
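Note: the MySQL target objective reuses BaseThroughput over the three InnoDB commit counters collected under innodb_metrics. Conceptually (editor's sketch; BaseThroughput's internals are assumed, not shown in this commit):

    def throughput(metrics_start, metrics_end, observation_time):
        counters = ('innodb_metrics.trx_rw_commits',
                    'innodb_metrics.trx_ro_commits',
                    'innodb_metrics.trx_nl_ro_commits')
        commits = sum(metrics_end[c] - metrics_start[c] for c in counters)
        return commits / observation_time  # committed transactions per second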
@@ -10,6 +10,7 @@ from website.types import DBMSType
 from .myrocks.parser import MyRocksParser
 from .postgres.parser import PostgresParser
 from .oracle.parser import OracleParser
+from .mysql.parser import MysqlParser

 _DBMS_PARSERS = {}

@@ -25,6 +26,8 @@ def _get(dbms_id):
         clz = MyRocksParser
     elif obj.type == DBMSType.ORACLE:
         clz = OracleParser
+    elif obj.type == DBMSType.MYSQL:
+        clz = MysqlParser
     else:
         raise NotImplementedError('Implement me! {}'.format(obj))
@@ -39,6 +39,30 @@
          "version":"5.6"
       }
    },
+   {
+      "model":"website.DBMSCatalog",
+      "pk":11,
+      "fields":{
+         "type":1,
+         "version":"5.6"
+      }
+   },
+   {
+      "model":"website.DBMSCatalog",
+      "pk":12,
+      "fields":{
+         "type":1,
+         "version":"5.7"
+      }
+   },
+   {
+      "model":"website.DBMSCatalog",
+      "pk":13,
+      "fields":{
+         "type":1,
+         "version":"8.0"
+      }
+   },
    {
       "model":"website.DBMSCatalog",
       "pk":121,

File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -59,7 +59,7 @@ class Migration(migrations.Migration):
                 ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                 ('name', models.CharField(max_length=128)),
                 ('vartype', models.IntegerField(choices=[(1, 'STRING'), (2, 'INTEGER'), (3, 'REAL'), (4, 'BOOL'), (5, 'ENUM'), (6, 'TIMESTAMP')], verbose_name='variable type')),
-                ('unit', models.IntegerField(choices=[(1, 'bytes'), (2, 'milliseconds'), (3, 'other')])),
+                ('unit', models.IntegerField(choices=[(1, 'bytes'), (2, 'milliseconds'), (3, 'other'), (4, 'microseconds'), (5, 'seconds')])),
                 ('category', models.TextField(null=True)),
                 ('summary', models.TextField(null=True, verbose_name='description')),
                 ('description', models.TextField(null=True)),
@@ -25,6 +25,8 @@ def load_initial_data(apps, schema_editor):
         "oracle-121_metrics.json",
         "oracle-19_knobs.json",
         "oracle-19_metrics.json",
+        "mysql-56_knobs.json",
+        "mysql-56_metrics.json",
     ]
     for fixture in initial_data_fixtures:
         call_command("loaddata", fixture, app_label="website")
@@ -35,6 +35,17 @@ DEFAULT_TUNABLE_KNOBS = {
         "global.shared_pool_size",
         "global.sort_area_size",
     },
+    DBMSType.MYSQL: {
+        "global.innodb_buffer_pool_size",
+        "global.innodb_thread_sleep_delay",
+        "global.innodb_flush_method",
+        "global.innodb_log_file_size",
+        "global.innodb_max_dirty_pages_pct_lwm",
+        "global.innodb_read_ahead_threshold",
+        "global.innodb_adaptive_max_sleep_delay",
+        "global.innodb_buffer_pool_instances",
+        "global.thread_cache_size",
+    },
 }

 # Bytes in a GB
@@ -130,11 +130,15 @@ class KnobUnitType(BaseType):
     BYTES = 1
     MILLISECONDS = 2
     OTHER = 3
+    MICROSECONDS = 4
+    SECONDS = 5

     TYPE_NAMES = {
         BYTES: 'bytes',
         MILLISECONDS: 'milliseconds',
         OTHER: 'other',
+        MICROSECONDS: 'microseconds',
+        SECONDS: 'seconds',
     }
@@ -611,8 +611,19 @@ def handle_result_files(session, files, execution_times=None):
         dbms = DBMSCatalog.objects.get(
             type=dbms_type, version=dbms_version)
     except ObjectDoesNotExist:
-        return HttpResponse('{} v{} is not yet supported.'.format(
-            dbms_type, dbms_version))
+        try:
+            dbms_version = parser.parse_version_string(dbms_type, dbms_version)
+        except Exception:  # pylint: disable=broad-except
+            LOG.warning('Cannot parse dbms version %s', dbms_version)
+            return HttpResponse('{} v{} is not yet supported.'.format(
+                dbms_type, dbms_version))
+        try:
+            # Check that we support this DBMS and version
+            dbms = DBMSCatalog.objects.get(
+                type=dbms_type, version=dbms_version)
+        except ObjectDoesNotExist:
+            return HttpResponse('{} v{} is not yet supported.'.format(
+                dbms_type, dbms_version))

     if dbms != session.dbms:
         return HttpResponse('The DBMS must match the type and version '
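Note: the retry path above depends on the parser collapsing a full server version string to major.minor so it can match one of the new DBMSCatalog entries (pk 11-13); for MySQL that is the new MysqlParser.parse_version_string:

    version_string = '5.7.22'  # hypothetical value reported by the server
    major_minor = version_string.split('.')[0] + '.' + version_string.split('.')[1]
    assert major_minor == '5.7'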