Support adding custom target objectives to the website/db/*/target_objective.py modules

This commit is contained in:
Dana Van Aken 2019-10-08 19:26:38 -04:00
parent f68c23e975
commit 01b4ec3f53
18 changed files with 304 additions and 203 deletions

View File

@ -7,8 +7,7 @@
from abc import ABCMeta, abstractmethod
import mock
from django.test import TestCase
from website.db import parser
# from website.db.parser.postgres import PostgresParser
from website.db import parser, target_objectives
from website.types import BooleanType, DBMSType, VarType, KnobUnitType, MetricType
from website.models import DBMSCatalog, KnobCatalog
@ -254,18 +253,23 @@ class PostgresParserTests(BaseParserTests, TestCase):
def test_convert_dbms_metrics(self):
super().test_convert_dbms_metrics()
target_obj = target_objectives.THROUGHPUT
target_obj_instance = target_objectives.get_target_objective_instance(
self.test_dbms.dbms_id, target_obj)
txns_counter = target_obj_instance.transactions_counter
test_metrics = {}
for key in list(self.test_dbms.numeric_metric_catalog_.keys()):
test_metrics[key] = 2
test_metrics['pg_stat_database.xact_commit'] = 10
test_metrics[txns_counter] = 10
test_metrics['pg_FAKE_METRIC'] = 0
self.assertEqual(test_metrics.get('throughput_txn_per_sec'), None)
self.assertEqual(test_metrics.get(target_obj), None)
test_convert_metrics = self.test_dbms.convert_dbms_metrics(test_metrics, 0.1)
test_convert_metrics = self.test_dbms.convert_dbms_metrics(test_metrics, 0.1, target_obj)
for key, metadata in list(self.test_dbms.numeric_metric_catalog_.items()):
if (key == self.test_dbms.transactions_counter):
if key == txns_counter:
self.assertEqual(test_convert_metrics[key], 10 / 0.1)
continue
if metadata.metric_type == MetricType.COUNTER:
@ -273,12 +277,9 @@ class PostgresParserTests(BaseParserTests, TestCase):
else: # MetricType.STATISTICS
self.assertEqual(test_convert_metrics[key], 2)
self.assertEqual(test_convert_metrics['throughput_txn_per_sec'], 100)
self.assertEqual(test_convert_metrics[target_obj], 100)
self.assertEqual(test_convert_metrics.get('pg_FAKE_METRIC'), None)
def test_properties(self):
self.assertEqual(self.test_dbms.transactions_counter, 'pg_stat_database.xact_commit')
def test_parse_version_string(self):
self.assertTrue(self.test_dbms.parse_version_string("9.6.1"), "9.6")
self.assertTrue(self.test_dbms.parse_version_string("9.6.3"), "9.6")

View File

@ -14,6 +14,7 @@ from django.core.urlresolvers import reverse
from django.test import TestCase
from .utils import (TEST_BASIC_SESSION_ID, TEST_PASSWORD, TEST_PROJECT_ID, TEST_USERNAME)
from website.db import target_objectives
class UserAuthViewTests(TestCase):
@ -142,6 +143,7 @@ class SessionViewsTests(TestCase):
'name': 'test_create_basic_session',
'description': 'testing create basic session...',
'tuning_session': 'no_tuning_session',
'target_objective': target_objectives.get_default_target_objective(),
'algorithm': 1,
'cpu': '2',
'memory': '16',

View File

@ -3,3 +3,5 @@
#
# Copyright (c) 2017-18, Carnegie Mellon University Database Group
#
from .base.target_objective import target_objectives

View File

@ -8,12 +8,14 @@ from collections import OrderedDict
from website.models import KnobCatalog, KnobUnitType, MetricCatalog
from website.types import BooleanType, MetricType, VarType
from website.utils import ConversionUtil
from .. import target_objectives
# pylint: disable=no-self-use
class BaseParser:
def __init__(self, dbms_obj):
self.dbms_id = int(dbms_obj.pk)
knobs = KnobCatalog.objects.filter(dbms=dbms_obj)
self.knob_catalog_ = {k.name: k for k in knobs}
self.tunable_knob_catalog_ = {
@ -37,26 +39,6 @@ class BaseParser:
self.min_bytes_unit = 'kB'
self.min_time_unit = 'ms'
@property
def transactions_counter(self):
raise NotImplementedError()
@property
def latency_timer(self):
raise NotImplementedError()
def target_metric(self, target_objective=None):
if target_objective == 'throughput_txn_per_sec' or target_objective is None:
# throughput
res = self.transactions_counter
elif target_objective == '99th_lat_ms':
# 99 percentile latency
res = self.latency_timer
else:
raise Exception("Target Objective {} Not Supported".format(target_objective))
return res
def parse_version_string(self, version_string):
return version_string
@ -178,38 +160,47 @@ class BaseParser:
return knob_data
def _check_knob_num_in_range(self, value, mdata):
return value >= float(mdata.minval) and value <= float(mdata.maxval)
return float(mdata.minval) <= value <= float(mdata.maxval)
def _check_knob_bool_val(self, value):
if isinstance(str, value):
value = value.lower()
return value in self.valid_true_val or value in self.valid_false_val
def convert_dbms_metrics(self, metrics, observation_time, target_objective=None):
# if len(metrics) != len(self.numeric_metric_catalog_):
# raise Exception('The number of metrics should be equal!')
def convert_dbms_metrics(self, metrics, observation_time, target_objective):
metric_data = {}
for name, metadata in list(self.numeric_metric_catalog_.items()):
# Same as metric_data except COUNTER metrics are not divided by the time
base_metric_data = {}
for name, metadata in self.numeric_metric_catalog_.items():
value = metrics[name]
if metadata.metric_type == MetricType.COUNTER:
converted = self.convert_integer(value, metadata)
metric_data[name] = float(converted) / observation_time
elif metadata.metric_type == MetricType.STATISTICS:
converted = self.convert_integer(value, metadata)
metric_data[name] = float(converted)
if metadata.vartype == VarType.INTEGER:
converted = float(self.convert_integer(value, metadata))
elif metadata.vartype == VarType.REAL:
converted = self.convert_real(value, metadata)
else:
raise Exception(
raise ValueError(
("Found non-numeric metric '{}' in the numeric "
"metric catalog: value={}, type={}").format(
name, value, VarType.name(metadata.vartype)))
if metadata.metric_type == MetricType.COUNTER:
assert isinstance(converted, float)
base_metric_data[name] = converted
metric_data[name] = converted / observation_time
elif metadata.metric_type == MetricType.STATISTICS:
assert isinstance(converted, float)
base_metric_data[name] = converted
metric_data[name] = converted
else:
raise ValueError(
'Unknown metric type for {}: {}'.format(name, metadata.metric_type))
if target_objective is not None and self.target_metric(target_objective) not in metric_data:
raise Exception("Cannot find objective function")
if target_objective is not None:
metric_data[target_objective] = metric_data[self.target_metric(target_objective)]
else:
# default
metric_data['throughput_txn_per_sec'] = \
metric_data[self.target_metric(target_objective)]
target_objective_instance = target_objectives.get_target_objective_instance(
self.dbms_id, target_objective)
metric_data[target_objective] = target_objective_instance.compute(
base_metric_data, observation_time)
return metric_data
@ -355,9 +346,6 @@ class BaseParser:
enumvals = metadata.enumvals.split(',')
return enumvals[int(round(enum_value))]
# def format_integer(self, int_value, metadata):
# return int(round(int_value))
def format_integer(self, int_value, metadata):
int_value = int(round(int_value))
if metadata.unit != KnobUnitType.OTHER and int_value > 0:

View File

@ -0,0 +1,129 @@
#
# OtterTune - target_objective.py
#
# Copyright (c) 2017-18, Carnegie Mellon University Database Group
#
import logging
from collections import OrderedDict
from website import models, types
LOG = logging.getLogger(__name__)

# Direction of performance improvement
LESS_IS_BETTER = '(less is better)'
MORE_IS_BETTER = '(more is better)'

# Canonical name of the default (throughput) target objective
THROUGHPUT = 'throughput_txn_per_sec'


class BaseMetric:
    """Display metadata for a single numeric DBMS metric.

    Holds the metric's name, pretty-print label, units, and the direction in
    which a change counts as an improvement.
    """

    # '' means "no improvement direction" (plain metrics that are not objectives).
    _improvement_choices = (LESS_IS_BETTER, MORE_IS_BETTER, '')

    def __init__(self, name, pprint=None, unit='events / second', short_unit='events/sec',
                 improvement='', scale=1):
        if improvement not in self._improvement_choices:
            valid = ', '.join("'{}'".format(ic) for ic in self._improvement_choices)
            raise ValueError("Improvement must be one of: {}".format(valid))
        if scale != 1:
            # Value scaling is not supported yet.
            raise NotImplementedError()
        self.name = name
        # Fall back to the raw metric name when no pretty label is given.
        self.pprint = pprint if pprint else name
        self.unit = unit
        self.short_unit = short_unit
        self.improvement = improvement
        self.scale = scale
class BaseTargetObjective(BaseMetric):
    """Abstract base for tuning target objectives.

    Unlike a plain metric, a target objective requires every display field
    explicitly and must declare an improvement direction; subclasses supply
    the actual computation in compute().
    """

    _improvement_choices = (LESS_IS_BETTER, MORE_IS_BETTER)

    def __init__(self, name, pprint, unit, short_unit, improvement, scale=1):
        super().__init__(name=name, pprint=pprint, unit=unit, short_unit=short_unit,
                         improvement=improvement, scale=scale)

    def compute(self, metrics, observation_time):
        """Return the objective value derived from *metrics* over *observation_time*.

        Must be overridden by concrete objectives.
        """
        raise NotImplementedError()
class BaseThroughput(BaseTargetObjective):
    """Throughput objective: committed transactions per second."""

    def __init__(self, transactions_counter):
        # transactions_counter: DBMS-specific name of the cumulative
        # transaction-count metric (e.g. 'pg_stat_database.xact_commit').
        super().__init__(name=THROUGHPUT, pprint='Throughput',
                         unit='transactions / second', short_unit='txn/sec',
                         improvement=MORE_IS_BETTER)
        self.transactions_counter = transactions_counter

    def compute(self, metrics, observation_time):
        # Rate = transaction count over the observation window, in seconds.
        txn_count = metrics[self.transactions_counter]
        return float(txn_count) / observation_time
class TargetObjectives:
    """Registry of target objectives, indexed by DBMS id and objective name.

    Populated lazily on first lookup via register(), which pulls the
    per-DBMS objective lists from the myrocks/oracle/postgres submodules.
    """

    # Re-export the direction constants for convenient access via the singleton.
    LESS_IS_BETTER = LESS_IS_BETTER
    MORE_IS_BETTER = MORE_IS_BETTER
    THROUGHPUT = THROUGHPUT

    def __init__(self):
        # {dbms_id: {objective_name: BaseTargetObjective instance}}
        self._registry = {}
        # {dbms_id: [(metric_name, BaseMetric), ...]} for non-objective metrics
        self._metric_metadatas = {}
        self._default_target_objective = THROUGHPUT

    def register(self):
        """Index every DBMS's target objectives; no-op once registered."""
        # Imported here (not at module level) to avoid circular imports: the
        # per-DBMS modules import from this package's base module.
        from ..myrocks.target_objective import target_objective_list as _myrocks_list
        from ..oracle.target_objective import target_objective_list as _oracle_list
        from ..postgres.target_objective import target_objective_list as _postgres_list

        if self.registered():
            return
        LOG.info('Registering target objectives...')
        for dbms_type, instance in _myrocks_list + _oracle_list + _postgres_list:
            # One objective entry may apply to several catalog rows (versions)
            # of the same DBMS type.
            for dbms in models.DBMSCatalog.objects.filter(type=dbms_type):
                dbms_id = int(dbms.pk)
                self._registry.setdefault(dbms_id, {})[instance.name] = instance
                if dbms_id not in self._metric_metadatas:
                    numeric_metrics = models.MetricCatalog.objects.filter(dbms=dbms).exclude(
                        metric_type=types.MetricType.INFO).values_list('name', flat=True)
                    self._metric_metadatas[dbms_id] = [
                        (mname, BaseMetric(mname)) for mname in sorted(numeric_metrics)]

    def registered(self):
        """True once register() has populated the registry."""
        return len(self._registry) > 0

    def get_metric_metadata(self, dbms_id, target_objective):
        """Return an OrderedDict of metric metadata with the objective first."""
        if not self.registered():
            self.register()
        dbms_id = int(dbms_id)
        entries = [(target_objective, self._registry[dbms_id][target_objective])]
        entries.extend(self._metric_metadatas[dbms_id])
        return OrderedDict(entries)

    def get_default_target_objective(self):
        return self._default_target_objective

    def get_target_objective_instance(self, dbms_id, target_objective):
        """Look up the objective instance registered for this DBMS."""
        if not self.registered():
            self.register()
        return self._registry[int(dbms_id)][target_objective]

    def __repr__(self):
        s = 'TargetObjectives = (\n'
        for dbms_id, entry in self._registry.items():  # pylint: disable=not-an-iterable
            s += ' {}:\n'.format(models.DBMSCatalog.objects.get(id=dbms_id).full_name)
            for name in entry.keys():
                s += ' {}\n'.format(name)
        s += ')\n'
        return s


target_objectives = TargetObjectives()  # pylint: disable=invalid-name

View File

@ -8,19 +8,12 @@ import re
from collections import OrderedDict
from ..base.parser import BaseParser
from .. import target_objectives
from website.types import MetricType, VarType
class MyRocksParser(BaseParser):
@property
def transactions_counter(self):
return 'session_status.questions'
@property
def latency_timer(self):
raise NotImplementedError()
def parse_version_string(self, version_string):
dbms_version = version_string.split(',')[0]
return re.search(r'\d+\.\d+(?=\.\d+)', dbms_version).group(0)
@ -145,28 +138,42 @@ class MyRocksParser(BaseParser):
valid_metrics, self.metric_catalog_, default_value='0')
return valid_metrics, diffs
def convert_dbms_metrics(self, metrics, observation_time, target_objective=None):
def convert_dbms_metrics(self, metrics, observation_time, target_objective):
base_metric_data = {}
metric_data = {}
for name, value in list(metrics.items()):
prt_name = self.partial_name(name)
if prt_name in self.numeric_metric_catalog_:
metadata = self.numeric_metric_catalog_[prt_name]
if metadata.metric_type == MetricType.COUNTER:
converted = self.convert_integer(value, metadata)
metric_data[name] = float(converted) / observation_time
if metadata.vartype == VarType.INTEGER:
converted = float(self.convert_integer(value, metadata))
elif metadata.vartype == VarType.REAL:
converted = self.convert_real(value, metadata)
else:
raise Exception('Unknown metric type for {}: {}'.format(
name, metadata.metric_type))
raise ValueError(
("Found non-numeric metric '{}' in the numeric "
"metric catalog: value={}, type={}").format(
name, value, VarType.name(metadata.vartype)))
if target_objective is not None and self.target_metric(target_objective) not in metric_data:
raise Exception("Cannot find objective function")
if metadata.metric_type == MetricType.COUNTER:
assert isinstance(converted, float)
base_metric_data[name] = converted
metric_data[name] = converted / observation_time
elif metadata.metric_type == MetricType.STATISTICS:
assert isinstance(converted, float)
base_metric_data[name] = converted
metric_data[name] = converted
else:
raise ValueError(
'Unknown metric type for {}: {}'.format(name, metadata.metric_type))
target_objective_instance = target_objectives.get_target_objective_instance(
self.dbms_id, target_objective)
metric_data[target_objective] = target_objective_instance.compute(
base_metric_data, observation_time)
if target_objective is not None:
metric_data[target_objective] = metric_data[self.target_metric(target_objective)]
else:
# default
metric_data['throughput_txn_per_sec'] = \
metric_data[self.target_metric(target_objective)]
return metric_data
def convert_dbms_knobs(self, knobs):

View File

@ -0,0 +1,12 @@
#
# OtterTune - target_objective.py
#
# Copyright (c) 2017-18, Carnegie Mellon University Database Group
#
from ..base.target_objective import BaseThroughput
from website.types import DBMSType
# Target objectives available for MyRocks, each tagged with the DBMS type.
target_objective_list = (  # pylint: disable=invalid-name
    (DBMSType.MYROCKS, BaseThroughput(transactions_counter='session_status.questions')),
)

View File

@ -20,11 +20,3 @@ class OracleParser(BaseParser):
(1024 ** 1, 'k'),
)
self.min_bytes_unit = 'k'
@property
def transactions_counter(self):
return 'global.user commits'
@property
def latency_timer(self):
raise NotImplementedError()

View File

@ -0,0 +1,27 @@
#
# OtterTune - target_objective.py
#
# Copyright (c) 2017-18, Carnegie Mellon University Database Group
#
from ..base.target_objective import BaseTargetObjective, BaseThroughput, LESS_IS_BETTER
from website.types import DBMSType
class DBTime(BaseTargetObjective):
def __init__(self):
super().__init__(name='db_time', pprint='DB Time', unit='milliseconds', short_unit='ms',
improvement=LESS_IS_BETTER)
def compute(self, metrics, observation_time):
metric_names = ('global.db cpu', 'global.cursor: pin s wait on x.time_waited',
'global.user i/o wait time')
db_time = float(sum(metrics[mname] for mname in metric_names)) / observation_time
return db_time
# Target objectives available for Oracle, each tagged with the DBMS type.
target_objective_list = tuple(  # pylint: disable=invalid-name
    (DBMSType.ORACLE, objective)
    for objective in (
        BaseThroughput(transactions_counter='global.user commits'),
        DBTime(),
    )
)

View File

@ -52,7 +52,7 @@ def convert_dbms_knobs(dbms_id, knobs):
return _get(dbms_id).convert_dbms_knobs(knobs)
def convert_dbms_metrics(dbms_id, numeric_metrics, observation_time, target_objective=None):
def convert_dbms_metrics(dbms_id, numeric_metrics, observation_time, target_objective):
return _get(dbms_id).convert_dbms_metrics(
numeric_metrics, observation_time, target_objective)

View File

@ -19,14 +19,6 @@ class PostgresParser(BaseParser):
self.bytes_system = [(f, s) for f, s in ConversionUtil.DEFAULT_BYTES_SYSTEM
if s in ('TB', 'GB', 'MB', 'kB')]
@property
def transactions_counter(self):
return 'pg_stat_database.xact_commit'
@property
def latency_timer(self):
raise NotImplementedError()
def parse_version_string(self, version_string):
dbms_version = version_string.split(',')[0]
return re.search(r'\d+\.\d+(?=\.\d+)', dbms_version).group(0)

View File

@ -0,0 +1,12 @@
#
# OtterTune - target_objective.py
#
# Copyright (c) 2017-18, Carnegie Mellon University Database Group
#
from ..base.target_objective import BaseThroughput
from website.types import DBMSType
# Target objectives available for Postgres, each tagged with the DBMS type.
target_objective_list = (  # pylint: disable=invalid-name
    (DBMSType.POSTGRES, BaseThroughput(transactions_counter='pg_stat_database.xact_commit')),
)

View File

@ -31,7 +31,7 @@
"project": 1,
"upload_code": "1234567890",
"tuning_session": "no_tuning_session",
"target_objective": null,
"target_objective": "throughput_txn_per_sec",
"creation_time": "2017-11-30T02:00:49.611Z",
"last_update": "2017-11-30T02:00:49.611Z"
},

File diff suppressed because one or more lines are too long

View File

@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2019-10-08 03:47
# Generated by Django 1.10.1 on 2019-10-08 21:38
from __future__ import unicode_literals
from django.conf import settings
@ -184,7 +184,7 @@ class Migration(migrations.Migration):
('last_update', models.DateTimeField()),
('upload_code', models.CharField(max_length=30, unique=True)),
('tuning_session', models.CharField(choices=[('tuning_session', 'Tuning Session'), ('no_tuning_session', 'No Tuning'), ('randomly_generate', 'Randomly Generate')], default='tuning_session', max_length=64, verbose_name='session type')),
('target_objective', models.CharField(choices=[('throughput_txn_per_sec', 'Throughput'), ('99th_lat_ms', '99 Percentile Latency')], max_length=64, null=True)),
('target_objective', models.CharField(default='throughput_txn_per_sec', max_length=64)),
('dbms', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='website.DBMSCatalog')),
('hardware', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='website.Hardware')),
('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='website.Project')),

View File

@ -3,12 +3,13 @@
#
# Copyright (c) 2017-18, Carnegie Mellon University Database Group
#
from collections import namedtuple, OrderedDict
from collections import OrderedDict
from django.contrib.auth.models import User
from django.db import models, DEFAULT_DB_ALIAS
from django.utils.timezone import now
from .db import target_objectives
from .types import (DBMSType, LabelStyleType, MetricType, KnobUnitType,
PipelineTaskType, VarType, KnobResourceType,
WorkloadStatusType, AlgorithmType, StorageType)
@ -84,65 +85,7 @@ class KnobCatalog(BaseModel):
resource = models.IntegerField(choices=KnobResourceType.choices(), default=4)
MetricMeta = namedtuple('MetricMeta',
['name', 'pprint', 'unit', 'short_unit', 'scale', 'improvement'])
class MetricManager(models.Manager):
# Direction of performance improvement
LESS_IS_BETTER = '(less is better)'
MORE_IS_BETTER = '(more is better)'
# Possible objective functions
THROUGHPUT = 'throughput_txn_per_sec'
THROUGHPUT_META = (THROUGHPUT, 'Throughput',
'transactions / second',
'txn/sec', 1, MORE_IS_BETTER)
LATENCY_99 = '99th_lat_ms'
LATENCY_99_META = (LATENCY_99, '99 Percentile Latency',
'milliseconds', 'ms', 1, LESS_IS_BETTER)
# Objective function metric metadata
OBJ_META = {THROUGHPUT: THROUGHPUT_META, LATENCY_99: LATENCY_99_META}
@staticmethod
def get_default_metrics(target_objective=None):
# get the target_objective, return the default one if target_objective is None
if target_objective is not None:
default_metrics = [target_objective]
else:
default_metrics = [MetricManager.get_default_objective_function()]
return default_metrics
@staticmethod
def get_default_objective_function():
return MetricManager.THROUGHPUT
@staticmethod
def get_metric_meta(dbms, target_objective=None):
numeric_metric_names = MetricCatalog.objects.filter(dbms=dbms).exclude(
metric_type=MetricType.INFO).values_list('name', flat=True)
numeric_metrics = {}
for metname in numeric_metric_names:
numeric_metrics[metname] = MetricMeta(
metname, metname, 'events / second', 'events/sec', 1, '')
sorted_metrics = [(mname, mmeta) for mname, mmeta in
sorted(numeric_metrics.items())]
if target_objective is not None:
mname = target_objective
else:
mname = MetricManager.get_default_objective_function()
mmeta = MetricManager.OBJ_META[mname]
sorted_metrics.insert(0, (mname, MetricMeta(*mmeta)))
return OrderedDict(sorted_metrics)
class MetricCatalog(BaseModel):
objects = MetricManager()
dbms = models.ForeignKey(DBMSCatalog)
name = models.CharField(max_length=128)
vartype = models.IntegerField(choices=VarType.choices())
@ -187,6 +130,13 @@ class Hardware(BaseModel):
class Session(BaseModel):
TUNING_OPTIONS = OrderedDict([
("tuning_session", "Tuning Session"),
("no_tuning_session", "No Tuning"),
("randomly_generate", "Randomly Generate")
])
user = models.ForeignKey(User)
name = models.CharField(max_length=64, verbose_name="session name")
description = models.TextField(null=True, blank=True)
@ -204,24 +154,16 @@ class Session(BaseModel):
last_update = models.DateTimeField()
upload_code = models.CharField(max_length=30, unique=True)
TUNING_OPTIONS = OrderedDict([
("tuning_session", "Tuning Session"),
("no_tuning_session", "No Tuning"),
("randomly_generate", "Randomly Generate")
])
tuning_session = models.CharField(choices=TUNING_OPTIONS.items(),
max_length=64, default='tuning_session',
verbose_name="session type")
verbose_name='session type')
TARGET_OBJECTIVES = [
('throughput_txn_per_sec', 'Throughput'),
('99th_lat_ms', '99 Percentile Latency')
]
target_objective = models.CharField(choices=TARGET_OBJECTIVES, max_length=64, null=True)
target_objective = models.CharField(
max_length=64, default=target_objectives.get_default_target_objective())
def clean(self):
if self.target_objective is None:
self.target_objective = MetricManager.get_default_objective_function()
self.target_objective = target_objectives.get_default_target_objective()
def delete(self, using=DEFAULT_DB_ALIAS, keep_parents=False):
SessionKnob.objects.get(session=self).delete()
@ -282,7 +224,7 @@ class DataManager(models.Manager):
@staticmethod
def create_name(data_obj, key):
ts = data_obj.creation_time.strftime("%m-%d-%y")
return (key + '@' + ts + '#' + str(data_obj.pk))
return key + '@' + ts + '#' + str(data_obj.pk)
class KnobDataManager(DataManager):

View File

@ -18,9 +18,8 @@ from analysis.gp_tf import GPRGD
from analysis.nn_tf import NeuralNet
from analysis.preprocessing import Bin, DummyEncoder
from analysis.constraints import ParamConstraintHelper
from website.models import (PipelineData, PipelineRun, Result, Workload, KnobCatalog,
MetricCatalog, SessionKnob)
from website.db import parser
from website.models import PipelineData, PipelineRun, Result, Workload, KnobCatalog, SessionKnob
from website import db
from website.types import PipelineTaskType, AlgorithmType
from website.utils import DataUtil, JSONUtil
from website.settings import IMPORTANT_KNOB_NUMBER, NUM_SAMPLES, TOP_NUM_CONFIG # pylint: disable=no-name-in-module
@ -102,14 +101,14 @@ class ConfigurationRecommendation(UpdateTask): # pylint: disable=abstract-metho
result = Result.objects.get(pk=result_id)
# Replace result with formatted result
formatted_params = parser.format_dbms_knobs(result.dbms.pk, retval['recommendation'])
formatted_params = db.parser.format_dbms_knobs(result.dbms.pk, retval['recommendation'])
task_meta = TaskMeta.objects.get(task_id=task_id)
retval['recommendation'] = formatted_params
task_meta.result = retval
task_meta.save()
# Create next configuration to try
config = parser.create_knob_configuration(result.dbms.pk, retval['recommendation'])
config = db.parser.create_knob_configuration(result.dbms.pk, retval['recommendation'])
retval['recommendation'] = config
result.next_configuration = JSONUtil.dumps(retval)
result.save()
@ -267,8 +266,8 @@ def train_ddpg(result_id):
target_objective))
objective = metric_data[target_obj_idx]
base_objective = base_metric_data[target_obj_idx]
metric_meta = MetricCatalog.objects.get_metric_meta(result.session.dbms,
result.session.target_objective)
metric_meta = db.target_objectives.get_metric_metadata(
result.session.dbms.pk, result.session.target_objective)
# Calculate the reward
objective = objective / base_objective
@ -407,12 +406,9 @@ def configuration_recommendation(recommendation_input):
'metrics (target_obj={})').format(len(target_obj_idx),
target_objective))
metric_meta = MetricCatalog.objects.get_metric_meta(newest_result.session.dbms,
newest_result.session.target_objective)
if metric_meta[target_objective].improvement == '(less is better)':
lessisbetter = True
else:
lessisbetter = False
metric_meta = db.target_objectives.get_metric_metadata(
newest_result.session.dbms.pk, newest_result.session.target_objective)
lessisbetter = metric_meta[target_objective].improvement == db.target_objectives.LESS_IS_BETTER
y_workload = y_workload[:, target_obj_idx]
y_target = y_target[:, target_obj_idx]

View File

@ -27,11 +27,10 @@ from django.views.decorators.csrf import csrf_exempt
from django.forms.models import model_to_dict
from pytz import timezone
from .db import parser
from .db import parser, target_objectives
from .forms import NewResultForm, ProjectForm, SessionForm, SessionKnobForm
from .models import (BackupData, DBMSCatalog, KnobCatalog, KnobData, MetricCatalog,
MetricData, MetricManager, Project, Result, Session, Workload,
SessionKnob)
MetricData, Project, Result, Session, Workload, SessionKnob)
from .tasks import (aggregate_target_results, map_workload, train_ddpg,
configuration_recommendation, configuration_recommendation_ddpg)
from .types import (DBMSType, KnobUnitType, MetricType,
@ -246,8 +245,9 @@ def session_view(request, project_id, session_id):
default_workload = 'show_none'
default_confs = 'none'
default_metrics = MetricCatalog.objects.get_default_metrics(session.target_objective)
metric_meta = MetricCatalog.objects.get_metric_meta(session.dbms, session.target_objective)
default_metrics = [session.target_objective]
metric_meta = target_objectives.get_metric_metadata(
session.dbms.pk, session.target_objective)
knobs = SessionKnob.objects.get_knobs_for_session(session)
knob_names = [knob["name"] for knob in knobs if knob["tunable"]]
@ -330,7 +330,7 @@ def create_or_edit_session(request, project_id, session_id=''):
'dbms': DBMSCatalog.objects.get(
type=DBMSType.POSTGRES, version='9.6'),
'algorithm': AlgorithmType.GPR,
'target_objective': 'throughput_txn_per_sec',
'target_objective': target_objectives.get_default_target_objective(),
})
form = SessionForm(**form_kwargs)
context = {
@ -393,12 +393,12 @@ def result_view(request, project_id, session_id, result_id):
target = get_object_or_404(Result, pk=result_id)
session = target.session
default_metrics = MetricCatalog.objects.get_default_metrics(session.target_objective)
metric_meta = MetricCatalog.objects.get_metric_meta(session.dbms, session.target_objective)
metric_data = JSONUtil.loads(target.metric_data.data)
# default_metrics = [session.target_objective]
metric_meta = target_objectives.get_metric_metadata(session.dbms.pk, session.target_objective)
# metric_data = JSONUtil.loads(target.metric_data.data)
default_metrics = {mname: metric_data[mname] * metric_meta[mname].scale
for mname in default_metrics}
# default_metrics = {mname: metric_data[mname] * metric_meta[mname].scale
# for mname in default_metrics}
status = None
if target.task_ids is not None:
@ -459,11 +459,11 @@ def handle_result_files(session, files):
observation_time = summary['observation_time']
start_time = datetime.fromtimestamp(
# int(summary['start_time']), # unit: seconds
int(summary['start_time']) / 1000, # unit: ms
int(float(summary['start_time']) / 1000), # unit: ms
timezone(TIME_ZONE))
end_time = datetime.fromtimestamp(
# int(summary['end_time']), # unit: seconds
int(summary['end_time']) / 1000, # unit: ms
int(float(summary['end_time']) / 1000), # unit: ms
timezone(TIME_ZONE))
# Check if workload name only contains alpha-numeric, underscore and hyphen
@ -715,8 +715,8 @@ def workload_view(request, project_id, session_id, wkld_id): # pylint: disable=
default_knob_confs = [c for c, _ in list(knob_conf_map.values())][:5]
LOG.debug("default_knob_confs: %s", default_knob_confs)
metric_meta = MetricCatalog.objects.get_metric_meta(session.dbms, session.target_objective)
default_metrics = MetricCatalog.objects.get_default_metrics(session.target_objective)
metric_meta = target_objectives.get_metric_metadata(session.dbms.pk, session.target_objective)
default_metrics = [session.target_objective]
labels = Workload.get_labels()
labels['title'] = 'Workload Information'
@ -799,9 +799,9 @@ def get_workload_data(request):
results = Result.objects.filter(workload=workload)
result_data = {r.pk: JSONUtil.loads(r.metric_data.data) for r in results}
results = sorted(results, key=lambda x: int(result_data[x.pk][MetricManager.THROUGHPUT]))
results = sorted(results, key=lambda x: int(result_data[x.pk][target_objectives.THROUGHPUT]))
default_metrics = MetricCatalog.objects.get_default_metrics(session.target_objective)
default_metrics = [session.target_objective]
metrics = request.GET.get('met', ','.join(default_metrics)).split(',')
metrics = [m for m in metrics if m != 'none']
if len(metrics) == 0:
@ -810,7 +810,7 @@ def get_workload_data(request):
data_package = {'results': [],
'error': 'None',
'metrics': metrics}
metric_meta = MetricCatalog.objects.get_metric_meta(session.dbms, session.target_objective)
metric_meta = target_objectives.get_metric_metadata(session.dbms.pk, session.target_objective)
for met in data_package['metrics']:
met_info = metric_meta[met]
data_package['results'].append({'data': [[]], 'tick': [],
@ -869,9 +869,8 @@ def get_timeline_data(request):
if session.user != request.user:
return HttpResponse(JSONUtil.dumps(data_package), content_type='application/json')
default_metrics = MetricCatalog.objects.get_default_metrics(session.target_objective)
metric_meta = MetricCatalog.objects.get_metric_meta(session.dbms, session.target_objective)
default_metrics = [session.target_objective]
metric_meta = target_objectives.get_metric_metadata(session.dbms.pk, session.target_objective)
for met in default_metrics:
met_info = metric_meta[met]
columnnames.append(met_info.pprint + ' (' + met_info.short_unit + ')')