save ddpg model in database

commit a3fcf59f07
parent c8fbaf6e4b
@@ -1,7 +1,7 @@
 #
-# __init__.py
+# OtterTune - __init__.py
 #
-# Copyright
+# Copyright (c) 2017-18, Carnegie Mellon University Database Group
 #

@@ -1,18 +1,16 @@
 #
-# ddpg.py
+# OtterTune - ddpg.py
 #
-# Copyright
+# Copyright (c) 2017-18, Carnegie Mellon University Database Group
 #
-"""
-Deep Deterministic Policy Gradient Model
-
-"""
+# from: https://github.com/KqSMea8/CDBTune
+# Zhang, Ji, et al. "An end-to-end automatic cloud database tuning system using
+# deep reinforcement learning." Proceedings of the 2019 International Conference
+# on Management of Data. ACM, 2019

-import logging
 import os
-import sys
-import math
+import pickle
 import math
 import numpy as np
 import torch
 import torch.nn as nn

@@ -21,12 +19,11 @@ import torch.nn.functional as F
 import torch.optim as optimizer
 from torch.autograd import Variable

-from analysis.ddpg.OUProcess import OUProcess
+from analysis.ddpg.ou_process import OUProcess
 from analysis.ddpg.prioritized_replay_memory import PrioritizedReplayMemory
+from analysis.util import get_analysis_logger

-LOG = logging.getLogger(__name__)
-
-sys.path.append('../')
+LOG = get_analysis_logger(__name__)


 # code from https://github.com/Kaixhin/NoisyNet-A3C/blob/master/model.py

@@ -37,6 +34,8 @@ class NoisyLinear(nn.Linear):
         self.sigma_init = sigma_init
         self.sigma_weight = Parameter(torch.Tensor(out_features, in_features))
         self.sigma_bias = Parameter(torch.Tensor(out_features))
+        self.epsilon_weight = None
+        self.epsilon_bias = None
         self.register_buffer('epsilon_weight', torch.zeros(out_features, in_features))
         self.register_buffer('epsilon_bias', torch.zeros(out_features))
         self.reset_parameters()

@@ -55,7 +54,6 @@ class NoisyLinear(nn.Linear):
         return F.linear(x, self.weight + self.sigma_weight * Variable(self.epsilon_weight),
                         self.bias + self.sigma_bias * Variable(self.epsilon_bias))

-    # pylint: disable=attribute-defined-outside-init
     def sample_noise(self):
         self.epsilon_weight = torch.randn(self.out_features, self.in_features)
         self.epsilon_bias = torch.randn(self.out_features)

@@ -63,7 +61,6 @@ class NoisyLinear(nn.Linear):
     def remove_noise(self):
         self.epsilon_weight = torch.zeros(self.out_features, self.in_features)
         self.epsilon_bias = torch.zeros(self.out_features)
-    # pylint: enable=attribute-defined-outside-init


 class Normalizer(object):

@@ -88,71 +85,6 @@ class Normalizer(object):
         return self.normalize(x)


-class ActorLow(nn.Module):
-
-    def __init__(self, n_states, n_actions, ):
-        super(ActorLow, self).__init__()
-        self.layers = nn.Sequential(
-            nn.BatchNorm1d(n_states),
-            nn.Linear(n_states, 32),
-            nn.LeakyReLU(negative_slope=0.2),
-            nn.BatchNorm1d(32),
-            nn.Linear(32, n_actions),
-            nn.LeakyReLU(negative_slope=0.2)
-        )
-        self._init_weights()
-        self.out_func = nn.Tanh()
-
-    def _init_weights(self):
-
-        for m in self.layers:
-            if isinstance(m, nn.Linear):
-                m.weight.data.normal_(0.0, 1e-3)
-                m.bias.data.uniform_(-0.1, 0.1)
-
-    def forward(self, x):  # pylint: disable=arguments-differ
-
-        out = self.layers(x)
-
-        return self.out_func(out)
-
-
-class CriticLow(nn.Module):
-
-    def __init__(self, n_states, n_actions):
-        super(CriticLow, self).__init__()
-        self.state_input = nn.Linear(n_states, 32)
-        self.action_input = nn.Linear(n_actions, 32)
-        self.act = nn.LeakyReLU(negative_slope=0.2)
-        self.state_bn = nn.BatchNorm1d(n_states)
-        self.layers = nn.Sequential(
-            nn.Linear(64, 1),
-            nn.LeakyReLU(negative_slope=0.2),
-        )
-        self._init_weights()
-
-    def _init_weights(self):
-        self.state_input.weight.data.normal_(0.0, 1e-3)
-        self.state_input.bias.data.uniform_(-0.1, 0.1)
-
-        self.action_input.weight.data.normal_(0.0, 1e-3)
-        self.action_input.bias.data.uniform_(-0.1, 0.1)
-
-        for m in self.layers:
-            if isinstance(m, nn.Linear):
-                m.weight.data.normal_(0.0, 1e-3)
-                m.bias.data.uniform_(-0.1, 0.1)
-
-    def forward(self, x, action):  # pylint: disable=arguments-differ
-        x = self.state_bn(x)
-        x = self.act(self.state_input(x))
-        action = self.act(self.action_input(action))
-
-        _input = torch.cat([x, action], dim=1)
-        value = self.layers(_input)
-        return value
-
-
 class Actor(nn.Module):

     def __init__(self, n_states, n_actions, noisy=False):

@@ -235,36 +167,17 @@ class Critic(nn.Module):

 class DDPG(object):

-    def __init__(self, n_states, n_actions, opt=None, ouprocess=True, mean_var_path=None,
-                 supervised=False):
-        """ DDPG Algorithms
-        Args:
-            n_states: int, dimension of states
-            n_actions: int, dimension of actions
-            opt: dict, params
-            supervised, bool, pre-train the actor with supervised learning
-        """
+    def __init__(self, n_states, n_actions, model_name='', alr=0.001, clr=0.001,
+                 gamma=0.9, batch_size=32, tau=0.002, memory_size=100000,
+                 ouprocess=True, mean_var_path=None, supervised=False):
         self.n_states = n_states
         self.n_actions = n_actions

-        if opt is None:
-            opt = {
-                'model': '',
-                'alr': 0.001,
-                'clr': 0.001,
-                'gamma': 0.9,
-                'batch_size': 32,
-                'tau': 0.002,
-                'memory_size': 100000
-            }
-
-        # Params
-        self.alr = opt['alr']
-        self.clr = opt['clr']
-        self.model_name = opt['model']
-        self.batch_size = opt['batch_size']
-        self.gamma = opt['gamma']
-        self.tau = opt['tau']
+        self.alr = alr
+        self.clr = clr
+        self.model_name = model_name
+        self.batch_size = batch_size
+        self.gamma = gamma
+        self.tau = tau
         self.ouprocess = ouprocess

         if mean_var_path is None:

@@ -287,9 +200,8 @@ class DDPG(object):
         self._build_network()
         LOG.info('Finish Initializing Networks')

-        self.replay_memory = PrioritizedReplayMemory(capacity=opt['memory_size'])
+        self.replay_memory = PrioritizedReplayMemory(capacity=memory_size)
         self.noise = OUProcess(n_actions)
-        # LOG.info('DDPG Initialzed!')

     @staticmethod
     def totensor(x):

@@ -460,6 +372,13 @@ class DDPG(object):
             '{}_critic.pth'.format(model_name)
         )

+    def set_model(self, actor_dict, critic_dict):
+        self.actor.load_state_dict(pickle.loads(actor_dict))
+        self.critic.load_state_dict(pickle.loads(critic_dict))
+
+    def get_model(self):
+        return pickle.dumps(self.actor.state_dict()), pickle.dumps(self.critic.state_dict())
+
     def save_actor(self, path):
         """ save actor network
         Args:

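The new get_model()/set_model() pair turns the actor and critic weights into pickled bytes so they can be stored in the database instead of .pth checkpoint files. A minimal sketch of that round trip, using a stand-in nn.Linear in place of the real actor/critic networks (illustrative only, not part of the commit):

    import pickle
    import torch
    import torch.nn as nn

    actor = nn.Linear(4, 2)                         # stand-in for the actor network
    blob = pickle.dumps(actor.state_dict())         # what get_model() returns: raw bytes

    restored = nn.Linear(4, 2)                      # fresh network with the same architecture
    restored.load_state_dict(pickle.loads(blob))    # what set_model() does with the blob
    assert torch.equal(actor.weight, restored.weight)
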
@@ -1,13 +1,16 @@
 #
-# OUProcess.py
+# OtterTune - ou_process.py
 #
-# Copyright
+# Copyright (c) 2017-18, Carnegie Mellon University Database Group
 #
+# from: https://github.com/KqSMea8/CDBTune
+# Zhang, Ji, et al. "An end-to-end automatic cloud database tuning system using
+# deep reinforcement learning." Proceedings of the 2019 International Conference
+# on Management of Data. ACM, 2019

 import numpy as np


 # from https://github.com/songrotek/DDPG/blob/master/ou_noise.py
 class OUProcess(object):

     def __init__(self, n_actions, theta=0.15, mu=0, sigma=0.1, ):

@@ -28,14 +31,3 @@ class OUProcess(object):
         dx = self.theta * (self.mu - x) + self.sigma * np.random.randn(len(x))
         self.current_value = x + dx
         return self.current_value
-
-
-if __name__ == '__main__':
-    import matplotlib.pyplot as plt  # pylint: disable=wrong-import-position
-    ou = OUProcess(3, theta=0.3)  # pylint: disable=invalid-name
-    states = []  # pylint: disable=invalid-name
-    for i in range(1000):
-        states.append(ou.noise())
-
-    plt.plot(states)
-    plt.show()

@@ -1,8 +1,13 @@
 #
-# prioritized_replay_memory.py
+# OtterTune - prioritized_replay_memory.py
 #
-# Copyright
+# Copyright (c) 2017-18, Carnegie Mellon University Database Group
 #
+# from: https://github.com/KqSMea8/CDBTune
+# Zhang, Ji, et al. "An end-to-end automatic cloud database tuning system using
+# deep reinforcement learning." Proceedings of the 2019 International Conference
+# on Management of Data. ACM, 2019

 import random
+import pickle
 import numpy as np

@@ -119,3 +124,9 @@ class PrioritizedReplayMemory(object):
         with open(path, 'rb') as f:
             _memory = pickle.load(f)
         self.tree = _memory['tree']
+
+    def get(self):
+        return pickle.dumps({"tree": self.tree})
+
+    def set(self, binary):
+        self.tree = pickle.loads(binary)['tree']

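The matching get()/set() methods give the replay buffer the same bytes-in/bytes-out interface, so it can be persisted next to the network weights. A hedged usage sketch (assumes the analysis.ddpg package from this repository is importable; transitions would normally be added during training before get() is called):

    from analysis.ddpg.prioritized_replay_memory import PrioritizedReplayMemory

    memory = PrioritizedReplayMemory(capacity=100000)
    blob = memory.get()                    # pickled {'tree': ...}, ready for a BinaryField

    restored = PrioritizedReplayMemory(capacity=100000)
    restored.set(blob)                     # internal tree restored from the blob
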
@@ -185,6 +185,9 @@ class Migration(migrations.Migration):
                 ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                 ('name', models.CharField(max_length=64, verbose_name=b'session name')),
                 ('description', models.TextField(blank=True, null=True)),
+                ('ddpg_actor_model', models.BinaryField(null=True, blank=True)),
+                ('ddpg_critic_model', models.BinaryField(null=True, blank=True)),
+                ('ddpg_reply_memory', models.BinaryField(null=True, blank=True)),
                 ('creation_time', models.DateTimeField()),
                 ('last_update', models.DateTimeField()),
                 ('upload_code', models.CharField(max_length=30, unique=True)),

@@ -187,6 +187,9 @@ class Session(BaseModel):
     hardware = models.ForeignKey(Hardware)
     algorithm = models.IntegerField(choices=AlgorithmType.choices(),
                                     default=AlgorithmType.OTTERTUNE)
+    ddpg_actor_model = models.BinaryField(null=True, blank=True)
+    ddpg_critic_model = models.BinaryField(null=True, blank=True)
+    ddpg_reply_memory = models.BinaryField(null=True, blank=True)

     project = models.ForeignKey(Project)
     creation_time = models.DateTimeField()

@@ -353,7 +353,7 @@ class BaseParser(object, metaclass=ABCMeta):

     def format_enum(self, enum_value, metadata):
         enumvals = metadata.enumvals.split(',')
-        return enumvals[enum_value]
+        return enumvals[int(round(enum_value))]

     def format_integer(self, int_value, metadata):
         return int(round(int_value))

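The one-line parser fix matters because the recommenders hand back continuous values; indexing enumvals with a raw float would raise a TypeError. A toy illustration (the enum values and the recommended value are made up):

    enumvals = 'off,on,auto'.split(',')       # hypothetical enum knob metadata
    enum_value = 1.7                          # continuous value coming from the model
    # enumvals[enum_value] would fail; rounding picks the nearest entry instead:
    print(enumvals[int(round(enum_value))])   # -> 'auto'
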
@@ -35,7 +35,7 @@ MAX_TRAIN_SIZE = 7000
 # Batch size in GPR model
 BATCH_SIZE = 3000

-# Threads for TensorFlow config
+# Threads for TensorFlow config
 NUM_THREADS = 4

 # ---GRADIENT DESCENT CONSTANTS---

@@ -54,3 +54,19 @@ DEFAULT_EPSILON = 1e-6
 DEFAULT_SIGMA_MULTIPLIER = 3.0

 DEFAULT_MU_MULTIPLIER = 1.0
+
+# ---CONSTRAINTS CONSTANTS---
+# Batch size in DDPG model
+DDPG_BATCH_SIZE = 32
+
+# Learning rate of actor network
+ACTOR_LEARNING_RATE = 0.001
+
+# Learning rate of critic network
+CRITIC_LEARNING_RATE = 0.001
+
+# The impact of future reward on the decision
+GAMMA = 0.1
+
+# The changing rate of the target network
+TAU = 0.002

@@ -7,7 +7,7 @@ from .async_tasks import (aggregate_target_results,
                           configuration_recommendation,
                           map_workload,
                           train_ddpg,
-                          run_ddpg)
+                          configuration_recommendation_ddpg)


 from .periodic_tasks import (run_background_tasks)

@@ -5,14 +5,12 @@
 #
 import random
 import queue
-from os.path import dirname, abspath, join
-import os
 import numpy as np

 from celery.task import task, Task
 from celery.utils.log import get_task_logger
 from djcelery.models import TaskMeta
-from sklearn.preprocessing import StandardScaler
+from sklearn.preprocessing import StandardScaler, MinMaxScaler

 from analysis.ddpg.ddpg import DDPG
 from analysis.gp import GPRNP

@@ -29,7 +27,10 @@ from website.settings import (DEFAULT_LENGTH_SCALE, DEFAULT_MAGNITUDE,
                               MAX_TRAIN_SIZE, BATCH_SIZE, NUM_THREADS,
                               DEFAULT_RIDGE, DEFAULT_LEARNING_RATE,
                               DEFAULT_EPSILON, MAX_ITER, GPR_EPS,
-                              DEFAULT_SIGMA_MULTIPLIER, DEFAULT_MU_MULTIPLIER)
+                              DEFAULT_SIGMA_MULTIPLIER, DEFAULT_MU_MULTIPLIER,
+                              DDPG_BATCH_SIZE, ACTOR_LEARNING_RATE,
+                              CRITIC_LEARNING_RATE, GAMMA, TAU)

 from website.settings import INIT_FLIP_PROB, FLIP_PROB_DECAY
 from website.types import VarType

@@ -235,10 +236,10 @@ def train_ddpg(result_id):

     # Clean knob data
     cleaned_agg_data = clean_knob_data(agg_data['X_matrix'], agg_data['X_columnlabels'], session)
-    agg_data['X_matrix'] = np.array(cleaned_agg_data[0]).flatten()
-    agg_data['X_columnlabels'] = np.array(cleaned_agg_data[1]).flatten()
-    knob_data = DataUtil.normalize_knob_data(agg_data['X_matrix'],
-                                             agg_data['X_columnlabels'], session)
+    knob_data = np.array(cleaned_agg_data[0])
+    knob_labels = np.array(cleaned_agg_data[1])
+    knob_bounds = np.vstack(DataUtil.get_knob_bounds(knob_labels.flatten(), session))
+    knob_data = MinMaxScaler().fit(knob_bounds).transform(knob_data)[0]
     knob_num = len(knob_data)
     metric_num = len(metric_data)
     LOG.info('knob_num: %d, metric_num: %d', knob_num, metric_num)

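train_ddpg now maps each knob into [0, 1] by fitting a MinMaxScaler on the per-knob bounds returned by DataUtil.get_knob_bounds(); configuration_recommendation_ddpg later inverts the same mapping. A self-contained sketch with made-up bounds for two knobs:

    import numpy as np
    from sklearn.preprocessing import MinMaxScaler

    minvals = np.array([0.0, 64.0])           # hypothetical per-knob minima
    maxvals = np.array([100.0, 4096.0])       # hypothetical per-knob maxima
    knob_bounds = np.vstack((minvals, maxvals))

    scaler = MinMaxScaler().fit(knob_bounds)
    normalized = scaler.transform(np.array([[50.0, 1024.0]]))[0]         # ~[0.5, 0.238]
    raw_again = scaler.inverse_transform(normalized.reshape(1, -1))[0]   # back to [50.0, 1024.0]
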
@@ -276,26 +277,23 @@ def train_ddpg(result_id):
         * (2 * prev_objective - objective) / prev_objective

     # Update ddpg
-    project_root = dirname(dirname(dirname(abspath(__file__))))
-    saved_memory = join(project_root, 'checkpoint/reply_memory_' + session.project.name)
-    saved_model = join(project_root, 'checkpoint/ddpg_' + session.project.name)
-    ddpg = DDPG(n_actions=knob_num, n_states=metric_num)
-    if os.path.exists(saved_memory):
-        ddpg.replay_memory.load_memory(saved_memory)
-        ddpg.load_model(saved_model)
+    ddpg = DDPG(n_actions=knob_num, n_states=metric_num, alr=ACTOR_LEARNING_RATE,
+                clr=CRITIC_LEARNING_RATE, gamma=GAMMA, batch_size=DDPG_BATCH_SIZE, tau=TAU)
+    if session.ddpg_actor_model and session.ddpg_critic_model:
+        ddpg.set_model(session.ddpg_actor_model, session.ddpg_critic_model)
+    if session.ddpg_reply_memory:
+        ddpg.replay_memory.set(session.ddpg_reply_memory)
     ddpg.add_sample(prev_metric_data, knob_data, reward, metric_data, False)
     if len(ddpg.replay_memory) > 32:
         ddpg.update()
-    checkpoint_dir = join(project_root, 'checkpoint')
-    if not os.path.exists(checkpoint_dir):
-        os.makedirs(checkpoint_dir)
-    ddpg.replay_memory.save(saved_memory)
-    ddpg.save_model(saved_model)
+    session.ddpg_actor_model, session.ddpg_critic_model = ddpg.get_model()
+    session.ddpg_reply_memory = ddpg.replay_memory.get()
+    session.save()
     return result_info


-@task(base=ConfigurationRecommendation, name='run_ddpg')
-def run_ddpg(result_info):
+@task(base=ConfigurationRecommendation, name='configuration_recommendation_ddpg')
+def configuration_recommendation_ddpg(result_info):  # pylint: disable=invalid-name
     LOG.info('Use ddpg to recommend configuration')
     result_id = result_info['newest_result_id']
     result = Result.objects.filter(pk=result_id)

@@ -305,20 +303,20 @@ def run_ddpg(result_info):
     cleaned_agg_data = clean_knob_data(agg_data['X_matrix'], agg_data['X_columnlabels'],
                                        session)
     knob_labels = np.array(cleaned_agg_data[1]).flatten()
-    knob_data = np.array(cleaned_agg_data[0]).flatten()
-    knob_num = len(knob_data)
+    knob_num = len(knob_labels)
     metric_num = len(metric_data)

-    project_root = dirname(dirname(dirname(abspath(__file__))))
-    saved_memory = join(project_root, 'checkpoint/reply_memory_' + session.project.name)
-    saved_model = join(project_root, 'checkpoint/ddpg_' + session.project.name)
-    ddpg = DDPG(n_actions=knob_num, n_states=metric_num)
-    if os.path.exists(saved_memory):
-        ddpg.replay_memory.load_memory(saved_memory)
-        ddpg.load_model(saved_model)
+    ddpg = DDPG(n_actions=knob_num, n_states=metric_num, alr=ACTOR_LEARNING_RATE,
+                clr=CRITIC_LEARNING_RATE, gamma=GAMMA, batch_size=DDPG_BATCH_SIZE, tau=TAU)
+    if session.ddpg_actor_model is not None and session.ddpg_critic_model is not None:
+        ddpg.set_model(session.ddpg_actor_model, session.ddpg_critic_model)
+    if session.ddpg_reply_memory is not None:
+        ddpg.replay_memory.set(session.ddpg_reply_memory)
     knob_data = ddpg.choose_action(metric_data)
     LOG.info('recommended knob: %s', knob_data)
-    knob_data = DataUtil.denormalize_knob_data(knob_data, knob_labels, session)
+
+    knob_bounds = np.vstack(DataUtil.get_knob_bounds(knob_labels, session))
+    knob_data = MinMaxScaler().fit(knob_bounds).inverse_transform(knob_data.reshape(1, -1))[0]
     conf_map = {k: knob_data[i] for i, k in enumerate(knob_labels)}
     conf_map_res = {}
     conf_map_res['status'] = 'good'

@@ -93,30 +93,35 @@ class TaskUtil(object):
 class DataUtil(object):

     @staticmethod
-    def normalize_knob_data(knob_values, knob_labels, session):
-        for i, knob in enumerate(knob_labels):
+    def get_knob_bounds(knob_labels, session):
+        minvals = []
+        maxvals = []
+        for _, knob in enumerate(knob_labels):
             knob_object = KnobCatalog.objects.get(dbms=session.dbms, name=knob, tunable=True)
-            minval = float(knob_object.minval)
-            maxval = float(knob_object.maxval)
-            knob_new = SessionKnob.objects.filter(knob=knob_object, session=session, tunable=True)
-            if knob_new.exists():
-                minval = float(knob_new[0].minval)
-                maxval = float(knob_new[0].maxval)
-            knob_values[i] = (knob_values[i] - minval) / (maxval - minval)
-            knob_values[i] = max(0, min(knob_values[i], 1))
-        return knob_values
-
-    @staticmethod
-    def denormalize_knob_data(knob_values, knob_labels, session):
-        for i, knob in enumerate(knob_labels):
-            knob_object = KnobCatalog.objects.get(dbms=session.dbms, name=knob, tunable=True)
-            minval = float(knob_object.minval)
-            maxval = float(knob_object.maxval)
+            knob_session_object = SessionKnob.objects.filter(knob=knob_object, session=session,
+                                                             tunable=True)
+            if knob_session_object.exists():
+                minval = float(knob_session_object[0].minval)
+                maxval = float(knob_session_object[0].maxval)
+            else:
+                minval = float(knob_object.minval)
+                maxval = float(knob_object.maxval)
+            minvals.append(minval)
+            maxvals.append(maxval)
+        return np.array(minvals), np.array(maxvals)
+
+    @staticmethod
+    def denormalize_knob_data(knob_values, knob_labels, session):
+        for i, knob in enumerate(knob_labels):
+            knob_object = KnobCatalog.objects.get(dbms=session.dbms, name=knob, tunable=True)
+            knob_session_object = SessionKnob.objects.filter(knob=knob_object, session=session,
+                                                             tunable=True)
+            if knob_session_object.exists():
+                minval = float(knob_session_object[0].minval)
+                maxval = float(knob_session_object[0].maxval)
+            else:
+                minval = float(knob_object.minval)
+                maxval = float(knob_object.maxval)
             knob_values[i] = knob_values[i] * (maxval - minval) + minval
         return knob_values

@@ -30,8 +30,8 @@ from .models import (BackupData, DBMSCatalog, KnobCatalog, KnobData, MetricCatal
                      MetricData, MetricManager, Project, Result, Session, Workload,
                      SessionKnob)
 from .parser import Parser
-from .tasks import (aggregate_target_results, map_workload, train_ddpg, run_ddpg,
-                    configuration_recommendation)
+from .tasks import (aggregate_target_results, map_workload, train_ddpg,
+                    configuration_recommendation, configuration_recommendation_ddpg)
 from .types import (DBMSType, KnobUnitType, MetricType,
                     TaskType, VarType, WorkloadStatusType, AlgorithmType)
 from .utils import JSONUtil, LabelUtil, MediaUtil, TaskUtil