restore CDBTune

commit 5579d8d94f (parent 1e17c78956)
@@ -3,7 +3,7 @@
 #
 # Copyright (c) 2017-18, Carnegie Mellon University Database Group
 #
-# from: https://github.com/KqSMea8/CDBTune
+# from: https://github.com/KqSMea8/use_default
 # Zhang, Ji, et al. "An end-to-end automatic cloud database tuning system using
 # deep reinforcement learning." Proceedings of the 2019 International Conference
 # on Management of Data. ACM, 2019
@@ -23,8 +23,22 @@ LOG = get_analysis_logger(__name__)
 
 class Actor(nn.Module):
 
-    def __init__(self, n_states, n_actions, hidden_sizes):
+    def __init__(self, n_states, n_actions, hidden_sizes, use_default):
         super(Actor, self).__init__()
+        if use_default:
+            self.layers = nn.Sequential(
+                nn.Linear(n_states, 128),
+                nn.LeakyReLU(negative_slope=0.2),
+                nn.BatchNorm1d(hidden_sizes[0]),
+                nn.Linear(128, 128),
+                nn.Tanh(),
+                nn.Dropout(0.3),
+                nn.Linear(128, 128),
+                nn.Tanh(),
+                nn.Linear(128, 64),
+                nn.Linear(64, n_actions)
+            )
+        else:
             self.layers = nn.Sequential(
                 nn.Linear(n_states, hidden_sizes[0]),
                 nn.LeakyReLU(negative_slope=0.2),
@@ -58,11 +72,26 @@ class Actor(nn.Module):
 
 class Critic(nn.Module):
 
-    def __init__(self, n_states, n_actions, hidden_sizes):
+    def __init__(self, n_states, n_actions, hidden_sizes, use_default):
         super(Critic, self).__init__()
+        self.act = nn.Tanh()
+        if use_default:
+            self.state_input = nn.Linear(n_states, 128)
+            self.action_input = nn.Linear(n_actions, 128)
+            self.layers = nn.Sequential(
+                nn.Linear(256, 256),
+                nn.LeakyReLU(negative_slope=0.2),
+                nn.BatchNorm1d(256),
+                nn.Linear(256, 256),
+                nn.Linear(256, 64),
+                nn.Tanh(),
+                nn.Dropout(0.3),
+                nn.BatchNorm1d(64),
+                nn.Linear(64, 1)
+            )
+        else:
             self.state_input = nn.Linear(n_states, hidden_sizes[0])
             self.action_input = nn.Linear(n_actions, hidden_sizes[0])
-        self.act = nn.Tanh()
             self.layers = nn.Sequential(
                 nn.Linear(hidden_sizes[0] * 2, hidden_sizes[1]),
                 nn.LeakyReLU(negative_slope=0.2),
@@ -72,7 +101,7 @@ class Critic(nn.Module):
                 nn.Tanh(),
                 nn.Dropout(0.3),
                 nn.BatchNorm1d(hidden_sizes[2]),
-                nn.Linear(hidden_sizes[2], 1),
+                nn.Linear(hidden_sizes[2], 1)
             )
         self._init_weights()
 
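The two hunks above give both networks a second, hard-coded architecture that is selected by the new `use_default` flag, while the original `hidden_sizes`-driven stack is kept in the `else` branch. Below is a minimal standalone sketch of that constructor pattern; `TinyActor`, its layer layout, and the shape check are ours for illustration, not the repo's code:

```python
import torch
import torch.nn as nn


class TinyActor(nn.Module):
    """Toy two-branch constructor mirroring the use_default idea above."""

    def __init__(self, n_states, n_actions, hidden_sizes, use_default=False):
        super(TinyActor, self).__init__()
        if use_default:
            sizes = [n_states, 128, 128, 64, n_actions]              # fixed, paper-style layout
        else:
            sizes = [n_states] + list(hidden_sizes) + [n_actions]    # caller-configurable layout
        layers = []
        for n_in, n_out in zip(sizes[:-1], sizes[1:]):
            layers += [nn.Linear(n_in, n_out), nn.Tanh()]
        self.layers = nn.Sequential(*layers[:-1])  # drop the trailing activation

    def forward(self, states):
        return self.layers(states)


# Quick shape check with dummy data: 63 metrics in, 12 knob values out.
actor = TinyActor(n_states=63, n_actions=12, hidden_sizes=[128, 128, 64], use_default=True)
print(actor(torch.randn(4, 63)).shape)  # torch.Size([4, 12])
```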
@@ -101,7 +130,8 @@ class DDPG(object):
 
     def __init__(self, n_states, n_actions, model_name='', alr=0.001, clr=0.001,
                  gamma=0.9, batch_size=32, tau=0.002, shift=0, memory_size=100000,
-                 a_hidden_sizes=[128, 128, 64], c_hidden_sizes=[128, 256, 64]):
+                 a_hidden_sizes=[128, 128, 64], c_hidden_sizes=[128, 256, 64],
+                 use_default=False):
         self.n_states = n_states
         self.n_actions = n_actions
         self.alr = alr
@@ -113,6 +143,7 @@ class DDPG(object):
         self.a_hidden_sizes = a_hidden_sizes
         self.c_hidden_sizes = c_hidden_sizes
         self.shift = shift
+        self.use_default = use_default
 
         self._build_network()
 
@@ -124,10 +155,12 @@ class DDPG(object):
         return Variable(torch.FloatTensor(x))
 
     def _build_network(self):
-        self.actor = Actor(self.n_states, self.n_actions, self.a_hidden_sizes)
-        self.target_actor = Actor(self.n_states, self.n_actions, self.a_hidden_sizes)
-        self.critic = Critic(self.n_states, self.n_actions, self.c_hidden_sizes)
-        self.target_critic = Critic(self.n_states, self.n_actions, self.c_hidden_sizes)
+        self.actor = Actor(self.n_states, self.n_actions, self.a_hidden_sizes, self.use_default)
+        self.target_actor = Actor(self.n_states, self.n_actions, self.a_hidden_sizes,
+                                  self.use_default)
+        self.critic = Critic(self.n_states, self.n_actions, self.c_hidden_sizes, self.use_default)
+        self.target_critic = Critic(self.n_states, self.n_actions, self.c_hidden_sizes,
+                                    self.use_default)
 
         # Copy actor's parameters
         self._update_target(self.target_actor, self.actor, tau=1.0)
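With `_build_network` now threading the flag into the actor, critic and their target copies, callers only pass `use_default` once when constructing the agent. A usage sketch follows; the import path is an assumption, and the keyword arguments are the ones shown in the hunks above:

```python
# Assumed import path -- adjust to wherever this ddpg.py module lives in the tree.
from analysis.ddpg.ddpg import DDPG

# With use_default=True the actor and critic are built with the fixed CDBTune-style
# layer sizes from the branches above; otherwise a_hidden_sizes/c_hidden_sizes apply.
ddpg = DDPG(n_states=63, n_actions=12, alr=0.001, clr=0.001,
            gamma=0.99, batch_size=32, tau=0.002,
            a_hidden_sizes=[128, 128, 64], c_hidden_sizes=[128, 256, 64],
            use_default=True)
```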
@@ -52,11 +52,11 @@ DEFAULT_LEARNING_RATE = 0.01
 # a small bias when using training data points as starting points.
 GPR_EPS = 0.001
 
-DEFAULT_RIDGE = 1.0
+DEFAULT_RIDGE = 0.01
 
 DEFAULT_EPSILON = 1e-6
 
-DEFAULT_SIGMA_MULTIPLIER = 1.0
+DEFAULT_SIGMA_MULTIPLIER = 3.0
 
 DEFAULT_MU_MULTIPLIER = 1.0
@@ -84,6 +84,13 @@ DNN_DEBUG = True
 DNN_DEBUG_INTERVAL = 100
 
 # ---DDPG CONSTRAINTS CONSTANTS---
 
+# Use a simple reward
+DDPG_SIMPLE_REWARD = True
+
+# The weight of future rewards in Q value
+DDPG_GAMMA = 0.0
+
+# Batch size in DDPG model
 DDPG_BATCH_SIZE = 32
@@ -101,3 +108,14 @@ ACTOR_HIDDEN_SIZES = [128, 128, 64]
 
 # The number of hidden units in each layer of the critic MLP
 CRITIC_HIDDEN_SIZES = [64, 128, 64]
+
+# Use the same setting from the CDBTune paper
+USE_DEFAULT = True
+# Overwrite the DDPG settings if using CDBTune
+if USE_DEFAULT:
+    DDPG_SIMPLE_REWARD = False
+    DDPG_GAMMA = 0.99
+    DDPG_BATCH_SIZE = 32
+    ACTOR_LEARNING_RATE = 0.001
+    CRITIC_LEARNING_RATE = 0.001
+    UPDATE_EPOCHS = 1
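Because these are plain module-level constants, the `if USE_DEFAULT:` block simply rebinds them at import time, so anything that later imports them from `website.settings` sees the CDBTune values. A tiny self-contained sketch of the same pattern; the module name and the baseline values are placeholders, not the project's real ones:

```python
# settings_sketch.py -- illustrative stand-in for the constants module above.
DDPG_SIMPLE_REWARD = True
DDPG_GAMMA = 0.0
UPDATE_EPOCHS = 30        # placeholder baseline value

USE_DEFAULT = True
if USE_DEFAULT:
    # Plain reassignment at module scope: importers see the overridden values.
    DDPG_SIMPLE_REWARD = False
    DDPG_GAMMA = 0.99
    UPDATE_EPOCHS = 1

# elsewhere (hypothetical consumer):
#   from settings_sketch import DDPG_GAMMA   # -> 0.99 while USE_DEFAULT is True
```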
@@ -36,6 +36,7 @@ from website.settings import (USE_GPFLOW, DEFAULT_LENGTH_SCALE, DEFAULT_MAGNITUDE,
                               DEFAULT_EPSILON, MAX_ITER, GPR_EPS,
                               DEFAULT_SIGMA_MULTIPLIER, DEFAULT_MU_MULTIPLIER,
                               DEFAULT_UCB_SCALE, HP_LEARNING_RATE, HP_MAX_ITER,
+                              DDPG_SIMPLE_REWARD, DDPG_GAMMA, USE_DEFAULT,
                               DDPG_BATCH_SIZE, ACTOR_LEARNING_RATE,
                               CRITIC_LEARNING_RATE, UPDATE_EPOCHS,
                               ACTOR_HIDDEN_SIZES, CRITIC_HIDDEN_SIZES,
@@ -285,18 +286,25 @@ def train_ddpg(result_id):
     result = Result.objects.get(pk=result_id)
     session = Result.objects.get(pk=result_id).session
     session_results = Result.objects.filter(session=session,
-                                            creation_time__lte=result.creation_time)
+                                            creation_time__lt=result.creation_time)
     result_info = {}
     result_info['newest_result_id'] = result_id
 
-    # Extract data from result
+    # Extract data from result and previous results
     result = Result.objects.filter(pk=result_id)
     if len(session_results) == 0:
         base_result_id = result_id
+        prev_result_id = result_id
     else:
         base_result_id = session_results[0].pk
+        prev_result_id = session_results[len(session_results)-1].pk
     base_result = Result.objects.filter(pk=base_result_id)
+    prev_result = Result.objects.filter(pk=prev_result_id)
 
     agg_data = DataUtil.aggregate_data(result)
     metric_data = agg_data['y_matrix'].flatten()
     base_metric_data = (DataUtil.aggregate_data(base_result))['y_matrix'].flatten()
+    prev_metric_data = (DataUtil.aggregate_data(prev_result))['y_matrix'].flatten()
     metric_scalar = MinMaxScaler().fit(metric_data.reshape(1, -1))
     normalized_metric_data = metric_scalar.transform(metric_data.reshape(1, -1))[0]
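The reshuffled block above now tracks two reference results: the first result of the session (`base_result`) and the most recent earlier result (`prev_result`), both falling back to the new result itself when the session has no earlier results (note that the `creation_time__lt` filter now excludes the current result). A compact sketch of that selection rule; the function and argument names are ours:

```python
def pick_reference_results(earlier_result_ids, newest_result_id):
    """Return (base_result_id, prev_result_id) following the logic above."""
    if not earlier_result_ids:
        return newest_result_id, newest_result_id
    return earlier_result_ids[0], earlier_result_ids[-1]


# e.g. results 17, 21 and 30 were created before result 42 in the same session:
print(pick_reference_results([17, 21, 30], 42))  # (17, 30)
print(pick_reference_results([], 42))            # (42, 42) -- first result of a session
```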
@@ -323,21 +331,37 @@ def train_ddpg(result_id):
                                              target_objective))
     objective = metric_data[target_obj_idx]
     base_objective = base_metric_data[target_obj_idx]
+    prev_objective = prev_metric_data[target_obj_idx]
     metric_meta = db.target_objectives.get_metric_metadata(
         result.session.dbms.pk, result.session.target_objective)
 
     # Calculate the reward
+    if DDPG_SIMPLE_REWARD:
         objective = objective / base_objective
         if metric_meta[target_objective].improvement == '(less is better)':
             reward = -objective
         else:
             reward = objective
+    else:
+        if metric_meta[target_objective].improvement == '(less is better)':
+            if objective - base_objective <= 0:  # positive reward
+                reward = (np.square((2 * base_objective - objective) / base_objective) - 1)\
+                    * abs(2 * prev_objective - objective) / prev_objective
+            else:  # negative reward
+                reward = -(np.square(objective / base_objective) - 1) * objective / prev_objective
+        else:
+            if objective - base_objective > 0:  # positive reward
+                reward = (np.square(objective / base_objective) - 1) * objective / prev_objective
+            else:  # negative reward
+                reward = -(np.square((2 * base_objective - objective) / base_objective) - 1)\
+                    * abs(2 * prev_objective - objective) / prev_objective
     LOG.info('reward: %f', reward)
 
     # Update ddpg
     ddpg = DDPG(n_actions=knob_num, n_states=metric_num, alr=ACTOR_LEARNING_RATE,
-                clr=CRITIC_LEARNING_RATE, gamma=0, batch_size=DDPG_BATCH_SIZE,
-                a_hidden_sizes=ACTOR_HIDDEN_SIZES, c_hidden_sizes=CRITIC_HIDDEN_SIZES)
+                clr=CRITIC_LEARNING_RATE, gamma=DDPG_GAMMA, batch_size=DDPG_BATCH_SIZE,
+                a_hidden_sizes=ACTOR_HIDDEN_SIZES, c_hidden_sizes=CRITIC_HIDDEN_SIZES,
+                use_default=USE_DEFAULT)
     if session.ddpg_actor_model and session.ddpg_critic_model:
         ddpg.set_model(session.ddpg_actor_model, session.ddpg_critic_model)
     if session.ddpg_reply_memory:
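The non-simple branch above can be read as a pure function of the current, first (base) and previous objective values: the reward is positive when the new configuration beats the baseline, and its magnitude is scaled by how far the result also moved relative to the previous run. A direct transcription for reference; the helper and argument names are ours, the arithmetic is copied from the hunk:

```python
import numpy as np


def cdbtune_style_reward(objective, base_objective, prev_objective, less_is_better):
    """Reward used when DDPG_SIMPLE_REWARD is False, per the branch above."""
    if less_is_better:
        if objective - base_objective <= 0:  # improved on the first result
            return (np.square((2 * base_objective - objective) / base_objective) - 1) \
                * abs(2 * prev_objective - objective) / prev_objective
        return -(np.square(objective / base_objective) - 1) * objective / prev_objective
    if objective - base_objective > 0:  # improved on the first result
        return (np.square(objective / base_objective) - 1) * objective / prev_objective
    return -(np.square((2 * base_objective - objective) / base_objective) - 1) \
        * abs(2 * prev_objective - objective) / prev_objective


# Throughput example (more is better): first result 1000, previous 1150, current 1200.
print(cdbtune_style_reward(1200.0, 1000.0, 1150.0, less_is_better=False))  # ~0.46
```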
@@ -368,7 +392,7 @@ def configuration_recommendation_ddpg(result_info):  # pylint: disable=invalid-name
     metric_num = len(metric_data)
 
     ddpg = DDPG(n_actions=knob_num, n_states=metric_num, a_hidden_sizes=ACTOR_HIDDEN_SIZES,
-                c_hidden_sizes=CRITIC_HIDDEN_SIZES)
+                c_hidden_sizes=CRITIC_HIDDEN_SIZES, use_default=USE_DEFAULT)
     if session.ddpg_actor_model is not None and session.ddpg_critic_model is not None:
         ddpg.set_model(session.ddpg_actor_model, session.ddpg_critic_model)
     if session.ddpg_reply_memory is not None:
@@ -646,9 +670,8 @@ def configuration_recommendation(recommendation_input):
                           epsilon=DEFAULT_EPSILON,
                           max_iter=MAX_ITER,
                           sigma_multiplier=DEFAULT_SIGMA_MULTIPLIER,
-                          mu_multiplier=DEFAULT_MU_MULTIPLIER,
-                          ridge=DEFAULT_RIDGE)
-            model.fit(X_scaled, y_scaled, X_min, X_max)
+                          mu_multiplier=DEFAULT_MU_MULTIPLIER)
+            model.fit(X_scaled, y_scaled, X_min, X_max, ridge=DEFAULT_RIDGE)
             res = model.predict(X_samples, constraint_helper=constraint_helper)
 
             best_config_idx = np.argmin(res.minl.ravel())