Add machine learning model DDPG
This commit is contained in:
committed by
Dana Van Aken
parent
b9dc726b9c
commit
c83f2649b6
@@ -5,7 +5,9 @@
|
||||
#
|
||||
from .async_tasks import (aggregate_target_results,
|
||||
configuration_recommendation,
|
||||
map_workload)
|
||||
map_workload,
|
||||
train_ddpg,
|
||||
run_ddpg)
|
||||
|
||||
|
||||
from .periodic_tasks import (run_background_tasks)
|
||||
|
||||
@@ -5,6 +5,8 @@
|
||||
#
|
||||
import random
|
||||
import queue
|
||||
from os.path import dirname, abspath, join
|
||||
import os
|
||||
import numpy as np
|
||||
|
||||
from celery.task import task, Task
|
||||
@@ -12,6 +14,7 @@ from celery.utils.log import get_task_logger
|
||||
from djcelery.models import TaskMeta
|
||||
from sklearn.preprocessing import StandardScaler
|
||||
|
||||
from analysis.ddpg.ddpg import DDPG
|
||||
from analysis.gp import GPRNP
|
||||
from analysis.gp_tf import GPRGD
|
||||
from analysis.preprocessing import Bin, DummyEncoder
|
||||
@@ -30,6 +33,7 @@ from website.settings import (DEFAULT_LENGTH_SCALE, DEFAULT_MAGNITUDE,
|
||||
from website.settings import INIT_FLIP_PROB, FLIP_PROB_DECAY
|
||||
from website.types import VarType
|
||||
|
||||
|
||||
LOG = get_task_logger(__name__)
|
||||
|
||||
|
||||
@@ -41,6 +45,17 @@ class UpdateTask(Task): # pylint: disable=abstract-method
|
||||
self.default_retry_delay = 60
|
||||
|
||||
|
||||
class TrainDDPG(UpdateTask):  # pylint: disable=abstract-method

    def on_success(self, retval, task_id, args, kwargs):
        """Celery success hook: clear the stored result for this task.

        The DDPG training task's return payload is large and is never
        read back, so the persisted TaskMeta result is dropped to keep
        the result backend small.
        """
        super(TrainDDPG, self).on_success(retval, task_id, args, kwargs)

        # Discard the huge, uninteresting result blob entirely.
        meta = TaskMeta.objects.get(task_id=task_id)
        meta.result = None
        meta.save()
|
||||
|
||||
|
||||
class AggregateTargetResults(UpdateTask): # pylint: disable=abstract-method
|
||||
|
||||
def on_success(self, retval, task_id, args, kwargs):
|
||||
@@ -194,6 +209,126 @@ def gen_random_data(knobs):
|
||||
return random_knob_result
|
||||
|
||||
|
||||
def _ddpg_reward(objective, prev_objective, base_objective, less_is_better):
    """Compute the DDPG reward for the newest objective value.

    The reward is positive when the objective improved relative to the
    session's base (first) result and negative otherwise.  Its magnitude
    grows quadratically with the relative change from the base result and
    is scaled by the relative change from the previous result.

    Args:
        objective: target-objective value of the newest result.
        prev_objective: target-objective value of the previous result.
        base_objective: target-objective value of the session's first result.
        less_is_better: True when a smaller objective is an improvement
            (e.g. latency), False when larger is better (e.g. throughput).

    Returns:
        The scalar (array-shaped like ``objective``) reward.
    """
    if less_is_better:
        if objective - base_objective <= 0:
            reward = -(np.square(objective / base_objective) - 1) * objective / prev_objective
        else:
            reward = (np.square((2 * base_objective - objective) / base_objective) - 1)\
                * (2 * prev_objective - objective) / prev_objective
    else:
        if objective - base_objective > 0:
            reward = (np.square(objective / base_objective) - 1) * objective / prev_objective
        else:
            reward = -(np.square((2 * base_objective - objective) / base_objective) - 1)\
                * (2 * prev_objective - objective) / prev_objective
    return reward


@task(base=TrainDDPG, name='train_ddpg')
def train_ddpg(result_id):
    """Add the newest result to the DDPG replay memory and train the model.

    Builds one (state, action, reward, next_state) sample from the result
    identified by ``result_id``: the previous result's metrics are the
    state, the new result's normalized knob settings are the action, and
    the reward compares the target objective against the previous and the
    base (first) result of the same session.  The replay memory and model
    weights are checkpointed to disk per project.

    Args:
        result_id: primary key of the newest Result.

    Returns:
        dict with key 'newest_result_id' for the downstream run_ddpg task.
    """
    LOG.info('Add training data to ddpg and train ddpg')
    result = Result.objects.get(pk=result_id)
    session = result.session
    # Order explicitly so "previous" and "base" are well-defined even if
    # the model's default ordering changes.
    session_results = Result.objects.filter(
        session=session,
        creation_time__lt=result.creation_time).order_by('creation_time')
    result_info = {'newest_result_id': result_id}
    if not session_results.exists():
        LOG.info('No previous result. Abort.')
        return result_info
    num_prev = len(session_results)
    prev_result = Result.objects.filter(pk=session_results[num_prev - 1].pk)
    base_result = Result.objects.filter(pk=session_results[0].pk)

    # Extract metric data for the new, previous, and base results.
    agg_data = DataUtil.aggregate_data(Result.objects.filter(pk=result_id))
    metric_data = agg_data['y_matrix'].flatten()
    prev_metric_data = DataUtil.aggregate_data(prev_result)['y_matrix'].flatten()
    base_metric_data = DataUtil.aggregate_data(base_result)['y_matrix'].flatten()

    # Clean and normalize the knob data; this is the DDPG action vector.
    cleaned_agg_data = clean_knob_data(agg_data['X_matrix'], agg_data['X_columnlabels'], session)
    agg_data['X_matrix'] = np.array(cleaned_agg_data[0]).flatten()
    agg_data['X_columnlabels'] = np.array(cleaned_agg_data[1]).flatten()
    knob_data = DataUtil.normalize_knob_data(agg_data['X_matrix'],
                                             agg_data['X_columnlabels'], session)
    knob_num = len(knob_data)
    metric_num = len(metric_data)
    LOG.info('knob_num: %d, metric_num: %d', knob_num, metric_num)

    # Locate the target objective column among the metric labels.
    target_objective = session.target_objective
    target_obj_idx = [i for i, n in enumerate(agg_data['y_columnlabels']) if n == target_objective]
    if len(target_obj_idx) == 0:
        raise Exception(('Could not find target objective in metrics '
                         '(target_obj={})').format(target_objective))
    elif len(target_obj_idx) > 1:
        raise Exception(('Found {} instances of target objective in '
                         'metrics (target_obj={})').format(len(target_obj_idx),
                                                           target_objective))
    objective = metric_data[target_obj_idx]
    prev_objective = prev_metric_data[target_obj_idx]
    base_objective = base_metric_data[target_obj_idx]
    metric_meta = MetricCatalog.objects.get_metric_meta(session.dbms, target_objective)

    # Calculate the reward from the objective values.
    less_is_better = metric_meta[target_objective].improvement == '(less is better)'
    reward = _ddpg_reward(objective, prev_objective, base_objective, less_is_better)

    # Update the DDPG model, resuming from any per-project checkpoints.
    project_root = dirname(dirname(dirname(abspath(__file__))))
    # NOTE(review): 'reply_memory' (sic) must match the path used by
    # run_ddpg; renaming it would orphan existing checkpoints.
    saved_memory = join(project_root, 'checkpoint/reply_memory_' + session.project.name)
    saved_model = join(project_root, 'checkpoint/ddpg_' + session.project.name)
    ddpg = DDPG(n_actions=knob_num, n_states=metric_num)
    if os.path.exists(saved_memory):
        ddpg.replay_memory.load_memory(saved_memory)
        ddpg.load_model(saved_model)
    ddpg.add_sample(prev_metric_data, knob_data, reward, metric_data, False)
    # Only train once the replay memory exceeds a minimum batch size.
    if len(ddpg.replay_memory) > 32:
        ddpg.update()
    checkpoint_dir = join(project_root, 'checkpoint')
    if not os.path.exists(checkpoint_dir):
        os.makedirs(checkpoint_dir)
    ddpg.replay_memory.save(saved_memory)
    ddpg.save_model(saved_model)
    return result_info
|
||||
|
||||
|
||||
@task(base=ConfigurationRecommendation, name='run_ddpg')
def run_ddpg(result_info):
    """Recommend a knob configuration using the session's DDPG model.

    Loads the per-project DDPG checkpoints written by train_ddpg, feeds
    the newest result's metric vector to the actor to choose a normalized
    action, and denormalizes it back into real knob values.

    Args:
        result_info: dict from train_ddpg with key 'newest_result_id'.

    Returns:
        dict with keys 'status', 'recommendation' (knob name -> value),
        and 'info'.
    """
    LOG.info('Use ddpg to recommend configuration')
    result_id = result_info['newest_result_id']
    result = Result.objects.filter(pk=result_id)
    session = Result.objects.get(pk=result_id).session
    agg_data = DataUtil.aggregate_data(result)
    metric_data = agg_data['y_matrix'].flatten()
    cleaned_agg_data = clean_knob_data(agg_data['X_matrix'], agg_data['X_columnlabels'],
                                       session)
    knob_labels = np.array(cleaned_agg_data[1]).flatten()
    knob_num = len(np.array(cleaned_agg_data[0]).flatten())
    metric_num = len(metric_data)

    # Load the checkpoints written by train_ddpg for this project.
    project_root = dirname(dirname(dirname(abspath(__file__))))
    # NOTE(review): 'reply_memory' (sic) must match the path used by
    # train_ddpg; renaming it would orphan existing checkpoints.
    saved_memory = join(project_root, 'checkpoint/reply_memory_' + session.project.name)
    saved_model = join(project_root, 'checkpoint/ddpg_' + session.project.name)
    ddpg = DDPG(n_actions=knob_num, n_states=metric_num)
    # Guard each checkpoint independently: the original gated the model
    # load on the *memory* file's existence, which crashes (or skips
    # trained weights) when only one of the two files is present.
    if os.path.exists(saved_memory):
        ddpg.replay_memory.load_memory(saved_memory)
    if os.path.exists(saved_model):
        ddpg.load_model(saved_model)
    knob_data = ddpg.choose_action(metric_data)
    LOG.info('recommended knob: %s', knob_data)
    knob_data = DataUtil.denormalize_knob_data(knob_data, knob_labels, session)
    conf_map = {k: knob_data[i] for i, k in enumerate(knob_labels)}
    conf_map_res = {}
    conf_map_res['status'] = 'good'
    conf_map_res['recommendation'] = conf_map
    conf_map_res['info'] = 'INFO: ddpg'
    for k in knob_labels:
        LOG.info('%s: %f', k, conf_map[k])
    return conf_map_res
|
||||
|
||||
|
||||
@task(base=ConfigurationRecommendation, name='configuration_recommendation')
|
||||
def configuration_recommendation(target_data):
|
||||
LOG.info('configuration_recommendation called')
|
||||
|
||||
@@ -65,6 +65,9 @@ urlpatterns = [
|
||||
|
||||
# Back door
|
||||
url(r'^query_and_get/(?P<upload_code>[0-9a-zA-Z]+)$', website_views.give_result, name="backdoor"),
|
||||
|
||||
# train ddpg with results in the given session
|
||||
url(r'^train_ddpg/sessions/(?P<session_id>[0-9]+)$', website_views.train_ddpg_loops, name='train_ddpg_loops'),
|
||||
]
|
||||
|
||||
if settings.DEBUG:
|
||||
|
||||
@@ -20,7 +20,7 @@ from django.utils.text import capfirst
|
||||
from djcelery.models import TaskMeta
|
||||
|
||||
from .types import LabelStyleType, VarType
|
||||
from .models import KnobCatalog, DBMSCatalog
|
||||
from .models import KnobCatalog, DBMSCatalog, SessionKnob
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
@@ -92,6 +92,34 @@ class TaskUtil(object):
|
||||
|
||||
class DataUtil(object):
|
||||
|
||||
@staticmethod
|
||||
def normalize_knob_data(knob_values, knob_labels, session):
|
||||
for i, knob in enumerate(knob_labels):
|
||||
knob_object = KnobCatalog.objects.get(dbms=session.dbms, name=knob, tunable=True)
|
||||
minval = float(knob_object.minval)
|
||||
maxval = float(knob_object.maxval)
|
||||
knob_new = SessionKnob.objects.filter(knob=knob_object, session=session, tunable=True)
|
||||
if knob_new.exists():
|
||||
minval = float(knob_new[0].minval)
|
||||
maxval = float(knob_new[0].maxval)
|
||||
knob_values[i] = (knob_values[i] - minval) / (maxval - minval)
|
||||
knob_values[i] = max(0, min(knob_values[i], 1))
|
||||
return knob_values
|
||||
|
||||
@staticmethod
|
||||
def denormalize_knob_data(knob_values, knob_labels, session):
|
||||
for i, knob in enumerate(knob_labels):
|
||||
knob_object = KnobCatalog.objects.get(dbms=session.dbms, name=knob, tunable=True)
|
||||
minval = float(knob_object.minval)
|
||||
maxval = float(knob_object.maxval)
|
||||
knob_session_object = SessionKnob.objects.filter(knob=knob_object, session=session,
|
||||
tunable=True)
|
||||
if knob_session_object.exists():
|
||||
minval = float(knob_session_object[0].minval)
|
||||
maxval = float(knob_session_object[0].maxval)
|
||||
knob_values[i] = knob_values[i] * (maxval - minval) + minval
|
||||
return knob_values
|
||||
|
||||
@staticmethod
|
||||
def aggregate_data(results):
|
||||
knob_labels = list(JSONUtil.loads(results[0].knob_data.data).keys())
|
||||
|
||||
@@ -30,7 +30,7 @@ from .models import (BackupData, DBMSCatalog, KnobCatalog, KnobData, MetricCatal
|
||||
MetricData, MetricManager, Project, Result, Session, Workload,
|
||||
SessionKnob)
|
||||
from .parser import Parser
|
||||
from .tasks import (aggregate_target_results, map_workload,
|
||||
from .tasks import (aggregate_target_results, map_workload, train_ddpg, run_ddpg,
|
||||
configuration_recommendation)
|
||||
from .types import (DBMSType, KnobUnitType, MetricType,
|
||||
TaskType, VarType, WorkloadStatusType, AlgorithmType)
|
||||
@@ -967,3 +967,11 @@ def give_result(request, upload_code): # pylint: disable=unused-argument
|
||||
# success
|
||||
res = Result.objects.get(pk=lastest_result.pk)
|
||||
return HttpResponse(JSONUtil.dumps(res.next_configuration), content_type='application/json')
|
||||
|
||||
|
||||
def train_ddpg_loops(request, session_id):  # pylint: disable=unused-argument
    """Train the session's DDPG model on all of its existing results.

    The get_object_or_404 lookup doubles as an authorization check: it
    404s unless the session belongs to the requesting user.

    Args:
        request: the HTTP request (used only for the user check).
        session_id: primary key of the Session to train on.

    Returns:
        An empty HttpResponse on success.
    """
    session = get_object_or_404(Session, pk=session_id, user=request.user)  # pylint: disable=unused-variable
    # Replay oldest-first: train_ddpg builds its prev/base sample from
    # results with an earlier creation_time, so order matters.
    results = Result.objects.filter(session=session_id).order_by('creation_time')
    for result in results:
        # Called synchronously on purpose: each step must finish (and
        # checkpoint the model) before the next one trains on top of it.
        train_ddpg(result.pk)
    return HttpResponse()
|
||||
|
||||
Reference in New Issue
Block a user