diff --git a/script/formatting/config/pylintrc b/script/formatting/config/pylintrc
index 02ade7d..4e911c7 100644
--- a/script/formatting/config/pylintrc
+++ b/script/formatting/config/pylintrc
@@ -13,7 +13,7 @@ profile=no
 
 # Add files or directories to the blacklist. They should be base names, not
 # paths.
-ignore=CVS,.git,manage.py,0001_initial.py,0002_enable_compression.py,0003_load_initial_data.py,0004_add_lhs.py,0005_add_workload_field.py,0006_session_hyper_parameters.py,credentials.py,create_knob_settings.py
+ignore=CVS,.git,manage.py,0001_initial.py,0002_enable_compression.py,0003_load_initial_data.py,0004_add_lhs.py,0005_add_workload_field.py,0006_session_hyperparameters.py,credentials.py,create_knob_settings.py
 
 # ignore-patterns=**/migrations/*.py
 
diff --git a/server/website/tests/test_views.py b/server/website/tests/test_views.py
index 80579b9..07928f6 100644
--- a/server/website/tests/test_views.py
+++ b/server/website/tests/test_views.py
@@ -150,7 +150,7 @@ class SessionViewsTests(TestCase):
         'storage': '32',
         'storage_type': 5,
         'dbms': 1,
-        'hyper_parameters': '{}'
+        'hyperparameters': '{}'
     }
 
     def setUp(self):
diff --git a/server/website/website/forms.py b/server/website/website/forms.py
index 7200810..26c8f06 100644
--- a/server/website/website/forms.py
+++ b/server/website/website/forms.py
@@ -134,12 +134,12 @@ class SessionForm(forms.ModelForm):
         model = Session
 
         fields = ('name', 'description', 'tuning_session', 'dbms', 'cpu', 'memory', 'storage',
-                  'algorithm', 'target_objective', 'hyper_parameters')
+                  'algorithm', 'target_objective', 'hyperparameters')
 
         widgets = {
            'name': forms.TextInput(attrs={'required': True}),
            'description': forms.Textarea(attrs={'maxlength': 500, 'rows': 5}),
-           'hyper_parameters': forms.Textarea(attrs={'maxlength': 2000, 'rows': 10}),
+           'hyperparameters': forms.Textarea(attrs={'maxlength': 5000, 'rows': 15}),
         }
         labels = {
             'dbms': 'DBMS',
diff --git a/server/website/website/migrations/0006_session_hyper_parameters.py b/server/website/website/migrations/0006_session_hyper_parameters.py
deleted file mode 100644
index ed15cf7..0000000
--- a/server/website/website/migrations/0006_session_hyper_parameters.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# -*- coding: utf-8 -*-
-# Generated by Django 1.11.23 on 2020-01-12 07:29
-from __future__ import unicode_literals
-
-from django.db import migrations, models
-
-
-class Migration(migrations.Migration):
-
-    dependencies = [
-        ('website', '0005_add_workload_field'),
-    ]
-
-    operations = [
-        migrations.AddField(
-            model_name='session',
-            name='hyper_parameters',
-            field=models.TextField(default='{}'),
-        ),
-    ]
diff --git a/server/website/website/migrations/0006_session_hyperparameters.py b/server/website/website/migrations/0006_session_hyperparameters.py
new file mode 100644
index 0000000..96b2300
--- /dev/null
+++ b/server/website/website/migrations/0006_session_hyperparameters.py
@@ -0,0 +1,20 @@
+# -*- coding: utf-8 -*-
+# Generated by Django 1.11.23 on 2020-01-18 16:22
+from __future__ import unicode_literals
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('website', '0005_add_workload_field'),
+    ]
+
+    operations = [
+        migrations.AddField(
+            model_name='session',
+            name='hyperparameters',
+            field=models.TextField(default='{\n    "DDPG_ACTOR_HIDDEN_SIZES": [128, 128, 64],\n    "DDPG_ACTOR_LEARNING_RATE": 0.02,\n    "DDPG_CRITIC_HIDDEN_SIZES": [64, 128, 64],\n    "DDPG_CRITIC_LEARNING_RATE": 0.001,\n    "DDPG_BATCH_SIZE": 32,\n    "DDPG_GAMMA": 0.0,\n    "DDPG_SIMPLE_REWARD": true,\n    "DDPG_UPDATE_EPOCHS": 30,\n    "DDPG_USE_DEFAULT": false,\n    "DNN_DEBUG": true,\n    "DNN_DEBUG_INTERVAL": 100,\n    "DNN_EXPLORE": false,\n    "DNN_EXPLORE_ITER": 500,\n    "DNN_GD_ITER": 100,\n    "DNN_NOISE_SCALE_BEGIN": 0.1,\n    "DNN_NOISE_SCALE_END": 0.0,\n    "DNN_TRAIN_ITER": 100,\n    "FLIP_PROB_DECAY": 0.5,\n    "GPR_BATCH_SIZE": 3000,\n    "GPR_DEBUG": true,\n    "GPR_EPS": 0.001,\n    "GPR_EPSILON": 1e-06,\n    "GPR_LEARNING_RATE": 0.01,\n    "GPR_LENGTH_SCALE": 2.0,\n    "GPR_MAGNITUDE": 1.0,\n    "GPR_MAX_ITER": 500,\n    "GPR_MAX_TRAIN_SIZE": 7000,\n    "GPR_MU_MULTIPLIER": 1.0,\n    "GPR_MODEL_NAME": "BasicGP",\n    "GPR_HP_LEARNING_RATE": 0.001,\n    "GPR_HP_MAX_ITER": 5000,\n    "GPR_RIDGE": 1.0,\n    "GPR_SIGMA_MULTIPLIER": 1.0,\n    "GPR_UCB_SCALE": 0.2,\n    "GPR_USE_GPFLOW": true,\n    "GPR_UCB_BETA": "get_beta_td",\n    "IMPORTANT_KNOB_NUMBER": 10000,\n    "INIT_FLIP_PROB": 0.3,\n    "NUM_SAMPLES": 30,\n    "TF_NUM_THREADS": 4,\n    "TOP_NUM_CONFIG": 10}'),
+        ),
+    ]
diff --git a/server/website/website/models.py b/server/website/website/models.py
index ee40d33..b3407b6 100644
--- a/server/website/website/models.py
+++ b/server/website/website/models.py
@@ -162,7 +162,48 @@ class Session(BaseModel):
     target_objective = models.CharField(
         max_length=64, default=target_objectives.default())
-    hyper_parameters = models.TextField(default="{}")
+    hyperparameters = models.TextField(default='''{
+    "DDPG_ACTOR_HIDDEN_SIZES": [128, 128, 64],
+    "DDPG_ACTOR_LEARNING_RATE": 0.02,
+    "DDPG_CRITIC_HIDDEN_SIZES": [64, 128, 64],
+    "DDPG_CRITIC_LEARNING_RATE": 0.001,
+    "DDPG_BATCH_SIZE": 32,
+    "DDPG_GAMMA": 0.0,
+    "DDPG_SIMPLE_REWARD": true,
+    "DDPG_UPDATE_EPOCHS": 30,
+    "DDPG_USE_DEFAULT": false,
+    "DNN_DEBUG": true,
+    "DNN_DEBUG_INTERVAL": 100,
+    "DNN_EXPLORE": false,
+    "DNN_EXPLORE_ITER": 500,
+    "DNN_GD_ITER": 100,
+    "DNN_NOISE_SCALE_BEGIN": 0.1,
+    "DNN_NOISE_SCALE_END": 0.0,
+    "DNN_TRAIN_ITER": 100,
+    "FLIP_PROB_DECAY": 0.5,
+    "GPR_BATCH_SIZE": 3000,
+    "GPR_DEBUG": true,
+    "GPR_EPS": 0.001,
+    "GPR_EPSILON": 1e-06,
+    "GPR_LEARNING_RATE": 0.01,
+    "GPR_LENGTH_SCALE": 2.0,
+    "GPR_MAGNITUDE": 1.0,
+    "GPR_MAX_ITER": 500,
+    "GPR_MAX_TRAIN_SIZE": 7000,
+    "GPR_MU_MULTIPLIER": 1.0,
+    "GPR_MODEL_NAME": "BasicGP",
+    "GPR_HP_LEARNING_RATE": 0.001,
+    "GPR_HP_MAX_ITER": 5000,
+    "GPR_RIDGE": 1.0,
+    "GPR_SIGMA_MULTIPLIER": 1.0,
+    "GPR_UCB_SCALE": 0.2,
+    "GPR_USE_GPFLOW": true,
+    "GPR_UCB_BETA": "get_beta_td",
+    "IMPORTANT_KNOB_NUMBER": 10000,
+    "INIT_FLIP_PROB": 0.3,
+    "NUM_SAMPLES": 30,
+    "TF_NUM_THREADS": 4,
+    "TOP_NUM_CONFIG": 10}''')
 
     def clean(self):
         if self.target_objective is None:
diff --git a/server/website/website/settings/constants.py b/server/website/website/settings/constants.py
index 51da41d..717c7b6 100644
--- a/server/website/website/settings/constants.py
+++ b/server/website/website/settings/constants.py
@@ -4,8 +4,6 @@
 # Copyright (c) 2017-18, Carnegie Mellon University Database Group
 #
-
-# ---------------------------------------------
 # These parameters are not specified for any session, so they can only be set here
 
 # address categorical knobs (enum, boolean)
@@ -14,133 +12,3 @@ ENABLE_DUMMY_ENCODER = False
 # ---PIPELINE CONSTANTS---
 # how often to run the background tests, in seconds
 RUN_EVERY = 300
-
-
-# ---------------------------------------------
-# The following parameters can be viewed and modified on the session page on the website
-
-# ---SAMPLING CONSTANTS---
-# the number of samples (staring points) in gradient descent
-NUM_SAMPLES = 30
-
-# the number of selected tuning knobs
-# set it to a large value if you want to disable the knob identification
-# phase (i.e. tune all session knobs)
-IMPORTANT_KNOB_NUMBER = 10000
-
-# top K config with best performance put into prediction
-TOP_NUM_CONFIG = 10
-
-# ---CONSTRAINTS CONSTANTS---
-# Initial probability to flip categorical feature in apply_constraints
-# server/analysis/constraints.py
-INIT_FLIP_PROB = 0.3
-
-# The probability that we flip the i_th categorical feature is
-# FLIP_PROB_DECAY * (probability we flip (i-1)_th categorical feature)
-FLIP_PROB_DECAY = 0.5
-
-# ---GPR CONSTANTS---
-USE_GPFLOW = True
-
-GPR_DEBUG = True
-
-DEFAULT_LENGTH_SCALE = 2.0
-
-DEFAULT_MAGNITUDE = 1.0
-
-# Max training size in GPR model
-MAX_TRAIN_SIZE = 7000
-
-# Batch size in GPR model
-BATCH_SIZE = 3000
-
-# Threads for TensorFlow config
-NUM_THREADS = 4
-
-# Value of beta for UCB
-UCB_BETA = 'get_beta_td'
-
-# Name of the GPR model to use (GPFLOW only)
-GPR_MODEL_NAME = 'BasicGP'
-
-# ---GRADIENT DESCENT CONSTANTS---
-# the maximum iterations of gradient descent
-MAX_ITER = 500
-
-DEFAULT_LEARNING_RATE = 0.01
-
-# ---GRADIENT DESCENT FOR GPR---
-# a small bias when using training data points as starting points.
-GPR_EPS = 0.001
-
-DEFAULT_RIDGE = 1.00
-
-DEFAULT_EPSILON = 1e-6
-
-DEFAULT_SIGMA_MULTIPLIER = 1.0
-
-DEFAULT_MU_MULTIPLIER = 1.0
-
-DEFAULT_UCB_SCALE = 0.2
-
-# ---HYPERPARAMETER TUNING FOR GPR---
-HP_MAX_ITER = 5000
-
-HP_LEARNING_RATE = 0.001
-
-# ---GRADIENT DESCENT FOR DNN---
-DNN_TRAIN_ITER = 100
-
-# Gradient Descent iteration for recommendation
-DNN_GD_ITER = 100
-
-DNN_EXPLORE = False
-
-DNN_EXPLORE_ITER = 500
-
-# noise scale for paramater space exploration
-DNN_NOISE_SCALE_BEGIN = 0.1
-
-DNN_NOISE_SCALE_END = 0.0
-
-DNN_DEBUG = True
-
-DNN_DEBUG_INTERVAL = 100
-
-# ---DDPG CONSTRAINTS CONSTANTS---
-
-# Use a simple reward
-DDPG_SIMPLE_REWARD = True
-
-# The weight of future rewards in Q value
-DDPG_GAMMA = 0.0
-
-# Batch size in DDPG model
-DDPG_BATCH_SIZE = 32
-
-# Learning rate of actor network
-ACTOR_LEARNING_RATE = 0.02
-
-# Learning rate of critic network
-CRITIC_LEARNING_RATE = 0.001
-
-# Number of update epochs per iteration
-UPDATE_EPOCHS = 30
-
-# The number of hidden units in each layer of the actor MLP
-ACTOR_HIDDEN_SIZES = [128, 128, 64]
-
-# The number of hidden units in each layer of the critic MLP
-CRITIC_HIDDEN_SIZES = [64, 128, 64]
-
-# Use the same setting from the CDBTune paper
-USE_DEFAULT = False
-# Overwrite the DDPG settings if using CDBTune
-if USE_DEFAULT:
-    DDPG_SIMPLE_REWARD = False
-    DDPG_GAMMA = 0.99
-    DDPG_BATCH_SIZE = 32
-    ACTOR_LEARNING_RATE = 0.001
-    CRITIC_LEARNING_RATE = 0.001
-    UPDATE_EPOCHS = 1
diff --git a/server/website/website/tasks/async_tasks.py b/server/website/website/tasks/async_tasks.py
index fc4c2da..19caece 100644
--- a/server/website/website/tasks/async_tasks.py
+++ b/server/website/website/tasks/async_tasks.py
@@ -322,7 +322,7 @@ def train_ddpg(result_id):
     LOG.info('Add training data to ddpg and train ddpg')
     result = Result.objects.get(pk=result_id)
     session = Result.objects.get(pk=result_id).session
-    params = JSONUtil.loads(session.hyper_parameters)
+    params = JSONUtil.loads(session.hyperparameters)
     session_results = Result.objects.filter(session=session,
                                             creation_time__lt=result.creation_time)
     result_info = {}
@@ -402,16 +402,18 @@ def train_ddpg(result_id):
     LOG.info('reward: %f', reward)
 
     # Update ddpg
-    ddpg = DDPG(n_actions=knob_num, n_states=metric_num, alr=params['ACTOR_LEARNING_RATE'],
-                clr=params['CRITIC_LEARNING_RATE'], gamma=params['DDPG_GAMMA'],
-                batch_size=params['DDPG_BATCH_SIZE'], a_hidden_sizes=params['ACTOR_HIDDEN_SIZES'],
-                c_hidden_sizes=params['CRITIC_HIDDEN_SIZES'], use_default=params['USE_DEFAULT'])
+    ddpg = DDPG(n_actions=knob_num, n_states=metric_num, alr=params['DDPG_ACTOR_LEARNING_RATE'],
+                clr=params['DDPG_CRITIC_LEARNING_RATE'], gamma=params['DDPG_GAMMA'],
+                batch_size=params['DDPG_BATCH_SIZE'],
+                a_hidden_sizes=params['DDPG_ACTOR_HIDDEN_SIZES'],
+                c_hidden_sizes=params['DDPG_CRITIC_HIDDEN_SIZES'],
+                use_default=params['DDPG_USE_DEFAULT'])
     if session.ddpg_actor_model and session.ddpg_critic_model:
         ddpg.set_model(session.ddpg_actor_model, session.ddpg_critic_model)
     if session.ddpg_reply_memory:
         ddpg.replay_memory.set(session.ddpg_reply_memory)
     ddpg.add_sample(normalized_metric_data, knob_data, reward, normalized_metric_data)
-    for _ in range(params['UPDATE_EPOCHS']):
+    for _ in range(params['DDPG_UPDATE_EPOCHS']):
         ddpg.update()
     session.ddpg_actor_model, session.ddpg_critic_model = ddpg.get_model()
     session.ddpg_reply_memory = ddpg.replay_memory.get()
@@ -443,7 +445,7 @@ def configuration_recommendation_ddpg(result_info):  # pylint: disable=invalid-n
     result_list = Result.objects.filter(pk=result_id)
     result = result_list.first()
     session = result.session
-    params = JSONUtil.loads(session.hyper_parameters)
+    params = JSONUtil.loads(session.hyperparameters)
     agg_data = DataUtil.aggregate_data(result_list)
     metric_data, _ = clean_metric_data(agg_data['y_matrix'], agg_data['y_columnlabels'], session)
     metric_data = metric_data.flatten()
@@ -455,8 +457,9 @@ def configuration_recommendation_ddpg(result_info):  # pylint: disable=invalid-n
     metric_num = len(metric_data)
 
     ddpg = DDPG(n_actions=knob_num, n_states=metric_num,
-                a_hidden_sizes=params['ACTOR_HIDDEN_SIZES'],
-                c_hidden_sizes=params['CRITIC_HIDDEN_SIZES'], use_default=params['USE_DEFAULT'])
+                a_hidden_sizes=params['DDPG_ACTOR_HIDDEN_SIZES'],
+                c_hidden_sizes=params['DDPG_CRITIC_HIDDEN_SIZES'],
+                use_default=params['DDPG_USE_DEFAULT'])
     if session.ddpg_actor_model is not None and session.ddpg_critic_model is not None:
         ddpg.set_model(session.ddpg_actor_model, session.ddpg_critic_model)
     if session.ddpg_reply_memory is not None:
@@ -492,7 +495,7 @@ def combine_workload(target_data):
 
     newest_result = Result.objects.get(pk=target_data['newest_result_id'])
     session = newest_result.session
-    params = JSONUtil.loads(session.hyper_parameters)
+    params = JSONUtil.loads(session.hyperparameters)
     cleaned_workload_knob_data = clean_knob_data(workload_knob_data["data"],
                                                  workload_knob_data["columnlabels"],
                                                  newest_result.session)
@@ -661,7 +664,7 @@ def configuration_recommendation(recommendation_input):
     LOG.info('configuration_recommendation called')
     newest_result = Result.objects.get(pk=target_data['newest_result_id'])
     session = newest_result.session
-    params = JSONUtil.loads(session.hyper_parameters)
+    params = JSONUtil.loads(session.hyperparameters)
 
     if target_data['bad'] is True:
         target_data_res = create_and_save_recommendation(
@@ -726,17 +729,17 @@ def configuration_recommendation(recommendation_input):
 
     elif algorithm == AlgorithmType.GPR:
         # default gpr model
-        if params['USE_GPFLOW']:
+        if params['GPR_USE_GPFLOW']:
             model_kwargs = {}
-            model_kwargs['model_learning_rate'] = params['HP_LEARNING_RATE']
-            model_kwargs['model_maxiter'] = params['HP_MAX_ITER']
+            model_kwargs['model_learning_rate'] = params['GPR_HP_LEARNING_RATE']
+            model_kwargs['model_maxiter'] = params['GPR_HP_MAX_ITER']
             opt_kwargs = {}
-            opt_kwargs['learning_rate'] = params['DEFAULT_LEARNING_RATE']
-            opt_kwargs['maxiter'] = params['MAX_ITER']
+            opt_kwargs['learning_rate'] = params['GPR_LEARNING_RATE']
+            opt_kwargs['maxiter'] = params['GPR_MAX_ITER']
             opt_kwargs['bounds'] = [X_min, X_max]
             opt_kwargs['debug'] = params['GPR_DEBUG']
-            opt_kwargs['ucb_beta'] = ucb.get_ucb_beta(params['UCB_BETA'],
-                                                      scale=params['DEFAULT_UCB_SCALE'],
+            opt_kwargs['ucb_beta'] = ucb.get_ucb_beta(params['GPR_UCB_BETA'],
+                                                      scale=params['GPR_UCB_SCALE'],
                                                       t=i + 1., ndim=X_scaled.shape[1])
             tf.reset_default_graph()
             graph = tf.get_default_graph()
@@ -745,17 +748,17 @@ def configuration_recommendation(recommendation_input):
                                         **model_kwargs)
             res = tf_optimize(m.model, X_samples, **opt_kwargs)
         else:
-            model = GPRGD(length_scale=params['DEFAULT_LENGTH_SCALE'],
-                          magnitude=params['DEFAULT_MAGNITUDE'],
-                          max_train_size=params['MAX_TRAIN_SIZE'],
-                          batch_size=params['BATCH_SIZE'],
-                          num_threads=params['NUM_THREADS'],
-                          learning_rate=params['DEFAULT_LEARNING_RATE'],
-                          epsilon=params['DEFAULT_EPSILON'],
-                          max_iter=params['MAX_ITER'],
-                          sigma_multiplier=params['DEFAULT_SIGMA_MULTIPLIER'],
-                          mu_multiplier=params['DEFAULT_MU_MULTIPLIER'],
-                          ridge=params['DEFAULT_RIDGE'])
+            model = GPRGD(length_scale=params['GPR_LENGTH_SCALE'],
+                          magnitude=params['GPR_MAGNITUDE'],
+                          max_train_size=params['GPR_MAX_TRAIN_SIZE'],
+                          batch_size=params['GPR_BATCH_SIZE'],
+                          num_threads=params['TF_NUM_THREADS'],
+                          learning_rate=params['GPR_LEARNING_RATE'],
+                          epsilon=params['GPR_EPSILON'],
+                          max_iter=params['GPR_MAX_ITER'],
+                          sigma_multiplier=params['GPR_SIGMA_MULTIPLIER'],
+                          mu_multiplier=params['GPR_MU_MULTIPLIER'],
+                          ridge=params['GPR_RIDGE'])
             model.fit(X_scaled, y_scaled, X_min, X_max)
             res = model.predict(X_samples, constraint_helper=constraint_helper)
 
@@ -814,7 +817,7 @@ def map_workload(map_workload_input):
 
     newest_result = Result.objects.get(pk=target_data['newest_result_id'])
     session = newest_result.session
-    params = JSONUtil.loads(session.hyper_parameters)
+    params = JSONUtil.loads(session.hyperparameters)
     target_workload = newest_result.workload
     X_columnlabels = np.array(target_data['X_columnlabels'])
    y_columnlabels = np.array(target_data['y_columnlabels'])
@@ -929,11 +932,11 @@ def map_workload(map_workload_input):
             # and then predict the performance of each metric for each of
             # the knob configurations attempted so far by the target.
             y_col = y_col.reshape(-1, 1)
-            model = GPRNP(length_scale=params['DEFAULT_LENGTH_SCALE'],
-                          magnitude=params['DEFAULT_MAGNITUDE'],
-                          max_train_size=params['MAX_TRAIN_SIZE'],
-                          batch_size=params['BATCH_SIZE'])
-            model.fit(X_scaled, y_col, ridge=params['DEFAULT_RIDGE'])
+            model = GPRNP(length_scale=params['GPR_LENGTH_SCALE'],
+                          magnitude=params['GPR_MAGNITUDE'],
+                          max_train_size=params['GPR_MAX_TRAIN_SIZE'],
+                          batch_size=params['GPR_BATCH_SIZE'])
+            model.fit(X_scaled, y_col, ridge=params['GPR_RIDGE'])
             predictions[:, j] = model.predict(X_target).ypreds.ravel()
         # Bin each of the predicted metric columns by deciles and then
         # compute the score (i.e., distance) between the target workload
diff --git a/server/website/website/templates/edit_session.html b/server/website/website/templates/edit_session.html
index f93ca66..9af3c6a 100644
--- a/server/website/website/templates/edit_session.html
+++ b/server/website/website/templates/edit_session.html
@@ -49,9 +49,9 @@
     {{ form.target_objective.label_tag }}
     {{ form.target_objective }}
-
-    {{ form.hyper_parameters.label_tag }}
-    {{ form.hyper_parameters }}
+
+    {{ form.hyperparameters.label_tag }}
+    {{ form.hyperparameters }}
 
     {{ form.gen_upload_code.label_tag }}
diff --git a/server/website/website/views.py b/server/website/website/views.py
index d9e608b..46bf4e8 100644
--- a/server/website/website/views.py
+++ b/server/website/website/views.py
@@ -330,14 +330,12 @@ def create_or_edit_session(request, project_id, session_id=''):
     else:
         # Return a new form with defaults for creating a new session
         session = None
-        hyper_parameters = JSONUtil.dumps(utils.get_constants())
         form_kwargs.update(
             initial={
                 'dbms': DBMSCatalog.objects.get(
                     type=DBMSType.POSTGRES, version='9.6'),
                 'algorithm': AlgorithmType.GPR,
-                'target_objective': target_objectives.default(),
-                'hyper_parameters': hyper_parameters
+                'target_objective': target_objectives.default()
             })
     form = SessionForm(**form_kwargs)
     context = {
@@ -1457,36 +1455,31 @@ def create_test_website(request):  # pylint: disable=unused-argument
                                          password='ottertune_test_user')
     test_project = Project.objects.create(user=test_user, name='ottertune_test_project',
                                           creation_time=now(), last_update=now())
-    hyper_parameters = JSONUtil.dumps(utils.get_constants())
     # create no tuning session
     s1 = Session.objects.create(name='test_session_no_tuning',
                                 tuning_session='no_tuning_session',
                                 dbms_id=1, hardware=test_hardware, project=test_project,
                                 creation_time=now(), last_update=now(), user=test_user,
-                                upload_code='ottertuneTestNoTuning',
-                                hyper_parameters=hyper_parameters)
+                                upload_code='ottertuneTestNoTuning')
     set_default_knobs(s1)
     # create gpr session
     s2 = Session.objects.create(name='test_session_gpr', tuning_session='tuning_session',
                                 dbms_id=1, hardware=test_hardware, project=test_project,
                                 creation_time=now(), last_update=now(),
                                 algorithm=AlgorithmType.GPR,
-                                upload_code='ottertuneTestTuningGPR', user=test_user,
-                                hyper_parameters=hyper_parameters)
+                                upload_code='ottertuneTestTuningGPR', user=test_user)
     set_default_knobs(s2)
     # create dnn session
     s3 = Session.objects.create(name='test_session_dnn', tuning_session='tuning_session',
                                 dbms_id=1, hardware=test_hardware, project=test_project,
                                 creation_time=now(), last_update=now(),
                                 algorithm=AlgorithmType.DNN,
-                                upload_code='ottertuneTestTuningDNN', user=test_user,
-                                hyper_parameters=hyper_parameters)
+                                upload_code='ottertuneTestTuningDNN', user=test_user)
     set_default_knobs(s3)
     # create ddpg session
     s4 = Session.objects.create(name='test_session_ddpg', tuning_session='tuning_session',
                                 dbms_id=1, hardware=test_hardware, project=test_project,
                                 creation_time=now(), last_update=now(), user=test_user,
                                 upload_code='ottertuneTestTuningDDPG',
-                                algorithm=AlgorithmType.DDPG,
-                                hyper_parameters=hyper_parameters)
+                                algorithm=AlgorithmType.DDPG)
     set_default_knobs(s4)
     response = HttpResponse("Success: create test website successfully")
     return response
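
Note on the pattern this patch standardizes (a sketch, not part of the diff): every call site above parses session.hyperparameters into a dict with JSONUtil.loads and then indexes it with algorithm-prefixed keys (DDPG_*, DNN_*, GPR_*, TF_*), so a single flat JSON object carries the settings for every algorithm and the prefixes replace the old ambiguous names like DEFAULT_LEARNING_RATE. A minimal standalone illustration, using the standard-library json module as a stand-in for the project's JSONUtil wrapper and a hypothetical DEFAULTS dict:

    import json

    # Hypothetical subset of the defaults baked into Session.hyperparameters above.
    DEFAULTS = {
        "DDPG_ACTOR_LEARNING_RATE": 0.02,
        "DDPG_CRITIC_LEARNING_RATE": 0.001,
        "DDPG_BATCH_SIZE": 32,
        "DDPG_UPDATE_EPOCHS": 30,
    }

    def load_hyperparameters(raw):
        """Parse a session's JSON blob, filling in defaults for missing keys.

        The call sites in async_tasks.py index the parsed dict directly
        (e.g. params['DDPG_UPDATE_EPOCHS']), so a blob that omits a key
        would raise KeyError; merging over DEFAULTS guards against that.
        """
        params = dict(DEFAULTS)
        params.update(json.loads(raw or "{}"))
        return params

    params = load_hyperparameters('{"DDPG_BATCH_SIZE": 64}')
    assert params["DDPG_BATCH_SIZE"] == 64      # session override wins
    assert params["DDPG_UPDATE_EPOCHS"] == 30   # untouched keys keep defaults

The wider Textarea in forms.py (maxlength 2000 -> 5000, rows 10 -> 15) tracks this change: the default blob alone is now over a kilobyte of JSON, so the old limit would have been too tight for users editing it on the session page.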