move dnn parameters to constant file

This commit is contained in:
bohanjason 2019-09-28 01:42:56 -04:00 committed by Dana Van Aken
parent 82a7d859c2
commit c37ef9c072
3 changed files with 35 additions and 11 deletions

View File

@@ -42,20 +42,37 @@ NUM_THREADS = 4
# the maximum iterations of gradient descent # the maximum iterations of gradient descent
MAX_ITER = 500 MAX_ITER = 500
DEFAULT_LEARNING_RATE = 0.01
# ---GRADIENT DESCENT FOR GPR---
# a small bias when using training data points as starting points. # a small bias when using training data points as starting points.
GPR_EPS = 0.001 GPR_EPS = 0.001
DEFAULT_RIDGE = 0.01 DEFAULT_RIDGE = 0.01
DEFAULT_LEARNING_RATE = 0.01
DEFAULT_EPSILON = 1e-6 DEFAULT_EPSILON = 1e-6
DEFAULT_SIGMA_MULTIPLIER = 3.0 DEFAULT_SIGMA_MULTIPLIER = 3.0
DEFAULT_MU_MULTIPLIER = 1.0 DEFAULT_MU_MULTIPLIER = 1.0
# ---CONSTRAINTS CONSTANTS--- # ---GRADIENT DESCENT FOR DNN---
DNN_TRAIN_ITER = 500
DNN_EXPLORE = False
DNN_EXPLORE_ITER = 500
# noise scale for parameter space exploration
DNN_NOISE_SCALE_BEGIN = 0.1
DNN_NOISE_SCALE_END = 0.0
DNN_DEBUG = True
DNN_DEBUG_INTERVAL = 100
# ---DDPG CONSTRAINTS CONSTANTS---
# Batch size in DDPG model # Batch size in DDPG model
DDPG_BATCH_SIZE = 32 DDPG_BATCH_SIZE = 32

View File

@@ -31,7 +31,10 @@ from website.settings import (DEFAULT_LENGTH_SCALE, DEFAULT_MAGNITUDE,
DEFAULT_EPSILON, MAX_ITER, GPR_EPS, DEFAULT_EPSILON, MAX_ITER, GPR_EPS,
DEFAULT_SIGMA_MULTIPLIER, DEFAULT_MU_MULTIPLIER, DEFAULT_SIGMA_MULTIPLIER, DEFAULT_MU_MULTIPLIER,
DDPG_BATCH_SIZE, ACTOR_LEARNING_RATE, DDPG_BATCH_SIZE, ACTOR_LEARNING_RATE,
CRITIC_LEARNING_RATE, GAMMA, TAU) CRITIC_LEARNING_RATE, GAMMA, TAU,
DNN_TRAIN_ITER, DNN_EXPLORE, DNN_EXPLORE_ITER,
DNN_NOISE_SCALE_BEGIN, DNN_NOISE_SCALE_END,
DNN_DEBUG, DNN_DEBUG_INTERVAL)
from website.settings import INIT_FLIP_PROB, FLIP_PROB_DECAY from website.settings import INIT_FLIP_PROB, FLIP_PROB_DECAY
from website.settings import MODEL_DIR from website.settings import MODEL_DIR
@@ -553,12 +556,14 @@ def configuration_recommendation(recommendation_input):
model_nn = NeuralNet(weights_file=full_path, model_nn = NeuralNet(weights_file=full_path,
n_input=X_samples.shape[1], n_input=X_samples.shape[1],
batch_size=X_samples.shape[0], batch_size=X_samples.shape[0],
explore_iters=500, explore_iters=DNN_EXPLORE_ITER,
noise_scale_begin=0.1, noise_scale_begin=DNN_NOISE_SCALE_BEGIN,
noise_scale_end=0, noise_scale_end=DNN_NOISE_SCALE_END,
debug=True) debug=DNN_DEBUG,
model_nn.fit(X_scaled, y_scaled) debug_interval=DNN_DEBUG_INTERVAL)
res = model_nn.recommend(X_samples, X_min, X_max, explore=True) model_nn.fit(X_scaled, y_scaled, fit_epochs=DNN_TRAIN_ITER)
res = model_nn.recommend(X_samples, X_min, X_max,
explore=DNN_EXPLORE, recommend_epochs=MAX_ITER)
elif algorithm == 'gpr': elif algorithm == 'gpr':
# default gpr model # default gpr model
model = GPRGD(length_scale=DEFAULT_LENGTH_SCALE, model = GPRGD(length_scale=DEFAULT_LENGTH_SCALE,

View File

@ -539,11 +539,13 @@ def handle_result_files(session, files):
response = chain(aggregate_target_results.s(result.pk, 'dnn'), response = chain(aggregate_target_results.s(result.pk, 'dnn'),
map_workload.s(), map_workload.s(),
configuration_recommendation.s()).apply_async() configuration_recommendation.s()).apply_async()
taskmeta_ids = [] taskmeta_ids = []
current_task = response current_task = response
while current_task: while current_task:
taskmeta_ids.append(current_task.id) taskmeta_ids.insert(0, current_task.id)
current_task = current_task.parent current_task = current_task.parent
result.task_ids = ','.join(taskmeta_ids) result.task_ids = ','.join(taskmeta_ids)
result.save() result.save()
return HttpResponse("Result stored successfully! Running tuner...(status={}) Result ID:{} " return HttpResponse("Result stored successfully! Running tuner...(status={}) Result ID:{} "