more info in result

bohanjason 2020-05-22 03:05:23 -04:00 committed by Bohan Zhang
parent 3ff9698295
commit 131645f059
1 changed file with 15 additions and 7 deletions

@@ -272,11 +272,17 @@ def preprocessing(result_id, algorithm):
         if not has_pipeline_data and session.tuning_session == 'tuning_session':
             LOG.info("%s: Background tasks haven't ran for this workload yet, "
                      "picking data with lhs.", task_name)
+            target_data['debug'] = ("Background tasks haven't ran for this workload yet. "
+                                    "If this keeps happening, please make sure Celery periodic "
+                                    "tasks are running on the server.")
         if results_cnt == 0 and session.tuning_session == 'tuning_session':
             LOG.info("%s: Not enough data in this session, picking data with lhs.", task_name)
+            target_data['debug'] = "Not enough data in this session, picking data with lhs."
         if skip_ddpg:
             LOG.info("%s: The most recent result cannot be used by DDPG, picking data with lhs.",
                      task_name)
+            target_data['debug'] = ("The most recent result cannot be used by DDPG, "
+                                    "picking data with lhs.")
 
         all_samples = JSONUtil.loads(session.lhs_samples)
         if len(all_samples) == 0:
@@ -575,13 +581,14 @@ def check_early_return(target_data, algorithm):
     newest_result = Result.objects.get(pk=result_id)
     if target_data.get('status', 'good') != 'good':  # No status or status is not 'good'
         if target_data['status'] == 'random':
-            info = 'The config is generated by Random'
+            info = 'The config is generated by Random.'
         elif target_data['status'] == 'lhs':
-            info = 'The config is generated by LHS'
+            info = 'The config is generated by LHS.'
         elif target_data['status'] == 'range_test':
-            info = 'Searching for valid knob ranges'
+            info = 'Searching for valid knob ranges.'
         else:
-            info = 'Unknown'
+            info = 'Unknown.'
+        info += ' ' + target_data.get('debug', '')
         target_data_res = create_and_save_recommendation(
             recommended_knobs=target_data['config_recommend'], result=newest_result,
             status=target_data['status'], info=info, pipeline_run=None)
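
For context, a minimal sketch of how the pieces fit together after this change: the 'debug' note attached in preprocessing() is appended to the status message that check_early_return() stores with the recommendation. This is plain Python with hypothetical values; target_data and the message table are stand-ins, not the project's models or helpers.

    # Hypothetical target_data, standing in for what preprocessing() attaches.
    target_data = {
        'status': 'lhs',
        'debug': 'Not enough data in this session, picking data with lhs.',
    }

    status_messages = {
        'random': 'The config is generated by Random.',
        'lhs': 'The config is generated by LHS.',
        'range_test': 'Searching for valid knob ranges.',
    }
    info = status_messages.get(target_data['status'], 'Unknown.')
    info += ' ' + target_data.get('debug', '')
    print(info)
    # -> The config is generated by LHS. Not enough data in this session, picking data with lhs.
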
@@ -877,8 +884,9 @@ def configuration_recommendation(recommendation_input):
             break
     res = None
+    info_msg = 'INFO: training data size is {}. '.format(X_scaled.shape[0])
 
     if algorithm == AlgorithmType.DNN:
+        info_msg += 'Recommended by DNN.'
         # neural network model
         model_nn = NeuralNet(n_input=X_samples.shape[1],
                              batch_size=X_samples.shape[0],
@@ -897,6 +905,7 @@ def configuration_recommendation(recommendation_input):
         session.save()
     elif algorithm == AlgorithmType.GPR:
+        info_msg += 'Recommended by GPR.'
         # default gpr model
         if params['GPR_USE_GPFLOW']:
             LOG.debug("%s: Running GPR with GPFLOW.", task_name)
@@ -957,8 +966,7 @@
     conf_map_res = create_and_save_recommendation(
         recommended_knobs=conf_map, result=newest_result,
-        status='good', info='INFO: training data size is {}'.format(X_scaled.shape[0]),
-        pipeline_run=target_data['pipeline_run'])
+        status='good', info=info_msg, pipeline_run=target_data['pipeline_run'])
 
     exec_time = save_execution_time(start_ts, "configuration_recommendation", newest_result)
     LOG.debug("\n%s: Result = %s\n", task_name, _task_result_tostring(conf_map_res))
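
Similarly, a minimal sketch of how the new info_msg is assembled before it is passed to create_and_save_recommendation() in place of the old inline format string. The array shape and algorithm name below are made up for illustration; the real code branches on AlgorithmType, which is not imported here.

    import numpy as np

    X_scaled = np.zeros((25, 12))   # hypothetical training matrix: 25 samples, 12 knobs
    algorithm_name = 'GPR'          # stand-in for whichever AlgorithmType branch ran

    info_msg = 'INFO: training data size is {}. '.format(X_scaled.shape[0])
    info_msg += 'Recommended by {}.'.format(algorithm_name)
    print(info_msg)
    # -> INFO: training data size is 25. Recommended by GPR.
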