From c33625ca17f1a2b4e79a4550116a2b204971a815 Mon Sep 17 00:00:00 2001 From: yangdsh Date: Mon, 3 Feb 2020 20:59:23 +0000 Subject: [PATCH] random->lhs when lacking data for GPR; fix editing session --- client/driver/fabfile.py | 8 ++++++-- server/website/website/tasks/async_tasks.py | 14 +++++++------- server/website/website/utils.py | 4 ++-- 3 files changed, 15 insertions(+), 11 deletions(-) diff --git a/client/driver/fabfile.py b/client/driver/fabfile.py index 2a1895a..b6f8ffa 100644 --- a/client/driver/fabfile.py +++ b/client/driver/fabfile.py @@ -663,6 +663,10 @@ def _http_content_to_json(content): return json_content, decoded +def _parse_bool(val): + return str(val).lower() == 'true' + + def _modify_website_object(obj_name, action, verbose=False, **kwargs): verbose = _parse_bool(verbose) if obj_name == 'project': @@ -685,11 +689,11 @@ def _modify_website_object(obj_name, action, verbose=False, **kwargs): data[k] = v url_path = '/{}/{}/'.format(action, obj_name) - response = requests.post(CONF['upload_url'] + url_path, data=data) + response = requests.post(dconf.WEBSITE_URL + url_path, data=data) content = response.content.decode('utf-8') if response.status_code != 200: - raise Exception("Failed to {} new {}.\nStatus: {}\nMessage: {}\n".format( + raise Exception("Failed to {} {}.\nStatus: {}\nMessage: {}\n".format( action, obj_name, response.status_code, content)) json_content, decoded = _http_content_to_json(content) diff --git a/server/website/website/tasks/async_tasks.py b/server/website/website/tasks/async_tasks.py index 8277a22..fc066d9 100644 --- a/server/website/website/tasks/async_tasks.py +++ b/server/website/website/tasks/async_tasks.py @@ -168,11 +168,14 @@ def aggregate_target_results(result_id, algorithm): # implement a sampling technique to generate new training data). 
newest_result = Result.objects.get(pk=result_id) has_pipeline_data = PipelineData.objects.filter(workload=newest_result.workload).exists() - if newest_result.session.tuning_session == 'lhs': + if not has_pipeline_data or newest_result.session.tuning_session == 'lhs': + if not has_pipeline_data and newest_result.session.tuning_session == 'tuning_session': + LOG.debug("Background tasks haven't run for this workload yet, picking random data.") + all_samples = JSONUtil.loads(newest_result.session.lhs_samples) if len(all_samples) == 0: knobs = SessionKnob.objects.get_knobs_for_session(newest_result.session) - all_samples = gen_lhs_samples(knobs, 100) + all_samples = gen_lhs_samples(knobs, 10) LOG.debug('%s: Generated LHS.\n\ndata=%s\n', AlgorithmType.name(algorithm), JSONUtil.dumps(all_samples[:5], pprint=True)) samples = all_samples.pop() @@ -186,10 +189,7 @@ def aggregate_target_results(result_id, algorithm): LOG.debug('%s: Got LHS config.\n\ndata=%s\n', AlgorithmType.name(algorithm), JSONUtil.dumps(agg_data, pprint=True)) - elif not has_pipeline_data or newest_result.session.tuning_session == 'randomly_generate': - if not has_pipeline_data and newest_result.session.tuning_session == 'tuning_session': - LOG.debug("Background tasks haven't ran for this workload yet, picking random data.") - + elif newest_result.session.tuning_session == 'randomly_generate': result = Result.objects.filter(pk=result_id) knobs = SessionKnob.objects.get_knobs_for_session(newest_result.session) @@ -648,7 +648,7 @@ def configuration_recommendation(recommendation_input): if target_data['bad'] is True: target_data_res = create_and_save_recommendation( recommended_knobs=target_data['config_recommend'], result=newest_result, - status='bad', info='WARNING: no training data, the config is generated by LHS', pipeline_run=target_data['pipeline_run']) LOG.debug('%s: Skipping configuration 
recommendation.\nData:\n%s\n\n', AlgorithmType.name(algorithm), target_data) diff --git a/server/website/website/utils.py b/server/website/website/utils.py index a363b40..1e49d98 100644 --- a/server/website/website/utils.py +++ b/server/website/website/utils.py @@ -499,7 +499,7 @@ def model_to_dict2(m, exclude=None): def check_and_run_celery(): celery_status = os.popen('python3 manage.py celery inspect ping').read() - if 'OK' in celery_status: + if 'pong' in celery_status: return 'celery is running' retries = 0 @@ -510,7 +510,7 @@ def check_and_run_celery(): os.popen('python3 manage.py startcelery &') time.sleep(30 * retries) celery_status = os.popen('python3 manage.py celery inspect ping').read() - if 'OK' in celery_status: + if 'pong' in celery_status: LOG.info('Successfully start celery.') return 'celery stopped but is restarted successfully' LOG.warning('Cannot restart celery.')