Save DNN model into database

Author: bohanjason, 2019-09-29 20:53:23 -04:00 (committed by Dana Van Aken)
parent c37ef9c072
commit 25d0838376
6 changed files with 27 additions and 30 deletions
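In short, this commit stops writing per-session weight files to disk and instead pickles the Keras weights into a new dnn_model BinaryField on the Session row. A condensed sketch of the new round trip, using only names introduced in the diff below:

    # Warm-start from the blob stored on the session, retrain, persist again.
    if session.dnn_model is not None:
        model_nn.set_weights_bin(session.dnn_model)
    model_nn.fit(X_scaled, y_scaled, fit_epochs=DNN_TRAIN_ITER)
    session.dnn_model = model_nn.get_weights_bin()
    session.save()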

File 1 of 6

@@ -264,6 +264,8 @@ def clean_logs():
 @task
 def lhs_samples(count=10):
+    if not os.path.exists(CONF['lhs_save_path']):
+        os.makedirs(CONF['lhs_save_path'])
     cmd = 'python3 lhs.py {} {} {}'.format(count, CONF['lhs_knob_path'], CONF['lhs_save_path'])
     local(cmd)
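Since the task already shells out to python3, the added existence check could also be collapsed into one call: os.makedirs has accepted exist_ok since Python 3.2, which avoids the race between the check and the call.

    # Equivalent on Python >= 3.2; no separate os.path.exists() check needed.
    os.makedirs(CONF['lhs_save_path'], exist_ok=True)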

File 2 of 6

@@ -8,6 +8,7 @@ Created on Sep 16, 2019
 @author: Bohan Zhang
 '''
+import pickle
 import numpy as np
 import tensorflow as tf
 from tensorflow import keras
@@ -28,7 +29,6 @@ class NeuralNet(object):
     def __init__(self,
                  n_input,
-                 weights_file,
                  learning_rate=0.01,
                  debug=False,
                  debug_interval=100,
@@ -36,9 +36,6 @@ class NeuralNet(object):
                  explore_iters=500,
                  noise_scale_begin=0.1,
                  noise_scale_end=0):
-        # absolute path for the model weights file
-        # one model for each (project, session)
-        self.weights_file = weights_file
         self.history = None
         self.recommend_iters = 0
@@ -58,7 +55,6 @@ class NeuralNet(object):
             layers.Dense(64, activation=tf.nn.relu),
             layers.Dense(1)
         ])
-        self.load_weights()
         self.model.compile(loss='mean_squared_error',
                            optimizer=self.optimizer,
                            metrics=['mean_squared_error', 'mean_absolute_error'])
@@ -66,17 +62,28 @@ class NeuralNet(object):
         self.ops = {}
         self.build_graph()

-    def save_weights(self):
-        self.model.save_weights(self.weights_file)
+    def save_weights(self, weights_file):
+        self.model.save_weights(weights_file)

-    def load_weights(self):
+    def load_weights(self, weights_file):
         try:
-            self.model.load_weights(self.weights_file)
+            self.model.load_weights(weights_file)
             if self.debug:
                 LOG.info('Neural Network Model weights file exists, load weights from the file')
         except Exception:  # pylint: disable=broad-except
             LOG.info('Weights file does not match neural network model, train model from scratch')

+    def get_weights_bin(self):
+        return pickle.dumps(self.model.get_weights())
+
+    def set_weights_bin(self, weights):
+        try:
+            self.model.set_weights(pickle.loads(weights))
+            if self.debug:
+                LOG.info('Neural Network Model weights exist, load the existing weights')
+        except Exception:  # pylint: disable=broad-except
+            LOG.info('Weights do not match neural network model, train model from scratch')
+
     # Build the same neural network as self.model, but input X is a variable
     # and the weights are placeholders. Find the optimal X using gradient descent.
     def build_graph(self):
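A minimal self-contained sketch of the pickle round trip that get_weights_bin()/set_weights_bin() implement; the toy model here is illustrative, not the tuner's real network:

    import pickle
    from tensorflow import keras
    from tensorflow.keras import layers

    model = keras.Sequential([layers.Dense(64, activation='relu', input_shape=(8,)),
                              layers.Dense(1)])
    blob = pickle.dumps(model.get_weights())   # list of numpy arrays -> bytes
    model.set_weights(pickle.loads(blob))      # bytes -> weights

set_weights() raises when the stored arrays do not match the model's architecture, which is exactly what the broad except above guards against before falling back to training from scratch.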
@@ -109,8 +116,6 @@ class NeuralNet(object):
     def fit(self, X_train, y_train, fit_epochs=500):
         self.history = self.model.fit(
             X_train, y_train, epochs=fit_epochs, verbose=0)
-        # save model weights
-        self.save_weights()
         if self.debug:
             mse = self.history.history['mean_squared_error']
             i = 0

File 3 of 6

@@ -188,6 +188,7 @@ class Migration(migrations.Migration):
                 ('ddpg_actor_model', models.BinaryField(null=True, blank=True)),
                 ('ddpg_critic_model', models.BinaryField(null=True, blank=True)),
                 ('ddpg_reply_memory', models.BinaryField(null=True, blank=True)),
+                ('dnn_model', models.BinaryField(null=True, blank=True)),
                 ('creation_time', models.DateTimeField()),
                 ('last_update', models.DateTimeField()),
                 ('upload_code', models.CharField(max_length=30, unique=True)),
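The hunk above adds the column to the squashed initial migration; an already-deployed database would instead pick up the field through a small follow-up migration, roughly like this (the dependency name below is hypothetical):

    from django.db import migrations, models

    class Migration(migrations.Migration):
        dependencies = [('website', '0001_initial')]  # hypothetical dependency
        operations = [
            migrations.AddField(
                model_name='session',
                name='dnn_model',
                field=models.BinaryField(null=True, blank=True),
            ),
        ]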

File 4 of 6

@@ -191,6 +191,7 @@ class Session(BaseModel):
     ddpg_actor_model = models.BinaryField(null=True, blank=True)
     ddpg_critic_model = models.BinaryField(null=True, blank=True)
     ddpg_reply_memory = models.BinaryField(null=True, blank=True)
+    dnn_model = models.BinaryField(null=True, blank=True)
     project = models.ForeignKey(Project)
     creation_time = models.DateTimeField()
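One nuance of BinaryField: depending on the database backend, reading the column back can yield a memoryview rather than bytes. pickle.loads accepts any bytes-like object, so feeding the stored value straight to set_weights_bin() works either way. A sketch, assuming a session object fetched elsewhere:

    session.dnn_model = model_nn.get_weights_bin()  # plain bytes from pickle.dumps
    session.save()
    session.refresh_from_db()
    model_nn.set_weights_bin(session.dnn_model)     # bytes or memoryview both load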

File 5 of 6

@@ -33,9 +33,6 @@ CONFIG_DIR = join(PROJECT_ROOT, 'config')
 # Where the log files are stored
 LOG_DIR = join(PROJECT_ROOT, 'log')

-# Where the model weight files are stored
-MODEL_DIR = join(PROJECT_ROOT, 'model')
-
 # File/directory upload permissions
 FILE_UPLOAD_DIRECTORY_PERMISSIONS = 0o664
 FILE_UPLOAD_PERMISSIONS = 0o664
@@ -57,13 +54,6 @@ try:
 except OSError:  # Invalid permissions
     pass

-# Try to create the model directory
-try:
-    if not exists(MODEL_DIR):
-        os.mkdir(MODEL_DIR)
-except OSError:  # Invalid permissions
-    pass
-
 # ==============================================
 # DEBUG CONFIGURATION
 # ==============================================

File 6 of 6

@@ -3,7 +3,6 @@
 #
 # Copyright (c) 2017-18, Carnegie Mellon University Database Group
 #
-import os
 import random
 import queue
 import numpy as np
@@ -37,7 +36,6 @@ from website.settings import (DEFAULT_LENGTH_SCALE, DEFAULT_MAGNITUDE,
                               DNN_DEBUG, DNN_DEBUG_INTERVAL)
 from website.settings import INIT_FLIP_PROB, FLIP_PROB_DECAY
-from website.settings import MODEL_DIR
 from website.types import VarType
@@ -543,27 +541,27 @@ def configuration_recommendation(recommendation_input):
         except queue.Empty:
             break

-    # one model for each (project, session)
-    session = newest_result.session.pk
-    project = newest_result.session.project.pk
-    full_path = os.path.join(MODEL_DIR, 'p' + str(project) + '_s' + str(session) + '_nn.weights')
+    session = newest_result.session

     res = None
     assert algorithm in ['gpr', 'dnn']
     if algorithm == 'dnn':
         # neural network model
-        model_nn = NeuralNet(weights_file=full_path,
-                             n_input=X_samples.shape[1],
+        model_nn = NeuralNet(n_input=X_samples.shape[1],
                              batch_size=X_samples.shape[0],
                              explore_iters=DNN_EXPLORE_ITER,
                              noise_scale_begin=DNN_NOISE_SCALE_BEGIN,
                              noise_scale_end=DNN_NOISE_SCALE_END,
                              debug=DNN_DEBUG,
                              debug_interval=DNN_DEBUG_INTERVAL)
+        if session.dnn_model is not None:
+            model_nn.set_weights_bin(session.dnn_model)
         model_nn.fit(X_scaled, y_scaled, fit_epochs=DNN_TRAIN_ITER)
         res = model_nn.recommend(X_samples, X_min, X_max,
                                  explore=DNN_EXPLORE, recommend_epochs=MAX_ITER)
+        session.dnn_model = model_nn.get_weights_bin()
+        session.save()
     elif algorithm == 'gpr':
         # default gpr model
         model = GPRGD(length_scale=DEFAULT_LENGTH_SCALE,
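Because fit() no longer calls save_weights() internally, persisting the model is now the caller's responsibility, as the hunk above shows. An illustrative check that weights survive the blob round trip between two NeuralNet instances (constructor arguments abbreviated; the real call site passes the DNN_* settings):

    import pickle
    nn1 = NeuralNet(n_input=8, batch_size=4)
    blob = nn1.get_weights_bin()
    nn2 = NeuralNet(n_input=8, batch_size=4)   # starts with its own random weights
    nn2.set_weights_bin(blob)                  # now mirrors nn1 exactly
    assert all((a == b).all()
               for a, b in zip(pickle.loads(blob), nn2.model.get_weights()))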