Import Dana's new GPR model

This commit is contained in:
yangdsh
2019-11-01 19:04:42 +00:00
committed by Dana Van Aken
parent 8bb3de85e7
commit e86fce59d9
6 changed files with 557 additions and 28 deletions

View File

@@ -0,0 +1,271 @@
#
# OtterTune - analysis/gpr_models.py
#
# Copyright (c) 2017-18, Carnegie Mellon University Database Group
#
# Author: Dana Van Aken
import copy
import json
import os
import gpflow
import numpy as np
import tensorflow as tf
from .gprc import GPRC
class BaseModel(object):
    """Base wrapper around a GPRC model built from configurable gpflow kernels.

    Subclasses declare which kernels to build (via the ``_get_kernel_kwargs``
    and ``_build_kernel`` hooks) and which saved-hyperparameter keys map onto
    each kernel (via ``_KERNEL_HP_KEYS``).
    """

    # Min/max bounds for the kernel lengthscales
    _LENGTHSCALE_BOUNDS = (0.1, 10.)

    # Keys for each kernel's hyperparameters
    _KERNEL_HP_KEYS = []

    # The key for the likelihood parameter
    _LIKELIHOOD_HP_KEY = 'GPRC/likelihood/variance'

    def __init__(self, X, y, hyperparameters=None, optimize_hyperparameters=False,
                 learning_rate=0.001, maxiter=5000, **kwargs):
        """Build (and optionally optimize) the underlying GPRC model.

        Args:
            X: Training inputs, shape (n_samples, n_dims).
            y: Training targets.
            hyperparameters: Optional dict of saved hyperparameter values,
                keyed by gpflow parameter path (see _KERNEL_HP_KEYS and
                _LIKELIHOOD_HP_KEY).
            optimize_hyperparameters: If True, fit hyperparameters with Adam.
            learning_rate: Adam learning rate (used only when optimizing).
            maxiter: Max Adam iterations (used only when optimizing).
            **kwargs: Extra arguments forwarded to the subclass kernel hooks
                (e.g. active-dim lists).
        """
        # Store model kwargs
        self._model_kwargs = {
            'hyperparameters': hyperparameters,
            'optimize_hyperparameters': optimize_hyperparameters,
            'learning_rate': learning_rate,
            'maxiter': maxiter,
        }
        # Store kernel kwargs
        kernel_kwargs = self._get_kernel_kwargs(X_dim=X.shape[1], **kwargs)
        if hyperparameters is not None:
            # Seed the kernel constructor kwargs with any saved values.
            self._assign_kernel_hyperparams(hyperparameters, kernel_kwargs)
        # Deep-copy BEFORE _build_kernel, which may mutate kernel_kwargs
        # (e.g. ContextualGP_Alt0 injects constant lengthscales).
        self._kernel_kwargs = copy.deepcopy(kernel_kwargs)
        # Build the kernels and the model
        with gpflow.defer_build():
            k = self._build_kernel(kernel_kwargs, optimize_hyperparameters=optimize_hyperparameters, **kwargs)
            m = GPRC(X, y, kern=k)
            if hyperparameters is not None and self._LIKELIHOOD_HP_KEY in hyperparameters:
                m.likelihood.variance = hyperparameters[self._LIKELIHOOD_HP_KEY]
        m.compile()
        # If enabled, optimize the hyperparameters
        if optimize_hyperparameters:
            opt = gpflow.train.AdamOptimizer(learning_rate)
            opt.minimize(m, maxiter=maxiter)
        self._model = m

    def _get_kernel_kwargs(self, **kwargs):
        """Hook: return a list of constructor-kwarg dicts, one per kernel."""
        return []

    def _build_kernel(self, kernel_kwargs, **kwargs):
        """Hook: instantiate and combine the kernels from kernel_kwargs."""
        return None

    def get_hyperparameters(self):
        """Return the model's hyperparameter values as plain Python types."""
        return {k: float(v) if v.ndim == 0 else v.tolist()
                for k, v in self._model.read_values().items()}

    def get_model_parameters(self):
        """Return copies of the kwargs used to build this model and kernels."""
        return {
            'model_params': copy.deepcopy(self._model_kwargs),
            'kernel_params': copy.deepcopy(self._kernel_kwargs)
        }

    def _assign_kernel_hyperparams(self, hyperparams, kernel_kwargs):
        # Copy saved hyperparameter values into the matching kernel kwargs,
        # keyed by the trailing path component (e.g. 'variance', 'lengthscales').
        for i, kernel_keys in enumerate(self._KERNEL_HP_KEYS):
            for key in kernel_keys:
                if key in hyperparams:
                    argname = key.rsplit('/', 1)[-1]
                    kernel_kwargs[i][argname] = hyperparams[key]

    @staticmethod
    def load_hyperparameters(path, hp_idx=0):
        """Load hyperparameters from a JSON file.

        If the file stores a list of hyperparameter dicts, hp_idx selects one;
        an out-of-range hp_idx falls back to the last entry.
        """
        with open(path, 'r') as f:
            hyperparams = json.load(f)['hyperparameters']
        if isinstance(hyperparams, list):
            assert hp_idx >= 0, 'hp_idx: {} (expected >= 0)'.format(hp_idx)
            if hp_idx >= len(hyperparams):
                hp_idx = -1
            hyperparams = hyperparams[hp_idx]
        return hyperparams
class BasicGP(BaseModel):
    """GP with an ARD exponential kernel over all inputs plus white noise."""

    _KERNEL_HP_KEYS = [
        ['GPRC/kern/kernels/0/variance',
         'GPRC/kern/kernels/0/lengthscales'],
        ['GPRC/kern/kernels/1/variance'],
    ]

    def _get_kernel_kwargs(self, **kwargs):
        """Return constructor kwargs for the exponential and white kernels."""
        n_dims = kwargs.pop('X_dim')
        exp_cfg = {'input_dim': n_dims, 'ARD': True}
        white_cfg = {'input_dim': n_dims}
        return [exp_cfg, white_cfg]

    def _build_kernel(self, kernel_kwargs, **kwargs):
        """Build the composite kernel: exponential + white noise."""
        exp_kern = gpflow.kernels.Exponential(**kernel_kwargs[0])
        white_kern = gpflow.kernels.White(**kernel_kwargs[1])
        if kwargs.pop('optimize_hyperparameters'):
            # Constrain the lengthscales to stay within bounds while training.
            exp_kern.lengthscales.transform = gpflow.transforms.Logistic(
                *self._LENGTHSCALE_BOUNDS)
        return exp_kern + white_kern
class ContextualGP(BaseModel):
    """GP whose kernel is a product of two ARD exponential kernels over
    disjoint active dimensions, plus white noise."""

    _KERNEL_HP_KEYS = [
        ['GPRC/kern/kernels/0/kernels/0/variance',
         'GPRC/kern/kernels/0/kernels/0/lengthscales'],
        ['GPRC/kern/kernels/0/kernels/1/variance',
         'GPRC/kern/kernels/0/kernels/1/lengthscales'],
        ['GPRC/kern/kernels/1/variance'],
    ]

    def _get_kernel_kwargs(self, **kwargs):
        """Return constructor kwargs for the two exponential kernels (each
        restricted to its active dims) and the white-noise kernel."""
        dims0 = kwargs.pop('k0_active_dims')
        dims1 = kwargs.pop('k1_active_dims')
        cfg0 = {'input_dim': len(dims0), 'active_dims': dims0, 'ARD': True}
        cfg1 = {'input_dim': len(dims1), 'active_dims': dims1, 'ARD': True}
        noise_cfg = {'input_dim': kwargs.pop('X_dim')}
        return [cfg0, cfg1, noise_cfg]

    def _build_kernel(self, kernel_kwargs, **kwargs):
        """Build the composite kernel: (exp0 * exp1) + white noise."""
        exp0 = gpflow.kernels.Exponential(**kernel_kwargs[0])
        exp1 = gpflow.kernels.Exponential(**kernel_kwargs[1])
        white = gpflow.kernels.White(**kernel_kwargs[2])
        if kwargs['optimize_hyperparameters']:
            # Bound both kernels' lengthscales during optimization.
            for kern in (exp0, exp1):
                kern.lengthscales.transform = gpflow.transforms.Logistic(
                    *self._LENGTHSCALE_BOUNDS)
        return exp0 * exp1 + white
class ContextualGP_Alt0(ContextualGP):
    """ContextualGP variant that pins the second kernel's lengthscales to a
    constant and excludes them from training."""

    def __init__(self, X, y, hyperparameters=None, optimize_hyperparameters=False,
                 learning_rate=0.001, maxiter=5000, **kwargs):
        # Constant lengthscale applied to every dim of the second kernel.
        self._context_lengthscale_const = kwargs.pop('context_lengthscale_const', 9.0)
        super(ContextualGP_Alt0, self).__init__(
            X, y, hyperparameters=hyperparameters,
            optimize_hyperparameters=optimize_hyperparameters,
            learning_rate=learning_rate, maxiter=maxiter, **kwargs)

    def _build_kernel(self, kernel_kwargs, **kwargs):
        """Build (exp0 * exp1) + white, with exp1's lengthscales fixed."""
        # Inject the constant lengthscales before constructing the kernel.
        n_dims1 = kernel_kwargs[1]['input_dim']
        kernel_kwargs[1]['lengthscales'] = (
            self._context_lengthscale_const * np.ones((n_dims1,)))
        exp0 = gpflow.kernels.Exponential(**kernel_kwargs[0])
        exp1 = gpflow.kernels.Exponential(**kernel_kwargs[1])
        exp1.lengthscales.trainable = False  # keep the constant values fixed
        white = gpflow.kernels.White(**kernel_kwargs[2])
        if kwargs['optimize_hyperparameters']:
            # Only the first kernel's lengthscales are trained (and bounded).
            exp0.lengthscales.transform = gpflow.transforms.Logistic(
                *self._LENGTHSCALE_BOUNDS)
        return exp0 * exp1 + white
class ContextualGP_Alt1(ContextualGP):
    """ContextualGP variant that loads kernel lengthscales from a saved
    hyperparameter JSON file (see BaseModel.load_hyperparameters)."""

    def __init__(self, X, y, hyperparameters=None, optimize_hyperparameters=False,
                 learning_rate=0.001, maxiter=5000, **kwargs):
        # Path to the JSON hyperparameter file (required) and which entry to use.
        self._hyperparams_path = kwargs.pop('hyperparameters_path')
        self._hyperparams_idx = kwargs.pop('hyperparameters_idx', 0)
        # When False, both kernels' lengthscales are loaded from the file and
        # frozen; when True (default) they are left as constructed and only
        # bounded during optimization (see _build_kernel).
        self._context_only = kwargs.pop('context_only', True)
        super(ContextualGP_Alt1, self).__init__(
            X, y, hyperparameters=hyperparameters,
            optimize_hyperparameters=optimize_hyperparameters,
            learning_rate=learning_rate, maxiter=maxiter, **kwargs)

    def _build_kernel(self, kernel_kwargs, **kwargs):
        # Order matters: loaded lengthscales must be injected into
        # kernel_kwargs BEFORE the kernels are constructed; trainability is
        # then set on the constructed instances.
        hyperparams = self.load_hyperparameters(self._hyperparams_path,
                                                self._hyperparams_idx)
        if not self._context_only:
            kernel_kwargs[0]['lengthscales'] = np.array(
                hyperparams['GPRC/kern/kernels/0/kernels/0/lengthscales'])
            kernel_kwargs[1]['lengthscales'] = np.array(
                hyperparams['GPRC/kern/kernels/0/kernels/1/lengthscales'])
        k0 = gpflow.kernels.Exponential(**kernel_kwargs[0])
        k1 = gpflow.kernels.Exponential(**kernel_kwargs[1])
        k2 = gpflow.kernels.White(**kernel_kwargs[2])
        if not self._context_only:
            # Keep the loaded lengthscales fixed during any optimization.
            k0.lengthscales.trainable = False
            k1.lengthscales.trainable = False
        if self._context_only and kwargs['optimize_hyperparameters']:
            # NOTE(review): only k0's lengthscales are bounded here (k1's are
            # not) — confirm that asymmetry is intentional.
            k0.lengthscales.transform = gpflow.transforms.Logistic(
                *self._LENGTHSCALE_BOUNDS)
        k = k0 * k1 + k2
        return k
class AdditiveContextualGP(ContextualGP):
    """ContextualGP variant that ADDS the two exponential kernels instead of
    multiplying them."""

    def _build_kernel(self, kernel_kwargs, **kwargs):
        """Build the composite kernel: exp0 + exp1 + white noise."""
        exp0 = gpflow.kernels.Exponential(**kernel_kwargs[0])
        exp1 = gpflow.kernels.Exponential(**kernel_kwargs[1])
        white = gpflow.kernels.White(**kernel_kwargs[2])
        if kwargs['optimize_hyperparameters']:
            # Bound both kernels' lengthscales during optimization.
            for kern in (exp0, exp1):
                kern.lengthscales.transform = gpflow.transforms.Logistic(
                    *self._LENGTHSCALE_BOUNDS)
        return exp0 + exp1 + white
# Registry of GPR model names -> classes; consumed by create_model()
# and check_valid().
_MODEL_MAP = {
    'BasicGP': BasicGP,
    'ContextualGP': ContextualGP,
    'ContextualGP_Alt0': ContextualGP_Alt0,
    'ContextualGP_Alt1': ContextualGP_Alt1,
    'AdditiveContextualGP': AdditiveContextualGP,
}
def create_model(model_name, **kwargs):
    """Instantiate the GPR model registered under model_name.

    Raises ValueError (via check_valid) for unknown names; **kwargs are
    forwarded to the model constructor.
    """
    # Update tensorflow session settings to enable GPU sharing
    gpflow.settings.session.update(gpu_options=tf.GPUOptions(allow_growth=True))
    check_valid(model_name)
    model_class = _MODEL_MAP[model_name]
    return model_class(**kwargs)
def check_valid(model_name):
    """Raise ValueError if model_name is not a registered GPR model."""
    if model_name in _MODEL_MAP:
        return
    raise ValueError('Invalid GPR model name: {}'.format(model_name))

View File

@@ -0,0 +1,48 @@
#
# OtterTune - analysis/gprc.py
#
# Copyright (c) 2017-18, Carnegie Mellon University Database Group
#
# Author: Dana Van Aken
from __future__ import absolute_import
import tensorflow as tf
from gpflow import settings
from gpflow.decors import autoflow, name_scope, params_as_tensors
from gpflow.models import GPR
class GPRC(GPR):
    """GPR variant that caches the Cholesky factorization across predictions.

    Standard gpflow GPR re-solves the training system on every predict call;
    GPRC computes the Cholesky of (K + sigma^2*I) once and reuses it.
    NOTE(review): the cache is not invalidated if X/Y or the hyperparameters
    change after the first prediction — confirm callers rebuild the model
    rather than mutating it in place.
    """

    def __init__(self, X, Y, kern, mean_function=None, **kwargs):
        super(GPRC, self).__init__(X, Y, kern, mean_function, **kwargs)
        # Lazily-computed prediction cache; populated by update_cache().
        self.cholesky = None
        self.alpha = None

    @autoflow()
    def _compute_cache(self):
        # L = chol(K + sigma^2*I) and V = L^-1 (Y - mean(X)).
        K = self.kern.K(self.X) + tf.eye(tf.shape(self.X)[0], dtype=settings.float_type) * self.likelihood.variance
        L = tf.cholesky(K, name='gp_cholesky')
        V = tf.matrix_triangular_solve(L, self.Y - self.mean_function(self.X), name='gp_alpha')
        return L, V

    def update_cache(self):
        """Recompute and store the Cholesky factor and alpha vector."""
        self.cholesky, self.alpha = self._compute_cache()

    @name_scope('predict')
    @params_as_tensors
    def _build_predict(self, Xnew, full_cov=False):
        """Predictive mean and variance at Xnew using the cached factors."""
        if self.cholesky is None:
            self.update_cache()
        Kx = self.kern.K(self.X, Xnew)
        A = tf.matrix_triangular_solve(self.cholesky, Kx, lower=True)
        fmean = tf.matmul(A, self.alpha, transpose_a=True) + self.mean_function(Xnew)
        if full_cov:
            # Full predictive covariance, tiled across output dimensions.
            fvar = self.kern.K(Xnew) - tf.matmul(A, A, transpose_a=True)
            shape = tf.stack([1, 1, tf.shape(self.Y)[1]])
            fvar = tf.tile(tf.expand_dims(fvar, 2), shape)
        else:
            # Marginal (diagonal) predictive variance per test point.
            fvar = self.kern.Kdiag(Xnew) - tf.reduce_sum(tf.square(A), 0)
            fvar = tf.tile(tf.reshape(fvar, (-1, 1)), [1, tf.shape(self.Y)[1]])
        return fmean, fvar

View File

@@ -0,0 +1,64 @@
#
# OtterTune - analysis/optimize.py
#
# Copyright (c) 2017-18, Carnegie Mellon University Database Group
#
# Author: Dana Van Aken
import numpy as np
import tensorflow as tf
from gpflow import settings
from sklearn.utils import assert_all_finite, check_array
from sklearn.utils.validation import FLOAT_DTYPES
from analysis.util import get_analysis_logger
LOG = get_analysis_logger(__name__)
def tf_optimize(model, Xnew_arr, learning_rate=0.01, maxiter=100, ucb_beta=3.,
                active_dims=None, bounds=None):
    """Gradient-optimize candidate points against a GP confidence-bound loss.

    Runs Adam for `maxiter` steps minimizing mean - ucb_beta * variance of
    the model's predictions at the candidate points.

    Args:
        model: GPRC-like model exposing _build_predict(), enquire_session(),
            and likelihood.predict_mean_and_var().
        Xnew_arr: Initial candidate points, shape (n_candidates, n_dims).
        learning_rate: Adam step size.
        maxiter: Number of Adam iterations.
        ucb_beta: Weight on the variance term of the loss.
        active_dims: Optional column indices allowed to change; all other
            columns are held fixed via a stop-gradient mask.
        bounds: Optional (lower, upper) clip bounds for the candidates.

    Returns:
        Tuple (Xnew_value, y_mean_value, y_var_value, loss_value): the
        optimized (clipped) points and the model's predictions/loss there.
    """
    Xnew_arr = check_array(Xnew_arr, copy=False, warn_on_dtype=True, dtype=FLOAT_DTYPES)
    Xnew = tf.Variable(Xnew_arr, name='Xnew', dtype=settings.float_type)
    # Clip candidates to the bounds (effectively unbounded when bounds=None).
    if bounds is None:
        lower_bound = tf.constant(-np.infty, dtype=settings.float_type)
        upper_bound = tf.constant(np.infty, dtype=settings.float_type)
    else:
        lower_bound = tf.constant(bounds[0], dtype=settings.float_type)
        upper_bound = tf.constant(bounds[1], dtype=settings.float_type)
    Xnew_bounded = tf.minimum(tf.maximum(Xnew, lower_bound), upper_bound)
    if active_dims:
        # Gradient mask: Xin equals Xnew_bounded in value, but gradients flow
        # only through the (row, col) entries scattered into part_X.
        indices = []
        updates = []
        n_rows = Xnew_arr.shape[0]
        for c in active_dims:
            for r in range(n_rows):
                indices.append([r, c])
                updates.append(Xnew_bounded[r, c])
        part_X = tf.scatter_nd(indices, updates, Xnew_arr.shape)
        Xin = part_X + tf.stop_gradient(-part_X + Xnew_bounded)
    else:
        Xin = Xnew_bounded
    beta_t = tf.constant(ucb_beta, name='ucb_beta', dtype=settings.float_type)
    # NOTE(review): calls the private model._build_predict(); the beta term
    # multiplies the variance (not the std) — confirm this matches the
    # intended acquisition definition.
    y_mean_var = model.likelihood.predict_mean_and_var(*model._build_predict(Xin))
    loss = tf.subtract(y_mean_var[0], tf.multiply(beta_t, y_mean_var[1]), name='loss_fn')
    opt = tf.train.AdamOptimizer(learning_rate)
    train_op = opt.minimize(loss)
    variables = opt.variables()
    # Initialize only the candidate variable and Adam's slots; model
    # parameters are left untouched.
    init_op = tf.variables_initializer([Xnew] + variables)
    session = model.enquire_session(session=None)
    with session.as_default():
        session.run(init_op)
        for i in range(maxiter):
            session.run(train_op)
        # Evaluate the final (clipped) candidates and their predictions.
        Xnew_value = session.run(Xnew_bounded)
        y_mean_value, y_var_value = session.run(y_mean_var)
        loss_value = session.run(loss)
    # Fail fast on NaNs/infs produced by the optimization.
    assert_all_finite(Xnew_value)
    assert_all_finite(y_mean_value)
    assert_all_finite(y_var_value)
    assert_all_finite(loss_value)
    return Xnew_value, y_mean_value, y_var_value, loss_value

View File

@@ -0,0 +1,40 @@
import numpy as np
def get_beta_t(t, **kwargs):
    """Time-dependent UCB beta: 2 * log(t / sqrt(log(2t))).

    Extra **kwargs are accepted and ignored so all beta schedules share one
    call signature (see get_ucb_beta).
    """
    assert t > 0.
    ratio = t / np.sqrt(np.log(2. * t))
    return 2. * np.log(ratio)
def get_beta_td(t, ndim, bound=1.0, **kwargs):
    """Dimension-dependent UCB beta, clipped below at zero.

    Computes sqrt(2 * log(ndim * t^2 * pi^2 / (6 * bound))), returning 0
    when the inner expression is non-positive. Extra **kwargs are accepted
    and ignored (shared schedule signature; see get_ucb_beta).
    """
    assert t > 0.
    assert ndim > 0.
    assert bound > 0.
    inner = float(ndim) * t ** 2 * np.pi ** 2 / (6. * bound)
    bt = 2. * np.log(inner)
    if bt > 0.:
        return np.sqrt(bt)
    return 0.
# Registry mapping schedule names accepted by get_ucb_beta() to the
# functions that compute the beta value.
_UCB_MAP = {
    'get_beta_t': get_beta_t,
    'get_beta_td': get_beta_td,
}
def get_ucb_beta(ucb_beta, **kwargs):
    """Resolve ucb_beta to a non-negative float.

    Args:
        ucb_beta: Either a numeric constant (int or float, >= 0) or the name
            of a beta-schedule function in _UCB_MAP ('get_beta_t' or
            'get_beta_td').
        **kwargs: Forwarded to the schedule function when a name is given.

    Returns:
        The resolved beta as a float.

    Raises:
        ValueError: If ucb_beta is a negative number or an unknown name.
    """
    # Generalization: accept plain ints (e.g. ucb_beta=3) by promoting to
    # float; previously ints fell through to the name branch and raised a
    # misleading ValueError. bool is excluded (it is an int subclass).
    if isinstance(ucb_beta, int) and not isinstance(ucb_beta, bool):
        ucb_beta = float(ucb_beta)
    check_valid(ucb_beta)
    if not isinstance(ucb_beta, float):
        # Name of a schedule function: compute beta from **kwargs.
        ucb_beta = _UCB_MAP[ucb_beta](**kwargs)
    assert isinstance(ucb_beta, float), type(ucb_beta)
    assert ucb_beta >= 0.0
    return ucb_beta


def check_valid(ucb_beta):
    """Raise ValueError unless ucb_beta is a non-negative float or the name
    of a known beta-schedule function."""
    if isinstance(ucb_beta, float):
        if ucb_beta < 0.0:
            raise ValueError(("Invalid value for 'ucb_beta': {} "
                              "(expected >= 0.0)").format(ucb_beta))
    else:
        if ucb_beta not in _UCB_MAP:
            raise ValueError(("Invalid value for 'ucb_beta': {} "
                              "(expected 'get_beta_t' or 'get_beta_td')").format(ucb_beta))