Remove contextual GPR
This commit is contained in:
parent
e86fce59d9
commit
ee53d0cead
|
@ -126,136 +126,8 @@ class BasicGP(BaseModel):
|
||||||
return k
|
return k
|
||||||
|
|
||||||
|
|
||||||
class ContextualGP(BaseModel):
    """GP model whose kernel is a product of two ARD Exponential kernels
    (over two disjoint sets of active dims) plus a White noise kernel.

    NOTE(review): `BaseModel`, `_LENGTHSCALE_BOUNDS` and the gpflow import
    live elsewhere in this file; this class only overrides kernel setup.
    """

    # Saved-hyperparameter keys, grouped per sub-kernel in the same order
    # as the kernels created by _build_kernel.
    _KERNEL_HP_KEYS = [
        [
            'GPRC/kern/kernels/0/kernels/0/variance',
            'GPRC/kern/kernels/0/kernels/0/lengthscales',
        ],
        [
            'GPRC/kern/kernels/0/kernels/1/variance',
            'GPRC/kern/kernels/0/kernels/1/lengthscales',
        ],
        [
            'GPRC/kern/kernels/1/variance',
        ]
    ]

    def _get_kernel_kwargs(self, **kwargs):
        """Return a list of constructor kwargs for the three sub-kernels.

        Pops 'k0_active_dims', 'k1_active_dims' and 'X_dim' from kwargs:
        the first two become ARD Exponential configs, the last sizes the
        White kernel.
        """
        ard_dims = [kwargs.pop('k0_active_dims'), kwargs.pop('k1_active_dims')]
        per_kernel = [
            {
                'input_dim': len(dims),
                'active_dims': dims,
                'ARD': True,
            }
            for dims in ard_dims
        ]
        per_kernel.append({'input_dim': kwargs.pop('X_dim')})
        return per_kernel

    def _build_kernel(self, kernel_kwargs, **kwargs):
        """Build (exp0 * exp1) + white, optionally bounding lengthscales.

        When kwargs['optimize_hyperparameters'] is truthy, both Exponential
        kernels get a Logistic transform constraining their lengthscales to
        self._LENGTHSCALE_BOUNDS during optimization.
        """
        exp0 = gpflow.kernels.Exponential(**kernel_kwargs[0])
        exp1 = gpflow.kernels.Exponential(**kernel_kwargs[1])
        white = gpflow.kernels.White(**kernel_kwargs[2])
        if kwargs['optimize_hyperparameters']:
            # Keep lengthscales inside a bounded range while training.
            for kern in (exp0, exp1):
                kern.lengthscales.transform = gpflow.transforms.Logistic(
                    *self._LENGTHSCALE_BOUNDS)
        return exp0 * exp1 + white
|
|
||||||
|
|
||||||
|
|
||||||
class ContextualGP_Alt0(ContextualGP):
    """ContextualGP variant that pins the second kernel's lengthscales.

    The second (k1) Exponential kernel gets a constant, non-trainable
    lengthscale on every dimension (default 9.0, overridable via the
    'context_lengthscale_const' kwarg), and no Logistic bound is applied
    to it.
    """

    def __init__(self, X, y, hyperparameters=None, optimize_hyperparameters=False,
                 learning_rate=0.001, maxiter=5000, **kwargs):
        # Constant lengthscale value used for every dim of the k1 kernel.
        self._context_lengthscale_const = kwargs.pop('context_lengthscale_const', 9.0)
        super(ContextualGP_Alt0, self).__init__(
            X, y, hyperparameters=hyperparameters,
            optimize_hyperparameters=optimize_hyperparameters,
            learning_rate=learning_rate, maxiter=maxiter, **kwargs)

    def _build_kernel(self, kernel_kwargs, **kwargs):
        """Build (exp0 * exp1) + white with exp1 lengthscales frozen."""
        n_ctx_dims = kernel_kwargs[1]['input_dim']
        kernel_kwargs[1]['lengthscales'] = (
            np.ones((n_ctx_dims,)) * self._context_lengthscale_const)

        exp0 = gpflow.kernels.Exponential(**kernel_kwargs[0])
        exp1 = gpflow.kernels.Exponential(**kernel_kwargs[1])
        # The constant lengthscales must survive optimization untouched.
        exp1.lengthscales.trainable = False
        white = gpflow.kernels.White(**kernel_kwargs[2])

        if kwargs['optimize_hyperparameters']:
            # Only exp0's lengthscales are optimized, inside fixed bounds.
            exp0.lengthscales.transform = gpflow.transforms.Logistic(
                *self._LENGTHSCALE_BOUNDS)
        return exp0 * exp1 + white
|
|
||||||
|
|
||||||
|
|
||||||
class ContextualGP_Alt1(ContextualGP):
    """ContextualGP variant that can restore lengthscales from disk.

    With context_only=False, both ARD kernels' lengthscales are loaded
    from a saved hyperparameter file and frozen. With context_only=True
    (the default), nothing is restored and only k0's lengthscales get the
    Logistic bound when optimizing.
    """

    def __init__(self, X, y, hyperparameters=None, optimize_hyperparameters=False,
                 learning_rate=0.001, maxiter=5000, **kwargs):
        # Whether to leave saved lengthscales alone (True) or restore and
        # freeze them (False).
        self._context_only = kwargs.pop('context_only', True)
        # Where the saved hyperparameters live, and which entry to use.
        self._hyperparams_path = kwargs.pop('hyperparameters_path')
        self._hyperparams_idx = kwargs.pop('hyperparameters_idx', 0)
        super(ContextualGP_Alt1, self).__init__(
            X, y, hyperparameters=hyperparameters,
            optimize_hyperparameters=optimize_hyperparameters,
            learning_rate=learning_rate, maxiter=maxiter, **kwargs)

    def _build_kernel(self, kernel_kwargs, **kwargs):
        """Build (exp0 * exp1) + white, restoring saved lengthscales when
        context_only is False."""
        hyperparams = self.load_hyperparameters(self._hyperparams_path,
                                                self._hyperparams_idx)

        restore_lengthscales = not self._context_only
        if restore_lengthscales:
            # Seed both ARD kernels with the lengthscales saved on disk.
            saved_keys = ('GPRC/kern/kernels/0/kernels/0/lengthscales',
                          'GPRC/kern/kernels/0/kernels/1/lengthscales')
            for kern_kw, key in zip(kernel_kwargs, saved_keys):
                kern_kw['lengthscales'] = np.array(hyperparams[key])

        exp0 = gpflow.kernels.Exponential(**kernel_kwargs[0])
        exp1 = gpflow.kernels.Exponential(**kernel_kwargs[1])
        white = gpflow.kernels.White(**kernel_kwargs[2])

        if restore_lengthscales:
            # Restored lengthscales must stay fixed during optimization.
            exp0.lengthscales.trainable = False
            exp1.lengthscales.trainable = False
        elif kwargs['optimize_hyperparameters']:
            # context_only mode: only exp0's lengthscales are bounded.
            exp0.lengthscales.transform = gpflow.transforms.Logistic(
                *self._LENGTHSCALE_BOUNDS)
        return exp0 * exp1 + white
|
|
||||||
|
|
||||||
|
|
||||||
class AdditiveContextualGP(ContextualGP):
    """ContextualGP variant with an additive kernel: k0 + k1 + white
    (the base class multiplies k0 and k1 instead)."""

    def _build_kernel(self, kernel_kwargs, **kwargs):
        """Build exp0 + exp1 + white, optionally bounding lengthscales."""
        exp_kernels = [gpflow.kernels.Exponential(**kern_kw)
                       for kern_kw in kernel_kwargs[:2]]
        white = gpflow.kernels.White(**kernel_kwargs[2])
        if kwargs['optimize_hyperparameters']:
            # Keep both kernels' lengthscales inside fixed bounds.
            for kern in exp_kernels:
                kern.lengthscales.transform = gpflow.transforms.Logistic(
                    *self._LENGTHSCALE_BOUNDS)
        return exp_kernels[0] + exp_kernels[1] + white
|
|
||||||
|
|
||||||
|
|
||||||
# Registry mapping a model's class name to its class, for lookup by
# string identifier. Keys equal each class's __name__.
_MODEL_MAP = {
    cls.__name__: cls
    for cls in (BasicGP, ContextualGP, ContextualGP_Alt0,
                ContextualGP_Alt1, AdditiveContextualGP)
}
|
||||||
|
|
||||||
|
|
||||||
|
|
Loading…
Reference in New Issue