MGP #65
base: master
Changes from all commits
@@ -0,0 +1,167 @@
# Copyright 2017 Joachim van der Herten, Nicolas Knudde
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .tf_wraps import rowwise_gradients

from GPflow.param import Parameterized, AutoFlow
from GPflow.model import Model, GPModel
from GPflow.likelihoods import Gaussian
from GPflow import settings

import tensorflow as tf

float_type = settings.dtypes.float_type


class ModelWrapper(Parameterized):
    """
    Class for conveniently building wrappers around models defined in GPflow. Once wrapped, all lookups for
    attributes that are not found in the wrapper class are automatically forwarded to the wrapped model.

    To influence the I/O of methods on the wrapped class, simply implement the method in the wrapper and call the
    appropriate methods on the wrapped class. Specific logic is included to make sure that if AutoFlow methods are
    influenced following this pattern, the original AF storage (if it exists) is unaffected and a new storage is
    added to the subclass.
    """
    def __init__(self, model):
        """
        :param model: model to be wrapped
        """
        super(ModelWrapper, self).__init__()

        assert isinstance(model, (Model, ModelWrapper))
        #: Wrapped model
        self.wrapped = model

    def __getattr__(self, item):
        """
        If an attribute is not found in this class, it is searched for in the wrapped model.
        """
        # Exception for AF storages: if a method with the same name exists in this class, do not look up the cache
        # in the wrapped model.
        if item.endswith('_AF_storage'):
            # str.rstrip strips a character set rather than a suffix, so slice the suffix off instead.
            method = item[1:-len('_AF_storage')]
            if method in dir(self):
                raise AttributeError("{0} has no attribute {1}".format(self.__class__.__name__, item))
        return getattr(self.wrapped, item)

    def __setattr__(self, key, value):
        """
        1) If setting the :attr:`wrapped` attribute, point its parent to this object (the wrapper).
        2) If setting the recompilation attribute, always do this on the wrapped class.
        """
        if key == 'wrapped':  # '==' rather than 'is': identity comparison of strings is unreliable
            object.__setattr__(self, key, value)
            value.__setattr__('_parent', self)
            return

        try:
            # If the attribute is in this object, set it. Test by using getattribute instead of hasattr to avoid
            # lookup in the wrapped object.
            self.__getattribute__(key)
            super(ModelWrapper, self).__setattr__(key, value)
        except AttributeError:
            # The attribute is not in the wrapper.
            # In case no wrapped object is set yet (e.g. in the constructor), set it in the wrapper.
            if 'wrapped' not in self.__dict__:
                super(ModelWrapper, self).__setattr__(key, value)
                return

            if hasattr(self, key):
                # Now use hasattr: we know getattribute already failed, so if it returns True the attribute must be
                # in the wrapped object. hasattr is called on self instead of self.wrapped to account for the
                # different handling of AF storages.
                # Prefer setting the attribute in the wrapped object if it exists.
                setattr(self.wrapped, key, value)
            else:
                # If not, set it in the wrapper nonetheless.
                super(ModelWrapper, self).__setattr__(key, value)

    def __eq__(self, other):
        return self.wrapped == other

    def __str__(self, prepend=''):
        return self.wrapped.__str__(prepend)


class MGP(ModelWrapper):
    """
    Marginalisation of the hyperparameters during evaluation time using a Laplace approximation.
    Key reference:

    ::

        @article{Garnett:2013,
            title={Active learning of linear embeddings for Gaussian processes},
            author={Garnett, Roman and Osborne, Michael A and Hennig, Philipp},
            journal={arXiv preprint arXiv:1310.6740},
            year={2013}
        }
    """

    def __init__(self, model):
        assert isinstance(model, GPModel), "Object has to be a GP model"
        assert isinstance(model.likelihood, Gaussian), "Likelihood has to be Gaussian"
        super(MGP, self).__init__(model)

    def build_predict(self, fmean, fvar, theta):
        h = tf.hessians(self.build_likelihood() + self.build_prior(), theta)[0]
        L = tf.cholesky(-h)

        N = tf.shape(fmean)[0]
        D = tf.shape(fmean)[1]

        fmeanf = tf.reshape(fmean, [N * D, 1])  # N*D x 1
Review comment: N x D x 1 :)
Reply: No, has to be N*D x 1 so I can use rowwise_gradients, then I reshape later.
        fvarf = tf.reshape(fvar, [N * D, 1])  # N*D x 1

        Dfmean = rowwise_gradients(fmeanf, theta)  # N*D x k
        Dfvar = rowwise_gradients(fvarf, theta)  # N*D x k

        tmp1 = tf.transpose(tf.matrix_triangular_solve(L, tf.transpose(Dfmean)))  # N*D x k
        tmp2 = tf.transpose(tf.matrix_triangular_solve(L, tf.transpose(Dfvar)))  # N*D x k
        # Float literals avoid Python 2 integer division (there 4 / 3 == 1 and 1 / 3 == 0).
        return fmean, 4.0 / 3.0 * fvar + tf.reshape(tf.reduce_sum(tf.square(tmp1), axis=1), [N, D]) \
            + 1.0 / 3.0 / (fvar + 1e-3) * tf.reshape(tf.reduce_sum(tf.square(tmp2), axis=1), [N, D])
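For reference, the return value above matches the marginalised predictive moments of the Garnett et al. (2013) reference cited in the class docstring. Since L is the Cholesky factor of -h, we have ||L^{-1} g||^2 = g^T (-h)^{-1} g, i.e. the quadratic form with the Laplace covariance Sigma = (-h)^{-1} of the hyperparameters theta; as read from the code:

    \tilde{\mu}(x) = \mu(x), \qquad
    \tilde{\sigma}^2(x) = \tfrac{4}{3}\,\sigma^2(x)
        + \nabla_\theta\mu^\top \Sigma\, \nabla_\theta\mu
        + \frac{1}{3\,\sigma^2(x)}\, (\nabla_\theta\sigma^2)^\top \Sigma\, \nabla_\theta\sigma^2

The 1e-3 jitter in the last term guards against division by a near-zero predictive variance.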

    @AutoFlow((float_type, [None, None]))
    def predict_f(self, Xnew):
        """
        Compute the mean and variance of the latent function(s) at the points Xnew.
        """

Review comment: update doc string? marginalised around ...

        theta = self._predict_f_AF_storage['free_vars']

Review comment: ugh
Reply: No other way, have to wait for GPflow issue.

        fmean, fvar = self.wrapped.build_predict(Xnew)
        return self.build_predict(fmean, fvar, theta)

    @AutoFlow((float_type, [None, None]))
    def predict_y(self, Xnew):
        """
        Compute the mean and variance of held-out data at the points Xnew.
        """
        theta = self._predict_y_AF_storage['free_vars']
        pred_f_mean, pred_f_var = self.wrapped.build_predict(Xnew)
        fmean, fvar = self.wrapped.likelihood.predict_mean_and_var(pred_f_mean, pred_f_var)
        return self.build_predict(fmean, fvar, theta)

    @AutoFlow((float_type, [None, None]), (float_type, [None, None]))
    def predict_density(self, Xnew, Ynew):
        """
        Compute the (log) density of the data Ynew at the points Xnew.

        Note that this computes the log density of the data individually,
        ignoring correlations between them. The result is a matrix the same
        shape as Ynew containing the log densities.
        """
        theta = self._predict_density_AF_storage['free_vars']
        pred_f_mean, pred_f_var = self.wrapped.build_predict(Xnew)
        pred_f_mean, pred_f_var = self.build_predict(pred_f_mean, pred_f_var, theta)
        return self.likelihood.predict_density(pred_f_mean, pred_f_var, Ynew)
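To illustrate the intended use of MGP, a minimal sketch assuming the GPflow 0.x API this PR targets; the data, kernel choice, and variable names here are hypothetical, not taken from the PR:

    import numpy as np
    from GPflow.gpr import GPR
    from GPflow.kernels import RBF

    # Hypothetical toy regression data.
    X = np.random.rand(20, 1)
    Y = np.sin(10 * X) + 0.1 * np.random.randn(20, 1)

    model = GPR(X, Y, kern=RBF(1))
    model.optimize()  # MAP point estimate of the hyperparameters

    # MGP as defined above: attribute lookups not found on the wrapper forward to `model`.
    mgp = MGP(model)
    # The returned variance now includes the Laplace correction for hyperparameter uncertainty.
    mean, var = mgp.predict_f(np.linspace(0, 1, 50)[:, None])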
@@ -0,0 +1,42 @@
# Copyright 2017 Joachim van der Herten, Nicolas Knudde
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import tensorflow as tf
from GPflow import settings

float_type = settings.dtypes.float_type


def rowwise_gradients(Y, X):
    """
    For a 2D tensor Y, compute the derivative of each row w.r.t. a 1D tensor X.

    This is done with a while_loop, because of a known incompatibility between map_fn and gradients.
    """
    num_rows = tf.shape(Y)[0]
    num_feat = tf.shape(X)[0]

    def body(old_grads, row):
        g = tf.expand_dims(tf.gradients(Y[row], X)[0], axis=0)
        new_grads = tf.concat([old_grads, g], axis=0)
        return new_grads, row + 1

    def cond(_, row):
        return tf.less(row, num_rows)

    shape_invariants = [tf.TensorShape([None, None]), tf.TensorShape([])]
    grads, _ = tf.while_loop(cond, body, [tf.zeros([0, num_feat], float_type), tf.constant(0)],
                             shape_invariants=shape_invariants)

    return grads
Review comment: import models twice?
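As a quick sanity check of rowwise_gradients, a minimal sketch under TF 1.x graph mode; the names `x` and `y` are hypothetical, and it assumes float_type resolves to float64 (GPflow's default):

    import numpy as np
    import tensorflow as tf

    # rowwise_gradients as defined above is assumed to be in scope.
    x = tf.Variable(np.array([1.0, 2.0]))  # k = 2 free variables (float64)
    # Three scalar rows built from x, reshaped to a 3 x 1 tensor.
    y = tf.reshape(tf.stack([x[0] * x[1], x[0] ** 2, x[1] ** 2]), [3, 1])

    grads = rowwise_gradients(y, x)  # expected shape: 3 x 2, row i = d y[i] / d x

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        print(sess.run(grads))  # [[2. 1.], [2. 0.], [0. 4.]]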