author    | David Luevano Alvarado <55825613+luevano@users.noreply.github.com> | 2020-03-26 15:17:25 -0700
committer | David Luevano Alvarado <55825613+luevano@users.noreply.github.com> | 2020-03-26 15:17:25 -0700
commit    | fe11c6d6d18ba9f56db3793f669edebe402ff44c (patch)
tree      | e18c5f98225c6b030112bec7c460ae433d4de718
parent    | d24688f99009b819a3d12d978eab9a1fc95f1d8e (diff)
Separate gaussian and laplacian kernels
-rw-r--r-- | ml_exp/kernels.py | 88
1 file changed, 62 insertions, 26 deletions
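
The split gives each kernel its own width prefactor: gaussian_kernel keeps i_sigma = -0.5 / (sigma**2), while the new laplacian_kernel uses i_sigma = -0.5 / sigma, and both paths take the Euclidean norm (Frobenius norm for 3D representations). Writing x_i for the i-th row of X1 and x_j for the j-th row of X2 (notation introduced here for clarity, not in the source), the entries computed by the diff below work out to

K^{\mathrm{gauss}}_{ij} = \exp\!\left(-\frac{\lVert x_j - x_i \rVert^2}{2\sigma^2}\right),
\qquad
K^{\mathrm{laplace}}_{ij} = \exp\!\left(-\frac{\lVert x_j - x_i \rVert}{2\sigma}\right).

Note that this Laplacian variant keeps the L2 norm with a 1/(2*sigma) factor, rather than the textbook L1-norm form \exp(-\lVert x_j - x_i \rVert_1 / \sigma).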
diff --git a/ml_exp/kernels.py b/ml_exp/kernels.py
index 23c72475f..d593d83fd 100644
--- a/ml_exp/kernels.py
+++ b/ml_exp/kernels.py
@@ -20,7 +20,6 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 SOFTWARE.
 """
-# import math
 import numpy as np
 try:
     import tensorflow as tf
@@ -30,17 +29,15 @@ except ImportError:
     TF_AV = False
 
 
-def laplauss_kernel(X1,
+def gaussian_kernel(X1,
                     X2,
                     sigma,
-                    laplauss='gauss',
                     use_tf=True):
     """
-    Calculates the Lpalacian or Gaussian Kernel.
+    Calculates the Gaussian Kernel.
     X1: first representations.
     X2: second representations.
     sigma: kernel width.
-    laplauss: which kernel to calculate.
     use_tf: if tensorflow should be used.
     """
     # If tf is to be used but couldn't be imported, don't try to use it.
@@ -49,12 +46,7 @@ def laplauss_kernel(X1,
 
     X1_size = X1.shape[0]
     X2_size = X2.shape[0]
-    if laplauss == 'gauss':
-        i_sigma = -0.5 / (sigma**2)
-    elif laplauss == 'laplace':
-        i_sigma = -0.5 / sigma
-    else:
-        i_sigma = -0.5 / (sigma**2)
+    i_sigma = -0.5 / (sigma**2)
 
     if use_tf:
         if tf.config.experimental.list_physical_devices('GPU'):
@@ -72,15 +64,8 @@ def laplauss_kernel(X1,
                     else:
                         norm = tf.norm(X2 - X1[i], axis=-1)
 
-                    if laplauss == 'gauss':
-                        return (i + 1,
-                                K.write(i, tf.exp(i_sigma * tf.square(norm))))
-                    elif laplauss == 'laplace':
-                        return (i + 1,
-                                K.write(i, tf.exp(i_sigma * norm)))
-                    else:
-                        return (i + 1,
-                                K.write(i, tf.exp(i_sigma * tf.square(norm))))
+                    return (i + 1,
+                            K.write(i, tf.exp(i_sigma * tf.square(norm))))
 
                 K = tf.TensorArray(dtype=tf.float64,
                                    size=X1_size)
@@ -91,17 +76,68 @@ def laplauss_kernel(X1,
             raise TypeError('No GPU found, could not create Tensor objects.')
     else:
         K = np.zeros((X1_size, X2_size), dtype=np.float64)
-        # Faster way of calculating the kernel (no numba support).
         for i in range(X1_size):
             if X2.ndim == 3:
                 norm = np.linalg.norm(X2 - X1[i], axis=(1, 2))
             else:
                 norm = np.linalg.norm(X2 - X1[i], axis=-1)
-            if laplauss == 'gauss':
-                K[i, :] = np.exp(i_sigma * np.square(norm))
-            elif laplauss == 'laplace':
-                K[i, :] = np.exp(i_sigma * norm)
+            K[i, :] = np.exp(i_sigma * np.square(norm))
+
+    return K
+
+
+def laplacian_kernel(X1,
+                     X2,
+                     sigma,
+                     use_tf=True):
+    """
+    Calculates the Laplacian Kernel.
+    X1: first representations.
+    X2: second representations.
+    sigma: kernel width.
+    use_tf: if tensorflow should be used.
+    """
+    # If tf is to be used but couldn't be imported, don't try to use it.
+    if use_tf and not TF_AV:
+        use_tf = False
+
+    X1_size = X1.shape[0]
+    X2_size = X2.shape[0]
+    i_sigma = -0.5 / sigma
+
+    if use_tf:
+        if tf.config.experimental.list_physical_devices('GPU'):
+            with tf.device('GPU:0'):
+                X1 = tf.convert_to_tensor(X1)
+                X2 = tf.convert_to_tensor(X2)
+                X2r = tf.rank(X2)
+
+                def cond(i, _):
+                    return tf.less(i, X1_size)
+
+                def body(i, K):
+                    if X2r == 3:
+                        norm = tf.norm(X2 - X1[i], axis=(1, 2))
+                    else:
+                        norm = tf.norm(X2 - X1[i], axis=-1)
+
+                    return (i + 1,
+                            K.write(i, tf.exp(i_sigma * norm)))
+
+                K = tf.TensorArray(dtype=tf.float64,
+                                   size=X1_size)
+                i_state = (0, K)
+                n, K = tf.while_loop(cond, body, i_state)
+                K = K.stack()
+        else:
+            raise TypeError('No GPU found, could not create Tensor objects.')
+    else:
+        K = np.zeros((X1_size, X2_size), dtype=np.float64)
+        for i in range(X1_size):
+            if X2.ndim == 3:
+                norm = np.linalg.norm(X2 - X1[i], axis=(1, 2))
             else:
-                K[i, :] = np.exp(i_sigma * np.square(norm))
+                norm = np.linalg.norm(X2 - X1[i], axis=-1)
+            K[i, :] = np.exp(i_sigma * norm)
 
     return K
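
A minimal usage sketch of the two functions introduced by this commit, on the NumPy path (use_tf=False, so neither TensorFlow nor a GPU is needed). The toy array shapes, the sigma value, and the ml_exp.kernels import path are assumptions for illustration, not part of the commit:

import numpy as np

from ml_exp.kernels import gaussian_kernel, laplacian_kernel

# Toy representations: 5 and 3 samples, each a flat vector of length 8
# (shapes and values are made up for illustration).
rng = np.random.default_rng(0)
X1 = rng.normal(size=(5, 8))
X2 = rng.normal(size=(3, 8))

# use_tf=False forces the NumPy code path.
K_gauss = gaussian_kernel(X1, X2, sigma=100.0, use_tf=False)
K_lapl = laplacian_kernel(X1, X2, sigma=100.0, use_tf=False)

print(K_gauss.shape, K_lapl.shape)  # (5, 3) (5, 3)

Both functions return an X1.shape[0] by X2.shape[0] matrix; for 3D representations (e.g. stacked matrix representations) the norm is taken over the last two axes instead.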