author    David Luevano Alvarado <55825613+luevano@users.noreply.github.com>    2020-03-03 23:44:51 -0700
committer David Luevano Alvarado <55825613+luevano@users.noreply.github.com>    2020-03-03 23:44:51 -0700
commit    f4f0cfe0510def48736c4f9f0a4656bf5fb89200 (patch)
tree      1490952bde38687c8408ee118e2defe73df1cac6
parent    52383ddeb87312708eeb1da765b175fb603f2802 (diff)
Fix normal ml error
-rw-r--r--  ml_exp/do_ml.py    54
-rw-r--r--  ml_exp/kernels.py   9
2 files changed, 34 insertions, 29 deletions
diff --git a/ml_exp/do_ml.py b/ml_exp/do_ml.py
index 379d0efd0..57661578a 100644
--- a/ml_exp/do_ml.py
+++ b/ml_exp/do_ml.py
@@ -79,36 +79,50 @@ def simple_ml(descriptors,
printc(f'\tTest size: {test_size}', 'CYAN')
    printc(f'\tSigma: {sigma}', 'CYAN')
- X_training = descriptors[:training_size]
- Y_training = energies[:training_size]
- K_training = gaussian_kernel(X_training,
- X_training,
- sigma,
- use_tf=use_tf)
if use_tf:
+ X_training = descriptors[:training_size]
+ Y_training = energies[:training_size]
+ K_training = gaussian_kernel(X_training,
+ X_training,
+ sigma,
+ use_tf=use_tf)
+
# Y_training = tf.expand_dims(Y_training, 1)
alpha = tf.linalg.cholesky_solve(tf.linalg.cholesky(K_training),
Y_training)
- else:
- alpha = LA.cho_solve(LA.cho_factor(K_training),
- Y_training)
- X_test = descriptors[-test_size:]
- Y_test = energies[-test_size:]
- K_test = gaussian_kernel(X_test,
- X_training,
- sigma,
- use_tf=use_tf)
- if use_tf:
+ X_test = descriptors[-test_size:]
+ Y_test = energies[-test_size:]
+ K_test = gaussian_kernel(X_test,
+ X_training,
+ sigma,
+ use_tf=use_tf)
+
# Y_test = tf.expand_dims(Y_test, 1)
Y_predicted = tf.tensordot(K_test, alpha, 1)
- else:
- Y_predicted = np.dot(K_test, alpha)
- print('Ducky')
- if use_tf:
mae = tf.reduce_mean(tf.abs(Y_predicted - Y_test))
else:
+ X_training = descriptors[:training_size]
+ Y_training = energies[:training_size]
+ K_training = gaussian_kernel(X_training,
+ X_training,
+ sigma,
+ use_tf=use_tf)
+
+ # Adding a small value on the diagonal for cho_solve.
+ K_training[np.diag_indices_from(K_training)] += 1e-8
+ alpha = LA.cho_solve(LA.cho_factor(K_training),
+ Y_training)
+
+ X_test = descriptors[-test_size:]
+ Y_test = energies[-test_size:]
+ K_test = gaussian_kernel(X_test,
+ X_training,
+ sigma,
+ use_tf=use_tf)
+ Y_predicted = np.dot(K_test, alpha)
+
mae = np.mean(np.abs(Y_predicted - Y_test))
if show_msgs:
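
The rewritten NumPy branch above is plain kernel ridge regression: build the
training kernel, factor it with a Cholesky decomposition, solve for the
regression coefficients alpha, and predict with the test kernel. A minimal
standalone sketch of that path, assuming the kernel matrices are already
computed (the 1e-8 jitter matches the patch; the helper name krr_predict is
illustrative, not part of the project):

    import numpy as np
    from scipy import linalg as LA

    def krr_predict(K_training, Y_training, K_test, jitter=1e-8):
        # A small value on the diagonal keeps the Cholesky factorization
        # stable when the kernel matrix is nearly singular.
        K = K_training.copy()
        K[np.diag_indices_from(K)] += jitter
        alpha = LA.cho_solve(LA.cho_factor(K), Y_training)
        return np.dot(K_test, alpha)

The TensorFlow branch does the same thing with tf.linalg.cholesky_solve and
tf.tensordot, without the explicit diagonal jitter.
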
diff --git a/ml_exp/kernels.py b/ml_exp/kernels.py
index 26ff0d77b..c203af30e 100644
--- a/ml_exp/kernels.py
+++ b/ml_exp/kernels.py
@@ -73,13 +73,4 @@ def gaussian_kernel(X1,
else:
norm = np.linalg.norm(X2 - X1[i], axis=-1)
K[i, :] = np.exp(i_sigma * np.square(norm))
-
- # Old way of calculating the kernel (numba support).
- """
- for i, x1 in enumerate(X1):
- for j, x2 in enumerate(X2):
- f_norm = np.linalg.norm(x2 - x1)
- K[i, j] = math.exp(i_sigma * f_norm**2)
- """
-
return K
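
For reference, a minimal standalone sketch of the vectorized NumPy path that
this hunk keeps (the removed double loop was only there for numba support).
It assumes X1 and X2 are 2D arrays of flattened descriptors and that i_sigma
is -0.5 / sigma**2, which this sketch assumes is defined earlier in
gaussian_kernel (not shown in the hunk); the name gaussian_kernel_np is
illustrative only:

    import numpy as np

    def gaussian_kernel_np(X1, X2, sigma):
        # K[i, j] = exp(-||X1[i] - X2[j]||^2 / (2 * sigma**2))
        i_sigma = -0.5 / sigma**2
        K = np.empty((X1.shape[0], X2.shape[0]), dtype=X1.dtype)
        for i in range(X1.shape[0]):
            # One row at a time: distances from X1[i] to every row of X2.
            norm = np.linalg.norm(X2 - X1[i], axis=-1)
            K[i, :] = np.exp(i_sigma * np.square(norm))
        return K
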