I have the following two matrix algebra calculations in a large iteration. I am therefore looking to optimize the calculation.
1:
# Snippet 1: build the (n+1)x1 vector F of non-linear equations in theta.
F = np.matrix(np.zeros(shape=(n+1,1)))
# First n rows: x - diag(theta_1..n) * W * (theta_1..n - m) + theta_i * lambda
# (theta[n] holds the Lagrange multiplier lambda)
F[0:n] = x - np.diag(np.array(theta)[0:n].flatten())*self.W*(theta[0:n]-self.m) + theta[0:n]*theta[n]
F[n] = np.sum(theta[0:n]) - 1; #Lagrange multiplier term: sum(theta) - 1 = 0
2:
# Snippet 2: build the (n+1)x(n+1) Jacobian J of the system above.
J = np.matrix(np.zeros(shape=(n+1,n+1)))
#dF_lambda/d(theta_i) = 1 and dF_lambda/d(lambda) = 0 (already zero from init)
J[n,n] = 0
#The following is correct for the off diagonal elements only
J[:n,:n] = -np.diag(np.array(theta)[0:n].flatten()) * self.W * np.diag(np.array(theta)[0:n].flatten())
#We now overwrite the diagonal of the n x n sub-block with the correct values
J[:n,:n] = (J[:n,:n] - np.diag(np.diag(J[:n,:n])) +
np.diag(np.array(-np.multiply(np.diag(np.diag(self.W)),np.diag(np.array(theta)[0:n].flatten())) * self.W * (theta[0:n] - self.m) + theta[n]).flatten()))
#Finally fill the last column (dF_i/d(lambda)) and last row (dF_lambda/d(theta_j))
J[:n,n] = theta[:n]
J[n,:n] = 1
I'm not sure which of the numpy calls are computationally expensive. Is it possible to optimise this in Python to get close to C speed, or would I have to program it in C itself?
TEST FUNCTIONS FOR 1
import numpy as np
def _nonLinEq(m, W, x, theta):
    """Assemble the vector of non-linear equations in theta.

    The equations come from the partial log-derivative of a multivariate
    normal prior (means m, matrix W) combined with a multinomial
    likelihood with observations x.  The result is the column vector
    F = [F_1, ..., F_n, F_lambda]' where F_i = F(theta_i) and the last
    entry is the Lagrange-multiplier constraint.
    """
    n = len(m)
    out = define_F(n)
    out[:n] = assign_values_to_F(x, theta, W, m, n)
    out[n] = assign_lagrange_multiplier_term(theta, n)  # constraint row
    return out
def define_F(n):
    """Allocate an (n+1) x 1 zero matrix to hold the equation values."""
    return np.matrix(np.zeros((n + 1, 1)))
def diag_theta(theta, n):
    """Return an n x n diagonal matrix from the first n entries of theta."""
    leading = np.array(theta).flatten()[:n]
    return np.diag(leading)
def multiply_terms(theta, W, m, n):
    """Compute diag(theta_1..n) * W * (theta_1..n - m) as a column vector."""
    D = diag_theta(theta, n)
    return D * W * (theta[:n] - m)
def assign_values_to_F(x, theta, W, m, n):
    """First n components of F: x - diag(theta) W (theta - m) + theta_i * lambda.

    theta[n] is the Lagrange multiplier; the product theta[0:n]*theta[n]
    scales each theta_i by it.
    """
    prior_term = multiply_terms(theta, W, m, n)
    return x - prior_term + theta[0:n] * theta[n]
def assign_lagrange_multiplier_term(theta, n):
    """Constraint equation: sum of the first n thetas minus one."""
    return theta[:n].sum() - 1
def test_nonLinEq():
    """Timing harness: evaluate _nonLinEq 1000 times on random n=100 data."""
    n = 100
    raw = np.random.rand(n)
    m = np.matrix(raw / raw.sum()).T          # normalised random means
    W = np.matrix(np.diag(np.random.rand(n)))  # diagonal covariance proxy
    x = np.matrix(np.floor(np.random.rand(n) * 10)).T  # integer-ish counts
    theta = np.matrix(np.append(np.ones(n) / n, -1)).T  # uniform start + lambda
    for _ in range(1000):
        _nonLinEq(m, W, x, theta)
Test Functions For 2
def _jacNLE(m, W, x, theta):
    """Jacobian of the non-linear system: J_ij = dF_i/d(theta_j).

    The (n+1)th row/column correspond to the Lagrange multiplier; the
    J[n, n] entry stays at its initialised value of zero.
    """
    n = len(m)
    J = define_J(n)
    Dtheta = convert_theta_to_diagonal(theta, n)
    # Off-diagonal elements of the top-left n x n sub-block.
    J[:n, :n] = input_off_diagonal_J(Dtheta, W)
    # Overwrite the diagonal of that sub-block with the correct values.
    J[:n, :n] = remove_J_diagonal(J, n) + new_diagonal(W, theta, m, Dtheta, n)
    # Last column: dF_i/d(lambda); last row: dF_lambda/d(theta_j) = 1.
    J[:n, n] = theta[:n]
    J[n, :n] = 1
    return J
def define_J(n):
    """Allocate an (n+1) x (n+1) zero matrix for the Jacobian."""
    return np.matrix(np.zeros((n + 1, n + 1)))
def convert_theta_to_diagonal(theta, n):
    """Build a diagonal matrix from the first n components of theta."""
    flat = np.asarray(theta).ravel()
    return np.diag(flat[:n])
def input_off_diagonal_J(diag_theta, W):
    """Return -D * W * D (correct only off the diagonal of the result)."""
    return -(diag_theta * W * diag_theta)
def remove_J_diagonal(J, n):
    """Return the top-left n x n sub-block of J with its diagonal zeroed."""
    sub = J[:n, :n]
    return sub - np.diag(np.diag(sub))
def matrix_prod(W, diag_theta):
    """Negated elementwise product of diag(W) (as a matrix) with diag_theta."""
    W_diagonal = np.diag(np.diag(W))
    return -np.multiply(W_diagonal, diag_theta)
def new_diagonal(W, theta, m, diag_theta, n):
    """On-diagonal Jacobian terms as a diagonal matrix.

    Each entry is -(W_ii * theta_i) row of W dotted with (theta - m),
    shifted by the Lagrange multiplier theta[n].
    """
    column = matrix_prod(W, diag_theta) * W * (theta[0:n] - m) + theta[n]
    return np.diag(np.array(column).ravel())
def test_jacNLE():
    """Timing harness: evaluate _jacNLE 1000 times on random n=2 data."""
    n = 2
    raw = np.random.rand(n)
    m = np.matrix(raw / raw.sum()).T           # normalised random means
    W = np.matrix(np.diag(np.random.rand(n)))  # diagonal covariance proxy
    x = np.matrix(np.floor(np.random.rand(n) * 10)).T  # integer-ish counts
    theta = np.matrix(np.append(np.ones(n) / n, -1)).T  # uniform start + lambda
    for _ in range(1000):
        _jacNLE(m, W, x, theta)
Have you run `cProfile` on your code to see which part is the most expensive? — I ran `cProfile` on my code and it pointed out the functions containing these two bits of code. I will do as Mr E says to try and get more to the root of the issue. The `cProfile` log is here: dumptext.com/hNaGFjnD. As I interpret it, it says most of the time is spent in `_nonLinEq` and `_jacNLE`, which are essentially made up of the code in 1 and 2 respectively.