convert numpy float64 to standard float
annereinarz committed Jun 17, 2024
1 parent 88217d2 commit 7b0f72c
Showing 4 changed files with 19 additions and 19 deletions.
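
Every change in this commit is one pattern, repeated: a model output that was a NumPy float64 scalar (from np.linalg.norm, np.log, scipy's multivariate_normal.logpdf, or arithmetic on them) is wrapped in float() before being returned. The commit message gives no rationale beyond the title; a plausible reading, assumed here rather than stated in the commit, is that UM-Bridge model outputs are exchanged as JSON over HTTP, so returning built-in floats keeps NumPy scalar types out of the response entirely. A minimal sketch of the conversion itself:

    import numpy as np

    r = np.linalg.norm([0.3, 0.4])  # NumPy reductions return numpy.float64 scalars
    print(type(r))                  # <class 'numpy.float64'>
    print(type(float(r)))           # <class 'float'>, the built-in type

    # Same pattern for multi-component outputs such as gradients:
    grad = [float(g) for g in [np.float64(0.1), np.float64(0.2)]]
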
benchmarks/analytic-banana/server.py (2 additions, 2 deletions)

@@ -25,11 +25,11 @@ def __call__(self, parameters, config):
         y = [(parameters[0][0] / a),
              (parameters[0][1] * a + a * b * (parameters[0][0]**2 + a**2))]

-        return [[multivariate_normal.logpdf(y, [0, 4], [[1.0*scale, 0.5*scale], [0.5*scale, 1.0*scale]])]]
+        return [[float(multivariate_normal.logpdf(y, [0, 4], [[1.0*scale, 0.5*scale], [0.5*scale, 1.0*scale]]))]]

     def supports_evaluate(self):
         return True

 model = Banana()

-umbridge.serve_models([model], 4243)
\ No newline at end of file
+umbridge.serve_models([model], 4243)
benchmarks/analytic-donut/server.py (6 additions, 6 deletions)

@@ -19,7 +19,7 @@ def get_output_sizes(self, config):

     def __call__(self, parameters, config):
         r = np.linalg.norm(parameters[0])
-        return [[ - (r - Donut.radius)**2 / Donut.sigma2 ]]
+        return [[ float(- (r - Donut.radius)**2 / Donut.sigma2) ]]

     def supports_evaluate(self):
         return True
@@ -28,8 +28,8 @@ def gradient(self, out_wrt, in_wrt, parameters, sens, config):
         r = np.linalg.norm(parameters[0])
         if (r == 0):
             return [0,0]
-        return [sens[0] * parameters[0][0] * (Donut.radius / r - 1) * 2 / Donut.sigma2,
-                sens[0] * parameters[0][1] * (Donut.radius / r - 1) * 2 / Donut.sigma2]
+        return [float(sens[0] * parameters[0][0] * (Donut.radius / r - 1) * 2 / Donut.sigma2),
+                float(sens[0] * parameters[0][1] * (Donut.radius / r - 1) * 2 / Donut.sigma2)]

     def supports_gradient(self):
         return True
@@ -38,12 +38,12 @@ def apply_jacobian(self, out_wrt, in_wrt, parameters, vec, config):
         r = np.linalg.norm(parameters[0])
         if (r == 0):
             return [0]
-        return [vec[0] * parameters[0][0] * (Donut.radius / r - 1) * 2 / Donut.sigma2
-              + vec[1] * parameters[0][1] * (Donut.radius / r - 1) * 2 / Donut.sigma2]
+        return [float(vec[0] * parameters[0][0] * (Donut.radius / r - 1) * 2 / Donut.sigma2
+              + vec[1] * parameters[0][1] * (Donut.radius / r - 1) * 2 / Donut.sigma2)]

     def supports_apply_jacobian(self):
         return True

 model = Donut()

-umbridge.serve_models([model], 4243)
\ No newline at end of file
+umbridge.serve_models([model], 4243)
benchmarks/analytic-funnel/server.py (6 additions, 6 deletions)

@@ -22,14 +22,14 @@ def f(x, m, s):
         s0 = 3
         m1 = 0
         s1 = np.exp(parameters[0][0] / 2)
-        return [[ f(parameters[0][0], m0, s0) + f(parameters[0][1], m1, s1) ]]
+        return [[ float(f(parameters[0][0], m0, s0) + f(parameters[0][1], m1, s1)) ]]

     def supports_evaluate(self):
         return True

     def gradient(self, out_wrt, in_wrt, parameters, sens, config):
-        return [self.apply_jacobian(out_wrt, in_wrt, parameters, [sens[0], 0], config)[0],
-                self.apply_jacobian(out_wrt, in_wrt, parameters, [0, sens[0]], config)[0]]
+        return [float(self.apply_jacobian(out_wrt, in_wrt, parameters, [sens[0], 0], config)[0]),
+                float(self.apply_jacobian(out_wrt, in_wrt, parameters, [0, sens[0]], config)[0])]

     def supports_gradient(self):
         return True
@@ -46,12 +46,12 @@ def dfds(x, m, s):
         m1 = 0
         s1 = np.exp(parameters[0][0] / 2)

-        return [vec[1] * dfdx(parameters[0][1], m1, s1)
-              + vec[0] * (dfdx(parameters[0][0], m0, s0) + .5 * s1 * dfds(parameters[0][1], m1, s1))]
+        return [float(vec[1] * dfdx(parameters[0][1], m1, s1)
+              + vec[0] * (dfdx(parameters[0][0], m0, s0) + .5 * s1 * dfds(parameters[0][1], m1, s1)))]

     def supports_apply_jacobian(self):
         return True

 model = Funnel()

-umbridge.serve_models([model], 4243)
\ No newline at end of file
+umbridge.serve_models([model], 4243)
benchmarks/analytic-gaussian-mixture/server.py (5 additions, 5 deletions)

@@ -23,14 +23,14 @@ def __call__(self, parameters, config):

         if dens1 + dens2 + dens3 == 0: # log(0) not defined, so return minimal float value
             return [[ sys.float_info.min ]]
-        return [[ np.log(dens1 + dens2 + dens3) ]]
+        return [[ float(np.log(dens1 + dens2 + dens3)) ]]

     def supports_evaluate(self):
         return True

     def gradient(self, out_wrt, in_wrt, parameters, sens, config):
-        return [self.apply_jacobian(out_wrt, in_wrt, parameters, [sens[0], 0], config)[0],
-                self.apply_jacobian(out_wrt, in_wrt, parameters, [0, sens[0]], config)[0]]
+        return [float(self.apply_jacobian(out_wrt, in_wrt, parameters, [sens[0], 0], config)[0]),
+                float(self.apply_jacobian(out_wrt, in_wrt, parameters, [0, sens[0]], config)[0])]

     def supports_gradient(self):
         return True
@@ -43,14 +43,14 @@ def apply_jacobian(self, out_wrt, in_wrt, parameters, vec, config):
         if dens1 + dens2 + dens3 == 0: # Return zero in log(0) case above
             return [0]

-        return [- vec[0] / (dens1 + dens2 + dens3)
+        return [float(- vec[0] / (dens1 + dens2 + dens3)
                 * (dens1 * (parameters[0][0] - -1.5) / 0.8
                 + dens2 * (parameters[0][0] - 1.5) / 0.8
                 + dens3 * (parameters[0][0] - -2) / 0.5)
                 - vec[1] / (dens1 + dens2 + dens3)
                 * (dens1 * (parameters[0][1] - -1.5) / 0.8
                 + dens2 * (parameters[0][1] - 1.5) / 0.8
-                + dens3 * (parameters[0][1] - 2) / 0.5)
+                + dens3 * (parameters[0][1] - 2) / 0.5))
                 ]

     def supports_apply_jacobian(self):
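
All four servers expose their model with umbridge.serve_models([model], 4243). For context, a client-side sketch using the umbridge Python client; the host and the model name "posterior" are assumptions for illustration, not taken from this diff:

    import umbridge

    # Hypothetical call against one of the servers in this commit,
    # assumed to be running locally on port 4243.
    model = umbridge.HTTPModel("http://localhost:4243", "posterior")
    print(model([[0.5, 0.5]]))  # outputs now arrive as plain Python floats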
