Need help with dynamic optimization: error message "`f0` passed has more than 1 dimension" in the optimization algorithm



I want to optimize a rocket trajectory. The problem I am trying to solve is described in this video: https://www.youtube.com/watch?v=9qsiCGpvwKA&t=332s. It is about finding the minimum time for a rocket to go from zero altitude and zero velocity at launch to an altitude of 10 with zero velocity at the end. The video presents a solution, but I would like to solve the problem myself without using the APMonitor toolbox.

I set up the following code as the objective function to optimize; the rocket's trajectory is shaped by 8 control points:

import matplotlib.pyplot as plt
from scipy.integrate import solve_ivp
import numpy as np
from scipy import interpolate

def rocketmotion(t, Y, x, z):
    # Linearly interpolate the control u between the two grid points of the
    # current segment; clamp t to the segment end so the integrator cannot
    # step outside the interpolation range.
    f = interpolate.interp1d(x, z)
    if t > x[1]:
        u = f(x[1])
    else:
        u = f(t)
    s = Y[0]; v = Y[1]; m = Y[2]
    ds_dt = v
    dv_dt = (u - 0.2*v**2)/m   # thrust minus drag, divided by current mass
    dm_dt = -0.01*u**2         # fuel consumption
    return [ds_dt, dv_dt, dm_dt]
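
For reference, the right-hand side above implements the point-mass dynamics from the video, read directly off the code, with position s, velocity v, mass m, and thrust control u:

\dot{s} = v, \qquad \dot{v} = \frac{u - 0.2\,v^2}{m}, \qquad \dot{m} = -0.01\,u^2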

def objectiveFunction(x, other_args):
    n_Gridpoints = 8
    u = np.zeros(n_Gridpoints)
    # x[0] is the final time, x[1:] are the 8 control values.
    u[0]=x[1]; u[1]=x[2]; u[2]=x[3]; u[3]=x[4]; u[4]=x[5]; u[5]=x[6]; u[6]=x[7]; u[7]=x[8]

    Y0 = [0, 0, 1]      # initial state: s=0, v=0, m=1
    t_end = x[0]

    time_steps = np.linspace(0, t_end, n_Gridpoints)
    T = []; S = []; V = []; M = []
    for i in range(n_Gridpoints - 1):
        # Integrate one segment at a time, restarting from the previous
        # segment's final state.
        x_seg = [time_steps[i], time_steps[i+1]]
        z_seg = [u[i], u[i+1]]
        if i == 0:
            Y = solve_ivp(rocketmotion, [0, time_steps[i+1]], Y0, args=[x_seg, z_seg])
        else:
            Y = solve_ivp(rocketmotion, [time_steps[i], time_steps[i+1]],
                          [Y.y[0][-1], Y.y[1][-1], Y.y[2][-1]], args=[x_seg, z_seg])
        T.extend(Y.t)
        S.extend(Y.y[0])
        V.extend(Y.y[1])
        M.extend(Y.y[2])

    execMode = other_args

    if execMode == 1:
        return T[-1] + abs(10 - S[-1])*10 + abs(V[-1])*10
    elif execMode == 2:
        print((T[-1] + abs(10 - S[-1])*10 + abs(V[-1])*10).ndim)
        return T[-1] + abs(10 - S[-1])*10 + abs(V[-1])*10
    else:
        print((T[-1] + abs(10 - S[-1])*10 + abs(V[-1])*10).ndim)
        print(T[-1] + abs(10 - S[-1])*10 + abs(V[-1])*10)
        return T, S, V, M
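
The scalar returned by modes 1 and 2 is the shaped objective t_f + 10*|10 - s(t_f)| + 10*|v(t_f)|; any other mode returns the raw trajectory arrays. As a quick sanity check (a sketch for inspection only, not part of the optimization), the initial guess can be plotted like this:

# Evaluate the initial guess in "plot" mode (any execMode other than 1 or 2).
T, S, V, M = objectiveFunction([7.5, 1.1, 1.1, 0.7, 0.7, 0.7, 0.7, -1.0, -0.8], 3)
plt.plot(T, S, label='position s')
plt.plot(T, V, label='velocity v')
plt.plot(T, M, label='mass m')
plt.xlabel('t'); plt.legend(); plt.show()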

I use reward shaping to make sure the final conditions (position = 10, velocity = 0) are actually reached. Now I am trying to optimize this function with SLSQP:

from scipy.optimize import minimize

bounds_time = (5, 15)
bounds_u = (-1.1, 1.1)
bounds = [bounds_time, bounds_u, bounds_u, bounds_u, bounds_u,
          bounds_u, bounds_u, bounds_u, bounds_u]
x0 = [7.5, 1.1, 1.1, 0.7, 0.7, 0.7, 0.7, -1.0, -0.8]
execMode = 2
other_args = [execMode]
solution = minimize(objectiveFunction, x0, method='SLSQP', bounds=bounds, args=other_args,
                    options={'maxiter': 1000, 'disp': True})

I get the error message "`f0` passed has more than 1 dimension". It is raised in approx_derivative, a function used by the SLSQP algorithm, and I do not understand what it means, so I am asking for your help. The traceback is:

---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
<ipython-input-76-4be01f8fa5f5> in <module>
9 other_args=[execMode]
10 
---> 11 solution = minimize(objectiveFunction,x0,method='SLSQP', bounds=bounds, args=other_args, 
12     options={'maxiter': 1000, 'disp': True})
13 
~\miniconda3\lib\site-packages\scipy\optimize\_minimize.py in minimize(fun, x0, args, method, jac, hess, hessp, bounds, constraints, tol, callback, options)
623         return _minimize_cobyla(fun, x0, args, constraints, **options)
624     elif meth == 'slsqp':
--> 625         return _minimize_slsqp(fun, x0, args, jac, bounds,
626                                constraints, callback=callback, **options)
627     elif meth == 'trust-constr':
~\miniconda3\lib\site-packages\scipy\optimize\slsqp.py in _minimize_slsqp(func, x0, args, jac, bounds, constraints, maxiter, ftol, iprint, disp, eps, callback, finite_diff_rel_step, **unknown_options)
367 
368     # ScalarFunction provides function and gradient evaluation
--> 369     sf = _prepare_scalar_function(func, x, jac=jac, args=args, epsilon=eps,
370                                   finite_diff_rel_step=finite_diff_rel_step,
371                                   bounds=new_bounds)
~\miniconda3\lib\site-packages\scipy\optimize\optimize.py in _prepare_scalar_function(fun, x0, jac, args, bounds, epsilon, finite_diff_rel_step, hess)
259     # ScalarFunction caches. Reuse of fun(x) during grad
260     # calculation reduces overall function evaluations.
--> 261     sf = ScalarFunction(fun, x0, args, grad, hess,
262                         finite_diff_rel_step, bounds, epsilon=epsilon)
263 
~\miniconda3\lib\site-packages\scipy\optimize\_differentiable_functions.py in __init__(self, fun, x0, args, grad, hess, finite_diff_rel_step, finite_diff_bounds, epsilon)
93 
94         self._update_grad_impl = update_grad
---> 95         self._update_grad()
96 
97         # Hessian Evaluation
~\miniconda3\lib\site-packages\scipy\optimize\_differentiable_functions.py in _update_grad(self)
169     def _update_grad(self):
170         if not self.g_updated:
--> 171             self._update_grad_impl()
172             self.g_updated = True
173 
~\miniconda3\lib\site-packages\scipy\optimize\_differentiable_functions.py in update_grad()
89                 self._update_fun()
90                 self.ngev += 1
---> 91                 self.g = approx_derivative(fun_wrapped, self.x, f0=self.f,
92                                            **finite_diff_options)
93 
~\miniconda3\lib\site-packages\scipy\optimize\_numdiff.py in approx_derivative(fun, x0, method, rel_step, abs_step, f0, bounds, sparsity, as_linear_operator, args, kwargs)
386         f0 = np.atleast_1d(f0)
387         if f0.ndim > 1:
--> 388             raise ValueError("`f0` passed has more than 1 dimension.")
389 
390     if np.any((x0 < lb) | (x0 > ub)):
ValueError: `f0` passed has more than 1 dimension

An MRE shows the problem:

from scipy.optimize import minimize

x0 = [7.5, 1.1, 1.1, 0.7, 0.7, 0.7, 0.7, -1.0, -0.8]
execMode = 2
other_args = [execMode]

def objectiveFunction(x, other_args):
    execMode = other_args
    print(execMode)
    return x

solution = minimize(objectiveFunction, x0, method='SLSQP', args=other_args)

and it prints a bunch of `[2]` before crashing. You expected `2` (I know that from your `elif execMode==2:`), but minimize wraps a non-tuple args into a one-element tuple, so your function receives the whole list. The simple solution, therefore, is:

def objectiveFunction(x, other_args):
    ...
    execMode = other_args[0]
    ...
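
This also explains the ValueError itself: a list never compares equal to an int, so with execMode equal to [2] both the ==1 and ==2 branches are skipped, the function falls through to return T, S, V, M, and approx_derivative receives that tuple as f0. A minimal sketch of the failure mode (the arrays here are illustrative stand-ins):

execMode = [2]
print(execMode == 1)   # False
print(execMode == 2)   # False -> the else branch runs and a tuple is returned

import numpy as np
f0 = np.atleast_1d(([0.0, 1.0], [0.0, 1.0]))  # stand-in for (T, S, V, M)
print(f0.ndim)  # 2 -> "`f0` passed has more than 1 dimension"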

As you can see, once an MRE is in place, solving the problem becomes much easier. That is why you were asked to post one on Stack Overflow.
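
An alternative fix, assuming you follow scipy's convention that args should be a tuple: pass the mode as args=(execMode,). minimize leaves a tuple untouched and unpacks it into the call, so your function receives the plain int 2 and no indexing is needed (a sketch based on the driver above):

solution = minimize(objectiveFunction, x0, method='SLSQP', bounds=bounds,
                    args=(execMode,), options={'maxiter': 1000, 'disp': True})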
