I'm trying to fit multiple small Keras models in parallel on a single GPU. For reasons, I need to pop them off a list and train them one step at a time. Since I had no luck with the standard multiprocessing module, I'm using pathos.
What I'm trying to do is something like this:
from pathos.multiprocessing import ProcessPool as Pool
import tensorflow as tf
import keras.backend as K

# worker function mapped over the pool
def multiprocess_step(self, model):
    K.set_session(sess)
    with sess.graph.as_default():
        model = step(model, sess)
    return model

# train a single model for one step inside the given session
def step(model, sess):
    K.set_session(sess)
    with sess.graph.as_default():
        model.fit(x=data['X_train'], y=data['y_train'],
                  batch_size=batch_size,
                  validation_data=(data['X_test'], data['y_test']),
                  verbose=verbose,
                  shuffle=True,
                  initial_epoch=self.step_num - 1)
    return model

# single session pinned to GPU 0, shared with the workers
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.visible_device_list = "0"
sess = tf.Session(config=config)
K.set_session(sess)

with sess.graph.as_default():
    pool = Pool(8).map
    model_list = pool(multiprocess_step, model_list)
but no matter what I try, I keep getting an error claiming that the models don't seem to be on the same graph...
ValueError: Tensor("training/RMSprop/Variable:0", shape=(25, 352), dtype=float32_ref) must be from the same graph as Tensor("RMSprop/rho/read:0", shape=(), dtype=float32).
The exception originates from the model.fit() line, so I must be doing something wrong with assigning the session's graph, even though I've tried setting it in every possible location. Does anyone have experience with something similar?
Assuming Keras with the backend set to TensorFlow, you can use the following code for parallel processing of multiple model invocations / multiple model loads, by giving each model its own graph and session.
import os
from keras.models import model_from_json
from tensorflow import ConfigProto, Graph, Session

# config is assumed to be a tf.ConfigProto, e.g. with allow_growth as in the question
config = ConfigProto()
config.gpu_options.allow_growth = True

def model1(dir_model):
    model = os.path.join(dir_model, 'model.json')
    dir_weights = os.path.join(dir_model, 'model.h5')
    # the first model lives in its own graph and session
    graph1 = Graph()
    with graph1.as_default():
        session1 = Session(graph=graph1, config=config)
        with session1.as_default():
            with open(model, 'r') as data:
                model_json = data.read()
            model_1 = model_from_json(model_json)
            model_1.load_weights(dir_weights)
    return model_1, session1, graph1

def model_2(dir_model):
    model = os.path.join(dir_model, 'model.json')
    dir_weights = os.path.join(dir_model, 'model.h5')
    # the second model gets a separate graph and session
    graph2 = Graph()
    with graph2.as_default():
        session2 = Session(graph=graph2, config=config)
        with session2.as_default():
            with open(model, 'r') as data:
                model_json = data.read()
            model_2 = model_from_json(model_json)
            model_2.load_weights(dir_weights)
    return model_2, session2, graph2
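For example, assuming each directory holds the model.json / model.h5 pair expected above (both paths below are made up for illustration), the loaders might be used like this; note that binding the result of model_2() to the name model_2 shadows the loader function, which is harmless here since each loader is only called once:
# hypothetical directories holding the serialized models
model_1, session1, graph1 = model1('/path/to/first_model')
model_2, session2, graph2 = model_2('/path/to/second_model')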
And to invoke a particular model, proceed as follows. For predictions with model 1:
K.set_session(session1)
with graph1.as_default():
    img_pred[img_name] = patch_dict[np.argmax(np.squeeze(model_1.predict(img_invoke)))]
and for model 2 it is the same, with:
K.set_session(session2)
with graph2.as_default():
    img_pred[img_name] = patch_dict[np.argmax(np.squeeze(model_2.predict(img_invoke)))]
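Putting the pieces together, one way to wrap the per-model bookkeeping is a small helper (predict_with_both is a hypothetical name) that switches session and graph before each predict() call. This is only a sketch: img_invoke and patch_dict are assumed to come from your own preprocessing, exactly as in the snippets above.
import numpy as np
import keras.backend as K

def predict_with_both(img_invoke):
    # first model: activate its session and graph before predicting
    K.set_session(session1)
    with graph1.as_default():
        pred_1 = patch_dict[np.argmax(np.squeeze(model_1.predict(img_invoke)))]
    # second model: switch to the other session/graph pair
    K.set_session(session2)
    with graph2.as_default():
        pred_2 = patch_dict[np.argmax(np.squeeze(model_2.predict(img_invoke)))]
    return pred_1, pred_2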
The following was suggested on the Keras issue tracker. I'm not sure about the relative merits of this approach compared to using multiprocessing.
from keras.layers import Input, LSTM, Dense
from keras.models import Model

# two independent towers, each with its own input
in_1 = Input()
lstm_1 = LSTM(...)(in_1)
out_1 = Dense(...)(lstm_1)

in_2 = Input()
lstm_2 = LSTM(...)(in_2)
out_2 = Dense(...)(lstm_2)

# per-tower models for prediction, plus a merged model for joint training
model_1 = Model(input=in_1, output=out_1)
model_2 = Model(input=in_2, output=out_2)
model = Model(input=[in_1, in_2], output=[out_1, out_2])

model.compile(...)
model.fit(...)

model_1.predict(...)
model_2.predict(...)
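For concreteness, here is a minimal self-contained version of that sketch. The layer sizes, input shapes, and random training data are made up for illustration, and it is written against the Keras 2 functional API (inputs=/outputs= instead of the older input=/output=):
import numpy as np
from keras.layers import Input, LSTM, Dense
from keras.models import Model

# two small towers with made-up shapes: sequences of length 10 with 8 features
in_1 = Input(shape=(10, 8))
out_1 = Dense(1)(LSTM(16)(in_1))

in_2 = Input(shape=(10, 8))
out_2 = Dense(1)(LSTM(16)(in_2))

model_1 = Model(inputs=in_1, outputs=out_1)
model_2 = Model(inputs=in_2, outputs=out_2)

# merged model: one fit() call updates both towers in the same graph
model = Model(inputs=[in_1, in_2], outputs=[out_1, out_2])
model.compile(optimizer='rmsprop', loss='mse')

x1 = np.random.rand(32, 10, 8)
x2 = np.random.rand(32, 10, 8)
y1 = np.random.rand(32, 1)
y2 = np.random.rand(32, 1)
model.fit([x1, x2], [y1, y2], epochs=1, batch_size=8)

# the individual models share weights with the merged one,
# so each can still be used for prediction on its own
print(model_1.predict(x1).shape)
print(model_2.predict(x2).shape)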