RuntimeError: min(): Expected reduction dim to be specified for input.numel() == 0. Specify the reduction dim with the 'dim' argument


RuntimeError                              Traceback (most recent call last)
/tmp/ipykernel_33/2056227650.py in <module>
----> 1 learn.fit_one_cycle(n, max_learning_rate)
2 learn.recorder.plot_losses()
/opt/conda/lib/python3.7/site-packages/fastai/train.py in fit_one_cycle(learn, cyc_len, max_lr, moms, div_factor, pct_start, final_div, wd, callbacks, tot_epochs, start_epoch)
21     callbacks.append(OneCycleScheduler(learn, max_lr, moms=moms, div_factor=div_factor, pct_start=pct_start,
22                                        final_div=final_div, tot_epochs=tot_epochs, start_epoch=start_epoch))
---> 23     learn.fit(cyc_len, max_lr, wd=wd, callbacks=callbacks)
24 
25 def fit_fc(learn:Learner, tot_epochs:int=1, lr:float=defaults.lr,  moms:Tuple[float,float]=(0.95,0.85), start_pct:float=0.72,
/opt/conda/lib/python3.7/site-packages/fastai/basic_train.py in fit(self, epochs, lr, wd, callbacks)
198         else: self.opt.lr,self.opt.wd = lr,wd
199         callbacks = [cb(self) for cb in self.callback_fns + listify(defaults.extra_callback_fns)] + listify(callbacks)
--> 200         fit(epochs, self, metrics=self.metrics, callbacks=self.callbacks+callbacks)
201 
202     def create_opt(self, lr:Floats, wd:Floats=0.)->None:
/opt/conda/lib/python3.7/site-packages/fastai/basic_train.py in fit(epochs, learn, callbacks, metrics)
104             if not cb_handler.skip_validate and not learn.data.empty_val:
105                 val_loss = validate(learn.model, learn.data.valid_dl, loss_func=learn.loss_func,
--> 106                                        cb_handler=cb_handler, pbar=pbar)
107             else: val_loss=None
108             if cb_handler.on_epoch_end(val_loss): break
/opt/conda/lib/python3.7/site-packages/fastai/basic_train.py in validate(model, dl, loss_func, cb_handler, pbar, average, n_batch)
61             if not is_listy(yb): yb = [yb]
62             nums.append(first_el(yb).shape[0])
---> 63             if cb_handler and cb_handler.on_batch_end(val_losses[-1]): break
64             if n_batch and (len(nums)>=n_batch): break
65         nums = np.array(nums, dtype=np.float32)
/opt/conda/lib/python3.7/site-packages/fastai/callback.py in on_batch_end(self, loss)
306         "Handle end of processing one batch with `loss`."
307         self.state_dict['last_loss'] = loss
--> 308         self('batch_end', call_mets = not self.state_dict['train'])
309         if self.state_dict['train']:
310             self.state_dict['iteration'] += 1
/opt/conda/lib/python3.7/site-packages/fastai/callback.py in __call__(self, cb_name, call_mets, **kwargs)
248         "Call through to all of the `CallbakHandler` functions."
249         if call_mets:
--> 250             for met in self.metrics: self._call_and_update(met, cb_name, **kwargs)
251         for cb in self.callbacks: self._call_and_update(cb, cb_name, **kwargs)
252 
/opt/conda/lib/python3.7/site-packages/fastai/callback.py in _call_and_update(self, cb, cb_name, **kwargs)
239     def _call_and_update(self, cb, cb_name, **kwargs)->None:
240         "Call `cb_name` on `cb` and update the inner state."
--> 241         new = ifnone(getattr(cb, f'on_{cb_name}')(**self.state_dict, **kwargs), dict())
242         for k,v in new.items():
243             if k not in self.state_dict:
/opt/conda/lib/python3.7/site-packages/object_detection_fastai/callbacks/callbacks.py in on_batch_end(self, last_output, last_target, **kwargs)
125             scores = scores[:total_nms_examples]
126             preds = preds[:total_nms_examples]
--> 127             to_keep = nms(bbox_pred, scores, self.nms_thresh)
128             bbox_pred, preds, scores = bbox_pred[to_keep].cpu(), preds[to_keep].cpu(), scores[to_keep].cpu()
129 
/opt/conda/lib/python3.7/site-packages/object_detection_fastai/helper/object_detection_helper.py in nms(boxes, scores, thresh)
156         mask_keep = iou_vals <= thresh
157         if len(mask_keep.nonzero()) == 0: break
--> 158         idx_first = mask_keep.nonzero().min().item()
159         boxes, scores, indexes = boxes[mask_keep], scores[mask_keep], indexes[mask_keep]
160     return LongTensor(to_keep)
RuntimeError: min(): Expected reduction dim to be specified for input.numel() == 0. Specify the reduction dim with the 'dim' argument.
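
For reference, the failure can be reproduced in isolation; this is a minimal sketch of my own (not taken from fastai or the notebook) showing that calling min() without a dim argument on an empty tensor raises exactly this error:

import torch

t = torch.empty(0)  # a tensor with numel() == 0
try:
    t.min()  # reduction without a 'dim' argument on an empty input
except RuntimeError as e:
    print(e)  # min(): Expected reduction dim to be specified for input.numel() == 0. ...

The full notebook code that triggers the traceback follows.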
# %% [code] {"id":"asY9Gl86zcYl","outputId":"71cad981-691c-489e-f203-66ee5df9e25a","execution":{"iopub.status.busy":"2022-05-02T07:47:26.313181Z","iopub.execute_input":"2022-05-02T07:47:26.313767Z","iopub.status.idle":"2022-05-02T07:47:36.173129Z","shell.execute_reply.started":"2022-05-02T07:47:26.313727Z","shell.execute_reply":"2022-05-02T07:47:36.172282Z"}}

%reload_ext autoreload
%autoreload 2
%matplotlib inline
!pip install -U plotly
import json
from pathlib import Path
import plotly
import plotly.graph_objects as go
import plotly.express as px
from plotly.subplots import make_subplots
from tqdm import tqdm
import pandas as pd
import random
import cv2
# %% [code] {"id":"NvOPXHfk2PzM","outputId":"4849c9ec-32d2-4aeb-b8f6-5a881423392e","execution":{"iopub.status.busy":"2022-05-02T07:47:36.175278Z","iopub.execute_input":"2022-05-02T07:47:36.175539Z","iopub.status.idle":"2022-05-02T07:47:36.278379Z","shell.execute_reply.started":"2022-05-02T07:47:36.175505Z","shell.execute_reply":"2022-05-02T07:47:36.277469Z"}}
folder = "midog-challenge"
midog_folder = Path("../input") / Path(folder)
print(list(midog_folder.glob("*.*")))
# %% [code] {"id":"3hzo-Io-zsRk","outputId":"5a4b2a8b-e79a-4371-dd41-c819c44026a9","execution":{"iopub.status.busy":"2022-05-02T07:47:36.280155Z","iopub.execute_input":"2022-05-02T07:47:36.280431Z","iopub.status.idle":"2022-05-02T07:47:45.692203Z","shell.execute_reply.started":"2022-05-02T07:47:36.280394Z","shell.execute_reply":"2022-05-02T07:47:45.691328Z"}}
!pip install -U object-detection-fastai
from object_detection_fastai.helper.wsi_loader import *
from object_detection_fastai.loss.RetinaNetFocalLoss import RetinaNetFocalLoss
from object_detection_fastai.models.RetinaNet import RetinaNet
from object_detection_fastai.callbacks.callbacks import BBMetrics, PascalVOCMetricByDistance, PascalVOCMetric, PascalVOCMetricByDistance
# %% [code] {"id":"jiZJLWqD5Rpr","execution":{"iopub.status.busy":"2022-05-02T07:47:45.694445Z","iopub.execute_input":"2022-05-02T07:47:45.694685Z","iopub.status.idle":"2022-05-02T07:47:45.801985Z","shell.execute_reply.started":"2022-05-02T07:47:45.694652Z","shell.execute_reply":"2022-05-02T07:47:45.800941Z"}}
image_folder = midog_folder / "images"
hamamatsu_rx_ids = list(range(0, 51))
hamamatsu_360_ids = list(range(51, 101))
aperio_ids = list(range(101, 151))
leica_ids = list(range(151, 201))
# %% [code] {"id":"tNwJXJufaVt-","outputId":"0e710552-9a03-4825-f1a3-97444a2be238","execution":{"iopub.status.busy":"2022-05-02T07:47:45.803635Z","iopub.execute_input":"2022-05-02T07:47:45.804129Z","iopub.status.idle":"2022-05-02T07:47:46.032320Z","shell.execute_reply.started":"2022-05-02T07:47:45.804090Z","shell.execute_reply":"2022-05-02T07:47:46.031354Z"}}

annotation_file = midog_folder / "MIDOG.json"
print(annotation_file,"  ",image_folder)
rows = []
with open(annotation_file) as f:
    data = json.load(f)

categories = {1: 'mitotic figure', 2: 'hard negative'}

for row in data["images"]:
    file_name = row["file_name"]
    image_id = row["id"]
    width = row["width"]
    height = row["height"]

    scanner = "Hamamatsu XR"
    if image_id in hamamatsu_360_ids:
        scanner = "Hamamatsu S360"
    if image_id in aperio_ids:
        scanner = "Aperio CS"
    if image_id in leica_ids:
        scanner = "Leica GT450"

    for annotation in [anno for anno in data['annotations'] if anno["image_id"] == image_id]:
        box = annotation["bbox"]
        cat = categories[annotation["category_id"]]
        rows.append([file_name, image_id, width, height, box, cat, scanner])
df = pd.DataFrame(rows, columns=["file_name", "image_id", "width", "height", "box", "cat", "scanner"])
df.head()
# %% [markdown] {"id":"r2Tm_N5PqbMJ"}
# ### Visual Examples
# %% [code] {"id":"KIALOeDIuCKo","execution":{"iopub.status.busy":"2022-05-02T07:47:53.807583Z","iopub.execute_input":"2022-05-02T07:47:53.807862Z","iopub.status.idle":"2022-05-02T07:47:53.904670Z","shell.execute_reply.started":"2022-05-02T07:47:53.807827Z","shell.execute_reply":"2022-05-02T07:47:53.903659Z"}}
def sample_function(y, classes, size, level_dimensions, level):
    width, height = level_dimensions[level]
    if len(y[0]) == 0:
        return randint(0, width - size[0]), randint(0, height - size[1])
    else:
        #if randint(0, 5) < 2:
        if True:
            class_id = np.random.choice(classes, 1)[0]  # select a random class
            ids = np.array(y[1]) == class_id  # filter the annotations according to the selected class
            # randomly select one of the filtered annotations as seed for the training patch
            xmin, ymin, _, _ = np.array(y[0])[ids][randint(0, np.count_nonzero(ids) - 1)]

            # Shift the selected annotation away from the patch center by a random offset.
            xmin += random.randint(-size[0] // 2, size[0] // 2)
            ymin += random.randint(-size[1] // 2, size[1] // 2)
            xmin, ymin = max(0, int(xmin - size[0] / 2)), max(0, int(ymin - size[1] / 2))
            xmin, ymin = min(xmin, width - size[0]), min(ymin, height - size[1])
            return xmin, ymin
        else:
            return randint(0, width - size[0]), randint(0, height - size[1])
# %% [code] {"id":"HH_0sG8w4TfA","outputId":"d9de5667-10c5-46b1-c0e0-28a99e7f95d7","execution":{"iopub.status.busy":"2022-05-02T07:47:58.157246Z","iopub.execute_input":"2022-05-02T07:47:58.157508Z","iopub.status.idle":"2022-05-02T07:48:00.797900Z","shell.execute_reply.started":"2022-05-02T07:47:58.157480Z","shell.execute_reply":"2022-05-02T07:48:00.797151Z"}}
def create_wsi_container(annotations_df: pd.DataFrame):
    container = []
    for image_name in tqdm(annotations_df["file_name"].unique()):
        image_annos = annotations_df[annotations_df["file_name"] == image_name]
        bboxes = [box for box in image_annos["box"]]
        labels = [label for label in image_annos["cat"]]
        container.append(SlideContainer(image_folder / image_name, y=[bboxes, labels], level=res_level,
                                        width=patch_size, height=patch_size, sample_func=sample_function))
    return container

train_scanner = "Aperio CS"    # ["Hamamatsu XR", "Hamamatsu S360", "Aperio CS"]
val_scanner = "Hamamatsu XR"   # ["Hamamatsu XR", "Hamamatsu S360", "Aperio CS"]
patch_size = 256
res_level = 0

train_annos = df[df["scanner"].isin(train_scanner.split(","))]
train_container = create_wsi_container(train_annos)
val_annos = df[df["scanner"].isin(val_scanner.split(","))]
valid_container = create_wsi_container(val_annos)
f"Created: {len(train_container)} training WSI container and {len(valid_container)} validation WSI container"
# %% [code] {"cellView":"form","id":"Mei_iD1sxJCA","outputId":"2c87f1b1-e9fd-4de1-bd16-6fdc5224d05a","execution":{"iopub.status.busy":"2022-05-02T07:48:18.070337Z","iopub.execute_input":"2022-05-02T07:48:18.070756Z","iopub.status.idle":"2022-05-02T07:48:18.213409Z","shell.execute_reply.started":"2022-05-02T07:48:18.070722Z","shell.execute_reply":"2022-05-02T07:48:18.212544Z"}}
import numpy as np
train_samples_per_scanner = 1500 
val_samples_per_scanner = 500
train_images = list(np.random.choice(train_container, train_samples_per_scanner))
print('training_images =',len(train_images))
valid_images = list(np.random.choice(valid_container, val_samples_per_scanner))
print('validation_images =',len(valid_images))

# %% [code] {"id":"UZKoHWjR1mUG","outputId":"5cd9552e-8a9a-4aeb-f898-614e30e17eca","execution":{"iopub.status.busy":"2022-05-02T07:48:20.127048Z","iopub.execute_input":"2022-05-02T07:48:20.129167Z","iopub.status.idle":"2022-05-02T07:48:48.039604Z","shell.execute_reply.started":"2022-05-02T07:48:20.129128Z","shell.execute_reply":"2022-05-02T07:48:48.038869Z"}}
batch_size = 64
do_flip = True
flip_vert = True 
max_rotate = 90 
max_zoom = 1.1 
max_lighting = 0.2
max_warp = 0.2
p_affine = 0.75 
p_lighting = 0.75 
tfms = get_transforms(do_flip=do_flip,
                      flip_vert=flip_vert,
                      max_rotate=max_rotate,
                      max_zoom=max_zoom,
                      max_lighting=max_lighting,
                      max_warp=max_warp,
                      p_affine=p_affine,
                      p_lighting=p_lighting)

train, valid = ObjectItemListSlide(train_images), ObjectItemListSlide(valid_images)
item_list = ItemLists(".", train, valid)
lls = item_list.label_from_func(lambda x: x.y, label_cls=SlideObjectCategoryList)
lls = lls.transform(tfms, tfm_y=True, size=patch_size)
data = lls.databunch(bs=batch_size, collate_fn=bb_pad_collate, num_workers=0).normalize()
# %% [code] {"id":"g5PZ9e-c2k1S","outputId":"317422c4-b572-4898-dd82-84c054f98735","execution":{"iopub.status.busy":"2022-05-02T07:48:54.016278Z","iopub.execute_input":"2022-05-02T07:48:54.016534Z","iopub.status.idle":"2022-05-02T07:48:54.455317Z","shell.execute_reply.started":"2022-05-02T07:48:54.016506Z","shell.execute_reply":"2022-05-02T07:48:54.454672Z"}}
scales = [2]

ratios=[1]
#The feature map sizes. [(64,64), (32,32) , (16,16), (8,8), (4,4)]
sizes=[(32,32)]
anchors = create_anchors(sizes=sizes, ratios=ratios, scales=scales)

fig,ax = plt.subplots(figsize=(15,15))
ax.imshow(image2np(data.valid_ds[0][0].data))
for i, bbox in enumerate(anchors[:len(scales) * len(ratios) * len(sizes)]):
    bb = bbox.numpy()
    x = (bb[0] + 1) * patch_size / 2
    y = (bb[1] + 1) * patch_size / 2
    w = bb[2] * patch_size / 2
    h = bb[3] * patch_size / 2

    rect = [x, y, w, h]
    draw_rect(ax, rect)

# %% [code] {"id":"Mfrl8VJ94edJ","outputId":"3793f0b0-fa4c-4c87-a4ed-14840d18b4c2","execution":{"iopub.status.busy":"2022-05-02T07:49:03.926600Z","iopub.execute_input":"2022-05-02T07:49:03.926887Z","iopub.status.idle":"2022-05-02T07:49:39.263251Z","shell.execute_reply.started":"2022-05-02T07:49:03.926856Z","shell.execute_reply":"2022-05-02T07:49:39.262482Z"}}
all_boxes, all_labels = show_anchors_on_images(data, anchors, figsize=(12, 12))
# %% [code] {"id":"J-qEw_bN41cG","outputId":"eaf1c350-1c1f-4d50-a919-311ee538e612","execution":{"iopub.status.busy":"2022-05-02T07:49:39.264920Z","iopub.execute_input":"2022-05-02T07:49:39.265265Z","iopub.status.idle":"2022-05-02T07:49:39.546091Z","shell.execute_reply.started":"2022-05-02T07:49:39.265226Z","shell.execute_reply":"2022-05-02T07:49:39.545301Z"}}
from fastai.utils.collect_env import show_install
show_install()
# %% [code] {"id":"te6ci-Z35_5I","outputId":"67f2f18b-041b-44fd-fdc6-1a53121c445a","execution":{"iopub.status.busy":"2022-05-02T07:49:39.548856Z","iopub.execute_input":"2022-05-02T07:49:39.549088Z","iopub.status.idle":"2022-05-02T07:49:47.657381Z","shell.execute_reply.started":"2022-05-02T07:49:39.549058Z","shell.execute_reply":"2022-05-02T07:49:47.656648Z"}}
backbone = "ResNet101" #["ResNet18", "ResNet34", "ResNet50", "ResNet101", "ResNet150"]
backbone_model = models.resnet18
if backbone == "ResNet34":
backbone_model = models.resnet34
if backbone == "ResNet50":
backbone_model = models.resnet50
if backbone == "ResNet101":
backbone_model = models.resnet101
if backbone == "ResNet150":
backbone_model = models.resnet150
pre_trained_on_imagenet = True 
encoder = create_body(models.resnet101, pre_trained_on_imagenet, -2)

loss_function = "FocalLoss" 
if loss_function == "FocalLoss":
crit = RetinaNetFocalLoss(anchors)

channels = 128 

final_bias = -4 

n_conv = 3 
model = RetinaNet(encoder, n_classes=data.train_ds.c, 
n_anchors=len(scales) * len(ratios), 
sizes=[size[0] for size in sizes], 
chs=channels, # number of hidden layers for the classification head
final_bias=final_bias,
n_conv=n_conv # Number of hidden layers
)


# %% [code] {"id":"waS3UZNaNz4p","outputId":"9b49bd00-7b21-4cad-8588-a46248953370","execution":{"iopub.status.busy":"2022-05-02T07:49:47.659276Z","iopub.execute_input":"2022-05-02T07:49:47.659604Z","iopub.status.idle":"2022-05-02T07:49:47.758263Z","shell.execute_reply.started":"2022-05-02T07:49:47.659566Z","shell.execute_reply":"2022-05-02T07:49:47.757182Z"}}
voc = PascalVOCMetric(anchors, patch_size, [str(i) for i in data.train_ds.y.classes[1:]])
voc

# %% [code] {"id":"nwgGmvsPZ3bN","execution":{"iopub.status.busy":"2022-05-02T07:49:47.759735Z","iopub.execute_input":"2022-05-02T07:49:47.760020Z","iopub.status.idle":"2022-05-02T07:49:47.973584Z","shell.execute_reply.started":"2022-05-02T07:49:47.759982Z","shell.execute_reply":"2022-05-02T07:49:47.972749Z"}}
learn = Learner(data, model, loss_func=crit,
                callback_fns=[BBMetrics, ShowGraph],
                metrics=[voc])
learn.split([model.encoder[6], model.c5top5])
learn.freeze_to(-2)

# %% [code] {"id":"NWwQRvzw6vnr","outputId":"e9c8b89b-0a90-468b-de6d-351fd09bf467","execution":{"iopub.status.busy":"2022-05-02T07:47:46.267766Z","iopub.status.idle":"2022-05-02T07:47:46.268297Z","shell.execute_reply.started":"2022-05-02T07:47:46.268040Z","shell.execute_reply":"2022-05-02T07:47:46.268066Z"}}
learn.lr_find()
learn.recorder.plot(suggestion=True)
# %% [code] {"id":"gK1s61jSJtie","outputId":"aa0f9d65-3d19-42d6-b17e-7a29fe5fca8f","execution":{"iopub.status.busy":"2022-05-01T17:20:57.239658Z","iopub.execute_input":"2022-05-01T17:20:57.239975Z","iopub.status.idle":"2022-05-01T19:08:05.944655Z","shell.execute_reply.started":"2022-05-01T17:20:57.239931Z","shell.execute_reply":"2022-05-01T19:08:05.939947Z"}}
max_learning_rate = 1e-3
n=250
learn.fit_one_cycle(n, max_learning_rate)
learn.recorder.plot_sched()
learn.recorder.plot_losses()

This error occurs whenever max() or min() is called on an empty tensor. In the traceback above, `mask_keep.nonzero()` inside `nms` (object_detection_helper.py) evaluates to a tensor with `numel() == 0`, so the `.min()` reduction on line 158 fails.
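
Given the failing line, one possible workaround is to test emptiness with numel() instead of len(): len() only counts the rows of the nonzero() result, so a result with zero elements but a nonzero first dimension (a shape nonzero() can produce for 0-dim inputs) slips past the `len(...) == 0` check, while `numel() == 0` catches every empty case. A minimal sketch under that assumption (safe_first_index is a hypothetical helper, not part of object_detection_fastai):

import torch

def safe_first_index(mask_keep: torch.Tensor):
    # Hypothetical guard for the pattern used in nms():
    # bail out before calling .min() on an empty index tensor.
    nz = mask_keep.nonzero()
    if nz.numel() == 0:  # true for shape (0, 1) and also for shape (1, 0)
        return None      # nothing left to keep
    return nz.min().item()

Applied to the nms() loop, this would mean replacing `if len(mask_keep.nonzero()) == 0: break` with `if mask_keep.nonzero().numel() == 0: break` so the loop exits before the empty reduction is attempted.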
