When I run this script — python train_net.py --num-gpu 4 --config-file configs/ovseg_swinB_vitL_bs32_120k.yaml MODEL.CLIP_ADAPTER.CLIP_MODEL_NAME ViT-L/14 —
to reproduce the results, training proceeds normally, but after 14999 iterations, when evaluation starts (inference on 500 batches), I hit the following error:
`Traceback (most recent call last):
File "train_net.py", line 302, in
launch(
File "/home/Anaconda3/envs/ovseg/lib/python3.8/site-packages/detectron2/engine/launch.py", line 67, in launch
mp.spawn(
File "/home/Anaconda3/envs/ovseg/lib/python3.8/site-packages/torch/multiprocessing/spawn.py", line 230, in spawn
return start_processes(fn, args, nprocs, join, daemon, start_method='spawn')
File "/home/Anaconda3/envs/ovseg/lib/python3.8/site-packages/torch/multiprocessing/spawn.py", line 188, in start_processes
while not context.join():
File "/home/Anaconda3/envs/ovseg/lib/python3.8/site-packages/torch/multiprocessing/spawn.py", line 150, in join
raise ProcessRaisedException(msg, error_index, failed_process.pid)
torch.multiprocessing.spawn.ProcessRaisedException:
-- Process 3 terminated with the following error:
Traceback (most recent call last):
File "/home/Anaconda3/envs/ovseg/lib/python3.8/site-packages/torch/multiprocessing/spawn.py", line 59, in _wrap
fn(i, *args)
File "/home/Anaconda3/envs/ovseg/lib/python3.8/site-packages/detectron2/engine/launch.py", line 126, in _distributed_worker
main_func(*args)
File "/root/ov-seg/train_net.py", line 296, in main
return trainer.train()
File "/home/Anaconda3/envs/ovseg/lib/python3.8/site-packages/detectron2/engine/defaults.py", line 484, in train
super().train(self.start_iter, self.max_iter)
File "/home/Anaconda3/envs/ovseg/lib/python3.8/site-packages/detectron2/engine/train_loop.py", line 150, in train
self.after_step()
File "/home/Anaconda3/envs/ovseg/lib/python3.8/site-packages/detectron2/engine/train_loop.py", line 180, in after_step
h.after_step()
File "/home/Anaconda3/envs/ovseg/lib/python3.8/site-packages/detectron2/engine/hooks.py", line 552, in after_step
self._do_eval()
File "/home/Anaconda3/envs/ovseg/lib/python3.8/site-packages/detectron2/engine/hooks.py", line 525, in _do_eval
results = self._func()
File "/home/Anaconda3/envs/ovseg/lib/python3.8/site-packages/detectron2/engine/defaults.py", line 453, in test_and_save_results
self._last_eval_results = self.test(self.cfg, self.model)
File "/home/Anaconda3/envs/ovseg/lib/python3.8/site-packages/detectron2/engine/defaults.py", line 608, in test
results_i = inference_on_dataset(model, data_loader, evaluator)
File "/home/Anaconda3/envs/ovseg/lib/python3.8/site-packages/detectron2/evaluation/evaluator.py", line 158, in inference_on_dataset
outputs = model(inputs)
File "/home/Anaconda3/envs/ovseg/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
return forward_call(*input, **kwargs)
File "/home/Anaconda3/envs/ovseg/lib/python3.8/site-packages/torch/nn/parallel/distributed.py", line 886, in forward
output = self.module(*inputs[0], **kwargs[0])
File "/home/Anaconda3/envs/ovseg/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
return forward_call(*input, **kwargs)
File "/root/ov-seg/open_vocab_seg/ovseg_model.py", line 212, in forward
r, regions = self.semantic_inference(
File "/root/ov-seg/open_vocab_seg/ovseg_model.py", line 237, in semantic_inference
clip_cls, regions, valid_flag = self.clip_adapter(
File "/home/Anaconda3/envs/ovseg/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
return forward_call(*input, **kwargs)
File "/root/ov-seg/open_vocab_seg/modeling/clip_adapter/adapter.py", line 126, in forward
(regions, unnorm_regions), region_masks, valid_flag = self._preprocess_image(image, mask, normalize=normalize)
ValueError: not enough values to unpack (expected 3, got 2)`