def smoother(y, interval=100):
    avg = MovingAverage(interval)

    for i in range(len(y)):
        avg.append(y[i])
        y[i] = avg.get_avg()

    return y
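# Every snippet in this file leans on a MovingAverage helper with add()/append(),
# get_avg(), and reset(). This is a minimal sketch consistent with that usage,
# not necessarily the repo's exact implementation.
from collections import deque

class MovingAverage:
    """ Keeps a running average over a bounded window of values. """

    def __init__(self, max_window_size=1000):
        self.max_window_size = max_window_size
        self.reset()

    def reset(self):
        self.window = deque()
        self.sum = 0.0

    def add(self, elem):
        self.window.append(elem)
        self.sum += elem
        if len(self.window) > self.max_window_size:
            self.sum -= self.window.popleft()

    append = add  # smoother() above calls append(); treat it as an alias of add()

    def get_avg(self):
        return self.sum / max(len(self.window), 1)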
# CPU
net = net
# cudnn.benchmark = True
torch.set_default_tensor_type('torch.FloatTensor')

x = torch.zeros((1, 3, cfg.max_size, cfg.max_size))
y = net(x)

for p in net.prediction_layers:
    print(p.last_conv_size)

print()
for k, a in y.items():
    print(k + ': ', a.size(), torch.sum(a))
exit()

# Benchmark loop (unreachable unless the exit() above is removed)
net(x)
# timer.disable('pass2')
avg = MovingAverage()
try:
    while True:
        timer.reset()
        with timer.env('everything else'):
            net(x)
        avg.add(timer.total_time())
        print('\033[2J')  # Moves console cursor to 0,0
        timer.print_stats()
        print('Avg fps: %.2f\tAvg ms: %.2f ' % (1 / avg.get_avg(), avg.get_avg() * 1000))
except KeyboardInterrupt:
    pass
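# Note: the benchmark loop above runs the forward pass with autograd enabled.
# A hedged variant of the same loop under torch.no_grad(), which skips building
# the autograd graph for pure inference timing (assumes the same net, x, timer,
# and avg as above):
try:
    with torch.no_grad():
        while True:
            timer.reset()
            with timer.env('everything else'):
                net(x)
            avg.add(timer.total_time())
except KeyboardInterrupt:
    pass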
def train():
    if not os.path.exists(args.save_folder):
        os.mkdir(args.save_folder)

    dataset = COCODetection(image_path=cfg.dataset.train_images,
                            info_file=cfg.dataset.train_info,
                            transform=SSDAugmentation(MEANS))

    if args.validation_epoch > 0:
        setup_eval()
        val_dataset = COCODetection(image_path=cfg.dataset.valid_images,
                                    info_file=cfg.dataset.valid_info,
                                    transform=BaseTransform(MEANS))

    # Parallel wraps the underlying module, but when saving and loading we don't want that
    yolact_net = Yolact()
    net = yolact_net
    net.train()

    if args.log:
        log = Log(cfg.name, args.log_folder, dict(args._get_kwargs()),
                  overwrite=(args.resume is None), log_gpu_stats=args.log_gpu)

    # I don't use the timer during training (I use a different timing method).
    # Apparently there's a race condition with multiple GPUs, so disable it just to be safe.
    timer.disable_all()

    # Both of these can set args.resume to None, so do them before the check
    if args.resume == 'interrupt':
        args.resume = SavePath.get_interrupt(args.save_folder)
    elif args.resume == 'latest':
        args.resume = SavePath.get_latest(args.save_folder, cfg.name)

    if args.resume is not None:
        print('Resuming training, loading {}...'.format(args.resume))
        yolact_net.load_weights(args.resume)

        if args.start_iter == -1:
            args.start_iter = SavePath.from_str(args.resume).iteration
    else:
        print('Initializing weights...')
        yolact_net.init_weights(backbone_path=args.save_folder + cfg.backbone.path)

    optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=args.momentum,
                          weight_decay=args.decay)
    criterion = MultiBoxLoss(num_classes=cfg.num_classes,
                             pos_threshold=cfg.positive_iou_threshold,
                             neg_threshold=cfg.negative_iou_threshold,
                             negpos_ratio=cfg.ohem_negpos_ratio)

    if args.batch_alloc is not None:
        args.batch_alloc = [int(x) for x in args.batch_alloc.split(',')]
        if sum(args.batch_alloc) != args.batch_size:
            print('Error: Batch allocation (%s) does not sum to batch size (%s).'
                  % (args.batch_alloc, args.batch_size))
            exit(-1)

    net = CustomDataParallel(NetLoss(net, criterion))
    if args.cuda:
        net = net.cuda()

    # Initialize everything
    if not cfg.freeze_bn:
        yolact_net.freeze_bn()  # Freeze bn so we don't kill our means
    yolact_net(torch.zeros(1, 3, cfg.max_size, cfg.max_size).cuda())
    if not cfg.freeze_bn:
        yolact_net.freeze_bn(True)

    # loss counters
    loc_loss = 0
    conf_loss = 0
    iteration = max(args.start_iter, 0)
    last_time = time.time()

    epoch_size = len(dataset) // args.batch_size
    num_epochs = math.ceil(cfg.max_iter / epoch_size)

    # Which learning rate adjustment step are we on? lr' = lr * gamma ^ step_index
    step_index = 0

    data_loader = data.DataLoader(dataset, args.batch_size,
                                  num_workers=args.num_workers,
                                  shuffle=True, collate_fn=detection_collate,
                                  pin_memory=True)

    save_path = lambda epoch, iteration: SavePath(cfg.name, epoch, iteration).get_path(root=args.save_folder)
    time_avg = MovingAverage()

    global loss_types  # Forms the print order
    loss_avgs = {k: MovingAverage(100) for k in loss_types}

    print('Begin training!')
    print()
    # try-except so you can use ctrl+c to save early and stop training
    try:
        for epoch in range(num_epochs):
            # Resume from start_iter
            if (epoch + 1) * epoch_size < iteration:
                continue

            for datum in data_loader:
                # Stop if we've reached an epoch if we're resuming from start_iter
                if iteration == (epoch + 1) * epoch_size:
                    break

                # Stop at the configured number of iterations even if mid-epoch
                if iteration == cfg.max_iter:
                    break

                # Change a config setting if we've reached the specified iteration
                changed = False
                for change in cfg.delayed_settings:
                    if iteration >= change[0]:
                        changed = True
                        cfg.replace(change[1])

                        # Reset the loss averages because things might have changed
                        for avg in loss_avgs.values():  # iterate the averages themselves, not the dict keys
                            avg.reset()

                # If a config setting was changed, remove it from the list so we don't keep checking
                if changed:
                    cfg.delayed_settings = [x for x in cfg.delayed_settings if x[0] > iteration]

                # Warm up by linearly interpolating the learning rate from some smaller value
                if cfg.lr_warmup_until > 0 and iteration <= cfg.lr_warmup_until:
                    set_lr(optimizer, (args.lr - cfg.lr_warmup_init) * (iteration / cfg.lr_warmup_until) + cfg.lr_warmup_init)

                # Adjust the learning rate at the given iterations, but also if we resume from past that iteration
                while step_index < len(cfg.lr_steps) and iteration >= cfg.lr_steps[step_index]:
                    step_index += 1
                    set_lr(optimizer, args.lr * (args.gamma ** step_index))

                # Zero the grad to get ready to compute gradients
                optimizer.zero_grad()

                # Forward Pass + Compute loss at the same time (see CustomDataParallel and NetLoss)
                losses = net(datum)

                losses = {k: v.mean() for k, v in losses.items()}  # Mean here because Dataparallel
                loss = sum([losses[k] for k in losses])
                # no_inf_mean removes some components from the loss, so make sure to backward through all of it
                # all_loss = sum([v.mean() for v in losses.values()])

                # Backprop
                loss.backward()  # Do this to free up vram even if loss is not finite
                if torch.isfinite(loss).item():
                    optimizer.step()

                # Add the loss to the moving average for bookkeeping
                for k in losses:
                    loss_avgs[k].add(losses[k].item())

                cur_time = time.time()
                elapsed = cur_time - last_time
                last_time = cur_time

                # Exclude graph setup from the timing information
                if iteration != args.start_iter:
                    time_avg.add(elapsed)

                if iteration % 10 == 0:
                    eta_str = str(datetime.timedelta(seconds=(cfg.max_iter - iteration) * time_avg.get_avg())).split('.')[0]

                    total = sum([loss_avgs[k].get_avg() for k in losses])
                    loss_labels = sum([[k, loss_avgs[k].get_avg()] for k in loss_types if k in losses], [])

                    print(('[%3d] %7d ||' + (' %s: %.3f |' * len(losses)) + ' T: %.3f || ETA: %s || timer: %.3f')
                          % tuple([epoch, iteration] + loss_labels + [total, eta_str, elapsed]), flush=True)

                if args.log:
                    precision = 5
                    loss_info = {k: round(losses[k].item(), precision) for k in losses}
                    loss_info['T'] = round(loss.item(), precision)

                    if args.log_gpu:
                        log.log_gpu_stats = (iteration % 10 == 0)  # nvidia-smi is sloooow

                    # cur_lr is module-level state maintained by set_lr (see the sketch after this function)
                    log.log('train', loss=loss_info, epoch=epoch, iter=iteration,
                            lr=round(cur_lr, 10), elapsed=elapsed)

                    log.log_gpu_stats = args.log_gpu

                iteration += 1

                if iteration % args.save_interval == 0 and iteration != args.start_iter:
                    if args.keep_latest:
                        latest = SavePath.get_latest(args.save_folder, cfg.name)

                    print('Saving state, iter:', iteration)
                    yolact_net.save_weights(save_path(epoch, iteration))

                    if args.keep_latest and latest is not None:
                        if args.keep_latest_interval <= 0 or iteration % args.keep_latest_interval != args.save_interval:
                            print('Deleting old save...')
                            os.remove(latest)

            # This is done per epoch
            if args.validation_epoch > 0:
                if epoch % args.validation_epoch == 0 and epoch > 0:
                    compute_validation_map(epoch, iteration, yolact_net, val_dataset,
                                           log if args.log else None)

        # Compute validation mAP after training is finished
        compute_validation_map(epoch, iteration, yolact_net, val_dataset,
                               log if args.log else None)
    except KeyboardInterrupt:
        if args.interrupt:
            print('Stopping early. Saving network...')

            # Delete previous copy of the interrupted network so we don't spam the weights folder
            SavePath.remove_interrupt(args.save_folder)

            yolact_net.save_weights(save_path(epoch, repr(iteration) + '_interrupt'))
        exit()

    yolact_net.save_weights(save_path(epoch, iteration))
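# train() above calls set_lr() during warmup and at each decay step, and later logs
# cur_lr. A minimal sketch consistent with that usage (the repo's actual helper may
# differ in details):
cur_lr = 0.0  # module-level value read by the logging code in train()

def set_lr(optimizer, new_lr):
    """ Applies new_lr to every param group and remembers it for logging. """
    global cur_lr
    for param_group in optimizer.param_groups:
        param_group['lr'] = new_lr
    cur_lr = new_lr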
def evalvideo(self, net: Yolact, path: str):
    # If the path is a digit, parse it as a webcam index
    is_webcam = path.isdigit()

    if is_webcam:
        vid = cv2.VideoCapture(int(path))
    else:
        vid = cv2.VideoCapture(path)

    if not vid.isOpened():
        print('Could not open video "%s"' % path)
        exit(-1)

    net = CustomDataParallel(net).cuda()
    transform = torch.nn.DataParallel(FastBaseTransform()).cuda()
    frame_times = MovingAverage(100)
    fps = 0
    frame_time_target = 1 / vid.get(cv2.CAP_PROP_FPS)
    running = True

    def cleanup_and_exit():
        print()
        pool.terminate()
        vid.release()
        cv2.destroyAllWindows()
        exit()

    def get_next_frame(vid):
        return [vid.read()[1] for _ in range(args.video_multiframe)]

    def transform_frame(frames):
        with torch.no_grad():
            frames = [torch.from_numpy(frame).cuda().float() for frame in frames]
            return frames, transform(torch.stack(frames, 0))

    def eval_network(inp):
        with torch.no_grad():
            frames, imgs = inp
            return frames, net(imgs)

    def prep_frame(inp):
        with torch.no_grad():
            frame, preds = inp
            return self.prep_display(preds, frame, None, None,
                                     undo_transform=False, class_color=True)

    frame_buffer = Queue()
    video_fps = 0

    # All this timing code is to make sure playback stays smooth and in sync
    # with the source framerate.
    def play_video():
        nonlocal frame_buffer, running, video_fps, is_webcam

        video_frame_times = MovingAverage(100)
        frame_time_stabilizer = frame_time_target
        last_time = None
        stabilizer_step = 0.0005

        while running:
            frame_time_start = time.time()

            if not frame_buffer.empty():
                next_time = time.time()
                if last_time is not None:
                    video_frame_times.add(next_time - last_time)
                    video_fps = 1 / video_frame_times.get_avg()
                cv2.imshow(path, frame_buffer.get())
                last_time = next_time
                # self.image_pub.publish(self.bridge.cv2_to_imgmsg(frame_buffer.get(), "bgr8"))

            if cv2.waitKey(1) == 27:  # Press Escape to close
                running = False

            buffer_size = frame_buffer.qsize()
            if buffer_size < args.video_multiframe:
                frame_time_stabilizer += stabilizer_step
            elif buffer_size > args.video_multiframe:
                frame_time_stabilizer -= stabilizer_step
                if frame_time_stabilizer < 0:
                    frame_time_stabilizer = 0

            new_target = frame_time_stabilizer if is_webcam else max(frame_time_stabilizer, frame_time_target)

            next_frame_target = max(2 * new_target - video_frame_times.get_avg(), 0)
            target_time = frame_time_start + next_frame_target - 0.001  # Let's just subtract a millisecond to be safe

            # This gives more accurate timing than sleeping the whole amount at once
            while time.time() < target_time:
                time.sleep(0.001)

    extract_frame = lambda x, i: (x[0][i] if x[1][i] is None else x[0][i].to(x[1][i]['box'].device), [x[1][i]])

    # Prime the network on the first frame because I do some thread unsafe things otherwise
    print('Initializing model... ', end='')
    eval_network(transform_frame(get_next_frame(vid)))
    print('Done.')

    # For each frame the sequence of functions it needs to go through to be processed (in reversed order)
    sequence = [prep_frame, eval_network, transform_frame]
    pool = ThreadPool(processes=len(sequence) + args.video_multiframe + 2)
    pool.apply_async(play_video)

    active_frames = []

    print()
    while vid.isOpened() and running:
        start_time = time.time()

        # Start loading the next frames from the disk
        next_frames = pool.apply_async(get_next_frame, args=(vid,))

        # For each frame in our active processing queue, dispatch a job
        # for that frame using the current function in the sequence
        for frame in active_frames:
            frame['value'] = pool.apply_async(sequence[frame['idx']], args=(frame['value'],))

        # For each frame whose job was the last in the sequence (i.e. for all final outputs)
        for frame in active_frames:
            if frame['idx'] == 0:
                frame_buffer.put(frame['value'].get())

        # Remove the finished frames from the processing queue
        active_frames = [x for x in active_frames if x['idx'] > 0]

        # Finish evaluating every frame in the processing queue and advance their position in the sequence
        for frame in list(reversed(active_frames)):
            frame['value'] = frame['value'].get()
            frame['idx'] -= 1

            if frame['idx'] == 0:
                # Split this up into individual threads for prep_frame since it doesn't support batch size
                active_frames += [{'value': extract_frame(frame['value'], i), 'idx': 0}
                                  for i in range(1, args.video_multiframe)]
                frame['value'] = extract_frame(frame['value'], 0)

        # Finish loading in the next frames and add them to the processing queue
        active_frames.append({'value': next_frames.get(), 'idx': len(sequence) - 1})

        # Compute FPS
        frame_times.add(time.time() - start_time)
        fps = args.video_multiframe / frame_times.get_avg()

        print('\rProcessing FPS: %.2f | Video Playback FPS: %.2f | Frames in Buffer: %d    '
              % (fps, video_fps, frame_buffer.qsize()), end='')

    cleanup_and_exit()
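# The loop above staggers batches of frames through a list of stages so the GPU can
# work on one batch while others are being read, transformed, or drawn. A
# self-contained toy version of the same apply_async pattern (the stage functions
# here are illustrative placeholders, not from the source):
from multiprocessing.pool import ThreadPool

def stage_transform(x): return x + 1
def stage_network(x):   return x * 2
def stage_prep(x):      return str(x)

toy_sequence = [stage_prep, stage_network, stage_transform]  # reversed order, as above
toy_pool = ThreadPool(processes=4)
toy_active = [{'value': 10, 'idx': len(toy_sequence) - 1}]
toy_results = []

while toy_active:
    # Dispatch every item to its current stage, then collect it and advance its position
    for item in toy_active:
        item['value'] = toy_pool.apply_async(toy_sequence[item['idx']], args=(item['value'],))
    for item in toy_active:
        item['value'] = item['value'].get()
        item['idx'] -= 1
    toy_results += [item['value'] for item in toy_active if item['idx'] < 0]
    toy_active = [item for item in toy_active if item['idx'] >= 0]

toy_pool.close()
print(toy_results)  # ['22']: 10 -> transform -> 11 -> network -> 22 -> prep -> '22'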
def evalvideo(net: Yolact, path: str, out_path: str = None):
    # If the path is a digit, parse it as a webcam index
    is_webcam = path.isdigit()

    # If the input image size is constant, this makes things faster (hence why we can use it in a video setting).
    cudnn.benchmark = True

    if is_webcam:
        vid = cv2.VideoCapture(int(path))
    else:
        vid = cv2.VideoCapture(path)

    if not vid.isOpened():
        print('Could not open video "%s"' % path)
        exit(-1)

    target_fps = round(vid.get(cv2.CAP_PROP_FPS))
    frame_width = round(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
    frame_height = round(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))

    if is_webcam:
        num_frames = float('inf')
    else:
        num_frames = round(vid.get(cv2.CAP_PROP_FRAME_COUNT))

    net = CustomDataParallel(net).cuda()
    transform = torch.nn.DataParallel(FastBaseTransform(with_cuda=net.with_cuda))
    if net.with_cuda:
        transform = transform.cuda()

    frame_times = MovingAverage(100)
    fps = 0
    frame_time_target = 1 / target_fps
    running = True
    fps_str = ''
    vid_done = False
    frames_displayed = 0

    if out_path is not None:
        out = cv2.VideoWriter(out_path, cv2.VideoWriter_fourcc(*"mp4v"),
                              target_fps, (frame_width, frame_height))

    def cleanup_and_exit():
        print()
        pool.terminate()
        vid.release()
        if out_path is not None:
            out.release()
        cv2.destroyAllWindows()
        exit()

    def get_next_frame(vid):
        frames = []
        for idx in range(args.video_multiframe):
            frame = vid.read()[1]
            if frame is None:
                return frames
            frames.append(frame)
        return frames

    def transform_frame(frames):
        with torch.no_grad():
            # Match the transform's device: frames go to the GPU only when the net uses it
            # (the original unconditionally called .cuda(), which breaks the CPU path)
            frames = [torch.from_numpy(frame).float() for frame in frames]
            if net.with_cuda:
                frames = [frame.cuda() for frame in frames]
            return frames, transform(torch.stack(frames, 0))

    def eval_network(inp):
        with torch.no_grad():
            frames, imgs = inp
            num_extra = 0
            while imgs.size(0) < args.video_multiframe:
                imgs = torch.cat([imgs, imgs[0].unsqueeze(0)], dim=0)
                num_extra += 1
            out = net(imgs)
            if num_extra > 0:
                out = out[:-num_extra]
            return frames, out

    def prep_frame(inp, fps_str):
        with torch.no_grad():
            frame, preds = inp
            return prep_display(preds, frame, None, None, undo_transform=False,
                                class_color=True, fps_str=fps_str)

    frame_buffer = Queue()
    video_fps = 0

    # All this timing code is to make sure playback stays smooth and in sync
    # with the source framerate.
    def play_video():
        try:
            nonlocal frame_buffer, running, video_fps, is_webcam, num_frames, frames_displayed, vid_done

            video_frame_times = MovingAverage(100)
            frame_time_stabilizer = frame_time_target
            last_time = None
            stabilizer_step = 0.0005
            progress_bar = ProgressBar(30, num_frames)

            while running:
                frame_time_start = time.time()

                if not frame_buffer.empty():
                    next_time = time.time()
                    if last_time is not None:
                        video_frame_times.add(next_time - last_time)
                        video_fps = 1 / video_frame_times.get_avg()
                    if out_path is None:
                        cv2.imshow(path, frame_buffer.get())
                    else:
                        out.write(frame_buffer.get())
                    frames_displayed += 1
                    last_time = next_time

                    if out_path is not None:
                        if video_frame_times.get_avg() == 0:
                            fps = 0
                        else:
                            fps = 1 / video_frame_times.get_avg()
                        progress = frames_displayed / num_frames * 100
                        progress_bar.set_val(frames_displayed)

                        print('\rProcessing Frames %s %6d / %6d (%5.2f%%) %5.2f fps    '
                              % (repr(progress_bar), frames_displayed, num_frames, progress, fps), end='')

                # This is split because you don't want savevideo to require cv2 display functionality (see #197)
                if out_path is None and cv2.waitKey(1) == 27:
                    # Press Escape to close
                    running = False
                if not (frames_displayed < num_frames):
                    running = False

                if not vid_done:
                    buffer_size = frame_buffer.qsize()
                    if buffer_size < args.video_multiframe:
                        frame_time_stabilizer += stabilizer_step
                    elif buffer_size > args.video_multiframe:
                        frame_time_stabilizer -= stabilizer_step
                        if frame_time_stabilizer < 0:
                            frame_time_stabilizer = 0
                    new_target = frame_time_stabilizer if is_webcam else max(frame_time_stabilizer, frame_time_target)
                else:
                    new_target = frame_time_target

                next_frame_target = max(2 * new_target - video_frame_times.get_avg(), 0)
                target_time = frame_time_start + next_frame_target - 0.001  # Let's just subtract a millisecond to be safe

                if out_path is None or args.emulate_playback:
                    # This gives more accurate timing than sleeping the whole amount at once
                    while time.time() < target_time:
                        time.sleep(0.001)
                else:
                    # Let's not starve the main thread, now
                    time.sleep(0.001)
        except:
            # See issue #197 for why this is necessary
            import traceback
            traceback.print_exc()

    extract_frame = lambda x, i: (x[0][i] if x[1][i]['detection'] is None
                                  else x[0][i].to(x[1][i]['detection']['box'].device), [x[1][i]])

    # Prime the network on the first frame because I do some thread unsafe things otherwise
    print('Initializing model... ', end='')
    first_batch = eval_network(transform_frame(get_next_frame(vid)))
    print('Done.')

    # For each frame the sequence of functions it needs to go through to be processed (in reversed order)
    sequence = [prep_frame, eval_network, transform_frame]
    pool = ThreadPool(processes=len(sequence) + args.video_multiframe + 2)
    pool.apply_async(play_video)
    active_frames = [{'value': extract_frame(first_batch, i), 'idx': 0}
                     for i in range(len(first_batch[0]))]

    print()
    if out_path is None:
        print('Press Escape to close.')
    try:
        while vid.isOpened() and running:
            # Hard limit on frames in buffer so we don't run out of memory >.>
            while frame_buffer.qsize() > 100:
                time.sleep(0.001)

            start_time = time.time()

            # Start loading the next frames from the disk
            if not vid_done:
                next_frames = pool.apply_async(get_next_frame, args=(vid,))
            else:
                next_frames = None

            if not (vid_done and len(active_frames) == 0):
                # For each frame in our active processing queue, dispatch a job
                # for that frame using the current function in the sequence
                for frame in active_frames:
                    _args = [frame['value']]
                    if frame['idx'] == 0:
                        _args.append(fps_str)
                    frame['value'] = pool.apply_async(sequence[frame['idx']], args=_args)

                # For each frame whose job was the last in the sequence (i.e. for all final outputs)
                for frame in active_frames:
                    if frame['idx'] == 0:
                        frame_buffer.put(frame['value'].get())

                # Remove the finished frames from the processing queue
                active_frames = [x for x in active_frames if x['idx'] > 0]

                # Finish evaluating every frame in the processing queue and advance their position in the sequence
                for frame in list(reversed(active_frames)):
                    frame['value'] = frame['value'].get()
                    frame['idx'] -= 1

                    if frame['idx'] == 0:
                        # Split this up into individual threads for prep_frame since it doesn't support batch size
                        active_frames += [{'value': extract_frame(frame['value'], i), 'idx': 0}
                                          for i in range(1, len(frame['value'][0]))]
                        frame['value'] = extract_frame(frame['value'], 0)

                # Finish loading in the next frames and add them to the processing queue
                if next_frames is not None:
                    frames = next_frames.get()
                    if len(frames) == 0:
                        vid_done = True
                    else:
                        active_frames.append({'value': frames, 'idx': len(sequence) - 1})

                # Compute FPS
                frame_times.add(time.time() - start_time)
                fps = args.video_multiframe / frame_times.get_avg()
            else:
                fps = 0

            fps_str = 'Processing FPS: %.2f | Video Playback FPS: %.2f | Frames in Buffer: %d' % (fps, video_fps, frame_buffer.qsize())
            if not args.display_fps:
                print('\r' + fps_str + '    ', end='')
    except KeyboardInterrupt:
        print('\nStopping...')

    cleanup_and_exit()
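# Hypothetical usage of evalvideo(); the checkpoint name is a placeholder, not from
# the source. A digit string selects a webcam, a plain path plays a file, and an
# out_path writes the annotated video instead of displaying it:
if __name__ == '__main__':
    net = Yolact()
    net.load_weights('weights/yolact_base.pth')  # placeholder checkpoint path
    net.eval()
    evalvideo(net, '0')                      # webcam index 0, shown in a window
    # evalvideo(net, 'in.mp4')               # play a file with detections overlaid
    # evalvideo(net, 'in.mp4', 'out.mp4')    # write annotated frames to out.mp4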
def evaluate(net: Yolact, dataset, train_mode=False):
    net.detect.use_fast_nms = args.fast_nms
    net.detect.use_cross_class_nms = args.cross_class_nms
    cfg.mask_proto_debug = args.mask_proto_debug

    # TODO: Currently we do not support Fast Mask Re-scoring in evalimage, evalimages, and evalvideo
    if args.image is not None:
        if ':' in args.image:
            inp, out = args.image.split(':')
            evalimage(net, inp, out)
        else:
            evalimage(net, args.image)
        return
    elif args.images is not None:
        inp, out = args.images.split(':')
        evalimages(net, inp, out)
        return
    elif args.video is not None:
        if ':' in args.video:
            inp, out = args.video.split(':')
            evalvideo(net, inp, out)
        else:
            evalvideo(net, args.video)
        return

    frame_times = MovingAverage()
    dataset_size = len(dataset) if args.max_images < 0 else min(args.max_images, len(dataset))
    progress_bar = ProgressBar(30, dataset_size)

    print()

    if not args.display and not args.benchmark:
        # For each class and iou, stores tuples (score, isPositive)
        # Index ap_data[type][iouIdx][classIdx]
        ap_data = {
            'box': [[APDataObject() for _ in cfg.dataset.class_names] for _ in iou_thresholds],
            'mask': [[APDataObject() for _ in cfg.dataset.class_names] for _ in iou_thresholds]
        }
        detections = Detections()
    else:
        timer.disable('Load Data')

    dataset_indices = list(range(len(dataset)))

    if args.shuffle:
        random.shuffle(dataset_indices)
    elif not args.no_sort:
        # Do a deterministic shuffle based on the image ids
        #
        # I do this because on python 3.5 dictionary key order is *random*, while in 3.6 it's
        # the order of insertion. That means on python 3.6, the images come in the order they are in
        # in the annotations file. For some reason, the first images in the annotations file are
        # the hardest. To combat this, I use a hard-coded hash function based on the image ids
        # to shuffle the indices we use. That way, no matter what python version or how pycocotools
        # handles the data, we get the same result every time.
        hashed = [badhash(x) for x in dataset.ids]
        dataset_indices.sort(key=lambda x: hashed[x])

    dataset_indices = dataset_indices[:dataset_size]

    try:
        # Main eval loop
        for it, image_idx in enumerate(dataset_indices):
            timer.reset()

            with timer.env('Load Data'):
                img, gt, gt_masks, h, w, num_crowd = dataset.pull_item(image_idx)

                # Test flag, do not upvote
                if cfg.mask_proto_debug:
                    with open('scripts/info.txt', 'w') as f:
                        f.write(str(dataset.ids[image_idx]))
                    np.save('scripts/gt.npy', gt_masks)

                batch = Variable(img.unsqueeze(0))
                if args.cuda:
                    batch = batch.cuda()

            with timer.env('Network Extra'):
                preds = net(batch)

            # Perform the meat of the operation here depending on our mode.
            if args.display:
                img_numpy = prep_display(preds, img, h, w)
            elif args.benchmark:
                prep_benchmark(preds, h, w)
            else:
                prep_metrics(ap_data, preds, img, gt, gt_masks, h, w, num_crowd,
                             dataset.ids[image_idx], detections)

            # First couple of images take longer because we're constructing the graph.
            # Since that's technically initialization, don't include those in the FPS calculations.
            if it > 1:
                frame_times.add(timer.total_time())

            if args.display:
                if it > 1:
                    print('Avg FPS: %.4f' % (1 / frame_times.get_avg()))
                plt.imshow(img_numpy)
                plt.title(str(dataset.ids[image_idx]))
                plt.show()
            elif not args.no_bar:
                if it > 1:
                    fps = 1 / frame_times.get_avg()
                else:
                    fps = 0
                progress = (it + 1) / dataset_size * 100
                progress_bar.set_val(it + 1)
                print('\rProcessing Images %s %6d / %6d (%5.2f%%) %5.2f fps    '
                      % (repr(progress_bar), it + 1, dataset_size, progress, fps), end='')

        if not args.display and not args.benchmark:
            print()
            if args.output_coco_json:
                print('Dumping detections...')
                if args.output_web_json:
                    detections.dump_web()
                else:
                    detections.dump()
            else:
                if not train_mode:
                    print('Saving data...')
                    with open(args.ap_data_file, 'wb') as f:
                        pickle.dump(ap_data, f)

                return calc_map(ap_data)
        elif args.benchmark:
            print()
            print()
            print('Stats for the last frame:')
            timer.print_stats()
            avg_seconds = frame_times.get_avg()
            print('Average: %5.2f fps, %5.2f ms' % (1 / frame_times.get_avg(), 1000 * avg_seconds))

    except KeyboardInterrupt:
        print('Stopping...')
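# badhash() above only needs to be a cheap, deterministic integer mixer so the eval
# order is stable across Python versions. One plausible implementation based on a
# well-known 32-bit integer hash (an assumption -- the repo's exact constants may differ):
def badhash(x):
    x = (((x >> 16) ^ x) * 0x045d9f3b) & 0xFFFFFFFF
    x = (((x >> 16) ^ x) * 0x045d9f3b) & 0xFFFFFFFF
    x = ((x >> 16) ^ x) & 0xFFFFFFFF
    return x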
# GPU
net = net.cuda()
torch.set_default_tensor_type('torch.cuda.FloatTensor')

x = torch.zeros((1, 3, cfg.max_size, cfg.max_size))
y = net(x)

for p in net.prediction_layers:
    print(p.last_conv_size)

print()
for k, a in y.items():
    print(k + ': ', a.size(), torch.sum(a))
exit()

# Benchmark loop (unreachable unless the exit() above is removed)
net(x)
# timer.disable('pass2')
avg = MovingAverage()
try:
    while True:
        timer.reset()
        with timer.env('everything else'):
            net(x)
        avg.add(timer.total_time())
        print('\033[2J')  # Moves console cursor to 0,0
        timer.print_stats()
        print('Avg fps: %.2f\tAvg ms: %.2f ' % (1 / avg.get_avg(), avg.get_avg() * 1000))
except KeyboardInterrupt:
    pass
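# CUDA kernels launch asynchronously, so wall-clock timing of net(x) on the GPU can
# under-report unless the device is synchronized before reading the timer. A hedged
# adjustment to the loop above (assumes the same net, x, timer, and avg):
try:
    with torch.no_grad():
        while True:
            timer.reset()
            with timer.env('everything else'):
                net(x)
                torch.cuda.synchronize()  # wait for queued kernels so the timing is honest
            avg.add(timer.total_time())
except KeyboardInterrupt:
    pass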
    pin_memory=True,
)

optimizer = optim.SGD(
    net.parameters(),
    lr=cfg.lr,
    momentum=cfg.momentum,
    weight_decay=cfg.decay,
)
criterion = yolact.layers.MultiBoxLoss(
    num_classes=cfg.num_classes,
    pos_threshold=cfg.positive_iou_threshold,
    neg_threshold=cfg.negative_iou_threshold,
    negpos_ratio=3,
)

# %%
save_path = lambda epoch, iteration: SavePath(cfg.name, epoch, iteration).get_path(root=cfg.save_folder)
time_avg = MovingAverage()

loss_types = ["B", "C", "M", "P", "D", "E", "S"]
loss_avgs = {k: MovingAverage(100) for k in loss_types}

print("Begin training!")

# %%
# try-except so you can use ctrl+c to save early and stop training
step_index = 0
iteration = 0
last_time = time.time()

epoch_size = len(train_dataset) // cfg.batch_size
num_epochs = math.ceil(cfg.max_iter / epoch_size)

for epoch in range(num_epochs):
    for datum in data_loader:
        # Stop if we've reached an epoch if we're resuming from start_iter
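# For reference, the save_path lambda above derives checkpoint names from the config.
# Assuming the usual '<name>_<epoch>_<iteration>.pth' convention (an assumption, not
# verified against SavePath itself):
#   save_path(2, 10000)  ->  '<cfg.save_folder>/<cfg.name>_2_10000.pth'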