def init_hidden(self):
    # NOTE: nn.LSTM expects hidden/cell states shaped (num_layers, batch_size,
    # hidden_size), so using self.N_time as the middle dimension only works
    # when the batch size equals the number of time steps.
    h0 = torch.zeros(self.N_LSTM_layers, self.N_time, self.N_LSTM_Out).to(self.device)
    c0 = torch.zeros(self.N_LSTM_layers, self.N_time, self.N_LSTM_Out).to(self.device)
    return h0, c0
def init_hidden(self):
    # the 1 here in the middle is the minibatch size
    h, c = (autograd.Variable(torch.zeros(LSTM_LAYERS, 1, HIDDEN_NODES), requires_grad=False),
            autograd.Variable(torch.zeros(LSTM_LAYERS, 1, HIDDEN_NODES), requires_grad=False))
    if CUDA:
        return h.cuda(), c.cuda()
    return h, c
def init_hidden(self):
    # the 1 here in the middle is the minibatch size
    h = autograd.Variable(torch.zeros(self.layers, 1, self.nodes))
    c = autograd.Variable(torch.zeros(self.layers, 1, self.nodes))
    if torch.cuda.is_available() and self.with_cuda:
        h = h.cuda()
        c = c.cuda()
    return h, c
def init_hidden(self):
    # the 1 here in the middle is the minibatch size
    h = autograd.Variable(torch.zeros(self.n_layers, 1, self.n_nodes), requires_grad=False)
    c = autograd.Variable(torch.zeros(self.n_layers, 1, self.n_nodes), requires_grad=False)
    if torch.cuda.is_available():
        h = h.cuda()
        c = c.cuda()
    return h, c
def init_hidden(self):
    # self.batch here in the middle is the minibatch size
    h = torch.zeros(LSTM_LAYERS, self.batch, self.hidden_nodes)
    c = torch.zeros(LSTM_LAYERS, self.batch, self.hidden_nodes)
    if self.use_cuda:
        h = h.cuda()
        c = c.cuda()
    h, c = (Variable(h), Variable(c))
    return h, c
def __init__(self, input_size, hidden_size, lstm_drop):
    super(Decoder, self).__init__()
    self.trans_hidden = nn.Linear(input_size, hidden_size)
    self.trans_ceil = nn.Linear(input_size, hidden_size)  # projects to the initial LSTM cell state ("ceil" is likely "cell")
    self.rnn = EncoderLSTM(hidden_size, hidden_size, 1, False, lstm_drop)
    self.trans_pred = nn.Linear(hidden_size, hidden_size)
    self.eos_embed = nn.Parameter(torch.zeros(size=(1, hidden_size)))
    self.nop_embed = nn.Parameter(torch.zeros(size=(1, hidden_size)))
def init_hidden(self):
    # the 1 here in the middle is the minibatch size
    if self.cuda:
        h, c = (Variable(torch.zeros(LSTM_LAYERS, 1, HIDDEN_NODES).cuda(), requires_grad=False),
                Variable(torch.zeros(LSTM_LAYERS, 1, HIDDEN_NODES).cuda(), requires_grad=False))
    else:
        h, c = (Variable(torch.zeros(LSTM_LAYERS, 1, HIDDEN_NODES), requires_grad=False),
                Variable(torch.zeros(LSTM_LAYERS, 1, HIDDEN_NODES), requires_grad=False))
    return h, c
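# Minimal runnable sketch showing how an init_hidden() like the variants above
# is typically consumed; LSTM_LAYERS, HIDDEN_NODES, INPUT_SIZE and the nn.LSTM
# below are illustrative assumptions, not part of the original snippets.
import torch
import torch.nn as nn

LSTM_LAYERS, HIDDEN_NODES, INPUT_SIZE = 2, 32, 8

def init_hidden():
    # modern equivalent of the Variable(...) versions above
    h = torch.zeros(LSTM_LAYERS, 1, HIDDEN_NODES)
    c = torch.zeros(LSTM_LAYERS, 1, HIDDEN_NODES)
    return h, c

lstm = nn.LSTM(INPUT_SIZE, HIDDEN_NODES, num_layers=LSTM_LAYERS)
x = torch.randn(5, 1, INPUT_SIZE)          # (seq_len, batch=1, input_size)
out, (h_n, c_n) = lstm(x, init_hidden())   # fresh zero state for each sequence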
def load_classifier(name='resnet101', n=2):
    # Loads a pretrained model reshaped to n-class output
    import pretrainedmodels  # https://github.com/Cadene/pretrained-models.pytorch#torchvision
    model = pretrainedmodels.__dict__[name](num_classes=1000, pretrained='imagenet')

    # Display model properties
    for x in ['model.input_size', 'model.input_space', 'model.input_range', 'model.mean', 'model.std']:
        print(x + ' =', eval(x))

    # Reshape output to n classes
    filters = model.last_linear.weight.shape[1]
    model.last_linear.bias = torch.nn.Parameter(torch.zeros(n))
    model.last_linear.weight = torch.nn.Parameter(torch.zeros(n, filters))
    model.last_linear.out_features = n
    return model
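# Usage sketch for load_classifier() above (requires the `pretrainedmodels`
# package and downloads ImageNet weights on first call; the 224x224 input size
# matches the model.input_size the function prints):
modelc = load_classifier(name='resnet101', n=2)
modelc.eval()
with torch.no_grad():
    out = modelc(torch.zeros(1, 3, 224, 224))  # -> shape (1, 2); all zeros until the new head is trained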
def forward(self, rel_embed, nodes_embed, h_t_pair_label, h_t_pair_path,
            h_t_pair_path_len, b_ind, h_ind, t_ind, global_step):
    max_path_num = h_t_pair_path.shape[-2]
    max_step_num = h_t_pair_path.shape[-1]
    N_bt = nodes_embed.shape[0]

    # no_rel_mask = (torch.sum(h_t_pair_label[b_ind, h_ind, t_ind, 1:], dim=-1) == 0)

    # pick the first non-empty path for each selected (head, tail) pair
    select_path_len = h_t_pair_path_len[b_ind, h_ind, t_ind]
    select = (torch.cumsum((select_path_len > 0).long(), dim=-1) == 1) & (select_path_len > 0)
    path_select_id = torch.nonzero(select.long())[:, 1]
    select_path_id = h_t_pair_path[b_ind, h_ind, t_ind][
        torch.arange(path_select_id.shape[0]).cuda(), path_select_id]
    select_path_len = h_t_pair_path_len[b_ind, h_ind, t_ind][
        torch.arange(path_select_id.shape[0]).cuda(), path_select_id]

    # gather node embeddings along the path and prepend the EOS embedding
    path_bt_id = b_ind[..., None].repeat(1, max_step_num)
    path_embed = nodes_embed[path_bt_id, select_path_id]
    path_embed = torch.cat((self.eos_embed.repeat(path_embed.shape[0], 1, 1), path_embed), dim=1)

    # initialize the decoder LSTM state from the relation embedding
    init_h = torch.relu(self.trans_hidden(rel_embed)).unsqueeze(dim=0)
    init_c = torch.relu(self.trans_ceil(rel_embed)).unsqueeze(dim=0)
    seq_hidden, _, _ = self.rnn(path_embed, select_path_len, init_h, init_c)

    # score every decoded step against the NOP-extended node set
    nodes_ext = torch.cat((self.nop_embed.repeat(N_bt, 1, 1), nodes_embed), dim=1)
    vocb = torch.relu(self.trans_pred(nodes_ext[b_ind]))
    seq_pred = torch.einsum("abd,acd->abc", seq_hidden, vocb)

    # shift ids by one for the NOP slot and append a terminating zero target
    select_path_id = select_path_id + 1
    select_path_id = torch.cat(
        (select_path_id,
         torch.zeros(select_path_id.shape[0], 1, dtype=torch.long).cuda()),
        dim=-1)
    # select_path_len -= 1
    seqlen, w_ids = torch.broadcast_tensors(
        select_path_len.unsqueeze(-1),
        torch.arange(0, max_step_num + 1).cuda()[None, ...])
    seq_mask = w_ids < seqlen
    select_path_id[~seq_mask] = 0
    return seq_pred, select_path_id, seq_mask
def zero_pad(tensor):
    batch_size = tensor.size(0)
    real_len = tensor.size(1)
    dim = tensor.size(2)
    if MAX_LEN > real_len:
        zeros = Variable(torch.zeros(batch_size, MAX_LEN - real_len, dim)).detach()
        zeros = zeros.to(DEVICE)
        tensor = torch.cat((tensor, zeros), 1)
    return tensor
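# Minimal check of zero_pad() above. MAX_LEN and DEVICE are the module globals
# the function relies on; the values below are illustrative assumptions.
import torch
from torch.autograd import Variable  # needed by zero_pad

MAX_LEN, DEVICE = 10, torch.device('cpu')
t = torch.randn(2, 7, 4)
assert zero_pad(t).shape == (2, MAX_LEN, 4)  # zero-padded along dim 1 (time)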
def detect(save_img=False):
    out, source, weights, half, view_img, save_txt, imgsz = \
        opt.output, opt.source, opt.weights, opt.half, opt.view_img, opt.save_txt, opt.img_size
    webcam = source == '0' or source.startswith('rtsp') or source.startswith('http') or source.endswith('.txt')

    # Initialize
    #device = torch_utils.select_device(opt.device)
    device = select_device(opt.device)
    if os.path.exists(out):
        shutil.rmtree(out)  # delete output folder
    os.makedirs(out)  # make new output folder

    # Load model
    '''
    The original method needs exactly the same folder structure and imports the model
    was trained with; this is a pickle limitation of the torch model format. The
    alternative would be loading from github, which is not working. Another issue is
    when training and detection run on different devices (CPU/GPU).
    '''
    # DB 20201018 = Original method
    #google_utils.attempt_download(weights)
    attempt_download(weights)
    model = torch.load(weights, map_location=device)['model']  # ORIGINAL
    # torch.save(torch.load(weights, map_location=device), weights)  # update model if SourceChangeWarning
    # model.fuse()
    model.to(device).eval()  # ATTENTION! UNCOMMENT THIS IF YOU UNCOMMENT model = torch.load(weights, map_location=device)['model']  # ORIGINAL
    #model.to(device).float().eval()  # DB 20201018: detect on CPU using GPU trained model

    # DB 20201018: Load from github method
    #model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True).to(device).eval()  # DB 20201016 MODEL IMPORT FIX
    #model = torch.hub.load('danfbento/SIB2', 'mod5_test_weight', pretrained=True).to(device).eval()  # DB 20201016 MODEL IMPORT FIX

    # Second-stage classifier
    classify = False
    if classify:
        #modelc = torch_utils.load_classifier(name='resnet101', n=2)  # initialize
        modelc = load_classifier(name='resnet101', n=2)  # initialize
        modelc.load_state_dict(torch.load('weights/resnet101.pt', map_location=device)['model'])  # load weights
        modelc.to(device).eval()

    # Half precision
    half = half and device.type != 'cpu'  # half precision only supported on CUDA
    if half:
        model.half()

    # Set Dataloader
    vid_path, vid_writer = None, None
    if webcam:
        view_img = True
        torch.backends.cudnn.benchmark = True  # set True to speed up constant image size inference
        dataset = LoadStreams(source, img_size=imgsz)
    else:
        save_img = True
        dataset = LoadImages(source, img_size=imgsz)

    # Get names and colors
    names = model.names if hasattr(model, 'names') else model.modules.names
    colors = [[random.randint(0, 255) for _ in range(3)] for _ in range(len(names))]

    # Run inference
    t0 = time.time()
    img = torch.zeros((1, 3, imgsz, imgsz), device=device)  # init img
    _ = model(img.half() if half else img.float()) if device.type != 'cpu' else None  # run once
    for path, img, im0s, vid_cap in dataset:
        img = torch.from_numpy(img).to(device)
        img = img.half() if half else img.float()  # uint8 to fp16/32
        img /= 255.0  # 0 - 255 to 0.0 - 1.0
        if img.ndimension() == 3:
            img = img.unsqueeze(0)

        # Inference
        #t1 = torch_utils.time_synchronized()
        t1 = time_synchronized()
        pred = model(img, augment=opt.augment)[0]
        #t2 = torch_utils.time_synchronized()
        t2 = time_synchronized()

        # to float
        if half:
            pred = pred.float()

        # Apply NMS
        pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres,
                                   fast=True, classes=opt.classes, agnostic=opt.agnostic_nms)

        # Apply Classifier
        if classify:
            pred = apply_classifier(pred, modelc, img, im0s)

        # Process detections
        for i, det in enumerate(pred):  # detections per image
            if webcam:  # batch_size >= 1
                p, s, im0 = path[i], '%g: ' % i, im0s[i].copy()
            else:
                p, s, im0 = path, '', im0s

            save_path = str(Path(out) / Path(p).name)
            s += '%gx%g ' % img.shape[2:]  # print string
            gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
            if det is not None and len(det):
                # Rescale boxes from img_size to im0 size
                det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()

                # Print results
                for c in det[:, -1].unique():
                    n = (det[:, -1] == c).sum()  # detections per class
                    s += '%g %ss, ' % (n, names[int(c)])  # add to string

                # Write results
                for *xyxy, conf, cls in det:
                    if save_txt:  # Write to file
                        xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
                        with open(save_path[:save_path.rfind('.')] + '.txt', 'a') as file:
                            file.write(('%g ' * 5 + '\n') % (cls, *xywh))  # label format

                    if save_img or view_img:  # Add bbox to image
                        label = '%s %.2f' % (names[int(cls)], conf)
                        plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=3)

            # Print time (inference + NMS)
            print('%sDone. (%.3fs)' % (s, t2 - t1))

            # Stream results
            if view_img:
                cv2.imshow(p, im0)
                if cv2.waitKey(1) == ord('q'):  # q to quit
                    raise StopIteration

            # Save results (image with detections)
            if save_img:
                if dataset.mode == 'images':
                    cv2.imwrite(save_path, im0)
                else:
                    if vid_path != save_path:  # new video
                        vid_path = save_path
                        if isinstance(vid_writer, cv2.VideoWriter):
                            vid_writer.release()  # release previous video writer
                        fps = vid_cap.get(cv2.CAP_PROP_FPS)
                        w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
                        h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
                        vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*opt.fourcc), fps, (w, h))
                    vid_writer.write(im0)

    if save_txt or save_img:
        print('Results saved to %s' % (os.getcwd() + os.sep + out))
        if platform == 'darwin':  # MacOS
            os.system('open ' + save_path)

    print('Done. (%.3fs)' % (time.time() - t0))
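# Hedged invocation sketch: detect() reads its settings from the global `opt`
# namespace, so in the original script it is called from __main__ after
# argparse, wrapped in no_grad to skip autograd bookkeeping during inference.
# The `parser` name below is an assumption about the surrounding script.
#
# if __name__ == '__main__':
#     opt = parser.parse_args()
#     with torch.no_grad():
#         detect()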
optimizer_lossadd = optim.Adam(net_lossadd.parameters())
loss_history = [np.inf]  # seed with infinity so min(loss_history) is defined before the first epoch
for epoch in np.arange(EPOCHS):
    loss_epoch_la = 0
    loss_epoch_lb = 0
    loss_epoch_cc = 0
    diff_epoch = 0
    epi_x_old = 0
    x_buf = []
    y_buf = []
    loss_buffer = Variable(torch.zeros(1))
    if torch.cuda.is_available():
        loss_buffer = loss_buffer.cuda()
    for epi, data in tqdm(enumerate(dataloader_test)):
        x, y, epi_x = extract(data)

        delta = net_lossadd.forward(x)
        loss_lossadd = loss_function(delta, y)
        loss_episode_la = loss_lossadd.clone().cpu().data.numpy()[0]
        loss_buffer += loss_lossadd

        delta = net_lotsabackprop.forward(x)
        loss_lotsabackprop = loss_function(delta, y)
        loss_episode_lb = loss_lotsabackprop.clone().cpu().data.numpy()[0]
        loss_lotsabackprop.backward(retain_graph=True)
loss_history = [np.inf]  # seed with infinity so min(loss_history) is defined before the first epoch
for epoch in np.arange(EPOCHS):
    loss_epoch = 0
    diff_epoch = 0
    for epi_idx, epi_data in enumerate(dataloader_train):
        x, y = extract(epi_data)
        net.zero_grad()
        net.zero_hidden()
        optimizer.zero_grad()

        loss = Variable(torch.zeros(1))
        if torch.cuda.is_available():
            loss = loss.cuda()

        diff = 0
        for frame in range(len(x)):
            delta = net.forward(x[frame].unsqueeze(0))
            loss += loss_function(delta, y[frame].unsqueeze(0))
            diff += F.mse_loss(x[frame, :, :12],
                               x[frame, :, :12] + y[frame]).clone().cpu().data.numpy()[0]

        loss.backward()
        optimizer.step()
        loss.detach_()
        loss_episode = loss.clone().cpu().data.numpy()[0]
for epoch_idx in np.arange(EPOCHS):
    loss_epoch = 0
    diff_epoch = 0
    for episode_idx, data in enumerate(dataloader):
        x, y = makeIntoVariables(data)

        # reset hidden lstm units
        net.hidden = net.init_hidden()

        loss_episode = 0
        optimizer.zero_grad()
        loss = autograd.Variable(torch.zeros(1))
        if CUDA:
            loss = loss.cuda()
        loss.detach()
        net.hidden[0].detach()
        net.hidden[1].detach()

        # iterate over episode frames
        for frame_idx in np.arange(len(x)):
            # x_frame = x[frame_idx]
            # y_frame = y[frame_idx]
            prediction = net.forward(x[frame_idx])
            loss += loss_function(prediction, y[frame_idx].view(1, -1))
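# Note on the loop above: detach() returns a new tensor and the results are
# discarded, so those three calls do not actually cut the autograd history.
# A minimal sketch of the pattern presumably intended (reassign the result,
# or use the in-place detach_()); shapes below are illustrative:
import torch

hidden = (torch.zeros(1, 1, 8, requires_grad=True),
          torch.zeros(1, 1, 8, requires_grad=True))
hidden = tuple(h.detach() for h in hidden)  # reassigning severs the graph between episodes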
def makeLossBuffer():
    loss_buffer = Variable(torch.zeros(1))
    if torch.cuda.is_available():
        loss_buffer = loss_buffer.cuda()
    return loss_buffer
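# Minimal runnable sketch of how a loss buffer like makeLossBuffer() is used:
# accumulate per-step losses, then backpropagate once. The tiny linear model
# and random data here are illustrative assumptions.
import torch
import torch.nn as nn

net = nn.Linear(3, 1)
loss_fn = nn.MSELoss()
loss_buffer = torch.zeros(1)            # modern stand-in for Variable(torch.zeros(1))
for _ in range(4):
    x, y = torch.randn(1, 3), torch.randn(1, 1)
    loss_buffer = loss_buffer + loss_fn(net(x), y)
loss_buffer.backward()                  # single backward over the summed loss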