def main():
    for t in ['60', '120', '180', '240', '300', '360']:  # '20', '40',
        preprocess = 'filtered_torque_' + t + 's'  # sys.argv[3]

        for exp in ['exp0', 'exp1', 'exp2', 'exp3', 'exp4']:
            path = '../data/csv/test/' + data + '/with_contact/' + exp
            dataset = indirectTrocarTestDataset(path, window, skip, in_joints,
                                                seal=seal, filter_signal=True, net=net)
            loader = DataLoader(dataset=dataset, batch_size=batch_size, shuffle=False)

            model_root = []
            for j in range(JOINTS):
                folder = "trocar" + str(j)
                model_root.append(root / preprocess / net / folder)

            networks = []
            for j in range(JOINTS):
                networks.append(trocarNetwork(window, len(in_joints), 1).to(device))
                utils.load_prev(networks[j], model_root[j], epoch_to_use)
                print("Loaded a " + str(j) + " model")

            all_fs_diff = torch.tensor([])
            all_diff = torch.tensor([])
            all_jacobian = torch.tensor([])
            all_time = torch.tensor([])
            uncorrected_loss = 0
            corrected_loss = 0
            loss_fn = torch.nn.MSELoss()

            for i, (position, velocity, torque, jacobian, time, fs_pred) in enumerate(loader):
                position = position.to(device)
                velocity = velocity.to(device)

                step_loss = 0
                step_pred = torch.tensor([])

                for j in range(JOINTS):
                    posvel = torch.cat((position, velocity, fs_pred[:, [j]].to(device)),
                                       axis=1).contiguous()
                    # Corrected torque = trocar-network residual + free-space prediction.
                    pred = networks[j](posvel).detach().cpu() + fs_pred[:, [j]]
                    step_pred = torch.cat((step_pred, pred), axis=1) if step_pred.size() else pred

                # Keep fs_pred at its full (batch, JOINTS) width here; narrowing it to a
                # single column (fs_pred = fs_pred[:, [j]]) would break the per-joint
                # uncorrected residual below, which must match torque's shape.
                fs_diff = torque - fs_pred
                diff = torque - step_pred

                all_fs_diff = torch.cat((all_fs_diff, fs_diff), axis=0) if all_fs_diff.size() else fs_diff
                all_diff = torch.cat((all_diff, diff), axis=0) if all_diff.size() else diff
                all_jacobian = torch.cat((all_jacobian, jacobian), axis=0) if all_jacobian.size() else jacobian
                all_time = torch.cat((all_time, time), axis=0) if all_time.size() else time

            all_time = all_time.unsqueeze(1)

            # Uncorrected (free-space only) force estimate.
            all_fs_force = utils.calculate_force(all_jacobian, all_fs_diff)
            all_fs_force = torch.cat((all_time, all_fs_force), axis=1)
            results_path = '../results/' + data + '/with_contact/' + exp
            np.savetxt(results_path + '/uncorrected_' + net + '_' + seal + '_' + preprocess + '.csv',
                       all_fs_force.numpy())

            # Trocar-corrected force estimate.
            all_force = utils.calculate_force(all_jacobian, all_diff)
            all_force = torch.cat((all_time, all_force), axis=1)
            np.savetxt(results_path + '/corrected_' + net + '_' + seal + '_' + preprocess + '.csv',
                       all_force.numpy())
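# The script above depends on utils.calculate_force, which is not shown here.
# The sketch below is a guess at what such a helper could look like, inferred
# only from its call sites (a batch of Jacobians plus a batch of 6-joint torque
# residuals); the flattened-Jacobian layout and the pseudo-inverse choice are
# assumptions, not the repository's actual code.

import torch

def calculate_force_sketch(jacobian, torque_residual):
    """Map joint-torque residuals to a Cartesian wrench via tau = J^T f,
    i.e. f = pinv(J^T) tau. Assumes jacobian is (N, 36), row-major 6x6,
    and torque_residual is (N, 6)."""
    J = jacobian.reshape(-1, 6, 6)                  # (N, 6, 6)
    tau = torque_residual.unsqueeze(-1)             # (N, 6, 1)
    f = torch.linalg.pinv(J.transpose(1, 2)) @ tau  # (N, 6, 1)
    return f.squeeze(-1)                            # (N, 6)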
def main():
    config = get_config()
    val_loader = coco_loader.get_loader('val', config)

    net = model.twonets()
    utils.load_prev(net, "../models/%s_0.pt" % "gg_full")
    net.eval()
    net = net.cuda()

    val_stats(net, val_loader)
def main():
    for t in ['60', '120', '180', '240', '300']:  # '20', '40',
        preprocess = 'filtered_torque_' + t + 's'  # sys.argv[3]

        dataset = indirectDataset(path, window, skip, in_joints, filter_signal=True, is_rnn=True)
        loader = DataLoader(dataset=dataset, batch_size=batch_size, shuffle=False)

        model_root = []
        for j in range(JOINTS):
            folder = "trocar" + str(j)
            model_root.append(root / preprocess / net / folder)

        networks = []
        for j in range(JOINTS):
            networks.append(torqueLstmNetwork(batch_size, device).to(device))
            utils.load_prev(networks[j], model_root[j], epoch_to_use)
            print("Loaded a " + str(j) + " model")

        all_time = torch.tensor([])
        all_pred = torch.tensor([])
        loss_fn = torch.nn.MSELoss()

        for i, (position, velocity, torque, jacobian, time) in enumerate(loader):
            position = position.to(device)
            velocity = velocity.to(device)

            step_loss = 0
            step_pred = torch.tensor([])

            for j in range(JOINTS):
                posvel = torch.cat((position, velocity), axis=2).contiguous()
                pred = networks[j](posvel).detach().squeeze(0).cpu()
                step_pred = torch.cat((step_pred, pred), axis=1) if step_pred.size() else pred

            all_pred = torch.cat((all_pred, step_pred), axis=0) if all_pred.size() else step_pred

            time = time.permute(1, 0)
            all_time = torch.cat((all_time, time), axis=0) if all_time.size() else time

        all_pred = torch.cat((all_time, all_pred), axis=1)
        results_path = '../results/' + data + '/no_contact/'
        np.savetxt(results_path + '/torque_lstm_troc_' + preprocess + '.csv', all_pred.numpy())
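# torqueLstmNetwork is constructed as torqueLstmNetwork(batch_size, device) and
# called on a (seq_len, batch, 12) position/velocity tensor above, but its
# definition is not shown. Below is a minimal sketch of a sequence-to-sequence
# LSTM regressor in that spirit; the layer sizes, stateless forward pass, and
# single-output head are assumptions, not the actual model.

import torch
import torch.nn as nn

class TorqueLstmSketch(nn.Module):
    def __init__(self, input_size=12, hidden_size=64, num_layers=1):
        super().__init__()
        self.lstm = nn.LSTM(input_size=input_size, hidden_size=hidden_size,
                            num_layers=num_layers)
        self.head = nn.Linear(hidden_size, 1)

    def forward(self, posvel):
        # posvel: (seq_len, batch, input_size); one torque estimate per step.
        out, _ = self.lstm(posvel)   # (seq_len, batch, hidden_size)
        return self.head(out)        # (seq_len, batch, 1)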
                          batch_size=batch_size, shuffle=True)
val_loader = DataLoader(dataset=val_dataset, batch_size=batch_size, shuffle=False)

for j in range(JOINTS):
    try:
        model_root.append(root / model / (folder + str(j)))
        model_root[j].mkdir(mode=0o777, parents=False)
    except OSError:
        print("Model path exists")

if use_previous_model:
    for j in range(JOINTS):
        epoch = load_prev(networks[j], model_root[j], epoch_to_use, optimizers[j], schedulers[j])
else:
    for j in range(JOINTS):
        init_weights(networks[j])
    epoch = 1

print('Training for ' + str(epochs))
best_loss = torch.zeros(6) + 1e10

for e in range(epoch, epochs + 1):
    tq = tqdm.tqdm(total=(len(train_loader) * batch_size))
    tq.set_description('Epoch {}, lr {}'.format(e, optimizers[0].param_groups[0]['lr']))
    epoch_loss = 0
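# load_prev is called above with a model, a checkpoint directory, the epoch to
# resume from, and optionally an optimizer and scheduler, and is expected to
# return the epoch to continue training at. The helper below is a hypothetical
# sketch of such a restore; the file naming and state-dict keys are assumptions,
# not the repository's checkpoint format.

import torch
from pathlib import Path

def load_prev_sketch(network, model_dir, epoch_to_use, optimizer=None, scheduler=None):
    checkpoint = Path(model_dir) / 'model_{}.pt'.format(epoch_to_use)
    state = torch.load(checkpoint, map_location='cpu')
    network.load_state_dict(state['model'])
    if optimizer is not None and 'optimizer' in state:
        optimizer.load_state_dict(state['optimizer'])
    if scheduler is not None and 'scheduler' in state:
        scheduler.load_state_dict(state['scheduler'])
    # Resume at the epoch after the one stored in the checkpoint.
    return state.get('epoch', epoch_to_use) + 1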
def main():
    all_pred = None

    if exp == 'train':
        path = '../data/csv/train/' + data + '/'
    elif exp == 'val':
        path = '../data/csv/val/' + data + '/'
    elif exp == 'test':
        path = '../data/csv/test/' + data + '/' + contact + '/'
    else:
        path = '../data/csv/test/' + data + '/' + contact + '/' + exp + '/'

    in_joints = [0, 1, 2, 3, 4, 5]

    if is_rnn:
        window = 1000
    else:
        window = utils.WINDOW

    if is_rnn:
        dataset = indirectDataset(path, window, utils.SKIP, in_joints, is_rnn=is_rnn)
    else:
        dataset = indirectTestDataset(path, window, utils.SKIP, in_joints, is_rnn=is_rnn)
    loader = DataLoader(dataset=dataset, batch_size=batch_size, shuffle=False, drop_last=False)

    model_root = []
    for j in range(JOINTS):
        folder = fs + str(j)
        model_root.append(root / preprocess / net / folder)

    networks = []
    for j in range(JOINTS):
        if is_rnn:
            networks.append(torqueLstmNetwork(batch_size, device).to(device))
        else:
            networks.append(fsNetwork(window).to(device))

    for j in range(JOINTS):
        utils.load_prev(networks[j], model_root[j], epoch_to_use)
        print("Loaded a " + str(j) + " model")

    # loss_fn = torch.nn.MSELoss()
    # all_loss = 0
    all_pred = torch.tensor([])
    all_time = torch.tensor([])

    for i, (position, velocity, torque, time) in enumerate(loader):
        position = position.to(device)
        velocity = velocity.to(device)

        if is_rnn:
            posvel = torch.cat((position, velocity), axis=2).contiguous()
        else:
            posvel = torch.cat((position, velocity), axis=1).contiguous()

        if is_rnn:
            time = time.permute((1, 0))
            torque = torque.squeeze()

        cur_pred = torch.zeros(torque.size())
        for j in range(JOINTS):
            pred = networks[j](posvel).squeeze().detach()
            # pred = pred * utils.range_torque[j].to(device)
            cur_pred[:, j] = pred.cpu()

        # loss = loss_fn(cur_pred, torque)
        # all_loss += loss.item()

        if is_rnn:
            time = time.squeeze(-1)
        all_time = torch.cat((all_time, time.cpu()), axis=0) if all_time.size() else time.cpu()
        all_pred = torch.cat((all_pred, cur_pred.cpu()), axis=0) if all_pred.size() else cur_pred.cpu()

    all_pred = torch.cat((all_time.unsqueeze(1), all_pred), axis=1)
    np.savetxt(path + net + '_' + seal + '_pred_' + preprocess + '.csv', all_pred.numpy())
def main():
    path = '../data/csv/test/' + data + '/' + contact + '/'
    in_joints = [0, 1, 2, 3, 4, 5]

    dataset = indirectTrocarTestDataset(path, utils.WINDOW, utils.SKIP, in_joints, seal=seal, net=net)
    loader = DataLoader(dataset=dataset, batch_size=batch_size, shuffle=False)

    for t in ['60', '120', '180', '240', '300']:
        preprocess = 'filtered_torque_' + t + 's'

        model_root = []
        for j in range(JOINTS):
            if is_seal:
                folder = "trocar" + str(j)
            else:
                folder = "trocar_no_cannula" + str(j)
            model_root.append(root / preprocess / net / folder)

        networks = []
        for j in range(JOINTS):
            networks.append(trocarNetwork(utils.WINDOW, len(in_joints)).to(device))
            utils.load_prev(networks[j], model_root[j], epoch_to_use)
            print("Loaded a " + str(j) + " model")

        all_pred = torch.tensor([])
        uncorrected_loss = 0
        corrected_loss = 0
        loss_fn = torch.nn.MSELoss()

        for i, (position, velocity, torque, jacobian, time, fs_pred) in enumerate(loader):
            position = position.to(device)
            velocity = velocity.to(device)
            torque = torque.to(device)
            fs_pred = fs_pred.to(device)

            step_loss = 0
            step_pred = time.unsqueeze(1)

            for j in range(JOINTS):
                posvel = torch.cat((position, velocity, fs_pred[:, [j]]), axis=1).contiguous()
                pred = networks[j](posvel) + fs_pred[:, [j]]
                # pred = networks[j](posvel)*utils.max_torque[j] + fs_pred[:,j].unsqueeze(1)
                loss = loss_fn(pred.squeeze(), torque[:, j])
                step_loss += loss.item()
                step_pred = torch.cat((step_pred, pred.detach().cpu()), axis=1)

            corrected_loss += step_loss / 6

            in_joints = np.array(in_joints)
            # fs_pred = fs_pred[:,(in_joints+1)*utils.WINDOW-1]
            uncorrected_loss += loss_fn(fs_pred, torque)

            all_pred = torch.cat((all_pred, step_pred), axis=0) if all_pred.size() else step_pred

        print('Uncorrected loss: ', uncorrected_loss / len(loader))
        print('Corrected loss: ', corrected_loss / len(loader))

        np.savetxt('../results/' + data + '/' + contact + '/torque_' + net + '_' + seal + '_' + preprocess + '.csv',
                   all_pred.numpy())
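# trocarNetwork is used above as a correction model: it sees the position/velocity
# window plus the free-space torque prediction for one joint, and its output is
# added back onto that prediction. The sketch below illustrates one plausible
# shape for such a residual MLP; the hidden size, activation, and input layout
# are assumptions, not the actual architecture.

import torch
import torch.nn as nn

class TrocarNetworkSketch(nn.Module):
    def __init__(self, window, num_joints, hidden_size=256):
        super().__init__()
        # Input: a window of position and velocity samples for each joint,
        # plus the scalar free-space prediction appended as the last feature.
        in_features = window * num_joints * 2 + 1
        self.layers = nn.Sequential(
            nn.Linear(in_features, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, 1),
        )

    def forward(self, posvel):
        # Returns the residual torque correction for a single joint.
        return self.layers(posvel)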
import coco_loader
import visualize
import config as C
import model
import utils

import torch
import os
import time
from datetime import datetime

os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "2"

config = C.BaseConfig()
val_loader = coco_loader.get_loader('val', config)

net = model.twonets()
net = net.cuda()
utils.load_prev(net, "../models/%s_0.pt" % "gg_full_final")

for i, data in enumerate(val_loader):
    images, impulses, instance_masks, cat_ids = utils.cudify_data(data)
    with torch.no_grad():
        mask_logits = net([images, impulses])
    pred_masks = mask_logits.sigmoid()

    # Show ground-truth targets, then overwrite the masks with thresholded
    # predictions and show those for comparison.
    visualize.visualize_targets(data, config, '00')
    data[2] = (pred_masks > 0.5).cpu()
    visualize.visualize_targets(data, config, '01')
    input()