def __init__(self, input_size, device):
    super(TEMP_ENCODER, self).__init__()
    torch.manual_seed(0)
    self.var = RootVariables()
    self.device = device
    self.lstm = nn.LSTM(input_size,
                        self.var.hidden_size,
                        self.var.num_layers,
                        batch_first=True,
                        bidirectional=True).to(self.device)
    self.fc0 = nn.Linear(6, self.var.imu_input_size)
def __init__(self, trim_frame_size=150, input_channels=6, batch_norm=False):
    super(VISION_PIPELINE, self).__init__()
    self.var = RootVariables()
    torch.manual_seed(1)
    # Keep only the first 9 children of pretrained FlowNetS as the frame encoder.
    self.net = FlowNetS.FlowNetS(batch_norm)
    checkpoint_path = self.var.root + 'FlowNet2-S_checkpoint.pth.tar'  # pretrained FlowNetS weights
    checkpoint = torch.load(checkpoint_path)
    self.net.load_state_dict(checkpoint["state_dict"])
    self.net = nn.Sequential(*list(self.net.children())[0:9]).to("cuda:0")
    # Swap each conv block's activation for an out-of-place nn.ReLU().
    for i in range(len(self.net) - 1):
        self.net[i][1] = nn.ReLU()
    self.fc1 = nn.Linear(1024 * 6 * 8, 4096).to("cuda:0")
    self.fc2 = nn.Linear(4096, 256).to("cuda:0")
    self.fc3 = nn.Linear(256, 2).to("cuda:0")
    self.dropout1 = nn.Dropout(0.35)
    # self.dropout2 = nn.Dropout(0.10)
    self.activation = nn.Sigmoid()
    # self.net[8][1] = nn.ReLU(inplace=False)
    # Keep only the convolution of the last block (drop its activation).
    self.net[8] = self.net[8][0]
    self.tensorboard_folder = ''
    for params in self.net.parameters():
        params.requires_grad = True
def __init__(self, original_img_csv, heatmap_img_csv):
    self.indexes = []
    self.var = RootVariables()  # os.path.dirname(os.path.realpath(__file__))
    self.ori_imgs_path = pd.read_csv(self.var.root + original_img_csv + '.csv')
    self.heat_imgs_path = pd.read_csv(self.var.root + heatmap_img_csv + '.csv')
    self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # Position of the recording-subfolder name inside the image path.
    name_index = 6 if len(self.ori_imgs_path.iloc[0, 1].split('/')) > 7 else 4
    subfolder = self.ori_imgs_path.iloc[0, 1].split('/')[name_index]
    f_index = 3  # 0
    # Align each heatmap image with its original frame, skipping ahead
    # whenever the path switches to a new recording subfolder.
    for index in range(len(self.heat_imgs_path)):
        f_index += 1
        if self.ori_imgs_path.iloc[f_index, 1].split('/')[name_index] == subfolder:
            self.indexes.append(f_index)
        else:
            f_index += 4  # 1
            self.indexes.append(f_index)
            subfolder = self.ori_imgs_path.iloc[f_index, 1].split('/')[name_index]
    self.transforms = transforms.Compose([transforms.ToTensor()])
    assert len(self.heat_imgs_path) == len(self.indexes)
def __init__(self, original_img_csv, labels):
    self.gaze_data = []
    self.indexes = []
    self.var = RootVariables()
    self.ori_imgs_path = pd.read_csv(self.var.root + original_img_csv + '.csv')
    name_index = 5 if len(self.ori_imgs_path.iloc[0, 1].split('/')) > 5 else 4
    subfolder = self.ori_imgs_path.iloc[0, 1].split('/')[name_index]
    f_index = 7
    checkedLast = False
    # Keep only samples whose gaze labels contain no NaNs, aligning each with
    # its frame index and skipping ahead at subfolder boundaries.
    for index in range(len(labels)):
        check = np.isnan(labels[index])
        if check.any():
            f_index += 1
            # continue
        else:
            f_index += 1
            self.gaze_data.append(labels[index])
            if self.ori_imgs_path.iloc[f_index, 1].split('/')[name_index] == subfolder:
                self.indexes.append(f_index)
            else:
                f_index += 8  # 1
                self.indexes.append(f_index)
                subfolder = self.ori_imgs_path.iloc[f_index, 1].split('/')[name_index]
    self.transforms = transforms.ToTensor()
    assert len(self.gaze_data) == len(self.indexes)
    self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def __init__(self, original_img_csv, imu_feat, labels):
    self.imu_data, self.gaze_data = [], []
    self.indexes = []
    self.var = RootVariables()
    self.ori_imgs_path = pd.read_csv(self.var.root + original_img_csv + '.csv')
    name_index = 5 if len(self.ori_imgs_path.iloc[0, 1].split('/')) > 5 else 4
    subfolder = self.ori_imgs_path.iloc[0, 1].split('/')[name_index]
    f_index = 7
    checkedLast = False
    # Keep only samples whose gaze labels and IMU features contain no NaNs,
    # aligning each with its frame index and skipping ahead at subfolder boundaries.
    for index in range(len(labels)):
        check = np.isnan(labels[index])
        imu_check = np.isnan(imu_feat[index])
        if check.any() or imu_check.any():
            f_index += 1
        else:
            f_index += 1
            self.gaze_data.append(labels[index])
            if self.ori_imgs_path.iloc[f_index, 1].split('/')[name_index] == subfolder:
                self.indexes.append(f_index)
            else:
                f_index += 8  # 1
                self.indexes.append(f_index)
                subfolder = self.ori_imgs_path.iloc[f_index, 1].split('/')[name_index]
            self.imu_data.append(imu_feat[index])
    self.imu_data = standarization(self.imu_data)
    assert len(self.imu_data) == len(self.indexes)
    assert len(self.gaze_data) == len(self.indexes)
    self.transforms = transforms.Compose([transforms.ToTensor()])
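# `standarization` is defined elsewhere in the repo; below is a minimal sketch of one
# plausible implementation (per-feature z-score normalisation). This is an assumption
# for illustration only, not the repo's actual helper.
import numpy as np

def standarization_sketch(feats):
    """Hypothetical helper: zero-mean, unit-variance per feature channel."""
    feats = np.asarray(feats, dtype=np.float64)
    mean = feats.mean(axis=0)
    std = feats.std(axis=0) + 1e-8  # avoid division by zero for constant channels
    return (feats - mean) / std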
def __init__(self, test_folder, device=None):
    super(FusionPipeline, self).__init__()
    torch.manual_seed(2)
    self.device = device
    self.var = RootVariables()
    # self.checkpoint_path = self.var.root + checkpoint
    self.activation = nn.Sigmoid()
    self.temporalSeq = 32
    self.temporalSize = 16
    self.trim_frame_size = 150
    self.imuCheckpoint_file = 'signal_checkpointAdam64H_' + test_folder[5:] + '.pth'
    self.frameCheckpoint_file = 'vision_checkpointAdami3d_' + test_folder[5:] + '.pth'

    ## IMU MODEL (pretrained, frozen)
    self.imuModel = IMU_ENCODER()
    imuCheckpoint = torch.load(self.var.root + 'datasets/' + test_folder[5:] + '/' + self.imuCheckpoint_file,
                               map_location="cuda:0")
    self.imuModel.load_state_dict(imuCheckpoint['model_state_dict'])
    for params in self.imuModel.parameters():
        params.requires_grad = False

    ## FRAME MODEL (pretrained, frozen)
    self.frameModel = i3d_VIS_ENCODER()
    frameCheckpoint = torch.load(self.var.root + 'datasets/' + test_folder[5:] + '/' + self.frameCheckpoint_file,
                                 map_location="cuda:0")
    self.frameModel.load_state_dict(frameCheckpoint['model_state_dict'])
    for params in self.frameModel.parameters():
        params.requires_grad = False

    ## TEMPORAL MODELS
    # self.temporalModel = TEMP_ENCODER(self.temporalSize)

    self.dropout = nn.Dropout(0.2)
    # self.imu_down_fc = nn.Linear(256, 128).to("cuda:0")
    self.fc0 = nn.Linear(256, 128).to("cuda:0")
    self.fc1 = nn.Linear(128, 2).to("cuda:0")
    # self.fc0 = nn.Linear(512, 256).to("cuda:0")
    # self.fc1 = nn.Linear(256, 2).to("cuda:0")

    ## OTHER
    self.imu_encoder_params = None
    self.frame_encoder_params = None
    self.imuBN = nn.BatchNorm1d(self.var.hidden_size * 2, affine=True).to("cuda:0")
    self.frameBN = nn.BatchNorm1d(self.var.hidden_size * 2, affine=True).to("cuda:0")
    self.concatBN = nn.BatchNorm1d(256).to("cuda:0")
    self.fcBN = nn.BatchNorm1d(128).to("cuda:0")  ## change to 256 when using flownet
    self.tensorboard_folder = ''
def __init__(self):
    super(IMU_ENCODER, self).__init__()
    torch.manual_seed(0)
    self.var = RootVariables()
    self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # Bidirectional LSTM over the raw IMU signal.
    self.lstm = nn.LSTM(self.var.imu_input_size,
                        self.var.hidden_size,
                        self.var.num_layers,
                        batch_first=True,
                        dropout=0.65,
                        bidirectional=True).to(self.device)
    # self.fc0 = nn.Linear(6, self.var.imu_input_size).to(self.device)
    self.fc1 = nn.Linear(self.var.hidden_size * 2, 2).to(self.device)
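# A standalone shape check for the encoder's LSTM configuration. The hyperparameter
# values below are placeholders, not the repo's RootVariables settings: a bidirectional,
# batch_first LSTM maps (batch, seq, input_size) to (batch, seq, 2 * hidden_size),
# which is why the final linear layer above takes hidden_size * 2 inputs.
import torch
import torch.nn as nn

imu_input_size, hidden_size, num_layers = 6, 256, 2   # assumed values for illustration
lstm = nn.LSTM(imu_input_size, hidden_size, num_layers,
               batch_first=True, dropout=0.65, bidirectional=True)
x = torch.randn(8, 32, imu_input_size)                # (batch, seq, features)
out, (h_n, c_n) = lstm(x)
print(out.shape)                                      # torch.Size([8, 32, 512]) == 2 * hidden_size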
def __init__(self, test_folder, reset_dataset=0):
    self.var = RootVariables()
    self.dataset = BUILDING_DATASETS(test_folder)
    self.frame_datasets = None
    self.imu_train_datasets, self.gaze_train_datasets = None, None
    self.imu_test_datasets, self.gaze_test_datasets = None, None
    # Reuse cached .npy extractions when they exist; otherwise build and save them.
    if Path(self.var.root + 'datasets/' + test_folder[5:] + '/imuExtracted_training_data' + '.npy').is_file():
        print('Files exist')
        self.imu_train_datasets = np.load(self.var.root + 'datasets/' + test_folder[5:] + '/imuExtracted_training_data' + '.npy')
        self.gaze_train_datasets = np.load(self.var.root + 'datasets/' + test_folder[5:] + '/gazeExtracted_training_data' + '.npy')
        self.imu_test_datasets = np.load(self.var.root + 'datasets/' + test_folder[5:] + '/imuExtracted_testing_data' + '.npy')
        self.gaze_test_datasets = np.load(self.var.root + 'datasets/' + test_folder[5:] + '/gazeExtracted_testing_data' + '.npy')
    else:
        print('Saved files do not exist')
        self.imu_train_datasets, self.imu_test_datasets = self.dataset.load_unified_imu_dataset()
        self.gaze_train_datasets, self.gaze_test_datasets = self.dataset.load_unified_gaze_dataset()
        np.save(self.var.root + 'datasets/' + test_folder[5:] + '/imuExtracted_training_data' + '.npy', self.imu_train_datasets)
        np.save(self.var.root + 'datasets/' + test_folder[5:] + '/gazeExtracted_training_data' + '.npy', self.gaze_train_datasets)
        np.save(self.var.root + 'datasets/' + test_folder[5:] + '/imuExtracted_testing_data' + '.npy', self.imu_test_datasets)
        np.save(self.var.root + 'datasets/' + test_folder[5:] + '/gazeExtracted_testing_data' + '.npy', self.gaze_test_datasets)

    self.dataset.load_unified_frame_dataset(reset_dataset)
    # Group samples into windows of 4 along the second axis.
    self.gaze_train_datasets = self.gaze_train_datasets.reshape(-1, 4, self.gaze_train_datasets.shape[-1])
    self.imu_train_datasets = self.imu_train_datasets.reshape(-1, 4, self.imu_train_datasets.shape[-1])
    # self.gaze_test_datasets = self.gaze_test_datasets.reshape(-1, 4, self.gaze_test_datasets.shape[-1])
    self.imu_test_datasets = self.imu_test_datasets.reshape(-1, 4, self.imu_test_datasets.shape[-1])
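# The constructor above follows a load-or-build caching pattern for the extracted
# arrays. A small generic sketch of that idea; the helper name and signature are
# hypothetical and not part of the repo:
from pathlib import Path
import numpy as np

def load_or_build(path, build_fn):
    """Load a cached .npy array if present; otherwise build it with build_fn and cache it."""
    p = Path(path)
    if p.is_file():
        return np.load(p)
    arr = build_fn()
    np.save(p, arr)
    return arr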
def __init__(self, input_size):
    super(TEMP_ENCODER, self).__init__()
    torch.manual_seed(0)
    self.var = RootVariables()
    self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # Smaller bidirectional LSTM: half the hidden size and half the layers of the main encoders.
    self.lstm = nn.LSTM(input_size,
                        int(self.var.hidden_size / 2),
                        int(self.var.num_layers / 2),
                        batch_first=True,
                        dropout=0.45,
                        bidirectional=True).to("cuda:0")
def __init__(self, test_folder):
    self.var = RootVariables()
    self.test_folder = test_folder
    Path(self.var.root + 'datasets').mkdir(parents=True, exist_ok=True)
    Path(self.var.root + 'datasets/' + test_folder[5:]).mkdir(parents=True, exist_ok=True)
    # _ = os.system('mkdir -p' + self.var.root + 'datasets')
    # _ = os.system('mkdir -p' + self.var.root + 'datasets/' + test_folder[5:])
    self.train_folders_num, self.test_folders_num = 0, 0
    self.gaze_start_index, self.gaze_end_index = 0, 0
    self.imu_start_index, self.imu_end_index = 0, 0
def __init__(self, resnet_depth):
    super(VIS_ENCODER, self).__init__()
    self.var = RootVariables()
    torch.manual_seed(1)
    # Pretrained 3D ResNet backbone.
    # NOTE: the model depth is hard-coded to 50; resnet_depth only selects the checkpoint file name.
    self.net = resnet.generate_model(50)
    checkpoint = torch.load(self.var.root + 'r3d' + str(resnet_depth) + '_KM_200ep.pth')
    self.net.load_state_dict(checkpoint["state_dict"])
    for params in self.net.parameters():
        params.requires_grad = True
def __init__(self, test_folder, reset_dataset=0):
    self.var = RootVariables()
    self.test_folder = test_folder
    if reset_dataset == 1:
        _ = os.system('mkdir ' + self.var.root + 'datasets')
        _ = os.system('mkdir ' + self.var.root + 'datasets/' + test_folder[5:])
    self.dataset = IMU_GAZE_FRAME_DATASET(self.test_folder, reset_dataset)
    self.train_imu_dataset, self.test_imu_dataset = self.dataset.imu_train_datasets, self.dataset.imu_test_datasets
    self.train_gaze_dataset, self.test_gaze_dataset = self.dataset.gaze_train_datasets, self.dataset.gaze_test_datasets
    self.train_folders_num, self.test_folders_num = 0, 0
    self.gaze_start_index, self.gaze_end_index = 0, 0
    self.imu_start_index, self.imu_end_index = 0, 0
def __init__(self):
    super(VISION_PIPELINE, self).__init__()
    self.var = RootVariables()
    torch.manual_seed(1)
    self.input_channels = 6
    self.batchNorm = False
    # Load pretrained FlowNetS and rebuild its encoder/decoder layer by layer.
    self.net = FlowNetS.FlowNetS(input_channels=self.input_channels, batchNorm=False)
    checkpoint = torch.load(self.var.root + 'FlowNet2-S_checkpoint.pth.tar')
    self.net.load_state_dict(checkpoint["state_dict"])
    self.net = nn.Sequential(*list(self.net.children()))

    # Encoder (contracting path), mostly reused from the pretrained network.
    self.conv1 = self.net[0]
    self.conv2 = self.net[1]
    self.conv3 = self.net[2]
    self.conv3_1 = self.net[3]
    self.conv4 = conv(self.batchNorm, 256, 512, stride=1)
    self.conv4_1 = self.net[5]
    self.conv5 = conv(self.batchNorm, 512, 512, stride=1)
    self.conv5_1 = self.net[7]
    self.conv6 = self.net[8]
    self.conv6_1 = self.net[9]

    # Decoder (expanding path).
    self.deconv5 = self.net[10]
    self.deconv4 = deconv(1024, 256, kernel_size=3, stride=1)
    self.deconv3 = deconv(768, 128, kernel_size=3, stride=1)
    self.deconv2 = deconv(384, 64, kernel_size=4)
    self.deconv1 = deconv(192, 32, kernel_size=(4, 4), stride=2)
    self.deconv0 = deconv(96, self.input_channels, kernel_size=4)

    # Flow prediction heads at each scale.
    self.predict_flow6 = self.net[14]
    self.predict_flow5 = predict_flow(1024)
    self.predict_flow4 = predict_flow(768)
    self.predict_flow3 = predict_flow(384)
    self.predict_flow2 = predict_flow(192)
    self.predict_flow1 = predict_flow(96)
    self.predict_flow0 = predict_flow(6, channels=3)
    self.out_conv = conv(self.batchNorm, 3, 3)

    # Upsampling of the coarse flow maps between scales.
    self.upsampled_flow6_to_5 = self.net[19]
    self.upsampled_flow5_to_4 = self.net[20]
    self.upsampled_flow4_to_3 = self.net[21]
    self.upsampled_flow3_to_2 = self.net[22]
    self.upsampled_flow2_to_1 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=False)
    self.upsampled_flow1_to_0 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=False)

    self.tensorboard_folder = ''
    self.activation = nn.Softmax2d()
    for params in self.net.parameters():
        params.requires_grad = True
def __init__(self, folder_type, labels):
    self.var = RootVariables()
    self.folder_type = folder_type
    self.labels = labels
    self.indexes = []
    checkedLast = False
    # Keep only samples whose gaze labels contain no NaNs.
    for index in range(len(self.labels)):
        check = np.isnan(self.labels[index])
        if check.any():
            continue
        else:
            self.indexes.append(index)
    self.transforms = transforms.Compose([transforms.ToTensor()])
    self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def __init__(self): super(IMU_PIPELINE, self).__init__() torch.manual_seed(0) self.var = RootVariables() self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") self.lstm = nn.LSTM(self.var.imu_input_size, self.var.hidden_size, self.var.num_layers, batch_first=True, dropout=0.55, bidirectional=True).to(self.device) # self.fc0 = nn.Linear(6, self.var.imu_input_size).to(self.device) self.deconv = deconv(1, 3, kernel_size=4, stride=2, padding=1) self.deconv1 = deconv(3, 3, kernel_size=4, stride=2, padding=1) self.deconv2 = deconv(3, 3, kernel_size=4, stride=2, padding=1) self.deconv3 = deconv(3, 3, kernel_size=(3, 4), stride=2, padding=1) self.fc1 = nn.Linear(self.var.hidden_size*2, 2).to(self.device) self.dropout = nn.Dropout(0.45) self.activation = nn.Sigmoid() self.orig_tensor = torch.tensor([3.75, 2.8125]).to(self.device) self.tensorboard_folder = '' #'BLSTM_signal_outputs_sell1/'
def __init__(self, test_folder):
    self.var = RootVariables()
    self.dataset = None
    self.imu_arr_acc, self.imu_arr_gyro, self.gaze_arr = None, None, None
    self.train_last, self.test_last = None, None
    self.train_new, self.test_new = None, None
    temp = None
    self.video_file = 'scenevideo.mp4'
    self.test_folders_num, self.train_folders_num = 0, 0
    self.frame_count = 0
    self.capture = None
    self.ret = None
    self.toggle = 0
    self.test_folder = test_folder
    self.stack_frames = []
    self.transforms = transforms.Compose([transforms.ToTensor()])
    self.panda_data = {}
def __init__(self, feat, labels):
    self.var = RootVariables()
    self.gaze_data, self.imu_data = [], []
    checkedLast = False
    # Drop samples where either the gaze label or the IMU features contain NaNs.
    for index in range(len(labels)):
        check = np.isnan(labels[index])
        imu_check = np.isnan(feat[index])
        if check.any() or imu_check.any():
            continue
        else:
            self.gaze_data.append(labels[index])
            self.imu_data.append(feat[index])
    self.imu_data = self.standarization(self.imu_data)
    self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def __init__(self, trim_frame_size=150, input_channels=6, batch_norm=False):
    super(i3d_VIS_ENCODER, self).__init__()
    self.var = RootVariables()
    torch.manual_seed(1)
    self.dropout1 = nn.Dropout(0.25)
    # Pretrained I3D (RGB stream) as the frame encoder.
    self.model = InceptionI3d().to("cuda:0")
    new_checkpoint = torch.load(
        '/home/sanketthakur/Documents/gaze_pred/IMU-data_processing/pytorchi3d/models/rgb_imagenet.pt')
    self.model.load_state_dict(new_checkpoint)
    self.dropout = nn.Dropout(0.25)
    self.fc1 = nn.Linear(400, 128).to("cuda:0")
    self.fc2 = nn.Linear(128, 2).to("cuda:0")
def __init__(self): super(IMU_PIPELINE, self).__init__() torch.manual_seed(0) self.var = RootVariables() self.device = torch.device( "cuda" if torch.cuda.is_available() else "cpu") self.lstm = nn.LSTM(self.var.imu_input_size, self.var.hidden_size, self.var.num_layers, batch_first=True, dropout=0.55, bidirectional=True).to(self.device) self.fc0 = nn.Linear(6, self.var.imu_input_size).to(self.device) self.fc1 = nn.Linear(self.var.hidden_size * 2, 2).to(self.device) self.dropout = nn.Dropout(0.15) self.activation = nn.Sigmoid() self.tensorboard_folder = '' #'BLSTM_signal_outputs_sell1/'
def __init__(self, folder_type, imu_feat, labels):
    self.var = RootVariables()
    self.folder_type = folder_type
    self.imu_data = []
    self.indexes = []
    checkedLast = False
    # Keep only samples where both the gaze labels and the IMU features are NaN-free.
    for index in range(len(labels)):
        check = np.isnan(labels[index])
        imu_check = np.isnan(imu_feat[index])
        if check.any() or imu_check.any():
            continue
        else:
            self.indexes.append(index)
            self.imu_data.append(imu_feat[index])
    self.imu_data = self.standarization(self.imu_data)
    self.transforms = transforms.Compose([transforms.ToTensor()])
def __init__(self):
    super(VISION_PIPELINE, self).__init__()
    self.var = RootVariables()
    self.BasicBlock = resnet2p1d.BasicBlock
    self.get_inplanes = resnet2p1d.get_inplanes()
    # self.net = resnet.generate_model(18)
    # Pretrained R(2+1)D-18 backbone (first 10 children only), followed by a
    # deconvolution stack back up to a 3-channel map.
    self.net = resnet2p1d.ResNet(self.BasicBlock, [2, 2, 2, 2], self.get_inplanes)
    checkpoint = torch.load(self.var.root + 'r2p1d18_K_200ep.pth')
    self.net.load_state_dict(checkpoint["state_dict"])
    self.net = nn.Sequential(*list(self.net.children()))[0:10]
    self.deconv0 = deconv(512, 256, kernel_size=4, stride=2)
    self.deconv1 = deconv(256, 128, kernel_size=4, stride=2)
    self.deconv2 = deconv(128, 64, kernel_size=4, stride=2)
    self.deconv3 = deconv(64, 32, kernel_size=4, stride=2)
    self.deconv4 = deconv(32, 3, kernel_size=4, stride=2)
    self.activation = nn.Softmax2d()
    self.tensorboard_folder = 'sample_tensorboard'
def __init__(self, trim_frame_size=150, input_channels=6, batch_norm=False):
    super(VISION_PIPELINE, self).__init__()
    self.var = RootVariables()
    torch.manual_seed(1)
    self.dropout1 = nn.Dropout(0.25)
    self.tensorboard_folder = ''
    # Pretrained I3D (RGB stream) as the frame backbone, fine-tuned end to end.
    self.model = InceptionI3d().to("cuda:0")
    new_checkpoint = torch.load(
        '/home/sanketthakur/Documents/gaze_pred/IMU-data_processing/pytorchi3d/models/rgb_imagenet.pt')
    self.model.load_state_dict(new_checkpoint)
    for params in self.model.parameters():
        params.requires_grad = True
    self.fc1 = nn.Linear(400, 128).to("cuda:0")
    self.fc2 = nn.Linear(128, 2).to("cuda:0")
def __init__(self, resnet_depth, test_folder):
    super(FusionPipeline, self).__init__()
    torch.manual_seed(2)
    self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    self.var = RootVariables()
    self.activation = nn.Sigmoid()
    self.temporalSeq = 32
    self.temporalSize = 16
    self.trim_frame_size = self.var.trim_frame_size
    self.imuCheckpoint_file = 'signal_checkpoint0_' + test_folder[5:] + '.pth'
    self.frameCheckpoint_file = 'vision_checkpointAdam9CNN_' + test_folder[5:] + '.pth'
    self.orig_tensor = torch.tensor([3.75, 2.8125]).to(self.device)

    ## IMU MODEL (pretrained, fine-tuned)
    self.imuModel = IMU_ENCODER()
    imuCheckpoint = torch.load(self.var.root + 'datasets/' + test_folder[5:] + '/' + self.imuCheckpoint_file,
                               map_location="cuda:0")
    self.imuModel.load_state_dict(imuCheckpoint['model_state_dict'])
    for params in self.imuModel.parameters():
        params.requires_grad = True

    ## FRAME MODEL (pretrained, fine-tuned)
    self.frameModel = VIS_ENCODER(resnet_depth)
    frameCheckpoint = torch.load(self.var.root + 'datasets/' + test_folder[5:] + '/' + self.frameCheckpoint_file,
                                 map_location="cuda:0")
    self.frameModel.load_state_dict(frameCheckpoint['model_state_dict'])
    for params in self.frameModel.parameters():
        params.requires_grad = True

    ## TEMPORAL MODELS
    # self.temporalModel = TEMP_ENCODER(self.temporalSize)
    # self.fc1 = nn.Linear(self.var.hidden_size, 2).to("cuda:2")

    self.dropout = nn.Dropout(0.35)
    self.fc0 = nn.Linear(512, 256).to(self.device)
    self.fc1 = nn.Linear(256, 2).to(self.device)
    # self.fc2 = nn.Linear(128, 2).to("cuda:2")

    ## OTHER
    self.imu_encoder_params = None
    self.frame_encoder_params = None
    self.imuBN = nn.BatchNorm1d(self.var.hidden_size * 2, affine=True).to(self.device)
    self.frameBN = nn.BatchNorm1d(self.var.hidden_size * 2, affine=True).to(self.device)
    self.fcBN = nn.BatchNorm1d(256).to(self.device)
    self.tensorboard_folder = ''
def __init__(self, checkpoint_path, input_channels=6, batch_norm=False):
    super(VIS_ENCODER, self).__init__()
    self.var = RootVariables()
    torch.manual_seed(1)
    self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # Keep only the first 9 children of pretrained FlowNetS as the frame encoder.
    self.net = FlowNetS.FlowNetS(batch_norm)
    checkpoint = torch.load(checkpoint_path)
    self.net.load_state_dict(checkpoint["state_dict"])
    self.net = nn.Sequential(*list(self.net.children())[0:9]).to(self.device)
    # Swap each conv block's activation for an out-of-place nn.ReLU().
    for i in range(len(self.net) - 1):
        self.net[i][1] = nn.ReLU()
    self.fc1 = nn.Linear(1024 * 6 * 8, 256).to(self.device)
    # self.fc2 = nn.Linear(4096, 256).to(self.device)
    self.fc3 = nn.Linear(256, 2).to(self.device)
    self.dropout = nn.Dropout(0.3)
    # self.net[8][1] = nn.ReLU(inplace=False)
    # Keep only the convolution of the last block (drop its activation).
    self.net[8] = self.net[8][0]
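# fc1 above expects a flattened 1024 x 6 x 8 encoder output. A standalone sanity check
# of that bookkeeping; the batch size and feature-map tensor here are assumptions for
# illustration, not values taken from the repo:
import torch

feat = torch.randn(4, 1024, 6, 8)        # hypothetical encoder output: (batch, C, H, W)
flat = feat.flatten(start_dim=1)          # -> (4, 49152), i.e. 1024 * 6 * 8 features per sample
print(flat.shape, 1024 * 6 * 8)           # torch.Size([4, 49152]) 49152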
def __init__(self, args, checkpoint_path, device, input_channels=6, batch_norm=False):
    super(VIS_ENCODER, self).__init__()
    self.var = RootVariables()
    torch.manual_seed(1)
    self.device = device
    self.net = FlowNetS.FlowNetS(args, input_channels, batch_norm)
    checkpoint = torch.load(checkpoint_path)
    self.net.load_state_dict(checkpoint["state_dict"])
    self.net = nn.Sequential(*list(self.net.children())[0:9]).to(self.device)
    # Swap each conv block's activation for an out-of-place nn.ReLU().
    for i in range(len(self.net) - 1):
        self.net[i][1] = nn.ReLU()
    self.fc1 = nn.Linear(1024 * 4 * 4, 4096).to(self.device)
    self.fc2 = nn.Linear(4096, 256).to(self.device)
    self.fc3 = nn.Linear(256, 2).to(self.device)
    self.dropout = nn.Dropout(0.35)
    self.activation = nn.Sigmoid()
    # self.net[8][1] = nn.ReLU(inplace=False)
    self.net[8] = self.net[8][0]
    for params in self.net.parameters():
        params.requires_grad = True
def __getitem__(self, index):
    f_index = self.indexes[index]
    # img = self.frames[f_index]
    img = np.load(self.var.root + self.folder_type + '/frames_' + str(f_index) + '.npy')
    targets = self.gaze_data[f_index]
    # Scale gaze targets into the 512x384 prediction space.
    targets[:, 0] *= 512.0
    targets[:, 1] *= 384.0
    return self.transforms(img).to("cuda:0"), \
        torch.from_numpy(self.imu_data[index]).to("cuda:0"), \
        torch.from_numpy(targets).to("cuda:0")


if __name__ == "__main__":
    # device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    var = RootVariables()
    parser = argparse.ArgumentParser()
    parser.add_argument("--sepoch", type=int, default=0)
    # parser.add_argument('--sepoch', action='store_true', help='Run model in pseudo-fp16 mode (fp16 storage fp32 math).')
    parser.add_argument("--nepoch", type=int, default=15)
    parser.add_argument("--tfolder", action='store', help='tensorboard_folder name')
    parser.add_argument("--reset_data", type=int)
    args = parser.parse_args()

    lastFolder, newFolder = None, None
    for index, subDir in enumerate(sorted(os.listdir(var.root))):
        # if 'train_BookShelf_S1' in subDir:
        #     continue
        if 'train_PosterSession' in subDir:
def __init__(self):
    self.var = RootVariables()
import sys, os, ast
import torch

sys.path.append('../')
from variables import RootVariables
from FlowNetPytorch.models import FlowNetS

device = torch.device("cpu")

## PREPARING THE DATA
# folder = sys.argv[1]
# dataset_folder = '/home/sans/Downloads/gaze_data/'
# os.chdir(dataset_folder + folder + '/' if folder[-1]!='/' else (dataset_folder + folder))

if __name__ == "__main__":
    folder = sys.argv[1]
    device = torch.device("cpu")
    var = RootVariables()
    os.chdir(var.root + folder)

    # dataset = FRAME_IMU_DATASET(var.root, folder, 150, device)
    # trainLoader = torch.utils.data.DataLoader(dataset, batch_size=var.batch_size, drop_last=True)
    # a = iter(trainLoader)
    # f, g, i = next(a)
    # print(data.shape, data)
    # print(i.shape)  # [batch_size, sequence_length, input_size]
    # i = i.reshape(i.shape[0], i.shape[2], -1)
    # print(i.shape)

    # Load the pretrained IMU encoder checkpoint and inspect the model.
    model = IMU_ENCODER(var.imu_input_size, device).to(device)
    imuCheckpoint_file = 'hidden_256_60e_signal_pipeline_checkpoint.pth'
    imuCheckpoint = torch.load(var.root + imuCheckpoint_file)
    model.load_state_dict(imuCheckpoint['model_state_dict'])
    print(model)