def transpose(datalayer):
    from numpy import transpose as nptranspose
    datalayer.data = nptranspose(datalayer.data)
    datalayer.calculate_fdata()  # must update fdata after changing data
    datalayer.ny = datalayer.data.shape[0]
    datalayer.nx = datalayer.data.shape[1]
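# Minimal usage sketch for transpose(). The real datalayer class is not
# shown in this section, so _FakeLayer below is a hypothetical stand-in
# that only provides the attributes transpose() touches; calculate_fdata
# is stubbed out.
from numpy import arange

class _FakeLayer:
    def __init__(self):
        self.data = arange(6).reshape(2, 3)  # shape (ny=2, nx=3)
        self.ny, self.nx = self.data.shape

    def calculate_fdata(self):
        pass  # stub; the real method recomputes fdata from data

layer = _FakeLayer()
transpose(layer)
print(layer.data.shape, layer.ny, layer.nx)  # prints: (3, 2) 3 2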
def rigid_transform_3D(MatA, MatB):
    '''
    Pass in 2 numpy arrays (N x 3 points) to get the rotation R and
    translation t that map MatA onto MatB (Kabsch algorithm).
    '''
    from numpy import mean as npmean, tile as nptile, dot as npdot
    from numpy import transpose as nptranspose
    from numpy.linalg import svd as nplinalgsvd, det as nplinalgdet

    assert len(MatA) == len(MatB)
    N = MatA.shape[0]
    # centre both point sets on their centroids
    comA = npmean(MatA, axis=0)
    comB = npmean(MatB, axis=0)
    A = MatA - nptile(comA, (N, 1))
    B = MatB - nptile(comB, (N, 1))
    # covariance matrix and its SVD (numpy returns V already transposed)
    H = npdot(nptranspose(A), B)
    U, S, V = nplinalgsvd(H)
    R = npdot(nptranspose(V), nptranspose(U))
    # special reflection case: flip the sign of the last singular vector
    if nplinalgdet(R) < 0:
        V[2, :] *= -1
        R = npdot(nptranspose(V), nptranspose(U))
    t = -npdot(R, nptranspose(comA)) + nptranspose(comB)
    return R, t
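# Quick numerical self-check for rigid_transform_3D (not part of the
# original code): apply a known rotation and translation to random
# points and verify both are recovered.
import numpy as np

rng = np.random.default_rng(0)
pts_A = rng.random((10, 3))
theta = 0.3
R_true = np.array([[np.cos(theta), -np.sin(theta), 0.0],
                   [np.sin(theta), np.cos(theta), 0.0],
                   [0.0, 0.0, 1.0]])
t_true = np.array([1.0, -2.0, 0.5])
pts_B = pts_A @ R_true.T + t_true  # B_i = R_true @ A_i + t_true

R, t = rigid_transform_3D(pts_A, pts_B)
assert np.allclose(R, R_true) and np.allclose(t, t_true)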
def train_cycle():
    # Relies on module-level objects defined elsewhere in the script:
    # model, optimizer, criterion, device, train_loader, val_loader,
    # dataset, hyperparameters (lr, weight_decay, SEED, num_epochs,
    # log_interval, log_interval_val, checkpoints, model_number,
    # model_save_path) and imports (torch, pickle, tqdm, time,
    # os.path.join, os.mkdir, numpy aliases npasarray/nparray/nptranspose).

    # safety toggle to make sure no files are overwritten by accident while testing!
    if model_number != 999:
        safety_toggle = input(
            f'ATTENTION: MODEL NUMBER IS :{model_number}: '
            'ANY FILES WITH THE SAME MODEL NUMBER WILL BE DELETED. '
            'Continue? (Y/n): ')
        if safety_toggle != 'Y' and safety_toggle != 'y':
            raise ValueError(
                'Please change the model number to 999, or choose to continue')

    loss_total_dict = {
        'epochs': 0,
        'lr': lr,
        'dset_size': len(dataset),
        'weight_decay': weight_decay,
        'seed': SEED,
        'losses': [],
        'accuracies': [],
        'losses_val': [],
        'accuracies_val': [],
        'best_loss': 999,
        'best_loss_val': 999
    }
    model_save_path_new = join(model_save_path, str(model_number))
    mkdir(model_save_path_new)

    for epoch in range(num_epochs):  # loop over the dataset multiple times
        tic = time()
        running_loss = 0.0
        running_loss_val = 0.0
        running_acc = 0.0
        running_acc_val = 0.0

        ################## TRAINING STARTS ########################
        model.train()
        print(f'training epoch #{epoch+1}:')
        for i, data in enumerate(tqdm(train_loader, leave=False)):
            imgs, labels = data
            # img = imgs[0].numpy().copy().transpose(1, 2, 0)
            # cv2.imshow('img', cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
            # if cv2.waitKey(1) & 0xFF == ord('q'):
            #     break
            # stack the batch and reorder NHWC -> NCHW before handing to torch
            imgs = npasarray(list(nparray(im) for im in imgs))
            imgs = nptranspose(imgs, (0, 3, 1, 2))
            imgs = torch.from_numpy(imgs).float().to(device)
            # transform list of 0-dim tensors into one 1-dim tensor
            labels_cat = torch.tensor(
                [label.item() for label in labels]).float().reshape((-1, 1))
            labels_cat = labels_cat.to(device)

            optimizer.zero_grad()
            preds = model(imgs)
            # print(preds)
            # print(preds.shape)
            # print(labels)
            # print(labels_cat.shape)
            loss = criterion(preds, labels_cat)
            acc = binary_acc(preds, labels_cat)
            running_loss += loss.item()
            running_acc += acc.item()

            if (i + 1) % log_interval == 0:
                print('%s ::Training:: [%d, %5d] loss: %.5f' %
                      (model_number, epoch + 1, i + 1,
                       running_loss / log_interval))
                loss_total_dict['epochs'] = epoch
                loss_total_dict['losses'].append(running_loss / log_interval)
                loss_total_dict['accuracies'].append(running_acc / log_interval)
                # save model
                if running_loss / log_interval <= loss_total_dict['best_loss']:
                    loss_total_dict['best_loss'] = running_loss / log_interval
                    torch.save(model.state_dict(),
                               join(model_save_path_new, "best_train") + '.th')
                else:
                    torch.save(model.state_dict(),
                               join(model_save_path_new, "last") + '.th')
                with open(f'{model_save_path_new}-train', 'wb') as filezin:
                    pickle.dump(loss_total_dict, filezin)
                running_loss = 0.0
                running_acc = 0.0

            loss.backward()
            optimizer.step()

        ################## VALIDATION STARTS ########################
        model.eval()
        print(f'validating epoch #{epoch+1}:')
        for i, data in enumerate(tqdm(val_loader, leave=False)):
            imgs, labels = data
            imgs = npasarray(list(nparray(im) for im in imgs))
            imgs = nptranspose(imgs, (0, 3, 1, 2))
            imgs = torch.from_numpy(imgs).float().to(device)
            # transform list of 0-dim tensors into one 1-dim tensor
            labels_cat = torch.tensor(
                [label.item() for label in labels]).float().reshape((-1, 1))
            labels_cat = labels_cat.to(device)

            # running model
            preds = model(imgs)
            loss = criterion(preds, labels_cat)
            acc = binary_acc(preds, labels_cat)
            running_loss_val += loss.item()
            running_acc_val += acc.item()

            if (i + 1) % log_interval_val == 0:
                print('%s ::Validation:: [%d, %5d] loss: %.5f' %
                      (model_number, epoch + 1, i + 1,
                       running_loss_val / log_interval_val))
                print(f'Taking (precisely) {(time()-tic)/60} minutes per epoch')
                loss_total_dict['epochs'] = epoch
                loss_total_dict['losses_val'].append(running_loss_val /
                                                     log_interval_val)
                loss_total_dict['accuracies_val'].append(running_acc_val /
                                                         log_interval_val)
                # save model if validation loss improved
                if (running_loss_val / log_interval_val
                        <= loss_total_dict['best_loss_val']):
                    loss_total_dict['best_loss_val'] = (running_loss_val /
                                                        log_interval_val)
                    torch.save(model.state_dict(),
                               join(model_save_path_new, "best_val") + '.th')
                if epoch in checkpoints:
                    with open(
                            join(model_save_path_new, str(model_number)) +
                            "-train", 'wb') as filezin:
                        pickle.dump(loss_total_dict, filezin)
                running_loss_val = 0.0
                running_acc_val = 0.0

        print("\n")
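# train_cycle() calls binary_acc(), which is defined elsewhere in the
# script. The version below is an assumed minimal stand-in, NOT the
# original helper: it presumes preds are raw logits and labels is an
# (N, 1) tensor of 0/1 floats, and returns accuracy as a percentage
# tensor (so .item() works as used above).
def binary_acc(preds, labels):
    rounded = torch.round(torch.sigmoid(preds))  # logits -> 0/1 predictions
    correct = (rounded == labels).sum().float()
    return correct / labels.shape[0] * 100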
def vargraph(dataframe,
             explanatoryvariable,
             dependentvariable,
             categorical=False,
             catlabels=False,
             condition=False,
             log=False):
    """
    dataframe: Cleaned (errors are NaN) DataFrame containing at least
        explanatoryvariable and dependentvariable.
    explanatoryvariable: Name of explanatory (x) variable.
    dependentvariable: Name of dependent (y) variable.
    categorical: Optional boolean. True if explanatoryvariable is categorical.
    catlabels: Optional. Category labels passed through to categplot.
    condition: Optional string. Name of condition variable for comparison
        (coded 1 = yes, 2 = no).
    log: Optional boolean. True for logarithmic y scale.
    """
    from numpy import array, linspace, transpose as nptranspose
    from scipy.stats import pearsonr
    import matplotlib.pyplot as plt
    from matplotlib import cm
    # categplot, nameplot and logyrange are helpers defined elsewhere
    # in this module.

    df = dataframe[[explanatoryvariable, dependentvariable]].dropna()
    if condition:
        dfc = dataframe[dataframe[condition] == 1]
        dfcond = dfc[[explanatoryvariable, dependentvariable]].dropna()
        dfnc = dataframe[dataframe[condition] == 2]
        dfncond = dfnc[[explanatoryvariable, dependentvariable]].dropna()

    if categorical:
        if condition:
            categplot(df,
                      explanatoryvariable,
                      dependentvariable,
                      log=log,
                      condition=condition,
                      catlabels=catlabels,
                      dfcond=dfcond,
                      dfncond=dfncond)
        else:
            categplot(df,
                      explanatoryvariable,
                      dependentvariable,
                      catlabels=catlabels,
                      log=log)
    else:  # continuous variable
        if condition:
            x_cond = dfcond[explanatoryvariable].values
            x_ncond = dfncond[explanatoryvariable].values
            y_cond = dfcond[dependentvariable].values
            y_ncond = dfncond[dependentvariable].values
            x = [x_ncond, x_cond]
            y = [y_ncond, y_cond]
            print(array(x_cond).shape, array(y_cond).shape)
            print(array(x_ncond).shape, array(y_ncond).shape)
            corr_ncond = pearsonr(array(x_ncond), array(y_ncond))
            corr_cond = pearsonr(array(x_cond), array(y_cond))
            # label order matches x = [x_ncond, x_cond] so the legend
            # lines up with the plotted point sets
            labels = [
                'No, %s (r = %.4f, p = %.4f, sz = %d)' %
                (condition, corr_ncond[0], corr_ncond[1], len(x_ncond)),
                'Yes, %s (r = %.4f, p = %.4f, sz = %d)' %
                (condition, corr_cond[0], corr_cond[1], len(x_cond))
            ]
        else:
            x = [df[explanatoryvariable].values]
            y = [df[dependentvariable].values]
            print(nptranspose(array(x)).shape, nptranspose(array(y)).shape)
            # pearsonr expects 1-D arrays, so correlate the raw columns
            corr = pearsonr(x[0], y[0])
            labels = [
                'r = %.4f, p = %.4f, sz = %d' % (corr[0], corr[1], len(x[0]))
            ]

        # Make scatterplot
        fig = plt.figure(1, figsize=(9, 6))  # create a figure instance
        ax = fig.add_subplot(111)  # create an axes instance
        colors = cm.rainbow(linspace(0, 1, len(x)))  # one color per point set
        for i, xsets in enumerate(x):
            ax.scatter(xsets,
                       y[i],
                       marker='o',
                       alpha=0.5,
                       lw=0,
                       color=colors[i])
        if log:
            ax.set_yscale('log')
            ax.set_ylim(logyrange(y))
        ax.legend(labels)
        ax.set_xlabel(explanatoryvariable)
        ax.set_ylabel(dependentvariable)
        if condition:
            nameplot(ax,
                     fig,
                     dependentvariable,
                     explanatoryvariable,
                     condition=condition)
        else:
            nameplot(ax, fig, dependentvariable, explanatoryvariable)
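# Note on the pearsonr call above: scipy.stats.pearsonr expects two 1-D
# arrays of equal length, which is why vargraph correlates the raw
# columns rather than (n, 1) transposed matrices. Toy illustration
# (data not from the original project):
from numpy import array
from scipy.stats import pearsonr

xs = array([1.0, 2.0, 3.0, 4.0, 5.0])
ys = array([1.1, 1.9, 3.2, 3.9, 5.1])
r, p = pearsonr(xs, ys)  # both inputs must be 1-D and the same length
print('r = %.4f, p = %.4f, sz = %d' % (r, p, len(xs)))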