def compute_dictionary_one_image(args):
    '''
    Extracts a random subset of filter responses of an image and saves it to disk.
    This is a worker function called by compute_dictionary.
    You are free to make your own interface based on how you implement compute_dictionary.
    '''
    opts = get_opts()
    ind, alpha, train_files = args
    # ----- TODO -----
    # Inputs needed: index of the image, alpha (number of random pixels), image path
    img = Image.open("../data/" + train_files)  # read an image
    img = np.array(img).astype(np.float32) / 255
    filter_resp = extract_filter_responses(opts, img)  # extract the filter responses
    # Sample alpha random pixel locations from the image
    rand_loc_y = np.random.choice(filter_resp.shape[0], int(alpha))
    rand_loc_x = np.random.choice(filter_resp.shape[1], int(alpha))
    # Extract the random pixels, giving an array of size alpha x 3F
    img_sub = filter_resp[rand_loc_y, rand_loc_x, :]
    np.save(os.path.join("../temp/", str(ind) + '.npy'), img_sub)
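# For context, a minimal sketch of how compute_dictionary might drive the worker above.
# The train_files.txt listing, multiprocessing.Pool usage, and sklearn KMeans clustering
# are assumptions for illustration, not the original implementation.
def compute_dictionary_sketch(opts, n_worker=1):
    from multiprocessing import Pool
    from sklearn.cluster import KMeans

    # One (index, alpha, relative_path) argument tuple per training image
    train_files = open("../data/train_files.txt").read().splitlines()
    args = [(i, opts.alpha, path) for i, path in enumerate(train_files)]
    with Pool(n_worker) as p:
        p.map(compute_dictionary_one_image, args)

    # Stack the sampled responses saved by the workers and cluster them into K visual words
    responses = np.vstack([np.load(os.path.join("../temp/", str(i) + ".npy"))
                           for i in range(len(train_files))])
    kmeans = KMeans(n_clusters=opts.K).fit(responses)
    np.save(os.path.join(opts.out_dir, "dictionary.npy"), kmeans.cluster_centers_)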
def main():
    for ind in range(len(K)):
        start = time.time()
        print('Started at: ', start)
        opts = get_opts()
        opts.L = L[ind]
        opts.K = K[ind]
        opts.alpha = alpha[ind]
        print('filter_scales', opts.filter_scales)
        print('L is', opts.L)
        print('K is', opts.K)
        print('alpha is', opts.alpha)

        n_cpu = util.get_num_CPU()
        visual_words.compute_dictionary(opts, n_worker=n_cpu)
        dictionary = np.load(join(opts.out_dir, 'dictionary.npy'))

        # Q2.1-2.4
        n_cpu = util.get_num_CPU()
        visual_recog.build_recognition_system(opts, n_worker=n_cpu)

        ## Q2.5
        n_cpu = util.get_num_CPU()
        conf, accuracy = visual_recog.evaluate_recognition_system(opts, n_worker=n_cpu)
        print(conf)
        print(accuracy)
        np.savetxt(join(opts.out_dir, 'confmat.csv'), conf, fmt='%d', delimiter=',')
        np.savetxt(join(opts.out_dir, 'accuracy.txt'), [accuracy], fmt='%g')
        print('Finished at: ', time.time())
        print('It took', (time.time() - start) / 60, 'minutes to execute the iteration.')
def compute_dictionary_one_image(args):
    '''
    Extracts a random subset of filter responses of an image and saves it to disk.
    This is a worker function called by compute_dictionary.
    You are free to make your own interface based on how you implement compute_dictionary.
    '''
    # ----- TODO -----
    # Instantiate arguments
    opts = get_opts()
    # inputs: i - index of the training image, alpha - number of random pixel samples,
    # train_files - path of the image
    i, alpha, train_files = args
    alpha = int(alpha)
    # Load the image from its path and convert it to floating point in [0, 1]
    img = Image.open("../data/" + train_files)
    img = np.array(img).astype(np.float32) / 255
    filter_responses = extract_filter_responses(opts, img)  # extract the filter responses
    # Random pixel sampling of alpha locations along each axis
    randSampleX = np.random.choice(filter_responses.shape[0], alpha)
    randSampleY = np.random.choice(filter_responses.shape[1], alpha)
    # Extract the random pixels, giving an array of size alpha x 3F
    filter_responses = filter_responses[randSampleX, randSampleY, :]
    np.save(os.path.join("../feat/", str(i) + '.npy'), filter_responses)
def dmlc_opts(opts):
    """convert from mxnet's opts to dmlc's opts
    """
    args = ['--num-workers', str(opts.num_workers),
            '--num-servers', str(opts.num_servers),
            '--cluster', opts.launcher,
            '--host-file', opts.hostfile,
            '--sync-dst-dir', opts.sync_dst_dir]
    # convert to dictionary
    dopts = vars(opts)
    for key in ['env_server', 'env_worker', 'env']:
        for v in dopts[key]:
            args.append('--' + key.replace("_", "-"))
            args.append(v)
    args += opts.command
    try:
        from dmlc_tracker import opts
    except ImportError:
        print("Can't load dmlc_tracker package. Perhaps you need to run")
        print("    git submodule update --init --recursive")
        raise
    dmlc_opts = opts.get_opts(args)
    return dmlc_opts
def grid_nrbp(ranks=[100]):
    data = []
    baseline_data = []
    opts = get_opts()
    ## Need to specify the following arguments:
    # reverse_ranker_path
    # test_data_path
    # device
    # active_learning_stage
    true_dict, baseline_dict, result_dict, args_dict = testing(opts)
    active_learning = args_dict["active_learning"]
    network_type = args_dict["network_type"]
    num_query = args_dict["num_query"]
    num_passage = args_dict["num_passage"]

    results = {}
    p_forwards = [opts.p_forward]
    p_reverses = [opts.p_reverse]
    for r in ranks:
        for p_forward in p_forwards:
            rating_dict = transform_ground_truth(true_dict, p_forward)
            for p_reverse in p_reverses:
                baseline_nrbp = calculate_metrics(rating_dict, baseline_dict, r, p_reverse)
                model_nrbp = calculate_metrics(rating_dict, result_dict, r, p_reverse)
                results["baseline_nrbp"] = baseline_nrbp
                results["model_nrbp"] = model_nrbp

    obj_writer(results, OUTPUT_PATH + network_type + str(num_query) + "_" +
               str(num_passage) + "_" + str(opts.p_forward) + "_" +
               str(opts.p_reverse) + ".dict")
def compute_dictionary_one_image(args):
    '''
    Extracts a random subset of filter responses of an image and saves it to disk.
    This is a worker function called by compute_dictionary.
    '''
    # ----- TODO -----
    i, alpha, image_path = args
    opts = get_opts()
    img = Image.open(os.path.join(opts.data_dir, image_path))
    img = np.array(img).astype(np.float32) / 255

    # Compute the filter responses and randomly sample alpha pixels per image, so the
    # dictionary is built from alpha*T responses over the T training images.
    filter_response = extract_filter_responses(opts, img)
    H = filter_response.shape[0]
    W = filter_response.shape[1]
    C = filter_response.shape[2]  # 3F channels
    filter_response_reshaped = filter_response.reshape(H * W, C)
    # Sample alpha pixel locations, i.e. alpha rows of the reshaped response matrix
    sel = np.random.choice(H * W, alpha, replace=True)
    random_sampled_pixels = filter_response_reshaped[sel, :]
    np.save(os.path.join(opts.feat_dir, "temp" + str(i) + ".npy"), random_sampled_pixels)
    print('compute_dictionary_one_image completed')
    return
def main():
    opts = get_opts()

    img_path = join(opts.data_dir, 'aquarium/sun_aztvjgubyrgvirup.jpg')
    img = Image.open(img_path)
    img = np.array(img).astype(np.float32) / 255
    filter_responses = visual_words.extract_filter_responses(opts, img)

    n_cpu = util.get_num_CPU()
    visual_words.compute_dictionary(opts, n_worker=1)

    img_path = join(opts.data_dir, 'kitchen/sun_aaqhazmhbhefhakh.jpg')
    img = Image.open(img_path)
    img = np.array(img).astype(np.float32) / 255
    dictionary = np.load(join(opts.out_dir, 'dictionary.npy'))
    wordmap = visual_words.get_visual_words(opts, img, dictionary)

    n_cpu = util.get_num_CPU()
    visual_recog.build_recognition_system(opts, n_worker=n_cpu)

    n_cpu = util.get_num_CPU()
    conf, accuracy, incorrect = visual_recog.evaluate_recognition_system(opts, n_worker=n_cpu)
    print(conf)
    print(accuracy)
    np.savetxt(join(opts.out_dir, 'confmat.csv'), conf, fmt='%d', delimiter=',')
    np.savetxt(join(opts.out_dir, 'accuracy.txt'), [accuracy], fmt='%g')
def main():
    opts = get_opts()

    ## Q1.1
    img_path = join(opts.data_dir, 'kitchen/sun_aasmevtpkslccptd.jpg')
    img = Image.open(img_path)
    img = np.array(img).astype(np.float32) / 255
    filter_responses = visual_words.extract_filter_responses(opts, img)
    util.display_filter_responses(opts, filter_responses)

    ## Q1.2
    n_cpu = util.get_num_CPU()
    visual_words.compute_dictionary(opts, n_worker=n_cpu)

    ## Q1.3
    img_path = join(opts.data_dir, 'windmill/sun_bsngeuxxmgmcsesp.jpg')
    img = Image.open(img_path)
    img = np.array(img).astype(np.float32) / 255
    dictionary = np.load(join(opts.out_dir, 'dictionary.npy'))
    wordmap = visual_words.get_visual_words(opts, img, dictionary)
    util.visualize_wordmap(wordmap)

    ## Q2.1-2.4
    n_cpu = util.get_num_CPU()
    visual_recog.build_recognition_system(opts, n_worker=n_cpu)

    ## Q2.5
    n_cpu = util.get_num_CPU()
    conf, accuracy = visual_recog.evaluate_recognition_system(opts, n_worker=n_cpu)
    print(conf)
    print(accuracy)
    np.savetxt(join(opts.out_dir, 'confmat.csv'), conf, fmt='%d', delimiter=',')
    np.savetxt(join(opts.out_dir, 'accuracy.txt'), [accuracy], fmt='%g')
def main():
    opts = get_opts()

    ## Q1.1
    img_path = join(opts.data_dir, 'kitchen/sun_aasmevtpkslccptd.jpg')
    img = Image.open(img_path)
    img = np.array(img).astype(np.float32) / 255
    filter_responses = visual_words.extract_filter_responses(opts, img)
    util.display_filter_responses(opts, filter_responses)
def main():
    opts = get_opts()

    # Q1.1
    img_path = join(opts.data_dir, 'aquarium/sun_aztvjgubyrgvirup.jpg')
    img = Image.open(img_path)
    img = np.array(img).astype(np.float32) / 255
    filter_responses = visual_words.extract_filter_responses(opts, img)
    util.display_filter_responses(opts, filter_responses)

    # Q1.2
    n_cpu = util.get_num_CPU()
    visual_words.compute_dictionary(opts, n_worker=n_cpu)

    # Q1.3
    ### Uncomment for picture 1 ###
    img_path = join(opts.data_dir, 'kitchen/sun_aasmevtpkslccptd.jpg')
    ### Uncomment for picture 2 ###
    # img_path = join(opts.data_dir, 'aquarium/sun_acrxheaggpuqwdwm.jpg')
    ### Uncomment for picture 3 ###
    # img_path = join(opts.data_dir, 'desert/sun_banypouestzeimab.jpg')
    ####################################################################
    img = Image.open(img_path)
    img = np.array(img).astype(np.float32) / 255
    dictionary = np.load(join(opts.out_dir, 'dictionary.npy'))
    wordmap = visual_words.get_visual_words(opts, img, dictionary)
    util.visualize_wordmap(wordmap)

    # Q2.1-2.4
    n_cpu = util.get_num_CPU()
    visual_recog.build_recognition_system(opts, n_worker=n_cpu)

    # Q2.5
    n_cpu = util.get_num_CPU()
    conf, accuracy = visual_recog.evaluate_recognition_system(opts, n_worker=n_cpu)

    # Q3.2
    # n_cpu = util.get_num_CPU()
    # custom.build_recognition_system(opts, n_worker=n_cpu)
    # n_cpu = util.get_num_CPU()
    # conf, accuracy = custom.evaluate_recognition_system(opts, n_worker=n_cpu)

    print(conf)
    print(accuracy)
    np.savetxt(join(opts.out_dir, 'confmat.csv'), conf, fmt='%d', delimiter=',')
    np.savetxt(join(opts.out_dir, 'accuracy.txt'), [accuracy], fmt='%g')
def main():
    opt = get_opts()
    opt.use_cuda = torch.cuda.is_available()

    # Set seed for reproducibility
    torch.manual_seed(opt.seed)
    torch.cuda.manual_seed(opt.seed)
    np.random.seed(opt.seed)
    random.seed(opt.seed)

    if not os.path.exists(opt.save_dir):
        os.makedirs(opt.save_dir)

    corpus = get_data(opt)
    if opt.mode == 'train':
        train(opt, corpus)
    if opt.mode == 'generate':
        generate_text(opt, corpus)
def main():
    opts = get_opts()

    ## Q1.1
    # img_path = join(opts.data_dir, 'kitchen/sun_aasmevtpkslccptd.jpg')
    # img_path = join(opts.data_dir, 'aquarium/sun_aztvjgubyrgvirup.jpg')
    # img = Image.open(img_path)
    # img = np.array(img).astype(np.float32)/255
    # filter_responses = visual_words.extract_filter_responses(opts, img)
    # util.display_filter_responses(opts, filter_responses)

    ## Q1.2
    # n_cpu = util.get_num_CPU()
    # visual_words.compute_dictionary(opts, n_worker=n_cpu)

    ## Q1.3
    # img_path = join(opts.data_dir, 'aquarium/sun_acusadxqppxaqouk.jpg')
    # img = Image.open(img_path)
    # img = np.array(img).astype(np.float32)/255
    # dictionary = np.load(join(opts.out_dir, 'dictionary.npy'))
    # wordmap = visual_words.get_visual_words(opts, img, dictionary)
    # util.visualize_wordmap(wordmap)

    ## Q2.1-2.4
    # n_cpu = util.get_num_CPU()
    # visual_recog.build_recognition_system(opts, n_worker=n_cpu)

    ## Q2.5
    n_cpu = util.get_num_CPU()
    conf, accuracy = visual_recog.evaluate_recognition_system(opts, n_worker=n_cpu)
    # conf, accuracy = custom.evaluate_recognition_system(opts, n_worker=n_cpu)
    print(conf)
    print(accuracy)
    np.savetxt(join(opts.out_dir, 'confmat.csv'), conf, fmt='%d', delimiter=',')
    np.savetxt(join(opts.out_dir, 'accuracy.txt'), [accuracy], fmt='%g')
def main():
    opts = get_opts()

    # save filepath to one csv file
    train_path = opts.train_folder
    val_path = opts.val_folder
    print("train folder:", train_path)
    print("val folder:", val_path)
    train_csv_name = 'train_data.csv'
    save2csv(path=train_path, csvname=train_csv_name)
    val_csv_name = 'val_data.csv'
    # save2csv(path=val_path, csvname=val_csv_name)
    # csv_name = 'anime_data.csv'
    # save2csv(path=img_path, csvname=csv_name)

    # Can add some transform here

    # Define beta-vae net
    print('latent dim:', opts.latent_dim)
    Model = BetaVAE(in_channels=3,
                    latent_dim=opts.latent_dim,
                    hidden_dims=opts.hidden_dims,
                    beta=opts.beta,
                    gamma=opts.gamma,
                    max_capacity=opts.max_capacity,
                    Capacity_max_iter=opts.Capacity_max_iter,
                    loss_type=opts.loss_type,
                    tau=opts.tau)
    train_dataset = Dataload(imgpath=train_path, csv_name=train_csv_name)
    model_state = None

    print("Start Training!!!!!!!")
    model_state, train_loss, val_loss = Train(Model,
                                              train_dataset,
                                              None,
                                              batch_size=opts.bs,
                                              max_iters=opts.max_iters,
                                              lr=opts.lr,
                                              w_decay=opts.w_decay,
                                              m=opts.m,
                                              output_folder=opts.output_folder)
def extract_list():
    # Load options
    parser = argparse.ArgumentParser(description='Attribute Learner')
    parser.add_argument('--config', type=str,
                        help='Path to config .opt file. Leave blank if loading from opts.py')
    parser.add_argument('--pth', type=str,
                        help='Path to model checkpoint. Leave blank if testing bestmodel')
    parser.add_argument('--input_list', type=str,
                        help='Path to list with image paths')
    parser.add_argument('--output_list', type=str,
                        help='Path to list where to store results')
    conf = parser.parse_args()
    opt = torch.load(conf.config) if conf.config else get_opts()
    opt.ngpu = 1
    opt.batch_size = 16

    print('Loading model ...')
    M = Model(opt)
    checkpoint = torch.load(conf.pth)
    try:
        checkpoint = {key.replace('module.', ''): value
                      for key, value in checkpoint['state_dict'].items()}
    except:
        pass
    M.model.load_state_dict(checkpoint)
    M.model.eval()

    test_loader = datasets.generate_loader(opt, 'test', conf.input_list)
    torch.set_grad_enabled(False)

    out_f = open(conf.output_list, 'w')
    for batch_idx, (data, target) in tqdm(enumerate(test_loader)):
        # print('Extracting batch # {batch_idx} ...')
        data = data.to(M.device)
        output = M.model(data)
        output = torch.cat(output, 1).detach().cpu().numpy()
        log_str = '\n'.join(map(lambda x: ','.join(map(str, x)), output)) + '\n'
        out_f.write(log_str)
    out_f.close()
    print('Extracting done!')
def main():
    opts = get_opts()

    ## Q1.1
    # img_path = join(opts.data_dir, 'aquarium/sun_aztvjgubyrgvirup.jpg')
    # img = Image.open(img_path)
    # img = np.array(img).astype(np.float32)/255.0
    # filter_responses = visual_words.extract_filter_responses(opts, img)
    # util.display_filter_responses(opts, filter_responses)

    ## Q1.2
    # n_cpu = util.get_num_CPU()
    # visual_words.compute_dictionary(opts, n_worker=n_cpu)

    ## Q1.3
    img_path = join(opts.data_dir, 'desert/sun_acrqldhmwdraspza.jpg')
    img = Image.open(img_path)
    img = np.array(img).astype(np.float32) / 255
    dictionary = np.load(join(opts.out_dir, 'dictionary.npy'))
    wordmap = visual_words.get_visual_words(opts, img, dictionary)
    # util.visualize_wordmap(wordmap)

    ## Q2.1-2.4
    n_cpu = util.get_num_CPU()
    visual_recog.get_feature_from_wordmap(opts, wordmap)
    visual_recog.build_recognition_system(opts, n_worker=n_cpu)

    ## Q2.5
    n_cpu = util.get_num_CPU()
    conf, accuracy = visual_recog.evaluate_recognition_system(opts, n_worker=n_cpu)
    print(conf)
    print(accuracy)
    np.savetxt(join(opts.out_dir, 'confmat.csv'), conf, fmt='%d', delimiter=',')
    np.savetxt(join(opts.out_dir, 'accuracy.txt'), [accuracy], fmt='%g')
def main():
    # Load options
    parser = argparse.ArgumentParser(description='Attribute Learner')
    parser.add_argument('--config', type=str,
                        help='Path to config .opt file. Leave blank if loading from opts.py')
    conf = parser.parse_args()
    opt = torch.load(conf.config) if conf.config else get_opts()

    print('===Options==')
    d = vars(opt)
    for k in d.keys():
        print(k, ':', d[k])

    # Fix seed
    random.seed(opt.manual_seed)
    np.random.seed(opt.manual_seed)
    torch.manual_seed(opt.manual_seed)
    torch.cuda.manual_seed_all(opt.manual_seed)
    cudnn.benchmark = True

    # Create working directories
    try:
        os.makedirs(opt.out_path)
        os.makedirs(os.path.join(opt.out_path, 'checkpoints'))
        os.makedirs(os.path.join(opt.out_path, 'log_files'))
        print('Directory {} was successfully created.'.format(opt.out_path))
    except OSError:
        print('Directory {} already exists.'.format(opt.out_path))
        pass

    # Training
    M = Model(opt)
    M.train()
def grid_nrbp(p_forwards=[0.5, 0.9, 1], p_reverses=[0.5, 0.9, 1], ranks=[100]):
    x = []
    y = []
    data = []
    baseline_data = []
    opts = get_opts()
    ## Need to specify the following arguments:
    # reverse_ranker_path
    # test_data_path
    # device
    # active_learning_stage
    true_dict, baseline_dict, result_dict, args_dict = testing(opts)
    active_learning = args_dict["active_learning"]
    network_type = args_dict["network_type"]
    num_query = args_dict["num_query"]
    num_passage = args_dict["num_passage"]

    for r in ranks:
        for p_forward in p_forwards:
            rating_dict = transform_ground_truth(true_dict, p_forward)
            for p_reverse in p_reverses:
                x.append(p_forward)
                y.append(p_reverse)
                baseline_nrbp = calculate_metrics(rating_dict, baseline_dict, r, p_reverse)
                model_nrbp = calculate_metrics(rating_dict, result_dict, r, p_reverse)
                # data.append((model_nrbp - baseline_nrbp) / baseline_nrbp)
                data.append(model_nrbp)
                baseline_data.append(baseline_nrbp)
                print_message("Processed p_forward={}, p_reverse={}".format(p_forward, p_reverse))

    # Write results to csv
    output_results = [active_learning, network_type, num_query, num_passage] + data + baseline_data
    with open(OUTPUT_PATH, mode='a+') as output:
        output_writer = csv.writer(output)
        output_writer.writerow(output_results)
def get_image_feature(args):  # unpacks (ind, img_path, label); the template interface was (opts, img_path, dictionary)
    '''
    Extracts the spatial pyramid matching feature.

    [input]
    * opts      : options
    * img_path  : path of image file to read
    * dictionary: numpy.ndarray of shape (K, 3F)

    [output]
    * feature: numpy.ndarray of shape (K)
    '''
    # ----- TODO -----
    ind, img_path, label = args
    opts = get_opts()
    data_dir = opts.data_dir
    out_dir = opts.out_dir
    SPM_layer_num = opts.L
    dictionary = np.load(join(out_dir, 'dictionary.npy'))
    dict_size = len(dictionary)  # size of dictionary

    img = Image.open("../data/" + img_path)  # read an image
    img = np.array(img).astype(np.float32) / 255  # convert to values in [0, 1]
    wordmap = visual_words.get_visual_words(opts, img, dictionary)  # compute the wordmap for the image
    feat = get_feature_from_wordmap_SPM(opts, wordmap)  # spatial pyramid histogram
    word_hist = get_feature_from_wordmap(opts, wordmap, dict_size)  # histogram for the whole image
    np.savez("../temp/" + "train_" + str(ind) + ".npz", feat=feat, label=label, allow_pickle=True)
    return feat
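# For context, a minimal sketch of how build_recognition_system might dispatch the worker
# above over the training set. The train_files.txt / train_labels.txt listings, the
# multiprocessing.Pool usage, and the trained_system.npz output name are assumptions for
# illustration, not the original implementation.
def build_recognition_system_sketch(opts, n_worker=1):
    from multiprocessing import Pool

    train_files = open(join(opts.data_dir, 'train_files.txt')).read().splitlines()
    train_labels = np.loadtxt(join(opts.data_dir, 'train_labels.txt'), np.int32)
    args = [(i, path, label) for i, (path, label) in enumerate(zip(train_files, train_labels))]
    with Pool(n_worker) as pool:
        features = pool.map(get_image_feature, args)  # one SPM feature per training image

    dictionary = np.load(join(opts.out_dir, 'dictionary.npy'))
    np.savez(join(opts.out_dir, 'trained_system.npz'),
             features=np.vstack(features),
             labels=train_labels,
             dictionary=dictionary,
             SPM_layer_num=opts.L)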
def main():
    opts = get_opts()

    ## Q1.1
    # img_path = join(opts.data_dir, 'laundromat/sun_afrrjykuhhlwiwun.jpg')
    # print("Image path is :", img_path)
    # img = Image.open(img_path)
    # img = np.array(img).astype(np.float32)/255
    # filter_responses = visual_words.extract_filter_responses(opts, img)
    # util.display_filter_responses(opts, filter_responses)

    # Q1.2
    # n_cpu = util.get_num_CPU()
    # visual_words.compute_dictionary(opts, n_worker=n_cpu)

    ## Q1.3
    img_path = join(opts.data_dir, 'windmill/sun_bcyuphldelrgtuwd.jpg')
    img = Image.open(img_path)
    img = np.array(img).astype(np.float32) / 255
    util.visualize_wordmap(img)
    dictionary = np.load(join(opts.out_dir, 'dictionary.npy'))
    wordmap = visual_words.get_visual_words(opts, img, dictionary)
    util.visualize_wordmap(wordmap)
def main():
    opts = get_opts()

    ## Q1.1
    # img_path = join(opts.data_dir, 'kitchen/sun_aasmevtpkslccptd.jpg')
    img_path = join(opts.data_dir, 'aquarium/sun_aztvjgubyrgvirup.jpg')
    # img = Image.open(img_path)
    # img = np.array(img).astype(np.float32)/255
    # filter_responses = visual_words.extract_filter_responses(opts, img)
    # util.display_filter_responses(opts, filter_responses)

    ## Q1.2
    n_cpu = util.get_num_CPU()
    # visual_words.compute_dictionary_one_image(opts, img)
    # visual_words.compute_dictionary(opts, n_worker=n_cpu)

    ## Q1.3
    # img_path = join(opts.data_dir, 'desert/sun_aaqyzvrweabdxjzo.jpg')
    img = Image.open(img_path)
    img = np.array(img).astype(np.float32) / 255
    dictionary = np.load(join(opts.out_dir, 'dictionary.npy'))
    wordmap = visual_words.get_visual_words(opts, img, dictionary)
    util.visualize_wordmap(wordmap)
def main():
    opts = get_opts()

    ## Q3.1 - Hyperparameter tuning
    # print("Q3.1 - Hyperparameter tuning")
    # alpha = [25, 125]
    # filter_scales = [[1, 2], [1, 2, 4]]
    # K = [10, 50]
    # L = [3, 2, 1]
    # tune.tune(alpha, filter_scales, K, L)
    # results = tune.get_results(opts, sorted=True)
    # tune.display_results(results)

    ## Q3.2 - Custom
    print("Q3.2 - Custom system with default parameters")
    alpha = [25]
    filter_scales = [[1, 2]]
    K = [10]
    L = [1]
    # Evaluating default vs D
    D = [1, 5]
    tune.tune(alpha, filter_scales, K, L, D)
    results = tune.get_results(opts, sorted=True)
    tune.display_results(results)

    print("Q3.2 - Custom system with best parameters")
    alpha = [125]
    filter_scales = [[1, 2]]
    K = [50]
    L = [3]
    # Evaluating default vs D
    D = [1, 10]  # Evaluating default vs 0.8
    tune.tune(alpha, filter_scales, K, L, D)
    results = tune.get_results(opts, sorted=True)
    tune.display_results(results)
def rotTest():
    opts = get_opts()
    ratio = opts.ratio  # ratio for BRIEF feature descriptor
    sigma = opts.sigma  # threshold for corner detection using FAST feature detector

    # Q2.1.6
    # Read the image and convert to grayscale
    cv_cover = cv2.imread('../data/cv_cover.jpg')
    img = cv_cover
    locs = []  # N x 2 matrix containing x, y coords of matched point pairs
    hist = []
    num_matches = []
    bin_list = []

    for i in range(36):
        print(i)
        # Rotate image
        rotImg = ndimage.rotate(img, i * 10, reshape=True)
        # Compute features and descriptors, then match features
        img_matches, locs1, locs2 = matchPics(rotImg, img, opts)
        # plotMatches(rotImg, img, img_matches, locs1, locs2)  # display matches between both pictures
        num_matches.append(len(img_matches))
        print(len(img_matches))
        # plt.hist(num_matches, bins=36, range=None, density=False)
        plt.bar(i * 10, height=num_matches[i])  # add the match count for this rotation to the histogram

    plt.title('Histogram of matches')
    plt.ylabel('Number of matches')
    plt.xlabel('Rotation')
    plt.show()
    return
import cv2
from opts import get_opts
# Import necessary functions
from helper import plotMatches
from matchPics import matchPics
from planarH import computeH_ransac
from planarH import compositeH
import matplotlib.pyplot as plt
from imutils import paths
import argparse
import imutils

# Write script for Q2.2.4
opts = get_opts()

# Read in the images
img_left = cv2.imread('../data/pano_left.jpg')
img_right = cv2.imread('../data/pano_right.jpg')
print(img_left.shape)
print(img_right.shape)
img_left = cv2.cvtColor(img_left, cv2.COLOR_BGR2RGB)
img_right = cv2.cvtColor(img_right, cv2.COLOR_BGR2RGB)

matches, locs1, locs2 = matchPics(img_left, img_right, opts)
pair1 = locs1[matches[:, 0]]
pair2 = locs2[matches[:, 1]]
homography = computeH_ransac(pair1, pair2, opts)
right_warped = cv2.warpPerspective(img_right, homography,
                                   (img_left.shape[1], img_left.shape[0]))
images = []
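# The script above stops after warping the right image (the original appears to continue
# by filling the images list). A minimal sketch of one way the panorama could be finished
# is shown below; the shared canvas size and per-pixel maximum blend are illustrative
# assumptions, not the original approach.
import numpy as np

pano = np.maximum(img_left, right_warped)  # overlay the warped right image onto the left frame
plt.imshow(pano)
plt.axis('off')
plt.show()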
import numpy as np
import json
import pandas as pd
import datetime
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from opts import get_opts
import torch.nn as nn
import torch.utils.data
from torch.utils.data import Dataset
from sklearn.metrics import mean_squared_error, r2_score

file = 'data/data_knn_interp.json'
with open(file) as f:
    data = json.load(f)

args = get_opts()
subdata = data['1']
print(len(np.array(subdata['time'][:])))

firstNSequence = 15000
firstDColumn = 5
firstNColData = np.zeros((firstNSequence, firstDColumn))
startDate = datetime.datetime(2004, 2, 28, 0, 58, 15)
startDateInSeconds = int(startDate.timestamp())  # seconds since the epoch (portable, unlike strftime('%s'))
firstNColData[:, 0] = np.array(subdata['time'][:firstNSequence]) + startDateInSeconds
firstNColData[:, 1] = np.array(subdata['voltage'][:firstNSequence])
firstNColData[:, 2] = np.array(subdata['temperature'][:firstNSequence])
firstNColData[:, 3] = np.array(subdata['humidity'][:firstNSequence])
firstNColData[:, 4] = np.array(subdata['light'][:firstNSequence])
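# MinMaxScaler is imported above but not yet used in this excerpt; a minimal sketch of how
# the assembled matrix might be scaled to [0, 1] before being fed to a model follows.
# Scaling all five columns (including the time column) is an assumption for illustration.
scaler = MinMaxScaler()
firstNColDataScaled = scaler.fit_transform(firstNColData)  # shape (15000, 5), values in [0, 1]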
def main():
    opts = get_opts()
    print('L is', opts.L)
    print('K is', opts.K)
    print('alpha is', opts.alpha)
    print()

    # Q1.1
    img_path = join(opts.data_dir, 'kitchen/sun_aasmevtpkslccptd.jpg')
    img = Image.open(img_path)
    img = np.array(img).astype(np.float32) / 255
    filter_responses = visual_words.extract_filter_responses(opts, img)
    # imageio.imsave('../results/filter_responses.jpg', filter_responses)
    util.visualize_wordmap(img)
    util.display_filter_responses(opts, filter_responses)

    # Q1.2
    n_cpu = util.get_num_CPU()
    visual_words.compute_dictionary(opts, n_worker=n_cpu)
    dictionary = np.load(join(opts.out_dir, 'dictionary.npy'))

    ## Q1.3
    img_path = join(opts.data_dir, 'kitchen/sun_aasmevtpkslccptd.jpg')
    img = Image.open(img_path)
    img = np.array(img).astype(np.float32) / 255
    wordmap = visual_words.get_visual_words(opts, img, dictionary)
    util.visualize_wordmap(img)
    util.visualize_wordmap(wordmap)

    img_path = join(opts.data_dir, 'waterfall/sun_bbeqjdnienanmmif.jpg')
    img = Image.open(img_path)
    img = np.array(img).astype(np.float32) / 255
    wordmap = visual_words.get_visual_words(opts, img, dictionary)
    util.visualize_wordmap(img)
    util.visualize_wordmap(wordmap)

    img_path = join(opts.data_dir, 'windmill/sun_bratfupeyvlazpba.jpg')
    img = Image.open(img_path)
    img = np.array(img).astype(np.float32) / 255
    wordmap = visual_words.get_visual_words(opts, img, dictionary)
    util.visualize_wordmap(img)
    util.visualize_wordmap(wordmap)

    img_path = join(opts.data_dir, 'desert/sun_adjlepvuitklskrz.jpg')
    img = Image.open(img_path)
    img = np.array(img).astype(np.float32) / 255
    wordmap = visual_words.get_visual_words(opts, img, dictionary)
    util.visualize_wordmap(img)
    util.visualize_wordmap(wordmap)

    # Q2.1-2.4
    n_cpu = util.get_num_CPU()
    visual_recog.build_recognition_system(opts, n_worker=n_cpu)

    ## Q2.5
    n_cpu = util.get_num_CPU()
    conf, accuracy = visual_recog.evaluate_recognition_system(opts, n_worker=n_cpu)
    print(conf)
    print(accuracy)
    np.savetxt(join(opts.out_dir, 'confmat.csv'), conf, fmt='%d', delimiter=',')
    np.savetxt(join(opts.out_dir, 'accuracy.txt'), [accuracy], fmt='%g')
            osp.join(vis_dir, 'original_epoch{}_batch{}.png'.format(epoch, batch_idx)))
        torchvision.utils.save_image(
            new_image.data,
            osp.join(vis_dir, 'polluted_epoch{}_batch{}.png'.format(epoch, batch_idx)))
        torchvision.utils.save_image(
            delta_tmp.data,
            osp.join(vis_dir, 'delta_epoch{}_batch{}.png'.format(epoch, batch_idx)))
        torchvision.utils.save_image(
            mask.data * 255,
            osp.join(vis_dir, 'mask_epoch{}_batch{}.png'.format(epoch, batch_idx)))


def check_freezen(net, need_modified=False, after_modified=None):
    # print(net)
    cc = 0
    for child in net.children():
        for param in child.parameters():
            if need_modified:
                param.requires_grad = after_modified
        # if param.requires_grad: print('child', cc, 'was active')
        # else: print('child', cc, 'was frozen')
        cc += 1


if __name__ == '__main__':
    opt = get_opts(args.targetmodel)
    main(opt)
def __init__(self):
    super(pipeline, self).__init__()
    self.args = get_opts()
    self.keys = ['time', 'temperature', 'humidity', 'light', 'voltage']
def main():
    opts = get_opts()

    # Paths and device
    current_device = opts.device
    train_data_path = opts.data_dir
    pretrained_path = opts.pretrain_model_path
    model_path = opts.out_dir

    # Training settings
    pretrained = opts.pretrained_option
    num_epochs = opts.num_epochs
    learning_rate = opts.learning_rate
    num_query = opts.num_query
    num_passage = opts.num_passage
    active_learning = opts.active_learning_stage

    # Network settings
    network_type = opts.network_type
    embed_size = opts.embed_size
    num_hidden_nodes = opts.num_hidden_nodes
    num_hidden_layers = opts.num_hidden_layers
    dropout_rate = opts.dropout_rate

    if not os.path.exists(model_path):
        os.makedirs(model_path)

    torch.manual_seed(318)

    if pretrained == "Yes":
        checkpoint = torch.load(pretrained_path)
        network_type = checkpoint['network_type']
        embed_size = checkpoint['embed_size']
        num_hidden_nodes = checkpoint['num_hidden_nodes']
        num_hidden_layers = checkpoint['num_hidden_layers']
        dropout_rate = checkpoint['dropout_rate']
        if network_type == "append":
            net = AppendNet(embed_size=embed_size,
                            num_hidden_nodes=num_hidden_nodes,
                            num_hidden_layers=num_hidden_layers,
                            dropout_rate=dropout_rate)
        if network_type == "residual":
            net = ResidualNet(embed_size=embed_size,
                              num_hidden_nodes=num_hidden_nodes,
                              num_hidden_layers=num_hidden_layers,
                              dropout_rate=dropout_rate)
        net.load_state_dict(checkpoint['model'])
        net.to(current_device)
        optimizer = optim.Adam(net.parameters(), lr=learning_rate)
        optimizer.load_state_dict(checkpoint['optimizer'])
    else:
        if network_type == "append":
            net = AppendNet(embed_size=embed_size,
                            num_hidden_nodes=num_hidden_nodes,
                            num_hidden_layers=num_hidden_layers,
                            dropout_rate=dropout_rate).to(current_device)
        if network_type == "residual":
            net = ResidualNet(embed_size=embed_size,
                              num_hidden_nodes=num_hidden_nodes,
                              num_hidden_layers=num_hidden_layers,
                              dropout_rate=dropout_rate).to(current_device)
        optimizer = optim.Adam(net.parameters(), lr=learning_rate)

    print("Loading data")
    train_pos_dict, train_neg_dict, query_dict, passage_dict = load(train_data_path)
    print("Data successfully loaded.")
    print("Negative Pair dict size: " + str(len(train_neg_dict)))
    print("Positive Pair dict size: " + str(len(train_pos_dict)))
    print("Num of queries: " + str(len(query_dict)))
    print("Num of passages: " + str(len(passage_dict)))
    print("Finish loading.")

    arg_str = active_learning + "_" + network_type + "_" + str(num_query) + "_" + "query" + "_" + str(num_passage) + "_" + "passage"
    unique_path = model_path + arg_str + ".model"
    output_path = model_path + arg_str + ".csv"

    print("Total number of parameters: {}".format(net.parameter_count()))

    for ep_idx in range(num_epochs):
        train_loss = train(net, optimizer, opts, train_pos_dict, train_neg_dict,
                           query_dict, passage_dict)
        print_message([ep_idx, train_loss])
        with open(output_path, mode='a+') as output:
            output_writer = csv.writer(output)
            output_writer.writerow([ep_idx, train_loss])
        torch.save(
            {
                "model": net.state_dict(),
                "optimizer": optimizer.state_dict(),
                "n_epoch": ep_idx,
                "train_loss": train_loss,
                "network_type": network_type,
                "embed_size": embed_size,
                "num_hidden_nodes": num_hidden_nodes,
                "num_hidden_layers": num_hidden_layers,
                "dropout_rate": dropout_rate,
                "num_passage": num_passage,
                "num_query": num_query
            }, unique_path)
import pandas as pd
import numpy as np
import json
import statsmodels.api as sm
from sklearn.metrics import mean_squared_error
from matplotlib import pyplot
from opts import get_opts

ARGS = get_opts()


def main():
    file = ARGS.data_path + ARGS.saving_file
    with open(file, 'r') as f:
        data = json.load(f)

    series = pd.DataFrame(np.array(data['1'][ARGS.column][:ARGS.data_size]),
                          columns=[ARGS.column])
    X = series.values
    size = int(len(X) * 0.8)
    train, test = X[0:size], X[size:len(X)]

    model = sm.tsa.statespace.SARIMAX(train, order=(1, 1, 0),
                                      seasonal_order=(1, 1, 1, 10))
    model_fit = model.fit()
    yhat = model_fit.forecast(len(test))
    error = mean_squared_error(test, yhat)
    print('Test MSE: %.6f' % error)
    # plot
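    # A minimal sketch of the plot the comment above points to, assuming the intent is to
    # compare the held-out test series against the SARIMAX forecast; the labels and colors
    # are illustrative choices, not from the original script.
    pyplot.plot(test, label='observed')
    pyplot.plot(yhat, color='red', label='forecast')
    pyplot.legend()
    pyplot.show()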
import cv2
from matchPics import matchPics
import matplotlib.pyplot as plt
import scipy.ndimage as sci
import os
import opts

# Q2.1.6
# Read the image and convert to grayscale, if necessary

#%%
img = cv2.imread(
    'D:/Academic/CMU/Course/2020Fall/CV/Homework/HW2_Handout/HW2_Handout/data/cv_cover.jpg'
)

#%%
x = []
y = []
opts1 = opts.get_opts()
for i in range(36):
    # Rotate image
    img_rotate = sci.rotate(img, i * 10)
    # Compute features and descriptors, then match features
    matches, locs1, locs2 = matchPics(img, img_rotate, opts1)
    # Update histogram
    x.append(i * 10)            # rotation angle in degrees
    y.append(matches.shape[0])  # match count at this angle

# Display histogram
plt.bar(x, y, 5)
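# The script above builds the bar chart but never labels or shows it; the lines below are
# an assumed completion for illustration only.
plt.title('Number of matches vs. rotation angle')
plt.xlabel('Rotation (degrees)')
plt.ylabel('Number of matches')
plt.show()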