def main(): """Sets up all the configurations for apollocaffe, and ReInspect and runs the trainer.""" parser = apollocaffe.base_parser() parser.add_argument('--config', required=True) args = parser.parse_args() config = json.load(open(args.config, 'r')) if args.weights is not None: config["solver"]["weights"] = args.weights apollocaffe.set_random_seed(config["solver"]["random_seed"]) apollocaffe.set_device(args.gpu) apollocaffe.set_cpp_loglevel(args.loglevel) net = apollocaffe.ApolloNet() image_mean = load_image_mean_from_binproto(config['data']["idl_mean"]) fake_input_en = {"image": np.zeros((1, 3, 227, 227))} forward(net, fake_input_en, deploy=True) if config["solver"]["weights"]: net.load(config["solver"]["weights"]) else: raise Exception('weights file is not provided!') run_socket(net, 13502, image_mean)
def main():
    network_generators = {
        'ZF': ZFGenerator
    }
    args = parse_args(network_generators.keys())
    if args.cfg is not None:
        cfg_from_file(args.cfg)
    apollocaffe.set_random_seed(cfg.RNG_SEED)
    np.random.seed(cfg.RNG_SEED)
    if args.gpu_id >= 0:
        apollocaffe.set_device(args.gpu_id)
    apollocaffe.set_cpp_loglevel(3)

    train_roidb = None
    if args.train_imdb is not None:
        train_imdb = get_imdb(args.train_imdb)
        train_imdb.set_proposal_method(cfg.TRAIN.PROPOSAL_METHOD)
        train_roidb = get_training_roidb(train_imdb)

    test_roidb = None
    if args.test_imdb is not None:
        test_imdb = get_imdb(args.test_imdb)
        test_imdb.set_proposal_method('gt')
        prepare_roidb(test_imdb)
        test_roidb = test_imdb.roidb

    faster_rcnn = FasterRCNN(args, network_generators,
                             train_roidb=train_roidb, test_roidb=test_roidb)
    faster_rcnn.train()
    return 0
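# Hedged invocation sketch for the Faster R-CNN main() above. The flag names
# are inferred from the attributes read off args (args.cfg, args.gpu_id,
# args.train_imdb, args.test_imdb); the script name and imdb names are
# hypothetical:
#
# python train_faster_rcnn.py --cfg experiments/zf.yml --gpu_id 0 \
#     --train_imdb voc_2007_trainval --test_imdb voc_2007_test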
def main():
    apollocaffe.set_device(0)
    # apollocaffe.set_cpp_loglevel(0)
    apollocaffe.set_random_seed(0)
    np.random.seed(0)

    job = sys.argv[1]
    corpus_name = sys.argv[2]

    config = util.Struct(**yaml.load(CONFIG))

    if corpus_name == "abstract":
        train_scenes, dev_scenes, test_scenes = corpus.load_abstract()
    else:
        assert corpus_name == "birds"
        train_scenes, dev_scenes, test_scenes = corpus.load_birds()

    apollo_net = ApolloNet()
    print "loaded data"
    print "%d training examples" % len(train_scenes)

    listener0_model = Listener0Model(apollo_net, config.model)
    speaker0_model = Speaker0Model(apollo_net, config.model)
    sampling_speaker1_model = SamplingSpeaker1Model(apollo_net, config.model)
    compiled_speaker1_model = CompiledSpeaker1Model(apollo_net, config.model)

    if job == "train.base":
        train(train_scenes, dev_scenes, listener0_model, apollo_net,
              config.opt)
        train(train_scenes, dev_scenes, speaker0_model, apollo_net,
              config.opt)
        apollo_net.save("models/%s.base.caffemodel" % corpus_name)
        exit()

    if job == "train.compiled":
        apollo_net.load("models/%s.base.caffemodel" % corpus_name)
        print "loaded model"
        train(train_scenes, dev_scenes, compiled_speaker1_model, apollo_net,
              config.opt)
        apollo_net.save("models/%s.compiled.caffemodel" % corpus_name)
        exit()

    if job in ("sample.base", "sample.compiled"):
        if job == "sample.base":
            apollo_net.load("models/%s.base.caffemodel" % corpus_name)
        else:
            apollo_net.load("models/%s.compiled.caffemodel" % corpus_name)
        print "loaded model"

        if job == "sample.base":
            models = {
                "sampling_speaker1": sampling_speaker1_model,
            }
        elif job == "sample.compiled":
            models = {
                "compiled_speaker1": compiled_speaker1_model,
            }

        name = job.split(".")[1]
        run_experiment("one_different", corpus_name, name, models, dev_scenes)
        run_experiment("by_similarity", corpus_name, name, models, dev_scenes)
        run_experiment("all_same", corpus_name, name, models, dev_scenes)
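# Hedged usage sketch for the job/corpus dispatch above (the script name is
# hypothetical; the job and corpus values are the ones the code checks for):
#
# python main.py train.base abstract       # train listener0 + speaker0
# python main.py train.compiled birds      # fine-tune the compiled speaker
# python main.py sample.compiled birds     # run the three experiments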
def main():
    parser = apollocaffe.base_parser()
    parser.add_argument("--config", required=True)
    args = parser.parse_args()
    config = json.load(open(args.config, 'r'))
    apollocaffe.set_random_seed(config["solver"]["random_seed"])
    apollocaffe.set_device(args.gpu)
    apollocaffe.set_cpp_loglevel(args.loglevel)
    evaluate(config)
def main():
    parser = apollocaffe.base_parser()
    parser.add_argument("--config", required=True)
    args = parser.parse_args()
    config = json.load(open(args.config, 'r'))
    apollocaffe.set_random_seed(config["solver"]["random_seed"])
    apollocaffe.set_device(args.gpu)
    apollocaffe.set_cpp_loglevel(args.loglevel)
    train(config)
def main(): """Sets up all the configurations for apollocaffe, and ReInspect and runs the test.""" parser = apollocaffe.base_parser() parser.add_argument('--config', required=True) args = parser.parse_args() config = json.load(open(args.config, 'r')) print ("Test config file is " + config["data"]["test_idl"] ) apollocaffe.set_random_seed(config["solver"]["random_seed"]) apollocaffe.set_device(0) # gpu test(config)
def main():
    parser = apollocaffe.base_parser()
    parser.add_argument("--config", required=True)
    args = parser.parse_args()
    config = json.load(open(args.config, 'r'))
    apollocaffe.set_random_seed(config["solver"]["random_seed"])
    apollocaffe.set_device(args.gpu)
    apollocaffe.set_cpp_loglevel(args.loglevel)

    list_add = data_root + 'list_all_test.txt'
    list_crop_add = data_root + 'list_det_crop_align_filled.txt'
    feat_add = data_root + 'feat1.txt'
    train_gt = np.loadtxt(list_add,
                          dtype={'names': ('name',), 'formats': ('S200',)})
    train_crop_gt = np.loadtxt(list_crop_add,
                               dtype={'names': ('name', 'label'),
                                      'formats': ('S200', 'i4')})
    train_feat = np.loadtxt(feat_add)

    train_feat_list = []
    train_label_list = []
    assert len(train_crop_gt) == train_feat.shape[0]

    # Strip the directory and the 7-character crop suffix from each crop
    # filename to recover its source image name, then group the per-crop
    # features by source image.
    im_list = train_crop_gt['name']
    for k in xrange(len(im_list)):
        im_list[k] = im_list[k].split('/')[-1][0:-7]
    im_list_uniq = list(set(im_list))
    for s in im_list_uniq:
        idx = s == train_crop_gt['name']
        feat = train_feat[idx, :]
        train_feat_list.append(feat)
        train_label_list.append(s)

    # Second feature set, grouped the same way.
    feat_add2 = data_root + 'feat2.txt'
    train_feat2 = np.loadtxt(feat_add2)  # .reshape((-1, 1))
    train_feat_list2 = []
    assert len(train_crop_gt) == train_feat2.shape[0]
    for s in im_list_uniq:
        idx = s == im_list
        feat2 = train_feat2[idx, :]
        train_feat_list2.append(feat2)

    # One holistic (scene-level) feature vector per unique image.
    holistic_feat_add = data_root + 'feat_centrist_test_d1024.txt'
    holistic_feat = np.loadtxt(holistic_feat_add)
    scene_feat_list = []
    for k, s in enumerate(im_list_uniq):
        idx = s.split('/')[-1] == train_gt['name']
        assert idx.sum() == 1
        scene_feat_list.append(holistic_feat[idx, :])

    test_data = {'feats': train_feat_list,
                 'feats2': train_feat_list2,
                 'labels': train_label_list,
                 'scene_feats': scene_feat_list,
                 'current_idx': 0}
    evaluate(config, test_data)
def main(): """Sets up all the configurations for apollocaffe, and ReInspect and runs the test.""" parser = apollocaffe.base_parser() parser.add_argument('--config', required=True) args = parser.parse_args() config = json.load(open(args.config, 'r')) print("Test config file is " + config["data"]["test_idl"]) apollocaffe.set_random_seed(config["solver"]["random_seed"]) apollocaffe.set_device(0) # gpu test(config)
def main():
    parser = apollocaffe.base_parser()
    parser.add_argument('--config', required=True)
    args = parser.parse_args()
    config = json.load(open(args.config, 'r'))
    if args.weights is not None:
        config["solver"]["weights"] = args.weights
    config["solver"]["start_iter"] = args.start_iter
    apollocaffe.set_random_seed(config["solver"]["random_seed"])
    apollocaffe.set_device(args.gpu)
    apollocaffe.set_cpp_loglevel(args.loglevel)
    train(config)
def main():
    parser = apollocaffe.base_parser()
    parser.add_argument("--config", required=True)
    args = parser.parse_args()
    config = json.load(open(args.config, "r"))
    if args.weights is not None:
        config["solver"]["weights"] = args.weights
    config["solver"]["start_iter"] = args.start_iter
    apollocaffe.set_random_seed(config["solver"]["random_seed"])
    apollocaffe.set_device(args.gpu)
    apollocaffe.set_cpp_loglevel(args.loglevel)
    train(config)
def main(): """Sets up all the configurations for apollocaffe, and ReInspect and runs the trainer.""" parser = apollocaffe.base_parser() parser.add_argument('--config', required=True) args = parser.parse_args() config = json.load(open(args.config, 'r')) if args.weights is not None: config["solver"]["weights"] = args.weights apollocaffe.set_random_seed(config["solver"]["random_seed"]) apollocaffe.set_device(args.gpu) apollocaffe.set_cpp_loglevel(args.loglevel) deploy(config)
def main(): """Sets up all the configurations for apollocaffe, and ReInspect and runs the trainer.""" parser = apollocaffe.base_parser() parser.add_argument('--config', required=True) args = parser.parse_args() config = json.load(open(args.config, 'r')) if args.weights is not None: config["solver"]["weights"] = args.weights config["solver"]["start_iter"] = args.start_iter apollocaffe.set_random_seed(config["solver"]["random_seed"]) apollocaffe.set_device(args.gpu) apollocaffe.set_cpp_loglevel(args.loglevel) train(config)
def setup(config, device_gpu):
    apollocaffe.set_device(device_gpu)
    net = apollocaffe.ApolloNet()
    image_mean = load_image_mean_from_binproto(config["idl_mean"])
    # Run one forward pass on a dummy input to define the net structure
    # before loading the weights.
    fake_input_en = {"image": np.zeros((config['new_width'],
                                        config['new_height']))}
    forward(net, fake_input_en, deploy=True)
    net.draw_to_file(config["schematic_path"])
    if config["weights"]:
        net.load(config["weights"])
    else:
        raise Exception('weights file is not provided!')
    return net
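# A minimal usage sketch for setup() above, assuming the flat config dict
# implied by the keys it reads (all values hypothetical):
#
# config = {"idl_mean": "data/idl_mean.binaryproto",
#           "new_width": 227, "new_height": 227,
#           "weights": "data/model_weights.h5",
#           "schematic_path": "logs/net_schematic.png"}
# net = setup(config, device_gpu=0)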
def main(): """Sets up all the configurations for apollocaffe, and ReInspect and runs the trainer.""" parser = apollocaffe.base_parser() parser.add_argument('--config', required=True) args = parser.parse_args() config = json.load(open(args.config, 'r')) if args.weights is not None: config["solver"]["weights"] = args.weights config["solver"]["start_iter"] = args.start_iter apollocaffe.set_random_seed(config["solver"]["random_seed"]) apollocaffe.set_device(args.gpu) apollocaffe.set_cpp_loglevel(args.loglevel) print json.dumps(config['solver'], indent=4, sort_keys=True) print json.dumps(config['MMD'], indent=4, sort_keys=True) train(config)
def main(): """Sets up all the configurations for apollocaffe, and ReInspect and runs the trainer.""" parser = apollocaffe.base_parser() parser.add_argument('--datasize', required=True) parser.add_argument('--batchsize', required=True) parser.add_argument('--numIter', required=True) args = parser.parse_args() # config = json.load(open(args.config, 'r')) # if args.weights is not None: # config["solver"]["weights"] = args.weights # config["solver"]["start_iter"] = args.start_iter # apollocaffe.set_random_seed(config["solver"]["random_seed"]) apollocaffe.set_device(args.gpu) datasize = int(args.datasize) batchsize = int(args.batchsize) numIter = int(args.numIter) # apollocaffe.set_cpp_loglevel(args.loglevel) train(datasize, batchsize, numIter)
def main():
    config = json.load(open("config.json", 'r'))
    config["data"]["test_idl"] = "./data/brainwash/brainwash_test.idl"
    apollocaffe.set_random_seed(config["solver"]["random_seed"])
    apollocaffe.set_device(0)

    # Now let's load the data mean and the data.
    data_mean = load_data_mean(config["data"]["idl_mean"],
                               config["net"]["img_width"],
                               config["net"]["img_height"],
                               image_scaling=1.0)
    num_test_images = 500
    display = True

    # Warning: load_idl returns an infinite generator. Calling list() before
    # islice() will hang.
    test_list = list(itertools.islice(
        load_idl(config["data"]["test_idl"], data_mean, config["net"], False),
        0, num_test_images))

    # We can now load the snapshot weights.
    net = apollocaffe.ApolloNet()
    net.phase = 'test'
    import time
    s = time.time()
    forward(net, test_list[0], config["net"], True)  # define structure
    print time.time() - s
    net.load("./data/brainwash_800000.h5")  # load pre-trained weights

    # We can now begin to run the model and visualize the results.
    annolist = al.AnnoList()
    net_config = config["net"]
    pix_per_w = net_config["img_width"] / net_config["grid_width"]
    pix_per_h = net_config["img_height"] / net_config["grid_height"]
    for i in range(10):
        inputs = test_list[i]
        timer = Timer()
        timer.tic()
        bbox_list, conf_list = forward(net, inputs, net_config, True)
        timer.toc()
        print ('Detection took {:.3f}s').format(timer.total_time)

        img = np.copy(inputs["raw"])
        png = np.copy(inputs["imname"])
        # Decode each grid cell's prediction into an absolute rectangle.
        all_rects = [[[] for x in range(net_config["grid_width"])]
                     for y in range(net_config["grid_height"])]
        for n in range(len(bbox_list)):
            for k in range(net_config["grid_height"] *
                           net_config["grid_width"]):
                y = int(k / net_config["grid_width"])
                x = int(k % net_config["grid_width"])
                bbox = bbox_list[n][k]
                conf = conf_list[n][k, 1].flatten()[0]
                abs_cx = pix_per_w / 2 + pix_per_w * x + int(bbox[0, 0, 0])
                abs_cy = pix_per_h / 2 + pix_per_h * y + int(bbox[1, 0, 0])
                w = bbox[2, 0, 0]
                h = bbox[3, 0, 0]
                all_rects[y][x].append(Rect(abs_cx, abs_cy, w, h, conf))

        timer.tic()
        acc_rects = stitch_rects(all_rects)
        timer.toc()
        print ('Stitching detected bboxes took {:.3f}s').format(
            timer.total_time)

        if display:
            visualize_detection(img, acc_rects)

        anno = al.Annotation()
        anno.imageName = inputs["imname"]
        for rect in acc_rects:
            r = al.AnnoRect()
            r.x1 = rect.cx - rect.width / 2.
            r.x2 = rect.cx + rect.width / 2.
            r.y1 = rect.cy - rect.height / 2.
            r.y2 = rect.cy + rect.height / 2.
            r.score = rect.true_confidence
            anno.rects.append(r)
        annolist.append(anno)
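# Worked example of the grid decoding above, under hypothetical sizes: with
# img_width = 640 and grid_width = 20, pix_per_w = 32, so cell k = 45 maps to
# y = 45 / 20 = 2 and x = 45 % 20 = 5, and a predicted x-offset of
# bbox[0, 0, 0] = 3 gives abs_cx = 32/2 + 32*5 + 3 = 179 pixels.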
#!/usr/bin/env python2
import caffe
import apollocaffe
from apollocaffe import ApolloNet, layers
import numpy as np
import timeit

# caffe.set_mode_gpu()
apollocaffe.set_device(0)
net = ApolloNet()

batch_size = 64
data = np.random.random(size=(batch_size, 512, 20, 20)).astype(np.float32)
labels = np.random.randint(10, size=(batch_size,)).astype(np.int32).astype(
    np.float32)
# print data.dtype
# print labels.dtype

#def load_mem():
#    net.clear_forward()
#    net.f(layers.MemoryData(
#        "mem", data, labels, tops=["input_top", "label_top"],
#        batch_size=batch_size, channels=512, width=20, height=20))
#
#def load_np():
#    net.clear_forward()
#    net.f(layers.NumpyData("np", data))
#
#load_mem()
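# A hedged sketch of how the commented-out loaders above could be timed with
# the timeit module this script imports (uncomment load_mem/load_np first;
# the call count is arbitrary):
#
# print timeit.timeit(load_mem, number=100)
# print timeit.timeit(load_np, number=100)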
import apollocaffe
from apollocaffe.layers import NumpyData, Wordvec, TheanoGPU, EuclideanLoss
import numpy as np

apollocaffe.set_device(0)
net = apollocaffe.ApolloNet()

for i in range(1000):
    val1 = [[-2, 4, 1]]
    net.clear_forward()
    net.f(NumpyData('val1', val1))
    net.f(NumpyData('wordval', [0]))
    net.f(Wordvec('vec', 3, 1, bottoms=['wordval']))
    net.f(NumpyData('cosine_target', [1]))
    net.f(NumpyData('norm_target', [2]))
    # Cosine similarity between val1 and the learned word vector.
    expr = ('T.dot(x[0], x[1].T) / '
            '(T.dot(x[0], x[0].T) * T.dot(x[1], x[1].T))**0.5')
    net.f(TheanoGPU('cosine', [expr, (1, 1)], bottoms=['val1', 'vec']))
    # Squared L2 norm of the word vector.
    expr2 = 'T.dot(x[0], x[0].T)'
    net.f(TheanoGPU('norm', [expr2, (1, 1)], bottoms=['vec']))
    net.f(EuclideanLoss('loss1', bottoms=['cosine', 'cosine_target']))
    net.f(EuclideanLoss('loss2', bottoms=['norm', 'norm_target']))
    net.backward()
    net.update(lr=0.01)
    if i % 100 == 0:
        print net.loss
        print net.blobs['vec'].data
        print net.blobs['norm'].data
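# Plain-numpy sanity check of the two Theano expressions above, assuming
# x[0] and x[1] are 1x3 row vectors (the values are hypothetical):
import numpy as np
a = np.array([[-2., 4., 1.]])
b = np.array([[1., 2., 2.]])
cosine = a.dot(b.T) / (a.dot(a.T) * b.dot(b.T)) ** 0.5  # 1x1, ~0.582
sq_norm = b.dot(b.T)  # 1x1 squared L2 norm; training drives this toward 2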
import os

import apollocaffe
from apollocaffe.layers import (Concat, Dropout, LstmUnit, InnerProduct,
                                NumpyData, Softmax, SoftmaxWithLoss, Wordvec)

batch_size = 32
vocab_size = 256
zero_symbol = vocab_size - 1
dimension = 250
base_lr = 0.15
clip_gradients = 10
i_temperature = 1.5

parser = apollocaffe.base_parser()
parser.add_argument('--data_source', type=str)
args = parser.parse_args()
apollocaffe.set_device(args.gpu)
apollocaffe.set_random_seed(0)

def get_data():
    if args.data_source:
        data_source = args.data_source
    else:
        data_source = '%s/reddit_ml.txt' % os.path.dirname(
            os.path.realpath(__file__))
    if not os.path.exists(data_source):
        raise IOError('You must download the data with '
                      './examples/apollocaffe/char_model/get_char.sh')
    epoch = 0
    while True:
        with open(data_source, 'r') as f:
            for x in f.readlines():
                try: