Example no. 1
def get_conferences():
    files = util.listdir(CONFERENCE_FOLDER)
    util.mkdir(CONFERENCE_CRALWED_FOLDER)
    cnt = 0
    conf = util.load_json('conf_name.json')
    for file_name in files:
        save_path = os.path.join(CONFERENCE_CRALWED_FOLDER, file_name)
        if util.exists(save_path):
            continue
        data = util.load_json(os.path.join(CONFERENCE_FOLDER, file_name))
        if data['short'] not in conf.keys():
            continue
        html = util.get_page(data['url'])
        subs = get_subs(data['short'], html)
        data['name'] = conf[data['short']]
        data['sub'] = {}
        for sub in subs:
            if sub not in conf.keys():
                continue
            html = util.get_page('http://dblp.uni-trier.de/db/conf/' + sub)
            data['sub'][sub] = {}
            data['sub'][sub]['pub'] = get_publications(html)
            data['sub'][sub]['name'] = conf[sub]
        cnt += 1
        print cnt, len(files), data['short']
        util.save_json(save_path, data)
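
All of these snippets call a mkdir helper (util.mkdir, or a bare mkdir import) instead of os.mkdir, presumably so that creating an already-existing directory is not an error. The helper differs per project; a minimal sketch of the behaviour the call sites appear to assume (a hypothetical reimplementation, not any one project's code):

import errno
import os

def mkdir(path):
    # Create path and any missing parents; swallow only the
    # "already exists" error, so the unconditional calls seen in
    # these examples are safe.
    try:
        os.makedirs(path)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise
    return path  # some variants return the path (cf. Examples no. 24 and 41)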
Example no. 2
    def getTeamSeasonHtml(self):
        html = INVALID_STRING
        if cmp(self._htmlWebSite, INVALID_STRING) == 0:
            print "htmlWebSite has not been initialized yet"
        else:
            #If we have already synced the data we need to fetch the html from local instead of reloading the website again
            pathNeed2Test = CURRENT_PATH + self._Season0 + self._Season1 + '/' + self._Team_id
            print "Constructing path as ", pathNeed2Test
            self._dataStoredPath = pathNeed2Test
            util.mkdir(pathNeed2Test)

            htmlFile = pathNeed2Test + '/' + self._GameType + '.html'
            self._seasonDataHtmlFile = htmlFile
            print "Check if html file exist or not ", htmlFile

            if os.path.isfile(htmlFile):
                print "html file exists, open the file, read it and return the string"
                html = util.openFile(htmlFile)

                #print html
            else:
                print "html file does not exist, now loading the webpage from network"
                html = util.getHtmlFromUrl(self._htmlWebSite)

                if cmp(html, INVALID_STRING)!=0:
                    util.saveFile(htmlFile,html)

                return html


        return html
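
Example no. 2 is a fetch-with-disk-cache pattern: create the cache directory, return the saved HTML if present, otherwise download and save it. The same logic in compact form (a sketch reusing the snippet's own util helpers and INVALID_STRING sentinel):

def get_cached_html(cache_dir, name, url):
    # Return cached HTML when present; otherwise fetch it and,
    # if the fetch succeeded, cache it for next time.
    util.mkdir(cache_dir)
    html_file = os.path.join(cache_dir, name + '.html')
    if os.path.isfile(html_file):
        return util.openFile(html_file)
    html = util.getHtmlFromUrl(url)
    if html != INVALID_STRING:
        util.saveFile(html_file, html)
    return html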
Example no. 3
 def sub(self):
     util.mkdir(self.log)
     sub = SubWithProcId.SubWithProcId()
     sub.setlog(self.log)
     logger.debug("self.job = {}".format(self.job))
     sub.setpath(os.path.abspath(self.job))
     sub.sub()
Example no. 4
 def ajob(self, dst, name):
     logger.info("Process " + dst)
     logger.info('each job contains about {} {}'.format(self.size, 'G dsts'))
     dsts = int(do('ls -1 -F ' + dst + r'  | grep -v [/$] | wc -l'))
     logger.debug("total dsts: {}".format(dsts))
     size = int(int(do('du ' + dst).split()[0]) / 1024. / 1024. / self.size)
     job = self.job + name
     root = self.rawpth + name
     util.mkdir(job)
     util.mkdir(root)
     j = subjobs.subjobs()
     j.setbody(self.body)
     jobnum = self._num
     if jobnum < size:
         jobnum = size + 1
     if dsts < jobnum:
         jobnum = dsts
     j.setjobnum(jobnum)
     j.setjobname("jobs_")
     j.setname(self.rootnm)
     j.setdstpath(dst)
     j.setjobpath(job)
     j.setProcesser(20)
     j.drop(self._drop)
     j.setrootpath(root)
     j.jobs()
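
The job-count logic in Example no. 4 first raises jobnum to size + 1 when the data is larger than expected, then caps it at the number of dsts. Restated on its own (a sketch with the instance attributes turned into parameters):

def clamp_jobnum(num, size, dsts):
    # At least size + 1 jobs, but never more jobs than there are dsts.
    jobnum = num
    if jobnum < size:
        jobnum = size + 1
    if dsts < jobnum:
        jobnum = dsts
    return jobnum  # e.g. clamp_jobnum(10, 15, 12) == 12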
Example no. 5
def addDimFaceData():
    face_data_dir = '/home/SSD3/maheen-data/face_data/npy'
    out_dir = '/home/SSD3/maheen-data/face_data/npy_dimAdd'
    util.mkdir(out_dir)

    face_data_file = os.path.join(face_data_dir, 'data_list.txt')
    in_files = util.readLinesFromFile(face_data_file)

    args = []
    for idx, in_file in enumerate(in_files):
        out_file = in_file.replace(face_data_dir, out_dir)

        if os.path.exists(out_file):
            continue

        folder_curr = out_file[:out_file.rindex('/')]
        util.mkdir(folder_curr)
        args.append((idx, in_file, out_file))

    print len(args)
    # for arg in args:
    #   addDim(arg);
    #   break;
    p = multiprocessing.Pool(multiprocessing.cpu_count())
    p.map(addDim, args)
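
Examples no. 5 and 6 (and several later ones) share one skeleton: build an args list, skip outputs that already exist so reruns resume cheaply, then fan the work out with multiprocessing.Pool.map. A self-contained version of that skeleton (process_one is a hypothetical stand-in for the per-file worker):

import multiprocessing
import os

def process_one(arg):
    # Per-file work would go here.
    idx, in_file, out_file = arg

def run_parallel(in_files, out_dir):
    args = []
    for idx, in_file in enumerate(in_files):
        out_file = os.path.join(out_dir, os.path.basename(in_file))
        if os.path.exists(out_file):
            continue  # already done; supports restarts
        args.append((idx, in_file, out_file))
    pool = multiprocessing.Pool(multiprocessing.cpu_count())
    pool.map(process_one, args)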
Example no. 6
def saveFaceBBoxIm():
    path_txt = '/home/laoreja/deep-landmark-master/dataset/train/trainImageList.txt'
    path_pre = '/home/laoreja/deep-landmark-master/dataset/train'
    path_im, bbox, anno_points = parseAnnoFile(path_txt, path_pre, face=True)

    out_dir_im = '/home/SSD3/maheen-data/face_data/im'
    out_dir_npy = '/home/SSD3/maheen-data/face_data/npy'
    args = []
    args_bbox_npy = []

    for idx, path_im_curr, bbox_curr, key_pts in zip(range(len(path_im)),
                                                     path_im, bbox,
                                                     anno_points):
        path_curr, file_name = os.path.split(path_im_curr)
        path_curr = path_curr.split('/')
        path_pre_curr = path_curr[-1]

        out_dir_curr = os.path.join(out_dir_im, path_pre_curr)
        out_dir_npy_curr = os.path.join(out_dir_npy, path_pre_curr)

        util.mkdir(out_dir_curr)
        util.mkdir(out_dir_npy_curr)

        out_file = os.path.join(out_dir_curr, file_name)
        args.append((path_im_curr, out_file, bbox_curr, idx))

        out_file_npy = os.path.join(out_dir_npy_curr,
                                    file_name[:file_name.rindex('.')] + '.npy')
        args_bbox_npy.append((bbox_curr, key_pts, out_file_npy, idx))
        # break;

    print len(args_bbox_npy)
    p = multiprocessing.Pool(multiprocessing.cpu_count())
    # p.map(saveBBoxIm,args);
    p.map(saveBBoxNpy, args_bbox_npy)
Example no. 8
def get_journals():
    pos, cnt = 1, 0
    util.mkdir(JOURNAL_FOLDER)
    while True:
        html = util.get_page(JOURNAL_URL + str(pos))
        links = util.find_journals(html)
        once_cnt = 0
        for link in links:
            if link[0] == '' or '?' in link[0]:
                continue
            data = {}
            data['type'] = 'journal'
            data['short'] = link[0]
            data['name'] = link[1]
            data['url'] = 'http://dblp.uni-trier.de/db/journals/' + data[
                'short']
            util.save_json(
                os.path.join(JOURNAL_FOLDER, util.hex_hash(data['short'])),
                data)
            cnt += 1
            once_cnt += 1
        if once_cnt == 0:
            break
        pos += 100
        print 'Journal', cnt
Example no. 9
def script_saveToExploreBoxes():
    # dir_mat_overlap='/disk3/maheen_data/headC_160_noFlow_bbox/mat_overlaps_1000';
    dir_mat_overlap='/disk3/maheen_data/pedro_val/mat_overlap';
    scratch_dir='/disk3/maheen_data/scratch_dir'
    util.mkdir(scratch_dir);
    out_file=os.path.join(scratch_dir,'them_neg_box.p');
    
    
    overlap_files=util.getFilesInFolder(dir_mat_overlap,ext='.npz');
    print len(overlap_files)
    to_explore=[];
    
    for idx_overlap_file,overlap_file in enumerate(overlap_files):
        print idx_overlap_file
        meta_data=np.load(overlap_file);
        pred_boxes=meta_data['pred_boxes'];
        # print pred_boxes.shape
        min_boxes=np.min(pred_boxes,axis=1);
        num_neg=np.sum(min_boxes<0);
        if num_neg>0:
            to_explore.append((overlap_file,pred_boxes));

    print len(to_explore);

    pickle.dump(to_explore,open(out_file,'wb'));
Example no. 10
def create_videos(dirs):
    util.mkdir(VIDEO_DIR)
    for dir in dirs:
        basename = os.path.basename(dir)
        target = os.path.join(VIDEO_DIR, basename)
        cmd = f"ffmpeg -f image2 -pattern_type glob -i '{dir}/*.png' -t 30 -c:v libx264 -profile:v high -crf 25 -pix_fmt yuv420p {target}.mp4"
        subprocess.call(cmd, shell=True)
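
Example no. 10 assembles the ffmpeg command as a single shell string, which breaks if a directory name contains spaces or quotes. The same invocation with an argument list avoids shell quoting entirely (a sketch, not the original project's code):

import os
import subprocess

def create_video(frames_dir, target):
    # ffmpeg expands the *.png pattern itself via -pattern_type glob,
    # so the pattern is passed through literally.
    cmd = ['ffmpeg', '-f', 'image2', '-pattern_type', 'glob',
           '-i', os.path.join(frames_dir, '*.png'),
           '-t', '30', '-c:v', 'libx264', '-profile:v', 'high',
           '-crf', '25', '-pix_fmt', 'yuv420p', target + '.mp4']
    subprocess.call(cmd)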
Example no. 11
    def save_one_img_of_batch(self, batch, dirpath, fname):
        util.mkdir(dirpath)

        imgpath = osp.join(dirpath, fname)
        assert len(batch.shape) == 4
        # img = util.saveTensorAsImg(batch[0], imgpath)
        torchvision.utils.save_image(batch[0], imgpath)
Example no. 12
def main():
    parser = get_parser()
    args = parser.parse_args()

    # load model.  model_options defined in models/__init__.py
    model = sk_model_options[args.model](args.choice, args.freq_floor)

    # load data
    data_path = data_paths[args.dataset]
    train_set, dev_set, test_set = get_datasets(model.batch_size,
                                                data_path,
                                                model.preprocess_inputs,
                                                sk=True)

    print 'training...'
    train(model, train_set)
    print 'done training.'

    truth_file = os.path.join(data_path, 'truth.jsonl')
    mkdir(os.path.join(CKPT, args.sess_name))
    results_dir = os.path.join(CKPT, args.sess_name, 'results')
    mkdir(results_dir)
    print 'evaluating...'
    evaluate(model, train_set, results_dir, 'train', truth_file)
    evaluate(model, dev_set, results_dir, 'dev', truth_file)
    evaluate(model, test_set, results_dir, 'test', truth_file)
    print 'done evaluating.'
Example no. 13
def get_base_parses(run_id, trials_per=800, reverse=False, dry_run=False):
    print('run_id: %i' % run_id)
    print('Loading model...')
    type_model = TypeModel().eval()
    if torch.cuda.is_available():
        type_model = type_model.cuda()
    score_fn = lambda parses: model_score_fn(type_model, parses)

    print('Loading classification dataset...')
    dataset = ClassificationDataset(osc_folder='./one-shot-classification')
    run = dataset.runs[run_id]
    if reverse:
        imgs = run.test_imgs
    else:
        imgs = run.train_imgs
    run_dir = './run%0.2i' % (run_id + 1)
    save_dir = os.path.join(run_dir, 'base_parses')
    if not dry_run:
        mkdir(run_dir)
        mkdir(save_dir)

    print('Collecting top-K parses for each train image...')
    nimg = len(imgs)
    for i in range(nimg):
        start_time = time.time()
        parses, log_probs = get_topK_parses(imgs[i],
                                            k=5,
                                            score_fn=score_fn,
                                            configs_per=1,
                                            trials_per=trials_per)
        total_time = time.time() - start_time
        print('image %i/%i took %s' % (i + 1, nimg, time_string(total_time)))
        if dry_run:
            continue
        save_img_results(save_dir, i, parses, log_probs, reverse)
Example no. 14
 def load(cls, conf, lang, bert=None):
     mkdir(conf.cache_dir)
     fasttext_emb = conf.fasttext_emb_file if conf.use_fasttext else None
     fname = (
         f"{conf.dataset}.{lang}." +
         (f"max{conf.max_ninst}." if conf.max_ninst else "") +
         (f"maxeval{conf.max_eval_ninst}." if conf.max_eval_ninst else "") +
         (f"cv{conf.crossval_idx}." if conf.crossval_idx is not None else "") +
         (f"bert{conf.bert_max_seq_len}." if bert is not None else "") +
         (f"fasttext." if fasttext_emb is not None else "") +
         f"vs{conf.vocab_size}.{conf.tag}." +
         (f"{conf.tag_scheme}." if conf.tag_scheme else "") +
         "pt"
         )
     cache_file = conf.cache_dir / fname
     ds = None
     try:
         print("loading", cache_file)
         ds = torch.load(cache_file)
         print("loaded", cache_file)
         ds.bpemb = BPEmb(
             lang=conf.bpemb_lang,
             vs=conf.vocab_size,
             dim=conf.bpemb_dim,
             add_pad_emb=True)
     except FileNotFoundError:
         pass
     if ds is None:
         print(f"Loading dataset {conf.dataset} {lang}")
         ds = cls(conf, lang, bert=bert)
         bpemb = ds.bpemb
         ds.bpemb = None  # cannot pickle SwigPyObject
         torch.save(ds, cache_file)
         ds.bpemb = bpemb
     return ds
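
Example no. 14 composes a cache filename from the active config, tries torch.load, and falls back to building and saving the dataset on FileNotFoundError. With hypothetical settings, say conf.dataset = 'wikiann', lang = 'en', conf.vocab_size = 100000, conf.tag = 'ner', and every optional part disabled, the composed fname comes out as:

wikiann.en.vs100000.ner.pt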
Example no. 15
def script_reSaveMatOverlap():

    dir_old='/disk3/maheen_data/pedro_val/mat_overlap';
    dir_new='/disk3/maheen_data/pedro_val/mat_overlap_check';

    util.mkdir(dir_new);
    
    path_to_im='/disk2/ms_coco/val2014';
    path_to_gt='/disk2/mayExperiments/validation_anno';

    mat_overlap_files=util.getFilesInFolder(dir_old,ext='.npz');
    im_names=util.getFileNames(mat_overlap_files,ext=False);

    args=[];
    for idx,(mat_overlap_file,im_name) in enumerate(zip(mat_overlap_files,im_names)):
        gt_file=os.path.join(path_to_gt,im_name+'.npy');
        im_file=os.path.join(path_to_im,im_name+'.jpg');
        out_file=os.path.join(dir_new,im_name+'.npz');
        # if os.path.exists(out_file):
        #     continue;

        args.append((mat_overlap_file,gt_file,im_file,out_file,idx));

    p = multiprocessing.Pool(32);
    p.map(fixMatOverlap, args);    
Example no. 16
    def build_test_res_dir(self):
        assert self.opt[CHECKPOINT_PATH]
        modelpath = pathlib.Path(self.opt[CHECKPOINT_PATH])

        # when passing valid_ds, ignore ds.name and use valid_ds.name instead.
        ds_type = VALID_DATA if self.opt[VALID_DATA][INPUT] else DATA
        fname = modelpath.name + '@' + self.opt[ds_type][NAME]
        dirpath = modelpath.parent / TEST_RESULT_DIRNAME

        if (dirpath / fname).exists():
            if len(os.listdir(dirpath / fname)) == 0:
                # an existing but empty dir
                pass
            else:
                input_str = input(
                    f'[ WARN ] Result directory "{fname}" exists. Press ENTER to overwrite or input suffix to create a new one:\n> New name: {fname}.')
                if input_str == '':
                    console.log(f"[ WARN ] Overwrite result_dir: {fname}")
                    pass
                else:
                    fname += '.' + input_str
            # fname += '.new'

        dirpath /= fname
        util.mkdir(dirpath)
        console.log('TEST - Result save path:')
        console.log(str(dirpath))

        global TEST_DIRPATH
        TEST_DIRPATH = dirpath
        return str(dirpath)
Example no. 17
def codeProject(args,flag,data):
  PARAM_KEY = 1
  PARAM_PATH = 2
  PARAM_FORMATTER = 3
  ARGUMENTS = len(args)-1

  # JSON mapping files and storage of this
  if( keyExists("projects",args[1])):
    if( "stdout" in args[2]):
      project = json.loads(load("projects/"+args[PARAM_KEY])); # Uses key value storage
      directory = args[PARAM_PATH] + "/" + args[PARAM_KEY]
      
      mkdir(directory)
      for x in project.keys(): # Reflect that with here
        _file = json.loads(load("files/"+x));
        out = '';
        for y in _file:
          block = str(load("blocks/"+ y))
          if(ARGUMENTS == PARAM_FORMATTER): # Alter all the blocks in said fashion
            block = format.block(block, args[PARAM_FORMATTER])     
          out += block
        # Output the file with the correct file name
        save(directory + "/" + project[x],out)

  else:
    error("Error: Project does not exist")
Example no. 18
def moveFilesIntoFolders(in_dir,mat_file,out_dir,out_file_commands,pad_zeros_in=8,pad_zeros_out=4):
	arr=scipy.io.loadmat(mat_file)['ranges'];
	# videos=np.unique(arr);
	commands=[];
	for shot_no in range(arr.shape[1]):
		print shot_no,arr.shape[1];
		start_idx=arr[0,shot_no];
		end_idx=arr[1,shot_no];
		video_idx=arr[2,shot_no];
		out_dir_video=os.path.join(out_dir,str(video_idx));
		util.mkdir(out_dir_video);
		# print 
		# raw_input();
		shot_idx=np.where(shot_no==np.where(video_idx==arr[2,:])[0])[0][0]+1;
		out_dir_shot=os.path.join(out_dir_video,str(shot_idx));
		util.mkdir(out_dir_shot);

		# print start_idx,end_idx
		for idx,frame_no in enumerate(range(start_idx,end_idx+1)):
			in_file=os.path.join(in_dir,padZeros(frame_no,pad_zeros_in)+'.jpg');
			out_file=os.path.join(out_dir_shot,'frame'+padZeros(idx+1,pad_zeros_out)+'.jpg');
			command='mv '+in_file+' '+out_file;
			commands.append(command);
	print len(commands);
	util.writeFile(out_file_commands,commands);
Example no. 19
    def __init__(self, opt, running_modes):
        '''
        logger_img_group_names: image group names in the wandb logger. Recommended: ['train', 'valid']
        '''

        super().__init__()
        self.save_hyperparameters(opt)
        console.log('Running initialization for BaseModel')

        if IMG_DIRPATH in opt:
            # in training mode.
            # if in test mode, configLogging is not called.
            if TRAIN in running_modes:
                self.train_img_dirpath = osp.join(opt[IMG_DIRPATH], TRAIN)
                util.mkdir(self.train_img_dirpath)
            if VALID in running_modes and opt[VALID_DATA][INPUT]:
                self.valid_img_dirpath = osp.join(opt[IMG_DIRPATH], VALID)
                util.mkdir(self.valid_img_dirpath)

        self.opt = opt
        self.MODEL_WATCHED = False  # for wandb watching model
        self.global_valid_step = 0

        assert isinstance(running_modes, Iterable)
        self.logger_image_buffer = {k: [] for k in running_modes}
Example no. 20
def initialize():
  print '\nWelcome to the interactive Svndae configuration!'
  options = get_configuration_defaults()
  print "\nInitializing file system in '%s'" % options['path']
  util.mkdir(options['path'])
  util.mkdir(join(options['path'],options['dir']))
  print "Generating configuration file '%s'" % options['conf']
  SvndaeConfig.generate_config(options['path'],options['conf'],options['dir'],options['file'])
  config = SvndaeConfig(options['path'],name=options['conf'])
  print "Adding default administrative groups"
  config.create_group(init.ADMIN_GROUP)
  config.add_permission(init.ADMIN_GROUP,SvndaeConfig._WRITE,SvndaeConfig._ALL_REPOS)
  config.add_permission(init.ADMIN_GROUP,SvndaeConfig._READ,SvndaeConfig._ALL_REPOS)
  while True:
    correct_in = raw_input('Would you like to add any repository ACLs? ([y]/n) ')
    if correct_in == '' or correct_in == 'y':
      get_repo_manage(config)
      break
    elif correct_in == 'n':
      break
    else:
      print_confirmation_error()
  while True:
    correct_in = raw_input('Would you like to add any additional groups? ([y]/n) ')
    if correct_in == '' or correct_in == 'y':
      get_user_groups(config)
      break
    elif correct_in == 'n':
      break
    else:
      print_confirmation_error()
  print_final()
Example no. 21
def rescaleImAndSaveMeta(img_paths,meta_dir,power_scale_range=(-2,1),step_size=0.5):
	img_names=util.getFileNames(img_paths);
	power_range=np.arange(power_scale_range[0],power_scale_range[1]+1,step_size);
	scales=[2**val for val in power_range];

	scale_infos=[];
	for idx,scale in enumerate(scales):
		out_dir_curr=os.path.join(meta_dir,str(idx));
		util.mkdir(out_dir_curr);
		scale_infos.append((out_dir_curr,scale));

	args=[];
	idx=0;
	for idx_img,img_path in enumerate(img_paths):
		for out_dir_curr,scale in scale_infos:
			out_file=os.path.join(out_dir_curr,img_names[idx_img]);
			
			if os.path.exists(out_file):
				continue;

			args.append((img_path,out_file,scale,idx));
			idx=idx+1;

	p=multiprocessing.Pool(multiprocessing.cpu_count());
	p.map(rescaleImAndSave,args);
Example no. 22
def script_makeNegImages():

	in_dir='/disk2/aprilExperiments/testing_neg_fixed_test/'
	out_dir=os.path.join(in_dir,'visualizing_negs');
	util.mkdir(out_dir);


	# return
	im_paths=[os.path.join(in_dir,file_curr) for file_curr in os.listdir(in_dir) if file_curr.endswith('.png')];
	for im_path in im_paths:
		print im_path
		bbox=np.load(im_path.replace('.png','_bbox.npy'));
		crop_box=np.load(im_path.replace('.png','_crop.npy'));

		# print im_path.replace('.png','_bbox.npy')
		# print im_path.replace('.png','_crop.npy')
		# break;

		im = Image.open(im_path);
		im=drawCropAndBBox(im,bbox,crop_box)
		
		# write to stdout
		im.save(os.path.join(out_dir,im_path[im_path.rindex('/')+1:]), "PNG");
		# break;
	visualize.writeHTMLForFolder(out_dir,ext='png',height=300,width=300);
Example no. 23
def run(method_name):
    processed_fnames = []
    imgs = glob(args.input)
    c.log(f'Method: {method_name}, Input Images:')
    c.log(imgs)
    model = method_map[method_name]()
    dstdir = osp.join(args.output, method_name)
    util.mkdir(dstdir)
    for x in track(imgs):
        img = cv2.imread(x)
        res = model.process_one(np.array(img, dtype=np.uint8))

        # save result img:
        fname = osp.basename(x)
        if fname not in processed_fnames:
            processed_fnames.append(fname)
        else:
            new_fname = fname + '.dup.png'
            processed_fnames.append(new_fname)
            c.log(f'[*] {fname} already exists, rename to {new_fname}')
            fname = new_fname

        dst = osp.join(dstdir, fname)
        # if not osp.exists(dstdir):
        #     os.makedirs(dstdir)
        cv2.imwrite(dst, res)
Example no. 24
def example():
    """
    Comparison of my own neighbor classifier with sklearn.neighbors.RadiusNeighborsClassifier
    """

    # measure classification performance of both classifiers for the full dataset
    aprint('\nTest performance of classifiers for full dataset', fmt='bi')
    odir = mkdir(os.path.join(_par_.odir, 'full_dataset'))
    test_performance(odir=odir)
    aprint('\nMy NeighborsClassifier should have slightly better performance'
           '\nthan sklearn.neighbors.RadiusNeighborsClassifier.')

    # classify the feature space using the full dataset and create figures
    classify_feature_space(odir=odir)

    # measure classification performance of selected data containing
    # 50 samples of versicolor and the first 20 of virginica (asymmetric label frequency)
    aprint('\nTest performance of classifiers for asymmetric data', fmt='bi')
    aprint(
        '(using no setosa, all 50 versicolor, and only 20 virginica samples)')
    odir = mkdir(os.path.join(_par_.odir, 'asymmetric'))
    test_performance(_par_.xx[50:120], _par_.yy[50:120], odir=odir)
    aprint(
        '\nMy NeighborsClassifier should have significantly better performance'
        '\nthan sklearn.neighbors.RadiusNeighborsClassifier.')

    # classify the feature space using asymmetric data and create figures
    classify_feature_space(_par_.xx[50:120], _par_.yy[50:120], odir=odir)

    # compare runtime of classifiers
    aprint('\nCompare execution time of classifiers', fmt='bi')
    compare_runtime()
Example no. 25
def save_new_parses(parses_j, log_probs_j, save_dir, K_per_img, test_id, reverse=False):
    """
    i : train image index
    k : parse index
    """
    appendix_i = 'test' if reverse else 'train'
    appendix_j = 'train' if reverse else 'test'
    curr = 0
    for train_id, K in K_per_img.items():
        # get savedir paths
        save_dir_i = os.path.join(save_dir, appendix_i+'_%0.2i' % train_id)
        mkdir(save_dir_i)
        save_dir_ij = os.path.join(save_dir_i, appendix_j+'_%0.2i' % test_id)
        mkdir(save_dir_ij)
        # get data subset
        parses_ij = parses_j[curr : curr+K]
        log_probs_ij = log_probs_j[curr : curr+K]
        curr += K
        # save log-probs
        lp_file = os.path.join(save_dir_ij, 'log_probs.pt')
        torch.save(log_probs_ij, lp_file)
        # save parses
        for k in range(K):
            parse = parses_ij[k]
            parse_file = os.path.join(save_dir_ij, 'parse_%i.pt' % k)
            torch.save(parse.state_dict(), parse_file)
Example no. 26
    def makepkg(self, path):
        """Creates an Arch Linux package archive.
        
        A package archive is generated in the location 'path', based on the data
        from the object.
        """
        self.path = os.path.join(path, self.filename())

        curdir = os.getcwd()
        tmpdir = tempfile.mkdtemp()
        os.chdir(tmpdir)

        # Generate package file system
        for f in self.files:
            util.mkfile(f, f)
            self.size += os.lstat(self.parse_filename(f))[stat.ST_SIZE]

        # .PKGINFO
        data = ["pkgname = %s" % self.name]
        data.append("pkgver = %s" % self.version)
        data.append("pkgdesc = %s" % self.desc)
        data.append("url = %s" % self.url)
        data.append("builddate = %s" % self.builddate)
        data.append("packager = %s" % self.packager)
        data.append("size = %s" % self.size)
        if self.arch:
            data.append("arch = %s" % self.arch)
        for i in self.license:
            data.append("license = %s" % i)
        for i in self.replaces:
            data.append("replaces = %s" % i)
        for i in self.groups:
            data.append("group = %s" % i)
        for i in self.depends:
            data.append("depend = %s" % i)
        for i in self.optdepends:
            data.append("optdepend = %s" % i)
        for i in self.conflicts:
            data.append("conflict = %s" % i)
        for i in self.provides:
            data.append("provides = %s" % i)
        for i in self.backup:
            data.append("backup = %s" % i)
        util.mkfile(".PKGINFO", "\n".join(data))

        # .INSTALL
        if any(self.install.values()):
            util.mkfile(".INSTALL", self.installfile())

        # safely create the dir
        util.mkdir(os.path.dirname(self.path))

        # Generate package archive
        tar = tarfile.open(self.path, "w:gz")
        for i in os.listdir("."):
            tar.add(i)
        tar.close()

        os.chdir(curdir)
        shutil.rmtree(tmpdir)
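
The metadata loop in makepkg serializes one "key = value" line per field into .PKGINFO. With hypothetical field values, the generated file would look like:

pkgname = dummy
pkgver = 1.0-1
pkgdesc = a test package
url = https://www.archlinux.org
builddate = 1234567890
packager = Arch Tester
size = 4096
arch = any
depend = glibc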
Example no. 27
def scrape_sources(since, output_dir):
    base_url = 'http://resource.ufocatch.com/atom/edinetx/'
    namespace = '{http://www.w3.org/2005/Atom}'
    page = 1
    total_count = 0

    util.mkdir(output_dir)

    while True:
        response_string = request(base_url, page)
        ET_tree = ET.fromstring(response_string)
        ET.register_namespace('', namespace[1:-1])

        sources, done = get_sources(ET_tree, namespace, since)
        total_count += len(sources)
        if len(sources) > 0:
            out_file_path = f'{output_dir}/source_{page}.json'
            save(out_file_path, sources)

        if done:
            print(f'Done!')
            break
        print(f'processed {page}th page [total: {total_count} sources]')
        page += 1
        sleep(0.5)
Example no. 28
    def get_santiago_address(self):
        if 'santiago' in cfg.users['admin'] and 'address' in cfg.users[
                'admin']['santiago']:
            return cfg.users['admin']['santiago']['address']
        else:
            admin = cfg.users['admin']
            admin['santiago'] = {}

        with open("/etc/tor/torrc", 'r') as INF:
            rc = INF.read()

        self.santiago_dir = os.path.join(cfg.file_root, "data", "santiago",
                                         "tor")
        self.tor_dir = os.path.join(self.santiago_dir, "general")
        u.mkdir(self.santiago_dir)
        os.system('chmod a+w %s' % self.santiago_dir)
        hidden_service_config = "HiddenServiceDir %s\nHiddenServicePort 80 127.0.0.1:%d" % (
            self.tor_dir, santiago_port)
        if hidden_service_config in rc:
            ## get info from dir (but how? we need perms)
            ## just fake it for now
            admin['santiago']['address'] = "b5wycujkfh2jxfdo.onion"
            cfg.users['admin'] = admin
            return cfg.users['admin']['santiago']['address']
        print "Need to add these two lines to /etc/torrc:\n%s" % hidden_service_config
        return ""
Example no. 30
def script_saveHumanOnlyNeg():
    out_file='/disk2/aprilExperiments/positives_160_human.txt'
    out_dir='/disk2/aprilExperiments/negatives_npy_onlyHuman';
    util.mkdir(out_dir);
    im_pre='COCO_train2014_'

    lines=util.readLinesFromFile(out_file);
    img_files=[line[:line.index(' ')] for line in lines];

    img_names=util.getFileNames(img_files,ext=False);
    img_name=img_names[0];
    
    print img_name
    
    img_name_split=img_name.split('_');
    idx_all=[int(img_name.split('_')[-1]) for img_name in img_names];

    print len(img_names),len(idx_all),idx_all[0];
    cat_id=1;

    path_to_anno='/disk2/ms_coco/annotations';
    anno_file='instances_train2014.json';
    anno=json.load(open(os.path.join(path_to_anno,anno_file),'rb'))['annotations'];
    
    script_saveBboxFiles(anno,out_dir,im_pre,idx_all,cat_id)
Example no. 31
def script_vizForHMDB():

	out_dir='/disk2/mayExperiments/debug_finetuning/hmdb';
	clusters_file='/home/maheenrashid/Downloads/debugging_jacob/optical_flow_prediction_test/examples/opticalflow/clusters.mat';
	vid_list=os.path.join(out_dir,'video_list.txt');
	out_dir_viz=os.path.join(out_dir,'im');
	util.mkdir(out_dir_viz);
	out_file_html=out_dir_viz+'.html';
	
	path_to_hmdb='/disk2/marchExperiments/hmdb_try_2/hmdb'

	dirs=util.readLinesFromFile(vid_list);
	dirs=[os.path.join(path_to_hmdb,dir_curr) for dir_curr in dirs[2:]];
	random.shuffle(dirs);
	num_to_evaluate=100;
	out_file_tif=os.path.join(out_dir,'tif_list.txt');

	# recordContainingFiles(dirs,num_to_evaluate,out_file_flo,post_dir='images',ext='.flo');
	tif_files=util.readLinesFromFile(out_file_tif);
	tif_files=tif_files[:100];
	img_files=[file_curr.replace('.tif','.jpg') for file_curr in tif_files];
	flo_files=[file_curr.replace('.tif','.flo') for file_curr in tif_files];
	clusters=po.readClustersFile(clusters_file);

	script_writeFloVizHTML(out_file_html,out_dir_viz,flo_files,img_files,tif_files,clusters,True)
Example no. 32
    def organize_directory(self):
        if (self.scenes):
            # Iterate Over Scenes List
            for curr, scene in enumerate(self.scenes, 1):
                # Create Scene Folder
                util.mkdir(
                    util.form_path([self.image_path,
                                    SCENE.format(curr)]))

                # Move Images To Scene Folder
                for image in scene:
                    try:
                        # Generate Source and Destination Paths
                        src = util.absolute(image)
                        dst = util.normalize(
                            util.form_path([
                                util.dirname(image),
                                SCENE.format(curr),
                                util.basename(image)
                            ]))

                        # Move Images To Scene Folder
                        util.move(src, dst)
                    except FileNotFoundError:
                        pass

            # Update Prompt
            print("Organized All Images             ")
        else:
            util.perror("spectra: No scenes found to analyze")
Example no. 33
File: app.py Project: uzak/reftest
    def check(self, name):
        data = self.getSource()
        fnRef = self.fnRef(name)
        fnOrig = self.fnOrig(name)

        # save references only?
        if self.save:
            util.mkdir(self.config.REF_DIR)
            util.write(fnRef, data)

        else: # compare
            assert os.path.exists(fnRef), "Cannot compare without reference file: %s" % fnRef

            util.mkdir(self.outputDir)
            # first save original file
            util.write(fnOrig, data)

            ref = self.cleanup(util.read(fnRef))
            data = self.cleanup(data)

            # htmldiff
            result = htmldiff.htmldiff(ref, data, True)
            util.write(self.fnHtmlDiff(name), result)
            self.scenario.results[name] = self._eval_diff(result)

            # difflib
            linesRef = ref.splitlines()
            linesData = data.splitlines()
            result = difflib.HtmlDiff(wrapcolumn=80).make_file(linesRef, linesData, fnRef, fnOrig, context=True)
            util.write(self.fnDiffLib(name), result)
Example no. 34
def main():
    args = get_arguments()
    SEED = args.seed
    torch.manual_seed(SEED)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    np.random.seed(SEED)

    util.mkdir('runs')

    test_acc_file = 'runs/test' + str(args.cosine) + str(args.bna) + str(
        args.bnd) + '.txt'
    train_acc_file = 'runs/train' + str(args.cosine) + str(args.bna) + str(
        args.bnd) + '.txt'
    open(test_acc_file, 'w')
    open(train_acc_file, 'w')

    print('Building model, loading data...\n')
    if args.cuda:
        torch.cuda.manual_seed(SEED)

    model, optimizer, training_generator, test_generator = initialize(args)

    best_pred_loss = 1000.0
    print('\nCheckpoint folder:', args.save, '\n\nCosine:', args.cosine,
          '\t\tBna:', args.bna, '\t\tBnd:', args.bnd, '\t\tContrastive:',
          args.cont, '\n\nStart training...\n')

    for epoch in range(1, args.nEpochs + 1):
        train_metrics = train(args, model, training_generator, optimizer,
                              epoch)
        test_metrics, confusion_matrix, ucsd_correct_total, sars_correct_total, ucsd_test_total, sars_test_total \
            = validation(args, model, test_generator, epoch, mode='test')

        best_pred_loss = util.save_model(model, optimizer, args, test_metrics,
                                         epoch, best_pred_loss,
                                         confusion_matrix)

        print('COVID-CT Accuracy: {0:.2f}%\tSARS-Cov-2 Accuracy: {1:.2f}%\n'.
              format(100. * ucsd_correct_total / ucsd_test_total,
                     100. * sars_correct_total / sars_test_total))

        with open(test_acc_file, 'a+') as f:
            f.write(
                str(test_metrics.data['correct'] /
                    test_metrics.data['total']) + ' ' +
                str(optimizer.param_groups[0]['lr']) + ' ' +
                str(test_metrics.data['loss'] /
                    (test_metrics.data['total'] // args.batch_size + 1)) +
                '\n')
        with open(train_acc_file, 'a+') as f:
            f.write(
                str(train_metrics.data['correct'] /
                    train_metrics.data['total']) + ' ' +
                str(optimizer.param_groups[0]['lr']) + ' ' +
                str(train_metrics.data['loss'] /
                    (train_metrics.data['total'] // args.batch_size + 1)) +
                '\n')

        adjust_learning_rate(optimizer, epoch, args)
Example no. 35
    def generate(self):
        pkg_entries = [(pkg, self.db_write(pkg)) for pkg in self.pkgs]

        if self.dbdir:
            for pkg, entry in pkg_entries:
                path = os.path.join(self.dbdir, pkg.fullname())
                util.mkdir(path)
                for name, data in entry.items():
                    util.mkfile(path, name, data)

        if self.dbfile:
            tar = tarfile.open(self.dbfile, "w:gz")
            for pkg, entry in pkg_entries:
                # TODO: the addition of the directory is currently a
                # requirement for successful reading of a DB by libalpm
                info = tarfile.TarInfo(pkg.fullname())
                info.type = tarfile.DIRTYPE
                tar.addfile(info)
                for name, data in entry.items():
                    filename = os.path.join(pkg.fullname(), name)
                    info = tarfile.TarInfo(filename)
                    info.size = len(data)
                    tar.addfile(info, StringIO(data))
            tar.close()
            # TODO: this is a bit unnecessary considering only one test uses it
            serverpath = os.path.join(self.root, util.SYNCREPO, self.treename)
            util.mkdir(serverpath)
            shutil.copy(self.dbfile, serverpath)
Example no. 37
File: pmdb.py Project: mineo/pacman
    def generate(self):
        pkg_entries = [(pkg, self.db_write(pkg)) for pkg in self.pkgs]

        if self.dbdir:
            for pkg, entry in pkg_entries:
                path = os.path.join(self.dbdir, pkg.fullname())
                util.mkdir(path)
                for name, data in entry.iteritems():
                    filename = os.path.join(path, name)
                    util.mkfile(filename, data)

        if self.dbfile:
            tar = tarfile.open(self.dbfile, "w:gz")
            for pkg, entry in pkg_entries:
                # TODO: the addition of the directory is currently a
                # requirement for successful reading of a DB by libalpm
                info = tarfile.TarInfo(pkg.fullname())
                info.type = tarfile.DIRTYPE
                tar.addfile(info)
                for name, data in entry.iteritems():
                    filename = os.path.join(pkg.fullname(), name)
                    info = tarfile.TarInfo(filename)
                    info.size = len(data)
                    tar.addfile(info, StringIO(data))
            tar.close()
            # TODO: this is a bit unnecessary considering only one test uses it
            serverpath = os.path.join(self.root, util.SYNCREPO, self.treename)
            util.mkdir(serverpath)
            shutil.copy(self.dbfile, serverpath)
Example no. 38
    def pack(self):
        emb_root = self.target_root
        if self.seed:
            emb_root = emb_root.pjoin(self.target_root)

        basedir = util.Path( os.path.dirname(self.tarpath) )
        util.mkdir(basedir)

        archive = tarfile.open(self.tarpath, 'w:bz2')
        archive.add(emb_root,
            arcname = '/',
            recursive = True
        )
        archive.close()

        curdir = os.path.realpath(os.curdir)
        os.chdir(emb_root)
        util.cmd('find ./ | cpio -H newc -o | gzip -c -9 > %s' % (self.cpiopath))
        os.chdir(curdir)

        if self.kernel:
            r = util.Path('/')
            if self.seed:
                r = self.target_root
            r = r.pjoin('/tmp/inhibitor/kernelbuild')

            kernel_link = r.pjoin('/boot/kernel')
            kernel_path = os.path.realpath( kernel_link )

            if os.path.lexists(self.kernlinkpath):
                os.unlink(self.kernlinkpath)

            shutil.copy2(kernel_path, basedir.pjoin( os.path.basename(kernel_path) ))
            os.symlink(os.path.basename(kernel_path), self.kernlinkpath)
Example no. 39
    def __init__(self,
                 data_path,
                 model_file,
                 s3_bucket,
                 epochs=50,
                 max_sample_records=500,
                 start_epoch=0,
                 restored_model=False,
                 restored_model_dir=None,
                 tf_timeline=False):
        self.data_path = data_path
        self.s3_bucket = format_s3_bucket(s3_bucket)
        self.s3_data_dir = format_s3_data_dir(self.s3_bucket)
        self.model_file = model_file
        self.n_epochs = int(epochs)
        self.max_sample_records = max_sample_records
        self.tf_timeline = tf_timeline

        # Always sync before training in case I ever train multiple models in parallel
        sync_from_aws(s3_path=self.s3_data_dir, local_path=self.data_path)

        if restored_model:
            self.model_dir = restored_model_dir
        else:
            self.tfboard_basedir = os.path.join(self.data_path,
                                                'tf_visual_data', 'runs')
            self.model_dir = mkdir_tfboard_run_dir(self.tfboard_basedir)

        self.results_file = os.path.join(self.model_dir, 'results.txt')
        self.model_checkpoint_dir = os.path.join(self.model_dir, 'checkpoints')
        self.saver = tf.train.Saver()
        self.start_epoch = start_epoch
        self.restored_model = restored_model
        mkdir(self.model_checkpoint_dir)
Example no. 40
 def write_params(self):
     """
     Write out the estimated parameters as csv.
     Columns are: ['id', 'mode', 'p(k)', 'parameter', 'value', 'se']
     """
     if self.id is None:
         self.exception("can't write model, as filename not specified.")
     if '.' not in self.id:
         self.exception("model id is not a filename.")
     # make sure the output folder exists
     folder = '%s/fits/%s' % (util.data_path, self.model_type)
     util.mkdir(folder)
     with open('%s/%s' % (folder, self.id), 'w') as f:
         writer = csv.writer(f)
         # write each degree as a row
         for i in range(len(self.u)):
             row = [self.id,
                    self.model_type,
                    1,
                    i,
                    self.u[i],
                    self.se[i]]
             writer.writerow(row)
     if self.vvv:
         self.message("wrote model to file")
Example no. 41
 def __init__(self, data_path, epochs=50, max_sample_records=500):
     self.data_path = data_path
     self.epochs = int(epochs)
     self.max_sample_records = max_sample_records
     tfboard_basedir = mkdir(data_path + '/tf_visual_data/runs/')
     tfboard_run_dir = mkdir_tfboard_run_dir(tfboard_basedir)
     model_checkpoint_path = mkdir(tfboard_run_dir + '/trained_model')
Example no. 42
 def auto_conf(self):
     util.mkdir(self.out_root)
     self.out_dir = os.path.join(self.out_root, self.name)
     util.mkdir(self.out_dir)
     self.model_path = os.path.join(self.out_dir, 'model')
     self.log_path = os.path.join(self.out_dir, 'log')
     self.test_out_path = os.path.join(self.out_dir, 'test_out')
Example no. 43
 def __write_permapage(self, posts):
     """Write blog posts to their permalink locations"""
     perma_template = self.template_lookup.get_template("permapage.mako")
     perma_template.output_encoding = "utf-8"
     for post in posts:
         if post.permalink:
             path_parts = [self.output_dir]
             path_parts.extend(urlparse.urlparse(
                     post.permalink)[2].lstrip("/").split("/"))
             path = os.path.join(*path_parts)
             logger.info("Writing permapage for post: "+path)
         else:
             #Permalinks MUST be specified. No permalink, no page.
             logger.info("Post has no permalink: "+post.title)
             continue
         try:
             util.mkdir(path)
         except OSError:
             pass
         html = self.__template_render(
             perma_template,
             { "post": post,
               "posts": posts })
         f = open(os.path.join(path,"index.html"), "w")
         f.write(html)
         f.close()
Example no. 44
    def __write_files(self):
        """Write all files for the blog to _site

        Convert all templates to straight HTML
        Copy other non-template files directly"""
        #find mako templates in template_dir
        for root, dirs, files in os.walk("."):
            if root.startswith("./"):
                root = root[2:]
            for d in list(dirs):
                #Exclude some dirs
                d_path = util.path_join(root, d)
                if util.should_ignore_path(d_path):
                    logger.debug("Ignoring directory: " + d_path)
                    dirs.remove(d)
            try:
                util.mkdir(util.path_join(self.output_dir, root))
            except OSError:  #pragma: no cover
                pass
            for t_fn in files:
                t_fn_path = util.path_join(root, t_fn)
                if util.should_ignore_path(t_fn_path):
                    #Ignore this file.
                    logger.debug("Ignoring file: " + t_fn_path)
                    continue
                elif t_fn.endswith(".mako"):
                    logger.info("Processing mako file: " + t_fn_path)
                    #Process this template file
                    t_name = t_fn[:-5]
                    t_file = open(t_fn_path)
                    template = Template(t_file.read().decode("utf-8"),
                                        output_encoding="utf-8",
                                        lookup=self.template_lookup)
                    #Remember the original path for later when setting context
                    template.bf_meta = {"path": t_fn_path}
                    t_file.close()
                    path = util.path_join(self.output_dir, root, t_name)
                    html_file = open(path, "w")
                    html = self.template_render(template)
                    #Write to disk
                    html_file.write(html)
                else:
                    #Copy this non-template file
                    f_path = util.path_join(root, t_fn)
                    logger.debug("Copying file: " + f_path)
                    out_path = util.path_join(self.output_dir, f_path)
                    if self.config.site.overwrite_warning and os.path.exists(
                            out_path):
                        logger.warn(
                            "Location is used more than once: {0}".format(
                                f_path))
                    if self.bf.config.site.use_hard_links:
                        # Try hardlinking first, and if that fails copy
                        try:
                            os.link(f_path, out_path)
                        except StandardError:
                            shutil.copyfile(f_path, out_path)
                    else:
                        shutil.copyfile(f_path, out_path)
Example no. 45
    def run(self):
        environ = self.distribution.environment

        if self.devel_support:
            for tpl in self.devel_support:
                if self.distribution.verbose:
                    print 'adding sysdevel support to ' + tpl[0]
                target = os.path.abspath(os.path.join(self.build_lib,
                                                      *tpl[0].split('.')))
                util.mkdir(target)
                source_dir = os.path.abspath(os.path.join(
                        os.path.dirname(__file__), 'support'))
                for mod in tpl[1]:
                    src_file = os.path.join(source_dir, mod + '.py.in')
                    if not os.path.exists(src_file):
                        src_file = src_file[:-3]
                    dst_file = os.path.join(target, mod + '.py')
                    util.configure_file(environ, src_file, dst_file)


        if self.antlr_modules:
            here = os.getcwd()
            for grammar in self.antlr_modules:
                if self.distribution.verbose:
                    print 'building antlr grammar "' + \
                        grammar.name + '" sources'
                ##TODO build in build_src, add to build_lib modules
                target = os.path.abspath(os.path.join(self.build_lib,
                                                      grammar.directory))
                util.mkdir(target)
                source_dir = os.path.abspath(grammar.directory)
                os.chdir(target)

                reprocess = True
                ref = os.path.join(target, grammar.name + '2Py.py')
                if os.path.exists(ref):
                    reprocess = False
                    for src in grammar.sources:
                        src_path = os.path.join(source_dir, src)
                        if os.path.getmtime(ref) < os.path.getmtime(src_path):
                            reprocess = True
                if reprocess:
                    for src in grammar.sources:
                        ## ANTLR cannot parse from a separate directory
                        shutil.copy(os.path.join(source_dir, src), '.')
                        cmd_line = list(environ['ANTLR'])
                        cmd_line.append(src)
                        status = subprocess.call(cmd_line)
                        if status != 0:
                            raise Exception("Command '" + str(cmd_line) +
                                            "' returned non-zero exit status "
                                            + str(status))
                    ## Cleanup so that it's only Python modules
                    for f in glob.glob('*.g'):
                        os.unlink(f)
                    for f in glob.glob('*.tokens'):
                        os.unlink(f)
                os.chdir(here)
        _build_src.run(self)
Example no. 46
def build(smbuildfile,
          compiler,
          plugins,
          packages,
          flags='',
          output_dir='builds',
          nosource=False):
    """Performs the entire build process."""
    # setup directory structure, execute user-configurations
    plugin_build_dir = os.path.join(output_dir, 'plugins')
    util.mkdir(output_dir)
    util.mkdir(plugin_build_dir)

    # scan deps for what we need to do
    packages_to_build = set()
    for name, package in packages.items():
        if smbuildfile == package.smbuildfile:
            packages_to_build.add(name)

    plugins_to_compile = set()
    for name in packages_to_build:
        for_this_package = base.find_plugin_deps(packages[name], packages)
        for plugin_name in for_this_package:
            plugins_to_compile.add(plugin_name)

            if plugin_name not in plugins:
                err = 'Package {} uses plugin {}, but it does not exist'.format(
                    name, plugin_name)
                raise ValueError(err)

            # also make sure plugin dependencies are met by the package
            for dep in plugins[plugin_name].deps:
                if dep not in for_this_package:
                    msg = 'Plugin {} depends on {}, but is not part of package {}'
                    msg = msg.format(plugin_name, dep, name)
                    util.warning(msg)

    # also compile any plugins from this smbuildfile
    for plugin_name in plugins:
        if plugins[plugin_name].smbuildfile == smbuildfile:
            plugins_to_compile.add(plugin_name)

    # compile plugins
    compiled_count = 0
    for name in plugins_to_compile:
        plugin = plugins[name]
        if plugin.compile(compiler, plugin_build_dir, flags):
            compiled_count += 1

    # build packages
    for name in packages_to_build:
        package = packages[name]
        print('Building package {}'.format(name))
        package.create(output_dir, packages, plugins, nosource)

    if len(plugins) == 0:
        util.warning('No plugins were found in {}.'.format(smbuildfile))
    elif compiled_count == 0:
        print('All plugins up to date.')
Example no. 47
    def __write_files(self):
        """Write all files for the blog to _site

        Convert all templates to straight HTML
        Copy other non-template files directly"""
        #find mako templates in template_dir
        for root, dirs, files in os.walk("."):
            if root.startswith("./"):
                root = root[2:]
            for d in list(dirs):
                #Exclude some dirs
                d_path = util.path_join(root,d)
                if util.should_ignore_path(d_path):
                    logger.debug("Ignoring directory: " + d_path)
                    dirs.remove(d)
            try:
                util.mkdir(util.path_join(self.output_dir, root))
            except OSError: #pragma: no cover
                pass
            for t_fn in files:
                t_fn_path = util.path_join(root, t_fn)
                if util.should_ignore_path(t_fn_path):
                    #Ignore this file.
                    logger.debug("Ignoring file: " + t_fn_path)
                    continue
                elif t_fn.endswith(".mako"):
                    logger.info("Processing mako file: " + t_fn_path)
                    #Process this template file
                    t_name = t_fn[:-5]
                    t_file = open(t_fn_path)
                    template = Template(t_file.read().decode("utf-8"),
                                        output_encoding="utf-8",
                                        lookup=self.template_lookup)
                    t_file.close()
                    path = util.path_join(self.output_dir, root, t_name)
                    html_file = open(path, "w")
                    # Prepare the "path" variable for the template context.
                    page_path = util.path_join(root, t_name)
                    if page_path.startswith('./'):
                      page_path = page_path[2:]
                    page_path = '/' + page_path
                    context = dict(path=page_path, logger=template_logger)
                    #render the page
                    html = self.template_render(template, context)
                    #Write to disk
                    html_file.write(html)
                else:
                    #Copy this non-template file
                    f_path = util.path_join(root, t_fn)
                    logger.debug("Copying file: " + f_path)
                    out_path = util.path_join(self.output_dir, f_path)
                    if self.bf.config.site.use_hard_links:
                        # Try hardlinking first, and if that fails copy
                        try:
                            os.link(f_path, out_path)
                        except StandardError:
                            shutil.copyfile(f_path, out_path)
                    else:
                        shutil.copyfile(f_path, out_path)
Example n. 48
0
    def make_profile_link(self):
        # XXX: We also need to make the root profile link, Gentoo Bug 324179.
        for d in (self.target_root, self.target_root.pjoin(self.portage_cr)):
            targ = d.pjoin("/etc/make.profile")
            util.mkdir(os.path.dirname(targ))
            if os.path.lexists(targ):
                os.unlink(targ)
            os.symlink(self.env["PORTDIR"] + "/profiles/%s" % self.profile, targ)
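The unlink-then-symlink sequence above is a standard way to replace a possibly existing link. Isolated as a standalone sketch, assuming Python 3; the names link_target and link_path are hypothetical:

import os

def replace_symlink(link_target, link_path):
    # Ensure the parent directory exists before linking.
    os.makedirs(os.path.dirname(link_path), exist_ok=True)
    # lexists() is true even for a dangling symlink, unlike exists().
    if os.path.lexists(link_path):
        os.unlink(link_path)
    os.symlink(link_target, link_path)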
Example n. 49
0
    def push(cmd_args):
        opts, args = getopt.getopt(
            cmd_args, "l:r:n:m:",
            ["local-file=", "remote-path=", "remote-name=", "no-bak", "mod="])
        log.info("opts %s args:%s" % (opts, args))
        local_file, remote_path, remote_name = "", "", ""
        bak_file, chmod = True, "777"
        for op, value in opts:
            if op == "-l" or op == "--local-file":
                local_file = value
            elif op == "-r" or op == "--remote-path":
                remote_path = value
            elif op == "-n" or op == "--remote-name":
                remote_name = value
            elif op == "-n" or op == "--mod":
                chmod = value
            elif op == "--no-bak":
                bak_file = False
            else:
                log.error("unkown opt:%s value:%s" % (op, value))
                return False
        if len(opts) == 0:
            local_file = args[0] if len(args) >= 1 else ""
            remote_path = args[1] if len(args) >= 2 else ""
            remote_name = args[2] if len(args) >= 3 else ""

        if remote_path == "":
            remote_path = "/data/local/tmp"
        if os.path.isdir(local_file):
            # push a directory
            remote_file = remote_path + "/"
            util.mkdir(remote_file)
        elif os.path.isfile(os.path.join(os.getcwd(), local_file)):
            # push a file
            # local_path = os.path.dirname(local_file)
            local_fname = os.path.basename(local_file)
            if remote_name == "":
                remote_name = local_fname
            remote_file = remote_path + "/" + remote_name
        else:
            log.error("local file:%s %s not exist" %
                      (local_file, os.path.join(os.getcwd(), local_file)))
            return False
        if bak_file:
            shell_cmd = util.getshell('mv "%s" "%s.bak"' %
                                      (remote_file, remote_file))
            util.execute_cmd(shell_cmd)
        log.info("local:%s remote:%s" % (local_file, remote_file))
        shell_cmd = util.getcmd('push "%s" "%s"' % (local_file, remote_file))
        ret, res_str = util.execute_cmd_with_stdout(shell_cmd)
        if not ret:
            return False
        if chmod != "":
            shell_cmd = util.getshell('chmod %s "%s"' % (chmod, remote_file))
            if not util.execute_cmd(shell_cmd): return False
            # shell_cmd = util.getshell('".%s"' % remote_file)
            # return util.execute_cmd(shell_cmd)
        return True
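push() accepts either flags or bare positional arguments (local file, remote path, remote name). Hypothetical invocations, assuming it is exposed as a plain function and with illustrative paths only:

push(["-l", "demo.apk", "-r", "/data/local/tmp", "--no-bak"])
push(["demo.apk", "/data/local/tmp", "renamed.apk"])  # positional form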
Example n. 50
0
def main():
	
	dir_clusters='/disk2/temp/youtube_clusters_check_nothresh';
	out_dir=os.path.join(dir_clusters,'viz');
	util.mkdir(out_dir);
	script_seeMultipleClusters(dir_clusters,out_dir)
	return
	
	script_seeMultipleClusters();
	return
	dir_clusters='/disk3/maheen_data/debug_networks/clusters_youtube_multiple';
	clusters_all=util.getFilesInFolder(dir_clusters,'.npy');
	clusters_all=[file_curr for file_curr in clusters_all if 'harder' in file_curr];
	clusters_all.append(os.path.join(dir_clusters,'clusters_original.npy'));
	min_mags=[];
	for file_curr in clusters_all:
		clusters=np.load(file_curr);
		mags=np.power(np.sum(np.power(clusters,2),axis=1),0.5);
		min_mag=np.min(mags);
		min_mags.append(min_mag);

	print min_mags,np.max(min_mags);

	thresh=1;
	counts=[];
	for file_curr in clusters_all:
		clusters=np.load(file_curr);
		print file_curr
		mags=np.power(np.sum(np.power(clusters,2),axis=1),0.5);
		count=np.sum(mags<=thresh);
		print count
		counts.append(count);

	print np.mean(counts);


	# return
	dir_curr='/disk3/maheen_data/debug_networks/figuringClustering';
	mag_file=os.path.join(dir_curr,'mags_all.npy');
	mags=np.load(mag_file);
	
	print len(mags),np.sum(mags<=thresh);
	mags=mags[mags>thresh];
	print len(mags);

	out_file=os.path.join(dir_curr,'mag_hist_noZero.png');
	visualize.hist(mags,out_file,bins=40,normed=True,xlabel='Value',ylabel='Frequency',title='',cumulative=False);
	print out_file.replace('/disk3','vision3.cs.ucdavis.edu:1001');

	print np.min(mags),np.max(mags),np.mean(mags),np.std(mags);
Example n. 51
0
def main():
    args = process_command_line_arguments()
    util.mkdir(PATH['TIMBRADAS'])

    xml = util.read_file(args.factura)

    # Send to the PAC for stamping
    timbra_xml(xml)
    return
Example n. 52
0
    def __write_blog_categories(self, posts):
        """Write all the blog posts in categories"""
        root = os.path.join(self.blog_dir, config.blog_category_dir)
        chron_template = self.template_lookup.get_template("chronological.mako")
        chron_template.output_encoding = "utf-8"
        #Find all the categories:
        categories = set()
        for post in posts:
            categories.update(post.categories)
        for category in categories:
            category_posts = [post for post in posts
                              if category in post.categories]
            #Write category RSS feed
            self.__write_feed(category_posts, os.path.join(
                    config.blog_path, config.blog_category_dir,
                    category.url_name, "feed"), "rss.mako")
            self.__write_feed(category_posts, os.path.join(
                    config.blog_path, config.blog_category_dir,
                    category.url_name, "feed", "atom"), "rss.mako")
            page_num = 1
            while True:
                path = os.path.join(root, category.url_name,
                                    str(page_num), "index.html")
                try:
                    util.mkdir(os.path.split(path)[0])
                except OSError:
                    pass
                f = open(path, "w")
                page_posts = category_posts[:config.blog_posts_per_page]
                category_posts = category_posts[config.blog_posts_per_page:]
                #Forward and back links
                if page_num > 1:
                    prev_link = util.blog_path_helper(
                        (config.blog_category_dir, category.url_name, str(page_num - 1)))
                else:
                    prev_link = None
                if len(category_posts) > 0:
                    next_link = util.blog_path_helper(
                        (config.blog_category_dir, category.url_name, str(page_num + 1)))
                else:
                    next_link = None
                html = self.__template_render(
                    chron_template,
                    { "posts": page_posts,
                      "prev_link": prev_link,
                      "next_link": next_link })
                f.write(html)
                f.close()
                #Copy category/1 to category/index.html
                if page_num == 1:
                    shutil.copyfile(path, os.path.join(
                            root, category.url_name,
                            "index.html"))
                #Prepare next iteration
                page_num += 1
                if len(category_posts) == 0:
                    break
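The writer above pages posts by repeatedly slicing blog_posts_per_page items off the front of the list; page 1 is always written, and a next link exists while posts remain. The same loop in isolation, as a sketch with illustrative names (POSTS_PER_PAGE stands in for the config value):

POSTS_PER_PAGE = 5  # stand-in for config.blog_posts_per_page

def paginate(posts, per_page=POSTS_PER_PAGE):
    page_num = 1
    while True:
        page, posts = posts[:per_page], posts[per_page:]
        yield page_num, page, bool(posts)  # bool(posts): a next page exists
        page_num += 1
        if not posts:
            break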
Example n. 53
0
    def make_profile_link(self):
        if self.seed:
            super(EmbeddedStage, self).make_profile_link()
        else:
            targ = self.portage_cr.pjoin('/etc/make.profile')
            util.mkdir(os.path.dirname(targ))
            if os.path.lexists(targ):
                os.unlink(targ)
            os.symlink(self.env['PORTDIR'] + '/profiles/%s' % self.profile, targ)
Example n. 54
0
def processDay(nodeName, day):
    log(f'Processing day/node: {day}/{nodeName}')
    dayPath = f"../data/s3/{nodeName}/{day}"
    outputDir = os.path.join(DIR_PROCESSED, nodeName)
    mkdir(outputDir)

    createArchiveVideo(dayPath, outputDir, day)
    videoPath = createVideo(outputDir, dayPath, nodeName, day)
    createPreviewGif(videoPath, os.path.join(outputDir, f'{day}.gif'))
Example n. 55
0
    def __write_files(self):
        """Write all files for the blog to _site

        Convert all templates to straight HTML
        Copy other non-template files directly"""
        #find mako templates in template_dir
        for root, dirs, files in os.walk(".", followlinks=True):
            if root.startswith("./"):
                root = root[2:]
            for d in list(dirs):
                #Exclude some dirs
                d_path = util.path_join(root,d)
                if util.should_ignore_path(d_path):
                    logger.debug("Ignoring directory: " + d_path)
                    dirs.remove(d)
            try:
                util.mkdir(util.path_join(self.output_dir, root))
            except OSError: #pragma: no cover
                pass
            for t_fn in files:
                t_fn_path = util.path_join(root, t_fn)
                if util.should_ignore_path(t_fn_path):
                    #Ignore this file.
                    logger.debug("Ignoring file: " + t_fn_path)
                    continue
                elif t_fn.endswith(".mako"):
                    logger.info("Processing mako file: " + t_fn_path)
                    #Process this template file
                    t_name = t_fn[:-5]
                    t_file = open(t_fn_path)
                    template = Template(t_file.read().decode("utf-8"),
                                        output_encoding="utf-8",
                                        lookup=self.template_lookup)
                    #Remember the original path for later when setting context
                    template.bf_meta = {"path":t_fn_path}
                    t_file.close()
                    path = util.path_join(self.output_dir, root, t_name)
                    html_file = open(path, "w")
                    html = self.template_render(template)
                    #Write to disk
                    html_file.write(html)
                    html_file.close()
                else:
                    #Copy this non-template file
                    f_path = util.path_join(root, t_fn)
                    logger.debug("Copying file: " + f_path)
                    out_path = util.path_join(self.output_dir, f_path)
                    if self.config.site.overwrite_warning and os.path.exists(out_path):
                        logger.warn("Location is used more than once: {0}".format(f_path))
                    if self.bf.config.site.use_hard_links:
                        # Try hardlinking first, and if that fails copy
                        try:
                            os.link(f_path, out_path)
                        except StandardError:
                            shutil.copyfile(f_path, out_path)
                    else:
                        shutil.copyfile(f_path, out_path)
Example n. 56
0
    def run(self):
        old_data.run(self)
        util.mkdir(self.data_install_dir)
        if (not hasattr(self.distribution, 'using_py2exe') or
                not self.distribution.using_py2exe) and self.data_dirs:
            for tpl in self.data_dirs:
                target = os.path.join(self.data_install_dir, tpl[0])
                for d in tpl[1]:
                    util.copy_tree(d, target, excludes=['.svn*', 'CVS*',
                                                        'Makefile*'])
Example n. 57
0
def script_writeTrainFile():
	dir_val='/disk2/ms_coco/train2014';
	out_dir='/disk2/mayExperiments/train_data';
	util.mkdir(out_dir);

	imgs=util.getEndingFiles(dir_val,'.jpg');
	imgs=[os.path.join(dir_val,file_curr) for file_curr in imgs];
	imgs.sort();
	out_file=os.path.join(out_dir,'train.txt');
	util.writeFile(out_file,imgs)
Example n. 58
0
    def __write_pygments_css(self):
        css_dir = os.path.join(self.output_dir, self.config.site_css_dir.lstrip("/"))
        try:
            util.mkdir(css_dir)
        except OSError:
            pass
        if config.syntax_highlight_enabled:
            f = open(os.path.join(css_dir, "pygments.css"), "w")
            f.write(config.html_formatter.get_style_defs(".highlight"))
            f.close()
Example n. 59
0
def script_writeCommandsForExperiment():
    # out_dir='/disk3/maheen_data/debug_networks/noFixCopyByLayer';
    # model_file='/home/maheenrashid/Downloads/debugging_jacob/optical_flow_prediction/examples/opticalflow/final.caffemodel';

    out_dir='/disk3/maheen_data/debug_networks/noFixCopyByLayerAlexNet';
    model_file='/home/maheenrashid/Downloads/debugging_jacob/optical_flow_prediction/models/bvlc_alexnet/bvlc_alexnet.caffemodel';

    util.mkdir(out_dir);
    train_txt_orig_path='/disk3/maheen_data/debug_networks/noFix/train.txt';

    template_deploy_file='deploy_debug_noFix.prototxt';
    template_solver_file='solver_debug.prototxt';

    train_file=os.path.join(out_dir,'train.txt');
    
    shutil.copyfile(train_txt_orig_path,train_file);

    base_lr=0.0001;
    snapshot=100;
    layers=['conv1','conv2','conv3','conv4','conv5','fc6','fc7'];

    command_pre=os.path.join(out_dir,'debug_');
    commands=[];

    for idx in range(len(layers)):
        # if idx==0:
        #     fix_layers=layers[0];
        #     layer_str=str(fix_layers);
        #     model_file_curr=None;
        # else:
        fix_layers=layers[:idx+1];
    
        layer_str='_'.join(fix_layers);
        model_file_curr=model_file
        # print fix_layers

        if idx<len(layers)/2:
            gpu=0;
        else:
            gpu=1;
        snapshot_prefix=os.path.join(out_dir,'opt_noFix_'+layer_str+'_');
        out_deploy_file=os.path.join(out_dir,'deploy_'+layer_str+'.prototxt');
        out_solver_file=os.path.join(out_dir,'solver_'+layer_str+'.prototxt');
        log_file=os.path.join(out_dir,'log_'+layer_str+'.log');
        replaceSolverFile(out_solver_file,template_solver_file,out_deploy_file,base_lr,snapshot,snapshot_prefix,gpu);
        replaceDeployFile(out_deploy_file,template_deploy_file,train_file,fix_layers);
        command=printTrainingCommand(out_solver_file,log_file,model_file_curr);
        commands.append(command);
    
    command_file_1=command_pre+'0.sh';
    util.writeFile(command_file_1,commands[:len(commands)/2]);
    command_file_2=command_pre+'1.sh';
    util.writeFile(command_file_2,commands[len(commands)/2:]);
Example n. 60
0
def display_learning_curves(score_folders, picture_folder):
    """"Draw pictures of learning curves"""
    # Allow one folder to be submitted as input instead of list
    if not isinstance(score_folders, list):
        score_folders = [score_folders]
    # Make picture folder if necessary
    util.mkdir(picture_folder)
    # Determine names of datasets
    dataset_names = []
    for score_folder in score_folders:
        new_names = [name for name in os.listdir(score_folder) if os.path.isdir(os.path.join(score_folder, name))]
        dataset_names = sorted(list(set(dataset_names + new_names)))
    print('Datasets:')
    for dataset_name in dataset_names:
        print(' - %s' % dataset_name)
    print('')
    # Determine algorithm names
    algo_names = []
    for score_folder in score_folders:
        for dataset_name in dataset_names:
            if os.path.isdir(os.path.join(score_folder, dataset_name)):
                new_names = [name for name in os.listdir(os.path.join(score_folder, dataset_name))
                             if os.path.isdir(os.path.join(score_folder, dataset_name, name))]
                algo_names = sorted(list(set(algo_names + new_names)))
    print('Algorithms:')
    for algo_name in algo_names:
        print(' - %s' % algo_name)
    print('')
    # For each dataset plot learning curve of every algorithm across score folders
    for dataset_name in dataset_names:
        print(dataset_name)
        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.set_title(dataset_name)
        ax.set_xlabel('Time (seconds)')
        # ax.set_xscale('log')
        ax.set_ylabel('Score')
        n_algos = 0
        for (algo_index, algo_name) in enumerate(algo_names):
            print(algo_name)
            for score_folder in score_folders:
                time_score_file = os.path.join(score_folder, dataset_name, algo_name, 'learning_curve.csv')
                if os.path.isfile(time_score_file):
                    time_scores = np.loadtxt(time_score_file, delimiter=',', skiprows=1)
                    if time_scores.size > 0:
                        ax.plot(time_scores[:, 0], time_scores[:, 1],
                                color=util.colorbrew(algo_index),
                                linestyle='dashed', marker='o',
                                label=algo_name)
                        n_algos += 1
        if n_algos > 0:
            leg = ax.legend(loc='best')
            leg.get_frame().set_alpha(0.5)
            fig.savefig(os.path.join(picture_folder, '%s.pdf' % dataset_name))
            plt.show()
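np.loadtxt above expects each learning_curve.csv to hold one header row (skipped via skiprows=1) followed by time,score pairs. A file in that layout could be produced like this; the values are illustrative only:

import csv

with open('learning_curve.csv', 'w', newline='') as f:
    writer = csv.writer(f)
    writer.writerow(['time', 'score'])  # header row, skipped by skiprows=1
    writer.writerows([(1.0, 0.52), (10.0, 0.71), (60.0, 0.78)])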