Example #1
def exercise_17():
    # test for small molecule multi-sweep indexing, 3 sweeps with different values
    # of goniometer setting rotation (i.e. phi scans)
    data_dir = os.path.join(dials_regression, "dials-191")
    import glob
    pickle_paths = [
        glob.glob(os.path.join(data_dir,
                               "*SWEEP%i*_strong.pickle" % (i + 1)))[0]
        for i in range(3)
    ]
    sweep_paths = [
        glob.glob(os.path.join(data_dir,
                               "*SWEEP%i*_datablock.json" % (i + 1)))[0]
        for i in range(3)
    ]
    extra_args = ["filter_ice=False"]
    expected_unit_cell = uctbx.unit_cell(
        (9.440, 15.313, 17.126, 90.073, 90.106, 79.248))
    expected_rmsds = (0.32, 0.34, 0.005)
    expected_hall_symbol = ' P 1'

    result = run_one_indexing(" ".join(pickle_paths), " ".join(sweep_paths),
                              extra_args, expected_unit_cell, expected_rmsds,
                              expected_hall_symbol)
    assert len(result.indexed_reflections) > 12000, len(
        result.indexed_reflections)
    # expect at least indexed 2000 reflections per experiment
    for i in range(3):
        assert (result.indexed_reflections['id'] == i).count(True) > 2000
Example #2
def get_vimeo20k_instances(cfg, split, ext=".png"):
    """
        return a list of frame sequences
    """
    FRAME_SIZE = cfg.STPAN.FRAME_SIZE
    if split == 'train':
        dirpath = cfg.DATA.PATH_TO_TRAINING_SET
        all_sequences = []
        videos_clips = find_subdir_leaves(dirpath)
        for path_2_video in videos_clips:
            frames = glob.glob(path_2_video + '/*' + ext)
            frames.sort()
            for i in range(0, len(frames) - FRAME_SIZE):
                all_sequences.append(frames[i:i + FRAME_SIZE])
        return all_sequences
    else:
        if split == 'val':
            dirpath = cfg.DATA.PATH_TO_VAL_SET
        elif split == 'test':
            dirpath = cfg.DATA.PATH_TO_TEST_SET
        all_sequences = []
        videos_clips = find_subdir_leaves(dirpath)
        for path_2_video in videos_clips:
            # sort both listings so noisy/gt frame pairs line up deterministically
            noisy_frames = sorted(glob.glob(path_2_video + '/noisy*' + ext))
            gt_frames = sorted(glob.glob(path_2_video + '/gt*' + ext))
            all_sequences.append((noisy_frames, gt_frames))
        return all_sequences
Example #3
    def load_batch(self, batch_size=1, is_testing=False):
        data_type = "train" if not is_testing else "val"
        pathA = sorted(glob.glob('./datasets/%s/%s/A/*' % (self.dataset_name, data_type)), key=os.path.getmtime)
        pathB = sorted(glob.glob('./datasets/%s/%s/B/*' % (self.dataset_name, data_type)), key=os.path.getmtime)

        self.n_batches = len(pathA) // batch_size

        for i in range(self.n_batches-1):
            batch = pathA[i*batch_size:(i+1)*batch_size]
            imgs_A = []
            for img in batch:
                img_A = self.imread(img)
                img_A = scipy.misc.imresize(img_A, self.img_res)  # removed in SciPy >= 1.3; cv2.resize (below) is the alternative
                imgs_A.append(img_A)
                
            imgs_A = np.array(imgs_A)/127.5 - 1.
            
            batch = pathB[i*batch_size:(i+1)*batch_size]
            imgs_B = []
            for img in batch:
                img_B = self.imread(img)
                #img_B = scipy.misc.imresize(img_B, self.img_res)
                img_B = cv2.resize(img_B.astype('uint8'),(self.img_res), interpolation=cv2.INTER_NEAREST)         
                imgs_B.append(img_B)
                
            imgs_B = np.array(imgs_B)/127.5 - 1.
            
            yield imgs_A, imgs_B
Example #4
    def _scan(self):
        names_hr = sorted(
            glob.glob(os.path.join(self.dir_hr, '**', '*' + self.ext[0])))
        names_lr = sorted(
            glob.glob(os.path.join(self.dir_lr, '**', '*' + self.ext[1])))

        return names_hr, names_lr
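A note on the pattern above: glob.glob only treats '**' as "any number of directories" when recursive=True is passed; without it, '**' matches a single path component, so _scan only looks one directory level below dir_hr and dir_lr. A minimal sketch of the difference, using a hypothetical dataset/HR layout:

import glob
import os

# One directory level only: without recursive=True, '**' behaves like '*'.
one_level = glob.glob(os.path.join("dataset", "HR", "**", "*.png"))

# Any depth (including files directly under dataset/HR): needs recursive=True.
any_depth = glob.glob(os.path.join("dataset", "HR", "**", "*.png"), recursive=True)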
Example #5
 def test(self):
     self.sess.run(self.init)
     test_files = glob.glob('./data/test/test_use/*.dcm')
     test_save_filepath = glob.glob('./data/test/test_save')
     print('resnet!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
     # load testing input
     test_input = []
     test_data = np.zeros((1, 512, 512, 1), dtype=np.single)
     print("[*] Loading test images ...")
     test_input = load_images(test_files)  # list of array of different size
     #test_data = test_input
     if self.load(self.ckpt_dir):
         print(" [*] Load SUCCESS")
     else:
         print(" [!] Load failed...")
     psnr_sum = 0
     for idx in range(len(test_files)):
         test_data = test_input[idx]
         #noisy_image = test_data[idx]
         predicted_noise = self.forward(test_input[idx])
         print("forwarding successfully...")
         output_clean_image = test_data - predicted_noise
         #print(output_clean_image)
         psnr = cal_psnr(test_data, output_clean_image)
         print("cal_psnr")
         psnr_sum += psnr
         test_save_images(test_data, predicted_noise, output_clean_image,
                          test_save_filepath, idx)
     avg_psnr = psnr_sum / len(test_files)
     print("--- Average PSNR %.2f ---" % avg_psnr)
Example #6
def exercise_16():
    # test for small molecule multi-sweep indexing, 4 sweeps with different values
    # of goniometer.fixed_rotation()
    data_dir = os.path.join(dials_regression, "indexing_test_data",
                            "multi_sweep")
    import glob
    pickle_paths = [
        glob.glob(
            os.path.join(data_dir, "SWEEP%i" % (i + 1), "index",
                         "*_strong.pickle"))[0] for i in range(4)
    ]
    sweep_paths = [
        glob.glob(
            os.path.join(data_dir, "SWEEP%i" % (i + 1), "index",
                         "*_datablock_import.json"))[0] for i in range(4)
    ]
    extra_args = ["known_symmetry.space_group=I4", "filter_ice=False"]
    expected_unit_cell = uctbx.unit_cell(
        (7.310, 7.310, 6.820, 90.000, 90.000, 90.000))
    expected_rmsds = (0.10, 0.7, 0.5)
    expected_hall_symbol = ' I 4'

    result = run_one_indexing(" ".join(pickle_paths), " ".join(sweep_paths),
                              extra_args, expected_unit_cell, expected_rmsds,
                              expected_hall_symbol)
    assert len(result.indexed_reflections) > 1250, len(
        result.indexed_reflections)
Example #7
def fetch_training_data_files(data_path, modalities):
    training_data_files = list()
    for subject_dir in glob.glob(os.path.join(data_path,'*')):#os.path.join(os.path.dirname(__file__), "data", "preprocessed", "*", "*")):
        subject_files = list()
        for modality in modalities + ["*seg*"]:
            subject_files.append(glob.glob(os.path.join(subject_dir, '*'+modality+'*')))
        training_data_files.append(tuple(subject_files))
    return training_data_files
Example #8
    def test(self, args):
        """Test cyclegan"""
        init_op = tf.global_variables_initializer()
        self.sess.run(init_op)
        if args.which_direction == 'AtoB':
            #print('test set path', './datasets/'+ self.dataset_dir + '/testA');
            #dir_names, sample_files = testing_files('./datasets/'+ self.dataset_dir + '/testA')
            #print('Number of directory', len(dir_names));
            #print('Number of files', len(sample_files));
            sample_files = glob.glob(
                './datasets/{}/*.*'.format(self.dataset_dir + '/testA'))
        elif args.which_direction == 'BtoA':
            sample_files = glob.glob(
                './datasets/{}/*.*'.format(self.dataset_dir + '/testB'))
        else:
            raise Exception('--which_direction must be AtoB or BtoA')

        if self.load(args.checkpoint_dir):
            print(" [*] Load SUCCESS")
        else:
            print(" [!] Load failed...")

        # write html for visual comparison
        index_path = os.path.join(
            args.test_dir, '{0}_index.html'.format(args.which_direction))
        index = open(index_path, "w")
        index.write("<html><body><table><tr>")
        index.write("<th>name</th><th>input</th><th>output</th></tr>")

        out_var, in_var = (
            self.testB,
            self.test_A) if args.which_direction == 'AtoB' else (self.testA,
                                                                 self.test_B)

        for sample_file in sample_files:
            print('Processing image: ' + sample_file)
            sample_image = [load_test_data(sample_file, args.fine_size)]
            sample_image = np.array(sample_image).astype(np.float32)
            #d_name, f_name = os.path.split(sample_file);
            #root_d, c_d    = os.path.split(d_name);
            #saving_dir = args.test_dir+ '/'+ c_d+'/'
            #print('result directory', saving_dir)
            #if not os.path.exists(saving_dir):
            #    os.mkdir(saving_dir);
            image_path = os.path.join(
                args.test_dir, '{0}_{1}'.format(args.which_direction,
                                                os.path.basename(sample_file)))
            fake_img = self.sess.run(out_var, feed_dict={in_var: sample_image})
            save_images(fake_img, [1, 1], image_path)
            index.write("<td>%s</td>" % os.path.basename(image_path))
            index.write("<td><img src='%s'></td>" %
                        (sample_file if os.path.isabs(sample_file) else
                         ('..' + os.path.sep + sample_file)))
            index.write("<td><img src='%s'></td>" %
                        (image_path if os.path.isabs(image_path) else
                         ('..' + os.path.sep + image_path)))
            index.write("</tr>")
        index.close()
Example #9
def main():
    """Set up paths to image and joint data, saves results.
    :param base_dir: folder containing LSP images and data
    :param out_dir: output folder
    :param use_interpenetration: boolean, if True enables the interpenetration term
    :param n_betas: number of shape coefficients considered during optimization
    :param flength: camera focal length (an estimate)
    :param pix_thsh: threshold (in pixel), if the distance between shoulder joints in 2D
                     is lower than pix_thsh, the body orientation as ambiguous (so a fit is run on both
                     the estimated one and its flip)
    :param use_neutral: boolean, if True enables uses the neutral gender SMPL model
    :param viz: boolean, if True enables visualization during optimization
    """

    # Render degrees: List of degrees in azimuth to render the final fit.
    # Note that rendering many views can take a while.
    import glob
    import os

    seqPath = '/home/xiul/databag/dbfusion/record0'
    allImgs = glob.glob(os.path.join(seqPath, 'xiu/*.png'))
    allImgs.sort()
    allPoses = np.loadtxt(
        os.path.join(seqPath, 'pose_parameters_per_frame.txt'))
    allBetas = np.loadtxt(os.path.join(seqPath, 'shape_parameters.txt'))
    camK = np.loadtxt(os.path.join(seqPath, 'cam_params.txt'), delimiter=',')

    allTrans = allPoses[:, 3:6]
    allPose = np.hstack((allPoses[:, 0:3], allPoses[:, 6:]))
    model = load_model(MODEL_MALE_PATH)

    useIds = range(len(allImgs))

    allJson = glob.glob('/home/xiul/databag/dbfusion/record0/openpose/*.json')
    allJson.sort()

    for idx in useIds[10::10]:
        tarIUV = cv2.imread(allImgs[idx])
        tarIUV = cv2.resize(tarIUV, (1280, 720))
        tarIUV = tarIUV[:, :, ::-1]
        tarIUV[tarIUV[:, :, 0] == 0, :] = 255
        tarIUV = tarIUV.astype(float)  # np.float was removed in NumPy 1.24
        tarIUV = tarIUV / 255.0

        cJ2d, wJ2d = load_pose(allJson[idx])

        run_single_fit(tarIUV,
                       cJ2d,
                       wJ2d,
                       allPose[idx, 0:3],
                       allTrans[idx, :],
                       allPose[idx, 3:],
                       model,
                       camK,
                       viz=True,
                       ids=idx)
Example #10
def main():
    plt.ion()  # interactive mode

    # data_provider = DataProvider()
    #
    # train_loader = data_provider(args.train_data, args.batch_size, backbone=None, phase='train', num_worker=4)
    # eval_loader = data_provider(args.val_data, args.batch_size, backbone=None, phase='val', num_worker=4)
    #
    # images, labels = next(iter(eval_loader))
    #
    # plot_image_class(images[:36], labels[:36], index_class=index_class)

    dataset_path = '/media/alex/80CA308ECA308288/alex_dataset/ecological-assets'

    train_image_path = glob.glob(
        osp.join(dataset_path, 'suichang_round1_train_210120', '*.tif'))
    # train_mask_path = glob.glob(osp.join(dataset_path, 'suichang_round1_train_210120', '*.png'))

    test_image_path = glob.glob(
        osp.join(dataset_path, 'suichang_round1_test_partA_210120', '*.tif'))

    train_dataset = EcologicalDataset(
        image_path=train_image_path,
        transforms=get_transforms(size=256, mode='train'),
        album_aug=get_albu_transform(image_size=256),
        mode='train')

    test_dataset = EcologicalDataset(image_path=train_image_path,
                                     transforms=get_transforms(size=256,
                                                               mode='test'),
                                     mode='test')

    train_loader = data.DataLoader(train_dataset,
                                   batch_size=16,
                                   shuffle=True,
                                   num_workers=4)
    test_loader = data.DataLoader(test_dataset,
                                  batch_size=16,
                                  shuffle=True,
                                  num_workers=4)

    # for image, mask in train_dataset:
    image, mask = train_dataset[150]

    image = tensor_to_numpy(img_tensor=image, mean_std=True)
    mask = visual_mask(mask_img=mask + 1)
    plt.figure(figsize=(16, 8))
    plt.subplot(121)
    plt.imshow(mask, cmap='gray')
    plt.subplot(122)
    plt.imshow(image)
    plt.show()

    print('Done')
Example #11
def import_images(path='prediction/input/framewise/*.jpg'):
    # Glob pattern where the frame images are stored
    j = 0
    # Set of all pixtures in the path
    manual_pics = sorted(glob.glob(path), key=os.path.getmtime)
    # Array for storing all the pictures after preprocessing
    manual_imgs = []
    # Array for storing all the raw pictures
    manual_displays = []
    # Looping over all pictures in the path
    for file in manual_pics:
        # For displayingn the picture and writing over it
        manual_display = get_cv2_image(file,640,480)
        # For flipping the image
        # manual_display = img_flip_lr = cv2.flip(manual_display, 1)
        # Appending display image into manual_display
        manual_displays.append(manual_display)
        manual = get_cv2_image(file, img_rows=224, img_cols=224, color_type=3)

        manual_imgs.append(manual)
        j += 1

    manual_imgs = np.array(manual_imgs, dtype=np.uint8)
    manual_imgs = manual_imgs.reshape(-1,img_rows,img_cols,3)
    return manual_imgs, manual_displays
Example #12
    def read_enrichment(self, enrichment_files, threshold=1):
        """
        Reads current output of motif enrichment analysis to get gene targets.

        *Keyword arguments:*

          - enrichment_files -- One string, or a list of strings, representing enrichment file paths.
          - threshold -- P-value threshold for motif acceptance.
        """

        if isinstance(enrichment_files, list):
            file_list = [
                filename for pattern in enrichment_files
                for filename in glob.glob(npath(pattern))
            ]
        else:
            file_list = glob.glob(npath(enrichment_files))

        # reading networks
        for filename in file_list:
            # use last dir name as name for condition
            condition = os.path.dirname(filename)
            condition = condition.split("/")[-1]
            self.conditions.append(condition)

            network = {}

            f = open(filename, "r")

            # skip header
            next(f)

            for line in f:
                line = line.strip("\n")
                values = line.split("\t")
                motif = values[0]

                if motif in self.motifs_map:
                    p_value = float(values[2])
                    genes = values[9].split(",")

                    if threshold >= p_value:
                        network[motif] = genes

                    if motif in self.motifs_enrichment:
                        self.motifs_enrichment[motif][condition] = p_value
                    else:
                        self.motifs_enrichment[motif] = {condition: p_value}
                else:
                    print("motif not found: " + motif)

            self.networks[condition] = network

            f.close()
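The glob handling at the top of read_enrichment is the useful idiom here: a single pattern goes straight through glob.glob, while a list of patterns is flattened into one file list. A standalone sketch of that expansion, with hypothetical pattern paths:

import glob

patterns = ["cond_a/*_enrichment.txt", "cond_b/*_enrichment.txt"]  # hypothetical
file_list = [filename for pattern in patterns for filename in glob.glob(pattern)]

The two-step condition extraction further down (os.path.dirname followed by split("/")[-1]) is equivalent to the more portable os.path.basename(os.path.dirname(filename)).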
Example #13
    def grab_emulators(self, sza, vza, raa, verbose=True, emulator_home="emus/"):
        import glob
        # Locate all available emulators...
        files = glob.glob("%s.npz" % emulator_home)
        if len(files) == 0:
            files = glob.glob("%s*.npz" % emulator_home)
        emulator_search_dict = {}
        for f in files:
            ff = f.split('/')[-1].split('.')[0]
            try:
                emulator_search_dict[ float(ff.split("_")[0]), \
                                    float(ff.split("_")[2]),
                                    float(ff.split("_")[1]) - \
                                    float(ff.split("_")[3])] = f
            except (IndexError, ValueError):
                emulator_search_dict[ \
                                    float(ff.split("_")[1]),
                                    float(ff.split("_")[2]) , \
                                    float(ff.split("_")[3])] = f

        # So we have a dictionary indexed by SZA,
        # VZA and RAA and mapping to a filename
        emu_keys = np.array(list(emulator_search_dict.keys()))

        cemu_keys = np.cos(emu_keys * np.pi / 180.).T
        semu_keys = np.sin(emu_keys * np.pi / 180.).T
        cthis = np.cos(np.array([sza, vza, raa]) * np.pi / 180.)
        sthis = np.sin(np.array([sza, vza, raa]) * np.pi / 180.)
        # view vector
        v_xyzThis = np.array(
            [sthis[0] * cthis[2], sthis[0] * sthis[2], cthis[0]])
        v_emu_keys = np.array([semu_keys[0]*cemu_keys[2],\
                            semu_keys[0]*semu_keys[2],cemu_keys[0]])
        # sun vector
        s_xyzThis = np.array([sthis[1], 0. * sthis[0], cthis[1]])
        s_emu_keys = np.array([semu_keys[1], 0. * semu_keys[1], cemu_keys[1]])
        vdist = np.dot(v_emu_keys.T, v_xyzThis)
        sdist = np.dot(s_emu_keys.T, s_xyzThis)
        # closest emulation point
        emu_locs = np.argmax(vdist + sdist, axis=0)

        emulators = {}
        for i in range(len(sza)):
            the_emu_key = emu_keys[emu_locs[i]]
            k = the_emu_key

            emulators[(int(k[0]), int(k[1]), int(k[2]))] = \
                    gp_emulator.MultivariateEmulator(
                        dump=emulator_search_dict[(k[0], k[1], k[2])])
            if verbose:
                print(i, sza[i], vza[i], raa[i], k,
                      emulator_search_dict[(k[0], k[1], k[2])])
        return emulators
Example #14
def generate_CSV(image_npy_array_dir, heatmap_npy_array_dir, CSV_conf_path):
    with open(CSV_conf_path, 'w', newline='') as fp:
        a = csv.writer(fp, dialect='excel')

        # sort both listings so image/heatmap pairs line up deterministically
        image_tiles = sorted(glob.glob(image_npy_array_dir + "/*"))

        heatmap_tiles = sorted(glob.glob(heatmap_npy_array_dir + "/*"))

        for img, hm in zip(image_tiles, heatmap_tiles):
            img_file_name = ntpath.basename(img)
            hm_file_name = ntpath.basename(hm)
            line = [img_file_name, hm_file_name]
            a.writerow(line)
Example #15
    def read_enrichment(self, enrichment_files, threshold=1):
        """
        Reads current output of motif enrichment analysis to get gene targets.

        *Keyword arguments:*

          - enrichment_files -- One string, or a list of strings, representing enrichment file paths.
          - threshold -- P-value threshold for motif acceptance.
        """

        if isinstance(enrichment_files, list):
            file_list = [filename for pattern in enrichment_files for filename in glob.glob(npath(pattern))]
        else:
            file_list = glob.glob(npath(enrichment_files))

        # reading networks
        for filename in file_list:
            # use last dir name as name for condition
            condition = os.path.dirname(filename)
            condition = condition.split("/")[-1]
            self.conditions.append(condition)

            network = {}

            f = open(filename, "r")

            # skip header
            next(f)

            for line in f:
                line = line.strip("\n")
                values = line.split("\t")
                motif = values[0]

                if motif in self.motifs_map:
                    p_value = float(values[2])
                    genes = values[9].split(",")

                    if threshold >= p_value:
                        network[motif] = genes

                    if motif in self.motifs_enrichment:
                        self.motifs_enrichment[motif][condition] = p_value
                    else:
                        self.motifs_enrichment[motif] = {condition: p_value}
                else:
                    print("motif not found: " + motif)

            self.networks[condition] = network

            f.close()
Example #16
def load_sis(train_path, test_path, mode):
    """ Load data for the semantic image segmentor
    """
    image_data_train_x = []
    image_data_test_x = []
    image_data_train_y = []
    image_data_test_y = []
    training_xml = glob.glob(f'{train_path}*.xml')[0]
    testing_xml = glob.glob(f'{test_path}*.xml')[0]

    x_datasets = [image_data_train_x, image_data_test_x]
    y_datasets = [image_data_train_y, image_data_test_y]
    paths = [train_path, test_path]
    xml_paths = [training_xml, testing_xml]

    # check to make sure xml exists in both datasets
    if os.path.isfile(training_xml) and os.path.isfile(testing_xml):
        for i in range(len(x_datasets)):
            #raw images
            for image in sorted(glob.glob(f'{paths[i]}*.png')):
                img = Image.open(image)  # glob already returns the full path
                x_datasets[i].append(np.array(img, dtype='uint8'))
            x_datasets[i] = np.concatenate(x_datasets[i])
            y_datasets[i] = np.zeros(
                (len(x_datasets[i]), config.RESOLUTION_CAPTURE_WIDTH,
                 config.RESOLUTION_CAPTURE_HEIGHT, len(
                     config.SIS_ENTITIES_SR)),
                dtype=np.uint8)

            #annotated ground truth
            #convert xml file from cvat to ground truth for y
            tree = ET.parse(xml_paths[i])
            root = tree.getroot()
            index = 0
            for image in root.findall('image'):
                for polygon in image.findall('polygon'):
                    label = polygon.get('label')
                    points = polygon.get('points').split(';')
                    #process points into readable format
                    for p in range(len(points)):
                        points[p] = points[p].split(',')
                    points = [[math.ceil(float(x)) for x in lst]
                              for lst in points]
                    y_datasets[i][index,:,:,config.SIS_ENTITIES_SR.index(label)] = \
                    create_polygon(y_datasets[i][index,:,:,config.SIS_ENTITIES_SR.index(label)],points)
                index += 1

    else:
        raise FileNotFoundError('Missing xml annotations for dataset!')

    return x_datasets[0], y_datasets[0], x_datasets[1], y_datasets[1]
Example #17
    def __init__(self, **kwargs):
        self.root = root = work_path + 'yy.release/'
        assert osp.exists(root)
        num_pids = 30

        dftl = []
        for pid in range(num_pids):
            imps = glob.glob(self.root + f'gallery/{pid}/*')  # png or jpg
            dft = pd.DataFrame({
                'imgs': imps,
                'pids': pid * np.ones(len(imps), int),
                'cids': np.arange(len(imps))
            })
            dftl.append(dft)
        df_ori_gallery = pd.concat(dftl, axis=0)

        dftl = []
        for pid in range(num_pids):
            imps = glob.glob(self.root + f'query/{pid}/*')  # png or jpg
            dft = pd.DataFrame({
                'imgs': imps,
                'pids': pid * np.ones(len(imps), int),
                'cids': np.arange(len(imps))
            })
            dftl.append(dft)
        df_ori_query = pd.concat(dftl, axis=0)
        np.random.seed(16)
        all_ind = np.random.permutation(df_ori_query.shape[0])
        np.random.seed(int(time.time() * 100 % 2**31))
        train_ind = all_ind[:df_ori_query.shape[0] * 7 // 10]
        test_ind = all_ind[df_ori_query.shape[0] * 7 // 10:]
        df_ori_train = df_ori_query.iloc[train_ind]
        df_final_train = pd.concat((
            df_ori_gallery,
            df_ori_train,
        ), axis=0)
        df_ori_test = df_ori_query.iloc[test_ind]
        df_test = pd.concat((df_ori_gallery, df_ori_test), axis=0)
        self.gallery = df_test.to_records(index=False).tolist()
        # self.gallery = df_ori_gallery.to_records(index=False).tolist()
        self.query = df_ori_test.to_records(index=False).tolist()
        self.train = self.trainval = df_final_train.to_records(
            index=False).tolist()
        self.val = None
        self.num_train_pids = num_pids
        self.num_trainval_ids = num_pids
        self.num_val_ids = 0
        self.num_query_pids = num_pids
        self.num_gallery_pids = num_pids
        self.images_dir = None
Example #18
    def get_test_dataset(self) -> tf.data.Dataset:
        map_func = partial(load_image_test,
                           height=self.config.dataset.image_size,
                           width=self.config.dataset.image_size)
        images_path = os.path.join(self.base_path, 'val', 'out', '*.jpg')
        labels_path = os.path.join(self.base_path, 'val', 'out', 'col', '*.png')

        image_paths = glob.glob(images_path)
        label_map_paths = glob.glob(labels_path)

        test_dataset = tf.data.Dataset.from_tensor_slices((image_paths, label_map_paths))
        test_dataset = test_dataset.map(map_func)
        test_dataset = test_dataset.batch(self.global_batch_size)
        return self.strategy.experimental_distribute_dataset(test_dataset)
Example #19
 def __call__(self, path="."):
     try:
         return os.listdir(path)
     except OSError:
         candidates = glob.glob(path + "/")
         if len(candidates) == 0:
             candidates = glob.glob(path + "*/")
             if len(candidates) == 0:
                 os.listdir(path)  # let it raise an error
         if len(candidates) == 1:
             dir = candidates[0]
             return os.listdir(dir)
         else:
             raise OSError("[ls error] more than one directory matches")
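A quick demonstration of the fallback behaviour, assuming the surrounding class is called Ls (the listing only shows its __call__ method):

lister = Ls()
lister()        # plain os.listdir of the current directory
lister("/tm")   # not a directory: globs '/tm/' then '/tm*/', so a lone match like /tmp/ gets listed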
Example #20
def initDirs():
	import pyfits,glob	
	for f in FILTERS:
		#flat
		fdir = 	os.path.join(OUTPUTDIR, "flat", f)
		createDir(fdir)
		#object
		fdir = 	os.path.join(OUTPUTDIR, "object", f)
		createDir(fdir)
	for fitsfile in glob.glob("%s/*.fits" % IMGDIR):
		try:
			hdulist = pyfits.open(fitsfile)
		except IOError as e:
			print "I/O error({0}): {1}".format(e.errno, e.strerror)
			continue		
		header = hdulist[0].header
		#in M37 IMAGETYP = object for all
		#objectType = header["IMAGETYP"]
		if(header["OBJECT"].startswith("flat")):
			objectType = "flat"
		else:
			objectType = "object"		
		insfilter =  header["INSFILTE"]
		if insfilter in FILTERS:
			shutil.copy(fitsfile, os.path.join(OUTPUTDIR, objectType, insfilter))
			os.system("echo %s >> %s" % (os.path.basename(fitsfile), os.path.join(OUTPUTDIR, objectType, insfilter, "list" )))
Example #21
def generateAccuracies():
    global DIR
    global NUMBER_OF_GENRES

    N = 10

    g = glob.glob(DIR + '/*.csv')
    shuffle(g)

    DATA_LENGTH = len(g) // N
    songAccuracies = [0]*NUMBER_OF_GENRES
    segmentAccuracies = [0]*NUMBER_OF_GENRES
    for i in range(N):
        startIndex = i*DATA_LENGTH
        endIndex = len(g) if i == N - 1 else (i+1)*DATA_LENGTH
        (songAccuracyIncr, segmentAccuracyIncr) = classifySegments(g[:startIndex] + g[endIndex:], g[startIndex:endIndex])
        songAccuracies = [songAccuracies[j] + songAccuracyIncr[j] for j in range(NUMBER_OF_GENRES)]
        segmentAccuracies = [segmentAccuracies[j] + segmentAccuracyIncr[j] for j in range(NUMBER_OF_GENRES)]

    songAccuracies = [x/N for x in songAccuracies]
    segmentAccuracies = [x/N for x in segmentAccuracies]

    print('|'*100)
    for k in GENRE_DICT:
        i = GENRE_DICT[k]
        print('Cross-Validated song classification accuracy for %s: %5.2f%%' % (k, 100.0*songAccuracies[i]))
        print('Cross-Validated segment classification accuracy for %s: %5.2f%%' % (k, 100.0*segmentAccuracies[i]))
    print('|'*100)
Example #22
    def item_candidates(self, item, artist, album):
        dir = path.dirname(item.path)
        cues = glob.glob(path.join(dir, "*.cue"))
        if not cues:
            return
        if len(cues) > 1:
            self._log.info(u"Found multiple cue files, doing nothing: {0}",
                           map(displayable_path, cues))
            return

        cue_file = cues[0]
        self._log.info("Found {} for {}", displayable_path(cue_file), item)

        try:
            # careful: will ask for input in case of conflicts
            command_output(['shnsplit', '-f', cue_file, item.path])
        except (subprocess.CalledProcessError, OSError):
            self._log.exception(u'shnsplit execution failed')
            return

        tracks = glob.glob(path.join(dir, "*.wav"))
        self._log.info("Generated {0} tracks", len(tracks))
        for t in tracks:
            title = "dunno lol"
            track_id = "wtf"
            index = int(path.basename(t)[len("split-track"):-len(".wav")])
            yield TrackInfo(title, track_id, index=index, artist=artist)
Example #23
def load_checkpoint_model(checkpoint_path):
    '''
    Load the last checkpoint file
    '''
    import glob
    import os

    glob_pattern = os.path.join(checkpoint_path, '*.h5')
    print('glob pattern {}'.format(glob_pattern))

    list_of_checkpoint_files = glob.glob(glob_pattern)
    print('List of checkpoint files {}'.format(list_of_checkpoint_files))

    latest_checkpoint_file = max(list_of_checkpoint_files)
    print('Latest checkpoint file {}'.format(latest_checkpoint_file))

    initial_epoch_number_str = latest_checkpoint_file.rsplit(
        '_', 1)[-1].split('.h5')[0]
    initial_epoch_number = int(initial_epoch_number_str)

    loaded_model = TFDistilBertForSequenceClassification.from_pretrained(
        latest_checkpoint_file, config=config)

    print('loaded_model {}'.format(loaded_model))
    print('initial_epoch_number {}'.format(initial_epoch_number))

    return loaded_model, initial_epoch_number
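One caveat: max() on the file names compares strings lexicographically, so 'model_9.h5' would be chosen over 'model_10.h5'. Since the epoch number is already being parsed out of the name, a numeric key gives the truly latest checkpoint; a small sketch reusing the same '*_<epoch>.h5' naming assumption:

def epoch_of(checkpoint_file):
    # 'tf_model_12.h5' -> 12, same parsing as above
    return int(checkpoint_file.rsplit('_', 1)[-1].split('.h5')[0])

latest_checkpoint_file = max(list_of_checkpoint_files, key=epoch_of)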
Example #24
def clean_images(pattern):
    from skimage.io import imread
    for filename in glob.glob(pattern):
        try:
            imread(filename)
        except Exception:
            os.remove(filename)
Example #25
def readlist(listfile):
    import os, sys, re, glob
    if '*' in listfile:
        imglist = glob.glob(listfile)
    elif ',' in listfile:
        imglist = listfile.split(',')
    else:
        try:
            hdulist = fits.open(listfile)
        except:
            hdulist = []
        if hdulist: imglist = [listfile]
        else:
            try:
                ff = open(listfile, 'r')
                files = ff.readlines()
                ff.close()
                imglist = []
                for ff in files:
                    ff = re.sub(' ', '', ff)
                    if not ff == '\n' and ff[0] != '#':
                        ff = re.sub('\n', '', ff)
                        try:
                            hdulist = fits.open(ff)
                            imglist.append(ff)
                        except Exception as e:
                            print('problem reading header of', ff)
                            print(e)
            except:
                sys.exit('\n##### Error ###\n file ' + str(listfile) +
                         ' do not  exist\n')
    if len(imglist) == 0:
        sys.exit('\n##### Error ###\nIf "'+str(listfile)\
                             +'" is an image, it is corrupted \n or is not a list of image\n')
    return imglist
Example #26
def find_data_files(srcdir, *wildcards, **kw):
    # get a list of all files under the srcdir matching wildcards,
    # returned in a format to be used for install_data
    ## A list of partials within a filename that would disqualify it
    ## from appearing in the tarball.
    badnames = ["jquery", "ui-lightness"]

    def walk_helper(arg, dirname, files):
        names = []
        lst, wildcards = arg
        for wc in wildcards:
            wc_name = opj(dirname, wc)
            for f in files:
                filename = opj(dirname, f)
                # if ".pyc" not in filename:
                ## This hairy looking line excludes the filename
                ## if any part of one of  badnames is in it:
                if not any(bad in filename for bad in badnames):
                    if fnmatch.fnmatch(filename, wc_name) and not os.path.isdir(filename):
                        print(filename)
                        names.append(filename)
        if names:
            lst.append((dirname, names))

    file_list = []
    recursive = kw.get("recursive", True)
    if recursive:
        os.path.walk(srcdir, walk_helper, (file_list, wildcards))
    else:
        walk_helper((file_list, wildcards), srcdir, [os.path.basename(f) for f in glob.glob(opj(srcdir, "*"))])
    return file_list
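os.path.walk only exists in Python 2 (it was removed in Python 3.0). On Python 3 the same traversal is written with os.walk, which yields (dirname, subdirs, files) triples instead of invoking a callback; a minimal sketch of the recursive branch, keeping the badnames filtering (the function name is hypothetical):

import fnmatch
import os

def find_data_files_py3(srcdir, *wildcards, badnames=("jquery", "ui-lightness")):
    file_list = []
    for dirname, _, files in os.walk(srcdir):
        names = []
        for wc in wildcards:
            for f in files:
                filename = os.path.join(dirname, f)
                # exclude the file if any part of one of badnames is in it
                if any(bad in filename for bad in badnames):
                    continue
                if fnmatch.fnmatch(filename, os.path.join(dirname, wc)):
                    names.append(filename)
        if names:
            file_list.append((dirname, names))
    return file_list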
Example #27
    def get_varlist(self, pos=False, particle=False):
        """Get a list of all existing VAR# file names.

        pos = False:                 give full list
        pos = 'last'/'first':        give latest/first var file
        pos = 'lastXXX' / 'firstXXX' give last/first XXX varfiles
        pos = list of numbers:       give varfiles at this positions
        particle = True:             return PVAR- instead of VAR-list"""

        import glob
        from os.path import join as join
        from os.path import basename
        from pencilnew.math import natural_sort

        key = 'VAR'
        if particle == True: key = 'PVAR'

        varlist = natural_sort([
            basename(i)
            for i in glob.glob(join(self.data_dir, 'proc0') + '/' + key + '*')
        ])
        #if particle: varlist = ['P'+i for i in varlist]

        if pos is False: return varlist
        if isinstance(pos, list):
            if pos[0].startswith('PVAR'): pos = [i[4:] for i in pos]
            elif pos[0].startswith('VAR'): pos = [i[3:] for i in pos]
            return [varlist[int(i)] for i in pos]
        if pos == 'first': return [varlist[0]]
        if pos == 'last': return [varlist[-1]]
        if pos.startswith('last'): return varlist[-int(pos[4:]):]
        if pos.startswith('first'): return varlist[:int(pos[5:])]
        return varlist
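For reference, the pos forms described in the docstring are used roughly like this (a sketch; sim stands for whatever simulation object carries this method, and the file names depend on the run):

sim.get_varlist()                      # all VAR files, naturally sorted
sim.get_varlist(pos='last')            # only the newest snapshot, e.g. ['VAR42']
sim.get_varlist(pos='last3')           # the three newest snapshots
sim.get_varlist(pos=['VAR0', 'VAR5'])  # specific snapshots by name
sim.get_varlist(particle=True)         # PVAR files instead of VAR files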
Example #28
    def drop(choreo_id):
        for path in [F_ORG, F_SLICE]:
            try:
                for i in glob.glob("{}.*".format(path + choreo_id)):
                    os.remove(i)
            except:
                pass

        if os.path.isdir(F_SLICE + choreo_id):
            os.rmdir(F_SLICE + choreo_id)

        try:
            Initializer.choreo_metainfo_list.remove(choreo_id)
        except:
            pass

        # reformat youtube-dl's archive file
        read = TxtReader().read("/home/jihee/choleor_media/audio/archive.txt")
        try:
            read.remove("youtube {}\n".format(choreo_id))
        except:
            pass
        TxtWriter().write("/home/jihee/choleor_media/audio/archive.txt", read)
        # self.audio_meta_list.remove(audio_id)
        # print(self.audio_meta_list)


# if __name__ == '__main__':
#     Initializer().drop("h5jz8xdpR0M")
Example #29
def listfiles(folder, extension='*.tif', full=False):
    '''
        COULD BE IMPROVED: change this *.tif to tif,
        and DO SOMETHING so you don't have to put the / at the end of folder
        (SOME IF, WHO KNOWS)

        Lists the files with the chosen extension in the chosen folder.
        The list is sorted and repeated values are removed.
        Default extension = '*.tif'
        full=False; if True, returns the full path.

    '''

    listname = []
    import glob
    for j in glob.glob(folder + extension):
        if full == True:
            listname.append(j)
        else:
            b = j.split('/')
            c = b[-1]
            listname.append(c)
    listname.sort()
    listname = [ii for n, ii in enumerate(listname) if ii not in listname[:n]]
    return listname
Example #30
 def send_documents_folder(self,
                           folder: str,
                           config: str,
                           server: str = "") -> list:
     if server == "":
         if self.server == "":
             raise Exception('No server address provided')
         else:
             server = self.server
     responses = []
     os.chdir(folder)
     files = [glob.glob(e) for e in self.__supported_input_files()]
     files_flat = list(chain.from_iterable(files))
     for file in files_flat:
         packet = {
             'file': (file, open(file, 'rb'), 'application/pdf'),
             'config': (config, open(config, 'rb'), 'application/json'),
         }
         r = requests.post('http://' + server + '/api/v1/document',
                           files=packet)
         responses.append({
             'file': file,
             'config': config,
             'status_code': r.status_code,
             'server_response': r.text
         })
     return responses
Example #31
def readlist(listfile):
    import os, sys, re, glob
    if '*' in listfile:
        imglist=glob.glob(listfile)
    elif ',' in listfile: imglist = listfile.split(',')
    else:
        try:            hdulist= fits.open(listfile)
        except:           hdulist=[]
        if hdulist:            imglist = [listfile]
        else:
           try:
              ff = open(listfile,'r')
              files = ff.readlines()
              ff.close()
              imglist = []
              for ff in files: 
                 ff=re.sub(' ','',ff)
                 if not ff=='\n' and ff[0]!='#':
                    ff=re.sub('\n','',ff)
                    try:
                       hdulist= fits.open(ff)
                       imglist.append(ff)
                    except Exception as e:
                        print('problem reading header of', ff)
                        print(e)
           except:              sys.exit('\n##### Error ###\n file '+str(listfile)+' do not  exist\n')
    if len(imglist)==0:
           sys.exit('\n##### Error ###\nIf "'+str(listfile)\
                                +'" is an image, it is corrupted \n or is not a list of image\n')
    return imglist
Example #32
    def get_varlist(self, pos=False, particle=False):
        """Get a list of all existing VAR# file names.

        pos = False:                 give full list
        pos = 'last'/'first':        give latest/first var file
        pos = 'lastXXX' / 'firstXXX' give last/first XXX varfiles
        pos = list of numbers:       give varfiles at this positions
        particle = True:             return PVAR- instead of VAR-list"""

        import glob
        from os.path import join as join
        from os.path import basename
        from pencilnew.math import natural_sort

        key = 'VAR'
        if particle == True: key = 'PVAR'

        varlist = natural_sort([basename(i) for i in glob.glob(join(self.datadir, 'proc0')+'/'+key+'*')])
        #if particle: varlist = ['P'+i for i in varlist]

        if pos is False: return varlist
        if isinstance(pos, list):
            if pos[0].startswith('PVAR'): pos = [i[4:] for i in pos]
            elif pos[0].startswith('VAR'): pos = [i[3:] for i in pos]
            return [varlist[int(i)] for i in pos]
        if pos == 'first': return [varlist[0]]
        if pos == 'last': return [varlist[-1]]
        if pos.startswith('last'): return varlist[-int(pos[4:]):]
        if pos.startswith('first'): return varlist[:int(pos[5:])]
        return varlist
Example #33
    def __init__(self,
                 sess,
                 input_height=108,
                 input_width=108,
                 crop=True,
                 output_height=64,
                 output_width=64,
                 gf_dim=64,
                 df_dim=64,
                 c_dim=3,
                 dataset_name='lsun',
                 input_fname_pattern='*.webp'):

        self.sess = sess
        self.crop = crop

        self.input_height = input_height
        self.input_width = input_width
        self.output_height = output_height
        self.output_width = output_width

        self.gf_dim = gf_dim
        self.df_dim = df_dim
        self.c_dim = c_dim

        self.dataset_name = dataset_name
        self.input_fname_pattern = input_fname_pattern
        self.data = glob.glob(
            os.path.join(IMAGE_PATH, self.input_fname_pattern))
        self.checkpoint_dir = LOGDIR
Example #34
def test_batch_h5(FLAGS):
    """
    test all the h5 weights files in the model_dir
    """
    file_paths = glob.glob(os.path.join(FLAGS.eval_weights_path, '*.h5'))
    for file_path in file_paths:
        test_single_h5(FLAGS, file_path)
Example #35
    def get_path_completions(self, text, line, begidx, endidx):
        '''
    Get completions for paths

    '''
        import os
        from os.path import isdir
        import glob

        def _append_slash_if_dir(p):
            if p and isdir(p) and p[-1] != os.sep:
                return p + os.sep
            else:
                return p

        before_arg = line.rfind(" ", 0, begidx)
        if before_arg == -1:
            return  # arg not found

        fixed = line[before_arg + 1:begidx]  # fixed portion of the arg
        arg = line[before_arg + 1:endidx]
        pattern = arg + '*'

        completions = []
        for path in glob.glob(pattern):
            path = _append_slash_if_dir(path)
            completions.append(path.replace(fixed, "", 1))
        return completions
Example #36
def makedaypage(day,month,year,station):
    '''
    Make daily page for one station and copy plots to desired directory
    '''
    import datetime,os,glob,shutil
    
    doy=datetime.date(year,month,day).strftime('%j')
    daypage='/var/www/daypages/mainday.html'
    daydir='/var/www/daypages/'+str(year)+doy+'/'
    stationpage=daydir+station+'.html'
    plotdir='/home/sopac/GM/plots/'+str(year)+doy+'/'
    #Make directory for webpage
    try:
        os.mkdir(daydir)
    except:
        pass
    #Read in template web page and replace patterns
    s = open(daypage).read()
    #station name
    s = s.replace('Station ####', 'Station '+station)
    #date
    s = s.replace('YYYY/MM/DD', str(year)+'/'+str(month)+'/'+str(day))
    #name of plot files
    s = s.replace('SSSS.', station+'.')
    #save as new page
    f = open(stationpage, 'w')
    f.write(s)
    f.close()
    #Now station plots to this dir
    plotlist=glob.glob(plotdir+station+'*')
    for k in range(len(plotlist)):
        shutil.copy(plotlist[k], daydir)
Example #37
def load_files_in_dir(directory: str, suffix: str = ''):
    """Search for all files in directory (optionally filtered by suffix) and return them as a pandas
    dataframe
    
    Parameters
    -------
    directory : str
        Directory to search for files in
    suffix : str
        If a suffix string is provided, file names without this suffix will be ignored

    Returns
    -------
    : pd.DataFrame
        Pandas dataframe with base filenames (=filenames without path or suffix) as keys and full filenames as values;
        Dataframe is sorted by full filenames;
    """
    from os import path
    import glob
    import pandas as pd
    
    sample_pattern = "**/*{}".format(suffix)
    
    # Collect files in path, sort them by name, and store them into dictionary
    samples = glob.glob(path.join(directory, sample_pattern), recursive=True)
    samples.sort()
    
    # Extract base filenames without path or suffix and store them as keys for the pandas dataframe
    keys = [path.basename(file)[:-len(suffix)] if suffix else path.basename(file)
            for file in samples]
    
    # Store in data frame for easy indexing and fast key->value access
    samples = pd.DataFrame(index=keys, data=samples)
    
    return samples
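A small usage sketch, assuming a directory of .fasta samples (the layout and names are hypothetical):

samples = load_files_in_dir("data/samples", suffix=".fasta")
print(samples.loc["sample_01", 0])  # full path stored under base name 'sample_01'
for base_name, full_path in samples[0].items():
    print(base_name, "->", full_path)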
Example #38
    def run(self):

        import os
        import glob
        from PyQt4.uic import compileUi

        for infile in glob.glob(os.path.join('glue', 'qt', 'ui', '*.ui')):
            print("Compiling " + infile)
            directory, filename = os.path.split(infile)
            outfile = os.path.join(directory, filename.replace('.ui', '.py'))
            compileUi(infile, open(outfile, 'w'))

        import sys
        import subprocess
        print("Compiling glue/qt/glue.qrc")
        if sys.version_info[0] == 2:
            option = '-py2'
        else:
            option = '-py3'
        try:
            subprocess.call([self.pyrcc4, option, 'glue/qt/glue.qrc', '-o', 'glue/qt/glue_qt_resources.py'])
        except OSError:
            print("pyrcc4 command failed - make sure that pyrcc4 "
                  "is in your $PATH, or specify a custom command with "
                  "--pyrcc4=command")
Example #39
def chooseFile(path, filt='*.*', selection=None):
    import glob, sys
    flist = glob.glob(path + filt)
    print(flist)
    err = True
    while err:
        idx = 0
        for f in flist:
            print('[%d]: %s' % (idx, f))
            idx = idx + 1
        print('[%s]: quit' % 'q')
        if selection is None:
            choice = input('Please choose one of the files above: ')
            if choice.lower() == 'q':
                sys.exit('User quit')
            else:
                try:
                    out = flist[int(choice)]
                    return out
                except IndexError:
                    print('Choice out of range.')
                except ValueError:
                    print('Invalid choice.')

        else:
            print('File %d selected in function call' % selection)
            try:
                out = flist[selection]
                return out
            except IndexError:
                print('Selection out of range.')
                sys.exit()
            except ValueError:
                print('Invalid selection.')
                sys.exit()
Example #40
def find_data_files(srcdir, *wildcards, **kw):
    # get a list of all files under the srcdir matching wildcards,
    # returned in a format to be used for install_data
    def walk_helper(arg, dirname, files):
        if '.svn' in dirname:
            return
        names = []
        lst, wildcards = arg
        for wc in wildcards:
            wc_name = opj(dirname, wc)
            for f in files:
                filename = opj(dirname, f)

                if fnmatch.fnmatch(filename, wc_name) and not os.path.isdir(filename):
                    names.append(filename)
        if names:
            lst.append( (os.path.join(target, dirname), names ) )

    file_list = []
    recursive = kw.get('recursive', True)
    target = kw.get('target', '')
    if recursive:
        os.path.walk(srcdir, walk_helper, (file_list, wildcards))
    else:
        walk_helper((file_list, wildcards),
                    srcdir,
                    [os.path.basename(f) for f in glob.glob(opj(srcdir, '*'))])
    return file_list
Example #41
  def get_path_completions(self, text, line, begidx, endidx):
    '''
    Get completions for paths

    '''
    import os
    from os.path import isdir
    import glob

    def _append_slash_if_dir(p):
      if p and isdir(p) and p[-1] != os.sep:
        return p + os.sep
      else:
        return p

    before_arg = line.rfind(" ", 0, begidx)
    if before_arg == -1:
      return # arg not found

    fixed = line[before_arg+1:begidx]  # fixed portion of the arg
    arg = line[before_arg+1:endidx]
    pattern = arg + '*'

    completions = []
    for path in glob.glob(pattern):
      path = _append_slash_if_dir(path)
      completions.append(path.replace(fixed, "", 1))
    return completions
Example #42
def get_data_files(srcdir, *wildcards, **kw):
    def walk_helper(arg, dirname, files):
        if '.git' in dirname:
            return
        names = []
        lst, wildcards = arg
        for wc in wildcards:
            wc_name = opj(dirname, wc)
            for f in files:
                filename = opj(dirname, f)

                if fnmatch.fnmatch(filename, wc_name) and not os.path.isdir(filename):
                    names.append(filename)
        if names:
            lst.append( (dirname, names ) )

    file_list = []
    recursive = kw.get('recursive', True)
    if recursive:
        os.path.walk(srcdir, walk_helper, (file_list, wildcards))
    else:
        walk_helper((file_list, wildcards),
                    srcdir,
                    [os.path.basename(f) for f in glob.glob(opj(srcdir, '*'))])
    return file_list
Example #43
    def __init__(self, sess, config, model):
        print('{:=^120}'.format(' Building MidiNet '))


        self.config = config
        self.sess = sess

        # create global step variable and increment op
        self.global_step = tf.Variable(0, name='global_step', trainable=False)
        self.global_step_increment = tf.assign(self.global_step, self.global_step+1)

        # create generator (G)
        self.model = model

        self.summaries = tf.get_collection(tf.GraphKeys.SUMMARIES)
        self.summary = tf.summary.merge(self.summaries)
        self.summary_image = tf.summary.merge([s for s in self.summaries if '/prediction/' in s.name])

        self.model.get_model_info(quiet=False)

        """ Saver """
        self.saver = tf.train.Saver()
        self.saver_g = tf.train.Saver(self.model.g_vars, max_to_keep=30)
        self.saver_d = tf.train.Saver(self.model.d_vars, max_to_keep=30)
        self.saver_dict = {'midinet': self.saver, 'G':self.saver_g, 'D':self.saver_d}
        print('{:=^120}'.format('Done!'))

        print('*initializing variables...')



        # init metrics and loss collection
        self.metrics = Metrics(eval_map=self.config.eval_map,
                    inter_pair=self.config.inter_pair,
                    drum_filter=self.config.drum_filter,
                    scale_mask=self.config.scale_mask,
                    track_names=self.config.track_names)

        tf.global_variables_initializer().run()

        self.dir_ckpt = os.path.join(self.config.exp_name, 'checkpoint')
        self.dir_sample = os.path.join(self.config.exp_name, 'samples')
        self.dir_log = os.path.join(self.config.exp_name, 'logs')

        if not os.path.exists(self.dir_ckpt):
            os.makedirs(self.dir_ckpt)
        if not os.path.exists(self.dir_sample):
            os.makedirs(self.dir_sample)
        if not os.path.exists(self.dir_log):
            os.makedirs(self.dir_log)

        path_src = os.path.join(self.dir_log, 'src')

        if not os.path.exists(path_src):
            os.makedirs(path_src)

        for file_path in glob.glob("./*.py"):
            copyfile(file_path, os.path.join(path_src, os.path.basename(file_path)))
Example #44
def regpath(subject_id, fpath):
    import os, glob
    fpath = fpath % subject_id
    gpath = glob.glob(fpath)
    if len(gpath) == 0:
        raise Exception("Could not find reg file %s" % fpath)
    elif len(gpath) > 1:
        raise Exception("Too many files found for reg file '%s'" % fpath)
    return gpath[0]
Example #45
def display_image(img,frame,_z1,_z2,scale,_xcen=0.5,_ycen=0.5,_xsize=1,_ysize=1,_erase='yes'):
    goon = True
    import glob, subprocess, os, time
    ds9 = subprocess.Popen("ps -U {:d} u | grep -v grep | grep ds9".format(os.getuid()),shell=True,stdout=subprocess.PIPE).stdout.readlines()
    if len(ds9)== 0 :   
       subproc = subprocess.Popen('ds9',shell=True)
       time.sleep(3)

    if glob.glob(img):
       from pyraf import iraf
       iraf.images(_doprint=0)
       iraf.tv(_doprint=0)
       import os
       if _z2: 
          try:
              sss=iraf.display(img + '[0]', frame, xcen=_xcen, ycen=_ycen, xsize=_xsize, ysize=_ysize, erase=_erase,\
                                   fill='yes', zscale='no', zrange='no', z1=_z1, z2=_z2,Stdout=1)
          except:
              print('')
              print('### ERROR: PROBLEM OPENING DS9')
              print('')
              goon = False
       else:
        try:  
            sss=iraf.display(img + '[0]', frame, xcen=_xcen, ycen=_ycen, xsize=_xsize, ysize=_ysize, erase=_erase, fill='yes', Stdout=1)
        except:
            print('')
            print('### ERROR: PROBLEM OPENING DS9')
            print('')
            goon = False
 
       if scale and goon:
          answ0 = input('>>> Cuts OK ? [y/n] ? [y] ')
          if not answ0: answ0 = 'y'
          elif answ0 == 'no' or answ0 == 'NO': answ0 = 'n'

          while answ0=='n':
              _z11 = float(sss[0].split()[0].split('=')[1])
              _z22 = float(sss[0].split()[1].split('=')[1])
              z11 = input('>>> z1 = ? ['+str(_z11)+'] ? ')
              z22 = input('>>> z2 = ? ['+str(_z22)+'] ? ')
              if not z11: z11 = _z11
              else: z11 = float(z11)
              if not z22: z22 = _z22
              else: z22 = float(z22)
              print(z11, z22)
              sss=iraf.display(img + '[0]',frame,fill='yes', xcen=_xcen, ycen=_ycen, xsize=_xsize, ysize=_ysize, erase=_erase,\
                                   zrange='no', zscale='no', z1=z11, z2=z22, Stdout=1)
              answ0 = input('>>> Cuts OK ? [y/n] ? [y] ')
              if not answ0: answ0 = 'y'
              elif answ0 == 'no' or answ0 == 'NO': answ0 = 'n'
       if goon:
          _z1, _z2 = sss[0].split()[0].split('=')[1], sss[0].split()[1].split('=')[1]
    else:
        print('Warning: image '+str(img)+' not found in the directory')
    return _z1, _z2, goon
Example #46
def cleanup_cspp_workdir(workdir):
    """Clean up the CSPP working dir after processing"""
    import os
    import glob
    filelist = glob.glob('%s/*' % workdir)
    this = [ os.remove(s) for s in filelist if os.path.isfile(s) ]
    print "Number of files left after cleaning working dir = ", len(this)
    #shutil.rmtree(workdir)
    #os.mkdir(workdir)
    return
Example #47
def make_combined_species_lists():
    for flu in ['H9']:
        flist = glob.glob('/Users/yujiazhou/Documents/nextflu/H9_nextflu-master/auspice/'+flu+'*species.tsv')
        all_species=set()
        for fname in flist:
            with open(fname) as infile:
                all_species.update([tuple(line.split('\t')[:2]) for line in infile])

        with open('/Users/yujiazhou/Documents/nextflu/H9_nextflu-master/auspice/'+flu+'_all_species.tsv', 'w') as ofile:
            for strain, species in all_species:
                ofile.write(strain+'\t'+species+'\n')
Example #48
def make_combined_accession_number_lists():
    for flu in ['H9', 'H4', 'H7', 'H10']:
        flist = glob.glob('/Users/yujiazhou/Documents/nextflu/H9_nextflu-master/auspice/data/'+flu+'*accession_numbers.tsv')
        all_accessions=set()
        for fname in flist:
            with open(fname) as infile:
                all_accessions.update([tuple(line.split('\t')[:2]) for line in infile])

        with open('/Users/yujiazhou/Documents/nextflu/H9_nextflu-master/auspice/data/'+flu+'_all_accession_numbers.tsv', 'w') as ofile:
            for strain, acc in all_accessions:
                ofile.write(strain+'\t'+acc+'\n')
Exemple #49
0
def make_combined_accession_number_lists():
    for flu in ['H3N2', 'H1N1pdm', 'Vic', 'Yam']:
        flist = glob.glob('../auspice/data/'+flu+'*accession_numbers.tsv')
        all_accessions=set()
        for fname in flist:
            with open(fname) as infile:
                all_accessions.update([tuple(line.split('\t')[:2]) for line in infile])

        with open('../auspice/data/'+flu+'_all_accession_numbers.tsv', 'w') as ofile:
            for strain, acc in all_accessions:
                ofile.write(strain+'\t'+acc+'\n')
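Both functions above follow the same pattern: glob a family of two-column TSV files, deduplicate rows through a set, and write the union back out. A generic hedged version (the function name and the two-column assumption are mine, not from the original source):

import glob

def merge_two_column_tsvs(pattern, outpath):
    rows = set()
    for fname in sorted(glob.glob(pattern)):
        with open(fname) as infile:
            # keep only the first two tab-separated fields, deduplicated
            rows.update(tuple(line.rstrip('\n').split('\t')[:2]) for line in infile)
    with open(outpath, 'w') as ofile:
        for first, second in sorted(rows):
            ofile.write(first + '\t' + second + '\n')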
Exemple #50
0
def exercise_16():
  # test for small molecule multi-sweep indexing, 4 sweeps with different values
  # of goniometer.fixed_rotation()
  data_dir = os.path.join(dials_regression, "indexing_test_data", "multi_sweep")
  import glob
  pickle_paths = [
    glob.glob(os.path.join(data_dir, "SWEEP%i" %(i+1), "index", "*_strong.pickle"))[0]
    for i in range(4)]
  sweep_paths = [
    glob.glob(os.path.join(data_dir, "SWEEP%i" %(i+1), "index", "*_datablock_import.json"))[0]
    for i in range(4)]
  extra_args = ["known_symmetry.space_group=I4"]
  expected_unit_cell = uctbx.unit_cell(
    (7.310, 7.310, 6.820, 90.000, 90.000, 90.000))
  expected_rmsds = (0.10, 0.7, 0.5)
  expected_hall_symbol = ' I 4'

  result = run_one_indexing(" ".join(pickle_paths),  " ".join(sweep_paths),
                            extra_args, expected_unit_cell,
                            expected_rmsds, expected_hall_symbol)
  assert len(result.indexed_reflections) > 1250, len(result.indexed_reflections)
Exemple #51
0
def name_duplicate(img, nome, ext):
   import glob
   from lsc.util import readhdr, readkey3, delete
   dimg = readkey3(readhdr(img), 'DATE-OBS')
   listafile = glob.glob(nome+'_?'+ext+'.fits') + glob.glob(nome+'_??'+ext+'.fits')
   if len(listafile) == 0:
      nome = nome+'_1'+ext+'.fits'
   else:
      date = []
      for l in listafile:
         date.append(readkey3(readhdr(l), 'DATE-OBS'))
      if dimg in date:
         # an image with the same DATE-OBS already exists: reuse its name
         nome = listafile[date.index(dimg)]
#         if overwrite:
#            delete(nome)
      else:
         # pick the first free suffix _1, _2, ...
         n = 1
         while nome+'_'+str(n)+ext+'.fits' in listafile:
            n = n+1
         nome = nome+'_'+str(n)+ext+'.fits'
   return nome
Exemple #52
0
def list_serial_ports():
    # Adapted from http://stackoverflow.com/questions/11303850/what-is-the-cross-platform-method-of-enumerating-serial-ports-in-python-includi
    import glob
    import platform
    import sys
    import serial

    system_name = platform.system()
    if system_name == "Windows":
        # Scan for available ports.
        available = []
        for i in range(256):
            try:
                s = serial.Serial(i)
                available.append(i)
                s.close()
            except serial.SerialException:
                pass
        return available
    elif system_name == "Darwin":  # Mac
        return glob.glob('/dev/tty*') + glob.glob('/dev/cu*')
    elif system_name == "Linux":
        # + glob.glob('/dev/ttyS*')
        return glob.glob('/dev/ttyACM*') + glob.glob('/dev/ttyUSB*')
    else:
        print("Error: unknown system: " + system_name)
        sys.exit(1)
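pyserial 3.x ships a cross-platform port enumerator that avoids the per-OS branching above; a minimal sketch (the wrapper name is mine):

from serial.tools import list_ports

def list_serial_ports_builtin():
    # Each entry is a ListPortInfo; .device is the port name, e.g. 'COM3' or '/dev/ttyUSB0'.
    return [port.device for port in list_ports.comports()]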
def init_db():
    with app.app_context():
        db = get_db()
        is_db_inited = query_db(
                """SELECT CASE
                       WHEN EXISTS (SELECT * FROM SchemaVersion LIMIT 1) THEN 1
                       ELSE 0
                   END""")
        if not is_db_inited:
            for fname in sorted(glob.glob(SQL_SCHEMA_FILES)):
                print 'sql schema file', fname
                with app.open_resource(fname, mode='r') as f:
                    db.cursor().executescript(f.read())
            db.commit()
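One caveat with the guard above: in SQLite (implied by executescript), the EXISTS query raises "no such table" if SchemaVersion has never been created, so it only distinguishes an empty table from a populated one. A hedged sketch of a guard that also handles the missing-table case, assuming the same query_db helper used by init_db:

def schema_version_table_exists():
    # sqlite_master lists all tables, so this works even before any schema is applied.
    return bool(query_db(
        "SELECT 1 FROM sqlite_master WHERE type='table' AND name='SchemaVersion'"))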
Exemple #55
0
def main():
    import glob
    import os
    from Bio import SeqIO  # Biopython

    args = parse_args()

    if os.path.isdir( args.input ):
        sffs = glob.glob( args.input + '/*.sff' )
    elif os.path.exists( args.input ):
        sffs = [args.input]
    else:
        raise ValueError( "{} is not a valid directory or sff file".format(args.input) )

    # Convert all input sff to fastq
    for sff in sffs:
        bn, ext = os.path.splitext( sff )
        outname = bn + '.fastq'
        SeqIO.write( SeqIO.parse( sff, 'sff' ), outname, 'fastq' )
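Biopython also provides a one-call converter that replaces the SeqIO.parse/SeqIO.write pair above; a sketch with hypothetical file names:

from Bio import SeqIO

# returns the number of records converted
count = SeqIO.convert('reads.sff', 'sff', 'reads.fastq', 'fastq')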
Exemple #56
0
    def read_mtf(self, mtf_filenames):
        """
        Reads TF annotation in mtf (internal format; check manual) format.

        *Keyword arguments:*

          - mtf_filenames -- A string, or a list of strings, representing .mtf file paths.
        """

        if not isinstance(mtf_filenames, list):
            mtf_filenames = [mtf_filenames]

        file_list = [filename for pattern in mtf_filenames for filename in glob.glob(npath(pattern))]

        # Iterating over the file name list
        for filename in file_list:

            database = os.path.splitext(os.path.basename(filename))[0]

            # Opening MTF file
            mtf_file = open(filename, "r")

            # Reading file
            for line in mtf_file:
                # Processing line
                line_list = line.strip().split("\t")
                tf_id = line_list[0].strip()
                name = line_list[1].strip()
                version = line_list[2].strip()
                gene_names = line_list[3].strip().split("+")
                tf_class = line_list[4].strip()
                uniprot_ids = line_list[5].strip().split(";")
                data_source = line_list[6].strip()
                tax_group = line_list[7].strip()
                species = line_list[8].strip()
                threshold_list = line_list[9].strip().split(",")
                fpr_list = [0.005, 0.001, 0.0005, 0.0001, 0.00005, 0.00001]
                thresholds = {}
                for i in range(0, 6):
                    thresholds[fpr_list[i]] = float(threshold_list[i])

                self.add(MotifAnnotation(tf_id, name, database, version, gene_names, tf_class, uniprot_ids, data_source,
                                         tax_group, species, thresholds))

            # Termination
            mtf_file.close()
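From the parsing above, each line of an .mtf file carries ten tab-separated fields, the last holding six comma-separated thresholds matched positionally to the fixed FPR list. An illustrative line (every value is invented for the example):

sample_mtf_line = "\t".join([
    "TF0001",                        # tf_id
    "ExampleTF",                     # name
    "1",                             # version
    "GENE1+GENE2",                   # gene_names, '+'-separated
    "bHLH",                          # tf_class
    "P00001;P00002",                 # uniprot_ids, ';'-separated
    "SELEX",                         # data_source
    "vertebrates",                   # tax_group
    "Homo sapiens",                  # species
    "8.0,9.5,10.1,11.3,12.0,13.2",   # thresholds for FPRs 0.005 ... 0.00001
])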
Exemple #57
0
    def copy_extensions(self, extensions):
        super(MediaCollector, self).copy_extensions(extensions)

        # Create the media subdir where the
        # Python files are collected.
        media = "resources"
        full = os.path.join(self.collect_dir, media)
        if not os.path.exists(full):
            self.mkpath(full)

        # Copy the media files to the collection dir.
        # Also add the copied file to the list of compiled
        # files so it will be included in the zipfile.
        for f in glob.glob('../shared/resources/*'):
            name = os.path.basename(f)
            self.copy_file(f, os.path.join(full, name))
            self.compiled_files.append(os.path.join(media, name))
Exemple #58
0
    def fix_gtksharp_configs(self):
        print 'Fixing GTK# configuration files...',
        count = 0
        libs = [
            'atk-sharp',
            'gdk-sharp',
            'glade-sharp',
            'glib-sharp',
            'gtk-dotnet',
            'gtk-sharp',
            'pango-sharp'
        ]
        gac = os.path.join(bockbuild.package_root, "lib", "mono", "gac")
        confs = [glob.glob(os.path.join(gac, x, "*", "*.dll.config")) for x in libs]
        for c in itertools.chain(*confs):
            count += 1
            self.fix_dllmap(c, lambda line: "dllmap" in line)
        print count
Exemple #59
0
    def load_directory(self, db_list):

        for directory in db_list:
            for file_name in glob.glob(directory + "/*.pwm"):
                tf_id = os.path.splitext(os.path.basename(file_name))[0]
                name = tf_id
                database = os.path.basename(directory)
                version = "0"
                gene_names = None
                tf_class = None
                uniprot_ids = None
                data_source = None
                tax_group = None
                species = None
                thresholds = {}

                self.add(MotifAnnotation(tf_id, name, database, version, gene_names, tf_class, uniprot_ids, data_source,
                                         tax_group, species, thresholds))