Example 1
    def move(self, new_photo_dir, new_photo_name):
        old_path = self.get_path()

        # Ensure new_name has correct extension
        _, extension = os.path.splitext(self.name)
        new_short_name, _ = os.path.splitext(new_photo_name)
        new_name = f"{new_short_name}{extension}"

        # Get full file path
        new_dir = self.build_dir(self.is_video, self.size_key, new_photo_dir)
        self.dir = new_dir
        self.name = new_name
        new_path = self.get_path()

        if old_path != new_path:
            # generate new non-conflicting name
            new_dir, new_name = self.get_new_name(new_dir, new_name)

            # Get revised full file path
            self.dir = new_dir
            self.name = new_name
            new_path = self.get_path()

            full_old_path = os.path.join(settings.IMAGE_PATH, old_path)
            full_new_path = os.path.join(settings.IMAGE_PATH, new_path)

            # Actually move the file
            print(f"Moving '{full_old_path}' to '{full_new_path}'.")
            if not os.path.lexists(os.path.dirname(full_new_path)):
                os.makedirs(os.path.dirname(full_new_path), 0o755)
            shutil.move(full_old_path, full_new_path)

            self.save()
Example 2
 def walker(arg, d, files, path=path, newpath=newpath, fs=fs, newfs=newfs):
     targetDir = os.path.join(newpath, d[len(path):])
     if newfs.exists(targetDir) and not newfs.isdir(targetDir):
         raise VFSException, "file %s already exists" % targetDir
     newfs.mkdir(targetDir)
     for f in files:
         _movefile(fs, os.path.join(d, f), os.path.join(targetDir, f), newfs)
Example 3
 def __getitem__(self, idx):
     img1 = cv2.imread(os.path.join(self.root1, str(idx + 1) + ".bmp"))
     img2 = cv2.imread(os.path.join(self.root2, str(idx + 1) + ".jpg"))
     if self.transform is not None:
         img1 = self.transform(img1)
         img2 = self.transform(img2)
     return img1, img2
Example 4
    def build_crf_corpus(self, percentage=0.9, path="", cross_validation=False):
        # Todo: shuffle or not
        def chunks(l, n):
            """ Yield successive n-sized chunks from l.
            """
            for i in xrange(0, len(l), n):
                yield l[i:i+n]

        print "The number of features is {}".format(str(len(self.vectorizer.get_feature_names())))
        if not cross_validation:
            trainlen = int(len(self._corpus) * percentage)
            corpus_train = self._corpus[:trainlen]
            corpus_test = self._corpus[trainlen:]
            self._build_crf_corpus_help(corpus_train, os.path.join(path, 'train.data'))
            self._build_crf_corpus_help(corpus_test, os.path.join(path, 'test.data'))
        else:
            len1 = int(len(self._corpus) * 0.1)
            corpus10fold = list(chunks(self._corpus, len1))
            for i in range(10):
                corpus_test = corpus10fold[i]
                corpus_train = [instance for t in corpus10fold if t is not corpus_test for instance in t ]
                dpath = os.path.join(path, str(i))
                if not os.path.exists(dpath):
                    os.mkdir(dpath)
                self._build_crf_corpus_help(corpus_test, os.path.join(path, str(i), 'test.data'))
                self._build_crf_corpus_help(corpus_train, os.path.join(path, str(i), 'train.data'))
Example 5
def getData():
    # read input data from input dir, and reduplicate, then feed to spiders
    input_data_dir = settings['INPUT_DATA_DIR']
    log_dir = settings['HIT_LOG_DIR']
    process_dir = settings['PROVESS_DATA_DIR']
    p = ''
    input_files_list = []
    file_batch_size = settings['FILE_BATCH_SIZE']

    # read input data files
    for root, dirs, files in os.walk(input_data_dir):
        for f in files:
            input_files_list.append(f)
    # get input data
    input_data = {}
    for f in input_files_list[:file_batch_size]:
        # read input data
        with open(os.path.join(input_data_dir, f)) as reader:
            for line in reader:
                input_data.update({line.strip(): ''})
        # remove input file after reading
        os.remove(os.path.join(input_data_dir, f))

        # remove those data that has been search and hit
        with open(os.path.join(log_dir, f)) as reader:
            for line in reader:
                if line.strip() in input_data:
                    del input_data[line.strip()]

    return input_data.keys()
Example 6
    def make_graphs(self, trial_num, results):
        """
        Saves a plot of the reward for each episode and the moving average over 100 episodes
        """
        sns.set_style('darkgrid')

        path = os.path.join(self.dir, f"trial_{trial_num}")

        try:
            os.mkdir(path)
        except FileExistsError:
            pass

        # Save plot of the rewards
        results_array = np.array(results)
        n = len(results_array)
        ax = np.arange(0, n)

        plt.plot(ax, results_array)
        plt.savefig(os.path.join(path, "rewards.pdf"), dpi=1600)

        # Save plot of the moving average of the rewards
        moving_avg = np.empty(n)
        for i in range(n):
            moving_avg[i] = results_array[max(0, i - 100):i + 1].mean()

        plt.clf()  # start a fresh figure so the moving average is plotted on its own
        plt.plot(ax, moving_avg)
        plt.savefig(os.path.join(path, "rewards_ma.pdf"), dpi=1600)
Example 8
 def __init__(self, path_to_root):
     '''
     Args:
         path_to_root (string): the root path to PF-Pascal dataset extracted from zip file.
     Returns:
         None
     '''
     self.image_dir = os.path.join(path_to_root, "PF-dataset-PASCAL",
                                   "JPEGImages")
     self.annotation_dir = os.path.join(path_to_root, "PF-dataset-PASCAL",
                                        "Annotations")
     self.category_list = os.listdir(self.annotation_dir)
     self.category_list = [
         cat for cat in self.category_list if cat != ".DS_Store"
     ]
     # filter out the .DS_Store file; it's not a category
     self.num_category = len(self.category_list)
     self.category_images = []
     for cat in self.category_list:
         tmp_dir = os.listdir(os.path.join(self.annotation_dir, cat))
         image_lists_of_the_category = [
             os.path.splitext(m_file)[0] + ".jpg" for m_file in tmp_dir
         ]
         # convert extensions (.m -> .jpg) for each file name
         self.category_images.append(image_lists_of_the_category)
Example 9
    def convertAMRToMp3(self,dirpath,filename):
        pathToAMR = os.path.join(dirpath, filename)
        pathToMP3 = os.path.join(dirpath, "%s.mp3" % os.path.splitext(filename)[0])

        print "Creating file: " + pathToMP3

        # ffmpeg -i .amr -acodec libmp3lame -ab 64k test.mp3
        call(["ffmpeg", "-i",pathToAMR,"-acodec","libmp3lame","-ab","64k",pathToMP3])
Example 10
 def clone(self, app, target_path):
     """
     Use rsync to clone the contents of a web2py app folder (web2py/applications/<appname>)
     to a folder outside the web2py directory, ignoring .git files.
     """
     app_path = os.path.join('applications', app)
     target_path = os.path.join('../', app, app_path, target_path)
     subprocess.call(['rsync', '-avz', '--exclude', '.git/*', app_path, target_path])
Example 11
def build_from_path(in_dir, out_dir, hparams, test_speaker=None, num_workers=1, tqdm=lambda x: x):
    """
    Preprocesses the speech dataset from a given input path to given output directories

    Args:
        - hparams: hyper parameters
        - input_dir: input directory that contains the files to preprocess
        - out_dir: output directory of npz files
        - n_jobs: Optional, number of worker process to parallelize across
        - tqdm: Optional, provides a nice progress bar

    Returns:
        - A list of tuples describing the train examples. This should be written to train.txt

    """
    # Train & Test path 설정
    train_path = join(out_dir, "train")
    test_path = join(out_dir, "test")
    os.makedirs(train_path, exist_ok=True)
    os.makedirs(test_path, exist_ok=True)

    # list that stores the speaker names
    speakers = []

    # for multiprocessing
    executor = ProcessPoolExecutor(max_workers=num_workers)
    futures = []
    index = 1

    print(os.path.join(in_dir, "*"))
    speaker_paths = glob.glob(os.path.join(in_dir, "*"))
    # exit if there is no data to preprocess
    if not speaker_paths:
        print("dataset is empty!")
        exit(-1)

    # train & test split
    total_speaker_num = len(speaker_paths)
    train_speaker_num = (total_speaker_num // 10) * 9
    print("Total speaker number : %d" % total_speaker_num)
    print("train : %d, test : %d" % (train_speaker_num, total_speaker_num - train_speaker_num))

    for i, path in enumerate(speaker_paths):
        # extract speaker name
        speaker_name = path.split('/')[-1]
        speakers.append(speaker_name)

        # data output dir
        if i < train_speaker_num:
            data_out_dir = os.path.join(train_path, speaker_name)
        else:
            data_out_dir = os.path.join(test_path, speaker_name)

        print("speaker %s processing..." % speaker_name)
        futures.append(executor.submit(partial(_process_utterance, data_out_dir, path, speaker_name, hparams)))
        index += 1

    return [future.result() for future in tqdm(futures) if future.result() is not None], speakers
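A minimal usage sketch of build_from_path (the paths are illustrative, and hparams plus _process_utterance are assumed to be defined elsewhere in the project):

metadata, speakers = build_from_path('/data/raw_speech', '/data/preprocessed', hparams, num_workers=4)
print("%d speakers processed" % len(speakers))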
Example 12
 def pop(self):
     with self.lock:
         ret = {}
         for i in os.listdir(self.prefix):
             path = os.path.join(self.prefix, i)
             if os.path.isfile(path):
                 with open(path) as fh:
                     ret[i] = load(fh)
             os.remove(path)
         return ret
Example 13
def Predict(datapath, stylefile, testfile, model):
    style = os.path.join(datapath, 'style', stylefile)
    test = os.path.join(datapath, 'test', testfile)
    transform = transforms.Compose(
        [transforms.Resize((500, 500)),
         transforms.ToTensor()])

    test = transform(Image.open(test))  # Resize expects a PIL image, not a file path

    result = model(style, test)
    result.save()
Example 14
 def addComponent(self, component):
     """
     This method adds the component fileObj to the appropriate directory
     """
     path = self.getComponentsDir()
     fileObjpath = os.path.join(path, component)
     while os.path.exists(fileObjpath):
         print "Component with name '%s' already exists" % component
         response = self.askForNewFilename()
         fileObjpath = os.path.join(path, response)
     os.makedirs(fileObjpath)
Example 15
def only_public_members():
    '''
    Search through json files containing scraped MyFitnessPal usernames
    and discard any which do not have their diary settings set to public.

    Expected directory structure to search:
    /webscraper/
        |--/only_public_profiles.py
    /data/
        |--/page_1/
            |--group_1.json
            |--group_2.json
            ...
        |--/page_2/
            |--group_1.json
            |--group_2.json
            ...
    '''
    json_files = {}
    for root, dirs, files in os.walk(os.path.join(os.getcwd(), '../data')):
        json_files[root] = files
    # Remove the key for the top-level directory
    del json_files[os.path.join(os.getcwd(), '../data')]

    public_json = {}
    s = requests.Session()
    for page_dir, json_page_list in json_files.items():
        for json_page in json_page_list:
            with open(os.path.join(page_dir, json_page)) as f:
                json_data = json.load(f)
                print(os.path.join(page_dir, json_page))

                # Multiprocessing to speed this up
                with Pool(cpu_count() - 1) as p:
                    public_profiles = p.map(is_public, json_data['Members'])

                # Remove all None values (Private Accounts)
                public_profiles = list(filter(None, public_profiles))

                # Put the data in an expected format
                group = json_data['Group']

                public_json[group] = {
                    'Group': group,
                    'URL': json_data['URL'],
                    'Member_Count': json_data['Member_Count'],
                    'Members': public_profiles
                }

                to_json(page_dir, json_page, public_json[group])
Example 16
def pre(*, input_path, output_path):
    ad_path = os.path.join(input_path, 'ad_id.csv')
    click_log_path = os.path.join(input_path, 'click_log.csv')
    ad = pd.read_csv(ad_path)
    click_log = pd.read_csv(click_log_path)
    ad_click = pd.merge(ad, click_log, on='creative_id')
    ad_click['industry'] = ad_click['industry'].astype(str).replace(
        r'\N', '336').astype(int)
    #test_ad_click['industry'] = test_ad_click['industry'].apply(lambda x: 0 if str(x)==r'\N' else int(str(x)))
    ad_click['product_id'] = ad_click['product_id'].astype(str).replace(
        r'\N', '0').astype(int)
    ad_click = ad_click.sort_values(by=['user_id', 'time'],
                                    ascending=[True, True])
Example 17
 def save_model(self, model, model_path=None):
     if isinstance(model, torch.nn.DataParallel):
         model = model.module
     if model_path is None:
         if 'model_path' in self.train_info and os.path.exists(
                 os.path.join(self.logdir, self.train_info['model_path'])):
             print("Removing old model {}".format(
                 self.train_info['model_path']))
             os.remove(os.path.join(self.logdir, self.train_info['model_path']))
         model_path = misc.datetimestr() + '.model.pth'
         self.train_info['model_path'] = model_path
     print("Saving model to {}".format(model_path))
     model.save(os.path.join(self.logdir, model_path))
Example 18
def create_profile(name, contact_email, b64_image, animal='Cat', lost=True):
    profile_id = max_profile_idx() + 1
    profile = {
        'id': profile_id,
        'name': name,
        'animal': animal,
        'lost': lost,
        'contact_email': contact_email,
        'image': b64_image
    }
    profile_dir = os.path.join(DB_DIR, str(profile_id))
    os.mkdir(profile_dir)
    save_json(os.path.join(profile_dir, 'profile.json'), profile)  # pass the profile data to be written
    return profile
Example 19
def __initConfig():
    import SkunkWeb.Configuration as C
    import os
    C.mergeDefaults(
        webdavDB=os.path.join(C.SkunkRoot, 'var/run/WEBDAVdb'),
        webdavFS=fs.WebdavFS(),
    )
Example 20
def evaluate(model_specs, model_dir, params, restore_from):
    """Evaluate the model

    Args:

    """
    # Initialize tf.Saver()
    saver = tf.train.Saver()

    with tf.Session() as sess:
        sess.run(model_specs['variable_init_op'])

        # Reload weights from weights subdirectory
        save_path = os.path.join(model_dir, restore_from)
        if os.path.isdir(save_path):
            save_path = tf.train.latest_checkpoint(save_path)
        saver.restore(sess, save_path)

        # Evaluate
        num_steps = (params.eval_size + params.batch_size -
                     1) // params.batch_size
        metrics = evaluate_sess(sess, model_specs, num_steps)
        metrics_name = '_'.join(restore_from.split('/'))
        save_path = os.path.join(model_dir,
                                 "metric_test_{}.json".format(metrics_name))
        save_dict_to_json(metrics, save_path)
Example 21
def generateMcModInfo(src_dir):
    print 'Finding minecraft version'
    with open(os.path.join(src_dir, 'minecraft', 'net', 'minecraft', 'client', 'Minecraft.java'), 'rb') as mcClass:
        for line in mcClass:
            startIndex = line.find('"Minecraft Minecraft ')
            endIndex = line.rfind('"')
            if startIndex > -1 and endIndex > -1 and  endIndex > startIndex:
                mcVersion = line[startIndex:endIndex]
                mcVersion = mcVersion.replace('"', '')
                mcVersion = mcVersion.replace('Minecraft', '')
                mcVersion = mcVersion.replace(' ', '')
    modid = ''
    version = ''
    
    for root, _, filelist in os.walk(os.path.join(src_dir, 'minecraft', 'mods'), followlinks=True):
        for cur_file in filelist:
            if cur_file.endswith('.java'):
                with open(os.path.join(root, cur_file), 'rb') as modClass:
                    for line in modClass:
                        if(line.startswith('@Mod(')):
                            line = line.replace('@Mod(','')
                            line = line.replace(')','')
                            line = line.strip()
                            fields = line.split(', ')
                            for field in fields:
                                pair = field.split('=')
                                if(pair[0] == 'modid'):
                                    modid = pair[1]
                                elif (pair[0] == 'name'):
                                    modname = pair[1]
                                elif (pair[0] == 'version'):
                                    version = pair[1]
Example 22
def del_em(file_dict, dest_dir):
    for d, files in file_dict.items():
        if os.path.isdir(d):
            pattern = '*'
            dp_io.printf("arg %s is a dir, glob(%s)? ", d, pattern)
            a = sys.stdin.readline().strip()
            if not a:
                a = pattern
            files = glob.glob(os.path.join(d, a))
        for f in files:
            # if dest_file exists and is the same, del in src.:
            dest_file = os.path.join(dest_dir, os.path.basename(f))
            num = 0
            while os.path.exists(dest_file):
                if filecmp.cmp(f, dest_file):
                    os.unlink(f)
                dp_io.printf("dest_file(%s) exists copying with modified name\n",
                             dest_file)
                name, ext = os.path.splitext(dest_file)
                dest_file = name + "-" + str(num) + ext
                num += 1
            print "os.rename(%s, %s)" % (f, dest_file)
        remains = os.listdir(d)
        if remains:
            ans = "n"
            dp_io.printf("files remain in src dir(%s); Remove them(y/N)? ", d)
            ans = sys.stdin.readline()
Example 23
def generate_and_save_images_conditional(model, epoch, test_input, X_test, label_test, maxval, save_img=False, path=None):
    # Notice `training` is set to False.
    # This is so all layers run in inference mode (batchnorm).
    predictions = model([test_input, label_test], training=False)
    #print(labels_input)
    X_test_np = X_test.numpy()
    
    fig = plt.figure(figsize=(16,16))

    for i in range(8):
        plt.subplot(4, 4, 2*i+1 )
        plt.imshow(predictions[i, :, :, 0] * (maxval) )
        plt.colorbar()
        plt.subplot(4, 4, 2*i+2 )
        plt.imshow(X_test_np[i, :, :, 0] * (maxval) )
        plt.colorbar()
        plt.axis('off')


    if save_img and path is not None:
        os.makedirs(path, exist_ok=True)
        file_path = os.path.join(path, 'image_at_epoch_{:04d}.png'.format(epoch))
        plt.savefig(file_path)
        
    plt.show()
Example 24
def build_file(mode, hour, dir='/home/shao', prefix='delta_vs_tweets_'):
    """
    Build file path.
    -----------------
    Parameters:
        mode, string, type of measurement 1
        hour, string, type of measurement 2
        dir, string, directory that stores the data file
        prefix, string, prefix of the data file
    Return:
        string, filepath
    """
    if mode == 'whole':
        return os.path.join(dir, prefix + mode + '.csv')
    else:
        return os.path.join(dir, prefix + mode + '_' + hour + '.csv')
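With the defaults above, a hypothetical call would produce paths like these (illustrative only):

print(build_file('whole', '12'))  # /home/shao/delta_vs_tweets_whole.csv
print(build_file('daily', '12'))  # /home/shao/delta_vs_tweets_daily_12.csv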
Example 25
def copy_files(fn, files):
    out = []
    for f in files:
        fname = os.path.join(fn, f)
        job = ["cp", f, fname]
        out.append(run_and_return(job))
    return out
Example 26
 def _fetch_jdk(self, jdk_distribution, paths):
     if not self._mx_path:
         self._mx_path = mx.get_jdk().home
     mx._opts.quiet = True
     tmp_dir = tempfile.gettempdir()
     _, repo_path = VC.get_vc_root(paths[0])
     common_location = os.path.join(repo_path, 'common.json')
     fetch_args = [
         '--to', tmp_dir, '--java-distribution', jdk_distribution,
         '--configuration', common_location
     ]
     JdkDistribution._jdk_distributions = []
     args = mx_fetchjdk._parse_fetchsettings(fetch_args)
     distribution = args["java-distribution"]
     base_path = args["base-path"]
     jdk_home = distribution.get_final_path(base_path)
     if mx.is_darwin():
          jdk_home = os.path.join(jdk_home, 'Contents', 'Home')
     if jdk_home != self._jdk_home:
         try:
             mx_fetchjdk.fetch_jdk(fetch_args)
             self._jdk_home = jdk_home
             os.environ['JAVA_HOME'] = jdk_home
         except (OSError, IOError) as err:
             mx.log(str(err))
             self._jdk_home = mx.get_jdk().home
     mx._opts.quiet = False
Example 27
def check_image(image, box, time=None):
    res = False
    y, x, h, w = box
    if verbose >= 2:
        cv2.drawContours(
            image,
            np.array([[[x, y], [x + w, y], [x + w, y + h], [x, y + h]]],
                     np.int32), 0, (0, 0, 255), 2)
        show_image('image', image)

    crop_img = image[y:y + h, x:x + w]

    if verbose >= 2:
        show_image('crop', crop_img)

    cnt = 0
    if is_save_frame:
        filename = os.path.join(
            is_save_frame, "image_" +
            (time.replace(':', '_') + str(cnt) if time else str(cnt)) + ".jpg")
        cv2.imwrite(filename, crop_img)

    if adaptive_threshold == 1:
        txt = text.adaptive_tresholding(crop_img, lang, verbose=verbose)
    elif adaptive_threshold == 2:
        txt = text.otsus_binarization(crop_img, lang, verbose=verbose)
    else:
        txt = text.simple_extract(crop_img, lang)

    if txt:
        if len(txt.replace(' ', '').strip('\n')) > 16:
            res = True
            print('{} - {}'.format(time, txt))

    return res
Example 28
def _create_child_path(i_parent_path, i_child, i_new_paths_already_used, i_old_child_map, i_old_paths_used_for_obj):
    """
    Returns a unique path for the child object.
    The unique path is guaranteed to not already exist
    in i_new_paths_already_used. It could exist in
    i_old_paths_used_for_obj, but only if this path
    is used for the same object in the old map


    i_parent_path (str) : The already-guaranteed-to-be-unique
    path of the parent of i_child
    i_child (file or collection) : The object to create a
    unique path for
    i_new_paths_already_used (collection of strings) : the
    paths that have already been used in the new gdocs backup
    i_old_child_map (maps object id to collection of strings) :
    contains the old collections of paths used for i_child
    i_old_paths_used_for_obj (collection of strings) : the
    paths that were used in the old gdocs backup
    """

    attempt = os.path.join(i_parent_path, i_child.filename)

    # Keep appending a string until the path is unique or the path
    # is found to have not changed since the last backup
    while attempt in i_new_paths_already_used or (attempt in i_old_paths_used_for_obj and attempt not in i_old_child_map[i_child.resource_id]):
        attempt += '__uniq__'
    return attempt
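The uniqueness loop above simply appends a suffix until the candidate path is free; a standalone sketch of that behaviour (the paths are hypothetical):

used = {'/backup/docs/report.txt', '/backup/docs/report.txt__uniq__'}
attempt = '/backup/docs/report.txt'
while attempt in used:
    attempt += '__uniq__'
print(attempt)  # /backup/docs/report.txt__uniq____uniq__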
Example 29
def update_comments_in_src(src_path='src', spreadsheet_path=''):
    keyword_dict = get_keyword_dict_from_spreadsheet()
    python_files = [f for f in os.listdir(src_path) if f.endswith('.py')]
    num_updated_total = 0
    for filename in python_files:
        num_updated_file = 0
        edits_required = False
        tmp_filename = os.path.join(src_path, filename + '.tmp')
        with open(os.path.join(src_path, filename), 'r') as source_file:
            print ('\n'+filename)
            with open(tmp_filename, 'w') as tmp_file:
                for line in source_file:
                    match = re.search(pattern, line)
                    if match is not None:
                        keyword = match.group(1)
                        citation = match.group(2)
                        if keyword not in keyword_dict:
                            print ('WARNING: keyword '+ keyword + ' not found in spreadsheet.')
                            break
                        if citation == keyword_dict[keyword]:
                            print ('[OK] '+ keyword + ' ['+ citation +']')
                        else:
                            edits_required = True
                            num_updated_file += 1
                            num_updated_total += 1
                            line = line.replace(citation, keyword_dict[keyword])
                            citation = keyword_dict[keyword]
                            print ('[UP] '+ keyword + ' ['+ citation +']')
                    tmp_file.write(line)  # write the (possibly updated) line to the temp file
        if edits_required:
            shutil.move(src=tmp_filename, dst=os.path.join(src_path, filename))
        else:
            os.remove(tmp_filename)  # nothing changed; discard the temp file
        print (str(num_updated_file) + ' edits made to '+ filename)
    print (str(num_updated_total) + ' edits made in total.')
Example 30
    def order(self, destination_base, source_files):
        """
        Yield a list of (source, destination) tuples describing the move operations that should
        occur.
        """
        if not os.path.isabs(destination_base):
            destination_base = os.path.abspath(destination_base)
        destination_files = set()

        for source in source_files:
            if not os.path.isabs(source):
                source = os.path.abspath(source)
            if not os.path.exists(source):
                continue

            base = os.path.basename(source)
            name, ext = os.path.splitext(base)

            tags = Tags.read(source)
            try:
                destination_path = tags.format(self.destination_format)
            except (AttributeError, KeyError):
                destination_path = self.unknown_destination

            index = 1
            destination_path = os.path.join(destination_base, destination_path)
            destination = os.path.join(destination_path, base)

            while os.path.exists(destination) or destination in destination_files:
                destination = os.path.join(destination_path, '{}_{:03d}{}'.format(name, index, ext))
                index += 1

            destination_files.add(destination)
            yield (source, destination)
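A minimal usage sketch (mover is a hypothetical object exposing order() as above; the paths are illustrative):

for source, destination in mover.order('/music/library', ['/music/incoming/song.mp3']):
    print(source, '->', destination)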
Example 31
def list_directories(parent_dir, recurse=True):
    """
    returns a list of directories in parent_dir
    Directory 'common' will appear first if present
    if recurse = True, also includes all subdirectories in the tree
    """
    import os
    rv = []
    if recurse:
        for root, dum1, dum2 in os.walk(parent_dir):
            if root == parent_dir:
                continue
            rv.append(root)
    else:
        import stat
        subobjs = os.listdir(parent_dir)
        rv = [os.path.join(parent_dir, x) for x in subobjs]
        rv = [x for x in rv if stat.S_ISDIR(os.stat(x).st_mode)]
    rv.sort()
    truerv = []
    for x in rv:
        if 'common' in x:
            truerv = [x] + truerv
        else:
            truerv.append(x)
    return truerv
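A usage sketch of list_directories (directory names are illustrative), showing 'common' sorted to the front:

dirs = list_directories('/srv/configs', recurse=False)
# e.g. ['/srv/configs/common', '/srv/configs/app1', '/srv/configs/app2']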
Example 32
 def __getNewSR_10_0__(self, wkidOut):
     try:
         parentDir = None
         p_type = wkidInfo[str(wkidOut)]['type']
         if p_type == 'gcs':
             parentDir = os.path.join(arcpy.GetInstallInfo()["InstallDir"],
                                      "Coordinate Systems",
                                      "Geographic Coordinate Systems")
         elif p_type == 'pcs':
             parentDir = os.path.join(arcpy.GetInstallInfo()["InstallDir"],
                                      "Coordinate Systems",
                                      "Projected Coordinate Systems")
         else:
             raise Exception("wkid not registered!")
         prjFile = parentDir
         path = wkidInfo[str(wkidOut)]['path'].split('/')
         for p in path:
              prjFile = os.path.join(prjFile, p)
         sr = arcpy.SpatialReference(prjFile)
         return sr
     except arcpy.ExecuteError:
         EH = ErrorHandling.ErrorHandling()
         line, filename, err = EH.trace()
         m = "Python error on " + line + " of " + __file__ + \
             " : with error - " + err
         arcpy.AddMessage(m)
Example 33
def local(directory):
    '''Show charms that actually exist locally. Different than Mr.list'''
    local_charms = []
    for charm in os.listdir(directory):
        if os.path.exists(os.path.join(directory, charm, '.bzr')):
            local_charms.append(charm)
    return local_charms
Example 34
def Clear_Thumb():
    latest = latestDB('Textures')
    if yesnoDialog(AddonTitle,
                   "Would you like to delete the %s and Thumbnails folder?" %
                   latest,
                   "They will repopulate on startup",
                   nolabel='No, Cancel',
                   yeslabel='Yes, Remove'):
        try:
            removeFile(os.path.join(control.DATABASE, latest))
        except:
            log('Failed to delete, Purging DB.')
            purgeDb(latest)
        removeFolder(control.THUMBSPATH)
        if yesnoDialog(AddonTitle,
                       "Would you like to restart Kodi now?",
                       "",
                       nolabel='No',
                       yeslabel='Yes'):
            from resources.lib.modules import forceClose
            forceClose.ForceClose()
        else:
            log('Clear Thumbnails Cancelled')
    else:
        log('Clear Thumbnails Cancelled')
Example 35
def SaveFile2():
    baseurl="http://www.csrc.gov.cn/pub/newsite/ssgsjgb/bgczfkyj"  # base URL
    needSubPath=False  # whether to create subfolders
    flodername="download6"  # directory to save downloads in
    pagenum=2

    curpath=os.getcwd()
    downloadpath=os.path.join(curpath,flodername)
    if os.path.exists(downloadpath):
        shutil.rmtree(downloadpath)  # os.rmdir fails on a non-empty directory
    os.mkdir(downloadpath)
    
    for pageindex in range(0,pagenum):
        url=""
        if pageindex==0:
            url=baseurl+""
        else:
            url=baseurl+"/index_%d.htm" % pageindex
        print(url)
        html = getHtml(url)
        reslist=ParseDocUrl(html)
        for docitem in reslist:
            docurl=baseurl+docitem[0]
            doctitle=docitem[1]
            dochtml = getHtml(docurl)
            docpath=docitem[0].split("/")[1]
            htmltext = re.findall('(<div[\s]*class="content">[.\S\s]*)<div[\s]*class="foot">',dochtml)[0]
            filename=doctitle  # gb2312-encoded bytes would break os.path.join below
            subpath=downloadpath
            if needSubPath:
                subpath=os.path.join(downloadpath,docpath)
                if not os.path.exists(subpath):
                    os.mkdir(subpath)
            converhtml2doc(htmltext,os.path.join(subpath,filename))
Example 36
def AddToRegistry():

    # __file__ is the path of the file being executed,
    # so pth is the directory that contains this script
    pth = os.path.dirname(os.path.realpath(__file__))

    # name of the python file with extension
    s_name = "autorunstartup.py"

    # join the file name to the end of the path
    address = os.path.join(pth, s_name)

    # key we want to change is HKEY_CURRENT_USER
    # key value is Software\Microsoft\Windows\CurrentVersion\Run
    key = reg.HKEY_CURRENT_USER
    key_value = r"Software\Microsoft\Windows\CurrentVersion\Run"

    # open the key to make changes to (avoid shadowing the open() builtin)
    run_key = reg.OpenKey(key, key_value, 0, reg.KEY_ALL_ACCESS)

    # modify the opened key
    reg.SetValueEx(run_key, "any_name", 0, reg.REG_SZ, address)

    # now close the opened key
    reg.CloseKey(run_key)
Example 38
def zipscanner(input_dir, input_file, output_file, arg, target_type):
	"""scans zip files and any other files using zip compression"""
	
	#sets up search match values in a list
	search_values = []
	if arg.search_values:
		search_values = list(arg.search_values)
	if arg.input_file:
		search_input_file = open(arg.input_file, 'r')
		for line in search_input_file:
			search_values.append(line.strip())
		search_input_file.close()

	file_target = os.path.join(input_dir, input_file)
	zip_target = ZipFile(file_target, 'r')
	zip_manifest = zip_target.infolist()
	for item in zip_manifest:
		for search_value in search_values:
			if search_value in item.filename:
				# assuming handle_output takes (output_file, record, arg); the original called a string here
				handle_output(output_file, ("Zip File", input_dir, input_file, item.filename, "-zip file name match"), arg)
		count = 0
		data = zip_target.open(item, 'r').read().decode('utf-8', 'ignore')
		for search_value in search_values:
			if search_value in data:
				count += 1
		handle_output(output_file, ("Zip File", input_dir, input_file, item.filename, '-zip file hits: ' + str(count)), arg)
	zip_target.close()
Example 39
 def name_sort_launcher(self, pos, name=Test_Pyle_Sort_Name.keyword):
     # edit hardcoded test_files to TEST_DIR
     # braces in the dict literal are doubled so str.format leaves them intact
     params = "{{'pos': {0}, 'name': {1}}}".format(repr(pos), repr(name))
     args = pyle.parser.parse_args(['sort', 'name', 'test_files',
                                     '--o', params])
     pyle.launcher(args)
     directory = os.path.join(TEST_DIR, name)
     for filename in next(os.walk(directory))[2]:
         stem = os.path.splitext(filename)[0]
         error_msg = "{0} sorted improperly".format(stem)
         if pos == "start":
             self.assertTrue(filename.startswith(name), error_msg)
         elif pos == "end":
             self.assertTrue(filename.endswith(name), error_msg)
         else:
             self.assertTrue(name in filename, error_msg)
     for filename in next(os.walk(TEST_DIR))[2]:
         error_msg = "{0} improperly left unsorted.".format(filename)
         if pos == "start":
             self.assertFalse(filename.startswith(name), error_msg)
         elif pos == "end":
             self.assertFalse(filename.endswith(name), error_msg)
         else:
             self.assertFalse(name in filename, error_msg)
     return
Example 41
def unzip_submissions():
	for root, dirs, files in os.walk("."): 
		for f in files: 
			obj = os.path.abspath(os.path.join(root, f)) 
			(dirname, filename) = os.path.split(obj)
			message = "Decompressing "+f
			if f.endswith(".zip"):
				print message
				cmd = 'unzip -u -d '+dirname+' '+obj+' > /dev/null'
				subprocess.call(cmd, shell=True)
			elif f.endswith(".tar.gz") or f.endswith(".tgz"):
				print message
				subprocess.call(['tar','-xzf',obj, '-C', dirname])
			elif f.endswith(".tar"):
				print message
				subprocess.call(['tar','-xf',obj, '-C', dirname])
			elif f.endswith(".rar"):
				print message
				subprocess.call(['unrar','e',obj,dirname])
	
	# Summon All submission to top level student directories
	for name, email in students:
		dir_to_flatten = email[0] 
		for dirpath, dirnames, filenames in os.walk(dir_to_flatten):
			for filename in filenames:
				if filename.endswith(".c"):
					try:
						os.rename(os.path.join(dirpath, filename), os.path.join(dir_to_flatten, filename))
					except OSError:
						print ("Could not move %s " % os.join(dirpath, filename))

	return
Example 42
def deleteLargeFiles(folder):
    folder = os.path.abspath(folder)
    for foldername, subfolders, filenames in os.walk(folder):
        for filename in filenames:
            if os.path.getsize(os.path.join(foldername, filename)) >= 100000000:
                print('Deleting %s in %s...' % (filename, foldername))
                send2trash.send2trash(os.path.join(foldername, filename))
Example 43
def get_img_flow_path(img_root,
                      flow_root,
                      pred_img_root,
                      pred_flow_root,
                      num_clips=21):
    """

    :param img_root: F:\avenue\testing\frames
    :param flow_root: F:\avenue_optical\testing\optical_flow_visualize
    :param pred_img_root: F:\avenue_save_npy\imgs
    :param pred_flow_root: F:\avenue_save_npy\flows
    :return:
    """
    img_paths = []
    flow_paths = []
    pred_img_paths = []
    pred_flow_paths = []

    for num_clip in os.listdir(img_root):
        img_path = [
            os.path.join(img_root, num_clip, img)
            for img in os.listdir(os.path.join(img_root, num_clip))
        ]
        img_paths.append(img_path)

    return img_paths, flow_paths, pred_img_paths, pred_flow_paths
Example 44
def unregister():
    upath = bpy.utils.script_path_user()
    file1 = os.path.join(upath, "setup_sculpt", "add_detail.png")
    os.unlink(file1)

    bpy.utils.unregister_class(SetupSculpting)
    bpy.types.VIEW3D_MT_view.remove(menu_func_import)
Example 45
def getAllList():
	docNames = []
	for r, dirs, f in os.walk(DIR_INPUT):
		for subdir in dirs:
			for root, d, files in os.walk(os.path.join(DIR_INPUT, subdir)):
				for name in files:
					docNames.append(os.path.join(subdir, os.path.splitext(name)[0]))
	return docNames
Example 46
	def dump(self):
		for name, item in self._stack.iteritems():
			file_name = name + '.json'
		full_path = os.path.join(self._cache_directory, file_name)
			with open(full_path, 'w+') as cache:
				cache.write(item)

		self._stack.clear()
Example 47
def create_database(db_name='monitor.db'):
    if not os.path.exists(os.path.join(os.getcwd(), 'db')):
        db_path = os.path.join(os.getcwd(), 'db')
        os.mkdir(db_path)
        fh = open(os.path.join(db_path, db_name), 'w')  # create an empty database file
        fh.close()
    elif not os.path.exists(os.path.join(os.getcwd(), 'db', db_name)):
        fh = open(os.path.join(os.getcwd(), 'db', db_name), 'w')
        fh.close()

    conn = sqlite3.connect(os.path.join(os.getcwd(), 'db', db_name))
    curr = conn.cursor()
    curr.execute("""CREATE TABLE IF NOT EXISTS portfolio
                        (ID INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
                         symbol TEXT UNIQUE);""")
    curr.close()
    conn.close()
Example 48
 def make_build_dirs(self, module_dir, gws=None):
     if gws is None:
         gws = self.gws
     for gw in gws:
         builddir = os.path.join(gw["home_dir"], "archives", module_dir, "build")
         cmd = make_command("set_path", builddir)
         gw["channel"].send(cmd)
     return self._receive_all()
Example 49
 def __init__(self, base_dir, collection_name, api=None):
     """Initialize file listener."""
     super(FileListener, self).__init__(api)
     self.base_dir = base_dir
     self.collection_name = collection_name
     self.base_filename = os.path.join(
         self.base_dir,
         self.collection_name,
         self.collection_name)
Example 50
    def delete_file(self, filename):
        """
        Delete a file within this folder.

        Args:
            filename (string): Name of a file.
        """
        filepath = os.path.join(self._path, filename)
        os.remove(filepath)
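A one-line usage sketch (folder is a hypothetical instance of the class above; the filename is illustrative):

folder.delete_file('stale_report.txt')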
Example 51
	def createDirectory(self):
		"""METHOD TIReport.getDirectoryPath
	
	Input: None
	Output: None"""
		try:
			os.mkdir(os.path.join(self.directoryPath, self.directoryName))
		except Exception,error:
			self.message.ERROR('Directory exists!')
Example 52
 def reapOldRecords(self):
     # walk through contents of session directory and delete any
     # lapsed files
     for f in os.listdir(_sesspath):
         p = os.path.join(_sesspath, f)
         lastAccess = os.path.getatime(p)
         now = time.time()
         if now - lastAccess > Configuration.SessionTimeout:
             os.remove(p)
Example 53
 def POST(self):
     x = web.input(myfile={})
     filedir = os.path.join(os.getcwd(), 'Databases') # change this to the directory you want to store the file in.
     if 'myfile' in x: # to check if the file-object is created
         filepath=x.myfile.filename.replace('\\','/') # replaces the windows-style slashes with linux ones.
         filename=filepath.split('/')[-1] # splits the path and keeps the last part (the filename with extension)
         fout = open(filedir +'/'+ filename,'wb') # creates the file where the uploaded file should be stored
         fout.write(x.myfile.file.read()) # writes the uploaded file to the newly created file.
         fout.close() # closes the file, upload complete.
     raise web.seeother('/upload')
Example 54
    def KindleMobiDecrypt(self,path_to_ebook):

        # add the alfcrypto directory to sys.path so alfcrypto.py
        # will be able to locate the custom lib(s) for CDLL import.
        sys.path.insert(0, self.alfdir)
        # Had to move this import here so the custom libs can be
        # extracted to the appropriate places beforehand these routines
        # look for them.
        import calibre_plugins.dedrm.prefs as prefs
        import calibre_plugins.dedrm.k4mobidedrm

        dedrmprefs = prefs.DeDRM_Prefs()
        pids = dedrmprefs['pids']
        serials = dedrmprefs['serials']
        kindleDatabases = dedrmprefs['kindlekeys'].items()

        try:
            book = k4mobidedrm.GetDecryptedBook(path_to_ebook,kindleDatabases,serials,pids,self.starttime)
        except Exception, e:
            decoded = False
            # perhaps we need to get a new default Kindle for Mac/PC key
            defaultkeys = []
            print u"{0} v{1}: Failed to decrypt with error: {2}".format(PLUGIN_NAME, PLUGIN_VERSION,e.args[0])
            print u"{0} v{1}: Looking for new default Kindle Key after {2:.1f} seconds".format(PLUGIN_NAME, PLUGIN_VERSION, time.time()-self.starttime)

            try:
                if iswindows or isosx:
                    from calibre_plugins.dedrm.kindlekey import kindlekeys

                    defaultkeys = kindlekeys()
                else: # linux
                    from wineutils import WineGetKeys

                    scriptpath = os.path.join(self.alfdir,u"kindlekey.py")
                    defaultkeys = WineGetKeys(scriptpath, u".k4i",dedrmprefs['kindlewineprefix'])
            except:
                pass

            newkeys = {}
            for i,keyvalue in enumerate(defaultkeys):
                keyname = u"default_key_{0:d}".format(i+1)
                if keyvalue not in dedrmprefs['kindlekeys'].values():
                    newkeys[keyname] = keyvalue
            if len(newkeys) > 0:
                print u"{0} v{1}: Found {2} new {3}".format(PLUGIN_NAME, PLUGIN_VERSION, len(newkeys), u"key" if len(newkeys)==1 else u"keys")
                try:
                    book = k4mobidedrm.GetDecryptedBook(path_to_ebook,newkeys.items(),[],[],self.starttime)
                    decoded = True
                    # store the new successful keys in the defaults
                    print u"{0} v{1}: Saving {2} new {3}".format(PLUGIN_NAME, PLUGIN_VERSION, len(newkeys), u"key" if len(newkeys)==1 else u"keys")
                    for keyvalue in newkeys.values():
                        dedrmprefs.addnamedvaluetoprefs('kindlekeys','default_key',keyvalue)
                    dedrmprefs.writeprefs()
                except Exception, e:
                    pass
Example 55
 def _createFile(self, assetLibPath, name):
     ''' Only create file if it doesn't exist '''
     self.logger.debug('_createFile(): Starting...')
     
     fileName = os.path.join(assetLibPath, name)
     if not os.path.isfile(fileName):
         f = open(fileName, 'w')  # creating the empty file is all that's needed
         f.close()
         
     self.logger.debug('_createFile(): End.')
Example 56
 def __init__(self):
     dirname = "/users/cxlyc007/Dropbox/repository/lmdb/dataset/"
     print dirname
     self.labeled_train_data_path = os.path.join(dirname, "labeledTrainData.tsv")
     self.test_data_path = os.path.join(dirname, "testData.tsv")
     self.unlabeled_train_data_path = os.path.join(dirname, "unlabeledTrainData.tsv")
     self.sample_submission_path = os.path.join(dirname, "sampleSubmission.csv")
     self.model_save_dir = os.path.join(dirname, "model")
     self.prediction_dir = os.path.join(dirname, "prediction")
     self.dataset_dir = os.path.join(dirname, "dataset")
Example 57
    def __init__(self,folder=None,post=""):
        #TODO: make this more flexible so it works on all operating systems.
        self.folder = folder or os.path.join(os.getcwd(),'contraband')
        self.post = post

        if self.folder[-1] == '\\':
            self.folder = self.folder[:-1]
            
        if not os.path.exists(self.folder):
            os.makedirs(self.folder)
        
        self.payload = Payload()