Example #1
    def make_target_directory(self, path):
        path = os.path.abspath(path)
        try:
            os.makedirs(path)
        except OSError as e:
            self.abort('Could not create target folder: %s' % e)

        if os.path.isdir(path):
            try:
                if len(os.listdir(path)) != 0:
                    raise OSError('Directory not empty')
            except OSError as e:
                self.abort('Bad target folder: %s' % e)

        scratch = os.path.join(tempfile.gettempdir(), uuid.uuid4().hex)
        os.makedirs(scratch)
        try:
            yield scratch
        except:
            shutil.rmtree(scratch)
            raise
        else:
            # Use shutil.move here in case we move across a file system
            # boundary.
            for filename in os.listdir(scratch):
                if isinstance(path, unicode):  # Python 2: match the target path's text type
                    filename = filename.decode(fs_enc)
                shutil.move(os.path.join(scratch, filename),
                            os.path.join(path, filename))
            os.rmdir(scratch)
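
The yield above makes this function a context manager (contextlib.contextmanager is assumed to decorate it in the original class): files are written into a scratch directory and only moved into place if the with-block succeeds. A minimal usage sketch; ctx stands for a hypothetical instance of the class:

with ctx.make_target_directory('my-project') as scratch:
    # write everything into the scratch directory first
    with open(os.path.join(scratch, 'hello.txt'), 'w') as f:
        f.write('hello')
# on a clean exit the files are moved into my-project/;
# on an exception the scratch directory is removed instead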
Example #2
def valid_dir(d):
    # type: (Dict) -> bool
    dir = d['path']
    if not path.exists(dir):
        return True
    if not path.isdir(dir):
        return False

    if set(['Makefile', 'make.bat']) & set(os.listdir(dir)):  # type: ignore
        return False

    if d['sep']:
        dir = os.path.join('source', dir)
        if not path.exists(dir):
            return True
        if not path.isdir(dir):
            return False

    reserved_names = [
        'conf.py',
        d['dot'] + 'static',
        d['dot'] + 'templates',
        d['master'] + d['suffix'],
    ]
    if set(reserved_names) & set(os.listdir(dir)):  # type: ignore
        return False

    return True
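
A sketch of the dictionary this validator expects; the keys are the ones read in the code above, the values are illustrative:

d = {
    'path': 'docs',     # target directory
    'sep': False,       # whether sources live in a separate source/ dir
    'dot': '_',         # prefix for the static/templates directories
    'master': 'index',  # master document base name
    'suffix': '.rst',   # source file suffix
}
print(valid_dir(d))  # True if 'docs' is safe to generate into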
Example #3
def test_sequence_output():
    directory = tempfile.mkdtemp()
    assert 0 == len(os.listdir(directory))
    cli.main(['--seq', '-v=1', '-e=m',
              '-o=' + os.path.join(directory, 'test.svg'),
              'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
              'ABCDEFGHIJKLMNOPQRSTUVWXYZ'])
    number_of_files = len(os.listdir(directory))
    shutil.rmtree(directory)
    assert 4 == number_of_files
Example #4
    def test_basic(self):
        """grab_driver_files: copy drivers into place, return module list"""
        # create a bunch of fake extracted files
        outdir = self.tmpdir + '/extract-outdir'
        moddir = outdir + "/lib/modules/%s/kernel/" % os.uname()[2]
        fwdir = outdir + "/lib/firmware/"
        modules = makefiles(moddir+"net/funk.ko", moddir+"fs/lolfs.ko.xz")
        firmware = makefiles(fwdir+"funk.fw")
        makefiles(outdir+"/usr/bin/monkey", outdir+"/other/dir/blah.ko")
        mod_upd_dir = self.tmpdir+'/module-updates'
        fw_upd_dir = self.tmpdir+'/fw-updates'
        # use our updates dirs instead of the default updates dirs
        with mock.patch.multiple("driver_updates",
                                 MODULE_UPDATES_DIR=mod_upd_dir,
                                 FIRMWARE_UPDATES_DIR=fw_upd_dir):
            modnames = grab_driver_files(outdir)
        self.assertEqual(set(modnames), set(["funk", "lolfs"]))
        modfiles = set(['funk.ko', 'lolfs.ko.xz'])
        fwfiles = set(['funk.fw'])
        # modules/firmware are *not* in their old locations
        self.assertEqual([f for f in modules+firmware if os.path.exists(f)], [])
        # modules are in the system's updates dir
        self.assertEqual(set(os.listdir(mod_upd_dir)), modfiles)
        # modules are also in outdir's updates dir
        self.assertEqual(set(os.listdir(outdir+'/'+mod_upd_dir)), modfiles)
        # repeat for firmware
        self.assertEqual(set(os.listdir(fw_upd_dir)), fwfiles)
        self.assertEqual(set(os.listdir(outdir+'/'+fw_upd_dir)), fwfiles)
Example #5
def env_status():
    """
    Print information about the test bed to check tests aren't
    failing to clean up.

    To see, ensure you use: py.test -s
    """
    folders = [folder for folder
               in os.listdir(os.path.dirname(CONF.path_to('link')))
               if folder.find('cmd') == -1]
    try:
        link_d = os.listdir(CONF.path_to('link'))
    except OSError:
        link_d = 'DOES NOT EXIST'
    try:
        prefix_d = os.listdir(CONF.path_to('prefix'))
    except OSError:
        prefix_d = 'DOES NOT EXIST'
    print('\n')
    print('Environment Summary BEFORE Cleanup')
    print('Paths: ', CONF.get('pakit.paths'))
    print('IDB Entries: ', sorted([key for key, _ in pakit.conf.IDB]))
    print('Contents Root Dir: ', sorted(folders))
    print('Contents Link Dir: ', sorted(link_d))
    print('Contents Prefix Dir: ', sorted(prefix_d))
Example #6
def clear_project(exitIfError=True):
	"""
	This function will erease the tables and views that were created
	The name of the table that is going to be deleted, are all in the config
	file in the constant forler.

	This function is dropping/deleting all views and the single table
	"""
	tables_to_delete = db.get_table_names()
	db.clear_qc_data(tables_to_delete)

	db.drop_table(QC_TABLE_NAME)
	if os.path.exists(WEB_APP_PATH+"assets/img/"):
		files = os.listdir(WEB_APP_PATH+"assets/img/")
	else:
		files = []
	for img_folder in files:
		print "removing : ",WEB_APP_PATH+"assets/img/"+img_folder
		rmtree(WEB_APP_PATH+"assets/img/"+img_folder)

	if os.path.exists(WEB_APP_PATH+"assets/reports/"):
		files = os.listdir(WEB_APP_PATH+"assets/reports/")
	else:
		files = [] 
	for report in files:
		print "removing : ",WEB_APP_PATH+"assets/reports/"+report
		os.remove(WEB_APP_PATH+"assets/reports/"+report)
	
	# recreate the database tables and views
	create_database(exitIfError)
Example #7
    def load(self):
        """ Try to import the requested modules; put the imported modules
        in self.imported_modules. Any previously imported modules are
        cleaned out first. """
        now = int(time.time())
        # We get all modules file with .py
        modules_files = [ fname[:-3] for fname in os.listdir(self.modules_path) 
                         if fname.endswith(".py") ]

        # And directories
        modules_files.extend([ fname for fname in os.listdir(self.modules_path)
                               if os.path.isdir(os.path.join(self.modules_path, fname)) ])

        # Now we try to load them.
        # First we add their dir to sys.path
        if not self.modules_path in sys.path:
            sys.path.append(self.modules_path)

        # We try to import them, but we keep only the ones of
        # our type
        del self.imported_modules[:]
        for fname in modules_files:
            #print "Try to load", fname
            try:
                m = __import__(fname)
                if not hasattr(m, 'properties'):
                    continue

                # We want to keep only the modules of our type
                if self.modules_type in m.properties['daemons']:
                    self.imported_modules.append(m)
            except Exception, exp:
                logger.log("Warning in importing module : %s" % exp)
Example #8
def run_ocr(result_dir, output_file):
    DEFAULT_TEXT = 'Lorem ipsum\ndolor sit amet,\nconsectetur\n\nadipiscing elit.\n\nDonec vel\naliquet velit,\nid congue\nposuere.'
    csv_file = open(output_file, 'w')
    fieldnames = ['image', 'param_set', 'ocr']
    writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
    writer.writeheader()
    # Enumerate images
    for image_name in sorted(os.listdir(result_dir)):
        image_dir = os.path.join(result_dir, image_name)
        if os.path.isdir(image_dir):
            print image_name
            # Enumerate parameter sets
            for result_file in sorted(os.listdir(image_dir)):
                if result_file.endswith('.png'):
                    image_path = os.path.join(image_dir, result_file)
                    image = util.img_as_float(io.imread(image_path))
                    ocr = ocr_accuracy(image, DEFAULT_TEXT)

                    result_ps_name = os.path.splitext(result_file)[0]
                    # # Write into csv file
                    result_row = {
                        'image': image_name,
                        'param_set': result_ps_name,
                        'ocr': ocr,
                    }
                    writer.writerow(result_row)
    csv_file.close()
    return None
Example #9
def run_q(result_dir, output_file):
    csv_file = open(output_file, 'w')
    # fieldnames = ['image', 'param_set', 'final_error', 'training_time']
    fieldnames = ['image', 'param_set', 'q']
    writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
    writer.writeheader()
    # Enumerate images
    for image_name in sorted(os.listdir(result_dir)):
        image_dir = os.path.join(result_dir, image_name)
        if os.path.isdir(image_dir):
            print image_name
            # Enumerate parameter sets
            for result_file in sorted(os.listdir(image_dir)):
                if result_file.endswith('.png'):
                    image_path = os.path.join(image_dir, result_file)
                    image = util.img_as_float(io.imread(image_path))
                    q = q_py(image)

                    # result_json_path = os.path.join(image_dir, result_json)
                    result_ps_name = os.path.splitext(result_file)[0]
                    # result_data = read_results(result_json_path)
                    # # last_epoch_error = float(parse_epochs(result_data)[-1]['error'])
                    # last_epoch_error = float(parse_epochs(result_data)[-1])
                    # # Write into csv file
                    result_row = {
                        'image': image_name,
                        'param_set': result_ps_name,
                        'q': q,
                    }
                    writer.writerow(result_row)
    csv_file.close()
    return None
Example #10
def load_designs(db, root='designs'):
    for design in os.listdir(root):
        views = dict()
        path = os.path.join(root, design)
        if not os.path.isdir(path): continue
        path = os.path.join(root, design, 'views')
        for filename in os.listdir(path):
            name, ext = os.path.splitext(filename)
            if ext != '.js': continue
            with open(os.path.join(path, filename)) as codefile:
                code = codefile.read()
            if name.startswith('map_'):
                name = name[len('map_'):]
                key = 'map'
            elif name.startswith('reduce_'):
                name = name[len('reduce_'):]
                key = 'reduce'
            else:
                key = 'map'
            views.setdefault(name, dict())[key] = code
        id = "_design/%s" % design
        try:
            doc = db[id]
        except couchdb.http.ResourceNotFound:
            logging.debug("loading %s", id)
            db.save(dict(_id=id, views=views))
        else:
            if doc['views'] != views:
                doc['views'] = views
                logging.debug("updating %s", id)
                db.save(doc)
            else:
                logging.debug("no change %s", id)
Example #11
    def __init__(self):
        self.options = config.YAML_CONFIG.get('options')

        # where graphite lives
        self.graphite_base = config.YAML_CONFIG.get('graphite')

        # where the graphite renderer is
        self.graphite_render = "%s/render/" % self.graphite_base

        # where to find graph, dash etc templates
        self.graph_templates = config.YAML_CONFIG.get('templatedir')

        # the dash site might have a prefix for its css etc
        self.prefix = self.options.get('prefix', "")

        # the page refresh rate
        self.refresh_rate = self.options.get('refresh_rate', 60)

        # how many columns of graphs do you want on a page
        self.graph_columns = self.options.get('graph_columns', 2)

        # how wide each graph should be
        self.graph_width = self.options.get('graph_width')

        # how high each graph should be
        self.graph_height = self.options.get('graph_height')

        # Dashboard title
        self.dash_title = self.options.get('title', 'Graphite Dashboard')

        # Dashboard logo
        self.dash_logo = self.options.get('logo')

        # Time filters in interface
        self.interval_filters = self.options.get('interval_filters', [])

        self.intervals = self.options.get('intervals', [])

        self.top_level = {}

        for category in [ name for name in os.listdir(self.graph_templates)
                                        if not name.startswith('.') and os.path.isdir(os.path.join(self.graph_templates, name)) ]:

            if os.listdir(os.path.join(self.graph_templates, category)):

                self.top_level[category] = leonardo.Leonardo( self.graphite_base,
                                                              "/render/",
                                                              self.graph_templates,
                                                              category,
                                                              { "width" : self.graph_width,
                                                                "height" : self.graph_height
                                                              }
                                                            )

        self.search_elements = [ "%s/%s" % (d['category'], d['name'])   for dash in self.top_level  for d in self.top_level[dash].dashboards() ]

        elements_string = ""
        for item in self.search_elements:
            elements_string += '"%s",' % item
        self.search_elements = "[%s]" % elements_string[:-1]
Example #12
    def getFiles(self, subject):
        """Get a dictionary with a list of all candidate filenames for associated data, such as roi overlays, flatmap caches, and ctm caches.
        """
        surfparse = re.compile(r'(.*)/([\w-]+)_([\w-]+)_(\w+).*')
        surfpath = os.path.join(filestore, subject, "surfaces")

        if self.subjects[subject]._warning is not None:
            warnings.warn(self.subjects[subject]._warning)

        surfs = dict()
        for surf in os.listdir(surfpath):
            ssurf = os.path.splitext(surf)[0].split('_')
            name = '_'.join(ssurf[:-1])
            hemi = ssurf[-1]

            if name not in surfs:
                surfs[name] = dict()
            surfs[name][hemi] = os.path.abspath(os.path.join(surfpath,surf))

        filenames = dict(
            surfs=surfs,
            xfms=os.listdir(os.path.join(filestore, subject, "transforms")),
            xfmdir=os.path.join(filestore, subject, "transforms", "{xfmname}", "matrices.xfm"),
            anats=os.path.join(filestore, subject, "anatomicals", '{type}{opts}.{ext}'), 
            surfinfo=os.path.join(filestore, subject, "surface-info", '{type}{opts}.npz'),
            masks=os.path.join(filestore, subject, 'transforms', '{xfmname}', 'mask_{type}.nii.gz'),
            cachedir=os.path.join(filestore, subject, "cache"),
            rois=os.path.join(filestore, subject, "rois.svg").format(subj=subject),
        )

        return filenames
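
Most entries in the returned dict are str.format templates rather than concrete paths. A hedged sketch of filling them in, assuming db is an instance of the class above; the subject, transform, and mask names are made up:

files = db.getFiles('S1')
mask = files['masks'].format(xfmname='fullhead', type='thin')
anat = files['anats'].format(type='raw', opts='', ext='nii.gz')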
Example #13
def diff(options, a, b):
    def print_meta(s):
        codec_print(simple_colorize(s, "magenta"), options)

    if os.path.isfile(a) and os.path.isfile(b):
        if not filecmp.cmp(a, b, shallow=False):
            diff_files(options, a, b)

    elif os.path.isdir(a) and os.path.isdir(b):
        a_contents = set(os.listdir(a))
        b_contents = set(os.listdir(b))

        for child in sorted(a_contents.union(b_contents)):
            if child not in b_contents:
                print_meta("Only in %s: %s" % (a, child))
            elif child not in a_contents:
                print_meta("Only in %s: %s" % (b, child))
            elif options.recursive:
                diff(options,
                     os.path.join(a, child),
                     os.path.join(b, child))
    elif os.path.isdir(a) and os.path.isfile(b):
        print_meta("File %s is a directory while %s is a file" % (a, b))

    elif os.path.isfile(a) and os.path.isdir(b):
        print_meta("File %s is a file while %s is a directory" % (a, b))
Example #14
def action_uninstall(plat=None, x64=None, ext=None, **kwargs):
    plat, x64, ext, folder, location = fix_args(plat, x64, ext)

    print 'Uninstalling flow123d...'

    if plat == 'linux':
        # only remove install folder
        pass

    if plat == 'windows':
        uninstaller_location = os.path.abspath(os.path.join(folder, 'Uninstall.exe'))
        command = [uninstaller_location, '/S']
        process, stdout, stderr = run_command(command)
        check_error(process, stdout, stderr)
        if process.returncode != 0:
            return process.returncode

    # sleep because Windows spawns a child that is not bound to the parent,
    # so exiting the parent does not terminate the children as well
    time.sleep(5)

    shutil.rmtree(os.path.abspath(folder), True)
    shutil.rmtree(os.path.abspath('output'), True)
    if os.path.exists(folder):
        print 'Uninstallation not successful!'
        print os.listdir(folder)
        return 1

    print 'Uninstallation successful!'
    return 0
Example #15
    def getDBSummary(self):
        """ Obtain a summary of the contents of the requestDB.

        :param self: self reference
        :return: S_OK with dict[requestType][status] => nb of files
        """
        self.log.info("getDBSummary: Attempting to get database summary.")
        requestTypes = os.listdir(self.root)
        try:
            summaryDict = {}
            for requestType in requestTypes:
                summaryDict[requestType] = {}
                reqTypeDir = os.path.join(self.root, requestType)
                if os.path.isdir(reqTypeDir):
                    statusList = os.listdir(reqTypeDir)
                    for status in statusList:
                        reqTypeStatusDir = os.path.join(reqTypeDir, status)
                        requests = os.listdir(reqTypeStatusDir)
                        summaryDict[requestType][status] = len(requests)
            self.log.info("getDBSummary: Successfully obtained database summary.")
            return S_OK(summaryDict)
        except Exception, x:
            errStr = "getDBSummary: Exception while getting DB summary."
            self.log.exception(errStr, lException=x)
            return S_ERROR(errStr)
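
A sketch of consuming the result, assuming the usual DIRAC convention that S_OK wraps its payload in a dict under 'Value'; the request types and counts are illustrative:

result = reqDB.getDBSummary()  # reqDB: an instance of the class above
if result['OK']:
    summary = result['Value']
    # e.g. {'transfer': {'Waiting': 12, 'Done': 340},
    #       'register': {'Waiting': 3}}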
Example #16
def recreate_images(result_dir, noisy_image_dir):
    # Read noisy images first
    test_images = {}
    for image_name in os.listdir(noisy_image_dir):
        if image_name.endswith('.png'):
            image_path = os.path.join(noisy_image_dir, image_name)
            image = util.img_as_float(io.imread(image_path))
            image_name_noext = os.path.splitext(image_name)[0]
            test_images[image_name_noext] = image
    # Enumerate results - image directories
    for image_name in sorted(os.listdir(result_dir)):
        image_dir = os.path.join(result_dir, image_name)
        if os.path.isdir(image_dir):
            print image_name
            for result_file in sorted(os.listdir(image_dir)):
                if result_file.endswith('.net'):
                    # Instantiate trained ANN from .net file
                    net_path = os.path.join(image_dir, result_file)
                    ann = libfann.neural_net()
                    ann.create_from_file(net_path)
                    # Filter the same image which it was trained with
                    filtered_image = filter_fann(
                        test_images[image_name], ann)
                    param_set_name = os.path.splitext(result_file)[0]
                    io.imsave(
                        os.path.join(image_dir, param_set_name + '.png'),
                        filtered_image)
Example #17
    def download(self, cameras, path):
        left_dir = os.path.join(path, 'left')
        right_dir = os.path.join(path, 'right')
        target_dir = os.path.join(path, 'raw')
        if not os.path.exists(target_dir):
            os.mkdir(target_dir)
        left_pages = [os.path.join(left_dir, x)
                      for x in sorted(os.listdir(left_dir))]
        right_pages = [os.path.join(right_dir, x)
                       for x in sorted(os.listdir(right_dir))]
        # Write the orientation as a JPEG comment to the end of the file
        if len(left_pages) != len(right_pages):
            logger.warn("The left and right camera produced an unequal"
                        " amount of images, please fix the problem!")
            logger.warn("Will not combine images")
            return
        if (self.config['first_page']
                and not self.config['first_page'].get(str) == 'left'):
            combined_pages = reduce(operator.add, zip(right_pages, left_pages))
        else:
            combined_pages = reduce(operator.add, zip(left_pages, right_pages))
        logger.info("Combining images.")
        for idx, fname in enumerate(combined_pages):
            fext = os.path.splitext(os.path.split(fname)[1])[1]
            target_file = os.path.join(target_dir, "{0:04d}{1}"
                                       .format(idx, fext))
            shutil.copyfile(fname, target_file)
        shutil.rmtree(right_dir)
        shutil.rmtree(left_dir)
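
The reduce(operator.add, zip(...)) idiom above interleaves the two page lists, alternating left and right. That step in isolation:

import operator
from functools import reduce  # a builtin in Python 2

left = ['L0.jpg', 'L1.jpg']
right = ['R0.jpg', 'R1.jpg']
print(reduce(operator.add, zip(left, right)))
# ('L0.jpg', 'R0.jpg', 'L1.jpg', 'R1.jpg')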
Example #18
def parse_results(result_dir, output_file):
    csv_file = open(output_file, 'w')
    # fieldnames = ['image', 'param_set', 'final_error', 'training_time']
    fieldnames = ['image', 'param_set', 'final_error']
    writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
    writer.writeheader()
    # Enumerate images
    for image_name in sorted(os.listdir(result_dir)):
        image_dir = os.path.join(result_dir, image_name)
        if os.path.isdir(image_dir):
            print image_name
            # Enumerate parameter sets
            for result_json in sorted(os.listdir(image_dir)):
                if result_json.endswith('.json'):
                    result_json_path = os.path.join(image_dir, result_json)
                    result_ps_name = os.path.splitext(result_json)[0]
                    result_data = read_results(result_json_path)
                    # last_epoch_error = float(parse_epochs(result_data)[-1]['error'])
                    last_epoch_error = float(parse_epochs(result_data)[-1])
                    # Write into csv file
                    result_row = {
                        'image': image_name,
                        'param_set': result_ps_name,
                        'final_error': last_epoch_error,
                        # 'training_time': result_data['training_time']
                    }
                    writer.writerow(result_row)
    csv_file.close()
    return None
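
Each row pairs an image with one parameter set; the resulting CSV looks roughly like this (values illustrative):

# image,param_set,final_error
# lena_gauss25,ps_16x16_lr0.7,0.004213
# lena_gauss25,ps_32x32_lr0.5,0.003871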
Example #19
def getDownloadedMusicList():
	dirs = os.listdir(__resource__)
	for dir in dirs:
		mp3Path = __resource__ + '/' + dir + '/mp3/'
		picPath = __resource__ + '/' + dir + '/pic/'

		filename, file_extension = os.path.splitext(dir)

		print 'file_extension ', file_extension
		print 'filename ', filename
		#filter js files
		if isExcludeFiles(file_extension) :
			continue
		if mp3Path == 'static/css/mp3/':
			continue

		mp3 = os.listdir(mp3Path)
		mp3 = map(lambda x : mp3Path + x, mp3)
		#print 'mp3Path:', mp3Path 
		#print mp3

		pic = os.listdir(picPath)
		pic = map(lambda x : picPath + x, pic)
		#print 'picPath:', picPath
		#print pic
	return None  # note: the per-directory mp3/pic lists built above are discarded
Example #20
def get_data_old(data_dir, data_x, data_y):
    # if we end up needing the not-data-wrapper version
    label_num = 0
    labels = []  # so we know what each index value represents (string labels)
    for f in os.listdir(os.path.abspath(data_dir)):
        f = os.path.abspath(data_dir + f)

        label = f.split("/")[-1]
        labels.append(label)

        if os.path.isdir(f):
            for sample_index, sample in enumerate(os.listdir(f)):
                #get our spectrogram for the sample
                samplerate, audio_raw = wav.read(f + "/" + sample)

                #start with zeros so we have our blank trailing space
                audio = np.zeros(max_audio_len)

                #convert our signed integer to an unsigned one by adding 32768, then divide by max of unsigned integer to get percentage and use that as our float value
                #fill as far as we've got
                for a_index, a in enumerate(audio_raw):
                    audio[a_index] = float((a+32768)/65536.0)
                data_x[sample_index] = audio
                data_y[sample_index][label_num] = 1.0
                #audio = [float((a+32768)/65536.0) for a in audio]
        label_num += 1
    return zip(data_x, data_y)
Example #21
def build_gfortran(ctx, target):
    binpath = os.path.join(ctx.out_dir, "bin")
    manpath = os.path.join(ctx.out_dir, "share", "man", "man1")
    includepath = os.path.join(ctx.out_dir, "include")

    binfiles = os.listdir(binpath)
    manfiles = os.listdir(manpath)
    srcpath = ctx.env.SRCPATH

    ctx.venv_exec("""
        base="gcc-42-5666.3-darwin11"
        pushd %(srcpath)s/3rdparty
        rm -fr "$base"
        mkdir -p "$base"
        pushd "$base"
        xar -xf "../$base.pkg"
        mv *.pkg/Payload Payload.gz
        pax --insecure -rz -f Payload.gz -s ",./usr,$VIRTUAL_ENV,"
        ln -sf "$VIRTUAL_ENV/bin/gfortran-4.2" "$VIRTUAL_ENV/bin/gfortran"
        popd
        rm -fr "$base"
        popd
    """ % locals())

    # Delete other files installed
    shutil.rmtree(os.path.join(includepath, "gcc"))

    for f in os.listdir(binpath):
        if f not in binfiles and "gfortran" not in f:
            os.unlink(os.path.join(binpath, f))

    for f in os.listdir(manpath):
        if f not in manfiles and "gfortran" not in f:
            os.unlink(os.path.join(manpath, f))
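
The listdir() snapshots taken before the build let the code delete everything the package installed except gfortran. The pattern in isolation, assuming binpath already exists:

import os

before = set(os.listdir(binpath))
# ... installation step runs here ...
for f in set(os.listdir(binpath)) - before:
    if 'gfortran' not in f:
        os.unlink(os.path.join(binpath, f))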
Example #22
    def run(self):
        dst = self.config.get_dst_folder()
        cdv_dst = self.config.get_cordova_dst_folder(self.key)
        if os.path.exists(cdv_dst):
            names = os.listdir(cdv_dst)
            for name in names:
                if not name.startswith('.'):
                    name = os.path.join(cdv_dst, name)
                    if os.path.isfile(name):
                        os.remove(name)
                    else:
                        shutil.rmtree(name)
        names = os.listdir(dst)
        for name in names:
            if not name.startswith('.'):
                src = os.path.join(dst, name)
                copy = os.path.join(cdv_dst, name)
                if os.path.isfile(src):
                    shutil.copy(src, copy)
                else:
                    shutil.copytree(src, copy, ignore=shutil.ignore_patterns('.*'))
        for r, d, f in os.walk(cdv_dst):
            for files in filter(lambda x: x.endswith('.html'), f):
                p = os.path.join(r, files)
                self.replace_cordova_tag(p)
        self.copy_icons(dst)
        self.copy_splash(dst)
Example #23
    def handle(self, **options):

        for party_slug in sorted(os.listdir(ppc_data_directory)):
            json_directory = join(
                ppc_data_directory,
                party_slug
            )
            for leafname in sorted(os.listdir(json_directory)):
                if not leafname.endswith('.json'):
                    continue
                filename = join(json_directory, leafname)
                image = re.sub(r'\.json$', '-cropped.png', filename)
                if not exists(image):
                    image = None
                print '==============================================================='
                print "filename:", filename
                with open(filename) as f:
                    ppc_data = json.load(f)
                ppc_data['party_slug'] = party_slug
                ppc_data['party_object'] = party_slug_to_popit_party[party_slug]
                ppc_data['constituency_object'] = get_constituency_from_name(
                    ppc_data['constituency']
                )
                if options['check']:
                    continue
                self.handle_person(ppc_data, image)
Example #24
def main(max_stations=0, folder='.'):
    try:
        makedirs(output_folder+'/'+folder)
    except OSError:
        pass

    all_files = [ f for f in listdir(data_folder) if isfile(join(data_folder,f)) and f.endswith('.gz') ]
    
    for ndf in all_files:
        string = '_%dstations' % max_stations
        new_name=ndf[:-7]+string+ndf[-7:]
        rename(data_folder+'/'+ndf, data_folder+'/'+new_name)
        
    all_files = [ f for f in listdir(data_folder) if isfile(join(data_folder,f)) and f.endswith('.gz') ]
    
    for a_f in all_files:
        move(data_folder+'/'+a_f, output_folder+'/'+folder+'/'+a_f)
        print "Moved:", a_f[0:-3]
        
    data_files = [ f for f in listdir(output_folder+'/'+folder) if isfile(join(output_folder+'/'+folder,f)) and f.endswith('.dat.gz') ]

    print "\n"

    for d_f in data_files:
        fin = gzip.open(output_folder+'/'+folder+'/'+d_f, 'rb')
        data = fin.read()
        fin.close()

        with open(output_folder+'/'+folder+'/'+d_f[0:-3],'w') as fout:
            fout.write(data)

        print "Unzipped:", d_f[0:-3]
Example #25
def check_for_node_modules( node_modules ):
	if os.path.isdir( node_modules ):
		for dirname in os.listdir( node_modules ):
			path = os.path.join(node_modules, dirname)
			for filename in os.listdir(path):
				if filename.endswith('.js'):
					PATHS[ 'modules' ][ filename ] = os.path.join( path, filename )
Example #26
    def cleanup(self, action):
        if not self.steps_filename:
            return
        if not self.question_yes_no("All unused PPM files will be moved to a"
                                    " backup directory. Are you sure?",
                                    "Clean up data directory?"):
            return
        # Remember the current step index
        current_step_index = self.current_step_index
        # Get the backup dir
        backup_dir = os.path.join(self.steps_data_dir, "backup")
        # Create it if it doesn't exist
        if not os.path.exists(backup_dir):
            os.makedirs(backup_dir)
        # Move all files to the backup dir
        for filename in glob.glob(os.path.join(self.steps_data_dir,
                                               "*.[Pp][Pp][Mm]")):
            shutil.move(filename, backup_dir)
        # Get the used files back
        for step in self.steps:
            self.set_state_from_step_lines(step, backup_dir, warn=False)
            self.get_step_lines(self.steps_data_dir)
        # Remove the used files from the backup dir
        used_files = os.listdir(self.steps_data_dir)
        for filename in os.listdir(backup_dir):
            if filename in used_files:
                os.unlink(os.path.join(backup_dir, filename))
        # Restore step index
        self.set_step(current_step_index)
        # Inform the user
        self.message("All unused PPM files may be found at %s." %
                     os.path.abspath(backup_dir),
                     "Clean up data directory")
Example #27
  def _fetch_pkg(self, gopath, pkg, rev):
    """Fetch the package and setup symlinks."""
    fetcher = self._get_fetcher(pkg)
    root = fetcher.root()
    root_dir = os.path.join(self.workdir, 'fetches', root, rev)

    # Only fetch each remote root once.
    if not os.path.exists(root_dir):
      with temporary_dir() as tmp_fetch_root:
        fetcher.fetch(dest=tmp_fetch_root, rev=rev)
        safe_mkdir(root_dir)
        for path in os.listdir(tmp_fetch_root):
          shutil.move(os.path.join(tmp_fetch_root, path), os.path.join(root_dir, path))

    # TODO(John Sirois): Circle back and get rid of this symlink tree.
    # GoWorkspaceTask will further symlink a single package from the tree below into a
    # target's workspace when it could just be linking from the fetch_dir.  The only thing
    # standing in the way is a determination of what we want to artifact cache.  If we don't
    # want to cache fetched zips, linking straight from the fetch_dir works simply.  Otherwise
    # thought needs to be applied to using the artifact cache directly or synthesizing a
    # canonical owner target for the fetched files that 'child' targets (subpackages) can
    # depend on and share the fetch from.
    dest_dir = os.path.join(gopath, 'src', root)
    # We may have been `invalidate`d and not `clean-all`ed so we need a new empty symlink
    # chroot to avoid collision; thus `clean=True`.
    safe_mkdir(dest_dir, clean=True)
    for path in os.listdir(root_dir):
      os.symlink(os.path.join(root_dir, path), os.path.join(dest_dir, path))
Example #28
    def __locateRequest(self, requestName, assigned=False):
        """ Locate the sub requests associated with a requestName.

        :param self: self reference
        :param str requestName: request name
        :param bool assigned: flag to include/exclude Assigned requests
        """
        self.log.info("__locateRequest: Attempting to locate %s." % requestName)
        requestTypes = os.listdir(self.root)
        subRequests = []
        try:
            for requestType in requestTypes:
                reqDir = "%s/%s" % (self.root, requestType)
                if os.path.isdir(reqDir):
                    statusList = os.listdir(reqDir)
                    if not assigned and "Assigned" in statusList:
                        statusList.remove("Assigned")
                    for status in statusList:
                        statusDir = os.path.join(reqDir, status)
                        if os.path.isdir(statusDir):
                            requestNames = os.listdir(statusDir)
                            if requestName in requestNames:
                                requestPath = os.path.join(statusDir, requestName)
                                subRequests.append(requestPath)
            self.log.info("__locateRequest: Successfully located %s." % requestName)
            return S_OK(subRequests)
        except Exception, error:
            errStr = "__locateRequest: Exception while locating request."
            self.log.exception(errStr, requestName, lException=error)
            return S_ERROR(errStr)
Example #29
def zip_android(zf, basepath):
    android_dist_dir = os.path.join(top_dir, 'dist', 'android')
    zip_dir(zf, os.path.join(cur_dir, 'simplejson'),
            os.path.join(basepath, 'android', 'simplejson'))
    android_jar = os.path.join(android_dist_dir, 'titanium.jar')
    zf.write(android_jar, '%s/android/titanium.jar' % basepath)

    android_depends = os.path.join(top_dir, 'android', 'dependency.json')
    zf.write(android_depends, '%s/android/dependency.json' % basepath)

    titanium_lib_dir = os.path.join(top_dir, 'android', 'titanium', 'lib')
    for thirdparty_jar in os.listdir(titanium_lib_dir):
        if thirdparty_jar == "smalljs.jar": continue
        elif thirdparty_jar == "commons-logging-1.1.1.jar": continue
        jar_path = os.path.join(top_dir, 'android', 'titanium', 'lib',
                                thirdparty_jar)
        zf.write(jar_path, '%s/android/%s' % (basepath, thirdparty_jar))

    # include all module lib dependencies
    modules_dir = os.path.join(top_dir, 'android', 'modules')
    for module_dir in os.listdir(modules_dir):
        module_lib_dir = os.path.join(modules_dir, module_dir, 'lib')
        if os.path.exists(module_lib_dir):
            for thirdparty_jar in os.listdir(module_lib_dir):
                if thirdparty_jar.endswith('.jar'):
                    jar_path = os.path.join(module_lib_dir, thirdparty_jar)
                    zf.write(jar_path,
                             '%s/android/%s' % (basepath, thirdparty_jar))

    android_module_jars = glob.glob(
        os.path.join(android_dist_dir, 'titanium-*.jar'))
    for android_module_jar in android_module_jars:
        jarname = os.path.split(android_module_jar)[1]
        zf.write(android_module_jar,
                 '%s/android/modules/%s' % (basepath, jarname))
Example #30
def process_and_save(filename):
	"""
	Little script to do reading, selecting of the right data, getting 
	it in the right structure and then pickling it.
	This is supposed to make reading a lot faster when the light cones 
	are to be made.
	"""
	
	picklename = filename+'.pickled'
		
	# First check if the pickled version doesn't already exist
	if '/' in filename:
		dir = os.listdir(filename.rsplit('/', 1)[0])
	else: dir = os.listdir('.')
	
	if os.path.basename(picklename) in dir:  # safe even without a '/' in the name
		print "Pickled version already exists for", filename
		return False
	
	data = read_bolshoi(filename, nopickle=True)
	if not data: return None
	
	with open(picklename, 'w') as picklefile:
		cPickle.dump(data, picklefile)
	
	return True