Example #1
def transpile_package(package, language):
    """ translate from crop2ml package"""
    sourcef = package
    namep = sourcef.split(".")[0]
    pkg = Path(sourcef)
    models = model_parser(pkg)  # parse XML files and create Python model objects
    output = Path(os.path.join(pkg, 'src'))
    dir_test = Path(os.path.join(pkg, 'test'))

    # Create the output directories if they do not exist.
    if not output.isdir():
        output.mkdir()
    if not dir_test.isdir():
        dir_test.mkdir()

    m2p = render_cyml.Model2Package(models, dir=output)
    m2p.generate_package()  # generate cyml models in "pyx" directory
    tg_rep = Path(os.path.join(output, language))  # target-language model directory in output
    dir_test_lang = Path(os.path.join(dir_test, language))

    if not tg_rep.isdir():
        tg_rep.mkdir()
    if not dir_test_lang.isdir():  # Create if it doesn't exist
        dir_test_lang.mkdir()

    m2p.write_tests()

    # generate cyml functions
    cyml_rep = Path(os.path.join(output, 'pyx'))  # cyml model directory in output

    # create the topology of the composite model
    T = Topology(namep, package)
    namep = T.model.name.lower()

    # domain class
    if language in domain_class:
        getattr(
            getattr(pycropml.transpiler.generators,
                    '%sGenerator' % NAMES[language]),
            'to_struct_%s' % language)([T.model], tg_rep, namep)
    # wrapper
    if language in wrapper:
        getattr(
            getattr(pycropml.transpiler.generators,
                    '%sGenerator' % NAMES[language]),
            'to_wrapper_%s' % language)(T.model, tg_rep, namep)

    # Transform model unit to languages and platforms
    for k, file in enumerate(cyml_rep.files()):
        with open(file, 'r') as fi:
            source = fi.read()
        name = os.path.split(file)[1].split(".")[0]
        for model in models:  # the files may not be in the same order as the models
            if name.lower() == model.name.lower() and prefix(model) != "function":
                test = Main(file, language, model, T.model.name)
                test.parse()
                test.to_ast(source)
                code = test.to_source()
                filename = Path(
                    os.path.join(tg_rep,
                                 "%s.%s" % (name.capitalize(), ext[language])))
                with open(filename, "wb") as tg_file:
                    tg_file.write(code.encode('utf-8'))
                Model2Nb(model, code, name,
                         dir_test_lang).generate_nb(language, tg_rep, namep)
                #code2nbk.generate_notebook(code, name, dir_nb_lang)

    # Create Cyml Composite model
    T_pyx = T.algo2cyml()
    fileT = Path(os.path.join(cyml_rep, "%sComponent.pyx" % namep))
    with open(fileT, "wb") as tg_file:
        tg_file.write(T_pyx.encode('utf-8'))

    filename = Path(
        os.path.join(tg_rep,
                     "%sComponent.%s" % (namep.capitalize(), ext[language])))

    with open(filename, "wb") as tg_file:
        tg_file.write(T.compotranslate(language).encode('utf-8'))

    status = 0
    return status
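A minimal invocation sketch; the package path and language code below are illustrative placeholders, not values confirmed by pycropml's documentation:

# Hypothetical usage: transpile a Crop2ML package to one target language.
# 'MyCrop.pkg' and 'py' are stand-ins for a real package directory and a
# key of the ext/NAMES tables used above.
status = transpile_package('MyCrop.pkg', 'py')
assert status == 0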
Example #2
from path import Path
from natsort import natsorted

files = natsorted(Path('../data/averaged/').glob('*.xyz'))

content = \
"""
universe = vanilla
requirements = (OpSys == "LINUX") && (OpSysMajorVer == 6)
executable = condor_run.sh
arguments = $(Process)
output = condor_outputs/$(Cluster)_$(Process).out
error = condor_errors/$(Cluster)_$(Process).err
log = $(Cluster).log
should_transfer_files = YES
when_to_transfer_output = ON_EXIT
transfer_input_files = ../data/clusters.tar.gz, condor_run.sh, ../data/averaged
transfer_output_files = ppm3d_correct/analysis/data/motif_results, ppm3d_correct/analysis/data/motif_errors
request_cpus = 1
request_memory = 2GB
request_disk = 2GB
queue arguments from (
{}
)
""".format("\n".join(files))

open("submit.sub", "w").write(content)
Example #3
def crop(filename):
    file_path = Path(app.config['UPLOADED_CROP_DEST']) / filename
    if file_path.exists() and file_path.isfile():
        return Path(app.config['PATH_CROP_KEY']) / filename
    return filename
Example #4
File: files.py Project: sobhe/zolal
import json
from path import Path
from collections import OrderedDict
from quran import read_quran, read_simple, read_lines
from almizan import read_tafsir, section_ayas, refine_numbers, resolve_footnotes, refine_section, resolve_phrases, resolve_headers

USE_ALMIZAN_TRANSLATION = False

data = Path('data')
files = Path('../files')

# read quran data
ayas, suras = read_quran(open(data / 'quran.txt'))
read_simple(open(data / 'quran-simple.txt'), ayas)
pages = read_lines(open(data / 'quran-lines.txt'), ayas)

# write quran pages
with open(files / 'quran' / 'all', 'w') as quran_file:
    format = lambda aya: OrderedDict([('page', aya['page']),
                                      ('text', aya['text']),
                                      ('raw', aya['raw']), ('id', aya['id'])])
    for p, ids in pages.items():
        page = '\n'.join(
            [json.dumps(format(ayas[id]), ensure_ascii=False) for id in ids])
        print(page, file=open(files / 'quran' / ('p%d' % p), 'w'))
        print(page, file=quran_file)

# almizan
almizan_sections, phrases, headers, translations = [], [], [], {}
for ar_section, fa_section in zip(read_tafsir(open(data / 'almizan_ar.html')),
                                  read_tafsir(open(data / 'almizan_fa.html'))):
Example #5
def module_tmpdir(request, tmpdir_factory, logger):
    suffix = request.module.__name__
    temp_dir = Path(tmpdir_factory.mktemp(suffix))
    logger.info('Created temp folder: %s', temp_dir)

    return temp_dir
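This reads like a pytest fixture body with its decorator cropped off; a sketch of how it would typically be declared and consumed, assuming module scope (an assumption, since the decorator is not shown):

import pytest

# Assumed decoration; only the function body appears in the snippet above.
# @pytest.fixture(scope='module')
# def module_tmpdir(request, tmpdir_factory, logger): ...

def test_creates_files(module_tmpdir):  # pytest injects the fixture by name
    (module_tmpdir / 'out.txt').write_text('ok')
    assert (module_tmpdir / 'out.txt').isfile()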
Example #6
    def read(self, filename):
        filename = Path(filename).abspath()
        ret = elf_read_firmware(str(filename), self.backend)
        if ret == -1:
            raise ValueError(filename + ' could not be loaded!')
        self.filename = filename
Example #7
def generate_project_scratch_dir(experiment):
    project_subdir = '{user_id}_{project}'.format(user_id=experiment.user_id, project=experiment.project)
    scratch_subdir = (experiment.scratch_subdir or DEFAULT_SCRATCH_SUBDIR)
    return Path(experiment._slurm_scratch_dir) / scratch_subdir / project_subdir
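path.py's Path overloads / for joining, so the pieces compose left to right; a small check with made-up values standing in for the experiment attributes:

from path import Path

scratch = Path('/scratch') / 'my_subdir' / '42_myproject'
print(scratch)  # /scratch/my_subdir/42_myproject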
Example #8
def run(ctx, neptune, spec, tags, requirements_file, base_image, script,
        params):
    """Run experiment"""

    context = ctx.obj['context']

    # validate options and arguments
    requirements = ([req.strip() for req in Path(requirements_file).open('r')]
                    if requirements_file else [])
    if context['backend_type'] == 'kubernetes' and not base_image:
        raise click.ClickException('Provide docker base image')
    if context['backend_type'] == 'kubernetes' and not requirements_file:
        raise click.ClickException('Provide requirements.txt file')
    script_has_spec = get_experiments_spec_handle(script, spec) is not None
    neptune_support = context.get('neptune', None) or neptune
    if neptune_support and not neptune and not script_has_spec:
        raise click.ClickException(
            'Neptune support is enabled in context '
            'but no neptune config or python experiment descriptor provided')
    if neptune and script_has_spec:
        raise click.ClickException(
            'Provide only one of: neptune config or python experiment descriptor'
        )

    if not neptune_support:
        # TODO: implement it if possible
        raise click.ClickException(
            'Currently doesn\'t support experiments without neptune')

    neptune_dir = None
    try:
        # prepare the neptune directory in case neptune yamls need to be generated
        if neptune_support and not neptune:
            script_path = Path(script)
            neptune_dir = script_path.parent / 'neptune_{}'.format(
                script_path.stem)
            neptune_dir.makedirs_p()

        for neptune_path, experiment in generate_experiments(
                script, neptune, context, spec=spec, neptune_dir=neptune_dir):

            experiment.update({
                'base_image': base_image,
                'requirements': requirements
            })

            if neptune_support:
                script = experiment.pop('script')
                cmd = ' '.join([script] + list(params))
                # tags from neptune.yaml will be extracted by neptune
                additional_tags = context.get('tags', []) + list(tags)
                cmd = NeptuneWrapperCmd(cmd=cmd,
                                        experiment_config_path=neptune_path,
                                        neptune_storage=context['storage_dir'],
                                        paths_to_dump=None,
                                        additional_tags=additional_tags)
                experiment['cmd'] = cmd
                experiment.setdefault('paths_to_copy', [])
                for possible_token_path in [
                        '~/.neptune_tokens/token', '~/.neptune/tokens/token'
                ]:
                    neptune_path = Path(
                        possible_token_path).expanduser().abspath()
                    if neptune_path.exists():
                        neptune_token_files = experiment.setdefault(
                            'neptune_token_files', [])
                        neptune_token_files.append(str(neptune_path))

                assert len(experiment.get('neptune_token_files', [])) < 2, \
                    'You have multiple neptune tokens ({}); remove obsolete'.format(
                        ', '.join(experiment['neptune_token_files']))
            else:
                # TODO: implement no neptune version
                # TODO: for sbatch set log path into something like os.path.join(resource_dir_path, "job_logs.txt")
                raise click.ClickException('Not implemented yet')

            run_kwargs = {'experiment': experiment}
            backend = {
                'kubernetes': KubernetesBackend,
                'slurm': SlurmBackend
            }[experiment['backend_type']]()
            # TODO: add calling experiments in parallel
            backend.run(**run_kwargs)
    finally:
        if neptune_dir:
            neptune_dir.rmtree_p()
Example #9
def get_default_config_path(ctx):
    default_config_file_name = 'config.yaml'

    app_name = Path(ctx.command_path).stem
    app_dir = Path(click.get_app_dir(app_name))
    return app_dir / default_config_file_name
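click.get_app_dir returns the OS-conventional per-user config directory, so the resolved location is platform dependent; roughly:

import click
from path import Path

# e.g. ~/.config/myapp on Linux, ~/Library/Application Support/myapp on macOS
app_dir = Path(click.get_app_dir('myapp'))
print(app_dir / 'config.yaml')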
Example #10
    help='Whether to rank the images after they have been scored')

parser.add_argument('-out',
                    type=str,
                    default=None,
                    help='save the result to file')

args = parser.parse_args()
resize_image = args.resize.lower() in ("true", "yes", "t", "1")
target_size = (224, 224) if resize_image else None
rank_images = args.rank.lower() in ("true", "yes", "t", "1")

# give priority to directory
if args.dir is not None:
    print("Loading images from directory : ", args.dir)
    imgs = Path(args.dir).files('*.png')
    imgs += Path(args.dir).files('*.jpg')
    imgs += Path(args.dir).files('*.jpeg')

elif args.img[0] is not None:
    print("Loading images from path(s) : ", args.img)
    imgs = args.img

else:
    raise RuntimeError(
        'Either -dir or -img arguments must be passed as argument')

if args.out is None:
    raise RuntimeError('Please specify the output')

with tf.device('/GPU:0'):
Example #11
def get_repository():
    config = twittback.config.read_config()
    db_path = Path(config["db"]["path"])
    db_path.parent.makedirs_p()
    return Repository(db_path)
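makedirs_p is path.py's equivalent of mkdir -p: it creates any missing parents and does not raise if the directory already exists, so get_repository is safe to call repeatedly. For instance:

from path import Path

db_path = Path('/tmp/twittback/db.sqlite')  # illustrative path
db_path.parent.makedirs_p()  # creates /tmp/twittback if needed
db_path.parent.makedirs_p()  # second call is a no-op, not an error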
Example #12
# Run the following command in terminal to connect to redis channel
# docker run -p 6379:6379 -d redis:5

User = get_user_model()

# Initializing DeepCorrect
corrector = DeepCorrect(
    '/home/pranshu/GAMR/gamr/meetingmode/deepcorrect/deeppunct_params_en',
    '/home/pranshu/GAMR/gamr/meetingmode/deepcorrect/deeppunct_checkpoint_google_news'
)

# Initializing dataset for LexRank
print('loading dataset and initializing...')
documents = []
documents_dir = Path('/home/pranshu/GAMR/gamr/meetingmode/total')

for file_path in documents_dir.files('*.txt'):
    with file_path.open(mode='rt', encoding='latin1') as fp:
        documents.append(fp.readlines())

lxr = LexRank(documents, stopwords=STOPWORDS['en'])
print('dataset load done!')
print('server is running!')


class ChatConsumer(AsyncWebsocketConsumer):
    async def connect(self):
        self.room_name = self.scope['url_route']['kwargs']['meeting_code']
        self.room_group_name = 'meeting_%s' % self.room_name
        # Join room group
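With lxr built from the corpus above, summarization is then a single call; a sketch assuming the lexrank package's documented get_summary API:

# Sketch only; 'sentences' is made-up input.
sentences = [
    'The meeting started at nine.',
    'The meeting began at 9 am sharp.',
    'Lunch was served afterwards.',
]
summary = lxr.get_summary(sentences, summary_size=1, threshold=.1)
print(summary)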
Example #13
#!/usr/bin/env python
from __future__ import absolute_import, division

__version__ = "$Revision: 1.5 $"

import inspect
import logging
import logging.config
from path import Path
import sys

LOGGINGRC_PATH = Path("~/.loggingrc").expand()

if LOGGINGRC_PATH.exists():
    logging.config.fileConfig(LOGGINGRC_PATH)
else:
    logging.basicConfig()

class autolog(object):
    def __init__(self):
        self._name = self._get_name()
        self._logger = logging.getLogger(self._name)

    def __getitem__(self, name):
        if name.startswith("."):
            name = self._name + name

        return logging.getLogger(name)

    def __getattr__(self, name):
        """
Example #14
# For the full list, see the documentation:
# http://www.sphinx-doc.org/en/master/config

from __future__ import absolute_import, unicode_literals

import datetime
import os
import sys
from subprocess import check_call

import django
import edx_theme
import six
from path import Path

root = Path('../..').abspath()

# Hack the PYTHONPATH to match what LMS and Studio use so all the code
# can be successfully imported
sys.path.insert(0, root)
sys.path.append(root / "docs/guides")
sys.path.append(root / "cms/djangoapps")
sys.path.append(root / "common/djangoapps")
sys.path.append(root / "common/lib/capa")
sys.path.append(root / "common/lib/safe_lxml")
sys.path.append(root / "common/lib/symmath")
sys.path.append(root / "common/lib/xmodule")
sys.path.append(root / "lms/djangoapps")
sys.path.append(root / "lms/envs")
sys.path.append(root / "openedx/core/djangoapps")
sys.path.append(root / "openedx/features")
Example #15
def initialize_train(parser):

    print("=> initializing train")

    args = parser.parse_args()
    save_path = save_path_formatter(args, parser)
    # workers to 0 in pycharm
    isRunningInPyCharm = "PYCHARM_HOSTED" in os.environ
    if isRunningInPyCharm:
        print('=> Pycharm mode - setting workers to 0')
        args.save_path = save_path = 'pycharm_' + save_path
    else:
        print('=> Shell mode - full settings')

    if args.DEBUG:
        args.save_path = save_path = 'debug_' + save_path
        args.print_freq = 5
        args.epoch_size = 10
        args.epochs = 10

    # read per-channel mean/std from stats.txt, falling back to defaults
    try:
        stats_file = open(os.path.join(args.data, "stats.txt"), "r")
        args.std_vals = [float(val) for val in stats_file.readline().split('[')[1].split(']')[0].split()]
        args.mean_vals = [float(val) for val in stats_file.readline().split('[')[1].split(']')[0].split()]
        stats_file.close()
    except Exception:
        args.std_vals = [0.5, 0.5, 0.5]
        args.mean_vals = [0.5, 0.5, 0.5]


    if args.final_act in ('sigmoid', 'sigmoid4x', 'clamp01'):
        args.norm_mm_by_act = [0, 1]
    else:
        args.norm_mm_by_act = []


    # when there is only 1 output channel, don't compute the correlation loss
    if args.out_channels == 1:
        args.multi_channel_corr_weight = 0

    # when working channel-wise, the network always uses in_channels = out_channels = 1
    if args.channel_wise:
        args.out_channels = 1
        args.in_channels = 1

    if args.final_act == 'None':
        args.final_act = None

    args.save_path = 'checkpoints' / save_path
    if args.manual_save_path is not None:
        args.save_path = 'checkpoints' / Path(args.manual_save_path)

    print('=> will save everything to {}'.format(args.save_path))
    args.save_path.makedirs_p()
    torch.manual_seed(args.seed)

    # saving execution args to file
    with open(args.save_path / 'args_in_run.txt', 'w') as f:
        f.write("command line in run:\n")
        f.write(' '.join(sys.argv[1:]))
        f.write("\n\nargs in run:\n")
        for a in vars(args):
            f.write('--{} {}\n'.format(a, getattr(args, a)))

    # define writer for Tensorboard
    training_writer = SummaryWriter(args.save_path)

    return training_writer, args
Example #16
    def getWebDriverInstance(self):
        """
       Get WebDriver Instance based on the browser configuration

        Returns:
            'WebDriver Instance'
        """
        baseURL = "https://www.xome.com/"

        items = []

        if self.browser != "none" and self.os == "none":
            if self.browser == "safari":
                driver = webdriver.Safari()
                self.log.info("Running Safari Tests")
            elif self.browser == "firefox":
                #driver = webdriver.Firefox(executable_path=r'./zfiles/geckodriver')
                driverpath = os.path.join(self.thisdir, 'zfiles/geckodriver')
                driver = webdriver.Firefox(executable_path=driverpath)
                self.log.info("Running Firefox Tests")
            elif self.browser == "chrome":
                # Set chrome driver
                #driver = webdriver.Chrome(executable_path=r'./zfiles/chromedriver')
                driverpath = os.path.join(self.thisdir, 'zfiles/chromedriver')
                driver = webdriver.Chrome(executable_path=driverpath)
                self.log.info("Running Chrome Tests")

            # Loading browser with App URL
            driver.get(baseURL)

            # Maximize the window
            driver.maximize_window()
            #driver.set_window_size(1440, 900)  # mac 15inch screen resolution
            time.sleep(5)

            # Setting Driver Implicit Time out for An Element
            driver.implicitly_wait(30)

            items.append(driver)
            items.append(self.browser)
            items.append(self.os)
            items.append(self.thisdir)
            return items

        elif self.browser == "none" and self.os != "none":
            desired_caps = {}
            if self.os == "android":
                desired_caps['platformName'] = 'Android'
                desired_caps['platformVersion'] = '7.1.1'
                desired_caps['automationName'] = 'uiautomator2'
                desired_caps['deviceName'] = 'Nexus'
                desired_caps['appPackage'] = "com.xome.android"
                #adb shell dumpsys window windows|grep -E 'mCurrentFocus' to get app info
                desired_caps[
                    'appActivity'] = "com.xome.android.ui.map.MapActivity2"
                desired_caps['newCommandTimeout'] = 120
                #desired_caps['app'] = Path('../zfiles/base.apk')
                appzpath = os.path.join(self.thisdir, 'zfiles/base.apk')
                desired_caps['app'] = Path(appzpath)

                driver = appdriver.Remote("http://localhost:4723/wd/hub",
                                          desired_caps)
                self.log.info("Running Android Tests")

                items.append(driver)
                items.append(self.browser)
                items.append(self.os)
                items.append(self.thisdir)
                return items

            elif self.os == "ios":
                desired_caps['platformName'] = 'iOS'
                desired_caps['platformVersion'] = '10.3.3'
                desired_caps['automationName'] = 'xcuitest'
                desired_caps['deviceName'] = 'iPhone 5'
                desired_caps[
                    'udid'] = '8319807bbbc1d04c9bbc0634e14d28aca946b536'
                desired_caps['xcodeOrgid'] = 'Angela Tong'
                desired_caps['xcodeSigningId'] = 'iPhone Developer'
                desired_caps['newCommandTimeout'] = 120
                #desired_caps['app'] = Path('../zfiles/base.apk')
                appzpath = os.path.join(self.thisdir, 'zfiles/base.ipa')
                desired_caps['app'] = Path(appzpath)
                driver = appdriver.Remote("http://localhost:4723/wd/hub",
                                          desired_caps)
                self.log.info("Running iOS Tests")

                items.append(driver)
                items.append(self.browser)
                items.append(self.os)
                items.append(self.thisdir)
                return items

        else:
            self.log.info(
                "Have to enter a value for both browser and os. Please enter --browser none for appium automation or --os none for selenium automation"
            )
Example #17
                              required=True)

    location = schema.TextLine(title=_(u"label_location", default=u"Location"),
                               max_length=256,
                               required=False)

    start = UTCDatetime(title=_('label_start', default=u"Start"),
                        required=True)

    end = UTCDatetime(title=_('label_end', default=u"End"), required=False)


ADD_MEETING_STEPS = (('add-meeting', _(u'Add Meeting')),
                     ('add-meeting-dossier', _(u'Add Dossier for Meeting')))

TEMPLATES_DIR = Path(__file__).joinpath('..', 'templates').abspath()


def get_dm_key(committee_oguid=None):
    """Return the key used to store meeting-data in the wizard-storage."""
    committee_oguid = committee_oguid or get_committee_oguid()
    return 'create_meeting:{}'.format(committee_oguid)


def get_committee_oguid():
    return Oguid.parse(getRequest().get('committee-oguid'))


class AddMeetingWizardStep(BaseWizardStepForm, Form):
    step_name = 'add-meeting'
    label = _('Add Meeting')
Example #18
from path import Path
from itertools import chain
import os

# get all files
d = Path('./')

# text files
config       = d.walkfiles(".*")
md           = d.walkfiles("*.md")
licensefiles = d.walkfiles("*.license")

# concat all lists
files = chain(md, config, licensefiles)

# delete them
for file in files:
    print("Deleting: " + file)
    file.remove()

# compress media files
mp3 = d.walkfiles("*.mp3")
m4a = d.walkfiles("*.m4a")
wav = d.walkfiles("*.wav")

# concat all lists
allmedia = chain(mp3, m4a, wav)

# compress
for media in allmedia:
    directory = os.path.dirname(media)
Example #19
import re

import pytest
from path import Path

import nodely

TEST_DIR = (
    Path(__file__)  # pylint: disable=no-value-for-parameter
    .realpath().dirname())

exec((TEST_DIR / 'variables.py').text())


@pytest.fixture
def node_package():
    return NODE_PACKAGE  # pylint: disable=undefined-variable


@pytest.fixture
def install_node_package(node_package):
    nodely.install(node_package)


@pytest.fixture
def node_package_command():
    return 'coffee'


@pytest.fixture
def node_package_command_args():
Example #20
from path import Path

from asm1.main.play_one_game import play_one_game
from asm1.model.hex_board import HexBoard
from asm1.model.search import Search
import trueskill as ts
from itertools import combinations
import matplotlib.pyplot as plt

from others.system_config import SystemConfig
from util.common_util import CommonUtil
from util.plot_util import PlotUtil

if __name__ == '__main__':
    save_dir_path: str = (SystemConfig.FILE_OUTPUT_HOME_DIR +
                          CommonUtil.generate_date_time_str() + "_" +
                          Path(__file__).stem)

    print("start_time", CommonUtil.generate_date_time_str(), "file_path",
          save_dir_path)
    CommonUtil.make_dir(save_dir_path)

    board_size: int = 4
    num_of_games_to_run = 500

    data_analysis_interval: int = 20
    is_save_data: bool = True

    mcts_cp: int = 2
    mcts_search_iterations: int = 100

    player_random_3 = Search(
Example #21
File: qtro.py Project: qfacegen/qface-qtro
import click
import logging
import logging.config
import yaml
from path import Path

from qface.generator import FileSystem, RuleGenerator
from qface.helper.qtcpp import Filters
from qface.helper.doc import parse_doc
from qface.watch import monitor
from qface.shell import sh
import qface.filters

from jinja2 import environmentfilter

here = Path(__file__).dirname()

logging.basicConfig()

log_config = Path('log.yaml')

if log_config.exists():
    logging.config.dictConfig(yaml.safe_load(log_config.open('r')))
log = logging.getLogger(__name__)

features = set()


class CustomFilters:
    @staticmethod
    def path(symbol):
Example #22
    def test__getitem__(self, node_package_command):
        command = nodely.bin[node_package_command]
        assert type(command) is Command
        assert Path(command).normcase() \
            == nodely.which(node_package_command).normcase()
Example #23
def main():
    args = parser.parse_args()

    model = get_depth_model(args.model_name).to(device)
    weights = torch.load(args.model_path)
    # weights = torch.load(args.model_path, map_location=lambda storage, loc: storage)
    model.load_state_dict(weights['model_state'])
    model.eval()

    seq_length = 0

    dataset_dir = Path(args.dataset_dir)
    with open(args.dataset_list, 'r') as f:
        test_files = list(f.read().splitlines())

    framework = test_framework(dataset_dir, test_files, seq_length,
                               args.min_depth, args.max_depth)

    print('{} files to test'.format(len(test_files)))
    errors = np.zeros((2, 7, len(test_files)), np.float32)

    for j, sample in enumerate(tqdm(framework)):
        tgt_img = sample['tgt']  # [375, 1242, 3] ndarray, original RGB image

        h, w, _ = tgt_img.shape
        if h != args.img_height or w != args.img_width:
            tgt_img = imresize(
                tgt_img, (args.img_height, args.img_width)).astype(np.float32)

        tgt_img = np.transpose(tgt_img, (2, 0, 1))
        tgt_img = torch.from_numpy(tgt_img).unsqueeze(0)
        tgt_img = ((tgt_img / 255 - 0.5) / 0.5).to(
            device)  # normalize to [-1, 1]

        pred = model(tgt_img).cpu().numpy()[0, 0]
        gt_depth = sample['gt_depth']

        if args.pred_disp:
            pred_depth = 1 / pred
        else:
            pred_depth = pred

        # upsample to gt depth resolution, [375, 1242]
        # and mask out pixels with depth not in [min_depth, max_depth]
        pred_depth_zoomed = zoom(
            pred_depth,
            (gt_depth.shape[0] / pred_depth.shape[0], gt_depth.shape[1] /
             pred_depth.shape[1])).clip(args.min_depth, args.max_depth)
        if sample['mask'] is not None:
            pred_depth_zoomed = pred_depth_zoomed[sample['mask']]
            gt_depth = gt_depth[sample['mask']]

        errors[1, :, j] = compute_errors(gt_depth, pred_depth_zoomed)

    mean_errors = errors.mean(2)
    error_names = ['abs_rel', 'sq_rel', 'rms', 'log_rms', 'a1', 'a2', 'a3']

    print("Results : ")
    print("{:>10}, {:>10}, {:>10}, {:>10}, {:>10}, {:>10}, {:>10}".format(
        *error_names))
    print(
        "{:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}".
        format(*mean_errors[1]))
Example #24
    def test__getattr__(self, node_package_command):
        command = getattr(nodely.bin, node_package_command)
        assert type(command) is Command
        assert Path(command).normcase() \
            == nodely.which(node_package_command).normcase()
Example #25
                release = pkg_version[0:-42]
            else:
                # Or we try to get it from Resources
                try:
                    pkg_version = open('../Resources/exe/version').readline()
                    release = pkg_version[0:-42]
                except:
                    release = "unknown"

# We try to get the revision from the version file (if it exists)
revision = pkg_version[-40:] if pkg_version else ''

# Compose version string
version = release + "-r" + revision if revision else release

# SNAP version and release
snap_environ = os.environ.get('SNAP')
if snap_environ:
    try:
        snap_base_path = Path(snap_environ)
        changelog_path = snap_base_path / 'lib'/ 'python2.7' / 'site-packages' / 'usr' / 'share' / 'exe' / 'ChangeLog'
        line = open(changelog_path).readline()
        release = line.split(':')[1].split(')')[0]
        version = release
        revision = release
    except:
        pass

# If this file is executed directly, we print the project and version info
if __name__ == '__main__':
    print project, version
Example #26
    def test__new__(self, node_package_command):
        command = Command(node_package_command)
        assert Path(command).normcase() \
            == nodely.which(node_package_command).normcase()
Example #27
import os
from time import sleep

import fabric
import vagrant
from entrypoint2 import entrypoint
from path import Path

# pip3 install fabric vncdotool python-vagrant entrypoint2

DIR = Path(__file__).parent.parent.parent


class Options:
    halt = True
    recreate = True
    destroy = False


def run_box(options, vagrantfile, cmds):
    env = os.environ
    env["VAGRANT_VAGRANTFILE"] = DIR / vagrantfile
    if vagrantfile != "Vagrantfile":
        env["VAGRANT_DOTFILE_PATH"] = DIR / ".vagrant_" + vagrantfile
    else:
        env["VAGRANT_DOTFILE_PATH"] = ""

    v = vagrant.Vagrant(env=env, quiet_stdout=False, quiet_stderr=False)
    status = v.status()
    state = status[0].state
    print(status)
Example #28
    par.add_argument('action', choices=['draw_roc', 'roc5_stats'])
    par.add_argument('args')
    par.add_argument('-xlim', type=int, default=500)
    par.add_argument('-ylim', type=int, default=900)
    par.add_argument('-max_evalue', type=float, default=1)
    par.add_argument('-rocn_max_evalue', type=float)
    par.add_argument('-standards',
                     nargs='+',
                     choices=["fold", "superfamily", "JG"])
    par.add_argument('--compare', action="store_true")
    args = par.parse_args()

    ARGS = json.load(open(args.args))
    if args.action == "draw_roc":
        name = ARGS['benchmark']
        Path("./fig").mkdir_p()
        draw_SF(ARGS,
                './fig/ROC_' + name + '_' + '_'.join(args.standards),
                args.standards,
                xlim=args.xlim,
                ylim=args.ylim,
                dpi=600,
                max_evalue=args.max_evalue,
                rocn_max_evalue=args.rocn_max_evalue)

    elif args.action == "roc5_stats":
        calc_rocn_stats(ARGS,
                        args.standards[0],
                        max_evalue=args.max_evalue,
                        rocn_max_evalue=args.rocn_max_evalue)
Example #29
def inject_static_file(filepath):
    data = None
    with open(Path(app.static_folder) / filepath, 'r') as f:
        data = f.read()
    return Markup(data)
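To be callable from templates, a helper like this still has to be registered with Jinja; a sketch assuming a standard Flask app object:

# Standard Flask pattern (not shown in the snippet above): expose the helper
# as a Jinja global so templates can inline static files.
app.jinja_env.globals['inject_static_file'] = inject_static_file

# In a template: <style>{{ inject_static_file('css/critical.css') }}</style>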
Example #30
    def _save(s):
        Path(s.path).write_text(s.as_text)
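path.py pairs write_text() with the text() reader, which is all _save relies on; a quick round trip:

from path import Path

p = Path('/tmp/state.txt')
p.write_text('hello')  # replaces any existing content
assert p.text() == 'hello'
p.remove()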