Example #1
    def __init__(self):
        self.flags = ArgumentParser().get_flags()

        # The -v and -p options are mutually exclusive, so when only the
        # version is requested there is no need to read the config or
        # compute the regexes.
        if self.flags.version:
            return

        # check_output / CalledProcessError come from the subprocess module
        # (the import is not shown in this excerpt).
        try:
            self.repo_root = check_output(
                ['git', 'rev-parse', '--show-toplevel']).decode().strip('\n')
        except CalledProcessError:
            print('Try running codesearch from a valid git repository.')
            exit(1)

        self.config = self.__config()
        self.include_folders_regex = self.__include_folders_regex()
        self.file_extensions_regex = self.__file_extensions_regex()
        self.exclude_folders_regex = self.__exclude_folders_regex()
Example #2
import json
from argumentparser import ArgumentParser
from dbconnector import HostelDBConnector
from filehandler import FileHandler


def json2dict(filename):
    raw_data = FileHandler.read(filename)
    return json.loads(raw_data)


if __name__ == '__main__':
    parser = ArgumentParser()
    args = parser.parse_arguments()

    students_data = json2dict(args['students_file'])
    rooms_data = json2dict(args['rooms_file'])

    connector = HostelDBConnector(args['config_file'])
    connector.connect()

    connector.create_tables()
    connector.insert_rooms(rooms_data)
    connector.insert_students(students_data)

    # Add an index on birthday:
    # - our queries filter on this column
    # - there is no need to index the enum column with only 2 values ('Sex': 'M', 'F')
    # - no other column is used in the SELECT queries
    index_creation = '''create index idx_student_birthday on hostel.student (birthday)
                        comment '' '''
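
The excerpt is cut off right after the statement string is built. A minimal sketch of one possible continuation is shown below; HostelDBConnector's interface is not visible here, so the execute and close method names are assumptions for illustration, not the project's actual API.

    # Hypothetical continuation (method names are assumed, not taken from the project):
    connector.execute(index_creation)  # run the CREATE INDEX statement
    connector.close()                  # release the database connection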
Example #3
        name_list=output_name_list,
    )

    # Load galaxy plots
    loadGalaxyPlots(
        web,
        config.output_directory,
        num_galaxies_to_show,
        output_name_list,
    )

    # Finish and output html file
    html.render_web(web, config.output_directory)

    # Compute how much time it took to run the script
    time_end = time()
    script_runtime = time_end - time_start
    m, s = divmod(script_runtime, 60)
    h, m = divmod(m, 60)
    print(
        f"The script finished in {h:.0f} hours, {m:.0f} minutes, and {s:.0f} seconds"
    )

    return


if __name__ == "__main__":

    config_parameters = ArgumentParser()
    main(config_parameters)
Example #4
from model import Transformer, Generator, Discriminator
import torch
import torch.nn as nn
from torch import optim
from torch.autograd import grad
from argumentparser import ArgumentParser
from data_processing import train_loader
from utils import bit_entropy
arg = ArgumentParser()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
D1 = Discriminator().to(device)
G1 = Generator().to(device)
model = Transformer(arg.num_size).to(device)
loss_fn = nn.MSELoss().to(device)

optimizer = optim.Adam(model.parameters(), lr=arg.lr)
optimizer_d = optim.Adam(D1.parameters(), lr=arg.lr)
optimizer_g = optim.Adam(G1.parameters(), lr=arg.lr)


def train(data_iter, model, D, G):
    # Train the transformer together with the discriminator/generator
    # passed in as arguments.
    for i in range(arg.num_epoch):
        model.train()
        D.train()
        G.train()
        for it, data in enumerate(data_iter):
            data = data.to(device)
            out, input1, binary_code, out1 = model(data)
            out1 = torch.reshape(
                out1, (arg.batch_size, -1, arg.window, arg.code_size))
Example #5
from torch.utils.data import Dataset, DataLoader
import torch
import pandas as pd
from sklearn import preprocessing
from argumentparser import ArgumentParser
import numpy as np

arg = ArgumentParser()


class MTSdataset(Dataset):
    def __init__(self, x_data):
        self.len = int(x_data.shape[0] / arg.stride)
        self.x_data = torch.from_numpy(x_data).float().view(
            x_data.shape[0], x_data.shape[1])

    def __getitem__(self, index):
        # Slice a fixed-length window starting at stride * index; windows that
        # run past the end of the series are replaced by an all-zero window.
        data = self.x_data[arg.stride * index:arg.stride * index +
                           arg.window, :]
        if data.shape[0] != arg.window:
            data = torch.zeros(arg.window, arg.num_size)
        return data

    def __len__(self):
        return self.len


if arg.dataset == "air_quality":
    num_data = pd.read_csv("./data/air_quality_data.csv")
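
MTSdataset yields fixed-length (window, num_size) tensors, so it can be fed straight to the DataLoader imported at the top of this example. The following is a small usage sketch with synthetic data, not part of the original project; it assumes arg.window, arg.stride, arg.num_size and arg.batch_size are configured as in the snippets above.

# Usage sketch (illustrative only): wrap MTSdataset in a DataLoader.
# The arg.* fields are assumed to be set by the project's ArgumentParser.
synthetic = np.random.randn(10 * arg.stride, arg.num_size).astype(np.float32)
loader = DataLoader(MTSdataset(synthetic),
                    batch_size=arg.batch_size, shuffle=False, drop_last=True)
for batch in loader:
    # each batch has shape (batch_size, arg.window, arg.num_size)
    print(batch.shape)
    break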
Example #6
import os, sys, logging
from argumentparser import ArgumentParser, ArgumentError
import statscollector
## Debugging for Visual Studio 2013 with Python Tools for Visual Studio
def dbg():
	try:
		import ptvsd
		ptvsd.enable_attach(secret='s')
		ptvsd.wait_for_attach()
	except ImportError:
		# Install ptvsd and retry; note that pip.main() was removed in pip 10,
		# so this inline install only works with older pip versions.
		import pip
		pip.main(['install', '-Iv', 'ptvsd==2.2.0'])
		dbg()

argParser = ArgumentParser(sys.argv)
try:
	argParser.ValidateAllArgs()
except ArgumentError as err:
	logging.error("\nERROR: " + err.message)
	logging.error(argParser.GetUsage())

# Otherwise, store the pathname provided as an argument
else:
	# Assume the Dicom To Nrrd Converter is in the same folder as this script
	pathtoconverter = os.path.realpath(os.path.join(os.path.dirname(sys.argv[0]), "DicomToNrrdConverter.exe"))
	# Get parsed arguments
	pathtodicoms = argParser.GetArg("pathtodicoms")[0]
	segmentationfile = argParser.GetArg("segmentationfile")[0]
	foldersaveName = argParser.GetArg("foldersavename")[0]
	keepnrrddir = argParser.GetArg("keepnrrddir")
	snrsegment = argParser.GetArg("getsnr")[0]