Example #1
async def main():
    load_dotenv()

    argparser = get_argparser()
    argparser.add_argument("--port",
                           type=int,
                           default=5050,
                           help="port to write to")
    args = argparser.parse_args()

    try:
        token = os.environ["TOKEN"]
        username = os.environ["USERNAME"]
    except KeyError:
        try:
            token, username = await register(args.host, args.port)
            print(f"Welcome {username}! Registration complete.")
        except asyncio.TimeoutError:
            print(f"Can't connect to {args.host}")
            return

    while True:
        try:
            await chat_writer(args.host, args.port, token)
        except asyncio.TimeoutError:
            print(f"Can't connect to {args.host}")
            return
        except (ConnectionResetError, asyncio.exceptions.IncompleteReadError):
            print(f"Lost connection to {args.host}. Trying to reconnect...")
Example #2
def main():

    # Configure argparser
    argparser = get_argparser()

    # Parse the arguments
    args = argparser.parse_args()

    # Input files: JSON input file to be used as a template and Excel phenotype file
    input_json = args.json_input
    input_excel = args.pheno_file

    # Configure logging appropriate for verbosity
    configure_logging(args.verbosity_level)

    # Read in json file and check format
    logging.info("Reading WDL input template: {0}".format(input_json))
    with open(input_json) as fh:
        input_dict = json.load(fh, object_pairs_hook=OrderedDict)

    # Guess name of workflow from input names
    workflow_name = detect_workflow_name(input_dict)
    logging.info("Workflow name detected: {0}".format(workflow_name))

    # Read in excel file and check format
    logging.info("Reading phenotype information from excel file: {0}".format(
        input_excel))
    pheno_df = pd.read_excel(input_excel)

    logging.info("Validating structure of excel file...")
    check_pheno_input_format(pheno_df)

    # Normalize trait names so they are machine readable
    pheno_df["trait_name"] = pheno_df["plot_label"].apply(normalize_trait_name)

    # Get signed sumstat string that will be passed to munge_sumstats.py
    pheno_df["signed_sumstats"] = pheno_df["effect_type"].apply(
        get_signed_sumstat)

    # Replace NA sample size values with -1
    pheno_df["sample_size"] = pheno_df["sample_size"].apply(mask_na)

    # Replace NA sample size col values with -1
    pheno_df["sample_size_col"] = pheno_df["sample_size_col"].apply(mask_na)

    # Add new data fields to existing WDL input file
    output_dict = make_final_output_dict(workflow_name, input_dict, pheno_df)

    # Output WDL to stdout
    print(json.dumps(output_dict, indent=1))
Example #3
async def main():
    argparser = get_argparser()
    argparser.add_argument("--port", type=int, default=5000, help="port to read from")
    argparser.add_argument(
        "--history", default="chat.history", help="where to save transcript"
    )
    args = argparser.parse_args()

    while True:
        try:
            await chat_reader(args.host, args.port, args.history)
        except asyncio.TimeoutError:
            print(f"Can't connect to {args.host}")
            return
        except asyncio.exceptions.IncompleteReadError:
            print(f"Lost connection to {args.host}. Trying to reconnect...")
Example #4
#!/usr/bin/env python3

import sys
import utils
import aws_api


if __name__ == '__main__':
    parser = utils.get_argparser()
    options = parser.parse_args()

    config = utils.read_config(options.configPath)
    aws_client = aws_api.Client(config)

    if options.subparser_name == 'listProfiles':
        aws_client.list_profiles()

    elif options.subparser_name == 'listAvailabilityZones':
        print('\n'.join(aws_client.get_availability_zones()))

    elif options.subparser_name == 'listRequests':
        aws_client.list_spot_instance_requests()

    elif options.subparser_name == 'listInstances':
        aws_client.list_spot_instances()

    elif options.subparser_name == 'listVolumes':
        aws_client.list_volumes()

    elif options.subparser_name == 'attachVolume':
        volume_id = input('Enter volume id to attach: ')
Example #5
def main():

    # get parameters
    parser = u.get_argparser()
    args = parser.parse_args()

    args.samplers = [
        "plot_output_potential", "plot_cummulative_potential",
        "plot_output_spikes", "plot_reconstruction"
    ]
    args.conv_channels = [int(item) for item in args.conv_channels.split(',')]
    args.metrics = []

    # choose the device for computation (GPU if available)
    device = u.get_backend(args)

    # initialize logger
    logger = WandBLogger(
        args=args,
        name=args.model,
    )

    # make experiments reproducible
    if args.seed:
        u.set_seed(args.seed)

    # load dataset
    train_loader, val_loader, (width, height, channels) = u.get_datasets(
        dataset=args.dataset,
        batch_size=args.batch_size,
        cuda=args.cuda,
        verbose=args.verbose)

    # get model
    if args.model.lower() == "fcn_classifier":
        from models.fcn_classifier import FullyConnectedClassifier
        net = FullyConnectedClassifier(
            input_width=width,
            input_height=height,
            input_channels=channels,
            hidden_sizes=args.hidden_sizes,
            dataset=args.dataset,
            loss=args.loss,
            optimizer=args.optimizer,
            learning_rate=args.lr,
            weight_decay=args.wd,
            device=device,
            activation=args.activation,
            activation_out=args.activation_out,
            n_out=len(train_loader.dataset.targets.unique()),
            verbose=args.verbose,
        )

    elif args.model.lower() == "cnn_classifier":
        from models.cnn_classifier import ConvolutionalClassifier
        net = ConvolutionalClassifier(
            input_width=width,
            input_height=height,
            input_channels=channels,
            hidden_sizes=args.hidden_sizes,
            conv2d_channels=args.conv_channels,
            dataset=args.dataset,
            loss=args.loss,
            optimizer=args.optimizer,
            learning_rate=args.lr,
            weight_decay=args.wd,
            device=device,
            kernel_size=args.kernel_size,
            stride=args.stride,
            padding=args.padding,
            pooling_kernel=2,
            pooling_stride=1,
            activation=args.activation,
            activation_out=args.activation_out,
            pooling=args.pooling,
            n_out=len(train_loader.dataset.targets.unique()),
            verbose=args.verbose,
        )

    elif args.model.lower() == "scnn_classifier":
        from models.scnn_classifier import SpikingConvolutionalClassifier
        net = SpikingConvolutionalClassifier(
            input_width=width,
            input_height=height,
            input_channels=channels,
            conv2d_channels=args.conv_channels,
            hidden_sizes=args.hidden_sizes,
            dataset=args.dataset,
            loss=args.loss,
            optimizer=args.optimizer,
            learning_rate=args.lr,
            weight_decay=args.wd,
            device=device,
            kernel_size=args.kernel_size,
            stride=args.stride,
            padding=args.padding,
            pooling_kernel=2,
            pooling_stride=2,
            activation=args.activation,
            activation_out=args.activation_out,
            pooling="avg",
            steps=100,
            threshold=1,
            decay=0.99,
            pool_threshold=0.75,
            n_out=len(train_loader.dataset.targets.unique()),
            verbose=True,
        )

    elif args.model.lower() == "fcn_autoencoder":
        from models.fcn_autoencoder import FullyConnectedAutoencoder
        net = FullyConnectedAutoencoder(
            input_width=width,
            input_height=height,
            input_channels=channels,
            hidden_sizes=args.hidden_sizes,
            dataset=args.dataset,
            loss=args.loss,
            optimizer=args.optimizer,
            learning_rate=args.lr,
            weight_decay=args.wd,
            device=device,
            activation=args.activation,
            activation_out=args.activation_out,
            verbose=args.verbose,
        )

    elif args.model.lower() == "cnn_autoencoder":
        from models.cnn_autoencoder import ConvolutionalAutoencoder
        net = ConvolutionalAutoencoder(
            input_width=width,
            input_height=height,
            input_channels=channels,
            conv2d_channels=args.conv_channels,
            hidden_sizes=args.hidden_sizes,
            dataset=args.dataset,
            loss=args.loss,
            optimizer=args.optimizer,
            learning_rate=args.lr,
            weight_decay=args.wd,
            device=device,
            kernel_size=args.kernel_size,
            stride=args.stride,
            padding=args.padding,
            pooling_kernel=1,
            pooling_stride=1,
            activation=args.activation,
            activation_out=args.activation_out,
            pooling=args.pooling,
            verbose=args.verbose,
        )

    elif args.model.lower() == "scnn_autoencoder":
        from models.scnn_autoencoder import SpikingConvolutionalAutoencoder
        net = SpikingConvolutionalAutoencoder(
            input_width=width,
            input_height=height,
            input_channels=channels,
            conv2d_channels=args.conv_channels,
            hidden_sizes=args.hidden_sizes,
            dataset=args.dataset,
            loss=args.loss,
            optimizer=args.optimizer,
            learning_rate=args.lr,
            weight_decay=args.wd,
            device=device,
            kernel_size=args.kernel_size,
            stride=args.stride,
            padding=args.padding,
            pooling_kernel=1,  # not really supported due to lack of avgunpool
            pooling_stride=1,  # same
            pooling="avg",  # same
            pool_threshold=0.75,  # same
            activation=args.activation,
            activation_out=args.activation_out,
            steps=args.steps,
            threshold=args.threshold,
            decay=args.decay,
            verbose=args.verbose,
        )

    else:
        raise NotImplementedError(f"The model {args.model} is not implemented")

    # tell logger to watch model
    logger.watch(net.model)

    # run training and evaluation
    net.train_and_evaluate(
        train_loader=train_loader,
        val_loader=val_loader,
        epochs=args.epochs,
        epoch_batches=args.epoch_batches,
        load=args.load,
        model_name=args.model,
        metrics=args.metrics,
        key_metric=args.key_metric,
        goal=args.goal,
        eval_first=args.eval_first,
        logger=logger,
        checkpoints_dir=f"{logger.run.dir}/checkpoints",
        samplers=args.samplers,
        sample_freq=args.sample_freq,
    )
Example #6
from models.scnn_classifier import SpikingConvolutionalClassifier
import utils as u

# set important parameters
parser = u.get_argparser()
args = parser.parse_args()
args.loss = "mse"
args.dataset = "mnist"
args.hidden_sizes = [100]
args.conv_channels = "16, 32"
args.conv_channels = [int(item) for item in args.conv_channels.split(',')]
args.epoch_batches = 100
args.epochs = 5
args.lr = 0.005
args.batch_size = 20
args.steps = 20
args.seed = 3
args.samplers = [
    "plot_output_spikes", "plot_output_potential", "plot_cummulative_potential"
]
# choose the device for computation (GPU if available)
device = u.get_backend(args)

# make experiments reproducible
if args.seed:
    u.set_seed(args.seed)

# load dataset
train_loader, val_loader, (width, height, channels) = u.get_datasets(
    dataset=args.dataset,
    batch_size=args.batch_size,
Example #7
import torch

from utils import get_argparser, get_datasets
from models.vsc import VariationalSparseCoding

if __name__ == "__main__":
    parser = get_argparser('VSC Example')
    parser.add_argument('--alpha',
                        default=0.5,
                        type=float,
                        metavar='A',
                        help='value of spike variable (default: 0.5)')
    args = parser.parse_args()
    print('VSC Baseline Experiments\n')
    args.cuda = not args.no_cuda and torch.cuda.is_available()

    #Set reproducibility seed
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

    #Define device for training
    device = torch.device('cuda' if args.cuda else 'cpu')
    print(f'Using {device} device...')

    #Load datasets
    train_loader, test_loader, (width, height, channels) = get_datasets(
        args.dataset, args.batch_size, args.cuda)

    # Tune the learning rate (all learning rates used were between 0.001 and 0.01)
    vsc = VariationalSparseCoding(args.dataset, width, height, channels,
                                  args.hidden_size, args.latent_size, args.lr,
Example #8
import torch

from utils import get_argparser, get_datasets
from models.conv_vsc import ConvolutionalVariationalSparseCoding

if __name__ == "__main__":    
    parser = get_argparser('ConvVSC Example')
    parser.add_argument('--alpha', default=0.5, type=float, metavar='A',
                        help='value of spike variable (default: 0.5)')
    parser.add_argument('--kernel-size', type=str, default='32,32,64,64', metavar='HS',
                        help='kernel sizes, separated by commas (default: 32,32,64,64)')
    args = parser.parse_args()
    print('ConvVSC Baseline Experiments\n')
    args.cuda = not args.no_cuda and torch.cuda.is_available()

    #Set reproducibility seed
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

    #Define device for training
    device = torch.device('cuda' if args.cuda else 'cpu')
    print(f'Using {device} device...')
    
    #Load datasets
    train_loader, test_loader, (width, height, channels) = get_datasets(args.dataset,
                                                                        args.batch_size,
                                                                        args.cuda)
    
    # Tune the learning rate (all learning rates used were between 0.001 and 0.01)
    vsc = ConvolutionalVariationalSparseCoding(args.dataset, width, height, channels, 
                                  args.kernel_size, args.hidden_size, args.latent_size, 
Example #9
File: main.py Project: SKholkin/EGA
                    ega_state.pop_amount + 1)):
            elite_ones.append(population[inc_sorted_idx[-i]])
        inc_sorted_idx = inc_sorted_idx[0 - (int(
            config.get('selection', {}).get('elite_ratio', 0) *
            ega_state.pop_amount)):]
        population = [
            population[x] for x in range(len(population))
            if x not in inc_sorted_idx
        ]

    amount_of_out_desc = int(ega_state.gen_overlap * ega_state.pop_amount)
    amount_of_out_pop = int(
        (1 - ega_state.gen_overlap) * ega_state.pop_amount) - len(elite_ones)
    if amount_of_out_desc > len(descendants):
        return descendants
    final_desc = launch_selection(descendants, criterio, config,
                                  amount_of_out_desc)
    final_pop = launch_selection(population, criterio, config,
                                 amount_of_out_pop)

    return final_pop + final_desc + elite_ones


if __name__ == '__main__':
    parser = get_argparser()
    random.seed(10)
    args = parser.parse_args(args=sys.argv[1:])
    mean_criterio_averagemetr = Averagemeter()
    config = create_config(args.config)
    main_worker(config)
Example #10
def main():

    # Configure argparser
    argparser = get_argparser()

    # Parse the arguments
    args = argparser.parse_args()

    # Input files: sumstat file with rsids and snp info file that contains snp info for rsid
    input_sumstats = args.sumstats_input
    snp_info_file = args.snp_info_file

    # Output file
    output_file = args.output_file

    # Column names for lookup
    sumstat_rsid_colname = args.sumstat_rsid_colname
    snp_info_rsid_colname = args.snp_info_rsid_colname
    snp_info_chr_col = args.snp_info_chr_colname
    snp_info_pos_col = args.snp_info_pos_colname

    # Configure logging appropriate for verbosity
    configure_logging(3)

    # Read in SNP info file and check format
    logging.info("Reading snp info file: {0}".format(snp_info_file))
    snp_info = pd.read_csv(snp_info_file, sep="\t", dtype=object)

    # Check to make sure column names are present in snp info file
    if not valid_colnames(
            snp_info,
        [snp_info_rsid_colname, snp_info_pos_col, snp_info_chr_col],
            err_msg="Unable to find required colname in snp info file!"):
        raise IOError("Unable to find colnames in snp info file!")

    logging.info("Reading sumstats file: {0}".format(input_sumstats))
    sumstats_info = pd.read_csv(input_sumstats, sep="\t", dtype=object)

    # Check to make sure column names are present in sumstats file
    if not valid_colnames(
            sumstats_info, [sumstat_rsid_colname],
            err_msg="Unable to find required colname in sumstats file!"):
        raise IOError("Unable to find colnames in sumstats file!")

    # Subset snp info columns
    snp_info = snp_info[[
        snp_info_rsid_colname, snp_info_chr_col, snp_info_pos_col
    ]]

    logging.info("Merging snp information...")
    final_df = sumstats_info.merge(snp_info,
                                   how="inner",
                                   left_on=sumstat_rsid_colname,
                                   right_on=snp_info_rsid_colname)

    # Drop duplicate rsid column
    final_df.drop(labels=snp_info_rsid_colname, axis=1, inplace=True)

    # Remove 'chr' prefix from chromosome names
    logging.info("Fixing chr numbering...")
    final_df[snp_info_chr_col] = final_df[snp_info_chr_col].map(
        lambda x: x.lstrip("chr"))

    logging.info("Writing to output file: {0}".format(output_file))
    final_df.to_csv(
        output_file,
        sep="\t",
        index=False,
    )