Example #1
def cli(context, args):
    #this import is only available when you execute via cli
    import util
    util.set_logger(args.verbose)

    from resource_manager_common import constant
    credentials = context.aws.load_credentials()

    resources = util.get_resources(context)
    os.environ[c.ENV_DB_TABLE_CONTEXT] = resources[c.RES_DB_TABLE_CONTEXT]
    os.environ[c.ENV_LAMBDA_CONSUMER] = resources[c.RES_LAMBDA_FIFOCONSUMER]
    os.environ[c.ENV_LAMBDA_PRODUCER] = resources[c.RES_LAMBDA_FIFOPRODUCER]
    os.environ[c.ENV_AMOEBA] = resources[c.RES_AMOEBA]
    os.environ[c.ENV_VERBOSE] = str(args.verbose) if args.verbose else ""
    os.environ[c.ENV_REGION] = context.config.project_region
    os.environ[c.ENV_S3_STORAGE] = resources[c.RES_S3_STORAGE]
    os.environ[c.ENV_DEPLOYMENT_STACK_ARN] = resources[c.ENV_STACK_ID]
    os.environ["AWS_LAMBDA_FUNCTION_NAME"] = os.environ[c.ENV_LAMBDA_PRODUCER]
    os.environ[
        "AWS_ACCESS_KEY"] = args.aws_access_key if args.aws_access_key else credentials.get(
            args.profile if args.profile else
            context.config.user_default_profile, constant.ACCESS_KEY_OPTION)
    os.environ[
        "AWS_SECRET_KEY"] = args.aws_secret_key if args.aws_secret_key else credentials.get(
            args.profile if args.profile else
            context.config.user_default_profile, constant.SECRET_KEY_OPTION)

    print(
        query(type('obj', (object, ),
                   {c.ENV_STACK_ID: resources[c.ENV_STACK_ID]}),
              {"sql": args.sql},
              sync=True))
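
The AWS_ACCESS_KEY / AWS_SECRET_KEY assignments above repeat the same pair of ternaries in every cli() variant in this listing. A minimal sketch of how that lookup could be factored into one helper (the name _export_aws_keys is hypothetical and not part of the original util module):

import os

def _export_aws_keys(args, context, credentials, constant):
    # Fall back to the default profile when --profile is omitted, and to the
    # stored credentials when the keys are not passed on the command line.
    profile = args.profile if args.profile else context.config.user_default_profile
    os.environ["AWS_ACCESS_KEY"] = (args.aws_access_key if args.aws_access_key
                                    else credentials.get(profile, constant.ACCESS_KEY_OPTION))
    os.environ["AWS_SECRET_KEY"] = (args.aws_secret_key if args.aws_secret_key
                                    else credentials.get(profile, constant.SECRET_KEY_OPTION))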
Example #2
def cli(context, args):
    util.set_logger(args.verbose)

    from resource_manager_common import constant
    credentials = context.aws.load_credentials()

    resources = util.get_resources(context)
    os.environ[c.ENV_SHARED_BUCKET] = context.config.configuration_bucket_name
    os.environ[c.ENV_S3_STORAGE] = resources[c.RES_S3_STORAGE]
    os.environ[c.ENV_DB_TABLE_CONTEXT] = resources[c.RES_DB_TABLE_CONTEXT]
    os.environ[c.ENV_VERBOSE] = str(args.verbose) if args.verbose else ""
    os.environ[c.ENV_SERVICE_ROLE] = resources[c.RES_SERVICE_ROLE]
    os.environ[c.ENV_REGION] = context.config.project_region
    os.environ[c.ENV_DEPLOYMENT_STACK_ARN] = resources[c.ENV_STACK_ID]
    os.environ[c.ENV_EVENT_EMITTER] = resources[c.RES_EVENT_EMITTER]
    os.environ[c.IS_LOCALLY_RUN] = "True"
    os.environ[
        "AWS_ACCESS_KEY"] = args.aws_access_key if args.aws_access_key else credentials.get(
            args.profile if args.profile else
            context.config.user_default_profile, constant.ACCESS_KEY_OPTION)
    os.environ[
        "AWS_SECRET_KEY"] = args.aws_secret_key if args.aws_secret_key else credentials.get(
            args.profile if args.profile else
            context.config.user_default_profile, constant.SECRET_KEY_OPTION)
    main({c.ENV_STACK_ID: resources[c.ENV_STACK_ID]},
         type('obj', (object, ), {}))
Example #3
def cli(context, args):
    util.set_logger(args.verbose)

    from resource_manager_common import constant
    credentials = context.aws.load_credentials()

    resources = util.get_resources(context)
    os.environ[c.ENV_REGION] = context.config.project_region
    os.environ[c.ENV_VERBOSE] = str(args.verbose) if args.verbose else ""
    os.environ[c.IS_LOCALLY_RUN] = "True"
    os.environ[
        "AWS_ACCESS_KEY"] = args.aws_access_key if args.aws_access_key else credentials.get(
            args.profile if args.profile else
            context.config.user_default_profile, constant.ACCESS_KEY_OPTION)
    os.environ[
        "AWS_SECRET_KEY"] = args.aws_secret_key if args.aws_secret_key else credentials.get(
            args.profile if args.profile else
            context.config.user_default_profile, constant.SECRET_KEY_OPTION)
    eval(args.function)(
        {
            'RequestType': args.event_type,
            c.ENV_STACK_ID: resources[c.ENV_STACK_ID]
        },
        type('obj', (object, ),
             {'function_name': resources[c.RES_LAMBDA_FIFOCONSUMER]}))
Example #4
    def __init__(self, dir):
        """
        Constructor 
        Args: 
            dir (string) directory of the experiment to be run
        """
        super(PermutationTest, self).__init__(dir)

        # Set the logger
        set_logger(os.path.join(self.dir, 'experiment.log'),
                   level=logging.INFO,
                   console=True)

        # unpack parameters
        self.ppi_matrices = {
            name: np.load(file)
            for name, file in self.params.ppi_matrices.items()
        }
        self.exclude = set(self.params.exclude) if hasattr(
            self.params, "exclude") else set()

        # Log title
        logging.info("Metric Significance of Diseases in the PPI Network")
        logging.info("Sabri Eyuboglu  -- SNAP Group")
        logging.info("======================================")
        logging.info("Loading Disease Associations...")
        self.diseases = load_diseases(self.params.diseases_path,
                                      self.params.disease_subset,
                                      exclude_splits=['none'])

        self.metric_fn = globals()[self.params.metric_fn]
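
Examples #4, #14, #19 and #25 call set_logger(path, level=logging.INFO, console=True) and then log through the logging module directly, which suggests the helper configures the root logger with a file handler plus an optional console handler. A minimal sketch under that assumption (the real util implementation in these repositories may differ):

import logging

def set_logger(log_path, level=logging.INFO, console=True):
    # Configure the root logger once: write to log_path and, optionally, to the console.
    logger = logging.getLogger()
    logger.setLevel(level)
    if not logger.handlers:
        file_handler = logging.FileHandler(log_path)
        file_handler.setFormatter(
            logging.Formatter("%(asctime)s:%(levelname)s: %(message)s"))
        logger.addHandler(file_handler)
        if console:
            stream_handler = logging.StreamHandler()
            stream_handler.setFormatter(logging.Formatter("%(message)s"))
            logger.addHandler(stream_handler)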
Example #5
def cli(context, args):
    util.set_logger(args.verbose)

    from resource_manager_common import constant
    credentials = context.aws.load_credentials()

    resources = util.get_resources(context)
    os.environ[c.ENV_DB_TABLE_CONTEXT] = resources[c.RES_DB_TABLE_CONTEXT]
    os.environ[c.ENV_DEPLOYMENT_STACK_ARN] = resources[
        c.RES_LAMBDA_FIFOCONSUMER]
    os.environ[c.ENV_LAMBDA_PRODUCER] = resources[c.RES_LAMBDA_FIFOPRODUCER]
    os.environ[c.ENV_AMOEBA] = resources[c.RES_AMOEBA]
    os.environ[c.ENV_VERBOSE] = str(args.verbose) if args.verbose else ""
    os.environ[c.ENV_REGION] = context.config.project_region
    os.environ[c.ENV_S3_STORAGE] = resources[c.RES_S3_STORAGE]
    os.environ[
        "AWS_ACCESS_KEY"] = args.aws_access_key if args.aws_access_key else credentials.get(
            args.profile if args.profile else
            context.config.user_default_profile, constant.ACCESS_KEY_OPTION)
    os.environ[
        "AWS_SECRET_KEY"] = args.aws_secret_key if args.aws_secret_key else credentials.get(
            args.profile if args.profile else
            context.config.user_default_profile, constant.SECRET_KEY_OPTION)

    print(
        eval(args.function)(type('obj', (object, ),
                                 {c.ENV_STACK_ID: resources[c.ENV_STACK_ID]})))
Example #6
def cli(context, args):
    util.set_logger(args.verbose)

    from resource_manager_common import constant
    credentials = context.aws.load_credentials()

    resources = util.get_resources(context)
    os.environ[c.ENV_DB_TABLE_CONTEXT] = resources[c.RES_DB_TABLE_CONTEXT]
    os.environ[c.ENV_VERBOSE] = str(args.verbose) if args.verbose else ""
    os.environ['err'] = str(
        args.erroneous_metrics) if args.erroneous_metrics else ""
    os.environ[c.ENV_REGION] = context.config.project_region
    os.environ["AWS_LAMBDA_FUNCTION_NAME"] = resources[
        c.RES_LAMBDA_FIFOPRODUCER]
    os.environ[
        "AWS_ACCESS_KEY"] = args.aws_access_key if args.aws_access_key else credentials.get(
            args.profile if args.profile else
            context.config.user_default_profile, constant.ACCESS_KEY_OPTION)
    os.environ[
        "AWS_SECRET_KEY"] = args.aws_secret_key if args.aws_secret_key else credentials.get(
            args.profile if args.profile else
            context.config.user_default_profile, constant.SECRET_KEY_OPTION)
    os.environ[c.ENV_LAMBDA_PRODUCER] = resources[c.RES_LAMBDA_FIFOPRODUCER]
    os.environ[c.ENV_DEPLOYMENT_STACK_ARN] = resources[c.ENV_STACK_ID]
    generate_threads(resources[c.RES_LAMBDA_FIFOPRODUCER], args.threads,
                     args.iterations_per_thread, args.events_per_iteration,
                     args.sleep_duration_between_jobs, args.use_lambda,
                     args.event_type, args.sensitivity_type,
                     args.compression_type)
Example #7
def cli(context, args):
    util.set_logger(args.verbose)

    from resource_manager_common import constant
    credentials = context.aws.load_credentials()

    resources = util.get_resources(context)
    os.environ[c.ENV_REGION] = context.config.project_region
    os.environ[c.ENV_DB_TABLE_CONTEXT] = resources[c.RES_DB_TABLE_CONTEXT]
    os.environ["AWS_ACCESS_KEY"] = args.aws_access_key if args.aws_access_key else credentials.get(args.profile if args.profile else context.config.user_default_profile, constant.ACCESS_KEY_OPTION)
    os.environ["AWS_SECRET_KEY"] = args.aws_secret_key if args.aws_secret_key else credentials.get(args.profile if args.profile else context.config.user_default_profile, constant.SECRET_KEY_OPTION)
    print(eval(args.function)({}, args.param))
Example #8
def cli(context, args):
    #this import is only available when you execute via cli
    import util
    util.set_logger(args.verbose)

    from resource_manager_common import constant
    credentials = context.aws.load_credentials()

    resources = util.get_resources(context)        
    os.environ[c.ENV_DB_TABLE_CONTEXT] = resources[c.RES_DB_TABLE_CONTEXT]   
    os.environ[c.ENV_REGION] = context.config.project_region
    os.environ["AWS_ACCESS_KEY"] = args.aws_access_key if args.aws_access_key else credentials.get(args.profile if args.profile else context.config.user_default_profile, constant.ACCESS_KEY_OPTION)
    os.environ["AWS_SECRET_KEY"] = args.aws_secret_key if args.aws_secret_key else credentials.get(args.profile if args.profile else context.config.user_default_profile, constant.SECRET_KEY_OPTION)
    print(eval(args.function)(type('obj', (object,), {c.ENV_STACK_ID: resources[c.ENV_STACK_ID]})))
Example #9
 def __init__(
     self,
     filename_prefix="output",
     filename_suffix="raw",
     rate=44100,
     channels=2,
     card="default",
     log_level=logging.INFO,
 ):
     threading.Thread.__init__(self)
     self.daemon = True
     self.filename_prefix = filename_prefix
     self.filename_suffix = filename_suffix
     self.filename = None
     self.rate = rate
     self.channels = channels
     self.card = card
     self.recording = False
     self.finish = False
     # set logger
     self.logger = util.set_logger(level=log_level)
     # setup input
     self.input = alsaaudio.PCM(alsaaudio.PCM_CAPTURE, alsaaudio.PCM_NONBLOCK, self.card)
     self.input.setchannels(self.channels)
     self.input.setrate(self.rate)
     self.input.setformat(alsaaudio.PCM_FORMAT_S16_LE)
     self.input.setperiodsize(self.PERIOD_SIZE)
     self.file = None
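
Examples #9 through #13, #15 and #17 call util.set_logger(level=log_level) and keep the returned logger on the instance, so that variant must return a logger object rather than only configure the root logger. A minimal console-only sketch of such a helper (an assumption; the actual implementation is not shown in this listing):

import logging

def set_logger(level=logging.INFO, name=None):
    # Return a configured logger that writes to the console.
    logger = logging.getLogger(name)
    logger.setLevel(level)
    if not logger.handlers:
        handler = logging.StreamHandler()
        handler.setFormatter(
            logging.Formatter("%(asctime)s %(name)s %(levelname)s: %(message)s"))
        logger.addHandler(handler)
    return logger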
Example #10
 def __init__(self,
              filename_prefix='output',
              filename_suffix='h264',
              log_level=logging.INFO,
              format='h264',
              horizontal_resolution=640,
              vertical_resolution=480,
              rotation=0,
              snapshot_filename="snapshot.png"
              ):
     threading.Thread.__init__(self)
     self.daemon = True
     self.filename_prefix = filename_prefix
     self.filename_suffix = filename_suffix
     self.filename = "%s.%s" % (self.filename_prefix, self.filename_suffix)
     self.recording = False
     self.format = format
     self.finish = False
     self.snapshot_filename = snapshot_filename
     # initialize camera
     self.horizontal_resolution = horizontal_resolution
     self.vertical_resolution = vertical_resolution
     self.rotation = rotation
     self.camera = picamera.PiCamera()
     self.camera.resolution = (self.horizontal_resolution, self.vertical_resolution)
     self.camera.rotation = self.rotation
     # set logger
     self.logger = util.set_logger(level=log_level)
Example #11
    def __init__(self, endpoint, log_level=logging.INFO, port=8883,
                 keepalive=60):
        # set logger
        self.logger = util.set_logger(level=log_level)

        # tls
        self.ca_file = None
        self.client_cert_file = None
        self.client_key_file = None

        self.client_id = ''.join([random.choice(string.ascii_letters + string.digits) for n in range(16)])
        self._client = mqtt.Client(client_id=self.client_id)
        self.endpoint = endpoint
        self.port = port
        self.keepalive = keepalive

        # set callbacks
        self._client.on_connect = self.mqtt_on_connect
        self._client.on_disconnect = self.mqtt_on_disconnect
        self._client.on_message = self.mqtt_on_message
        self._client.on_publish = self.mqtt_on_publish
        self._client.on_subscribe = self.mqtt_on_subscribe
        self._client.on_log = self.mqtt_on_log

        # subscriptions
        self.subscriptions = []
Example #12
    def __init__(self, pin, name='sensor', log_level=logging.INFO):
        threading.Thread.__init__(self)
        self.name = name
        self.pin = pin
        self.finish = False
        self.daemon = True
        self.logger = util.set_logger(level=log_level)
        self.last_reading = None

        GPIO.setwarnings(False)
        GPIO.setmode(GPIO.BCM)
        GPIO.setup(self.pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
Example #13
    def __init__(self, pin, name='relay', pulse_duration=1, log_level=logging.INFO):
        threading.Thread.__init__(self)
        self.name = name
        self.pin = pin
        self.pulse_duration = pulse_duration
        self.finish = False
        self.daemon = True
        self.logger = util.set_logger(level=log_level)
        self.last_reading = None

        # initialize relay
        wiringpi.wiringPiSetup()
Example #14
    def __init__(self, dir):
        super(CodiseaseProbExp, self).__init__(dir)

        # Set the logger

        set_logger(os.path.join(self.dir, 'experiment.log'),
                   level=logging.INFO,
                   console=True)

        # Log title
        logging.info("Co-disease probability in the PPI Network")
        logging.info("Sabri Eyuboglu  -- SNAP Group")
        logging.info("======================================")

        logging.info("Loading Network...")
        self.ppi_networkx, self.ppi_adj, self.protein_to_node = load_network(
            self.params.ppi_network)

        logging.info("Loading Disease Associations...")
        self.diseases = load_diseases(self.params.diseases_path,
                                      self.params.disease_subset)

        # unpack params
        self.ppi_matrices = {
            name: np.load(file)
            for name, file in self.params.ppi_matrices.items()
        }
        self.top_k = self.params.top_k
        self.n_buckets = self.params.n_buckets
        self.window_length = self.params.window_length
        self.smooth = self.params.smooth
        self.plots = self.params.plots

        if hasattr(self.params, 'codisease_matrix'):
            logging.info("Loading Codisease Matrix...")
            self.codisease_matrix = np.load(self.params.codisease_matrix)
        else:
            logging.info("Building Codisease Matrix...")
            self.codisease_matrix = self.build_codisease_matrix()
Example #15
    def __init__(self, pin, name='pir', log_level=logging.INFO):
        threading.Thread.__init__(self)
        self.name = name
        self.pin = pin
        self.finish = False
        self.daemon = True
        self.logger = util.set_logger(level=log_level)
        self.last_reading = None

        # for GPIO numbering, choose BCM
        GPIO.setmode(GPIO.BCM)
        GPIO.setwarnings(False)
        # initialize motion sensor
        GPIO.setup(self.pin, GPIO.IN, GPIO.PUD_DOWN)
Example #16
    def __init__(self):
        self.movie_df = bf.get_dataframe()
        self.episode_df = ef.get_episodes()
        self.df = None

        self.logger = set_logger('recommendation')
        self.matrix = None
        self.random_array = None

        self.create_df()
        self.preproccessing()
        self.clustering()
        self.create_matrix()
        self.create_array()
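
Examples #16, #26 and #29 pass only a name (set_logger('recommendation'), set_logger('elastic'), set_logger('imdb data')), and the KMRDataset dataloader snippet calls set_logger(name='dataloader', file_path=LOGGER_PATH). A named-logger sketch along those lines (the file_path handling is an assumption):

import logging

def set_logger(name, file_path=None, level=logging.INFO):
    # Named logger with console output and an optional log file.
    logger = logging.getLogger(name)
    logger.setLevel(level)
    if not logger.handlers:
        formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s: %(message)s")
        console = logging.StreamHandler()
        console.setFormatter(formatter)
        logger.addHandler(console)
        if file_path is not None:
            file_handler = logging.FileHandler(file_path)
            file_handler.setFormatter(formatter)
            logger.addHandler(file_handler)
    return logger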
Example #17
    def __init__(self, name, pin, log_level=logging.INFO):
        threading.Thread.__init__(self)
        self.name = name
        self.pin = pin
        self.finish = False
        self.daemon = True

        # set logger
        self.logger = util.set_logger(level=log_level)

        # for GPIO numbering, choose BCM
        GPIO.setmode(GPIO.BCM)
        GPIO.setwarnings(False)
        # set pin
        GPIO.setup(self.pin, GPIO.OUT)
        GPIO.output(self.pin, False)  # on start , turn LED off
Example #18
def _zh_experiment0_helper(vec_type, num_units, projection, num_hidden_layers, 
        exp_name, relation_list, label_vector, label_alphabet):
    params = [vec_type, str(num_units), projection, str(num_hidden_layers)] 
    file_name = '%s-%s' % (experiment_name, '-'.join(params))
    json_file = util.set_logger(file_name)

    word2vec_ff = util._get_zh_word2vec_ff(num_units, vec_type, projection)
    data_matrix_pair = word2vec_ff(relation_list) 

    learning_rate = 0.001
    lr_smoother = 0.01

    num_folds = 7
    for fold_index in xrange(num_folds):
        data_triplet = get_xvalidated_datatriplet(data_matrix_pair, label_vector, label_alphabet,
                num_folds, fold_index)
        num_reps = 15
        num_hidden_units_list = [50, 200, 300, 400] 
        for num_hidden_units in num_hidden_units_list:
            rng = np.random.RandomState(100)
            X_list = [T.matrix(), T.matrix()]
            net, layers = make_multilayer_net(rng, 
                    n_in_list=data_triplet.input_dimensions(),
                    X_list=X_list, Y=T.lvector(), use_sparse=False,
                    num_hidden_layers=num_hidden_layers, 
                    num_hidden_units=num_hidden_units, 
                    num_output_units=data_triplet.output_dimensions()[0],
                    output_activation_fn=T.nnet.softmax,
                    dropout=False)
            trainer = AdagradTrainer(net, net.crossentropy, 
                    learning_rate, lr_smoother, data_triplet, _make_givens)
            for rep in xrange(num_reps):
                random_seed = rep
                rng = np.random.RandomState(random_seed)
                net.reset(rng)
                trainer.reset()
                minibatch_size = np.random.randint(20, 60)
                n_epochs = 50

                start_time = timeit.default_timer()
                best_iter, best_dev_acc, best_test_acc = \
                        trainer.train_minibatch_triplet(minibatch_size, n_epochs)
                end_time = timeit.default_timer()
                print 'Training process takes %s seconds' % (end_time - start_time)
                print 'Best iteration is %s;' % best_iter + \
                        'Best dev accuracy = %s' % best_dev_acc + \
                        'Test accuracy =%s' % best_test_acc
Example #19
parser.add_argument('--experiment_dir',
                    default='experiments/base_model',
                    help="Directory containing params.json")

if __name__ == '__main__':
    # Load the parameters from the experiment params.json file in model_dir
    args = parser.parse_args()
    json_path = os.path.join(args.experiment_dir, 'params.json')
    assert os.path.isfile(
        json_path), "No json configuration file found at {}".format(json_path)
    params = Params(json_path)
    params.update(json_path)

    # Set the logger
    set_logger(os.path.join(args.experiment_dir, 'experiment.log'),
               level=logging.INFO,
               console=True)

    # Log Title
    logging.info("DPP-Diff Generator")
    logging.info("Sabri Eyuboglu  -- SNAP Group")
    logging.info("======================================")

    prepare_sns(sns, params)

    diseases_dict = load_diseases(params.diseases_path, params.disease_subset)

    method_to_scores = {}
    for method_name, method_exp_dir in params.method_exp_dirs.items():
        method_to_scores[method_name] = {}
        with open(os.path.join(method_exp_dir, 'metrics.csv'),
Example #20
def main():
    """Create the model and start the training."""

    if not os.path.exists(args.snapshot_dir):
        os.makedirs(args.snapshot_dir)

    logger = util.set_logger(args.snapshot_dir, args.log_file, args.debug)
    logger.info('start with arguments %s', args)

    h, w = map(int, args.input_size.split(','))
    input_size = (h, w)
    lscale, hscale = map(float, args.train_scale.split(','))
    train_scale = (lscale, hscale)

    cudnn.enabled = True

    # Create network.
    model = Res_Deeplab(num_classes=args.num_classes)

    #saved_state_dict = torch.load(args.restore_from)
    #new_params = model.state_dict().copy()
    #for i in saved_state_dict:
    #    #Scale.layer5.conv2d_list.3.weight
    #    i_parts = i.split('.')
    #    # print i_parts
    #    if not args.num_classes == 21 or not i_parts[1]=='layer5':
    #        new_params['.'.join(i_parts[1:])] = saved_state_dict[i]
    model_urls = {
        'resnet18':
        'https://download.pytorch.org/models/resnet18-5c106cde.pth',
        'resnet34':
        'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
        'resnet50':
        'https://download.pytorch.org/models/resnet50-19c8e357.pth',
        'resnet101':
        'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
        'resnet152':
        'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
    }
    saved_state_dict = torch.utils.model_zoo.load_url(model_urls['resnet101'])
    # coco pretrained parameters:
    # saved_state_dict = torch.load(args.restore_from)
    new_params = model.state_dict().copy()
    for i in saved_state_dict:
        #Scale.layer5.conv2d_list.3.weight
        i_parts = str(i).split('.')
        # print i_parts
        if not i_parts[0] == 'fc':
            new_params['.'.join(i_parts[0:])] = saved_state_dict[i]
    model.load_state_dict(new_params)
    #model.float()
    model.eval()  # use_global_stats = True
    #model.train()
    device = torch.device("cuda:" + str(args.gpu))
    model.to(device)

    cudnn.benchmark = True

    trainloader = data.DataLoader(GTA5DataSet(args.data_dir,
                                              args.data_list,
                                              max_iters=args.num_steps *
                                              args.batch_size,
                                              crop_size=input_size,
                                              train_scale=train_scale,
                                              scale=args.random_scale,
                                              mirror=args.random_mirror,
                                              mean=IMG_MEAN,
                                              std=IMG_STD),
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  num_workers=5,
                                  pin_memory=args.pin_memory)
    optimizer = optim.SGD([{
        'params': get_1x_lr_params_NOscale(model),
        'lr': args.learning_rate
    }, {
        'params': get_10x_lr_params(model),
        'lr': 10 * args.learning_rate
    }],
                          lr=args.learning_rate,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)
    optimizer.zero_grad()

    interp = nn.Upsample(size=input_size, mode='bilinear', align_corners=True)

    for i_iter, batch in enumerate(trainloader):
        images, labels, _, _ = batch
        images = images.to(device)
        labels = labels.long().to(device)

        optimizer.zero_grad()
        adjust_learning_rate(optimizer, i_iter)
        pred = interp(model(images))
        loss = loss_calc(pred, labels)
        loss.backward()
        optimizer.step()

        # print('iter = ', i_iter, 'of', args.num_steps,'completed, loss = ', loss.data.cpu().numpy())
        logger.info('iter = {} of {} completed, loss = {:.4f}'.format(
            i_iter, args.num_steps,
            loss.data.cpu().numpy()))

        if i_iter >= args.num_steps - 1:
            print('save model ...')
            torch.save(
                model.state_dict(),
                osp.join(args.snapshot_dir,
                         'VOC12_scenes_' + str(args.num_steps) + '.pth'))
            break

        if i_iter % args.save_pred_every == 0 and i_iter != 0:
            print('taking snapshot ...')
            torch.save(
                model.state_dict(),
                osp.join(args.snapshot_dir,
                         'VOC12_scenes_' + str(i_iter) + '.pth'))

    end = timeit.default_timer()
    print(end - start, 'seconds')
Example #21
    df = pd.read_csv(os.path.join(path, 'rates.csv'))[:n_sample]
    train_df, val_df = train_test_split(df, test_size=.2, random_state=42)
    user_to_index = {user: id for id, user in enumerate(df['user'].unique())}
    movie_to_index = {
        movie: id
        for id, movie in enumerate(df['movie'].unique())
    }

    return train_df, val_df, user_to_index, movie_to_index


train_df, val_df, user_to_index, movie_to_index = read_data(DATA_DIR)

if __name__ == '__main__':

    logger = set_logger(name='dataloader', file_path=LOGGER_PATH)
    train_df, val_df, user_to_index, movie_to_index = read_data(DATA_DIR)
    train_dataset = KMRDataset(train_df, user_to_index, movie_to_index)
    val_dataset = KMRDataset(val_df, user_to_index, movie_to_index)

    train_dataloader = DataLoader(train_dataset, batch_size=64, shuffle=True)
    val_dataloader = DataLoader(val_dataset, batch_size=16, shuffle=True)

    logger.info(f"Load data from {DATA_DIR}")
    logger.info(f"Train_df: {train_df.shape}, {train_dataset.data[0].size()}")
    logger.info(
        f"Validation df: {val_df.shape}, {val_dataset.data[0].size()}")

    save_pickle(os.path.join(DATA_LOADER_DIR, 'dataloader_train.pickle'),
                train_dataloader)
    save_pickle(os.path.join(DATA_LOADER_DIR, 'dataloader_val.pickle'),
Example #22
%load_ext autoreload
%autoreload 2
"""

import json
from datetime import datetime
from typing import Dict
from pathlib import Path
import torch
import torch.optim as optim
import matplotlib.pyplot as plt
from data_processor import ConllDataSet
import mst_lstm
from util import set_logger

logger = set_logger("train.py")
torch.manual_seed(0)

def train(config_dict:Dict):

    # Directory for storing result
    result_dir_path = Path("result",str(datetime.now()))
    result_dir_path.mkdir()
    # Store the config in json
    config_path = result_dir_path / "config.json"
    with config_path.open(mode="w") as fp:
        json.dump(config_dict, fp)

    # Read data
    train_path = Path("data","en-universal-train.conll")
    train_data = ConllDataSet(conll_path=train_path,
Example #23
def main():
    """Create the model and start the evaluation process."""
    device = torch.device("cuda:" + str(args.gpu))

    if not os.path.exists(args.save):
        os.makedirs(args.save)

    logger = util.set_logger(args.save, args.log_file, args.debug)
    logger.info('start with arguments %s', args)

    x_num = 0

    with open(args.data_list) as f:
        for _ in f.readlines():
            x_num = x_num + 1

    sys.path.insert(0, 'dataset/helpers')
    if args.data_src == 'gta' or args.data_src == 'cityscapes':
        from labels import id2label, trainId2label
    elif args.data_src == 'synthia':
        from labels_cityscapes_synthia import id2label, trainId2label
    #
    label_2_id = 255 * np.ones((256, ))
    for l in id2label:
        if l in (-1, 255):
            continue
        label_2_id[l] = id2label[l].trainId
    id_2_label = np.array(
        [trainId2label[_].id for _ in trainId2label if _ not in (-1, 255)])
    valid_labels = sorted(set(id_2_label.ravel()))
    scorer = ScoreUpdater(valid_labels, args.num_classes, x_num, logger)
    scorer.reset()

    if args.model == 'DeeplabRes':
        model = Res_Deeplab(num_classes=args.num_classes)
    # elif args.model == 'DeeplabVGG':
    #     model = DeeplabVGG(num_classes=args.num_classes)
    #     if args.restore_from == RESTORE_FROM:
    #         args.restore_from = RESTORE_FROM_VGG

    if args.restore_from[:4] == 'http':
        saved_state_dict = model_zoo.load_url(args.restore_from)
        new_params = model.state_dict().copy()
        for i in saved_state_dict:
            # Scale.layer5.conv2d_list.3.weight
            i_parts = str(i).split('.')
            # print i_parts
            if not i_parts[0] == 'fc':
                new_params['.'.join(i_parts[0:])] = saved_state_dict[i]
    else:
        loc = "cuda:" + str(args.gpu)
        saved_state_dict = torch.load(args.restore_from, map_location=loc)
        new_params = saved_state_dict.copy()
    model.load_state_dict(new_params)
    #model.train()
    model.eval()
    model.to(device)

    testloader = data.DataLoader(GTA5TestDataSet(args.data_dir,
                                                 args.data_list,
                                                 test_scale=1.0,
                                                 test_size=(1024, 512),
                                                 mean=IMG_MEAN,
                                                 std=IMG_STD,
                                                 scale=False,
                                                 mirror=False),
                                 batch_size=1,
                                 shuffle=False,
                                 pin_memory=True)

    test_scales = [float(_) for _ in str(args.test_scale).split(',')]

    h, w = map(int, args.test_image_size.split(','))
    if version.parse(torch.__version__) >= version.parse('0.4.0'):
        interp = nn.Upsample(size=(h, w), mode='bilinear', align_corners=True)
    else:
        interp = nn.Upsample(size=(h, w), mode='bilinear')

    test_image_size = (h, w)
    mean_rgb = IMG_MEAN[::-1].copy()
    std_rgb = IMG_STD[::-1].copy()
    with torch.no_grad():
        for index, batch in enumerate(testloader):
            image, label, _, name = batch
            img = image.clone()
            num_scales = len(test_scales)
            # output_dict = {k: [] for k in range(num_scales)}
            for scale_idx in range(num_scales):
                if version.parse(torch.__version__) > version.parse('0.4.0'):
                    image = F.interpolate(image,
                                          scale_factor=test_scales[scale_idx],
                                          mode='bilinear',
                                          align_corners=True)
                else:
                    test_size = (int(h * test_scales[scale_idx]),
                                 int(w * test_scales[scale_idx]))
                    interp_tmp = nn.Upsample(size=test_size,
                                             mode='bilinear',
                                             align_corners=True)
                    image = interp_tmp(img)
                if args.model == 'DeeplabRes':
                    output2 = model(image.to(device))
                    coutput = interp(output2).cpu().data[0].numpy()
                if args.test_flipping:
                    output2 = model(
                        torch.from_numpy(
                            image.numpy()[:, :, :, ::-1].copy()).to(device))
                    coutput = 0.5 * (
                        coutput +
                        interp(output2).cpu().data[0].numpy()[:, :, ::-1])
                if scale_idx == 0:
                    output = coutput.copy()
                else:
                    output += coutput

            output = output / num_scales
            output = output.transpose(1, 2, 0)
            output = np.asarray(np.argmax(output, axis=2), dtype=np.uint8)
            pred_label = output.copy()
            label = label_2_id[np.asarray(label.numpy(), dtype=np.uint8)]
            scorer.update(pred_label.flatten(), label.flatten(), index)

            output_col = colorize_mask(output)
            output = Image.fromarray(output)

            name = name[0].split('/')[-1]
            output.save('%s/%s' % (args.save, name))
            output_col.save('%s/%s_color.png' %
                            (args.save, name.split('.')[0]))
Example #24
        json_path), "No json configuration file found at {}".format(json_path)
    params = util.Params(json_path)

    # use GPU if available
    params.cuda = torch.cuda.is_available()
    if params.ngpu > 0 and params.cuda: params.device = torch.device('cuda')
    else: params.device = torch.device('cpu')

    print(params.device)

    # Set the random seed for reproducible experiments
    torch.manual_seed(42)
    if params.cuda: torch.cuda.manual_seed(42)

    # Set the logger
    util.set_logger(os.path.join(args.model_dir, 'train.log'))

    # Create the input data pipeline
    logging.info("Loading the datasets...")

    # fetch dataloaders
    train_dl = data_loader.fetch_dataloader(args.data_dir,
                                            'train',
                                            params,
                                            shuffle=True)
    val_dl = data_loader.fetch_dataloader(args.data_dir,
                                          'valid',
                                          params,
                                          shuffle=False)

    logging.info("- done.")
Example #25
    def __init__(self, dir):
        """ Initialize the disease protein prediction experiment 
        Args: 
            dir (string) The directory where the experiment should be run
        """
        super(DPPExperiment, self).__init__(dir)

        # Set the logger
        set_logger(os.path.join(args.dir, 'experiment.log'), level=logging.INFO, console=True)

        # Log Title 
        logging.info("Disease Protein Prediction in the PPI Network")
        logging.info("Sabri Eyuboglu  -- SNAP Group")
        logging.info("======================================")

        # Load data from params file
        logging.info("Loading PPI Network...")
        self.ppi_networkx, self.ppi_network_adj, self.protein_to_node = load_network(self.params.ppi_network)
        self.node_to_protein = {node: protein for protein, node in self.protein_to_node.items()}
        logging.info("Loading Disease Associations...")
        self.diseases_dict = load_diseases(self.params.diseases_path, self.params.disease_subset, ['none'])

        # Load method specific data 
        # TODO: Build class for each method 
        if(self.params.method == "ppi_matrix"):
            logging.info("Loading PPI Matrix...")
            self.ppi_matrix = np.load(self.params.ppi_matrix)
            # normalize columns of ppi_matrix
            if(self.params.normalize):
                if hasattr(self.params, "norm_type"):
                    if self.params.norm_type == "frac":
                        self.ppi_matrix = self.ppi_matrix / np.sum(self.ppi_matrix, 
                                                                   axis=0)
                    elif self.params.norm_type == "zscore":
                        self.ppi_matrix = (self.ppi_matrix - np.mean(self.ppi_matrix, axis=0)) / np.std(self.ppi_matrix, axis=0)
                else:
                    self.ppi_matrix = (self.ppi_matrix - np.mean(self.ppi_matrix, axis=0)) / np.std(self.ppi_matrix, axis=0)
         
            # zero out the diagonal
            np.fill_diagonal(self.ppi_matrix, 0)  

        elif (self.params.method == 'lr'):
            logging.info("Loading Feature Matrices...")
            self.feature_matrices = []
            for features_filename in self.params.features:
                self.feature_matrices.append(
                    build_embedding_feature_matrix(self.protein_to_node, 
                                                   features_filename))
        elif (self.params.method == 'l2_rw'):
            self.method = L2RandomWalk(self.params)

        elif (self.params.method == 'pathway_expansion'):
            self.method = PathwayExpansion(self.params, 
                                           self.ppi_networkx, 
                                           self.ppi_network_adj)
        
        elif (self.params.method == "learned_cn"):
            self.method = LearnedCN(self.dir,
                                    self.params,
                                    self.ppi_network_adj,
                                    self.diseases_dict,
                                    self.protein_to_node)

        elif (self.params.method == 'gcn'):
            self.method = GCN(self.params, self.ppi_network_adj)
Example #26
from db.factory import BasicFactory
from elasticsearch import Elasticsearch, helpers, exceptions
from util import get_params, set_logger
import json

bf = BasicFactory()
config = get_params()
logger = set_logger('elastic')

es = Elasticsearch(hosts=[{
    'host': config['elastic']['host'],
    'port': config['elastic']['port']
}])


class Elastic:
    INDEX = config['elastic']['index']

    def search(self, name):
        """
        Autocomplete for movies

        :param str name: prefix from search
        :return: movies
        :rtype: dict
        """
        message = {'status': False}
        query = {
            'suggest': {
                'movie': {
                    'prefix': name,
Example #27
import store
import prep

# Twitter
consumer_key = os.environ['CONSUMER_KEY']
consumer_secret = os.environ['CONSUMER_SECRET']
access_token = os.environ['ACCESS_TOKEN']
access_token_secret = os.environ['ACCESS_TOKEN_SECRET']

# Set location
# Bounding boxes for geolocations
# Online-Tool to create boxes (c+p as raw CSV): http://boundingbox.klokantech.com/
GEOBOX_NETHERLANDS = [3.0761845666, 51.0227615064, 7.288878522, 53.9033167283]

# Set log
util.set_logger(True)


def parse_args():
    parser = argparse.ArgumentParser(description='BI Specialist Assignment',
                                     formatter_class=RawTextHelpFormatter)
    parser.add_argument('-t',
                        '--track',
                        type=str,
                        help='python main.py --track word-to-track')

    return parser.parse_args()


if __name__ == '__main__':
    args = parse_args()
Example #28
    config = util.load_module(cnf).config

    if weights_from is None:
        weights_from = config.weights_file
    else:
        weights_from = str(weights_from)

    files = data.get_image_files(config.get('train_dir'))
    names = data.get_names(files)
    labels = data.get_labels(names).astype(np.float32)

    net = create_net(config)

    try:
        net.load_params_from(weights_from)
        print("loaded weights from {}".format(weights_from))
    except IOError:
        print("couldn't load weights starting from scratch")
    print("Shape of files: " + str(files.shape))
    print("Shape of labels: " + str(labels.shape))
    start = time.time()
    print("fitting ...")
    net.fit(files, labels)
    end = time.time()
    print("Time elapsed for fitting: " + str(end - start))


if __name__ == '__main__':
    set_logger('train.log')
    main()
Example #29
from db import connect
from db.factory import TableFactory
from util import get_params, set_logger
import numpy as np
import pandas as pd
import re

config = get_params()
tf = TableFactory()
logger = set_logger('imdb data')

path_config = {
    'basics': f"{config['crawler']['path']}/title.basics.tsv.gz",
    'episodes': f"{config['crawler']['path']}/title.episode.tsv.gz",
    'ratings': f"{config['crawler']['path']}/title.ratings.tsv.gz"
}

mapping = {
    'tconst': 'title_id',
    'runtimeMinutes': 'runtime',
    'parentTconst': 'parent_id'
}

title_types = {
    'movie': 1,
    'tvSeries': 2,
    'tvMiniSeries': 3,
    'tvMovie': 4,
    'short': 5,
    'tvShort': 6,
    'tvSpecial': 7
Example #30
Script to confirm model correctness by observing overfitting to small data

For Hydrogen;
%load_ext autoreload
%autoreload 2
"""

from pathlib import Path
import torch
import torch.optim as optim
import matplotlib.pyplot as plt
from data_processor import ConllDataSet
from mst_lstm import BiLSTM_Parser, margin_based_loss
from util import set_logger
# Set logger
logger = set_logger(__name__)

# Read data
train_path = Path("data", "en-universal-train.conll")
train_data = ConllDataSet(train_path)


# Init model
def init_model():
    model = BiLSTM_Parser(vocab_size=train_data.vocab_size,
                          pos_size=train_data.pos_size,
                          word_embed_dim=100,
                          pos_embed_dim=25,
                          lstm_hidden_dim=250,
                          mlp_hidden_dim=100,
                          num_layers=2)
Example #31
    json_path = os.path.join(args.model_dir, 'params.json')
    assert os.path.isfile(
        json_path), "No json configuration file found at {}".format(json_path)
    params = util.Params(json_path)

    # use GPU if available
    params.cuda = torch.cuda.is_available()
    if params.ngpu > 0 and params.cuda: params.device = torch.device('cuda')
    else: params.device = torch.device('cpu')

    # Set the random seed for reproducible experiments
    torch.manual_seed(42)
    if params.cuda: torch.cuda.manual_seed(42)

    # Get the logger
    util.set_logger(os.path.join(args.model_dir, 'evaluate.log'))

    # Create the input data pipeline
    logging.info("Creating the dataset...")

    # fetch dataloaders
    test_dl = data_loader.fetch_dataloader('data',
                                           'test',
                                           params,
                                           shuffle=False)

    logging.info("- done.")

    # Define the model
    g = began.BeganGenerator(params).to(params.device)
    d = began.BeganDiscriminator(params).to(params.device)
Example #32
def main():
    setup_seed(666)
    device = torch.device("cuda")
    save_path = args.save
    save_pseudo_label_path = osp.join(
        save_path,
        'pseudo_label')  # in 'save_path'. Save labelIDs, not trainIDs.
    save_stats_path = osp.join(save_path, 'stats')  # in 'save_path'
    save_lst_path = osp.join(save_path, 'list')
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    if not os.path.exists(save_pseudo_label_path):
        os.makedirs(save_pseudo_label_path)
    if not os.path.exists(save_stats_path):
        os.makedirs(save_stats_path)
    if not os.path.exists(save_lst_path):
        os.makedirs(save_lst_path)

    cudnn.enabled = True
    cudnn.benchmark = True

    logger = util.set_logger(args.save, args.log_file, args.debug)
    logger.info('start with arguments %s', args)

    model = DeeplabMulti(num_classes=args.num_classes)
    saved_state_dict = torch.load(args.restore_from)
    model.load_state_dict(saved_state_dict)
    model.train()
    model.to(device)

    # init D
    num_class_list = [2048, 19]
    model_D = nn.ModuleList([
        FCDiscriminator(num_classes=num_class_list[i]).train().to(device)
        if i < 1 else OutspaceDiscriminator(
            num_classes=num_class_list[i]).train().to(device) for i in range(2)
    ])
    saved_state_dict_D = torch.load(args.restore_from_D)
    model_D.load_state_dict(saved_state_dict_D)

    if not os.path.exists(args.snapshot_dir):
        os.makedirs(args.snapshot_dir)

    image_src_list, _, src_num = parse_split_list(args.data_src_list)
    image_tgt_list, image_name_tgt_list, tgt_num = parse_split_list(
        args.data_tgt_train_list)
    # portions
    tgt_portion = args.init_tgt_port

    # training crop size
    w, h = map(int, args.input_size.split(','))
    input_size = (w, h)

    w, h = map(int, args.input_size_target.split(','))
    input_size_target = (w, h)

    bce_loss1 = torch.nn.MSELoss()
    bce_loss2 = torch.nn.MSELoss(reduce=False, reduction='none')
    seg_loss = torch.nn.CrossEntropyLoss(ignore_index=255)
    round_idx = 3
    save_round_eval_path = osp.join(args.save, str(round_idx))
    save_pseudo_label_color_path = osp.join(save_round_eval_path,
                                            'pseudo_label_color')
    if not os.path.exists(save_round_eval_path):
        os.makedirs(save_round_eval_path)
    if not os.path.exists(save_pseudo_label_color_path):
        os.makedirs(save_pseudo_label_color_path)
    ########## pseudo-label generation
    # evaluation & save confidence vectors
    test(model, model_D, device, save_round_eval_path, round_idx, 500, args,
         logger)
    conf_dict, pred_cls_num, save_prob_path, save_pred_path = val(
        model, model_D, device, save_round_eval_path, round_idx, tgt_num, args,
        logger)
    # class-balanced thresholds
    cls_thresh = kc_parameters(conf_dict, pred_cls_num, tgt_portion, round_idx,
                               save_stats_path, args, logger)
    # pseudo-label maps generation
    label_selection(cls_thresh, tgt_num, image_name_tgt_list, round_idx,
                    save_prob_path, save_pred_path, save_pseudo_label_path,
                    save_pseudo_label_color_path, save_round_eval_path, args,
                    logger)
    src_train_lst, tgt_train_lst, src_num_sel = savelst_SrcTgt(
        image_tgt_list, image_name_tgt_list, image_src_list, save_lst_path,
        save_pseudo_label_path, src_num, tgt_num, args)
    ########### model retraining
    # dataset
    srctrainset = SrcSTDataSet(args.data_src_dir,
                               src_train_lst,
                               max_iters=args.num_steps * args.batch_size,
                               crop_size=input_size,
                               scale=False,
                               mirror=False,
                               mean=IMG_MEAN)
    tgttrainset = TgtSTDataSet(args.data_tgt_dir,
                               tgt_train_lst,
                               pseudo_root=save_pseudo_label_path,
                               max_iters=args.num_steps * args.batch_size,
                               crop_size=input_size_target,
                               scale=False,
                               mirror=False,
                               mean=IMG_MEAN,
                               set='train')
    trainloader = torch.utils.data.DataLoader(srctrainset,
                                              batch_size=args.batch_size,
                                              shuffle=True,
                                              num_workers=8,
                                              pin_memory=True)
    trainloader_iter = enumerate(trainloader)
    targetloader = torch.utils.data.DataLoader(tgttrainset,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=8,
                                               pin_memory=True)
    targetloader_iter = enumerate(targetloader)

    optimizer = optim.SGD(model.parameters(),
                          lr=args.learning_rate,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)
    optimizer.zero_grad()
    optimizer_D = optim.Adam(model_D.parameters(),
                             lr=args.learning_rate_D,
                             betas=(0.9, 0.99))
    optimizer_D.zero_grad()
    logger.info(
        '###### Start model retraining dataset in round {}! ######'.format(
            round_idx))

    start = timeit.default_timer()
    # start training
    interp = nn.Upsample(size=(input_size[1], input_size[0]),
                         mode='bilinear',
                         align_corners=True)
    interp_target = nn.Upsample(size=(input_size_target[1],
                                      input_size_target[0]),
                                mode='bilinear',
                                align_corners=True)

    # labels for adversarial training
    source_label = 0
    target_label = 1

    for i_iter in range(args.num_steps):

        lamb = 1
        optimizer.zero_grad()
        adjust_learning_rate(optimizer, i_iter)

        optimizer_D.zero_grad()
        adjust_learning_rate_D(optimizer_D, i_iter)

        # train G
        # don't accumulate grads in D
        for param in model_D.parameters():
            param.requires_grad = False

        # train with source
        _, batch = trainloader_iter.__next__()
        images, labels, _, _ = batch
        images = images.to(device)
        labels = labels.long().to(device)

        feat_source, pred_source = model(images, model_D, 'source')
        pred_source = interp(pred_source)

        loss_seg = seg_loss(pred_source, labels)
        loss_seg.backward()

        # train with target
        _, batch = targetloader_iter.__next__()
        images, labels, _, _ = batch
        images = images.to(device)
        labels = labels.long().to(device)

        feat_target, pred_target = model(images, model_D, 'target')
        pred_target = interp_target(pred_target)
        # atten_target = F.interpolate(atten_target, size=(16, 32), mode='bilinear', align_corners=True)

        loss_seg_tgt = seg_loss(pred_target, labels) * lamb

        D_out1 = model_D[0](feat_target)
        loss_adv1 = bce_loss1(
            D_out1,
            torch.FloatTensor(
                D_out1.data.size()).fill_(source_label).to(device))
        D_out2 = model_D[1](F.softmax(pred_target, dim=1))
        loss_adv2 = bce_loss2(
            D_out2,
            torch.FloatTensor(
                D_out2.data.size()).fill_(source_label).to(device))
        loss_adv = loss_adv1 * 0.01 + loss_adv2.mean() * 0.01
        loss = loss_seg_tgt + loss_adv
        loss.backward()

        optimizer.step()

        # train D
        # bring back requires_grad
        for param in model_D.parameters():
            param.requires_grad = True

        # train with source
        D_out_source1 = model_D[0](feat_source.detach())
        loss_D_source1 = bce_loss1(
            D_out_source1,
            torch.FloatTensor(
                D_out_source1.data.size()).fill_(source_label).to(device))
        D_out_source2 = model_D[1](F.softmax(pred_source.detach(), dim=1))
        loss_D_source2 = bce_loss1(
            D_out_source2,
            torch.FloatTensor(
                D_out_source2.data.size()).fill_(source_label).to(device))
        loss_D_source = loss_D_source1 + loss_D_source2
        loss_D_source.backward()

        # train with target
        D_out_target1 = model_D[0](feat_target.detach())
        loss_D_target1 = bce_loss1(
            D_out_target1,
            torch.FloatTensor(
                D_out_target1.data.size()).fill_(target_label).to(device))
        D_out_target2 = model_D[1](F.softmax(pred_target.detach(), dim=1))
        weight_target = bce_loss2(
            D_out_target2,
            torch.FloatTensor(
                D_out_target2.data.size()).fill_(target_label).to(device))
        loss_D_target2 = weight_target.mean()
        loss_D_target = loss_D_target1 + loss_D_target2
        loss_D_target.backward()

        optimizer_D.step()

        if i_iter % 10 == 0:
            print(
                'iter={0:8d}/{1:8d}, seg={2:.3f} seg_tgt={3:.3f} adv={4:.3f} adv1={5:.3f} adv2={6:.3f} src1={7:.3f} src2={8:.3f} tgt1={9:.3f} tgt2={10:.3f} D1={11:.3f} D2={12:.3f}'
                .format(i_iter, args.num_steps, loss_seg.item(),
                        loss_seg_tgt.item(), loss_adv.item(), loss_adv1.item(),
                        loss_adv2.mean().item(), loss_D_source1.item(),
                        loss_D_source2.item(), loss_D_target1.item(),
                        loss_D_target2.item(), loss_D_source.item(),
                        loss_D_target.item()))

        if i_iter >= args.num_steps_stop - 1:
            print('save model ...')
            torch.save(
                model.state_dict(),
                osp.join(args.snapshot_dir,
                         'GTA5_' + str(args.num_steps_stop) + '.pth'))
            torch.save(
                model_D.state_dict(),
                osp.join(args.snapshot_dir,
                         'GTA5_' + str(args.num_steps_stop) + '_D.pth'))
            break

        if i_iter % args.save_pred_every == 0 and i_iter != 0:
            print('taking snapshot ...')
            test(model, model_D, device, save_round_eval_path, round_idx, 500,
                 args, logger)
            torch.save(
                model.state_dict(),
                osp.join(args.snapshot_dir, 'GTA5_' + str(i_iter) + '.pth'))
            torch.save(
                model_D.state_dict(),
                osp.join(args.snapshot_dir, 'GTA5_' + str(i_iter) + '_D.pth'))

    end = timeit.default_timer()
    logger.info(
        '###### Finish model retraining dataset in round {}! Time cost: {:.2f} seconds. ######'
        .format(round_idx, end - start))
    # test self-trained model in target domain test set
    test(model, model_D, device, save_round_eval_path, round_idx, 500, args,
         logger)
Example #33
        <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/4.3.1/css/bootstrap.min.css">
        </head>
        <body>
        
        <div class="container">
        <h1>Upload CSV for NLPQL Form Parsing</h1>
        <br>
        
        <div>
            <a href="https://github.com/ClarityNLP/Utilities/blob/master/custom_query/afib.csv" target="_blank">
                (See sample CSV)
            </a>
        </div>
        <form method=post enctype=multipart/form-data>
          <br>
          <input type=text name="formname" class="form-control" placeholder="Form Name">
          <br>
          <input type=file name=file class="form-control-file">
          <br>
          <input type=submit value=Upload class="btn btn-primary">
        </form>
        </div>
        </body>
        '''


if __name__ == '__main__':
    util.app_token()
    # configure logging first; application.run() blocks until the server stops
    util.set_logger(application.logger)
    application.run(host='0.0.0.0', port=5000, debug=True)