Code example #1
def main(params):
    np.random.seed(params.rand_seed)

    models = []
    for name, dir in zip(params.models[::2], params.models[1::2]):
        if 'hmm' in name.lower():
            hmm = sio.loadmat(os.path.join(dir, 'envelope_HMM_K8.mat'))
            models.append(
                Model(name.replace('_', ' '),
                      covariances=hmm['cov'],
                      gamma=hmm['Gamma'],
                      statepath=hmm['vpath'].flatten() - 1))
            models[-1].dir = dir
        else:
            models.append(Model(name, dir=dir))

    fig_dir = os.path.join(models[0].dir, 'figures')

    if not os.path.exists(fig_dir):
        os.makedirs(fig_dir)

    # # Just look at first subject for now
    # for m in models:
    #     m.gamma = m.gamma[:38000]
    #     m.statepath = m.statepath[:38000]
    #     m.statepath_onehot = m.statepath_onehot[:38000]

    # Match up the models using the munkres algorithm
    for m in models[1:]:
        m.reorder_states(None, models[0].covariances)

    # Export covariance matrices to matlab to get spatial maps
    for m in models:
        sio.savemat(
            os.path.join(models[0].dir, 'model_{}_cov.mat'.format(m.name)),
            {'C': m.covariances})

    # Plot all the figures
    plot_loss(models, fig_dir)
    plot_covariances(models, fig_dir)
    plot_timecourses(models,
                     fig_dir,
                     start=2000,
                     length=10000,
                     freq=params.sample_freq)
    plot_timecourses(models,
                     fig_dir,
                     start=2000,
                     length=10000,
                     freq=params.sample_freq,
                     hard=False)

    if len(models) > 1:
        plot_correlation_matrix(models, fig_dir)
        plot_kl_matrix(models, fig_dir)

    plot_global_stats(models, fig_dir, freq=params.sample_freq)
    plot_windowed_fo(models, fig_dir, params.sample_freq)
    plot_autocorrelation(models, fig_dir)
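
For context, a minimal sketch of the Model container this script appears to assume; the attribute and method names are inferred from the calls above, and the real class presumably also loads results from `dir` and computes `statepath_onehot`:

class Model:
    """Hypothetical minimal container matching the attributes used above."""

    def __init__(self, name, dir=None, covariances=None, gamma=None,
                 statepath=None):
        self.name = name
        self.dir = dir
        self.covariances = covariances
        self.gamma = gamma
        self.statepath = statepath

    def reorder_states(self, order, reference_covariances):
        # The real implementation matches this model's states against the
        # reference covariances (the script mentions the Munkres algorithm);
        # omitted here.
        raise NotImplementedError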
Code example #2
def main():
    log.basicConfig(format="[ %(levelname)s ] %(message)s",
                    level=log.INFO,
                    stream=sys.stdout)

    args = build_argparser().parse_args()
    interactive_mode = not (os.path.isdir(args.input)
                            or args.input.endswith('.png')
                            or args.input.endswith('.jpg'))
    model = Model(args, interactive_mode)
    if not interactive_mode:
        non_interactive_demo(model, args)
        return

    height, width = model.encoder.input_info['imgs'].input_data.shape[-2:]
    prev_text = ''
    demo = InteractiveDemo((height, width), resolution=args.resolution)
    show_window = not args.no_show
    capture = create_capture(args.input, demo.resolution)
    if not capture.isOpened():
        log.error("Cannot open camera")
        return 1
    while True:
        ret, frame = capture.read()
        if not ret:
            log.info("End of file or error reading from camera")
            break
        bin_crop = demo.get_crop(frame)
        model_input = prerocess_crop(bin_crop, (height, width),
                                     preprocess_type=args.preprocessing_type)
        frame = demo.put_crop(frame, model_input)
        model_res = model.infer_async(model_input)
        if not model_res:
            phrase = prev_text
        else:
            distribution, targets = model_res
            prob = calculate_probability(distribution)
            log.info("Confidence score is %s", prob)
            if prob >= args.conf_thresh**len(distribution):
                log.info("Prediction updated")
                phrase = model.vocab.construct_phrase(targets)
            else:
                log.info("Confidence score is low, prediction is not complete")
                phrase = ''
        frame = demo.draw(frame, phrase)
        prev_text = phrase
        if show_window:
            cv.imshow('Press q to quit.', frame)
            key = cv.waitKey(1) & 0xFF
            if key in (ord('Q'), ord('q'), ord('\x1b')):
                break
            elif key in (ord('o'), ord('O')):
                demo.resize_window("decrease")
            elif key in (ord('p'), ord('P')):
                demo.resize_window("increase")

    log.info(
        "This demo is an API example, for any performance measurements please use the dedicated benchmark_app tool "
        "from the OpenVINO toolkit\n")
Code example #3
def main():
    width = 800
    height = 800
    model = Model(r'D:\graphExample\iafrican_head.obj')  # raw string keeps the Windows path intact
    showLines(width, height, model)
    showCircles(500, 500, 200, 200, 70, [255, 255, 255])
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Code example #4
def second_stage_traing(models, configs, input_config):
  orig_models = models.copy()
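  # `top` and `shuffle_params` are assumed to be defined at module level
  # elsewhere in this project.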
  for i in range(top):
    model_id = "2_1_{}".format(i)
    upd_configs, params = shuffle_params(configs, orig_models)

    model_config, train_config, eval_config = upd_configs.values()
    
    model = Model(upd_configs, params)

    train_dir = os.path.join(FLAGS.train_dir, model_id)
    if not os.path.exists(train_dir):
      os.makedirs(train_dir)

    total_loss = train_process(model_config, input_config, train_config, train_dir)

    # def evaluate(config, create_input_dict_fn, model_fn, train_dir, eval_dir):
    eval_dir = train_dir.replace('train', 'eval')
    evaluation = evaluate([model_config, eval_config, input_config], train_dir, eval_dir)
    model.set_total_loss(total_loss)
    model.set_eval_metrics(evaluation)

    models[model_id] = model
  print(["{}: {}".format(i, models[i].total_loss) for i in models])

  for i in range(2):
    model_id = "2_2_{}".format(i)
    upd_configs, params = update_config(configs)
    upd_configs = update_augmentation_options(upd_configs)
    model_config, train_config, eval_config = upd_configs.values()
    
    model = Model(upd_configs, params)

    train_dir = os.path.join(FLAGS.train_dir, model_id)
    if not os.path.exists(train_dir):
      os.makedirs(train_dir)
    
    total_loss = train_process(model_config, input_config, train_config, train_dir)

    # def evaluate(config, create_input_dict_fn, model_fn, train_dir, eval_dir):
    eval_dir = train_dir.replace('train', 'eval')
    evaluation = evaluate([model_config, eval_config, input_config], train_dir, eval_dir)
    model.set_total_loss(total_loss)
    model.set_eval_metrics(evaluation)
    models[model_id] = model
  return models
Code example #5
    def __init__(self, input_dim, output_dim, lr=0.2, device='cuda:0'):
        self.total_it = 0
        self.log_freq = 100
        self.model = Model(input_dim, output_dim)

        self.alpha = lr
        self.H = torch.eye(input_dim * output_dim)
        self.input_dim = input_dim
        self.output_dim = output_dim
Code example #6
    def __init__(self, obs_dim, action_dim, *args, **kwargs):
        # Initialize arguments
        hidden_dims_actor = tuple(kwargs.get("hidden_dims_actor",
                                             (256, 256)))
        hidden_dims_critic = tuple(kwargs.get("hidden_dims_critic",
                                              (256, 256)))
        hidden_dims_model = tuple(kwargs.get("hidden_dims_model",
                                             (256, 256)))

        self.gamma = 0.99
        self.tau = 0.005
        self.delay = 2
        lr_actor = 0.001
        lr_critic = 0.001
        lr_model = 0.0001
        self.step_random = 500 # How many random actions to take before using actor for action selection
        self.update_every_n_steps = 51 # How often to update model, actor and critics
        self.update_steps = 200 # How many gradient updates to perform, per model,  when updating
        self.time = time.time()

        # Initialize actor
        self.actor = Actor(obs_dim, hidden_dims_actor,  action_dim)
        self.actor_target = copy.deepcopy(self.actor)
        self.optimizer_actor = torch.optim.Adam(self.actor.parameters(),
                                                lr=lr_actor)
        for par in self.actor_target.parameters():
            par.requires_grad = False

        # Initialize 2 critics
        self.critics = []
        self.critics_target = []
        self.optimizer_critics = []
        for k in range(2):
            critic = Critic(obs_dim + action_dim, hidden_dims_critic)
            self.critics.append(critic)
            self.critics_target.append(copy.deepcopy(critic))
            self.optimizer_critics.append(torch.optim.Adam(critic.parameters(),
                                                           lr=lr_critic))

            for par in self.critics_target[k].parameters():
                par.requires_grad = False

        # Initialize models
        self.models = []
        self.optimizer_models = []
        for k in range(25):
            model = Model(obs_dim + action_dim, hidden_dims_model, obs_dim)
            self.models.append(model)
            self.optimizer_models.append(torch.optim.Adam(model.parameters(),
                                                          lr=lr_model))

        # Setup Replay Buffer
        self.buffer = ReplayBuffer()
        self.o_old = None
        self.a_old = None

        self.step_i = 0
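
The constants above (tau = 0.005, delay = 2) are the usual TD3 soft-update hyperparameters. A minimal sketch of the Polyak target update they are typically used for; the actual update step is not part of this excerpt, so this is an assumption about the training loop:

import torch

@torch.no_grad()
def soft_update(target: torch.nn.Module, source: torch.nn.Module, tau: float):
    # theta_target <- tau * theta_source + (1 - tau) * theta_target
    for p_t, p_s in zip(target.parameters(), source.parameters()):
        p_t.mul_(1.0 - tau).add_(tau * p_s)

# Hypothetical use every `delay` gradient steps:
# soft_update(self.actor_target, self.actor, self.tau)
# for critic, critic_target in zip(self.critics, self.critics_target):
#     soft_update(critic_target, critic, self.tau)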
Code example #7
File: chat_bot.py Project: Arolive/NLP
def activate_bot(driver):
    print("Press Ctrl + C any time to stop bot")
    model = Model.Model()
    activation_time = get_time_tuple(str(datetime.datetime.now().time()))
    memory = BotMemory()
    users = getUserList(driver)
    memory.addAll(users)

    try:
        while True:
            usersList = getUserList(driver)
            for details in usersList:
                newuser = False
                user = details[0]
                timing = details[1]
                if user not in memory.exisingUser():
                    memory.updateUser(details)
                    newuser = True
                search_chatter(driver, user)
                time.sleep(2)
                msg = read_last_msg(driver)
                last_chat_owner = msg[0].split("] ")[-1].split(":")[0]
                last_chat_time = msg[0].split(" ")[0].split("[")[-1]
                if (get_time_tuple(last_chat_time) > get_time_tuple(
                        memory.chatTime(user))
                        and last_chat_owner == user) or newuser or (
                            last_chat_owner == user and
                            get_time_tuple(last_chat_time) > activation_time):

                    print('\nNew chat detected from', user)
                    last_new_msg = msg[-1]
                    reply = model.getReply(last_new_msg)
                    # Selecting Input Box
                    input_box = driver.find_elements_by_xpath(
                        '//*[@id="main"]/footer/div[1]/div[2]/div/div[2]')[0]
                    # Sending Intro
                    if newuser:
                        intro = "Hi, this is ABot, a bot created by Aritra Banerjee :). "
                        "\nAritra is not available right now, so I'm here to chat with you "
                        "! Although I'm under development but I can do a few cool stuffs."
                        input_box.send_keys(intro)
                        time.sleep(1)
                        input_box.send_keys(Keys.ENTER)
                        time.sleep(1)
                    # Sending reply
                    input_box.send_keys(reply + Keys.SHIFT + Keys.ENTER)
                    time.sleep(1)
                    input_box.send_keys(Keys.ENTER)
                    memory.updateTime(user, last_chat_time)
    except KeyboardInterrupt:
        print("Exiting")
        sys.exit()
Code example #8
def main():
    args = build_argparser().parse_args()
    interactive_mode = not (os.path.isdir(args.input)
                            or args.input.endswith('.png')
                            or args.input.endswith('.jpg'))
    model = Model(args, interactive_mode)
    if not interactive_mode:
        non_interactive_demo(model, args)
        return

    height, width = model.encoder.input_info['imgs'].input_data.shape[-2:]
    prev_text = ''
    demo = InteractiveDemo((height, width), resolution=args.resolution)
    show_window = not args.no_show
    capture = create_capture(args.input, demo.resolution)
    if not capture.isOpened():
        log.error("Cannot open camera")
        return 1
    while True:
        ret, frame = capture.read()
        if not ret:
            break
        bin_crop = demo.get_crop(frame)
        model_input = prerocess_crop(bin_crop, (height, width),
                                     preprocess_type=args.preprocessing_type)
        frame = demo.put_crop(frame, model_input)
        model_res = model.infer_async(model_input)
        if not model_res:
            phrase = prev_text
        else:
            distribution, targets = model_res
            prob = calculate_probability(distribution)
            log.debug("Confidence score is {}".format(prob))
            if prob >= args.conf_thresh**len(distribution):
                log.debug("Prediction updated")
                phrase = model.vocab.construct_phrase(targets)
            else:
                log.debug(
                    "Confidence score is low, prediction is not complete")
                phrase = ''
        frame = demo.draw(frame, phrase)
        prev_text = phrase
        if show_window:
            cv.imshow('Press q to quit.', frame)
            key = cv.waitKey(1) & 0xFF
            if key in (ord('Q'), ord('q'), ord('\x1b')):
                break
            elif key in (ord('o'), ord('O')):
                demo.resize_window("decrease")
            elif key in (ord('p'), ord('P')):
                demo.resize_window("increase")
Code example #9
	def __init__(
		self,
		input_dim,
		output_dim,
		lr=0.2,
		device='cuda:0'
	):
		self.total_it = 0
		self.log_freq = 1
		self.model = Model(input_dim, output_dim)

		self.alpha = lr
		self.last_grad = None
		self.last_p = None
Code example #10
    def _init_create_networks(self):
        """
        Initialize the current model according to the tasks seen so far.
        """
        backbone = BackBone(self._opt)
        output_sizes = [self._output_size_per_task[x] for x in self._opt.tasks]
        output_feature_dim = backbone.output_feature_dim
        classifiers = [
            Head(output_feature_dim, self._opt.hidden_size, output_sizes[i])
            for i in range(len(self._opt.tasks))
        ]
        classifiers = nn.ModuleList(classifiers)
        self.resnet50 = Model(backbone, classifiers, self._opt.tasks)
        self.to_device()
Code example #11
File: drex.py Project: haochihlin/CoRL2019-DREX
def train_reward(args):
    # set random seed
    np.random.seed(args.seed)
    tf.random.set_random_seed(args.seed)

    log_dir = Path(args.log_dir)/'trex'
    log_dir.mkdir(parents=True,exist_ok='temp' in args.log_dir)

    with open(str(log_dir/'args.txt'),'w') as f:
        f.write( str(args) )

    env = gym.make(args.env_id)

    ob_dims = env.observation_space.shape[-1]
    ac_dims = env.action_space.shape[-1]

    dataset = BCNoisePreferenceDataset(env,args.max_steps,args.min_noise_margin)

    loaded = dataset.load_prebuilt(args.noise_injected_trajs)
    assert loaded

    models = []
    for i in range(args.num_models):
        with tf.variable_scope('model_%d'%i):
            net = RewardNet(args.include_action,ob_dims,ac_dims,num_layers=args.num_layers,embedding_dims=args.embedding_dims)
            model = Model(net,batch_size=64)
            models.append(model)

    ### Initialize Parameters
    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())
    # Training configuration
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.InteractiveSession(config=config)  # apply the GPU-growth config defined above

    sess.run(init_op)

    for i,model in enumerate(models):
        D = dataset.sample(args.D,include_action=args.include_action)

        model.train(D,iter=args.iter,l2_reg=args.l2_reg,noise_level=args.noise,debug=True)

        model.saver.save(sess,os.path.join(str(log_dir),'model_%d.ckpt'%(i)),write_meta_graph=False)

    sess.close()
Code example #12
    def __init__(self, venv, model_dir, ctrl_coeff=0., alive_bonus=0.):
        VecEnvWrapper.__init__(self, venv)

        ob_shape = venv.observation_space.shape
        ac_dims = venv.action_space.n if venv.action_space.dtype == int else venv.action_space.shape[
            -1]

        self.ctrl_coeff = ctrl_coeff
        self.alive_bonus = alive_bonus

        self.graph = tf.Graph()

        config = tf.ConfigProto(device_count={'GPU': 0})  # Run on CPU
        #config.gpu_options.allow_growth = True
        self.sess = tf.Session(graph=self.graph, config=config)

        with self.graph.as_default():
            with self.sess.as_default():
                import os, sys
                from argparse import Namespace
                from pathlib import Path

                dir_path = os.path.dirname(os.path.realpath(__file__))
                sys.path.append(os.path.join(dir_path, '..', '..', '..', '..'))
                from utils import Model, RewardNet

                print(os.path.realpath(model_dir))
                with open(str(Path(model_dir) / 'args.txt')) as f:
                    args = eval(f.read())

                models = []
                for i in range(args.num_models):
                    with tf.variable_scope('model_%d' % i):
                        net = RewardNet(args.include_action,
                                        ob_shape[-1],
                                        ac_dims,
                                        num_layers=args.num_layers,
                                        embedding_dims=args.embedding_dims)

                        model = Model(net, batch_size=1)
                        model.saver.restore(
                            self.sess,
                            os.path.join(model_dir, 'model_%d.ckpt' % i))

                        models.append(model)
                self.models = models
Code example #13
File: predict.py Project: chicm-ms/ccir2018
def predict():
    print('Loading user vocab...')
    user_vocab = load_user_vocab()
    print('Loading doc vocab...')
    doc_vocab = load_doc_vocab()
    print('Loading label vocab...')
    label_vocab = load_label_vocab()
    print('Loading favs...')
    fav_dict = load_favs()
    print('Loading topic vocab...')
    topic_vocab = load_topic_vocab()

    filename = os.path.join(settings.TEST_DATA_DIR, 'test.pk')
    print('test file: ', filename)
    loader = CCIRDataLoader(filename, batch_size=64, shuffle=False)

    print(len(topic_vocab.stoi), len(doc_vocab.stoi), len(label_vocab.stoi))
    model = Model(len(topic_vocab.stoi), len(doc_vocab.stoi), len(label_vocab.stoi)).cuda()
    print('Loading checkpoint: ', CP)
    model.load_state_dict(torch.load(CP))
    model.eval()

    m = nn.Softmax(dim=1)  # softmax over the class dimension (dim is required in current PyTorch)

    preds = None

    for data in loader:
        favs, read, unread = make_tensor(data, user_vocab, fav_dict, train=False)
        output = m(model(favs, read, unread))
        _, pred = output.topk(100, 1, True, True)
        pred = np.array(pred.cpu().tolist())

        if preds is None:
            preds = pred
        else:
            preds = np.vstack((preds, pred))
    print(preds.shape)
    np.save(os.path.join(settings.RESULT_DIR, NPY_FILE), preds)
Code example #14
def first_stage_traing(config, input_config):
  print("First stage traing:")
  model_config, train_config, eval_config = config.values()
  models = {}

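  # `gen_iter` and `top` are assumed to be module-level constants defined
  # elsewhere in this project.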
  for i in range(gen_iter):
    model_idx = "1_{}".format(i)
    upd_configs, params = update_config(config)
    upd_configs = update_augmentation_options(upd_configs)
    model_config, train_config, eval_config = upd_configs.values()
    
    model = Model(upd_configs, params)

    train_dir = os.path.join(FLAGS.train_dir, model_idx)
    if not os.path.exists(train_dir):
      os.makedirs(train_dir)
    
    total_loss = train_process(model_config, input_config, train_config, train_dir)

    # def evaluate(config, create_input_dict_fn, model_fn, train_dir, eval_dir):
    eval_dir = train_dir.replace('train', 'eval')
    evaluation = evaluate([model_config, eval_config, input_config], train_dir, eval_dir)
    model.set_total_loss(total_loss)
    model.set_eval_metrics(evaluation)

    models[model_idx] = model


  # print({i: history[i]['total_loss'] for i in history})
  # print(history)

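  # NOTE: reverse=True keeps the models with the *highest* total_loss; if a
  # lower loss is better, this should arguably be reverse=False.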
  best_models = sorted(models, key=lambda k: models[k].total_loss, reverse=True)[:top]

  print('Best models: %s' % best_models)
  best_models = {i: models[i] for i in best_models}
  print('Best models: %s' % best_models)
  return best_models
Code example #15
def main():
    width = 400
    height = 400
    img = np.zeros((width, height, 3), dtype=np.uint8)
    img2 = np.zeros((width, height, 3), dtype=np.uint8)
    model = Model(r'D:\graphExample\iafrican_head.obj')  # raw string keeps the Windows path intact

    for i in range(model.nfaces()):
        face = model.face(i)
        triang = [0, 0, 0]
        for j in range(3):
            vert = model.vert(face[j])
            triang[j] = 400 - int((vert[0] + 1) * width / 2), int(
                abs(height - (vert[1] + 1) * height / 2))
        color = get_rand_color()
        img = triangle(triang[0][0], triang[0][1], triang[1][0], triang[1][1],
                       triang[2][0], triang[2][1], img, color)
        cv2.drawContours(img2, [np.array([triang[0], triang[1], triang[2]])],
                         0, color, -1)

    cv2.imshow('My triangle', img)
    cv2.imshow('Default triangle', img2)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Code example #16
def main(train_csv, model_dir, mode):
    start_time = time.time()

    #df = pd.read_csv(args.train_csv, low_memory = False)
    df = pd.read_csv(train_csv)
    is_big = df.memory_usage().sum() > BIG_DATASET_SIZE

    # dict with data necessary to make predictions
    model_config = {}
    model_config['is_big'] = is_big

    preprocessor = Preprocessor()
    df_X, df_y = preprocessor.fit_transform(df)

    model_config['features'] = preprocessor.features

    print('Dataset read, shape {}'.format(df_X.shape))

    # fitting
    model_config['mode'] = mode
    if mode == 'regression':
        ridge_model = Ridge()

        cb_model = cb.CatBoostRegressor(
            iterations=300,
            boosting_type=('Ordered' if len(df_X) < 1000 else 'Plain'),
            od_type="IncToDec",
            depth=6,
            od_pval=0.0001,
            #learning_rate=0.03,
            loss_function='RMSE')
        models = [ridge_model, cb_model]
    else:
        log_reg_model = LogisticRegression()

        cb_model = cb.CatBoostClassifier(
            iterations=300,
            boosting_type=('Ordered' if len(df_X) < 1000 else 'Plain'),
            od_type="IncToDec",
            depth=6,
            od_pval=0.0001,
            #learning_rate=0.03,
            loss_function='Logloss',
            logging_level='Verbose')
        models = [log_reg_model, cb_model]

    for model in models:
        model.fit(df_X, df_y)

    D = [1 / np.std(model.predict(df_X) - df_y)**2 for model in models]
    s = sum(D)
    coef = [d / s for d in D]

    model = Model(models, coef)

    model_config['model'] = model

    model_config_filename = os.path.join(model_dir, 'model_config.pkl')
    with open(model_config_filename, 'wb') as fout:
        pickle.dump(model_config, fout, protocol=pickle.HIGHEST_PROTOCOL)

    print('Train time: {}'.format(time.time() - start_time))
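
The coefficients computed above are inverse-variance weights: each model's weight is proportional to 1 / Var(residuals), normalized to sum to one. The ensemble Model class itself is not shown; a plausible sketch, assuming it combines predictions as a weighted sum:

import numpy as np

class Model:
    """Hypothetical weighted-average ensemble matching the usage above."""

    def __init__(self, models, coef):
        self.models = models
        self.coef = coef  # inverse-variance weights, already normalized

    def predict(self, X):
        preds = [model.predict(X) for model in self.models]
        return np.sum([w * p for w, p in zip(self.coef, preds)], axis=0)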
Code example #17
File: train.py Project: VirajBagal/MMBERT
    df = pd.concat([train_df, val_df, test_df]).reset_index(drop=True)

    ans2idx = {ans: idx for idx, ans in enumerate(df['answer'].unique())}
    idx2ans = {idx: ans for ans, idx in ans2idx.items()}
    df['answer'] = df['answer'].map(ans2idx).astype(int)
    train_df = df[df['mode'] == 'train'].reset_index(drop=True)
    val_df = df[df['mode'] == 'val'].reset_index(drop=True)
    test_df = df[df['mode'] == 'test'].reset_index(drop=True)

    num_classes = len(ans2idx)

    args.num_classes = num_classes

    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    model = Model(args)

    if args.use_pretrained:
        model.load_state_dict(torch.load(args.model_dir))

    model.classifier[2] = nn.Linear(args.hidden_size, num_classes)

    model.to(device)

    wandb.watch(model, log='all')

    optimizer = optim.Adam(model.parameters(), lr=args.lr)
    scheduler = lr_scheduler.ReduceLROnPlateau(optimizer,
                                               patience=args.patience,
                                               factor=args.factor,
                                               verbose=True)
Code example #18
    x_sample = np.cumsum(seq_x, 0)
    y_sample = np.cumsum(seq_y, 0)
    z_sample = np.array(seq_z)
    sequence_coo = np.stack([x_sample, y_sample, z_sample]).T
    sequence_offset = np.stack([np.array(seq_x),
                               np.array(seq_y), np.array(z_sample)]).T
    return(sequence_coo, sequence_offset)


# load hp
hp_path = 'draw_models/hp_folder/' + args_draw.model[:-4] + '.pickle'
with open(hp_path, 'rb') as handle:
    hp = pickle.load(handle)
hp.use_cuda = use_cuda
# load model
model = Model(hyper_parameters=hp, parametrization='point')
encoder_name = 'draw_models/encoder_' + args_draw.model
decoder_name = 'draw_models/decoder_' + args_draw.model
model.load(encoder_name, decoder_name)


if args_draw.experiment not in ['uncondition', 'complete']:
    raise ValueError('experiment should either be uncondition or complete')

if args_draw.experiment == 'uncondition':
    model.conditional_generation_point(uncondition=True, plot=True,
                                       sigma=float(args_draw.sigma))
    # 10 is arbitrary.
elif args_draw.experiment == 'complete':
    # TODO: process the image to complete
    regular = '[_][0-9]+'
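
A toy usage sketch of the offset-to-coordinate conversion at the top of this example; the enclosing function's def line is cut off in this excerpt, so the name `make_seq` is inferred from the calls in examples #24 and #25:

# Three points given as (dx, dy) offsets plus a pen state p.
seq_x = [0.0, 1.0, 1.0]
seq_y = [0.0, 0.0, 2.0]
seq_z = [0, 0, 1]

coo, offset = make_seq(seq_x, seq_y, seq_z)
# coo[:, 0] == np.cumsum(seq_x) -> [0., 1., 2.]
# coo[:, 1] == np.cumsum(seq_y) -> [0., 0., 2.]
# offset stacks the raw (dx, dy, p) rows unchanged.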
Code example #19
c = pre.transform_data(test_data, normalize=True)

c = torch.from_numpy(c)
testset = torch.utils.data.TensorDataset(c)


test_loader = torch.utils.data.DataLoader(testset, batch_size=config['batch_size'], shuffle=False, num_workers=1)

if args.net == 'ours':
    net = DTS()
elif args.net == 'unet':
    net = UNet(n_channels=2, n_classes=2)
elif args.net == 'uresnet':
    net = UResNet(num_classes=2, input_channels=2, inplanes=16)

model = Model(net=net, config=config)

save_path = config['save_path']


model.resume(save_path=config['save_path'], filename='ckpt_%d.t7' % config['epoch'])

print('Prepare data for second phase training:')
test_images = model.prepare_second_phase_data(test_loader)
test_images = pre.correction_data(test_images)

if not os.path.exists(config['save_prediction_path']):
    os.mkdir(config['save_prediction_path'])
os.chdir(config['save_prediction_path'])
print('Save images...')
np.save("%s_%s_%s.npy" % (args.data, config['view'], config['norm_axis']), test_images)
Code example #20
File: drex.py Project: haochihlin/CoRL2019-DREX
def eval_reward(args):
    env = gym.make(args.env_id)

    dataset = BCNoisePreferenceDataset(env)

    loaded = dataset.load_prebuilt(args.noise_injected_trajs)
    assert loaded

    # Load Seen Trajs
    seen_trajs = [
        (obs,actions,rewards) for _,trajs in dataset.trajs for obs,actions,rewards in trajs
    ]

    # Load Unseen Trajectories
    if args.unseen_trajs:
        with open(args.unseen_trajs,'rb') as f:
            unseen_trajs = pickle.load(f)
    else:
        unseen_trajs = []  # fixed typo: was `uneen_trajs`, leaving unseen_trajs undefined below

    # Load Demo Trajectories used for BC
    with open(args.bc_trajs,'rb') as f:
        bc_trajs = pickle.load(f)

    # Load T-REX Reward Model
    graph = tf.Graph()
    config = tf.ConfigProto() # Run on CPU
    config.gpu_options.allow_growth = True

    with graph.as_default():
        models = []
        for i in range(args.num_models):
            with tf.variable_scope('model_%d'%i):
                net = RewardNet(args.include_action,env.observation_space.shape[-1],env.action_space.shape[-1],num_layers=args.num_layers,embedding_dims=args.embedding_dims)

                model = Model(net,batch_size=1)
                models.append(model)

    sess = tf.Session(graph=graph,config=config)
    for i,model in enumerate(models):
        with sess.as_default():
            model.saver.restore(sess,os.path.join(args.log_dir,'trex','model_%d.ckpt'%i))

    # Calculate Predicted Returns
    def _get_return(obs,acs):
        with sess.as_default():
            return np.sum([model.get_reward(obs,acs) for model in models]) / len(models)

    seen = [1] * len(seen_trajs) + [0] * len(unseen_trajs) + [2] * len(bc_trajs)
    gt_returns, pred_returns = [], []

    for obs,actions,rewards in seen_trajs+unseen_trajs+bc_trajs:
        gt_returns.append(np.sum(rewards))
        pred_returns.append(_get_return(obs,actions))
    sess.close()

    # Draw Result
    def _draw(gt_returns,pred_returns,seen,figname=False):
        """
        gt_returns: [N] length
        pred_returns: [N] length
        seen: [N] length
        """
        import matplotlib
        matplotlib.use('agg')
        import matplotlib.pylab
        from matplotlib import pyplot as plt
        from imgcat import imgcat

        matplotlib.rcParams.update(matplotlib.rcParamsDefault)
        plt.style.use('ggplot')
        params = {
            'text.color':'black',
            'axes.labelcolor':'black',
            'xtick.color':'black',
            'ytick.color':'black',
            'legend.fontsize': 'xx-large',
            #'figure.figsize': (6, 5),
            'axes.labelsize': 'xx-large',
            'axes.titlesize':'xx-large',
            'xtick.labelsize':'xx-large',
            'ytick.labelsize':'xx-large'}
        matplotlib.pylab.rcParams.update(params)

        def _convert_range(x,minimum, maximum,a,b):
            return (x - minimum)/(maximum - minimum) * (b - a) + a

        def _no_convert_range(x,minimum, maximum,a,b):
            return x

        convert_range = _convert_range
        #convert_range = _no_convert_range

        gt_max,gt_min = max(gt_returns),min(gt_returns)
        pred_max,pred_min = max(pred_returns),min(pred_returns)
        max_observed = np.max(gt_returns[np.where(seen!=1)])

        # Draw P
        fig,ax = plt.subplots()

        ax.plot(gt_returns[np.where(seen==0)],
                [convert_range(p,pred_min,pred_max,gt_min,gt_max) for p in pred_returns[np.where(seen==0)]], 'go') # unseen trajs
        ax.plot(gt_returns[np.where(seen==1)],
                [convert_range(p,pred_min,pred_max,gt_min,gt_max) for p in pred_returns[np.where(seen==1)]], 'bo') # seen trajs for T-REX
        ax.plot(gt_returns[np.where(seen==2)],
                [convert_range(p,pred_min,pred_max,gt_min,gt_max) for p in pred_returns[np.where(seen==2)]], 'ro') # seen trajs for BC

        ax.plot([gt_min-5,gt_max+5],[gt_min-5,gt_max+5],'k--')
        #ax.plot([gt_min-5,max_observed],[gt_min-5,max_observed],'k-', linewidth=2)
        #ax.set_xlim([gt_min-5,gt_max+5])
        #ax.set_ylim([gt_min-5,gt_max+5])
        ax.set_xlabel("Ground Truth Returns")
        ax.set_ylabel("Predicted Returns (normalized)")
        fig.tight_layout()

        plt.savefig(figname)
        plt.close()

    save_path = os.path.join(args.log_dir,'gt_vs_pred_rewards.pdf')
    _draw(np.array(gt_returns),np.array(pred_returns),np.array(seen),save_path)
Code example #21
if flag:  # rearrange columns to bring blue flux next to blue columns
    df_combined.insert(5, 'flux' + suffix, "")
    print('flux' + suffix)
else:
    print('flux' + suffix)
    df_combined.insert(13, 'flux' + suffix, "")

for index, row in df_combined.iterrows():
    z = row['z' + suffix]
    sigma_v = row['vel' + suffix]
    amp = row['amp' + suffix]
    width = width_calc(sigma_v)

    if (sigma_v > 0):
        #calibrate model
        model_test = Model(z, wg, width, 0.7, amp).squeeze()
        calibrated_model = model_test / flux_calibs
        calibrated_model[np.isnan(calibrated_model)] = 0.

        #calculate flux within 3-sigma
        wg_integ = wg[np.abs(wg - (lambda27*(1+z) - 3*width)).argmin():\
         np.abs(wg - (lambda29*(1+z) + 3*width)).argmin()]
        calibrated_model_integ = \
        calibrated_model[np.abs(wg - \
             (lambda27*(1+z) - 3*width)).argmin():\
             np.abs(wg - (lambda29*(1+z) + 3*width)).argmin()]

        #multiply by 1e-19 because that is the unit of header and divide by 1e-17
        #to get in DESI units
        flux = trapz(y=calibrated_model_integ, x=wg_integ) * (1e-19) / (
            (1e-17))
Code example #22
def SNR_calculator_modified(maskname, data, z, rel_strngth, fudge_factor):
    """modified version of SNR_calculator"""

    #Read data
    image = data['data_ivar'][:, 0, :]
    ivar = data['data_ivar'][:, 1, :]
    crval1, wg = wave_grid(data)
    """Gaussian width, sigma = sqrt(sigma_lambda^2 + sigma_slit^2) where, 
	sigma_lambda = sigma_v/c*lambda(z); sigma_v = [0, 300] km/s
	sigma_slit = 3.3/sqrt(12)*delLambda_pixel	
	"""

    delLambda_pixel = float(str(data['headers'][1]).split("CDELT1")[1]\
     .split("=")[1].split("/")[0])*10. #size of the pixel in angstrom

    #fudge factor introduced to make fit better. Required because size of object may be smaller
    sigma_slit = ((3.3 / np.sqrt(12)) * delLambda_pixel) * fudge_factor
    c = 299792.458  #km/s
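    # `sigma_v` (the array of candidate velocity dispersions) and `lambda0`
    # (the rest-frame [OII] wavelength) are assumed to be module-level
    # globals defined elsewhere in this project.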

    def widthlist(z):
        """Returns an array of possible Gaussian widths for the [OII] doublet
		model testing"""

        sigma_lambda = sigma_v / c * (lambda0 * (1 + z))  #in observing frame

        return np.sqrt(sigma_lambda**2 + sigma_slit**2)

    #sigma_v size same as number of Gaussian width models
    #store all the SNR values (z x num. of objects x width)
    SNRs = np.zeros((image.shape[0], sigma_v.size))

    #Save all the amplitudes and chi_sq to pass this to the PeakZoom function
    #Amps -> (z x num. of objects x width)
    #del_chi_sq -> (z x num. of objects x width)
    Amps = np.zeros(SNRs.shape)
    del_chi_sq = np.zeros(SNRs.shape)

    #Store all the widths b/c widths are a func. of z
    widths = np.zeros((sigma_v.size))

    #-------------------------------------------#
    #WORK ON THIS LATER
    #Save all the medians to pass to PeakZoom function
    medians = np.zeros((image.shape[0]))
    #-------------------------------------------#

    wg2 = Window(z, wg)
    # Must recompute and save `widths` here because widths depend on z;
    # previously the widths from the last z were reused by mistake.
    widths = widthlist(z)
    model = Model(z, wg2, widths, relative_strength=rel_strngth)

    #Find the idx of the edges of the windows and slice the image file to multiply with modelPrime
    minidx = np.where(wg == np.min(wg2))[0][0]
    maxidx = np.where(wg == np.max(wg2))[0][0]
    imageSliced = np.copy(
        image[:, minidx:maxidx +
              1])  #imageSliced -> (num. of obj x window wavelength)
    ivarSliced = np.copy(
        ivar[:, minidx:maxidx +
             1])  #ivarSliced -> (num. of obj x window wavelength)

    #create a copy of imageSliced to set peak area to 0 to get proper dc level
    #[z-0.001, z + 0.001] range where we expect the doublet to be and set it to 0
    upperlam = lambda0 * (1 + (z + 0.001))
    lowerlam = lambda0 * (1 + (z - 0.001))

    #find corresponding indices to set imageSliced values inside that range to be 0
    rightidx = np.abs(wg2 - upperlam).argmin()
    leftidx = np.abs(wg2 - lowerlam).argmin()

    imageSlicedtmp = np.copy(imageSliced)
    imageSlicedtmp[:, leftidx:rightidx + 1] = 0

    #Ignore 0s in imageSliced when calculating median
    #Source: https://stackoverflow.com/questions/22049140/how-can-i-ignore-zeros-when-i-take-the-median-on-columns-of-an-array/22049849#22049849
    median_val = np.apply_along_axis(lambda v: np.median(v[v != 0]), 1,
                                     imageSlicedtmp)
    # Some windows are all zeros, which makes np.median(v[v != 0]) return NaN.
    median_val[np.isnan(median_val)] = 0.
    medians = median_val
    imageSlicedtmp2 = imageSliced - median_val[:,
                                               np.newaxis]  #median subtraction
    imageSliced = imageSlicedtmp2
    """numpy dot -> sum product over last axis of a and
	second-to-last axis of b where np.dot(a,b)
	Here, (imageSliced*ivarSliced) -> elementwise multiplication
	producing (num of obj x window) and model -> (window x width).
	Hence, summing over window, i.e. range of pixels gives us the 
	numerator -> (num of obj x width)"""
    trm1 = np.multiply(imageSliced, ivarSliced)[:, :, np.newaxis]
    trm2 = model[np.newaxis, :, :]
    Numerator = trm1 * trm2
    Numerator = np.sum(Numerator, axis=1)
    """numpy dot -> sum product over last axis of a and
	second-to-last axis of b where np.dot(a,b)
	Here, (model * model) -> elementwise multiplication
	producing (window x width) and ivar -> (num of obj x window).
	Hence, summing over window, i.e. range of pixels gives us the 
	denominator. We can do ivar.model^2 because since the operation
	is pixel-wise, it does not matter whether it is model^2.ivar
	or ivar.model^2.
	Denominator -> (num of obj x width)"""
    trm3 = ivarSliced[:, :, np.newaxis]
    trm4 = np.multiply(model, model)
    trm5 = trm4[np.newaxis, :, :]
    Denominator = trm3 * trm5
    Denominator = np.sum(Denominator, axis=1)
    """
	M' = M/sigma_px; D' = D/sigma_px
	A = (D'.M')/(M'.M')
	sigmaA^(-2) = M'.M'
	Let, isigmaA = sqrt(M'.M')
	SNR = A/sigmaA => SNR = A*isigmaA
	"""

    Amp = Numerator / (Denominator + 1.0e-100)  #
    isigmaA = np.sqrt(Denominator)
    SNR = Amp * isigmaA

    SNRs = SNR
    Amps = Amp
    """chi_sq = sum over pixel ((image - model*amp)^2*ivar)
	del chi_sq = sum over pixel ((image - model*amp)^2*ivar) - (image^2*ivar)
	firstrm = image
	secondtrm = model*amp
	thirdtrm = image^2 = firstrm^2
	delterm1 = ((image - model*amp)^2*ivar)
	delterm2 = (image^2*ivar)
	"""

    firstrm = imageSliced[:, np.newaxis, :]  #(ngal x 1 x range)
    secondtrm = Amp[:, :, np.newaxis] * model.T[
        np.newaxis, :, :]  #(ngal x width x range)
    thirdtrm = firstrm**2  #(ngal x 1 x range)
    ivartrm = ivarSliced[:, np.newaxis, :]  #(ngal x 1 x range)

    diff = firstrm - secondtrm
    delterm1 = (diff**2) * ivartrm
    delterm2 = thirdtrm * ivartrm

    del_chi = delterm1 - delterm2
    del_chi2 = np.nansum(del_chi, axis=2)
    del_chi_sq = del_chi2

    SNRs_final = SNRs  #This maintains the indices
    Amps_final = Amps
    del_chi_sq_final = del_chi_sq

    return widths, SNRs_final, Amps_final, del_chi_sq_final, medians
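
A self-contained numpy sketch of the matched-filter algebra documented above (A = (D'.M')/(M'.M'), sigma_A^(-2) = M'.M', SNR = A * sqrt(M'.M')), using a synthetic Gaussian line in place of the [OII] doublet model; all names here are illustrative:

import numpy as np

rng = np.random.default_rng(0)
px = np.arange(100, dtype=float)

# Synthetic model M (unit-amplitude Gaussian) and data D = A_true * M + noise.
model = np.exp(-0.5 * ((px - 50.0) / 3.0) ** 2)
A_true, sigma_px = 5.0, 0.5
data = A_true * model + rng.normal(0.0, sigma_px, px.size)
ivar = np.full(px.size, 1.0 / sigma_px**2)  # per-pixel inverse variance

# A = sum(D * M * ivar) / sum(M^2 * ivar);  sigma_A^(-2) = sum(M^2 * ivar)
numerator = np.sum(data * model * ivar)
denominator = np.sum(model * model * ivar)
amp = numerator / denominator          # best-fit amplitude
snr = amp * np.sqrt(denominator)       # amplitude over its uncertainty
print(amp, snr)  # amp should recover A_true closely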
Code example #23
File: main.py Project: flamz3d/vae-pytorch
        vae_model = gamma_vae(opt.dataset)
        compute_vae = compute_gamma


    maps_folder = os.path.join(data_folder, "maps", opt.model)
    if not os.path.isdir(maps_folder):
        os.makedirs(os.path.join(maps_folder,"train"))
        os.makedirs(os.path.join(maps_folder,"val"))

    models_folder = os.path.join(data_folder, "models")
    if not os.path.isdir(models_folder):
        os.makedirs(models_folder)

    print("{} model chosen.\n".format(opt.model))

    vae = Model(vae_model,z_dim=opt.z_dim)

    best_loss = float("inf")
    best_epoch = -1

    for epoch in range(opt.epochs):

        for m in metrics:
            m.reset()

        print("====== Epoch {} ======".format(epoch))
        train(epoch, vae, t_generator, compute_vae, metrics, (models_folder, maps_folder), opt, train_logger)
        vae_loss,log_p_x = val(epoch, vae, v_generator, compute_vae, metrics, (models_folder, maps_folder), opt, val_logger)
        
        is_best = False
        if vae_loss < best_loss:
Code example #24
def tina_et_charlie(model_name,
                    use_cuda,
                    nbr_point_next,
                    painting_completing,
                    painting_conditioning,
                    sig=0.1):
    # transform seq of stroke into (nbr,3)
    painting_completing = from_larray_to_3array(painting_completing)
    painting_conditioning = from_larray_to_3array(painting_conditioning)

    # load hp
    hp_path = 'draw_models/hp_folder/' + model_name[:-4] + '.pickle'
    with open(hp_path, 'rb') as handle:
        hp = pickle.load(handle)
    hp.use_cuda = use_cuda

    # load model
    model = Model(hyper_parameters=hp, parametrization='point')
    encoder_name = 'draw_models/encoder_' + model_name
    decoder_name = 'draw_models/decoder_' + model_name
    model.load(encoder_name, decoder_name)

    # It is in format (x,y,p) put it into that (dx,dy,p) format
    datum = painting_completing
    # offset the coordinate
    datum[1:, 0:2] = datum[1:, 0:2] - datum[:-1, 0:2]
    # compute the std of initial image
    mean_ini, std_ini = compute_variance(datum)
    # normalize the painting to complete
    datum = scale_stroke(datum, std_ini)
    # format from (dx,dy,p) to the 5
    img_to_complete = make_image_point(datum)

    # determining the image that will condition the latent vector z.
    # format (x,y,p) to (dx,dy,p)
    img_full = painting_conditioning
    img_full[1:, 0:2] = img_full[1:, 0:2] - img_full[:-1, 0:2]
    mean_full, std_full = compute_variance(img_full)
    img_full = scale_stroke(img_full, std_full)
    img_full = make_image_point(img_full)

    # complete
    img_tail = model.finish_drawing_point(img_to_complete,
                                          use_cuda,
                                          nbr_point_next=nbr_point_next,
                                          img_full=img_full,
                                          sigma=sig)

    # process the tail so that it has the same variance as the images
    # it tries to complete.
    mean_tail, std_tail = compute_variance(img_tail)
    img_tail = scale_stroke(img_tail, std_tail)
    img_total = np.concatenate((datum, img_tail), 0)

    # plot the image..
    (img_coo, img_offset) = make_seq(img_total[:, 0], img_total[:, 1],
                                     img_total[:, 2])
    (img_tail_coo, img_tail_offset) = make_seq(img_tail[:, 0], img_tail[:, 1],
                                               img_tail[:, 2])

    make_image(img_coo, 1, dest_folder=None, name='_output_',
               plot=True)  #plot=args_draw.plot)
    make_image(img_tail_coo, 2, dest_folder=None, name='_output_', plot=True)

    # Transform (nbr,3) to list of array (nbr,2)
    # TODO: check the handling of the origin
    img_completed = from_3array_to_larray(img_tail_coo)
    return img_completed
Code example #25
def complete(model_name,
             use_cuda,
             nbr_point_next,
             painting_completing=None,
             painting_conditioning=None,
             idx=None,
             sig=0.1):
    '''
    Methods:
    --------
    painting_completing/conditioning are images in format (nbr, 3), each row
    being (x, y, p), and we want to complete them. Beware, there are other
    formats: (dx, dy, p), where (dx, dy) is the offset with respect to the
    previous point, and another one of size (nbr, 5), the one used by the
    neural net.

    Inputs:
    -------
        - model_name : a string of the type 'broccoli_car_cat_20000.pth'.
        - use_cuda : Boolean.
        - idx : integer index of an image in the dataset associated with
    model_name.
        - painting_completing : a parameterized version ready for the neural
    network. It should be of the same format as 'datum': a numpy array of
    dimension (nbr_points, 3), each line being (x, y, p), where (x, y) is
    the coordinate (TODO: with respect to what?) and p in {0, 1} says
    whether or not the points are linked.
        - painting_conditioning : comes in the same format. The goal is to
    provide a latent vector z.
        - sig : the variance of the latent normal vector z when not using
    the latent vector of a global image. (Although we may want the latent
    vector of a given area..)

    Outputs:
    --------
    '''
    if painting_completing is None and idx is None:
        raise ValueError('at least one of painting_completing and idx '
                         'must be provided.')
    # load hp
    hp_path = 'draw_models/hp_folder/' + args_draw.model[:-4] + '.pickle'
    with open(hp_path, 'rb') as handle:
        hp = pickle.load(handle)
    hp.use_cuda = use_cuda

    # load model
    model = Model(hyper_parameters=hp, parametrization='point')
    encoder_name = 'draw_models/encoder_' + args_draw.model
    decoder_name = 'draw_models/decoder_' + args_draw.model
    model.load(encoder_name, decoder_name)

    # prepare img to complete and image that condition
    if idx is not None:
        # Then completing an image of the dataset
        regular = '[_][0-9]+'
        name_mid = re.split(regular, args_draw.model[:-4])
        name_mid = name_mid[0]
        path_data = 'data/' + name_mid + '.npz'
        try:
            dataloader = DataLoader(path_data, hp)
        except:
            # TODO: find which is the error to except
            print('the path to dataset is not working')
        idx = np.random.randint(1, 30)
        datum = dataloader.data[idx]
        (nbr_points_datum, _) = datum.shape
        # TODO: make 0.6 as a parameter...
        datum = datum[:int(nbr_points_datum * 0.6)]
        # TODO: remember what make_image_point is doing
        img_full = make_image_point(datum)
        img_to_complete = make_image_point(
            datum)  # img is in the parametrized format
    else:
        # completing our own image
        # It is in format (x,y,p) put it into that (dx,dy,p) format
        datum = painting_completing
        # offset the coordinate
        datum[1:, 0:2] = datum[1:, 0:2] - datum[:-1, 0:2]
        # compute the std of initial image
        mean_ini, std_ini = compute_variance(datum)
        # normalize the painting to complete
        datum = scale_stroke(datum, std_ini)
        # format from (dx,dy,p) to the 5
        img_to_complete = make_image_point(datum)

        # determining the image that will condition the latent vector z.
        if painting_conditioning is not None:
            # format (x,y,p) to (dx,dy,p)
            img_full = painting_conditioning
            img_full[1:, 0:2] = img_full[1:, 0:2] - img_full[:-1, 0:2]
            mean_full, std_full = compute_variance(img_full)
            img_full = scale_stroke(img_full, std_full)
            img_full = make_image_point(img_full)
        else:
            img_full = None

    # complete the stuff : img_tail is in format (dx, dy, p)
    # max_length_mean =
    img_tail = model.finish_drawing_point(img_to_complete,
                                          use_cuda,
                                          nbr_point_next=nbr_point_next,
                                          img_full=img_full,
                                          sigma=sig)
    # process the tail so that it has the same variance as the images
    # it tries to complete.
    mean_tail, std_tail = compute_variance(img_tail)
    # print(mean_tail, std_tail)

    img_tail = scale_stroke(img_tail, std_tail)

    # TODO: check that the concatenation is ok
    img_total = np.concatenate((datum, img_tail), 0)

    # plot the image..
    (img_coo, img_offset) = make_seq(img_total[:, 0], img_total[:, 1],
                                     img_total[:, 2])
    (img_tail_coo, img_tail_offset) = make_seq(img_tail[:, 0], img_tail[:, 1],
                                               img_tail[:, 2])

    make_image(img_coo, 1, dest_folder=None, name='_output_',
               plot=True)  #plot=args_draw.plot)
    make_image(img_tail_coo, 2, dest_folder=None, name='_output_', plot=True)
Code example #26
File: predict.py Project: HTY2003/KiBoard
import os

import tensorflow as tf  # TF 1.x API (tf.train.AdamOptimizer is used below)
from keras_preprocessing.text import Tokenizer
from nltk.corpus import gutenberg
from utils import Model, TrainingValues, clean_text

#---------- Recreating data used to train model ----------
text = " ".join(clean_text(gutenberg.raw(i)) for i in gutenberg.fileids()[:3])
tokenizer = Tokenizer()
tokenizer.fit_on_texts([text])
encoded = tokenizer.texts_to_sequences([text])[0]
vocab_size = len(tokenizer.word_index) + 1
word2idx = tokenizer.word_index
idx2word = tokenizer.index_word

#---------- Restoring model from checkpoint ----------
model = Model(vocab_size, TrainingValues.EMBEDDING_DIM, TrainingValues.UNITS,
              TrainingValues.BATCH_SIZE)
optimizer = tf.train.AdamOptimizer()
checkpoint_dir = './training_checkpoints_1'
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model)
checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir))


#---------- Obtaining predictions from model ----------
class NextWord:
    def predict(word, n=1):
        try:
            input_eval = [word2idx[word.lower()]]
            input_eval = tf.expand_dims(input_eval, 0)
            hidden = [tf.zeros((1, TrainingValues.UNITS))]
            predictions, hidden = model(input_eval, hidden)
Code example #27
        if args.exp_num in [64, 65]:
            net = PairFiltering(args)
            loss = nn.CrossEntropyLoss()
            monitor_outputs = [0]
        elif args.exp_num in [70]:
            net = RedesignedModel(args)
            loss = [nn.CrossEntropyLoss()] * 3 + [
                nn.SmoothL1Loss(), special_loss
            ]
        else:
            net = RedesignedModel(args)
            loss = [nn.CrossEntropyLoss()] + [nn.SmoothL1Loss()]
            monitor_outputs = [1]

        model = Model(net, args)
        model.train()

        train_dataset = VRDDataset('train', args)
        val_dataset = VRDDataset('test', args)

        train_loader = DataLoader(train_dataset,
                                  shuffle=True,
                                  batch_size=args.batch_size,
                                  num_workers=args.num_workers)
        val_loader = DataLoader(val_dataset,
                                batch_size=args.batch_size,
                                num_workers=args.num_workers)

        dset_loaders = {'train': train_loader, 'val': val_loader}
        dset_sizes = {'train': len(train_dataset), 'val': len(val_dataset)}
Code example #28
File: Classifier.py Project: yuv4r4j/CS6230-OMML
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])
    tr_dset = SVHN(root=p.dataroot,
                   split='train',
                   transform=transformations,
                   download=True)
    te_dset = SVHN(root=p.dataroot,
                   split='test',
                   transform=transformations,
                   download=True)

tr_d_loader = DataLoader(dataset=tr_dset, batch_size=64, shuffle=True)
te_d_loader = DataLoader(dataset=te_dset, batch_size=5000, shuffle=True)

# Build classifier architecture
model = Model(json.load(open(p.architecture)), p.init)
print(model)

if p.cuda != -1:
    model = model.cuda()

# Build Optimizer
optimizer = return_optimizer(p.opt, json.load(open(p.opt_params)),
                             model.parameters())
print(optimizer)

# Loss function
loss_fn = nn.CrossEntropyLoss()
if p.cuda != -1:
    loss_fn = loss_fn.cuda()
Code example #29
                     'audio_files',
                     additional=['nlp_keywords', 'latest_keywords'])
train, test = dp.get_train_test()

X_train, X_val = train_test_split(train,
                                  test_size=0.2,
                                  stratify=train['label'],
                                  shuffle=True,
                                  random_state=4738)

batch_size = 3

train_loader = DataLoader(Data(X_train, data_augmentation=True),
                          batch_size=batch_size)
val_loader = DataLoader(Data(X_val, data_augmentation=False),
                        batch_size=batch_size)

lr_monitor = LearningRateMonitor(logging_interval='epoch')
mc = pl.callbacks.ModelCheckpoint(filepath='{epoch}-{CE_val:.5f}',
                                  save_top_k=3,
                                  save_weights_only=True,
                                  monitor='CE_val')

model = Model()
trainer = pl.Trainer(gpus=None,
                     precision=32,
                     callbacks=[mc, lr_monitor],
                     progress_bar_refresh_rate=5,
                     max_epochs=120)
trainer.fit(model, train_loader, val_loader)
Code example #30
    data = data[data[:, 0] > object_threshold, :]

    scores_, class_, boxes_ = data[:, 0:1], data[:, 1:2], data[:, 2:]

    return boxes_, scores_, class_


def convert_box_to_vrd(box, img_id=None):
    if img_id == '194654941_052c0bd67f_o.jpg':
        return [box[i] for i in [0, 2, 1, 3]]
    else:
        return [box[i] for i in other_to_vrd]


net = PairFiltering()
model = Model(net)
model.load(
    'model_checkpoints/Experiment_64/weights.03-train_loss:0.01-train_acc:0.85-val_loss:0.01-val_acc:0.84.pkl'
)
model.eval()


def filter_pair(union_bbox, subject_bbox, object_bbox, subject_id, object_id,
                img):
    union_mask = torch.zeros(*img.shape[:2])
    union_mask[union_bbox[0]:union_bbox[1], union_bbox[2]:union_bbox[3]] = 1

    subject_mask = torch.zeros(*img.shape[:2])
    subject_mask[subject_bbox[0]:subject_bbox[1],
                 subject_bbox[2]:subject_bbox[3]] = 1