Example #1
def main():
    """Run only when this file is called directly."""
    # Setup hyperparameters
    CONFIG = get_config()

    # Log to File and Console
    logger = get_logger(log_to_console=True, log_to_file=CONFIG.LOG_TO_FILE)

    # Configure CPU threads and select the compute device
    torch.set_num_threads(CONFIG.CPU_THREADS)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    if not torch.cuda.is_available():
        logger.warning("GPU not available: this run could be slow.")

    # Setup environment
    env = gym.make(CONFIG.ENV_NAME)
    eval_env = gym.make(CONFIG.ENV_NAME)

    # Fix random seeds
    if CONFIG.RANDOM_SEED is not None:
        set_global_random_seeds(CONFIG.RANDOM_SEED,
                                use_numpy=True,
                                use_torch=True)
        # NOTE(seungjaeryanlee): Seeds for env and eval_env are different for fair evaluation
        set_env_random_seeds(env, CONFIG.RANDOM_SEED)
        set_env_random_seeds(eval_env, CONFIG.RANDOM_SEED + 1)
    else:
        logger.warning(
            "Running without a random seed: this run is NOT reproducible.")

    # Setup agent and replay buffer
    q_net = QNetwork(env.observation_space.shape[0],
                     env.action_space.n).to(device)
    optimizer = optim.RMSprop(
        q_net.parameters(),
        lr=CONFIG.RMSPROP_LR,
        alpha=CONFIG.RMSPROP_DECAY,
        eps=CONFIG.RMSPROP_EPSILON,
        momentum=CONFIG.RMSPROP_MOMENTUM,
        weight_decay=CONFIG.RMSPROP_WEIGHT_DECAY,
        centered=CONFIG.RMSPROP_IS_CENTERED,
    )
    if CONFIG.LOAD_PATH:
        # Load parameters if possible
        load_models(CONFIG.LOAD_PATH, q_net=q_net, optimizer=optimizer)
    dqn_agent = DQNAgent(env, q_net, optimizer, device)

    replay_buffer = CircularReplayBuffer(env,
                                         maxlen=CONFIG.REPLAY_BUFFER_SIZE,
                                         device=device)

    # Train and evaluate agent
    train_eval(dqn_agent, replay_buffer, env, eval_env, device, logger, CONFIG)
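Most of the examples on this page call a project-specific load_models helper rather than a library function. For orientation, here is a minimal sketch of the checkpoint pair implied by calls such as load_models(CONFIG.LOAD_PATH, q_net=q_net, optimizer=optimizer); the keyword-based state-dict layout is an assumption, not the original implementation.

import torch


def save_models(path, **named_modules):
    # Hypothetical sketch: persist one state dict per keyword argument
    torch.save({name: m.state_dict() for name, m in named_modules.items()}, path)


def load_models(path, **named_modules):
    # Hypothetical sketch: restore each module/optimizer by its keyword name
    checkpoint = torch.load(path, map_location="cpu")
    for name, m in named_modules.items():
        m.load_state_dict(checkpoint[name])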
Example #2
def webcam():

    models = load_models()

    iteration = 0

    cam = cv2.VideoCapture(0)
    cv2.namedWindow('HackDrone', cv2.WND_PROP_FULLSCREEN)
    cv2.setWindowProperty('HackDrone', cv2.WND_PROP_FULLSCREEN,
                          cv2.WINDOW_FULLSCREEN)

    while True:

        ret_val, image = cam.read()
        if not ret_val:
            break  # camera read failed

        if iteration > 5:
            # RECOGNITION
            print('Recognition')
            result = recognize(image, models)
            if result is not None:
                letter, prob = result
                text = 'Letter %s, confidence: %.2f' % (letter, prob)
                cv2.putText(image, text, bottomLeftCornerOfText, font,
                            fontScale, fontColor, lineType)

        cv2.imshow('HackDrone', image)

        sleep(1)

        iteration += 1

        if cv2.waitKey(1) == 27:
            break  # esc to quit

    cv2.destroyAllWindows()
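This excerpt draws text with cv2.putText using constants (bottomLeftCornerOfText, font, fontScale, fontColor, lineType) that are defined elsewhere in the module. Plausible module-level values, assumed purely for illustration:

import cv2

# Assumed values for the drawing constants used above (not from the original module)
bottomLeftCornerOfText = (10, 460)  # (x, y) pixel position of the text baseline
font = cv2.FONT_HERSHEY_SIMPLEX
fontScale = 1.0
fontColor = (255, 255, 255)  # white in BGR
lineType = 2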
Example #3
def main(args):
  
  vocab = load_vocab()
  
  encoder = CNNEncoder()
  decoder = DecoderRNN(512, 512, len(vocab))
  
  encoder_state_dict, decoder_state_dict, optimizer, *meta = utils.load_models(args.checkpoint_file,False)
  encoder.load_state_dict(encoder_state_dict)
  decoder.load_state_dict(decoder_state_dict)
  
  if torch.cuda.is_available():
    encoder.cuda()
    decoder.cuda()
    
  transform = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
        ])
  
  # cv2 loads images as BGR; convert to RGB before ImageNet normalization
  inp = cv2.imread(args.image_path)
  inp = cv2.cvtColor(inp, cv2.COLOR_BGR2RGB)
  inp = transform(Image.fromarray(inp)).unsqueeze(0)
  inp = utils.to_var(inp, volatile=True)
  
  features = encoder(inp)
  sampled_ids = decoder.sample(features)
  
  sampled_ids = sampled_ids.cpu().data.numpy()[0]
  sentence = utils.convert_back_to_text(sampled_ids, vocab)
  
  print('Caption:', sentence)
Example #4
    def __init__(self):
        super(FaceEnvironementContinuous, self).__init__()
        self.id = "FaceEnv"
        self.random_seed = 666
        self.action_space = spaces.Box(low=0, high=224, shape=(68, 2),
                                       dtype=np.float32)
        self.observation_space = spaces.Box(low=0, high=224, shape=(68, 2),
                                            dtype=np.float32)

        (self.embedder,
         self.generator,
         self.discriminator) = load_models(len(glob.glob(f"{ROOT_DATASET}/*")))
        self.embedder = self.embedder.eval()
        self.generator = self.generator.eval()
        self.discriminator = self.discriminator.eval()
        self.landmarks = None
        self.landmarks_done = deque(maxlen=MAX_DEQUE_LANDMARKS)
        self.contexts = None
        self.user_ids = None
        self.embeddings = None
        self.paramWeights = None
        self.paramBias = None
        self.layersUp = None
        self.iterations = 0
        self.episodes = 0
        self.max_iter = MAX_ITER_PERSON
        self.fig, self.axes = plt.subplots(2, 2)
        self.state = None
        self._max_episode_steps = 1000
        self.p = 0
Example #5
def main():

    with tf.Session(config=sess_config) as sess:

        # build environment
        _, env = build_env(config.env)

        # load actor & critic
        actor, critic = build_actor_critic(sess, env)
        load_models(sess, config.model_path)

        # evaluate final policy
        test_actor(env,
                   actor,
                   eval_len=config.test_episode_len // 100,
                   verbose=True)
Example #6
    def __init__(self, path_weights):
        super(FaceEnvironementDiscreete, self).__init__()
        # 0 Nothing / 1 Left / 2 Right / 3 Up / 4 Down
        self.id = "FaceEnv"
        self.action_space = spaces.Discrete(68 * 4)
        self.observation_space = spaces.Discrete(68 * 2)
        self._max_episode_steps = MAX_ITER_PERSON
        self.seed()

        (self.embedder,
         self.generator,
         self.discriminator) = load_models(3000)
        self.embedder = self.embedder.eval()
        self.generator = self.generator.eval()
        self.discriminator = self.discriminator.eval()

        self.landmarks = None
        self.landmarks_done = deque(maxlen=MAX_DEQUE_LANDMARKS)

        self.contexts = None
        self.user_ids = None
        self.embeddings = None
        self.paramWeights = None
        self.paramBias = None
        self.layersUp = None

        self.iterations = 0
        self.episodes = -1
        self.state = None

        self.fig, self.axes = plt.subplots(2, 2)
Example #7
def load_pool_models(pool_name, max_pool_size, agent_config,
                     exclude_current_from_opponents,
                     max_pool_size_exclude_current_from_opponent,
                     best_iteration_opponents, record_videos_new_iteration):
    # List all files in the agent pool, create a random agent if there are none
    this_folder = os.path.dirname(__file__)
    agents_folder = os.path.join(this_folder,
                                 '../Deep Learning Agents/' + pool_name)
    Path(agents_folder).mkdir(parents=True, exist_ok=True)
    agent_paths = [f for f in os.listdir(agents_folder) if f.endswith('.h5')]
    if not agent_paths:
        agent_paths = create_initial_random_agents(agents_folder, agent_config)
        if record_videos_new_iteration:
            utils.record_videos(os.path.join(agents_folder, agent_paths[0]),
                                agent_config['num_agents_per_game'],
                                agent_config['num_mirror_dim'])

    # Order the agent models on their iteration ids
    agent_paths = utils.decreasing_sort_on_iteration_id(agent_paths)
    agent_paths = agent_paths[:max_pool_size]

    # Load all agent models
    agent_full_paths = [os.path.join(agents_folder, p) for p in agent_paths]
    if best_iteration_opponents is None:
        agents = utils.load_models(agent_full_paths)
        this_agent = agents[0]
        if exclude_current_from_opponents or len(
                agents) <= max_pool_size_exclude_current_from_opponent:
            agents = agents[1:]
    else:
        # Load the latest iteration of all opponent pools
        this_agent = utils.load_models(agent_full_paths[:1])[0]
        agents = []
        for opponent_pool in best_iteration_opponents:
            opponent_dir = os.path.join(
                this_folder, '../Deep Learning Agents/' + opponent_pool)
            opponent_paths = [
                f for f in os.listdir(opponent_dir) if f.endswith('.h5')
            ]
            opponent_paths = utils.decreasing_sort_on_iteration_id(
                opponent_paths)
            opponent_full_path = os.path.join(opponent_dir, opponent_paths[0])
            agents.append(utils.load_models([opponent_full_path])[0])

    return this_agent, agents, agent_full_paths
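load_pool_models has many knobs; a hypothetical invocation, using an agent_config limited to the keys the function actually reads, might look like the following (pool names and sizes are illustrative only):

# Hypothetical usage sketch, not from the original project
agent_config = {'num_agents_per_game': 4, 'num_mirror_dim': 2}
this_agent, opponents, agent_full_paths = load_pool_models(
    pool_name='my_pool',
    max_pool_size=20,
    agent_config=agent_config,
    exclude_current_from_opponents=False,
    max_pool_size_exclude_current_from_opponent=10,
    best_iteration_opponents=None,
    record_videos_new_iteration=False)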
Example #8
def segment_text(input_text):
    with tempfile.TemporaryDirectory() as temp_dir:

        input_dir = join(temp_dir, 'input')
        output_dir = join(temp_dir, 'output')
        for dirname in [input_dir, output_dir]:
            os.makedirs(dirname)
        with open(join(input_dir, 'input.txt'), 'w') as wf:
            wf.write(input_text)

        embeddings, vocabulary = utils.load_models()
        segment.run_segmentation(input_dir, output_dir, embeddings, vocabulary)

        with open(join(output_dir, 'input.txt.seg')) as rf:
            segmented_text = rf.readlines()
        return segmented_text
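Assuming utils.load_models() returns the (embeddings, vocabulary) pair consumed by segment.run_segmentation, the helper can be called directly on raw text; the sample string below is illustrative only.

# Illustrative call; returns the segmented document as a list of lines
lines = segment_text("First topic sentence. Another sentence. A new topic begins here.")
print(''.join(lines))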
Example #9
def main():
    """Run NFQ."""
    # Setup hyperparameters
    parser = configargparse.ArgParser()
    parser.add("-c", "--config", required=True, is_config_file=True)
    parser.add("--EPOCH", type=int)
    parser.add("--TRAIN_ENV_MAX_STEPS", type=int)
    parser.add("--EVAL_ENV_MAX_STEPS", type=int)
    parser.add("--DISCOUNT", type=float)
    parser.add("--INIT_EXPERIENCE", type=int)
    parser.add("--INCREMENT_EXPERIENCE", action="store_true")
    parser.add("--HINT_TO_GOAL", action="store_true")
    parser.add("--RANDOM_SEED", type=int)
    parser.add("--TRAIN_RENDER", action="store_true")
    parser.add("--EVAL_RENDER", action="store_true")
    parser.add("--SAVE_PATH", type=str, default="")
    parser.add("--LOAD_PATH", type=str, default="")
    parser.add("--USE_TENSORBOARD", action="store_true")
    parser.add("--USE_WANDB", action="store_true")
    CONFIG = parser.parse_args()
    if not hasattr(CONFIG, "INCREMENT_EXPERIENCE"):
        CONFIG.INCREMENT_EXPERIENCE = False
    if not hasattr(CONFIG, "HINT_TO_GOAL"):
        CONFIG.HINT_TO_GOAL = False
    if not hasattr(CONFIG, "TRAIN_RENDER"):
        CONFIG.TRAIN_RENDER = False
    if not hasattr(CONFIG, "EVAL_RENDER"):
        CONFIG.EVAL_RENDER = False
    if not hasattr(CONFIG, "USE_TENSORBOARD"):
        CONFIG.USE_TENSORBOARD = False
    if not hasattr(CONFIG, "USE_WANDB"):
        CONFIG.USE_WANDB = False

    print()
    print(
        "+--------------------------------+--------------------------------+")
    print(
        "| Hyperparameters                | Value                          |")
    print(
        "+--------------------------------+--------------------------------+")
    for arg in vars(CONFIG):
        print("| {:30} | {:<30} |".format(
            arg,
            getattr(CONFIG, arg) if getattr(CONFIG, arg) is not None else ""))
    print(
        "+--------------------------------+--------------------------------+")
    print()

    # Log to File, Console, TensorBoard, W&B
    logger = get_logger()

    if CONFIG.USE_TENSORBOARD:
        from torch.utils.tensorboard import SummaryWriter

        writer = SummaryWriter(log_dir="tensorboard_logs")
    if CONFIG.USE_WANDB:
        import wandb

        wandb.init(project="implementations-nfq", config=CONFIG)

    # Setup environment
    train_env = CartPoleRegulatorEnv(mode="train")
    eval_env = CartPoleRegulatorEnv(mode="eval")

    # Fix random seeds
    if CONFIG.RANDOM_SEED is not None:
        make_reproducible(CONFIG.RANDOM_SEED, use_numpy=True, use_torch=True)
        train_env.seed(CONFIG.RANDOM_SEED)
        eval_env.seed(CONFIG.RANDOM_SEED)
    else:
        logger.warning(
            "Running without a random seed: this run is NOT reproducible.")

    # Setup agent
    nfq_net = NFQNetwork()
    optimizer = optim.Rprop(nfq_net.parameters())
    nfq_agent = NFQAgent(nfq_net, optimizer)

    # Load trained agent
    if CONFIG.LOAD_PATH:
        load_models(CONFIG.LOAD_PATH, nfq_net=nfq_net, optimizer=optimizer)

    # NFQ Main loop
    # A set of transition samples denoted as D
    all_rollouts = []
    total_cost = 0
    if CONFIG.INIT_EXPERIENCE:
        for _ in range(CONFIG.INIT_EXPERIENCE):
            rollout, episode_cost = train_env.generate_rollout(
                None, render=CONFIG.TRAIN_RENDER)
            all_rollouts.extend(rollout)
            total_cost += episode_cost
    for epoch in range(CONFIG.EPOCH + 1):
        # Variant 1: Incrementally add transitions (Section 3.4)
        # TODO(seungjaeryanlee): Done before or after training?
        if CONFIG.INCREMENT_EXPERIENCE:
            new_rollout, episode_cost = train_env.generate_rollout(
                nfq_agent.get_best_action, render=CONFIG.TRAIN_RENDER)
            all_rollouts.extend(new_rollout)
            total_cost += episode_cost

        state_action_b, target_q_values = nfq_agent.generate_pattern_set(
            all_rollouts)

        # Variant 2: Clamp function to zero in goal region
        # TODO(seungjaeryanlee): Since this is a regulator setting, should it
        #                        not be clamped to zero?
        if CONFIG.HINT_TO_GOAL:
            goal_state_action_b, goal_target_q_values = train_env.get_goal_pattern_set()
            goal_state_action_b = torch.FloatTensor(goal_state_action_b)
            goal_target_q_values = torch.FloatTensor(goal_target_q_values)
            state_action_b = torch.cat([state_action_b, goal_state_action_b],
                                       dim=0)
            target_q_values = torch.cat(
                [target_q_values, goal_target_q_values], dim=0)

        loss = nfq_agent.train((state_action_b, target_q_values))

        # TODO(seungjaeryanlee): Evaluation should be done with 3000 episodes
        eval_episode_length, eval_success, eval_episode_cost = nfq_agent.evaluate(
            eval_env, CONFIG.EVAL_RENDER)

        if CONFIG.INCREMENT_EXPERIENCE:
            logger.info(
                "Epoch {:4d} | Train {:3d} / {:4.2f} | Eval {:4d} / {:5.2f} | Train Loss {:.4f}"
                .format(  # noqa: B950
                    epoch,
                    len(new_rollout),
                    episode_cost,
                    eval_episode_length,
                    eval_episode_cost,
                    loss,
                ))
            if CONFIG.USE_TENSORBOARD:
                writer.add_scalar("train/episode_length", len(new_rollout),
                                  epoch)
                writer.add_scalar("train/episode_cost", episode_cost, epoch)
                writer.add_scalar("train/loss", loss, epoch)
                writer.add_scalar("eval/episode_length", eval_episode_length,
                                  epoch)
                writer.add_scalar("eval/episode_cost", eval_episode_cost,
                                  epoch)
            if CONFIG.USE_WANDB:
                wandb.log({"Train Episode Length": len(new_rollout)},
                          step=epoch)
                wandb.log({"Train Episode Cost": episode_cost}, step=epoch)
                wandb.log({"Train Loss": loss}, step=epoch)
                wandb.log({"Evaluation Episode Length": eval_episode_length},
                          step=epoch)
                wandb.log({"Evaluation Episode Cost": eval_episode_cost},
                          step=epoch)
        else:
            logger.info(
                "Epoch {:4d} | Eval {:4d} / {:5.2f} | Train Loss {:.4f}".
                format(epoch, eval_episode_length, eval_episode_cost, loss))
            if CONFIG.USE_TENSORBOARD:
                writer.add_scalar("train/loss", loss, epoch)
                writer.add_scalar("eval/episode_length", eval_episode_length,
                                  epoch)
                writer.add_scalar("eval/episode_cost", eval_episode_cost,
                                  epoch)
            if CONFIG.USE_WANDB:
                wandb.log({"Train Loss": loss}, step=epoch)
                wandb.log({"Evaluation Episode Length": eval_episode_length},
                          step=epoch)
                wandb.log({"Evaluation Episode Cost": eval_episode_cost},
                          step=epoch)

        if eval_success:
            logger.info(
                "Epoch {:4d} | Total Cycles {:6d} | Total Cost {:4.2f}".format(
                    epoch, len(all_rollouts), total_cost))
            if CONFIG.USE_TENSORBOARD:
                writer.add_scalar("summary/total_cycles", len(all_rollouts),
                                  epoch)
                writer.add_scalar("summary/total_cost", total_cost, epoch)
            if CONFIG.USE_WANDB:
                wandb.log({"Total Cycles": len(all_rollouts)}, step=epoch)
                wandb.log({"Total Cost": total_cost}, step=epoch)
            break

    # Save trained agent
    if CONFIG.SAVE_PATH:
        save_models(CONFIG.SAVE_PATH, nfq_net=nfq_net, optimizer=optimizer)

    train_env.close()
    eval_env.close()
Example #10
from collections import defaultdict
from multiprocessing import Pool
import os

import pandas as pd

from nltk.tag import pos_tag

from utils import get_related_forms, dump_json_set, load_json_set, clean_title
from utils import get_variants_and_derivatives, get_syn_thesaurus_net, get_syn_thesaurus_com
from utils import get_column_variants_per_pos, get_describe, get_related
from utils import get_common_words, load_models
from utils import get_ant_thesaurus_com, get_ant_antonymfor
from database import open_session, initialize_db
from database import ColumnD, ColumnI, ColumnJ, ColumnK, ColumnL, ColumnN
from tqdm import tqdm


MODELS = load_models()

def task_2():
    if os.path.exists('output/task_2.json'):
        print("Task 2 data found...")
        return load_json_set('output/task_2.json')
    else:
        print("Loading data for Task 2...")
        df = pd.read_excel('DATASET.xlsx', sheet_name='Task 2')
        set1 = set(map(lambda x: x.strip(), df['Unnamed: 1'].dropna().values))
        print("Getting related forms...")
        nouns, adjs, advs, vrbs = get_related_forms(set1, should_ban=True)
        print(len(nouns), len(adjs), len(advs), len(vrbs))
        with Pool(5) as p:
            f = p.map(get_related_forms, [nouns, adjs, advs, vrbs])
Example #11
date = datetime.datetime.now().replace(microsecond=0)
train_id = "_".join(CONFIG.values())
wandb.init(project="SacadeDetect",
           id=train_id,
           name=train_id,
           resume=LOAD_PREVIOUS,
           config=CONFIG)

vectorized_persons, voc = load_data(ROOT_DATASET)
train_loader, valid_loader = get_data_loader(vectorized_persons,
                                             workers=NUM_WORKERS)

check = Checkpoints()
(sacade_rnn, classifier,
 autoencoder) = load_models(voc.num_user,
                            load_previous_state=LOAD_PREVIOUS,
                            load_classifier=False)

wandb.watch((sacade_rnn, classifier, autoencoder))

Cel = nn.CrossEntropyLoss()

optimizerGru = torch.optim.Adam(sacade_rnn.parameters(), lr=LEARNING_RATE)
optimizerClas = torch.optim.Adam(classifier.parameters(), lr=LEARNING_RATE)
optimizerAuto = torch.optim.Adam(autoencoder.parameters(), lr=LEARNING_RATE)

sacade_rnn = sacade_rnn.to(DEVICE)
classifier = classifier.to(DEVICE)
autoencoder = autoencoder.to(DEVICE)
Cel = Cel.to(DEVICE)
Example #12
        with SummaryWriter(comment=f'_{h.name}') as writer:
            
            print(f'Run name: {h.name}')

            writer.add_hparams(vars(h), {})
                    
            dataset = BasicDataset(model_dir=h.dataroot)
            dataloader = torch.utils.data.DataLoader(dataset, batch_size=h.batch_size, num_workers=8, shuffle=True)
        
            print('Loading models')
            
            gen = Generator(h.nz, h.nc, h.ngf, device)
            discr = Discriminator(h.nc, h.ndf, device)
            
            if h.load_name.lower() != 'none':
                fname = load_models(os.path.join(h.modelroot,h.load_name),gen,discr,h.load_step)
                print(f'Loaded model: {fname}')
                
            gen_opt = optim.Adam(gen.parameters(), lr=h.lrG, betas=(h.beta1, h.beta2))
            discr_opt = optim.Adam(discr.parameters(), lr=h.lrD, betas=(h.beta1, h.beta2))
        
            # Create save dir
            save_dir = os.path.join(h.modelroot,h.name)        
            if not os.path.exists(save_dir):
                os.mkdir(save_dir)
                
            print('Beginning training')
        
            completed = train(
                dataloader,
                discr,
Example #13
    print("Stored.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Segmenting text documents from a given directory.")
    parser.add_argument(
        "input_dir",
        type=str,
        help="Path to the directory containing text documents to be segmented")
    parser.add_argument(
        "output_dir",
        type=str,
        help=
        "Path to the directory where the serialized tfrecords will be saved.")
    parser.add_argument(
        "--scores",
        type=int,
        default=0,
        help=
        "Indicates whether to print segmentation prediction probabilities next to each sentence (default 0 = false)."
    )

    args = parser.parse_args()
    embeddings, vocabulary = utils.load_models()
    predict(args.input_dir,
            args.output_dir,
            embeddings,
            vocabulary,
            scores=args.scores != 0)
Example #14
        kernel = 2
        x1 = np.arange(0.1, 1, 0.1)
        for i in tqdm(x1):
            train_and_store(X_train, y_train, i, kernel)
        x1 = np.arange(1, 31)
        for i in tqdm(x1):
            train_and_store(X_train, y_train, i, kernel)

    error_random = []
    error_last = []
    error_avg = []
    error_vote = []
    kernel = 1
    try:
        for i in np.arange(0.1, 1, 0.1):
            load_models(i, 1, 0)
            load_models(i, 2, 0)
        for i in np.arange(1, 11):
            load_models(i, 1, 0)
            load_models(i, 2, 0)
        for i in np.arange(11, 31):
            load_models(i, 2, 0)
    except Exception:
        print("Error: are you sure you copied the files into the model directory?")

    print("epoch: from 0.1 to 0.9 kernel:{}".format(kernel))
    x1 = np.arange(0.1, 1, 0.1)
    x2 = np.arange(1, 11)
    for i in tqdm(x1):
        e_r, e_l, e_a, e_v = load_and_test(X_train, X_test, y_test, i, kernel)
        error_random.append(e_r)
Example #15
utils.log_dirctory()
from torch.utils.tensorboard import SummaryWriter
writer = SummaryWriter(log_dir=PATH_LOG + "/log")


def init_weights(model):
    for name, param in model.named_parameters():
        nn.init.uniform_(param.data, -0.1, 0.1)


################################################
###
#model and dataset loading

if FINE_TUNING:
    model, word2id, id2word = utils.load_models()
    train_dataloader, test_dataloader, (word2id, id2word) = read_csv.table2dl(
        w2i=word2id, i2w=id2word)
else:
    train_dataloader, test_dataloader, (word2id, id2word) = read_csv.table2dl()
    model = model_making.load_model(len(word2id))
    model.apply(init_weights)

optimizer, scheduler = model_making.optimizer_scheduler(
    model, len(train_dataloader))
criterion = nn.CrossEntropyLoss(ignore_index=0)

example, lengths = utils.str2id(word2id)
###
#train
history_kappa_valid = []
Example #16
def main(args):
    # hyperparameters
    batch_size = args.batch_size
    num_workers = 2

    # Image Preprocessing
    transform = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
    ])

    vocab = load_vocab()

    loader = get_basic_loader(dir_path=os.path.join(args.image_path),
                              transform=transform,
                              batch_size=batch_size,
                              shuffle=False,
                              num_workers=num_workers)

    # Build the models
    embed_size = args.embed_size
    num_hiddens = args.num_hidden
    checkpoint_path = 'checkpoints'

    encoder = CNN(embed_size)
    decoder = RNN(embed_size,
                  num_hiddens,
                  len(vocab),
                  1,
                  rec_unit=args.rec_unit)

    encoder_state_dict, decoder_state_dict, optimizer, *meta = utils.load_models(
        args.checkpoint_file)
    encoder.load_state_dict(encoder_state_dict)
    decoder.load_state_dict(decoder_state_dict)

    if torch.cuda.is_available():
        encoder.cuda()
        decoder.cuda()

    # Generate captions with the loaded models
    try:
        results = []
        with torch.no_grad():
            for step, (images, image_ids) in enumerate(tqdm(loader)):
                images = utils.to_var(images)

                features = encoder(images)
                captions = beam_sample(decoder, features)
                # captions = decoder.sample(features)
                captions = captions.cpu().data.numpy()
                captions = [
                    utils.convert_back_to_text(cap, vocab) for cap in captions
                ]
                captions_formatted = [{
                    'image_id': int(img_id),
                    'caption': cap
                } for img_id, cap in zip(image_ids, captions)]
                results.extend(captions_formatted)
                print('Sample:', captions_formatted[0])
    except KeyboardInterrupt:
        print('Ok bye!')
    finally:
        import json
        file_name = 'captions_model.json'
        with open(file_name, 'w') as f:
            json.dump(results, f)
Example #17
from typing import Optional

from pydantic import BaseModel

from os import path
from decouple import config as cfg  # for environment variables
import argparse, joblib
from utils import load_models, check_inputs
from train import load_data2, transform
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split

DIR_NAME = path.dirname(__file__)
DATA_FOLDER = path.join(DIR_NAME, 'models', 'exp_01_default') 
DATA_CSV = cfg('DATA_CSV', cast=str)

#load models
model, tf_feature = load_models()
datas = pd.read_csv(path.join(DATA_FOLDER, DATA_CSV))

class Item(BaseModel):
    #classe: Optional[str] = None
    user: Optional[str] = 'None'
    week: Optional[int] = 7
    total_sessions: Optional[int] = 4
    total_mediaids: Optional[int] = 7
    total_days: Optional[int] = 3
    total_played: Optional[float] = 130.450
    max_played_time: Optional[float] = 58.280
    age_without_access: Optional[int] = -285
    sexo: Optional[str] = 'M'
    idade: Optional[float] = 42
    cidade: Optional[str] = 'None'
Example #18
import argparse, joblib
from os import path

from train import load_data

from utils import load_models, check_inputs

from flask import Flask, request, jsonify
app = Flask(__name__)

# Load models
model, tf = load_models()


@app.route('/predict', methods=['POST'])
def predict():
    if request.method == 'POST':
        # Check inputs
        x = check_inputs(request.json['features'])
        y_hat = model.predict(tf.transform(x))

        return jsonify(output={"y_hat": y_hat.tolist()},
                       status=200,
                       message='Model Working')


@app.route('/predict_test')
def predict_test():
    X, y = load_data()
    X_tf = tf.transform(X)
    y_hat = model.predict(X_tf)
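For reference, a client call against the /predict route above could look like the following, assuming the app is served locally on Flask's default port; the exact layout of 'features' depends on check_inputs, so the payload is illustrative only.

import requests

# Hypothetical payload; the real feature ordering is defined by check_inputs()
resp = requests.post('http://localhost:5000/predict',
                     json={'features': [[7, 4, 7, 3, 130.45, 58.28, -285]]})
print(resp.json())  # {'output': {'y_hat': [...]}, 'status': 200, 'message': 'Model Working'}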
Example #19
warnings.simplefilter(action='ignore')


#path_to_data = '/courses/cs342/Assignment2/'
path_to_data = ''
split_count = 3
columns = 64

start = time.time()
chunks = 5000000

test_meta = pd.read_csv(path_to_data+'test_set_metadata.csv')

g_wtable, eg_wtable = utils.get_wtables(path_to_data)

g_clfs = utils.load_models(split_count, True, g_wtable)
eg_clfs = utils.load_models(split_count, False, eg_wtable)

folds = StratifiedKFold(n_splits=split_count, shuffle=True)

straddler = None

for i_c, data_chunk in enumerate(pd.read_csv(path_to_data + 'test_set.csv', chunksize=chunks, iterator=True)):

    if i_c != 0:
        data_chunk = pd.concat([straddler, data_chunk], ignore_index=True)

    # Hold back the last (possibly chunk-straddling) object so it can be
    # prepended to the next chunk and processed whole
    arr = data_chunk['object_id'].unique()
    straddler = data_chunk.loc[data_chunk['object_id'] == arr[-1]]

    data_chunk = data_chunk[data_chunk.object_id != arr[-1]]
Example #20
def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if not os.path.isdir(args.log_dir):
        os.makedirs(args.log_dir)

    summary_writer = SummaryWriter(log_dir=args.log_dir) if args.log else None

    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")

    train_loader = torch.utils.data.DataLoader(datasets.MNIST(
        '../data',
        train=True,
        download=True,
        transform=transforms.Compose([
            transforms.ToTensor(),
            transforms.Lambda(lambda x: (x > .5).float())
        ])),
                                               batch_size=args.bsize,
                                               shuffle=True)

    test_loader = torch.utils.data.DataLoader(datasets.MNIST(
        '../data',
        train=False,
        transform=transforms.Compose([
            transforms.ToTensor(),
            transforms.Lambda(lambda x: (x > .5).float())
        ])),
                                              batch_size=args.bsize,
                                              shuffle=True)

    context_encoder = ContextEncoder()
    context_to_dist = ContextToLatentDistribution()
    decoder = Decoder()
    if args.aggregator == "mean":
        aggregator = MeanAgregator()
    elif args.aggregator == "vector_attention":
        aggregator = VectorAttentionAggregator(128)
    else:
        assert args.aggregator == "query_attention"
        aggregator = QueryAttentionAggregator(128)

    if args.resume_file is not None:
        load_models(args.resume_file, context_encoder, context_to_dist,
                    decoder, aggregator)
    context_encoder = context_encoder.to(device)
    decoder = decoder.to(device)
    context_to_dist = context_to_dist.to(device)
    aggregator = aggregator.to(device)

    full_model_params = list(context_encoder.parameters()) + list(
        decoder.parameters()) + list(context_to_dist.parameters()) + list(
            aggregator.parameters())
    optimizer = optim.Adam(full_model_params, lr=args.lr)

    train(context_encoder,
          context_to_dist,
          decoder,
          aggregator,
          train_loader,
          test_loader,
          optimizer,
          args.epochs,
          device,
          args.models_path,
          summary_writer=summary_writer,
          save_every=args.save_every,
          log=args.log)
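The aggregator chosen above reduces a set of per-point context representations to a single vector. MeanAgregator's implementation is not shown in this excerpt; a minimal sketch consistent with that role (spelling kept from the identifier above) could be:

import torch
import torch.nn as nn


class MeanAgregator(nn.Module):
    # Hypothetical sketch: average per-point context features over the set dimension
    def forward(self, r):
        # r: (batch, num_context_points, feature_dim) -> (batch, feature_dim)
        return r.mean(dim=1)


print(MeanAgregator()(torch.randn(8, 10, 128)).shape)  # torch.Size([8, 128])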
Example #21
def main(args):
    # hyperparameters
    batch_size = args.batch_size
    num_workers = 1

    # Image Preprocessing
    transform = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
    ])

    # load COCOs dataset
    IMAGES_PATH = 'data/train2014'
    CAPTION_FILE_PATH = 'data/annotations/captions_train2014.json'

    vocab = load_vocab()
    train_loader = get_coco_data_loader(path=IMAGES_PATH,
                                        json=CAPTION_FILE_PATH,
                                        vocab=vocab,
                                        transform=transform,
                                        batch_size=batch_size,
                                        shuffle=True,
                                        num_workers=num_workers)

    IMAGES_PATH = 'data/val2014'
    CAPTION_FILE_PATH = 'data/annotations/captions_val2014.json'
    val_loader = get_coco_data_loader(path=IMAGES_PATH,
                                      json=CAPTION_FILE_PATH,
                                      vocab=vocab,
                                      transform=transform,
                                      batch_size=batch_size,
                                      shuffle=True,
                                      num_workers=num_workers)

    losses_val = []
    losses_train = []

    # Build the models
    ngpu = 1
    initial_step = initial_epoch = 0
    embed_size = args.embed_size
    num_hiddens = args.num_hidden
    learning_rate = 5e-4
    num_epochs = 2
    log_step = args.log_step
    save_step = 500
    checkpoint_dir = args.checkpoint_dir

    encoder = CNNEncoder()
    decoder = DecoderRNN(embed_size, num_hiddens, len(vocab))

    # Loss
    criterion = nn.CrossEntropyLoss()

    if args.checkpoint_file:
        encoder_state_dict, decoder_state_dict, optimizer, *meta = utils.load_models(
            args.checkpoint_file, args.sample)
        initial_step, initial_epoch, losses_train, losses_val = meta
        encoder.load_state_dict(encoder_state_dict)
        decoder.load_state_dict(decoder_state_dict)
    else:
        params = list(decoder.parameters()) + list(
            encoder.batchnorm.parameters())
        optimizer = torch.optim.Adam(params, lr=learning_rate)

    if torch.cuda.is_available():
        encoder.cuda()
        decoder.cuda()

    if args.sample:
        return utils.sample(encoder, decoder, vocab, val_loader)

    # Train the Models
    total_step = len(train_loader)
    try:
        for epoch in range(initial_epoch, num_epochs):

            for step, (images, captions,
                       lengths) in enumerate(train_loader, start=initial_step):

                # Set mini-batch dataset
                images = utils.to_var(images)  # not volatile: gradients must flow in training
                captions = utils.to_var(captions)
                targets = pack_padded_sequence(captions,
                                               lengths,
                                               batch_first=True)[0]
                # Forward, Backward and Optimize
                decoder.zero_grad()
                encoder.zero_grad()
                if ngpu > 1:
                    # run on multiple GPU
                    features = nn.parallel.data_parallel(
                        encoder, images, range(ngpu))
                    outputs, alphas = nn.parallel.data_parallel(
                        decoder, features, range(ngpu))
                else:
                    # run on single GPU
                    features = encoder(images)
                    outputs, alphas = decoder(features, captions, lengths)

                train_loss = criterion(outputs, targets.cpu())
                train_loss += ((1. - alphas.sum(dim=1))**2).mean()
                losses_train.append(train_loss.data)
                train_loss.backward()
                optimizer.step()

                print('Epoch: {} - Step: {} - Train Loss: {}'.format(
                    epoch, step, losses_train[-1]))
                # Run validation set and predict
                if step % log_step == 404:
                    encoder.batchnorm.eval()
                    # run validation set
                    batch_loss_val = []
                    for val_step, (images, captions,
                                   lengths) in enumerate(val_loader):
                        images = utils.to_var(images, volatile=True)
                        captions = utils.to_var(captions, volatile=True)

                        targets = pack_padded_sequence(captions,
                                                       lengths,
                                                       batch_first=True)[0]
                        features = encoder(images)
                        outputs, alphas = decoder(features, captions, lengths)
                        val_loss = criterion(outputs, targets.cpu())
                        val_loss += ((1. - alphas.sum(dim=1))**2).mean()
                        batch_loss_val.append(val_loss.data)
                        if val_step % 50 == 0:
                            print('Epoch: {} - Step: {} - Mini Eval Loss: {}'.
                                  format(epoch, val_step, val_loss))
                            sampled_ids = decoder.sample(features)
                            sampled_ids = sampled_ids.cpu().data.numpy()[0]
                            sentence = utils.convert_back_to_text(
                                sampled_ids, vocab)
                            print('Sample:', sentence)

                            true_ids = captions.cpu().data.numpy()[0]
                            sentence = utils.convert_back_to_text(
                                true_ids, vocab)
                            print('Target:', sentence)

                    losses_val.append(np.mean(batch_loss_val))
                    # predict

                    print('Epoch: {} - Step: {} - Eval Loss: {}'.format(
                        epoch, step, losses_val[-1]))
                    encoder.batchnorm.train()

                # Save the models
                if (step + 1) % save_step == 0:
                    utils.save_models(encoder, decoder, optimizer, step, epoch,
                                      losses_train, losses_val, checkpoint_dir)
                    utils.dump_losses(
                        losses_train, losses_val,
                        os.path.join(checkpoint_dir, 'losses.pkl'))

    except KeyboardInterrupt:
        pass
    finally:
        # Do final save
        utils.save_models(encoder, decoder, optimizer, step, epoch,
                          losses_train, losses_val, checkpoint_dir)
        utils.dump_losses(losses_train, losses_val,
                          os.path.join(checkpoint_dir, 'losses.pkl'))
Example #22
        label = type + str(mistake_factor)
        #label = type + 'periodic'
        ghost = LorenzModel(sigma=mistake_factor * 10)
        #ghost = PeriodicApprox()
        f = KnowledgeModel(hidden_dim=50, known_model=ghost, io_dim=data_dim)
        model_dir = project_dir + '/FullStateNODE/models/M-NODE/' + str(
            mistake_factor) + 'sigma/knowledge'
        #model_dir = project_dir + '/FullStateNODE/models/M-NODE/periodic_model/knowledge'
    elif type == 'pure':
        label = "pure"
        f = Net(hidden_dim=256, io_dim=data_dim)
        model_dir = project_dir + '/FullStateNODE/models/PureNODE/3DLorenzmodel'
    else:
        print("type '{}' doesn't exist".format(type))
        exit()
    load_models(model_dir, f)

    ic_state, ic_future = next(iter(dataloader))
    ic_state = ic_state.view(batch_size, data_dim)

    #vec = torch.tensor([[2, 10, 2]]).float()
    t = torch.zeros(1)
    dstatedt = f(t, ic_state)
    dxdt_NODE = dstatedt[:, 0].detach().numpy()
    if type == 'pure':
        dxdt_NODE = dxdt_NODE / 10
    dxdt = x_of_exact_lorenz(ic_state).detach().numpy()

    comp = np.arange(-10, 10)
    plt.plot(dxdt, dxdt_NODE, 'x', color='orange')
    plt.xlabel(r'$(1-\alpha)\sigma(y-x)$')
Example #23
    def __init__(self, models_path):
        self.models = utils.load_models(models_path)
Example #24
    LR = 0.01
    HIDDEN_DIM = 500
    num_rnn_layers = 2

    # Construct model
    encoder_rnn = CreationRNN(input_dim=1,
                              hidden_dim=HIDDEN_DIM,
                              num_layers=num_rnn_layers,
                              output_dim=latent_dim,
                              nbatch=batch_size)
    print(encoder_rnn)
    params = list(encoder_rnn.parameters())
    optimizer = optim.Adam(params, lr=LR)

    f = Net(hidden_dim=256)
    load_models(ddd_model_dir, f)

    if TRAIN_MODEL:

        train_dataset = DDDLorenzData(dddtrain_data_dir,
                                      lookahead=lookahead,
                                      tau=tau,
                                      k=8,
                                      max_len=max_len)
        train_dataloader = DataLoader(train_dataset,
                                      batch_size=batch_size,
                                      shuffle=True,
                                      drop_last=True)

        val_dataset = DDDLorenzData(dddval_data_dir,
                                    lookahead,
Example #25
def main():
    """Run only when this file is called directly."""
    # Setup hyperparameters
    CONFIG = get_config()

    # Log to File and Console
    logger = get_logger(log_to_console=True, log_to_file=CONFIG.LOG_TO_FILE)

    # Configure CPU threads and select the compute device
    torch.set_num_threads(CONFIG.CPU_THREADS)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    if not torch.cuda.is_available():
        logger.warning("GPU not available: this run could be slow.")

    # Setup environment
    # v4 variant: No repeat action
    env = gym.make(CONFIG.ENV_NAME)
    eval_env = gym.make(CONFIG.ENV_NAME)
    # AtariPreprocessing:
    # - Max NOOP on start: 30
    # - Frameskip: CONFIG.FRAME_SKIP
    #   - If Frameskip > 1, max pooling is done
    # - Screen size: 84
    # - Terminal on life loss: True
    # - Grayscale obs: True
    env = AtariPreprocessing(env,
                             frame_skip=CONFIG.FRAME_SKIP,
                             terminal_on_life_loss=True)
    # NOTE(seungjaeryanlee): In evaluation, episode does not end in life loss
    # https://github.com/deepmind/dqn/blob/9d9b1d1/dqn/train_agent.lua#L119
    eval_env = AtariPreprocessing(eval_env,
                                  frame_skip=CONFIG.FRAME_SKIP,
                                  terminal_on_life_loss=False)
    # Stack frames to create observation
    env = FrameStack(env, stack_size=CONFIG.FRAME_STACK)
    eval_env = FrameStack(eval_env, stack_size=CONFIG.FRAME_STACK)

    # Fix random seeds
    if CONFIG.RANDOM_SEED is not None:
        set_global_random_seeds(CONFIG.RANDOM_SEED,
                                use_numpy=True,
                                use_torch=True)
        # NOTE(seungjaeryanlee): Seeds for env and eval_env are different for fair evaluation
        set_env_random_seeds(env, CONFIG.RANDOM_SEED)
        set_env_random_seeds(eval_env, CONFIG.RANDOM_SEED + 1)
    else:
        logger.warning(
            "Running without a random seed: this run is NOT reproducible.")

    # Setup agent and replay buffer
    q_net = AtariQNetwork(CONFIG.FRAME_STACK, env.action_space.n).to(device)
    optimizer = optim.RMSprop(
        q_net.parameters(),
        lr=CONFIG.RMSPROP_LR,
        alpha=CONFIG.RMSPROP_DECAY,
        eps=CONFIG.RMSPROP_EPSILON,
        momentum=CONFIG.RMSPROP_MOMENTUM,
        weight_decay=CONFIG.RMSPROP_WEIGHT_DECAY,
        centered=CONFIG.RMSPROP_IS_CENTERED,
    )
    if CONFIG.LOAD_PATH:
        # Load parameters if possible
        load_models(CONFIG.LOAD_PATH, q_net=q_net, optimizer=optimizer)
    dqn_agent = DQNAgent(env, q_net, optimizer, device)
    replay_buffer = CircularReplayBuffer(
        env,
        maxlen=CONFIG.REPLAY_BUFFER_SIZE,
        device=device,
        preprocess_batch=NATUREDQN_ATARI_PREPROCESS_BATCH,
    )

    # Train and evaluate agent
    train_eval(dqn_agent, replay_buffer, env, eval_env, device, logger, CONFIG)
Example #26
            raise
        print("Start")
        train_df = pd.read_pickle(args.data + '/' + Train_Data)
        valid_df = pd.read_pickle(args.data + '/' + Valid_Data)
        eval_df = pd.read_pickle(args.data + '/' + Eval_Data)
        embed_df = pd.read_pickle(args.data + '/' + Embed_Data)

    if (args.mode == 'train'):

        print("Preparing to Train the model")
        ''' Load and vectorize data '''
        if (args.load_existing):
            print("Loading Existing !!! Loading Dictionary")
            corpus = load_dictionary(args)
            print("Loading Existing !!! Loading Models")
            Encoder, Classifier = load_models(args, True)
            if Encoder is None or Classifier is None:
                print(
                    "Loading Existing !!! Models not loaded; building from scratch"
                )
                Encoder, Classifier = build_model_from_scratch(
                    args, corpus, embed_df)
        else:
            if (args.build_dict):
                print("Build Dictionary Given! Building dictionary now!")
                corpus = build_dictionary_from_scratch(args, train_df,
                                                       valid_df, eval_df)
            else:
                print("Building dictionary now!")
                corpus = load_dictionary(args)
            print("Building models from scratch now!")
Example #27
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 24 03:08:09 2019

@author: mustafa
"""

import numpy as np
import torch

from utils import get_obs, load_models

#Load model
net = load_models("py_src/models/REINGINE-checkpoint.dat")
a = 10
action = None


def step(state_dic):
    """
    Parameters
    ----------
    state_dic : dictionary
        current state of agent coming from environment

    Returns
    -------
    Action: angle

    """
    global net, a, action
Example #28
                time.sleep(0.025)

            print('No hand detected on this window')

            # Draw the window
            # clone = resized.copy()
            # cv2.rectangle(clone, (x, y), (x + winW, y + winH), (0, 255, 0), 2)
            # cv2.imshow('Window', clone)
            # plt.imshow(clone)
            # filename = 'test.png'
            # cv2.imwrite(filename, clone)
            # cv2.waitKey(1)

            time.sleep(0.025)

        return None


if __name__ == '__main__':
    models = load_models()
    data_dir = 'data/sample/'
    for filename in [f for f in os.listdir(data_dir) if '.jpg' in f]:
        print('Analysing: %s' % (filename))
        filename = os.path.join(data_dir, filename)
        image = cv2.imread(filename)
        print('Recognize:')
        recognize(image, models)
        print('Recognition done!')
        time.sleep(2)
        break
Example #29
    'lizard', 'lobster', 'man', 'maple_tree', 'motorcycle', 'mountain',
    'mouse', 'mushroom', 'oak_tree', 'orange', 'orchid', 'otter', 'palm_tree',
    'pear', 'pickup_truck', 'pine_tree', 'plain', 'plate', 'poppy',
    'porcupine', 'possum', 'rabbit', 'raccoon', 'ray', 'road', 'rocket',
    'rose', 'sea', 'seal', 'shark', 'shrew', 'skunk', 'skyscraper', 'snail',
    'snake', 'spider', 'squirrel', 'streetcar', 'sunflower', 'sweet_pepper',
    'table', 'tank', 'telephone', 'television', 'tiger', 'tractor', 'train',
    'trout', 'tulip', 'turtle', 'wardrobe', 'whale', 'willow_tree', 'wolf',
    'woman', 'worm'
]  # CIFAR-100 fine label names

# model = models.vgg19(pretrained=True)
files = list(
    filter(lambda filename: filename.endswith('.npy'), os.listdir(args.path)))

model, loader, preprocess_image = load_models()

for _class in classes[:1]:
    filtered_files = filter(lambda x: _class in x, files)
    for inp, outp in (('conv5_3',
                       'conv5_4'), ):  #('fc1', 'fc2'),('fc2', 'output')):
        inmask = torch.from_numpy(
            np.load(
                os.path.join(args.path,
                             'class-{}-{}.npy'.format(_class,
                                                      inp))).mean(axis=0))
        outmask = torch.from_numpy(
            np.load(
                os.path.join(args.path,
                             'class-{}-{}.npy'.format(_class,
                                                      outp))).mean(axis=0))
Example #30
        "input_dir",
        type=str,
        help="Path to the directory containing text documents to be segmented")
    parser.add_argument(
        "output_dir",
        type=str,
        help=
        "Path to the directory where the serialized tfrecords will be saved.")
    parser.add_argument(
        "--train",
        type=int,
        default=0,
        help=
        "Indicates if you're preparing tfrecords for training (value 1) the model or instances on which to predict (value 0) the segmentation scores."
    )
    parser.add_argument(
        "--ssplit",
        type=int,
        default=1,
        help=
        "Indicates whether the texts are already sentence split, one sentence per line in the text files (value 1), or the texts need to be first split for sentences (value 0)"
    )
    args = parser.parse_args()

    _, vocabulary = utils.load_models()
    preprocess(args.input_dir,
               args.output_dir,
               vocabulary,
               train=args.train == 1,
               ssplit=args.ssplit == 1)