Example #1
def main(unused_argv):
    # create output dirs
    output_dir = Path(FLAGS.output_dir)
    output_dir.mkdir(exist_ok=True)

    decode_dbl = parse_emotion_dbl(FLAGS.eval_file_path)

    if FLAGS.cpc_path is not None:
        cpc = load_model(FLAGS.cpc_path).eval().to(device)
    else:
        cpc = NoCPC().eval().to(device)
    model = load_model(FLAGS.model_path).eval().to(device)

    set_seeds()
    # Need the enumeration to ensure unique files
    for i, dbl_entry in enumerate(decode_dbl):
        filename = Path(dbl_entry.audio_path)
        preds = decode_emotions_from_file(filename.as_posix(), cpc, model,
                                          FLAGS.window_size)

        with open(str(output_dir / filename.name) + "_" + str(i),
                  "w") as out_f:
            for pred in preds:
                out_f.write("{:.3f} {:.3f} {}\n".format(
                    pred.start, pred.end, pred.label))

        with open(output_dir / "score.dbl", "a") as dbl_fh:
            dbl_fh.write(str(output_dir / filename.name) + "_" + str(i) + "\n")
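Every example on this page calls a shared set_seeds helper that the listing itself never shows. A minimal sketch of what such a helper typically looks like (the signature is an assumption inferred from the call sites, e.g. set_seeds(seed, cuda) in Example #2 below; the projects' real versions may differ):

import random

import numpy as np
import torch


def set_seeds(seed=42, cuda=False):
    # assumed reconstruction: seed every RNG the training code touches
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if cuda:
        # also seed all CUDA devices for GPU runs
        torch.cuda.manual_seed_all(seed)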
Example #2
    def setup(self):

        set_seeds(self.config.seed, "cuda" in self.config.device)

        self.setup_data()
        self.setup_model()

        self.config.epoch_str_template = "{:0" + str(
            len(str(self.config.n_epochs))) + "d}"
        self.clog.show_text(repr(self.model), "Model")
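The epoch_str_template line above builds a zero-padded format string sized to the largest epoch number. A standalone illustration (n_epochs here is a stand-in value, not from the original config):

n_epochs = 300
template = "{:0" + str(len(str(n_epochs))) + "d}"  # -> "{:03d}"
print(template.format(7))                          # prints "007"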
Example #3
def main(unused_argv):
    # create output dirs
    output_dir = Path(FLAGS.output_dir)
    output_dir.mkdir(exist_ok=True)

    if FLAGS.cpc_path is not None:
        cpc = load_model(FLAGS.cpc_path).eval().to(device)
    else:
        cpc = NoCPC().eval().to(device)
    model = load_model(FLAGS.model_path).eval().to(device)

    dataset = AudioDataset(FLAGS.eval_file_path, train=False)
    dataloader = AudioDataLoader(
        dataset,
        window_size=None,
        batch_size=1,
        feature_transform=cpc.data_class,
        num_workers=8,
        shuffle=False,
    )

    set_seeds()
    # Need the enumeration to ensure unique files
    for i, batch in enumerate(dataloader):
        data = batch["data"].to(device)
        cpc.reset_state()

        preds = []
        prev_end_s = 0.0
        windows = torch.split(data, FLAGS.window_size, dim=1)
        for window in windows:
            with torch.no_grad():
                features = cpc(window)
                pred = model(features).argmax(dim=2).squeeze(dim=0)

            outputs, prev_end_s = preds_to_output(
                pred,
                window.shape[1],
                dataloader.sampling_rate,
                prev_end_s,
            )
            preds.extend(outputs)

        filename = Path(batch["files"][0])
        with open(str(output_dir / filename.name) + "_" + str(i),
                  "w") as out_f:
            for pred in preds:
                out_f.write("{:.3f} {:.3f} {}\n".format(
                    pred.start, pred.end, pred.label))

        with open(output_dir / "score.dbl", "a") as dbl_fh:
            dbl_fh.write(str(output_dir / filename.name) + "_" + str(i) + "\n")
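Example #3 leans on torch.split to cut the full recording into fixed-size windows along the time axis; the final window may be shorter than window_size, which is why preds_to_output is passed window.shape[1] per window rather than FLAGS.window_size. A standalone illustration of that behavior:

import torch

x = torch.arange(10).reshape(1, 10)   # (batch=1, time=10)
windows = torch.split(x, 4, dim=1)    # chunks of 4 along the time axis
print([w.shape[1] for w in windows])  # [4, 4, 2] -- the last window is shorter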
Example #4
def train(unused_argv):
    set_seeds(FLAGS.seed)
    write_current_pid(FLAGS.expdir)
    # setup logging
    tb_logger = prepare_tb_logging()
    prepare_standard_logging("training")
    loss_dir = Path(f"{FLAGS.expdir}/losses")
    loss_dir.mkdir(exist_ok=True)
    train_losses_fh = open(loss_dir / "train.txt", "a", buffering=1)
    valid_losses_fh = open(loss_dir / "valid.txt", "a", buffering=1)

    if FLAGS.dry_run:
        setup_dry_run(FLAGS)
    if not FLAGS.model_out:
        FLAGS.model_out = FLAGS.expdir + "/model.pt"
    if not FLAGS.checkpoint_out:
        FLAGS.checkpoint_out = FLAGS.expdir + "/checkpoint.pt"

    if FLAGS.checkpoint_autoload and not FLAGS.checkpoint:
        FLAGS.checkpoint = get_checkpoint_to_start_from(FLAGS.checkpoint_out)
        logging.info(f"autosetting checkpoint: {FLAGS.checkpoint}")

    if FLAGS.cpc_path is not None:
        cpc = load_model(FLAGS.cpc_path).to(device)
        cpc.reset_state()
    else:
        cpc = NoCPC()
    cpc.eval()

    # write information about cpc into metadata
    with open(f"{FLAGS.expdir}/metadata.txt", "a") as fh:
        fh.write(f"sampling_rate_hz {cpc.data_class.SAMPLING_RATE_HZ}\n")
        fh.write(f"feat_dim {cpc.feat_dim}\n")

    # define training data
    parsed_train_dbl = parse_emotion_dbl(FLAGS.train_data)
    train_streams = [
        DblStream(
            DblSampler(parsed_train_dbl),
            EmotionIDSingleFileStream,
            FLAGS.window_size,
            emotion_set_path=FLAGS.emotion_set_path,
            audiostream_class=cpc.data_class,
        )
        for _ in range(FLAGS.batch_size)
    ]
    train_datastream = MultiStreamDataLoader(train_streams, device=device)
    # define validation data
    parsed_valid_dbl = parse_emotion_dbl(FLAGS.val_data)
    parsed_test_dbl = parse_emotion_dbl(FLAGS.test_data)
    val_streams = [
        DblStream(
            DblSampler(parsed_valid_dbl),
            EmotionIDSingleFileStream,
            FLAGS.window_size,
            emotion_set_path=FLAGS.emotion_set_path,
            audiostream_class=cpc.data_class,  # TODO ensure un-augmented stream
        )
        for _ in range(FLAGS.batch_size)
    ]
    valid_datastream = MultiStreamDataLoader(val_streams, device=device)
    if not FLAGS.val_every:
        FLAGS.val_every = max(100, FLAGS.steps // 50)
    if not FLAGS.save_every:
        FLAGS.save_every = FLAGS.val_every
    if not FLAGS.valid_steps:
        FLAGS.valid_steps = max(20, FLAGS.val_every // 100)
    valid_frames = FLAGS.batch_size * FLAGS.window_size * FLAGS.valid_steps

    feat_dim = cpc.feat_dim
    num_emotions = len(get_emotion_to_id_mapping(FLAGS.emotion_set_path))

    if FLAGS.model == "linear":
        model = LinearEmotionIDModel(feat_dim, num_emotions).to(device)
    elif FLAGS.model == "baseline":
        model = BaselineEmotionIDModel(feat_dim, num_emotions).to(device)
    elif FLAGS.model == "mlp2":
        model = MLPEmotionIDModel(
            feat_dim,
            num_emotions,
            no_layers=2,
            hidden_size=FLAGS.hidden_size,
            dropout_prob=FLAGS.dropout_prob,
            batch_norm_on=FLAGS.batch_norm,
        ).to(device)
    elif FLAGS.model == "mlp4":
        model = MLPEmotionIDModel(
            feat_dim,
            num_emotions,
            no_layers=4,
            hidden_size=FLAGS.hidden_size,
            dropout_prob=FLAGS.dropout_prob,
            batch_norm_on=FLAGS.batch_norm,
        ).to(device)
    elif FLAGS.model == "conv":
        model = ConvEmotionIDModel(
            feat_dim,
            num_emotions,
            no_layers=4,
            hidden_size=FLAGS.hidden_size,
            dropout_prob=FLAGS.dropout_prob,
        ).to(device)
    elif FLAGS.model == "rnn":
        model = RecurrentEmotionIDModel(
            feat_dim=feat_dim,
            num_emotions=num_emotions,
            bidirectional=False,
            hidden_size=FLAGS.hidden_size,
            dropout_prob=FLAGS.dropout_prob,
        ).to(device)
    elif FLAGS.model == "rnn_bi":
        model = RecurrentEmotionIDModel(
            feat_dim=feat_dim,
            num_emotions=num_emotions,
            bidirectional=True,
            hidden_size=FLAGS.hidden_size,
            dropout_prob=FLAGS.dropout_prob,
        ).to(device)
    elif FLAGS.model == "wavenet":
        model = WaveNetEmotionIDModel(feat_dim, num_emotions).to(device)
        padding_percentage = 100 * model.max_padding / FLAGS.window_size
        logging.info(f"max padding {model.max_padding}, percentage {padding_percentage}%")
        logging.info(f"receptve field {model.receptive_field}")
    elif FLAGS.model == "wavenet_unmasked":
        model = WaveNetEmotionIDModel(feat_dim, num_emotions, masked=False).to(device)
        padding_percentage = 100 * model.max_padding / FLAGS.window_size
        logging.info(f"max padding {model.max_padding}, percentage {padding_percentage}%")
        logging.info(f"receptve field {model.receptive_field}")
    else:
        raise ValueError(f"Model name {FLAGS.model} not found")

    logging.info(f"number of classes {num_emotions}")
    logging.info(f"model param count {sum(x.numel() for x in model.parameters()):,}")

    optimizer = RAdam(model.parameters(), eps=1e-05, lr=FLAGS.lr)
    if FLAGS.lr_schedule:
        scheduler = FlatCA(optimizer, steps=FLAGS.steps, eta_min=0)
    else:
        scheduler = EmptyScheduler(optimizer)

    step = 0
    optimizer.best_val_loss = inf

    if FLAGS.checkpoint:
        # loading state_dicts in-place
        load_checkpoint(FLAGS.checkpoint, model, optimizer, scheduler=scheduler)
        step = optimizer.restored_step

    dump_checkpoint_on_kill(model, optimizer, scheduler, FLAGS.checkpoint_out)
    set_seeds(FLAGS.seed + step)

    model.train()
    for batch in train_datastream:
        data, labels = batch["data"].to(device), batch["labels"]
        features = cpc(data)
        pred = model(features)
        labels = resample_1d(labels, pred.shape[1]).reshape(-1).to(device)

        # get cross entropy loss against emotion labels and take step
        optimizer.zero_grad()
        output = pred.reshape(-1, num_emotions)  # reuse the forward pass from above
        loss = F.cross_entropy(output, labels)
        loss.backward()
        clip_grad_norm_(model.parameters(), FLAGS.clip_thresh)

        optimizer.step()
        scheduler.step()
        # log training losses
        logging.info(f"{step} train steps, loss={loss.item():.5}")
        tb_logger.add_scalar("train/loss", loss, step)
        train_losses_fh.write(f"{step}, {loss.item()}\n")

        tb_logger.add_scalar("train/lr", scheduler.get_lr()[0], step)

        # validate periodically
        if step % FLAGS.val_every == 0 and step != 0:

            valid_loss = validate(valid_datastream, cpc, model, num_emotions)
            # log validation losses
            logging.info(
                f"{step} validation, loss={valid_loss.item():.5}, "
                f"{valid_frames:,} items validated"
            )
            tb_logger.add_scalar("valid/loss", valid_loss, step)
            valid_losses_fh.write(f"{step}, {valid_loss}\n")

            val_results = validate_filewise(parsed_valid_dbl, cpc, model, num_emotions)
            test_results = validate_filewise(parsed_test_dbl, cpc, model, num_emotions)
            for results, dataset in zip([val_results, test_results], ["valid", "test"]):
                tb_logger.add_scalar(f"{dataset}/full_loss", results["average_loss"], step)
                for name in ["framewise", "filewise"]:
                    cm = fig2tensor(results[name]["confusion_matrix"])
                    tb_logger.add_scalar(f"{dataset}/accuracy", results[name]["accuracy"], step)
                    tb_logger.add_scalar(f"{dataset}/f1_score", results[name]["average_f1"], step)
                    tb_logger.add_image(f"{dataset}/confusion_matrix", cm, step)

            for emotion, f1 in val_results["framewise"]["class_f1"].items():
                tb_logger.add_scalar(f"f1/{emotion}", f1, step)

            if valid_loss.item() < optimizer.best_val_loss:
                logging.info("Saving new best validation")
                save(model, FLAGS.model_out + ".bestval")
                optimizer.best_val_loss = valid_loss.item()

        # save out model periodically
        if step % FLAGS.save_every == 0 and step != 0:
            save(model, FLAGS.model_out + ".step" + str(step))

        if step >= FLAGS.steps:
            break

        step += 1

    save(model, FLAGS.model_out)

    # close loss logging file handles
    train_losses_fh.close()
    valid_losses_fh.close()
Example #5
    def train_net(self):

        writer = SummaryWriter(self.log_path)
        best_validation_error = float("inf")
        set_seeds(self.seed)

        # dataset loaders
        train_dataset, train_loader = init_dataset(self.train_set, train=True, batch_size=self.batch_size, workers=self.workers)
        test_dataset, test_loader = init_dataset(self.validation_set, train=False, batch_size=self.batch_size, workers=self.workers)

        # load model
        self.model = self.model.train()
        if self.on_GPU:
            self.model = self.model.cuda()

        # define optimizer
        optimizer = torch.optim.SGD(self.model.get_config_optim(self.lr, self.lrp),
                                    lr=self.lr,
                                    momentum=self.momentum,
                                    weight_decay=self.weight_decay)

        criterion = nn.MSELoss()
        j = 0
        for epoch in range(self.num_epochs):  # loop over the dataset multiple times
            epoch_iter = tqdm(train_loader, desc='Training')
            self.model.train()

            for i, data in enumerate(epoch_iter):
                # get the inputs
                inputs_datas, labels = data
                inputs, img_names = inputs_datas

                # move tensors to the GPU if requested; torch.autograd.Variable
                # is a deprecated no-op wrapper in modern PyTorch
                if self.on_GPU:
                    inputs, labels = inputs.cuda(), labels.cuda()

                # zero the parameter gradients
                optimizer.zero_grad()

                # forward + backward + optimize
                outputs = self.model(inputs)  # call the module, not .forward(), so hooks run
                loss = criterion(outputs, labels)
                loss.backward()
                optimizer.step()

                writer.add_scalar('Train/Loss', loss.item(), j)
                j += 1

            # validate at the end of each epoch
            with torch.no_grad():
                validation_error = self.validate_current_model(val_loader=test_loader)
            print("validation error: ", validation_error)

            if validation_error < best_validation_error:
                best_validation_error = validation_error

                # save the best model seen so far (overwrites the previous best)
                checkpoint_path = '%s/seed_%d_best_checkpoint.pth' % (self.save_path, self.seed)
                print('saving model:', checkpoint_path)
                torch.save(self.model.state_dict(), checkpoint_path)

        print('Finished Training')
        return self.model, best_validation_error
Example #6
def train(unused_argv):
    set_seeds(FLAGS.seed)
    # setup logging
    tb_logger = SummaryWriter(FLAGS.expdir, flush_secs=10)
    loss_dir = Path(f"{FLAGS.expdir}/losses")
    loss_dir.mkdir(exist_ok=True)
    train_losses_fh = open(loss_dir / "train.txt", "a", buffering=1)
    valid_losses_fh = open(loss_dir / "valid.txt", "a", buffering=1)

    if not FLAGS.model_out:
        FLAGS.model_out = FLAGS.expdir + "/model.pt"

    if FLAGS.cpc_path is not None:
        cpc = load_model(FLAGS.cpc_path).to(device)
        cpc.reset_state()
    else:
        cpc = NoCPC()
    cpc.eval()

    # write information about cpc into metadata
    with open(f"{FLAGS.expdir}/metadata.txt", "a") as fh:
        fh.write(f"data_class {cpc.data_class}\n")
        fh.write(f"feat_dim {cpc.feat_dim}\n")

    # define training data
    train_dataset = EmotionDataset(FLAGS.train_data, FLAGS.emotion_set_path)
    train_dataloader = AudioDataLoader(
        train_dataset,
        window_size=FLAGS.window_size,
        batch_size=FLAGS.batch_size,
        feature_transform=cpc.data_class,
        num_workers=FLAGS.num_workers,
        shuffle=True,
        drop_last=True,
    )
    # define validation data
    val_dataset = EmotionDataset(FLAGS.val_data, FLAGS.emotion_set_path)
    val_dataloader = AudioDataLoader(
        val_dataset,
        window_size=FLAGS.window_size,
        batch_size=FLAGS.batch_size,
        feature_transform=cpc.data_class,
        num_workers=FLAGS.num_workers,
        shuffle=False,
        drop_last=True,
    )
    # filewise validation (like decode time)
    decode_dataset = EmotionDataset(FLAGS.val_data,
                                    FLAGS.emotion_set_path,
                                    train=False)
    decode_dataloader = AudioDataLoader(
        decode_dataset,
        window_size=None,
        batch_size=1,
        feature_transform=cpc.data_class,
        num_workers=FLAGS.num_workers,
        shuffle=False,
    )

    if not FLAGS.val_every:
        FLAGS.val_every = max(100, FLAGS.steps // 50)
    if not FLAGS.save_every:
        FLAGS.save_every = FLAGS.val_every
    if not FLAGS.valid_steps:
        FLAGS.valid_steps = max(20, FLAGS.val_every // 100)
    valid_frames = FLAGS.batch_size * FLAGS.window_size * FLAGS.valid_steps

    feat_dim = cpc.feat_dim
    num_emotions = len(
        train_dataset.get_emotion_to_id_mapping(FLAGS.emotion_set_path))

    if FLAGS.model == "linear":
        model = LinearEmotionIDModel(feat_dim, num_emotions).to(device)
    elif FLAGS.model == "baseline":
        model = BaselineEmotionIDModel(feat_dim, num_emotions).to(device)
    elif FLAGS.model == "mlp2":
        model = MLPEmotionIDModel(
            feat_dim,
            num_emotions,
            no_layers=2,
            hidden_size=FLAGS.hidden_size,
            dropout_prob=FLAGS.dropout_prob,
            batch_norm_on=FLAGS.batch_norm,
        ).to(device)
    elif FLAGS.model == "mlp4":
        model = MLPEmotionIDModel(
            feat_dim,
            num_emotions,
            no_layers=4,
            hidden_size=FLAGS.hidden_size,
            dropout_prob=FLAGS.dropout_prob,
            batch_norm_on=FLAGS.batch_norm,
        ).to(device)
    elif FLAGS.model == "conv":
        model = ConvEmotionIDModel(
            feat_dim,
            num_emotions,
            no_layers=4,
            hidden_size=FLAGS.hidden_size,
            dropout_prob=FLAGS.dropout_prob,
        ).to(device)
    elif FLAGS.model == "rnn":
        model = RecurrentEmotionIDModel(
            feat_dim=feat_dim,
            num_emotions=num_emotions,
            bidirectional=False,
            hidden_size=FLAGS.hidden_size,
            dropout_prob=FLAGS.dropout_prob,
        ).to(device)
    elif FLAGS.model == "rnn_bi":
        model = RecurrentEmotionIDModel(
            feat_dim=feat_dim,
            num_emotions=num_emotions,
            bidirectional=True,
            hidden_size=FLAGS.hidden_size,
            dropout_prob=FLAGS.dropout_prob,
        ).to(device)
    elif FLAGS.model == "wavenet":
        model = WaveNetEmotionIDModel(feat_dim, num_emotions).to(device)
        padding_percentage = 100 * model.max_padding / FLAGS.window_size
        logging.info(
            f"max padding {model.max_padding}, percentage {padding_percentage}%"
        )
        logging.info(f"receptve field {model.receptive_field}")
    elif FLAGS.model == "wavenet_unmasked":
        model = WaveNetEmotionIDModel(feat_dim, num_emotions,
                                      masked=False).to(device)
        padding_percentage = 100 * model.max_padding / FLAGS.window_size
        logging.info(
            f"max padding {model.max_padding}, percentage {padding_percentage}%"
        )
        logging.info(f"receptve field {model.receptive_field}")
    else:
        raise ValueError(f"Model name {FLAGS.model} not found")

    logging.info(f"number of classes {num_emotions}")
    logging.info(
        f"model param count {sum(x.numel() for x in model.parameters()):,}")

    optimizer = RAdam(model.parameters(), eps=1e-05, lr=FLAGS.lr)
    scheduler = CosineAnnealingLR(optimizer, FLAGS.steps, eta_min=1e-6)

    step = 0
    best_val_loss = inf

    model.train()
    for batch in train_dataloader:
        data, labels = batch["data"].to(device), batch["labels"].to(device)
        features = cpc(data)
        pred = model(features)

        # get cross entropy loss against emotion labels and take step
        optimizer.zero_grad()
        pred = pred.reshape(-1, num_emotions)
        labels = labels.reshape(-1)
        loss = F.cross_entropy(pred, labels)
        loss.backward()
        clip_grad_norm_(model.parameters(), FLAGS.clip_thresh)

        optimizer.step()
        scheduler.step()
        # log training losses
        logging.info(f"{step} train steps, loss={loss.item():.5}")
        tb_logger.add_scalar("01_train/loss", loss, step)
        train_losses_fh.write(f"{step}, {loss.item()}\n")

        tb_logger.add_scalar("01_train/lr", scheduler.get_lr()[0], step)

        # validate periodically
        if step % FLAGS.val_every == 0 and step != 0:

            valid_loss = validate(val_dataloader, cpc, model, num_emotions)
            # log validation losses
            logging.info(f"{step} validation, loss={valid_loss.item():.5}, "
                         f"{valid_frames:,} items validated")
            tb_logger.add_scalar("02_valid/loss", valid_loss, step)
            valid_losses_fh.write(f"{step}, {valid_loss}\n")

            val_results = validate_filewise(decode_dataloader, cpc, model,
                                            num_emotions)
            tb_logger.add_scalar("02_valid/full_loss",
                                 val_results["average_loss"], step)
            for name in ["framewise", "filewise"]:
                cm = fig2tensor(val_results[name]["confusion_matrix"])
                tb_logger.add_scalar(f"02_valid/accuracy_{name}",
                                     val_results[name]["accuracy"], step)
                tb_logger.add_scalar(f"02_valid/f1_score_{name}",
                                     val_results[name]["average_f1"], step)
                tb_logger.add_image(f"02_valid/confusion_matrix_{name}", cm,
                                    step)

            for emotion, f1 in val_results["framewise"]["class_f1"].items():
                tb_logger.add_scalar(f"03_f1/{emotion}", f1, step)

            if valid_loss.item() < best_val_loss:
                logging.info("Saving new best validation")
                save(model, FLAGS.model_out + ".bestval")
                best_val_loss = valid_loss.item()

        # save out model periodically
        if step % FLAGS.save_every == 0 and step != 0:
            save(model, FLAGS.model_out + ".step" + str(step))

        if step >= FLAGS.steps:
            break

        step += 1

    save(model, FLAGS.model_out)

    # close loss logging file handles
    train_losses_fh.close()
    valid_losses_fh.close()
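The fallback defaults shared by Examples #4 and #6 (val_every, save_every, valid_steps) are easiest to see with concrete numbers; the flag values below are hypothetical stand-ins:

steps, batch_size, window_size = 10_000, 32, 1024      # stand-in flag values
val_every = max(100, steps // 50)                      # 200
save_every = val_every                                 # 200
valid_steps = max(20, val_every // 100)                # max(20, 2) = 20
valid_frames = batch_size * window_size * valid_steps  # 655,360 frames per validation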
Example #7
File: pacman.py Project: fha/RL
def readCommand(argv):
    """
    Processes the command used to run pacman from the command line.
    """
    from optparse import OptionParser
    usageStr = """
    USAGE:      python pacman.py <options>
    """
    parser = OptionParser(usageStr)

    parser.add_option('-T',
                      '--teamName',
                      dest='teamName',
                      help="Enter your team's name",
                      default=None)
    parser.add_option(
        '-p',
        '--agentName',
        dest='agentName',
        help="Enter your agents's name (for testing purposes only)",
        default=None)
    parser.add_option('-n',
                      '--numGames',
                      dest='numGames',
                      type='int',
                      help=default('the number of GAMES to play'),
                      default=1)
    parser.add_option('-d',
                      '--dataCollectionMode',
                      action='store_true',
                      dest='dataCollectionMode',
                      help='Enter data-collection mode',
                      default=False)
    parser.add_option(
        '-a',
        '--agentArgs',
        dest='agentArgs',
        help=
        'Comma separated values sent to agent. e.g. "opt1=val1,opt2,opt3=val3"'
    )
    parser.add_option('-q',
                      '--quietTextGraphics',
                      action='store_true',
                      dest='quietGraphics',
                      help='Generate minimal output and no graphics',
                      default=False)
    parser.add_option('-m',
                      '--maxMoves',
                      dest='maxMoves',
                      type='int',
                      help=default('the maximum number of moves in a game'),
                      default=-1)
    parser.add_option('-z',
                      '--zoom',
                      type='float',
                      dest='zoom',
                      help=default('Zoom the size of the graphics window'),
                      default=1.0)
    parser.add_option(
        '-t',
        '--frameTime',
        dest='frameTime',
        type='float',
        help=default('Time to delay between frames; Must be > 0'),
        default=0.1)
    parser.add_option('-s',
                      '--seed',
                      dest='seed',
                      type='int',
                      help=default('random seed'),
                      default=3)

    options, otherjunk = parser.parse_args()
    if len(otherjunk) != 0:
        raise Exception('Command line input not understood: ' + ' '.join(otherjunk))

    if options.frameTime <= 0:
        raise Exception("the frameTime option must be greater than 0")

    # set the seed before we do anything else
    util.set_seeds(options.seed)

    # add team directory to python path
    #sys.path.append(os.path.join(os.path.dirname(os.path.abspath("pacman.py")), options.teamDirectory))
    sys.path.append(os.path.dirname(os.path.abspath("pacman.py")))
    args = dict()

    # generate a layout
    import layout
    args['layout'] = layout.RandomLayout()

    # make sure we have a team name, not a directory
    if options.teamName is not None and options.teamName.endswith("/"):
        options.teamName = options.teamName[:-1]

    # Choose a Pacman agent
    pacmanType = loadAgent(options.teamName, options.agentName)
    agentOpts = parseAgentArgs(options.agentArgs)
    pacman = pacmanType(**agentOpts)  # Instantiate Pacman with agentArgs
    args['pacman'] = pacman

    import graphicsDisplay
    if options.quietGraphics:
        args['display'] = graphicsDisplay.QuietGraphics()
    else:
        args['display'] = graphicsDisplay.FirstPersonPacmanGraphics(
            options.zoom, True, frameTime=options.frameTime)

    args['numGames'] = options.numGames
    args['maxMoves'] = options.maxMoves
    args['dataCollectionMode'] = options.dataCollectionMode

    return args
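parseAgentArgs is not shown in this listing; it converts the -a string into the keyword arguments used to instantiate the agent. A plausible sketch, assuming the comma/equals format described in the option's help text (the real project's version may differ):

def parseAgentArgs(s):
    # assumed reconstruction: "opt1=val1,opt2,opt3=val3"
    #   -> {'opt1': 'val1', 'opt2': 1, 'opt3': 'val3'}
    if s is None:
        return {}
    opts = {}
    for pair in s.split(','):
        if '=' in pair:
            key, val = pair.split('=', 1)
        else:
            key, val = pair, 1  # bare option acts as a truthy flag
        opts[key] = val
    return opts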
Example #8
def run_cpc(unused_argv):

    # setup logging
    set_seeds(FLAGS.seed)

    # initialise unset flags
    if not FLAGS.model_out:
        FLAGS.model_out = FLAGS.expdir + "/model.pt"
    if not FLAGS.val_every:
        FLAGS.val_every = max(100, FLAGS.steps // 50)
    if not FLAGS.val_steps:
        FLAGS.val_steps = max(20, FLAGS.steps // FLAGS.batch_size // 50)
    if not FLAGS.save_every:
        FLAGS.save_every = FLAGS.val_every
    logging.info(f"model_out {FLAGS.model_out}")
    logging.info(f"steps {FLAGS.steps}")
    logging.info(f"val_steps {FLAGS.val_steps}")
    logging.info(f"log_every {FLAGS.log_every}")
    logging.info(f"log_tb_every {FLAGS.log_tb_every}")
    logging.info(f"val_every {FLAGS.val_every}")
    logging.info(f"save_every {FLAGS.save_every}")

    # model and optimization
    model = CPCModel(
        FLAGS.features_in,
        FLAGS.timestep,
        FLAGS.batch_size,
        FLAGS.window_size,
        FLAGS.hidden_size,
        FLAGS.out_size,
        FLAGS.no_gru_layers,
    ).to(device)
    logging.info(
        f"param count {sum(p.numel() for p in model.parameters() if p.requires_grad)}"
    )
    optimizer = RAdam(model.parameters(), lr=4e-4)
    scheduler = CosineAnnealingLR(optimizer, FLAGS.steps, eta_min=1e-6)

    # dataloaders
    train_dataset = AudioDataset(FLAGS.train_data)
    train_dataloader = AudioDataLoader(
        train_dataset,
        window_size=FLAGS.window_size,
        batch_size=FLAGS.batch_size,
        feature_transform=FLAGS.features_in,
        num_workers=FLAGS.num_workers,
        shuffle=True,
        drop_last=True,
    )

    val_dataset = AudioDataset(FLAGS.val_data)
    val_dataloader = AudioDataLoader(
        val_dataset,
        window_size=FLAGS.window_size,
        batch_size=FLAGS.batch_size,
        feature_transform=FLAGS.features_in,
        num_workers=FLAGS.num_workers,
        shuffle=False,
        drop_last=True,
    )

    # start training
    train(model, optimizer, scheduler, train_dataloader, val_dataloader, FLAGS)
Example #9
def readCommand( argv ):
    """
    Processes the command used to run pacman from the command line.
    """
    from optparse import OptionParser
    usageStr = """
    USAGE:      python pacman.py <options>
    """
    parser = OptionParser(usageStr)

    parser.add_option('-T', '--teamName', dest='teamName',
                    help="Enter your team's name", default=None)
    parser.add_option('-p', '--agentName', dest='agentName',
                    help="Enter your agents's name (for testing purposes only)",
                    default=None)
    parser.add_option('-n', '--numGames', dest='numGames', type='int',
                      help=default('the number of GAMES to play'), default=1)
    parser.add_option('-d', '--dataCollectionMode', action='store_true',
        dest='dataCollectionMode', help='Enter data-collection mode', default=False)
    parser.add_option('-a','--agentArgs',dest='agentArgs',
                      help='Comma separated values sent to agent. e.g. "opt1=val1,opt2,opt3=val3"')
    parser.add_option('-q', '--quietTextGraphics', action='store_true', dest='quietGraphics',
                      help='Generate minimal output and no graphics', default=False)
    parser.add_option('-m', '--maxMoves', dest='maxMoves', type='int',
                      help=default('the maximum number of moves in a game'), default=1000)
    parser.add_option('-z', '--zoom', type='float', dest='zoom',
                      help=default('Zoom the size of the graphics window'), default=1.0)
    parser.add_option('-t', '--frameTime', dest='frameTime', type='float',
                      help=default('Time to delay between frames; Must be > 0'), default=0.1)
    parser.add_option('-s', '--seed', dest='seed', type='int',
                      help=default('random seed'), default=3)


    options, otherjunk = parser.parse_args()
    if len(otherjunk) != 0:
        raise Exception('Command line input not understood: ' + ' '.join(otherjunk))

    if options.frameTime <= 0:
        raise Exception("the frameTime option must be greater than 0")

    # set the seed before we do anything else
    util.set_seeds(options.seed)

    # add team directory to python path
    #sys.path.append(os.path.join(os.path.dirname(os.path.abspath("pacman.py")), options.teamDirectory))
    sys.path.append(os.path.dirname(os.path.abspath("pacman.py")))
    args = dict()

    # generate a layout
    import layout
    args['layout'] = layout.RandomLayout()

    # make sure we have a team name, not a directory
    if options.teamName is not None and options.teamName.endswith("/"):
        options.teamName = options.teamName[:-1]

    # Choose a Pacman agent
    pacmanType = loadAgent(options.teamName, options.agentName)
    agentOpts = parseAgentArgs(options.agentArgs)
    pacman = pacmanType(**agentOpts) # Instantiate Pacman with agentArgs
    args['pacman'] = pacman

    import graphicsDisplay
    if options.quietGraphics:
        args['display'] = graphicsDisplay.QuietGraphics()
    else:
        args['display'] = graphicsDisplay.FirstPersonPacmanGraphics(
            options.zoom, True, frameTime=options.frameTime)

    args['numGames'] = options.numGames
    args['maxMoves'] = options.maxMoves
    args['dataCollectionMode'] = options.dataCollectionMode

    return args
Example #10
def run_cpc(unused_argv):

    # setup logging
    set_seeds(FLAGS.seed)
    prepare_standard_logging("training")

    # dry run
    if FLAGS.dry_run:
        setup_dry_run(FLAGS)

    # initialise unset flags
    if not FLAGS.model_out:
        FLAGS.model_out = FLAGS.expdir + "/model.pt"
    if not FLAGS.val_every:
        FLAGS.val_every = max(100, FLAGS.steps // 50)
    if not FLAGS.val_steps:
        FLAGS.val_steps = max(20, FLAGS.steps // FLAGS.batch_size // 50)
    if not FLAGS.save_every:
        FLAGS.save_every = FLAGS.val_every
    logging.info(f"model_out {FLAGS.model_out}")
    logging.info(f"steps {FLAGS.steps}")
    logging.info(f"val_steps {FLAGS.val_steps}")
    logging.info(f"log_every {FLAGS.log_every}")
    logging.info(f"log_tb_every {FLAGS.log_tb_every}")
    logging.info(f"val_every {FLAGS.val_every}")
    logging.info(f"save_every {FLAGS.save_every}")

    # model and optimization
    model = CPCModel(
        FLAGS.features_in,
        FLAGS.timestep,
        FLAGS.batch_size,
        FLAGS.window_size,
        FLAGS.hidden_size,
        FLAGS.out_size,
        FLAGS.no_gru_layers,
    ).to(device)
    logging.info(
        f"param count {sum(p.numel() for p in model.parameters() if p.requires_grad)}"
    )
    optimizer = RAdam(model.parameters(), lr=4e-4)
    scheduler = FlatCA(optimizer,
                       steps=FLAGS.steps,
                       eta_min=1e-6,
                       decay_proportion=1.0 / 3)

    # dataloaders
    if FLAGS.features_in == "raw":
        stream_class = RawStream
    elif FLAGS.features_in == "fbank":
        stream_class = FbankStream
    else:
        raise (f"Feature input {FLAGS.features_in} has not been implemented")

    train_dbl = parse_audio_dbl(FLAGS.train_data)
    train_streams = [
        DblStream(DblSampler(train_dbl), stream_class, FLAGS.window_size)
        for _ in range(FLAGS.batch_size)
    ]
    train_datastream = MultiStreamDataLoader(train_streams, device=device)

    val_dbl = parse_audio_dbl(FLAGS.val_data)
    val_streams = [
        DblStream(DblSampler(val_dbl), stream_class, FLAGS.window_size)
        for _ in range(FLAGS.batch_size)
    ]
    val_datastream = MultiStreamDataLoader(val_streams, device=device)

    # start training
    train(model, optimizer, scheduler, train_datastream, val_datastream, FLAGS)