def main():  # pragma: no cover
    """Start an example TChannel service on localhost and block on the IO loop."""
    cli = get_args()

    server = TChannel('localhost:%d' % cli.port)
    register_example_endpoints(server)
    server.listen()

    tornado.ioloop.IOLoop.instance().start()
Exemple #2
0
def main():  # pragma: no cover
    """Run the example 'tchannel_server' service and block on the IO loop."""
    cli = get_args()

    server = TChannel(
        name='tchannel_server',
        hostport='%s:%d' % (cli.host, cli.port),
    )
    register_example_endpoints(server)
    server.listen()

    tornado.ioloop.IOLoop.instance().start()
Exemple #3
0
def main(argv):
    """Build and diagram a hyperspectral-classification network.

    Parses options, loads the selected dataset, one-hot encodes the labels,
    constructs the requested architecture and writes a model diagram to
    ``save_dir``.
    """
    K.set_image_data_format('channels_last')
    args, save_dir = options.get_args(argv)

    # BUG FIX: the original condition was
    # `args.dataset == 'indian_pines' or 'salinas'`, which is always truthy
    # (the string literal 'salinas' is truthy), so num_classes was always 16
    # -- even for pavia_university.
    if args.dataset in ('indian_pines', 'salinas'):
        num_classes = 16
    elif args.dataset == 'pavia_university':
        num_classes = 9
    else:
        raise ValueError('unknown dataset: %s' % args.dataset)

    # Load the train/test split.
    x_train, y_train, x_test, y_test = handle_data(args.dataset,
                                                   args.train_scale,
                                                   args.spatial_size, args.bf)

    # Add a trailing channel axis for the conv nets.
    x_train = np.expand_dims(x_train, axis=4)
    x_test = np.expand_dims(x_test, axis=4)

    # Keep the raw integer labels for later evaluation/post-processing.
    y_eval = y_test

    # One-hot encode; column 0 is the background class and is dropped.
    y_train = np_utils.to_categorical(y_train)[:, 1:num_classes + 1]
    y_test = np_utils.to_categorical(y_test)[:, 1:num_classes + 1]
    print(x_train.shape)
    print(y_train.shape)
    print(x_test.shape)
    print(y_test.shape)

    if args.network == 'igc1':
        s_model = igc1_net(x_train[0].shape, num_classes, args.depth,
                           args.firstgroup, args.secondgroup, args.dropout,
                           args.pooling)
    elif args.network == 'igc':
        s_model = igc_net(x_train[0].shape, num_classes, args.depth,
                          args.firstgroup, args.secondgroup)
    elif args.network == 'plain1':
        s_model = plain1_net(x_train[0].shape, num_classes, args.depth,
                             args.firstgroup, args.secondgroup, args.dropout,
                             args.pooling)
    elif args.network == 'plain':
        s_model = plain_net(x_train[0].shape, num_classes, args.depth,
                            args.firstgroup, args.secondgroup)
    elif args.network == 'resnet':
        s_model = res_net(x_train[0].shape, num_classes, args.depth,
                          args.firstgroup, args.secondgroup)
    elif args.network == 'resnet_igc':
        s_model = igc_res_net(x_train[0].shape, num_classes, args.depth,
                              args.firstgroup, args.secondgroup)
    else:
        # ROBUSTNESS: previously an unknown network fell through and raised
        # NameError at plot_model; fail with a clear message instead.
        raise ValueError('unknown network: %s' % args.network)

    plot_model(s_model, to_file=save_dir + 'model.png', show_shapes=True)
def main():
    """Send one raw 'hi' request to a TChannel peer and print the reply body.

    NOTE(review): the bare ``yield``s mean this is expected to run inside a
    tornado coroutine (decorator outside this view) -- confirm at the caller.
    """
    args = get_args()

    tchannel = TChannel(name="raw-client")

    request = tchannel.request(hostport="%s:%s" % (args.host, args.port))

    response = yield request.send("hi", None, None)

    body = yield response.get_body()

    # BUG FIX: Python 2 `print body` statement is a SyntaxError on Python 3;
    # the function form works on both.
    print(body)
Exemple #5
0
def main():
    """Entry point: build a CycleGAN and either train or test it."""
    args = options.get_args()

    cycleGAN = networks.CycleGAN(args)

    # Both flags set, or neither -- nothing sensible to do.
    if args.train == args.test:
        print("What are we even doing here?")
        return
    if args.train:
        print("Training")
        print(args)
        cycleGAN.train(args)
        return
    if args.test:
        print("Testing")
        cycleGAN.test(args)
def main():
    """Benchmark TChannel pings: send N pings in batches, report throughput.

    NOTE(review): the bare ``yield``s mean this is expected to run inside a
    tornado coroutine (decorator outside this view).
    """
    args = get_args()
    conn = yield StreamConnection.outgoing('%s:%d' % (args.host, args.port))
    conn.tchannel = TChannel()
    N = 10000
    before = time.time()
    batch_size = 100
    # BUG FIX: `xrange` does not exist on Python 3, and `N / batch_size` is
    # true division there (range() rejects floats); use range() with floor
    # division -- identical behaviour on Python 2.
    for _ in range(N // batch_size):
        # Fire a whole batch concurrently and wait for all of them.
        yield [conn.ping() for _ in range(batch_size)]

    after = time.time()
    elapsed = (after - before) * 1000  # milliseconds
    print("Finish %d iterations in %d ms" % (N, elapsed))
    print("%.4f ops/s" % ((N / elapsed) * 1000))
def main():
    """Run the example raw-scheme TChannel server until interrupted."""
    cli = get_args()

    server = TChannel(
        name='raw-server',
        hostport='%s:%d' % (cli.host, cli.port),
    )
    register_example_endpoints(server)
    server.listen()

    # Announce readiness immediately (flushed so test harnesses see it).
    print("listening on %s" % server.hostport)
    sys.stdout.flush()

    tornado.ioloop.IOLoop.instance().start()
def main():
    """Stream an 'echo' request whose third argument comes from stdin or a file."""
    cli = get_args()

    arg1 = InMemStream("echo")
    arg2 = InMemStream()
    arg3 = InMemStream()

    ioloop = tornado.ioloop.IOLoop.current()
    if cli.filename == "stdin":
        # Stdin has no fixed end: start the loop and run forever.
        arg3 = PipeStream(sys.stdin.fileno())
        send_stream(arg1, arg2, arg3, cli.host)
        ioloop.start()
    elif cli.filename:
        # Regular file: run the loop just long enough to finish the send.
        fd = os.open(cli.filename, os.O_RDONLY)
        arg3 = PipeStream(fd)
        ioloop.run_sync(lambda: send_stream(arg1, arg2, arg3, cli.host))
    else:
        raise NotImplementedError()
def main():
    """Run the example JSON-scheme TChannel server with an extra 'hi-json' endpoint."""
    cli = get_args()

    server = TChannel(
        name='json-server',
        hostport='%s:%d' % (cli.host, cli.port),
    )
    register_example_endpoints(server)

    def say_hi_json(request, response, proxy):
        # Reply with a fixed JSON body.
        response.write_body({'hi': 'Hello, world!'})

    server.register(endpoint="hi-json", scheme="json", handler=say_hi_json)
    server.listen()

    tornado.ioloop.IOLoop.instance().start()
Exemple #10
0
def main():
    """Train an LF-MMI (chain) acoustic model.

    Selects the GPU, builds the denominator graph and the model, then trains
    for the configured number of epochs.  The best model (by training objf,
    higher is better) and every epoch's model are checkpointed; Ctrl-C saves
    an 'interrupted' checkpoint before returning.
    """
    args = get_args()
    setup_logger('{}/log-train'.format(args.dir), args.log_level)
    logging.info(' '.join(sys.argv))

    # BUG FIX (idiom): `torch.cuda.is_available() == False` -> `not ...`.
    if not torch.cuda.is_available():
        logging.error('No GPU detected!')
        sys.exit(-1)

    # WARNING(fangjun): we have to select GPU at the very
    # beginning; otherwise you will get trouble later
    kaldi.SelectGpuDevice(device_id=args.device_id)
    kaldi.CuDeviceAllowMultithreading()
    device = torch.device('cuda', args.device_id)

    den_fst = fst.StdVectorFst.Read(args.den_fst_filename)

    # TODO(fangjun): pass these options from commandline
    opts = chain.ChainTrainingOptions()
    opts.l2_regularize = 5e-4
    opts.leaky_hmm_coefficient = 0.1

    den_graph = chain.DenominatorGraph(fst=den_fst, num_pdfs=args.output_dim)

    model = get_chain_model(feat_dim=args.feat_dim,
                            output_dim=args.output_dim,
                            lda_mat_filename=args.lda_mat_filename,
                            hidden_dim=args.hidden_dim,
                            kernel_size_list=args.kernel_size_list,
                            stride_list=args.stride_list)

    start_epoch = 0
    learning_rate = args.learning_rate
    # Sentinel "worse than any real objf"; may be replaced by the value
    # recorded in the checkpoint below.  (Removed the unused `num_epochs`
    # local -- args.num_epochs is used directly in the loop.)
    best_objf = -100000

    if args.checkpoint:
        start_epoch, learning_rate, best_objf = load_checkpoint(
            args.checkpoint, model)
        logging.info(
            'loaded from checkpoint: start epoch {start_epoch}, '
            'learning rate {learning_rate}, best objf {best_objf}'.format(
                start_epoch=start_epoch,
                learning_rate=learning_rate,
                best_objf=best_objf))

    model.to(device)

    dataloader = get_egs_dataloader(egs_dir=args.cegs_dir,
                                    egs_left_context=args.egs_left_context,
                                    egs_right_context=args.egs_right_context)

    optimizer = optim.Adam(model.parameters(),
                           lr=learning_rate,
                           weight_decay=args.l2_regularize)

    # Halve the learning rate after each of the first five epochs.
    scheduler = MultiStepLR(optimizer, milestones=[1, 2, 3, 4, 5], gamma=0.5)
    criterion = KaldiChainObjfFunction.apply

    tf_writer = SummaryWriter(log_dir='{}/tensorboard'.format(args.dir))

    best_epoch = start_epoch
    best_model_path = os.path.join(args.dir, 'best_model.pt')
    best_epoch_info_filename = os.path.join(args.dir, 'best-epoch-info')
    try:
        for epoch in range(start_epoch, args.num_epochs):
            learning_rate = scheduler.get_lr()[0]
            logging.info('epoch {}, learning rate {}'.format(
                epoch, learning_rate))
            tf_writer.add_scalar('learning_rate', learning_rate, epoch)

            objf = train_one_epoch(dataloader=dataloader,
                                   model=model,
                                   device=device,
                                   optimizer=optimizer,
                                   criterion=criterion,
                                   current_epoch=epoch,
                                   opts=opts,
                                   den_graph=den_graph,
                                   tf_writer=tf_writer)
            scheduler.step()

            # Kept in case load_checkpoint can return None for best_objf --
            # TODO confirm; with the -100000 sentinel this never fires.
            if best_objf is None:
                best_objf = objf
                best_epoch = epoch

            # the higher, the better
            if objf > best_objf:
                best_objf = objf
                best_epoch = epoch
                save_checkpoint(filename=best_model_path,
                                model=model,
                                epoch=epoch,
                                learning_rate=learning_rate,
                                objf=objf)
                save_training_info(filename=best_epoch_info_filename,
                                   model_path=best_model_path,
                                   current_epoch=epoch,
                                   learning_rate=learning_rate,
                                   objf=best_objf,
                                   best_objf=best_objf,
                                   best_epoch=best_epoch)

            # we always save the model for every epoch
            model_path = os.path.join(args.dir, 'epoch-{}.pt'.format(epoch))
            save_checkpoint(filename=model_path,
                            model=model,
                            epoch=epoch,
                            learning_rate=learning_rate,
                            objf=objf)

            epoch_info_filename = os.path.join(args.dir,
                                               'epoch-{}-info'.format(epoch))
            save_training_info(filename=epoch_info_filename,
                               model_path=model_path,
                               current_epoch=epoch,
                               learning_rate=learning_rate,
                               objf=objf,
                               best_objf=best_objf,
                               best_epoch=best_epoch)

    except KeyboardInterrupt:
        # save the model when ctrl-c is pressed
        model_path = os.path.join(args.dir,
                                  'epoch-{}-interrupted.pt'.format(epoch))
        # use a very small objf for interrupted model
        objf = -100000
        save_checkpoint(model_path,
                        model=model,
                        epoch=epoch,
                        learning_rate=learning_rate,
                        objf=objf)

        epoch_info_filename = os.path.join(
            args.dir, 'epoch-{}-interrupted-info'.format(epoch))
        save_training_info(filename=epoch_info_filename,
                           model_path=model_path,
                           current_epoch=epoch,
                           learning_rate=learning_rate,
                           objf=objf,
                           best_objf=best_objf,
                           best_epoch=best_epoch)

    tf_writer.close()
    # warning level so 'Done' stands out in quieter log configurations.
    logging.warning('Done')
Exemple #11
0
import time, os
import torch
import numpy as np
import pandas as pd
import random
from datetime import datetime

from options import get_args
from data import build_dataset
from model import build_model

if __name__ == '__main__':

    tic = time.time()
    opt, message = get_args()
    print(message)
    if not opt.isTest:
        #random.seed(opt.seed)
        #np.random.seed(opt.seed)
        #torch.manual_seed(opt.seed)
        #torch.cuda.manual_seed_all(opt.seed)
        #torch.backends.cudnn.benchmark = True

        # build the dataset according to the options
        tr_loader, val_loader = build_dataset(opt)
        model = build_model(opt)
        model.setup()

        metric = []
        best_val_loss = 1000.0
        for epoch in range(opt.start_epoch,
Exemple #12
0
def main():
    """Train the FAN denoising model on HDF5 train/val datasets.

    Builds the model (optionally from a provided weight), then alternates
    training and validation epochs, logging to TensorBoard and checkpointing
    on a schedule or whenever loss/PSNR improve.
    """
    args = get_args()
    # NOTE(review): these hard-coded paths and batch size override whatever
    # was passed on the command line -- looks like leftover local debugging;
    # confirm before relying on CLI options.
    args.train_dir = 'F:/data/train_withAug_new_map.hdf5'
    args.val_dir = 'F:/data/val_withAug_new_map.hdf5'
    args.batch_size = 4

    # Model initialisation.
    model = FAN(depth_S=args.depth_S,
                depth_U=args.depth_U,
                feature_dims=args.feature_dims,
                wave_pattern=args.wave,
                level=args.level)
    model = nn.DataParallel(model).cuda()
    if args.weight is not None:
        # NOTE(review): args.weight is passed straight to load_state_dict --
        # presumably an already-loaded state dict, not a file path; verify.
        model.load_state_dict(args.weight)
    else:
        model = weight_init_kaiming(model)
        # Fresh run: clear any previous output directory.
        if Path(args.output_path).exists():
            shutil.rmtree(args.output_path)
        Path(args.output_path).mkdir(parents=True)

    # Optimizer, LR schedule and loss.
    optimizer = optim.Adam(model.parameters(), lr=args.lr_initial)
    scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer,
                                                     T_max=args.cycle_epochs,
                                                     eta_min=5e-10)
    loss_fn = Loss(args.loss_mode)

    # Datasets and loaders.
    train_set = DataFromH5File(args.train_dir)
    train_loader = data.DataLoader(dataset=train_set,
                                   batch_size=args.batch_size,
                                   shuffle=True,
                                   num_workers=args.num_workers,
                                   pin_memory=True)
    val_set = DataFromH5File(args.val_dir)
    val_loader = data.DataLoader(dataset=val_set,
                                 batch_size=args.batch_size,
                                 shuffle=True,
                                 num_workers=args.num_workers,
                                 pin_memory=True)

    # Training state.
    min_loss = float('inf')
    max_psnr = 0
    # IDIOM FIX: len(dataset) instead of calling __len__() directly.
    iteration = len(train_set) // args.batch_size
    val_iter = len(val_set) // args.batch_size
    writer = SummaryWriter(args.log_dir)
    print('\nBegin training with GPU')
    for i in range(args.epochs):
        tic = time.time()
        eval_loss = 0.0

        # Train stage.
        model.train()
        for num, (batch_noisy, batch_gt,
                  batch_gtmap) in enumerate(train_loader):
            optimizer.zero_grad()
            pred_map, pred_img = model(batch_noisy.cuda())
            loss = loss_fn(
                [pred_img, pred_map],
                [batch_gt.cuda(), batch_gtmap.cuda()])
            loss.backward()
            optimizer.step()
            eval_loss = eval_loss + loss.item()

            # Periodic progress log.
            if (num + 1) % args.print_freq == 0:
                log_str = '[Epoch: {:2d}/{:2d}] iteration: {:4d}/{:4d} Loss = {:+4.6f}'
                print(
                    log_str.format(i + 1, args.epochs, num + 1, iteration,
                                   loss.item()))
                writer.add_scalar('Train Loss iter', loss.item(), num + 1)
        log_str = 'Train loss of epoch {:2d}/{:2d}: {:+.10e}'
        eval_loss = eval_loss / iteration
        print(log_str.format(i + 1, args.epochs, eval_loss))
        writer.add_scalar('Loss_epoch', eval_loss, i)

        # Validation stage: PSNR only, no gradients.
        print('Test Stage Begin')
        model.eval()
        eval_psnr = 0.0
        for num, (batch_noisy, batch_gt, batch_gtmap) in enumerate(val_loader):
            with torch.set_grad_enabled(False):
                pred_map, pred_img = model(batch_noisy.cuda())
            batch_gt = batch_gt.cuda()
            psnr = PSNR_batch(batch_gt, pred_img)
            eval_psnr += psnr.item()
        eval_psnr = eval_psnr / val_iter
        log_str = '[Test for Epoch: {:d}/{:d} psnr per epoch = {:f}'
        print(log_str.format(i + 1, args.epochs, eval_psnr))

        scheduler.step()
        # Save on schedule, at the end, or whenever loss/PSNR improve.
        if (i+1) % args.save_model_freq == 0 or i+1 == args.epochs or \
                (min_loss > eval_loss) or (eval_psnr > max_psnr):
            model_prefix = 'model_'
            save_path_model = Path(
                args.output_path).joinpath(model_prefix + str(i + 1) +
                                           '_psnr_' + str(eval_psnr) + 'dB')
            torch.save(
                {
                    'epoch': i + 1,
                    'optimizer_state_dict': optimizer.state_dict()
                }, save_path_model)
            model_state_prefix = 'model_state_'
            save_path_model_sate = Path(
                args.output_path).joinpath(model_state_prefix + str(i + 1))
            torch.save(model.state_dict(), save_path_model_sate)

        min_loss = eval_loss if eval_loss < min_loss else min_loss
        max_psnr = eval_psnr if max_psnr < eval_psnr else max_psnr
        writer.add_scalar('PSNR_epoch_test', eval_psnr, i)
        toc = time.time()
        print('This epoch takes time {:2f}\n'.format(toc - tic))

    writer.close()
    print('Training is over')
Exemple #13
0
def main():
    """Interactively debug a depth/pose network on the KITTI test split.

    Loads a trained model, then for each batch prints the loss and pose
    rows, shows the target/depth and warp images via OpenCV, and renders
    the back-projected point cloud in a Pangolin 3-D window.  ESC (or
    closing the window) exits; any other key advances to the next batch.
    """
    # Parse arguments
    args = options.get_args(description="Debug a network",
                            options=[
                                "batch",
                                "workers",
                                "device",
                                "load",
                                "net",
                                "loss",
                            ])

    if args.load == "":
        print("No model file specified to load!")
        exit()

    # Construct datasets
    random.seed(1337)
    _, _, test_loader = utils.get_kitti_split(args.batch, args.workers)

    # The model
    model = networks.architectures.get_net(args)
    loss_fn = sfm_loss.get_loss_fn(args)

    # Load
    checkpoint = torch.load(args.load, map_location=torch.device(args.device))
    model.load_state_dict(checkpoint["model"])

    # Window
    pango.CreateWindowAndBind('Main', int(640 * (3 / 2)), int(480 * (3 / 2)))
    gl.glEnable(gl.GL_DEPTH_TEST)

    # Define Projection and initial ModelView matrix
    scam = pango.OpenGlRenderState(
        pango.ProjectionMatrix(640, 480, 420, 420, 320, 240, 0.2, 200),
        pango.ModelViewLookAt(0, -0.5, -0.5, 0, 0, 1, 0, -1, 0))
    handler = pango.Handler3D(scam)

    # Create Interactive View in window
    dcam = pango.CreateDisplay()
    dcam.SetBounds(pango.Attach(0), pango.Attach(1), pango.Attach(0),
                   pango.Attach(1), -640.0 / 480.0)
    dcam.SetHandler(handler)

    # Test
    # NOTE(review): train() (not eval()) before inference -- presumably
    # deliberate to keep batch-norm in training mode; confirm intent.
    model.train()
    model = model.to(args.device)

    def step_fn(step, inputs):
        # Run one batch through the model and visualize the results.

        # Forward pass and loss
        with torch.no_grad():
            loss, data = utils.forward_pass(model, loss_fn, inputs)

        print("loss %f" % loss.item())

        print(data.keys())

        # Print the two pose rows for every batch element.
        print(data["pose"].shape)
        for i in range(args.batch):
            print(list(data["pose"][i, 0, :].cpu().detach().numpy()))
            print(list(data["pose"][i, 1, :].cpu().detach().numpy()))
            print("--")

        # Stack target image and depth map side by side for display.
        depth_img = viz.tensor2depthimg(
            torch.cat((*data["depth"][0][:, 0], ), dim=0))
        tgt_img = viz.tensor2img(torch.cat((*data["tgt"], ), dim=1))
        img = np.concatenate((tgt_img, depth_img), axis=1)

        # One composite warp image per scale (diff images disabled here).
        warp_imgs = []
        #diff_imgs = []
        for warp, diff in zip(data["warp"], data["diff"]):
            warp = restack(restack(warp, 1, -1), 0, -2)
            diff = restack(restack(diff, 1, -1), 0, -2)
            warp_imgs.append(viz.tensor2img(warp))
            #diff_imgs.append(viz.tensor2diffimg(diff))

        # Back-project depth to a coloured 3-D point cloud (colors mapped
        # from [-1, 1] image range to [0, 1]).
        world = reconstruction.depth_to_3d_points(data["depth"][0], data["K"])
        points = world[0, :].view(3, -1).transpose(
            1, 0).cpu().detach().numpy().astype(np.float64)
        colors = (data["tgt"][0, :].view(3, -1).transpose(
            1, 0).cpu().detach().numpy().astype(np.float64) + 1) / 2

        # Render until a key is pressed; ESC or window close quits.
        loop = True
        while loop:
            key = cv2.waitKey(10)
            if key == 27 or pango.ShouldQuit():
                exit()
            elif key != -1:
                loop = False
            cv2.imshow("target and depth", img)
            #for i, (warp, diff) in enumerate(zip(warp_imgs, diff_imgs)):
            for i, warp in enumerate(warp_imgs):
                cv2.imshow("warp scale: %d" % i, warp)
                #cv2.imshow("diff scale: %d" % i, diff)

            gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)
            gl.glClearColor(1.0, 1.0, 1.0, 1.0)
            dcam.Activate(scam)
            gl.glPointSize(5)
            pango.DrawPoints(points, colors)
            pose = np.identity(4)
            pose[:3, 3] = 0
            gl.glLineWidth(1)
            gl.glColor3f(0.0, 0.0, 1.0)
            pango.DrawCamera(pose, 0.5, 0.75, 0.8)
            pango.FinishFrame()

    utils.iterate_loader(args, test_loader, step_fn)
Exemple #14
0
def main():
    """Debug a depth/pose network with a Mayavi 3-D viewer.

    Same flow as the Pangolin-based debug script: load a trained model, run
    the KITTI test split batch by batch, print the pose rows, and show
    depth/warp/diff images plus the reconstructed point cloud.
    """
    # Parse arguments
    args = options.get_args(description="Debug a network",
                            options=[
                                "batch",
                                "workers",
                                "device",
                                "load",
                                "smooth-weight",
                                "explain-weight",
                                "net",
                            ])

    if args.load == "":
        print("No model file specified to load!")
        exit()

    # Construct datasets
    random.seed(1337)
    _, _, test_loader = utils.get_kitti_split(args.batch, args.workers)

    # The model
    model = networks.architectures.get_net(args)
    loss_fn = sfm_loss.get_loss_fn(args)

    # Load
    checkpoint = torch.load(args.load, map_location=torch.device(args.device))
    model.load_state_dict(checkpoint["model"])

    fig = mlab.figure(figure=None,
                      bgcolor=(0, 0, 0),
                      fgcolor=None,
                      engine=None,
                      size=(1000, 500))

    # Test
    model.train()
    model = model.to(args.device)

    def step_fn(step, inputs):
        # Forward pass and loss
        with torch.no_grad():
            loss, data = utils.forward_pass(model, loss_fn, inputs)

        print("loss %f" % loss.item())

        print(data.keys())

        print(data["pose"].shape)
        # BUG FIX (consistency): the sibling debug script iterates
        # range(args.batch); the hard-coded range(4) here indexes out of
        # bounds whenever the batch size is smaller than 4.
        for i in range(args.batch):
            print(list(data["pose"][i, 0, :].cpu().detach().numpy()))
            print(list(data["pose"][i, 1, :].cpu().detach().numpy()))
            print("--")

        # Stack target image and depth map side by side for display.
        depth_img = viz.tensor2depthimg(
            torch.cat((*data["depth"][0][:, 0], ), dim=0))
        tgt_img = viz.tensor2img(torch.cat((*data["tgt"], ), dim=1))
        img = np.concatenate((tgt_img, depth_img), axis=1)

        # One composite warp/diff image per scale.
        warp_imgs = []
        diff_imgs = []
        for warp, diff in zip(data["warp"], data["diff"]):
            warp = restack(restack(warp, 1, -1), 0, -2)
            diff = restack(restack(diff, 1, -1), 0, -2)
            warp_imgs.append(viz.tensor2img(warp))
            diff_imgs.append(viz.tensor2diffimg(diff))

        # Back-project depth to a coloured point cloud (colors remapped from
        # [-1, 1] to [0, 1]).
        world = inverse_warp.depth_to_3d_points(data["depth"][0], data["K"])
        points = world[0, :].view(3, -1).transpose(
            1, 0).cpu().detach().numpy().astype(np.float64)
        colors = (data["tgt"][0, :].view(3, -1).transpose(
            1, 0).cpu().detach().numpy().astype(np.float64) + 1) / 2

        test_mayavi.draw_rgb_points(fig, points, colors)

        # Show until a key is pressed; ESC quits.
        loop = True
        while loop:
            key = cv2.waitKey(10)
            if key == 27:
                exit()
            elif key != -1:
                loop = False
            cv2.imshow("target and depth", img)
            for i, (warp, diff) in enumerate(zip(warp_imgs, diff_imgs)):
                cv2.imshow("warp scale: %d" % i, warp)
                cv2.imshow("diff scale: %d" % i, diff)
            mlab.show(10)

    utils.iterate_loader(args, test_loader, step_fn)
Exemple #15
0
                self.label_list[i].setText('')
            self.label_list[i].setAlignment(QtCore.Qt.AlignCenter)

    def enable_input(self):
        """Make every cell label in the m x n grid accept input."""
        cell_count = self.m * self.n
        for idx in range(cell_count):
            self.label_list[idx].setEnabled(True)

    def disable_input(self):
        """Lock every cell label in the m x n grid against input."""
        cell_count = self.m * self.n
        for idx in range(cell_count):
            self.label_list[idx].setEnabled(False)

    def set_size(self, width, height):
        """Resize the grid: each cell gets width//m x height//n base size
        and a font scaled to 20% of the cell width.
        """
        dw = width // self.m
        dh = height // self.n
        for i in range(self.m * self.n):
            self.label_list[i].setBaseSize(dw, dh)
            # FIX: both branches of the original
            # `if len(...) > 1: setFontPointSize(dw*0.2) else: setFontPointSize(dw*0.2)`
            # were identical, so the conditional was dead and is removed.
            # If multi-character cells were meant to use a different factor,
            # restore the branch with the intended value.
            self.label_list[i].setFontPointSize(dw * 0.2)
        self.setBaseSize(width, height)


if __name__ == "__main__":
    # Manual smoke test: show the grid widget and print its initial state.
    app = QApplication([])
    arg_parser = get_args()
    opt = arg_parser.parse_args()
    unit = input_unit(opt)
    print(unit.get_state())
    app.exec_()
Exemple #16
0
import time
import torch
import numpy as np
import torch.nn as nn
import torch.nn.parallel
import torch.nn.functional
from utils import *
from models.net import SARPN
from options import get_args
from collections import OrderedDict
from dataloader import nyudv2_dataloader

# Parse test-time options and build the SARPN model for evaluation.
args = get_args('test')
# Load the NYU Depth v2 test set.
TestImgLoader = nyudv2_dataloader.getTestingData_NYUDV2(
    args.batch_size, args.testlist_path, args.root_path)
# Model: SARPN wrapped in DataParallel, moved to GPU.
model = SARPN(args)
model = nn.DataParallel(model)
model.cuda()

# Restore weights from the checkpoint, if one was given; otherwise the
# model keeps its randomly-initialised weights.
if args.loadckpt is not None:
    print("loading model {}".format(args.loadckpt))
    state_dict = torch.load(args.loadckpt)
    model.load_state_dict(state_dict)
else:
    print("You have not loaded any models.")


def test():
Exemple #17
0
def main(argv):
    """Render a diagram for the network architecture selected by *argv*."""
    args = options.get_args(argv, parse=False)
    # NOTE(review): the return value is unused; kept in case get_network
    # has side effects on `args` -- confirm.
    net = options.get_network(args)
    # Tag the network name with its depth so output files are distinguishable.
    args.network += '_d%d' % args.depth
    # IDIOM FIX: the result was bound to an unused `file_name` local;
    # call directly instead.
    show_net(args.network, args.data_shape)
Exemple #18
0
import torch
import numpy as np
import torch.nn as nn
import torch.nn.parallel
import torch.optim as optim
import torch.backends.cudnn as cudnn
from utils import *
from options import get_args
from models.net import SARPN
from torch.autograd import Variable
from tensorboardX import SummaryWriter
from dataloader import nyudv2_dataloader
from models.loss import adjust_gt, total_loss

# Enable cuDNN autotuning (fixed input sizes make this a net win).
cudnn.benchmark = True
args = get_args('train')

# Create output folders for checkpoints and logs.
makedir(args.checkpoint_dir)
makedir(args.logdir)

# Create the TensorBoard summary logger.
logger = SummaryWriter(args.logdir)

# Dataset and dataloader for NYU Depth v2 training data.
TrainImgLoader = nyudv2_dataloader.getTrainingData_NYUDV2(
    args.batch_size, args.trainlist_path, args.root_path)
# Model: SARPN wrapped in DataParallel, moved to GPU.
model = SARPN(args)
model = nn.DataParallel(model)
model.cuda()
Exemple #19
0
def main(argv):
    """Parse command-line options from *argv* and launch training."""
    train(options.get_args(argv))
Exemple #20
0
def main():
    """Run inference: compute the network output (pseudo-log-likelihoods)
    for every utterance in feats_scp and write it to Kaldi ark/scp files
    under args.dir, optionally compressed.
    """
    args = get_args()
    setup_logger('{}/log-inference'.format(args.dir), args.log_level)
    logging.info(' '.join(sys.argv))

    # IDIOM FIX: `== False` -> `not ...`.
    if not torch.cuda.is_available():
        logging.warning('No GPU detected! Use CPU for inference.')
        device = torch.device('cpu')
    else:
        device = torch.device('cuda', args.device_id)

    model = get_chain_model(feat_dim=args.feat_dim,
                            output_dim=args.output_dim,
                            lda_mat_filename=args.lda_mat_filename,
                            hidden_dim=args.hidden_dim,
                            kernel_size_list=args.kernel_size_list,
                            stride_list=args.stride_list)

    load_checkpoint(args.checkpoint, model)

    model.to(device)
    model.eval()

    # BUG FIX: the wspecifier template contained literal '(unknown)'
    # placeholders, so .format(filename=...) was a no-op and the output
    # went to files literally named '(unknown)'.  Use the {filename} field
    # the format() call supplies; this matches the 'nnet_output.scp' path
    # logged at the end of this function.
    specifier = 'ark,scp:{filename}.ark,{filename}.scp'.format(
        filename=os.path.join(args.dir, 'nnet_output'))

    if args.save_as_compressed:
        Writer = kaldi.CompressedMatrixWriter
        Matrix = kaldi.CompressedMatrix
    else:
        Writer = kaldi.MatrixWriter
        Matrix = kaldi.FloatMatrix

    writer = Writer(specifier)

    dataloader = get_feat_dataloader(
        feats_scp=args.feats_scp,
        model_left_context=args.model_left_context,
        model_right_context=args.model_right_context,
        batch_size=32)

    for batch_idx, batch in enumerate(dataloader):
        key_list, padded_feat, output_len_list = batch
        padded_feat = padded_feat.to(device)
        with torch.no_grad():
            nnet_output, _ = model(padded_feat)

        # Strip padding and write each utterance's output under its key.
        num = len(key_list)
        for i in range(num):
            key = key_list[i]
            output_len = output_len_list[i]
            value = nnet_output[i, :output_len, :]
            value = value.cpu()

            # Zero-copy hand-off to Kaldi via DLPack.
            m = kaldi.SubMatrixFromDLPack(to_dlpack(value))
            m = Matrix(m)
            writer.Write(key, m)

        if batch_idx % 10 == 0:
            logging.info('Processed batch {}/{} ({:.6f}%)'.format(
                batch_idx, len(dataloader),
                float(batch_idx) / len(dataloader) * 100))

    writer.Close()
    logging.info('pseudo-log-likelihood is saved to {}'.format(
        os.path.join(args.dir, 'nnet_output.scp')))
Exemple #21
0
                        best_accuracy = max(accuracy, best_accuracy)
                        logging(
                            "[{}] Valid | curr accu {:.4f} | best accu {:.4f}".
                            format(train_step // (args.train_steps // 10),
                                   accuracy, best_accuracy))

    ##### make prediction
    if is_master(args) and args.write_prediction:
        rev_label_dict = dict((v, k) for k, v in label_dict.items())
        model.load_state_dict(torch.load(args.model_path,
                                         map_location="cpu")[0],
                              strict=False)
        model = model.to(args.device)
        predict(args, model, te_loader,
                os.path.join(args.model_dir, "test_results.txt"),
                rev_label_dict)
        predict(args, model, va_loader,
                os.path.join(args.model_dir, "valid_results.txt"),
                rev_label_dict)


if __name__ == "__main__":
    args = options.get_args()
    # Default to a single-process, non-distributed run.
    args.distributed_world_size = 1
    args.distributed = False
    # torch.distributed launchers export WORLD_SIZE for each worker.
    world_size = os.environ.get('WORLD_SIZE')
    if world_size is not None:
        args.distributed = int(world_size) > 1
        args.distributed_world_size = int(world_size)
        args.device_id = args.local_rank
    main(args)
Exemple #22
0
from training.learnD import learnD_Realness
from training.learnG import learnG_Realness
from training.loss import CategoricalLoss


def print_now(cmd, file=None):
    """Print *cmd* prefixed with the current timestamp.

    Writes to stdout when *file* is None, otherwise to *file*; stdout is
    flushed either way so messages appear immediately.
    """
    time_now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    line = '%s %s' % (time_now, cmd)
    if file is None:
        print(line)
    else:
        print(line, file=file)
    sys.stdout.flush()

# Parse hyper-parameters/options for the Realness GAN run.
from options import get_args
param = get_args()

# Wall-clock start of the whole run.
start = time.time()

# Fresh run (no checkpoint): create the folder for generated sample
# images, and echo the full option set.
if param.load_ckpt is None:
    if param.gen_extra_images > 0 and not os.path.exists(f"{param.extra_folder}"):
        os.mkdir(f"{param.extra_folder}")
    print_now(param)

# GPU run: force deterministic cuDNN kernels for reproducibility.
if param.cuda:
    import torch.backends.cudnn as cudnn
    cudnn.deterministic = True
    device = 'cuda'

# Seed Python and NumPy RNGs from the configured seed.
random.seed(param.seed)
np.random.seed(param.seed)
Exemple #23
0
'''
Train model on Cifar10, Cifar100, and SVHN.
Contact: Liming Zhao ([email protected])
'''
import mxnet as mx
import argparse
import os
import logging
import numpy as np
import options
import visualization as vis

# Build the selected network and tag its name with the depth so output
# files are distinguishable.
args = options.get_args(parse=False)
net = options.get_network(args)
args.network += '_d%d' % args.depth

# Render the network graph; node_attrs tune the Graphviz appearance
# (rectangular nodes, large Arial labels, thick borders).
dot = vis.plot_network(
    net,
    shape={"data": (1, 3, args.data_shape, args.data_shape)},
    node_attrs={
        "shape": 'rect',
        "fixedsize": 'false',
        'fontsize': "54",
        'fontname': 'Arial',
        'ratio': "auto",
        'width': "0",
        'height': "0",
        'len': '0.1',
        'margin': "0.3,0.1",
        'penwidth': '3.0'
    })
Exemple #24
0
def main():
    """Parse command-line options and launch training."""
    train(options.get_args())
Exemple #25
0
            global_step += 1
            if idx % args.log_interval == 0:
                logger.emit("Training", metrics_names, result)
                tensorboard.create_summary(global_step,
                                           result,
                                           model,
                                           prefix='train')

        # Validation / Test
        for dataset, set_type in ((valid_data, 'valid'), (test_data, 'test')):
            for datum in dataset:
                xs, ys = create_input_sample(datum)
                result = model.test_on_batch(xs, ys, reset_metrics=False)
            is_best = logger.emit(set_type, metrics_names, result)
            # if is_best:
            #    tensorboard.save_model(model, 'best')
            tensorboard.create_summary(global_step,
                                       result,
                                       model,
                                       prefix=set_type)
            model.reset_metrics()

        logger.best("valid")

    logger.emit_history("test", logger.best_index("valid"))
    tensorboard.save_model(model, 'last')


if __name__ == '__main__':
    # Parse options and hand straight off to the training loop.
    cli_args = get_args()
    train(cli_args)
Exemple #26
0
def parse_args(extra=None, overwrite=None):
    """Build the option set shared by the train/debug/test entry points.

    Args:
        extra: additional option names to expose beyond the always-on set.
        overwrite: mapping of option defaults to override.

    BUG FIX: the original used mutable default arguments (`extra=[]`,
    `overwrite={}`), which are shared across calls; use None sentinels.
    """
    always = ["net", "workers", "device", "load"]
    return options.get_args(description="Train, debug or test a network",
                            options=always + (extra if extra is not None else []),
                            overwrite=overwrite if overwrite is not None else {})