Example #1
import time

import tensorflow as tf

import inputs      # project-local batch loader
import squeezenet  # project-local model definition


def train(args):
    print("run tensorflow squeezenet")
    startTime = time.time()
    x = tf.placeholder(tf.float32, shape=(args.batch_size, 3, 32, 32))
    y = tf.placeholder(tf.int32, shape=(args.batch_size))
    logits = squeezenet.Squeezenet_CIFAR(args).build(x, is_training=True)
    with tf.name_scope('loss'):
        cross_entropy = tf.losses.sparse_softmax_cross_entropy(labels=y,
                                                               logits=logits)
        cross_entropy = tf.reduce_mean(cross_entropy)

    with tf.name_scope('optimizer'):
        train_step = tf.train.GradientDescentOptimizer(
            args.lr).minimize(cross_entropy)

    batch = inputs.Batch(args.input_file, args.batch_size)

    config = tf.ConfigProto()
    # enable XLA JIT compilation for the whole graph
    config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())
        loopStart = time.time()
        loss_save = []
        time_save = []
        for epoch in range(args.epochs):
            train_loss = 0.0
            start = time.time()
            for i in range(batch.total_size // batch.batch_size):
                (input_x, input_y) = batch.batch()
                _, loss = sess.run([train_step, cross_entropy],
                                   feed_dict={
                                       x: input_x,
                                       y: input_y
                                   })
                train_loss += loss
                if (i + 1) % (batch.total_size // batch.batch_size // 10) == 0:
                    print('epoch %d: step %d, training loss %f' %
                          (epoch + 1, i + 1, train_loss / (i + 1)))
            stop = time.time()
            time_save.append(stop - start)
            average_loss = train_loss / (batch.total_size / batch.batch_size)
            print(
                'Training completed in {} sec ({} sec/image), with average loss {}'
                .format((stop - start), (stop - start) / batch.total_size,
                        average_loss))
            loss_save.append(average_loss)

    loopEnd = time.time()
    prepareTime = loopStart - startTime
    loopTime = loopEnd - loopStart
    timePerEpoch = loopTime / args.epochs

    time_save.sort()
    median_time = time_save[int(args.epochs / 2)]

    with open(args.write_to, "w") as f:
        f.write("unit: " + "1 epoch\n")
        for loss in loss_save:
            f.write(str(loss) + "\n")
        f.write("run time: " + str(prepareTime) + " " + str(median_time) +
                "\n")
Example #2
import time

import torch
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable

import inputs                   # project-local batch loader
import pytorch_squeeze_cifar10  # project-local model definition


def train(args):
    startTime = time.time()
    torch.set_num_threads(1)
    torch.manual_seed(args.seed)

    model = pytorch_squeeze_cifar10.SqueezeNet()
    if args.use_gpu:
        model.cuda()
    optimizer = optim.SGD(model.parameters(),
                          lr=args.lr,
                          momentum=args.momentum)
    batch = inputs.Batch(args.input_file, args.batch_size)

    def train_epoch(epoch):
        tloss = 0.0
        for i in range(batch.total_size // batch.batch_size):
            (input_x, input_y) = batch.batch()
            optimizer.zero_grad()
            if args.use_gpu:
                inputX = Variable(torch.from_numpy(input_x)).cuda()
                inputY = Variable(torch.from_numpy(input_y)).cuda()
            else:
                inputX = Variable(torch.from_numpy(input_x))
                inputY = Variable(torch.from_numpy(input_y))
            res = model(inputX)
            loss = F.nll_loss(F.log_softmax(res, dim=1), inputY)
            tloss += loss.item()
            loss.backward()
            optimizer.step()
            if (i + 1) % (batch.total_size // batch.batch_size // 10) == 0:
                print('epoch %d: step %d, training loss %f' %
                      (epoch + 1, i + 1, tloss / (i + 1)))
        # average loss per step over the epoch
        return tloss / (batch.total_size // batch.batch_size)

    def inference_epoch(epoch):
        model.eval()  # switch off dropout/batch-norm updates for inference
        with torch.no_grad():  # no autograd bookkeeping while timing
            for i in range(batch.total_size // batch.batch_size):
                (input_x, input_y) = batch.batch()
                inputX = Variable(torch.from_numpy(input_x))
                if args.use_gpu:
                    inputX = inputX.cuda()
                res = model(inputX)
        return 0

    loopStart = time.time()
    loss_save = []
    time_save = []
    for epoch in range(args.epochs):
        start = time.time()
        if args.inference:
            inference_epoch(epoch)
            stop = time.time()
            time_save.append(stop - start)
            print('Inference completed in {} sec ({} sec/image)'.format(
                (stop - start), (stop - start) / batch.total_size))
        else:
            loss_save.append(train_epoch(epoch))
            stop = time.time()
            time_save.append(stop - start)
            print('Training completed in {} sec ({} sec/image)'.format(
                (stop - start), (stop - start) / batch.total_size))
    loopEnd = time.time()

    prepareTime = loopStart - startTime
    loopTime = loopEnd - loopStart
    timePerEpoch = loopTime / args.epochs

    time_save.sort()
    median_time = time_save[int(args.epochs / 2)]
    with open(args.write_to, "w") as f:
        f.write("unit: " + "1 epoch\n")
        for loss in loss_save:
            f.write("{}\n".format(loss))
        f.write("run time: " + str(prepareTime) + " " + str(median_time) +
                "\n")
Example #3
import argparse

import torch
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable

import inputs                   # project-local batch loader
import pytorch_squeeze_cifar10  # project-local model definition
from pytorch_squeeze_cifar10 import SqueezeNet

if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='SqueezeNet CIFAR-10 benchmark')
    # ... (earlier arguments such as --seed, --lr, --input_file elided) ...
    parser.add_argument('--write_to',
                        type=str,
                        default='result_PyTorch',
                        help='File the performance results are written to')
    parser.add_argument('--generate_onnx',
                        type=str,
                        default='',
                        help='Path for writing the exported ONNX model')
    parser.add_argument('--use_gpu',
                        action='store_true',
                        help='Pass this flag to run on the GPU')
    parser.add_argument(
        '--inference',
        action='store_true',
        help='Pass this flag to measure inference time instead of training')
    args = parser.parse_args()

    if args.generate_onnx == '':
        train(args)
    else:
        torch.manual_seed(args.seed)
        model = pytorch_squeeze_cifar10.SqueezeNet()
        batch = inputs.Batch(args.input_file, 64)
        (input_x, input_y) = batch.batch()
        torch.onnx.export(model,
                          Variable(torch.from_numpy(input_x)),
                          args.generate_onnx,
                          verbose=True)


def printHead(n, tensor, name):
    # Print the largest-magnitude entry of `tensor`, then its first n
    # entries (signature inferred from the printHead calls in Example #5).
    tensor = tensor.reshape(-1)
    amax = 0
    for i in range(tensor.shape[0]):
        if abs(tensor[i]) > amax:
            amax = abs(tensor[i])
    print("{0:0.5f}".format(amax), end=" || ")
    for i in range(n):
        print("{0:0.5f}".format(tensor[i]), end=" ")
    print("\n")


if __name__ == '__main__':
    torch.manual_seed(42)
    model = SqueezeNet()
    optimizer = optim.SGD(model.parameters(), lr=0.005, momentum=0)
    batch = inputs.Batch('../cifar10_data/cifar-10-batches-py/data_batch_1',
                         64)

    tloss = 0.0
    for i in range(batch.total_size // batch.batch_size):
        (input_x, input_y) = batch.batch()
        optimizer.zero_grad()
        res = model(Variable(torch.from_numpy(input_x)))
        loss = F.nll_loss(F.log_softmax(res, dim=1),
                          Variable(torch.from_numpy(input_y)))
        tloss += loss.item()
        loss.backward()
        optimizer.step()
        if (i + 1) % (batch.total_size // batch.batch_size // 10) == 0:
            print('epoch %d: step %d, training loss %f' %
                  (1, i + 1, tloss / (i + 1)))
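
When the --generate_onnx branch above is taken, the exported file can be sanity-checked with onnxruntime. This is an assumption on our part (the examples themselves never load the model back); 'squeezenet.onnx' stands in for whatever path was passed on the command line:

import numpy as np
import onnxruntime as ort

sess = ort.InferenceSession('squeezenet.onnx',
                            providers=['CPUExecutionProvider'])
input_name = sess.get_inputs()[0].name
# Same shape the model was traced with: one batch of CIFAR-10 images.
dummy = np.random.rand(64, 3, 32, 32).astype(np.float32)
logits = sess.run(None, {input_name: dummy})[0]
print(logits.shape)  # expected (64, 10): one score per CIFAR-10 class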
Example #5
import statistics
import time

import torch
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable

import inputs    # project-local batch loader
import resnet50  # project-local model definition


def train(args):
    startTime = time.time()
    # note: cudnn.benchmark autotunes conv algorithms, which can conflict
    # with the determinism requested on the next line
    cudnn.benchmark = True
    cudnn.deterministic = True
    torch.set_num_threads(1)
    torch.manual_seed(args.seed)

    model = resnet50.resnet50Cifar10()
    if args.use_gpu:
        model.cuda()
    optimizer = optim.SGD(model.parameters(),
                          lr=args.lr,
                          momentum=args.momentum)
    batch = inputs.Batch(args.input_file, args.batch_size)

    def train_epoch(epoch):
        tloss = 0.0
        for i in range(batch.total_size // batch.batch_size):
            (input_x, input_y) = batch.batch()
            inputX = Variable(torch.from_numpy(input_x))
            inputY = Variable(torch.from_numpy(input_y))
            if args.use_gpu:
                inputX = inputX.cuda()
                inputY = inputY.cuda()
            optimizer.zero_grad()
            loss = F.nll_loss(F.log_softmax(model(inputX), dim=1), inputY)
            tloss += loss.data.item()
            loss.backward()
            optimizer.step()
            if (i + 1) % (batch.total_size // batch.batch_size // 10) == 0:
                print('epoch %d: step %d, training loss %f' %
                      (epoch + 1, i + 1, tloss / (i + 1)))
        # average loss per step over the epoch
        return tloss / (batch.total_size // batch.batch_size)

    def inference_epoch(epoch):
        model.eval()
        with torch.no_grad():  # no autograd bookkeeping while timing
            for i in range(batch.total_size // batch.batch_size):
                (input_x, input_y) = batch.batch()
                inputX = Variable(torch.from_numpy(input_x))
                if args.use_gpu:
                    inputX = inputX.cuda()
                resnet50.printHead(10, inputX, "input")
                res = model(inputX)
                resnet50.printHead(10, res, "output")
                exit(0)  # debugging aid: dump one batch and stop
        return 0

    loopStart = time.time()
    loss_save = []
    time_save = []
    for epoch in range(args.epochs):
        start = time.time()
        if args.inference:
            loss_save.append(inference_epoch(epoch))
        else:
            loss_save.append(train_epoch(epoch))
        stop = time.time()
        time_save.append(stop - start)
        print('{} completed in {} sec ({} sec/image)'.format(
            'Inference' if args.inference else 'Training',
            (stop - start), (stop - start) / batch.total_size))
    loopEnd = time.time()

    prepareTime = loopStart - startTime
    loopTime = loopEnd - loopStart
    timePerEpoch = loopTime / args.epochs

    median_time = statistics.median(time_save)

    with open(args.write_to, "w") as f:
        f.write("unit: " + "1 epoch\n")
        for loss in loss_save:
            f.write("{}\n".format(loss))
        f.write("run time: " + str(prepareTime) + " " + str(median_time) +
                "\n")