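# Imports used by this routine. `args`, `model`, `optimizer`, `criterion`,
# `trainloader`, `use_cuda`, `t_sets`, `n_traindata`, `train_batch_size`,
# `p_constraint`, `positive_clipper`, and `save_dir` are assumed to be
# defined elsewhere in this file/repo.
import os
import sys

import progressbar
import torch
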
def train(epoch):
    print('\nEpoch: %d' % epoch)
    model.train()
    train_loss = 0
    correct = 0
    total = 0
    total_n = 0
    # decay the learning rate 10x at epochs 150 and 225
    # (an earlier schedule used milestones 150, 225 and 300)
    lr = args.lr * (0.1 ** (epoch // 150)) * (0.1 ** (epoch // 225))
    t_sets.set_lr(optimizer, lr)
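    # `t_sets.set_lr` is a project helper; a minimal sketch of what it is
    # assumed to do (overwrite the LR on every optimizer param group):
    #
    #     def set_lr(optimizer, lr):
    #         for group in optimizer.param_groups:
    #             group['lr'] = lr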
    if args.verbose:
        pbar = progressbar.ProgressBar(max_value=n_traindata//train_batch_size)

    for batch_idx, (inputs, targets) in enumerate(trainloader):
        if use_cuda:
            inputs, targets = inputs.cuda(), targets.cuda()

        # reset accumulated gradients before computing this batch's update
        optimizer.zero_grad()

        outputs = model(inputs)
        loss = criterion(outputs, targets)
        loss.backward()
        optimizer.step()

        train_loss += loss.item()
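
        # An earlier, commented-out variant accumulated gradients and only
        # stepped every `update_rate` batches (hypothetical sketch;
        # `update_rate` is not defined in this file):
        #
        #     if batch_idx % update_rate == 0:
        #         optimizer.step()
        #         optimizer.zero_grad()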


        # print("Trainloss: {:.02f}".format(loss.data[0]))
        _, predicted = torch.max(outputs.data, 1)
        total += targets.size(0)
        correct += predicted.eq(targets.data).cpu().sum()
        if args.verbose:
            pbar.update(batch_idx)
        total_n = batch_idx + 1
        # apply the positivity constraint every `frequency` batches
        if p_constraint and total_n % positive_clipper.frequency == 0:
            model.apply(positive_clipper)
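        # `positive_clipper` follows the `Module.apply` convention: a callable
        # that receives each submodule. A hypothetical sketch, assuming it
        # enforces non-negative weights:
        #
        #     class PositiveClipper(object):
        #         def __init__(self, frequency=1):
        #             self.frequency = frequency
        #
        #         def __call__(self, module):
        #             if hasattr(module, 'weight'):
        #                 module.weight.data.clamp_(min=0)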

    # Epoch summary.
    t_acc = 100. * correct / total
    t_loss = train_loss / total_n

    w_line = '\nTrain:\t{:d}\tLoss: {:.04f}\tAcc: {:.04f}\tLR {:0.6f}'.format(
        epoch, t_loss, t_acc, lr)
    print(w_line)
    # mode 'a' creates the file if it does not exist, so no isfile check is needed
    result_file = os.path.join(save_dir, 'ckpt_result.txt')
    with open(result_file, 'a') as f:
        f.write(w_line)
    sys.stdout.flush()
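
# Hypothetical driver loop (not part of the original file), assuming an
# `args.epochs` option exists:
#
#     for epoch in range(args.epochs):
#         train(epoch)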