Example #1
def train_model(model, train_loader, epoch, num_epochs, optimizer, writer,
                current_lr, log_every=1, n_classes=3):
    metric = torch.nn.CrossEntropyLoss()

    y_probs = np.zeros((0, n_classes), dtype=float)
    losses, y_trues = [], []
    model.train()

    for m in model.modules():
        if isinstance(m, nn.BatchNorm2d):
            m.train()
            m.weight.requires_grad = False
            m.bias.requires_grad = False

    for i, (image, label, case_id) in enumerate(train_loader):
        print(f'Starting with batch {i}, case id {case_id}')

        optimizer.zero_grad()
        if torch.cuda.is_available():
            image = image.cuda()
            label = label.cuda()

        image = torch.squeeze(image, dim=0)

        prediction = model.forward(image.float())
        loss = metric(prediction, label.long())
        loss.backward()
        optimizer.step()
        print(f'Done with batch {i}')

        loss_value = loss.item()
        losses.append(loss_value)
        y_prob = F.softmax(prediction, dim=1)
        y_probs = np.concatenate([y_probs, y_prob.detach().cpu().numpy()])
        y_trues.append(label.item())

        metric_collects = utils.calc_multi_cls_measures(y_probs, y_trues)
        n_iter = epoch * len(train_loader) + i
        writer.add_scalar('Train/Loss', loss_value, n_iter)

        if (i % log_every == 0) and i > 0:
            utils.print_progress(epoch + 1, num_epochs, i, len(train_loader),
                                 np.mean(losses), current_lr, metric_collects)

    train_loss_epoch = np.round(np.mean(losses), 4)
    return train_loss_epoch, metric_collects
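
A habit shared by most of the examples on this page: calling the module directly (model(x)) is generally preferable to model.forward(x), because nn.Module.__call__ also dispatches any registered hooks. A minimal sketch with a toy net (illustrative, not taken from the example above):

import torch
import torch.nn as nn

net = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 3))
# A forward hook fires when the module is called, but not via net.forward(...)
net.register_forward_hook(lambda mod, inp, out: print('hook fired'))

x = torch.randn(2, 4)
_ = net(x)          # prints 'hook fired'
_ = net.forward(x)  # silently bypasses the hook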
Example #2
def validate(model, val_loader, data_param, train_param, _log):
    with th.no_grad():
        criterion = nn.BCEWithLogitsLoss(pos_weight=th.Tensor([train_param['pos_weight']]).cuda())
        val_iter = iter(val_loader)
        running_loss = 0
        prune = data_param['prune']
        for _ in range(len(val_iter)):
            batch_sample = next(val_iter)
            data = batch_sample['data'].cuda()
            gt = batch_sample['gt'].cuda()
            prds = model.forward(data)[:, :, prune:-prune, prune:-prune]
            indices = gt>=0
            loss = criterion(prds[indices], gt[indices])
            running_loss += loss.item()
        del data, gt, prds, indices
        return running_loss/len(val_iter)
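A DataLoader is directly iterable, so the iter()/next() bookkeeping above is rarely necessary. A minimal sketch of the same loop, assuming the same dict-style batches and an existing criterion:

import torch as th

def validate_simple(model, val_loader, criterion, device='cuda'):
    # Iterate the loader directly instead of managing iter()/next() by hand.
    model.eval()
    running_loss, n_batches = 0.0, 0
    with th.no_grad():
        for batch_sample in val_loader:
            data = batch_sample['data'].to(device)
            gt = batch_sample['gt'].to(device)
            loss = criterion(model(data), gt)
            running_loss += loss.item()
            n_batches += 1
    return running_loss / max(n_batches, 1)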
Example #3
def train(trainloader, model, optimization, start_epoch, stop_epoch, params,
          config):
    if optimization == 'Adam':
        optimizer = torch.optim.Adam(model.parameters())
    else:
        raise ValueError('Unknown optimization, please define by yourself')
    loss_fn = nn.CrossEntropyLoss()
    pgd = attack.AttackPGD(config)
    for epoch in range(start_epoch, stop_epoch):
        model.train()
        print_freq = 50
        avg_loss = 0
        correct, total = 0, 0
        for i, (x, y, _) in enumerate(trainloader):
            noise = torch.zeros_like(x).uniform_(-8 / 255., 8 / 255.)
            x = torch.clamp(x + noise, 0., 1.)
            x, y = x.cuda(), y.cuda()
            optimizer.zero_grad()
            scores, _ = model.forward(x)
            predicted = torch.argmax(scores, 1)
            correct += (predicted == y).sum().item()
            total += predicted.size(0)
            loss = loss_fn(scores, y)
            loss.backward()
            optimizer.step()

            avg_loss = avg_loss + loss.item()

            if i % print_freq == 0:
                print(
                    'Epoch {:d} | Batch {:d}/{:d} | Loss {:f} | Train Acc {:f}'
                    .format(epoch, i, len(trainloader),
                            avg_loss / float(i + 1), 100. * correct / total))

        if not os.path.isdir(params.checkpoint_dir):
            os.makedirs(params.checkpoint_dir)

        if (epoch % params.save_freq == 0) or (epoch == stop_epoch - 1):
            outfile = os.path.join(params.checkpoint_dir,
                                   '{:d}.tar'.format(epoch))
            state_dict = {}
            state_dict['epoch'] = epoch
            state_dict['feature'] = model.feature.state_dict()
            state_dict['classifier'] = model.classifier.state_dict()
            torch.save(state_dict, outfile)

    return model
Example #4
def train():
    # Turn on training mode which enables dropout.
    model.train()
    total_loss = 0
    start_time = time.time()
    nlabels = args.labels
    hidden = model.init_hidden(args.batch_size)

    #for batch, load in enumerate(range(0,args.batch_size,len(corpus.trainid))):
    for batch, load in enumerate(range(0, len(corpus.trainid),
                                       args.batch_size)):
        #print("TRAIN CORPUS LEN",len(corpus.trainid))
        if load + args.batch_size > len(corpus.trainid):
            continue
        data, targets, lens = get_batch(corpus.trainid, corpus.trainlab,
                                        corpus.trainlen, load, args.batch_size)
        # Starting each batch, we detach the hidden state from how it was previously produced.
        # If we didn't, the model would try backpropagating all the way to start of the dataset.
        hidden = repackage_hidden(hidden)
        model.zero_grad()
        #print(data)
        output, hidden, perm_index = model.forward(data, hidden, lens)
        targets = targets[:, perm_index]
        #print(output.view(-1, nlabels))
        #print(torch.t(targets))
        loss = criterion(output.view(-1, nlabels), torch.t(targets))
        loss.backward()

        # `clip_grad_norm_` helps prevent the exploding gradient problem in RNNs / LSTMs.
        torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
        for p in model.parameters():
            p.data.add_(p.grad.data, alpha=-lr)

        total_loss += loss.item()

        if batch % args.log_interval == 0 and batch > 0:
            cur_loss = total_loss / args.log_interval
            elapsed = time.time() - start_time
            print(
                '| epoch {:3d} | {:5d}/{:5d} batches | lr {:02.2f} | ms/batch {:5.2f} | '
                'loss {:5.2f} | ppl {:8.2f}'.format(
                    epoch, batch,
                    len(corpus.trainid) // args.bptt, lr,
                    elapsed * 1000 / args.log_interval, cur_loss,
                    math.exp(cur_loss)))
            total_loss = 0
            start_time = time.time()
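The hand-written parameter update in this example predates the current optimizer API. A sketch of the modern equivalent using torch.optim.SGD and the in-place clip_grad_norm_; the linear model is a stand-in, since the example's RNN is not shown:

import torch

model = torch.nn.Linear(10, 5)   # stand-in for the RNN in the example
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

x, y = torch.randn(8, 10), torch.randint(0, 5, (8,))
optimizer.zero_grad()
loss = criterion(model(x), y)
loss.backward()
# Clip, then let the optimizer apply the update instead of looping over parameters.
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=0.25)
optimizer.step()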
Example #5
def evaluate_model(model, val_loader, epoch, num_epochs, writer, current_lr,
                   log_every=20):
    n_classes = model.n_classes
    metric = torch.nn.CrossEntropyLoss()

    model.eval()
    for m in model.modules():
        if isinstance(m, nn.BatchNorm2d):
            m.train()
            m.weight.requires_grad = False
            m.bias.requires_grad = False

    y_probs = np.zeros((0, n_classes), dtype=float)
    losses, y_trues = [], []

    for i, (image, label, case_id) in enumerate(val_loader):

        # if torch.cuda.is_available():
        #     image = image.cuda()
        #     label = label.cuda()

        prediction = model.forward(image.float())
        loss = metric(prediction, label.long())

        loss_value = loss.item()
        losses.append(loss_value)
        y_prob = F.softmax(prediction, dim=1)
        y_probs = np.concatenate([y_probs, y_prob.detach().cpu().numpy()])
        y_trues.append(label.item())

        metric_collects = utils.calc_multi_cls_measures(y_probs, y_trues)

        n_iter = epoch * len(val_loader) + i
        writer.add_scalar('Val/Loss', loss_value, n_iter)

        if (i % log_every == 0) and i > 0:
            prefix = '*Val|'
            utils.print_progress(epoch + 1, num_epochs, i, len(val_loader),
                                 np.mean(losses), current_lr, metric_collects,
                                 prefix=prefix)

    val_loss_epoch = np.round(np.mean(losses), 4)
    return val_loss_epoch, metric_collects
Example #6
def train(model, training_data, loss_fn, optimiser, batch_size=1):
    for train, target in training_data:
        model.zero_grad()

        pred = model.forward(train)

        loss = loss_fn(pred[-1], target[-1])
        # print("Target", target, "Loss", loss.item())

        optimiser.zero_grad()
        loss.backward()
        optimiser.step()

    return loss.item()
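As written, the function returns only the final batch's loss (and raises NameError on an empty iterable). If an epoch-level figure is wanted, a running mean is the usual fix; a sketch under the same interface (the helper name is mine):

def train_epoch(model, training_data, loss_fn, optimiser):
    # Same loop, but average the loss over the epoch instead of keeping the last value.
    total, n = 0.0, 0
    for train, target in training_data:
        optimiser.zero_grad()
        pred = model(train)
        loss = loss_fn(pred[-1], target[-1])
        loss.backward()
        optimiser.step()
        total += loss.item()
        n += 1
    return total / max(n, 1)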
Example #7
def evaluate_1step_pred(args, model, test_dataset):
    # turn on evaluation mode which disables dropout
    model.eval()
    total_loss = 0
    with torch.no_grad():
        hidden = model.init_hidden(args.eval_batch_size)
        for nbatch, i in enumerate(
                range(0,
                      test_dataset.size(0) - 1, args.bptt)):

            inputSeq, targetSeq = get_batch(args, test_dataset, i)
            outSeq, hidden = model.forward(inputSeq, hidden)

            loss = criterion(outSeq.view(args.eval_batch_size, -1),
                             targetSeq.view(args.eval_batch_size, -1))
            hidden = model.repackage_hidden(hidden)
            total_loss += loss.item()

    return total_loss / (nbatch + 1)  # nbatch is the last zero-based index, so add one for the count
Example #8
def train_model(model,
                train_loader,
                epoch,
                num_epochs,
                optimizer,
                current_lr,
                log_every=100):
    n_classes = model.n_classes
    metric = torch.nn.CrossEntropyLoss()

    y_probs = np.zeros((0, n_classes), dtype=float)
    losses, y_trues = [], []
    model.train()

    for m in model.modules():
        if isinstance(m, nn.BatchNorm2d):
            m.train()
            m.weight.requires_grad = False
            m.bias.requires_grad = False

    for i, (image, label) in enumerate(train_loader):
        optimizer.zero_grad()
        if torch.cuda.is_available():
            image = image.cuda()
            label = label.cuda()
        prediction = model.forward(image.float())
        loss = metric(prediction, label.long())
        loss.backward()
        optimizer.step()

        loss_value = loss.item()
        losses.append(loss_value)
        y_prob = F.softmax(prediction, dim=1)
        y_probs = np.concatenate([y_probs, y_prob.detach().cpu().numpy()])
        y_trues = np.append(y_trues, label.detach().cpu().numpy())

    y_trues = np.array(y_trues)
    metric_collects = utils.calc_multi_cls_measures(y_probs, y_trues)
    train_loss_epoch = np.round(np.mean(losses), 4)

    return train_loss_epoch, metric_collects
Example #9
File: train.py Project: belaalb/h-GAN
def save_testdata_statistics(model, data_loader, cuda_mode):

    for batch in data_loader:

        x, y = batch

        with torch.no_grad():  # Variable wrappers are deprecated; no_grad covers inference
            out = model.forward(x).cpu().numpy()

        try:
            logits = np.concatenate([logits, out], 0)
        except NameError:  # first batch: nothing accumulated yet
            logits = out

    m = logits.mean(0)
    C = np.cov(logits, rowvar=False)

    pfile = open('./test_data_statistics.p', "wb")
    pickle.dump({'m': m, 'C': C}, pfile)
    pfile.close()
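The try/except NameError accumulation here is fragile; the conventional pattern is to append per-batch arrays to a list and concatenate once at the end. A sketch assuming a loader that yields (x, y) pairs:

import numpy as np
import torch

def collect_logits(model, data_loader):
    # Gather forward outputs batch by batch; concatenate a single time at the end.
    chunks = []
    with torch.no_grad():
        for x, _ in data_loader:
            chunks.append(model(x).cpu().numpy())
    return np.concatenate(chunks, axis=0)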
Example #10
def evaluate(data_source, test=False):
    # Turn on evaluation mode which disables dropout.
    model.eval()
    total_loss = 0
    nlabels = args.labels
    hidden = model.init_hidden(eval_batch_size)
    #for i in range(0, data_source.size(0) - 1, args.bptt):
    #for batch, load in enumerate(range(0,eval_batch_size,len(corpus.validid))):
    for batch, load in enumerate(range(0, len(corpus.validid),
                                       eval_batch_size)):
        if test:
            if load + eval_batch_size > len(corpus.testid):
                continue
            data, targets, lens = get_batch(corpus.testid,
                                            corpus.testlab,
                                            corpus.testlen,
                                            load,
                                            eval_batch_size,
                                            evaluation=True)
        else:
            if load + eval_batch_size > len(corpus.validid):
                continue
            data, targets, lens = get_batch(corpus.validid,
                                            corpus.validlab,
                                            corpus.validlen,
                                            load,
                                            eval_batch_size,
                                            evaluation=True)
        output, hidden, perm_index = model.forward(data, hidden, lens)
        targets = targets[:, perm_index]
        output_flat = output.view(-1, nlabels)
        total_loss += criterion(output_flat, torch.t(targets)).item()
        hidden = repackage_hidden(hidden)
    return total_loss
Example #11
    def validation(test_loader, model):
        correct = 0
        total_test = 0
        cnt = 0
        cross_entropy = 0
        model.eval()
        with torch.no_grad():
            for sample_batch in test_loader:
                images, labels = sample_batch
                if params.useGPU:
                    images, labels = images.cuda(), labels.cuda()
                out = model.forward(images)
                loss = torch.nn.CrossEntropyLoss()(out, labels)

                _, pred = torch.max(out, 1)
                correct += (pred == labels).sum().item()
                cross_entropy += loss.item()
                total_test += labels.size(0)
                cnt += 1

        return correct / total_test, cross_entropy / cnt
Example #12
def evaluate(conf, params, X_data, Y_data):
    """Evaluate a trained model on X_data.

    Args:
        conf: Configuration dictionary
        params: Dictionary with parameters
        X_data: numpy array of floats with shape [input dimension, number of examples]
        Y_data: numpy array of integers with shape [output dimension, number of examples]
    Returns:
        num_correct_total: Integer
        num_examples_evaluated: Integer
    """

    num_examples = X_data.shape[1]
    num_examples_evaluated = 0
    num_correct_total = 0
    start_ind = 0
    end_ind = conf['batch_size']
    while True:
        X_batch = X_data[:, start_ind:end_ind]
        Y_batch = model.one_hot(Y_data[start_ind:end_ind],
                                conf['output_dimension'])
        Y_proposal, _ = model.forward(conf, X_batch, params, is_training=False)
        _, num_correct = model.cross_entropy_cost(Y_proposal, Y_batch)
        num_correct_total += num_correct

        num_examples_evaluated += end_ind - start_ind

        start_ind += conf['batch_size']
        end_ind += conf['batch_size']

        if end_ind >= num_examples:
            end_ind = num_examples

        if start_ind >= num_examples:
            break

    return num_correct_total, num_examples_evaluated
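The start/end index bookkeeping in the while loop can be expressed more directly with a stepped range; a small sketch of the same batching logic (the helper name is mine):

def batch_bounds(num_examples, batch_size):
    # Yield (start, end) pairs covering every example; the last batch may be short.
    for start in range(0, num_examples, batch_size):
        yield start, min(start + batch_size, num_examples)

# e.g. list(batch_bounds(10, 4)) == [(0, 4), (4, 8), (8, 10)]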
Example #13
def test(test_data, ground_truth):
    with tf.Graph().as_default():
        x = tf.placeholder(tf.float32, [None, IMG_SIZE[0], IMG_SIZE[1], CH])

        logits = model.forward(x,
                               False,
                               with_dropout=False,
                               keep_prob=0.5,
                               batch_size=1)
        logits_reshape = tf.reshape(logits, [-1, model.Num_Classes])
        prob = tf.nn.softmax(logits_reshape, -1)
        prediction = tf.argmax(prob, -1)
        y = tf.reshape(prediction, IMG_SIZE)

        saver = tf.train.Saver()

        with tf.Session() as sess:
            ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split(
                    '-')[-1]
                print(global_step)

                Num = test_data.shape[0]
                ImgOut = np.zeros([Num, IMG_SIZE[0], IMG_SIZE[1]],
                                  dtype=np.int64)
                for i in range(Num):
                    img = sess.run(y,
                                   feed_dict={x: test_data[i:i + 1, :, :, :]})
                    ImgOut[i, :, :] = np.array(img)

                scipy.io.savemat('results.mat', {'mydata': ImgOut})
            else:
                print("No checkpoint is found.")
                return
Example #14
File: train.py Project: MRNet-UCD/niamh
def train_model(model,
                train_loader,
                epoch,
                num_epochs,
                optimizer,
                writer,
                current_lr,
                log_every=100):
    _ = model.train()

    if torch.cuda.is_available():
        model.cuda()

    y_preds = []
    y_trues = []
    losses = []
    a = np.zeros([1, 1])
    for i, (image, label, weight) in enumerate(train_loader):
        optimizer.zero_grad()

        if torch.cuda.is_available():
            image = image.cuda()
            label = label.cuda()
            weight = weight.cuda()

        label = label[0]
        weight = weight[0]

        prediction = model.forward(image.float()).squeeze(0)

        loss = torch.nn.BCEWithLogitsLoss(weight=weight)(prediction, label)
        loss.backward()
        optimizer.step()

        loss_value = loss.item()
        losses.append(loss_value)

        probas = torch.sigmoid(prediction)

        y_trues.append(int(label[0]))
        y_preds.append(probas[0].item())

        try:
            auc = metrics.roc_auc_score(y_trues, y_preds)
        except ValueError:  # only one class seen so far
            auc = 0.5

        writer.add_scalar('Train/Loss', loss_value,
                          epoch * len(train_loader) + i)
        writer.add_scalar('Train/AUC', auc, epoch * len(train_loader) + i)

        if (i % log_every == 0) and i > 0:
            print(
                '''[Epoch: {0} / {1} |Single batch number : {2} / {3} ]| avg train loss {4} | train auc : {5} | lr : {6}'''
                .format(epoch + 1, num_epochs, i, len(train_loader),
                        np.round(np.mean(losses), 4), np.round(auc, 4),
                        current_lr))

    writer.add_scalar('Train/AUC_epoch', auc, epoch + i)

    train_loss_epoch = np.round(np.mean(losses), 4)
    train_auc_epoch = np.round(auc, 4)
    return train_loss_epoch, train_auc_epoch
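Several examples on this page wrap roc_auc_score in a try/except because sklearn raises ValueError while y_trues still contains a single class. Catching that specific exception with a neutral fallback keeps genuine errors visible; a small helper sketch:

from sklearn import metrics

def safe_auc(y_trues, y_preds, default=0.5):
    # AUC that degrades gracefully while only one class has been observed.
    try:
        return metrics.roc_auc_score(y_trues, y_preds)
    except ValueError:
        return default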
Example #15
def fit(hp, model, train_data, test_data, dev_data):
    logs = [
        'Epoch', 'LR', 'Loss', 'Time(m)', 'DevWS', 'DevPOS', 'TestWS',
        'TestPOS'
    ]
    print(hp)
    print('\t'.join(logs))
    utils.dump_data(hp, hp['HYPERPARAMS'])

    decay_counter = 0
    best_dev_score = 0.
    indice = [i for i in range(len(train_data.ws_data))]
    for e in range(hp['EPOCH']):
        t = time.time()
        losses = 0.
        random.shuffle(indice)
        for i in indice:
            # Word Segmentation
            X = train_data.ws_data[i][0]
            Y = train_data.ws_data[i][1]
            obs = model.encode_ws(X, train=True)
            gold_score = model.score_sentence(obs, Y)
            forward_score = model.forward(obs)
            loss = forward_score - gold_score
            # Update
            loss.backward()
            model.trainer.update()
            losses += loss.value()

            # POS-tagging
            X = train_data.pos_data[i][0]
            Y = train_data.pos_data[i][1]
            loss = model.get_POStagging_loss(X, Y)
            losses += loss.value()
            # Update
            loss.backward()
            model.trainer.update()

        model.model.save(hp['EPOCH_MODEL'])
        dev_ws_f, dev_pos_f = evaluation(hp,
                                         fn_model=hp['EPOCH_MODEL'],
                                         data=dev_data)

        if dev_ws_f > best_dev_score:
            best_dev_score = dev_ws_f
            decay_counter = 0
            model.model.save(hp['MODEL'])
            test_ws_f, test_pos_f = evaluation(hp,
                                               fn_model=hp['MODEL'],
                                               data=test_data)
        else:
            decay_counter += 1
            if decay_counter >= hp['DECAY']:
                model.trainer.learning_rate = model.trainer.learning_rate / 2
                decay_counter = 0

        logs = [
            e, model.trainer.learning_rate, losses, (time.time() - t) / 60,
            dev_ws_f, dev_pos_f, test_ws_f, test_pos_f
        ]
        print('\t'.join([log[:5] for log in map(str, logs)]))
Example #16
def test_model():
    x = tf.placeholder(tf.float32, [batch_size,input_dim,1,1])
    y_ = tf.placeholder(tf.int32, [batch_size, n_class])
    y = model.forward(x, two_ch, 'adxd', False)
    global_step = tf.Variable(0, trainable=False) 
    with tf.name_scope('loss'):
        loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_,logits=y))
    
        tf.summary.scalar('loss', loss)
        
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    saver = tf.train.Saver()
    
    merged = tf.summary.merge_all()
    
    with tf.Session() as sess:
        
        coord=tf.train.Coordinator()  
        threads= tf.train.start_queue_runners(coord=coord) 
        
        init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())
        sess.run(init_op)
        
        # Comment out the following two lines when training the model for the first time
        model_path = os.path.join(MODEL_SAVE_PATH, MODEL_NAME)
        tf.train.Saver().restore(sess, model_path)
        
        ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
        k = 0
        m = 0
        writer = tf.summary.FileWriter("./train", sess.graph)
        
        print('#######################################################')
        doubel_file_name = get_doubel_file_from_txt('doubel_name_test.txt')
        d=0
        for doubel_file in doubel_file_name:
            print("Person %d's data:" % d)
            d = d+1
            
            file_1 = doubel_file[0]
            file_2 = doubel_file[1]
            EEG_1, EEG_2, onset, duration, stage_code = get_data(file_1, file_2, data_dir)
            EEG1_sequence, EEG2_sequence, stage_sequence = split_data_to_30s(EEG_1
                                                , EEG_2, onset, duration, stage_code)
            if k!=0:
                EEG1_sequence, EEG2_sequence, stage_sequence = K_EEG_together(EEG1_sequence,
                                                        EEG2_sequence, stage_sequence, k)
        
            batch_time = len(EEG1_sequence)/batch_size
            print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!', d)
            for i in range(int(batch_time)):
                if two_ch:
                    batch_data, batch_label = two_ch_batch(EEG1_sequence,
                                               EEG2_sequence, stage_sequence, batch_size)
                    # returns batch_data, batch_label
                else:
                    batch_data, batch_label = one_ch_batch(EEG1_sequence, 
                                                           stage_sequence, batch_size)
                    
                summary, loss_value, acc, y_pre = sess.run(
                    [merged, loss, accuracy, y],
                    feed_dict={x: batch_data, y_: batch_label})
                writer.add_summary(summary, m)
                m = m + 1

                print('loss is %g......acc is %f' % (loss_value, acc))
        writer.close()  # close once after all batches; closing inside the loop drops later summaries
Example #17
classes = jsonfile.read_json_file(os.path.join(DATA_ROOT, 'classes.json'))

f = open(os.path.join(out_folder, 'result.csv'), 'w')
f.write('file_id,{}\n'.format(','.join(classes)))

for track in test_list:
    track_id = track[0]
    track_data = samples.get(track_id)[()].astype(np.float32)
    track_data = track_data[:TRACK_LENGTH]

    if track_data.size < TRACK_LENGTH:
        track_data = np.pad(track_data, (0, TRACK_LENGTH - track_data.size),
                            mode='constant',
                            constant_values=0)

    track_data = track_data.reshape(-1, 44100)
    input_var = autograd.Variable(torch.from_numpy(track_data))

    if not args.no_cuda:
        input_var = input_var.cuda()

    prediction = model.forward(input_var).data.cpu().numpy()
    avg = prediction.mean(axis=0)

    f.write('{},{}\n'.format(track_id, ','.join(str(x) for x in avg)))

f.close()
Example #18
# Training the model
epochs = results.epochs
steps = 0
running_loss = 0

training_losses, validation_losses = [], []

for e in range(epochs):
    for inputs, targets in dataloaders['train']:
        steps += 1
        inputs, targets = inputs.to(device), targets.to(device)

        optimizer.zero_grad()

        logps = model.forward(inputs)
        loss = criterion(logps, targets)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
    else:
        valid_loss = 0
        accuracy = 0
        model.eval()
        with torch.no_grad():
            for inputs, labels in dataloaders['valid']:
                inputs, labels = inputs.to(device), labels.to(device)
                logps = model.forward(inputs)
                batch_loss = criterion(logps, labels)
Example #19
File: train.py Project: MRNet-UCD/niamh
def evaluate_model(model,
                   val_loader,
                   epoch,
                   num_epochs,
                   writer,
                   current_lr,
                   log_every=20):
    _ = model.eval()

    if torch.cuda.is_available():
        model.cuda()

    y_trues = []
    y_preds = []
    losses = []
    a = np.zeros([1, 1])
    for i, (image, label, weight) in enumerate(val_loader):

        if torch.cuda.is_available():
            image = image.cuda()
            label = label.cuda()
            weight = weight.cuda()

        label = label[0]
        weight = weight[0]

        prediction = model.forward(image.float())
        b = label.cpu().numpy()  # a CUDA tensor must come back to the CPU before .numpy()
        c = pd.concat([pd.DataFrame(a), pd.DataFrame(b)], axis=1)
        c.columns = ['dud', 'target']
        c['dud'] = 1 - c['target']
        print(c)
        label = torch.from_numpy(np.asarray(c)).float().to(prediction.device)

        loss = torch.nn.BCEWithLogitsLoss(weight=weight)(prediction, label)

        loss_value = loss.item()
        losses.append(loss_value)

        probas = torch.sigmoid(prediction)

        y_trues.append(int(label[0][1]))
        y_preds.append(probas[0][1].item())

        try:
            auc = metrics.roc_auc_score(y_trues, y_preds)
        except ValueError:  # only one class seen so far
            auc = 0.5

        writer.add_scalar('Val/Loss', loss_value, epoch * len(val_loader) + i)
        writer.add_scalar('Val/AUC', auc, epoch * len(val_loader) + i)

        if (i % log_every == 0) and i > 0:
            print(
                '''[Epoch: {0} / {1} |Single batch number : {2} / {3} ] | avg val loss {4} | val auc : {5} | lr : {6}'''
                .format(epoch + 1, num_epochs, i, len(val_loader),
                        np.round(np.mean(losses), 4), np.round(auc, 4),
                        current_lr))

    writer.add_scalar('Val/AUC_epoch', auc, epoch + i)

    val_loss_epoch = np.round(np.mean(losses), 4)
    val_auc_epoch = np.round(auc, 4)
    return val_loss_epoch, val_auc_epoch
Example #20
def evaluate_model(model,
                   val_loader,
                   epoch,
                   num_epochs,
                   writer,
                   current_lr,
                   log_every=20):
    _ = model.eval()

    if torch.cuda.is_available():
        model.cuda()

    y_trues = []
    y_preds = []
    losses = []

    for i, (image, label, weight) in enumerate(val_loader):

        if torch.cuda.is_available():
            image = image.cuda()
            label = label.cuda()
            weight = weight.cuda()

        label = label[0]
        weight = weight[0]

        prediction = model.forward(image.float())

        loss = torch.nn.BCEWithLogitsLoss(weight=weight)(prediction, label)

        loss_value = loss.item()
        losses.append(loss_value)

        probas = torch.sigmoid(prediction)

        y_trues.append(int(label[0][1]))
        y_preds.append(probas[0][1].item())

        #confusion_matrix(y_trues, y_preds)

        try:
            auc = metrics.roc_auc_score(y_trues, y_preds)
        except ValueError:  # only one class seen so far
            auc = 0.5

        #writer.add_scalar('Val/Loss', loss_value, epoch * len(val_loader) + i)
        #writer.add_scalar('Val/AUC', auc, epoch * len(val_loader) + i)

        if (i % log_every == 0) and i > 0:
            print(
                '''[Epoch: {0} / {1} |Single batch number : {2} / {3} ] | avg val loss {4} | val auc : {5} | lr : {6}'''
                .format(epoch + 1, num_epochs, i, len(val_loader),
                        np.round(np.mean(losses), 4), np.round(auc, 4),
                        current_lr))

    writer.add_scalar('Val/AUC_epoch', auc, epoch + i)

    val_loss = np.round(np.mean(losses), 4)
    val_auc = np.round(auc, 4)

    precision = my_precision(y_trues, y_preds)
    recall = my_recall(y_trues, y_preds)
    f1_score = my_f1_score(y_trues, y_preds)
    accuracy = my_accuracy(y_trues, y_preds)

    writer.add_scalar('Val/precision_epoch', precision, epoch + i)
    writer.add_scalar('Val/recall_epoch', recall, epoch + i)
    writer.add_scalar('Val/f1_score_epoch', f1_score, epoch + i)
    writer.add_scalar('Val/accuracy_epoch', accuracy, epoch + i)

    return precision, recall, f1_score, accuracy, val_loss, val_auc
Example #21
    if not os.path.exists(model_path):
        print("Error: Model not found!")
        exit(1)

    train_dataset = dataset.Image2SteeringDataset(dataset_path)

    dataloader = DataLoader(train_dataset,
                            batch_size=4,
                            shuffle=True,
                            num_workers=2)

    model = torch.load(model_path, map_location=device)
    model.eval()

    while True:

        image_np = random.choice(train_dataset)['image']
        image = np.expand_dims(image_np.copy(), axis=0)
        image = torch.from_numpy(image)
        image = image.to(device)

        output = model.forward(image.float())

        print(output)

        # CHW -> HWC for OpenCV display; a reshape here would scramble the pixels
        image_np = np.transpose(image_np, (1, 2, 0))
        cv2.imshow('image', image_np)
        cv2.waitKey(0)
Example #22
File: train.py Project: MRNet-UCD/niamh
def evaluate_model(model,
                   val_loader,
                   valid_loader,
                   epoch,
                   num_epochs,
                   writer,
                   current_lr,
                   log_every=20):
    _ = model.eval()

    if torch.cuda.is_available():
        model.cuda()

    y_trues = []
    y_preds = []
    losses = []
    a = np.zeros([1, 1])
    for i, (image, label, weight) in enumerate(val_loader):

        if torch.cuda.is_available():
            image = image.cuda()
            label = label.cuda()
            weight = weight.cuda()

        label = label[0]
        weight = weight[0]

        prediction = model.forward(image.float()).squeeze(0)

        loss = torch.nn.BCEWithLogitsLoss(weight=weight)(prediction, label)

        loss_value = loss.item()
        losses.append(loss_value)

        probas = torch.sigmoid(prediction)

        y_trues.append(int(label[0]))
        y_preds.append(probas[0].item())

        try:
            auc = metrics.roc_auc_score(y_trues, y_preds)
        except ValueError:  # only one class seen so far
            auc = 0.5

        writer.add_scalar('Val/Loss', loss_value, epoch * len(val_loader) + i)
        writer.add_scalar('Val/AUC', auc, epoch * len(val_loader) + i)

        if (i % log_every == 0 and i > 0) or i == 281:
            print(
                '''[Epoch: {0} / {1} |Single batch number : {2} / {3} ] | avg val loss {4} | val auc : {5} | lr : {6}'''
                .format(epoch + 1, num_epochs, i, len(val_loader),
                        np.round(np.mean(losses), 4), np.round(auc, 4),
                        current_lr))

    writer.add_scalar('Val/AUC_epoch', auc, epoch + i)

    val_loss_epoch = np.round(np.mean(losses), 4)
    val_auc_epoch = np.round(auc, 4)
    vle = val_loss_epoch
    vae = val_auc_epoch

    y_trues = []
    y_preds = []
    losses = []
    a = np.zeros([1, 1])
    for i, (image, label, weight) in enumerate(valid_loader):

        if torch.cuda.is_available():
            image = image.cuda()
            label = label.cuda()
            weight = weight.cuda()

        label = label[0]
        weight = weight[0]

        prediction = model.forward(image.float()).squeeze(0)

        probas = torch.sigmoid(prediction)

        y_trues.append(int(label[0]))
        y_preds.append(probas[0].item())

        try:
            auc = metrics.roc_auc_score(y_trues, y_preds)
        except ValueError:  # only one class seen so far
            auc = 0.5

        if (i % log_every == 0 and i > 0) or i == 119:
            print(
                '''[Epoch: {0} / {1} |Single batch number : {2} / {3} ] | val auc : {4} | lr : {5}'''
                .format(epoch + 1, num_epochs, i, len(valid_loader),
                        np.round(auc, 4), current_lr))

    return vle, vae, auc
Example #23
gt_labels_ids = []
gt_labels_ids_length = []
for i in range(len(test_data)):
    gt_labels_ids.append([total_node_list.index(label) for label in test_data[i]['label']])
    gt_labels_ids_length.append(len(test_data[i]['label']))

for epoch in range(1):
    for batch_idx, (context,entity,ids,left_right,lens,gt_labels) in enumerate(test_data_loader):

        gt_labels_id = [total_node_list.index(i) for i in gt_labels[0]]
        parent_list=[parent_of[total_node_list.index(label)] for label in gt_labels[0]]

        if eval_on_leaf_only:
            is_leaf=is_only_leaf(gt_labels[0])
            if is_leaf != 0:
                continue
        predict_quant_embd, id_list = model.forward(context.to(device), lens.to(device), ids, left_right)
        predict_quant_embd = predict_quant_embd.data.cpu().numpy()
        mask_matrix = np.multiply(quantam_concept_embds, predict_quant_embd)
        normalizing_constant = np.sum(np.abs(predict_quant_embd) ** 2, axis=-1)
        score = np.sum(np.abs(mask_matrix) ** 2, axis=-1) / normalizing_constant

        predicted_labels_id, best_level_1_node, best_level_1_score, best_level_2_node, best_level_2_score = predict_labels(
            score, threshold_level_wise, constant_to_divide)
        predicted_labels=[total_node_list[i] for i in predicted_labels_id]
        temp_dict = {}
        temp_dict['gt_label'] = gt_labels[0]
        temp_dict['predicted_label'] = predicted_labels
        temp_dict['best_level_1_node'] = best_level_1_node
        temp_dict['best_level_2_node'] = best_level_2_node
        temp_dict['best_level_1_score'] = best_level_1_score
        temp_dict['best_level_2_score'] = best_level_2_score
Example #24
# (assumed: the snippet begins mid-script; the training split presumably mirrors the testing one)
dataset_training = dataset.Create(ds, training_count, batch_size, device="cuda")
dataset_testing = dataset.Create(ds, testing_count, batch_size, device="cuda")

input_shape = dataset_training.get_input_shape()
output_shape = dataset_training.get_output_shape()

model = model.Create(input_shape[1], output_shape[1])

optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

epoch_count = 100

for epoch in range(epoch_count):
    for i in range(training_count // batch_size):
        target_output, input = dataset_training.get_random_batch()

        predicted_output = model.forward(input)

        optimizer.zero_grad()

        loss = ((target_output - predicted_output)**2).mean()
        loss.backward()
        optimizer.step()

    target_output, input = dataset_testing.get_random_batch()
    predicted_output = model.forward(input)

    loss = ((target_output - predicted_output)**2).mean()

    print("EPOCH = ", epoch)
    for i in range(batch_size):
        print("target =    ", target_output[i].detach().to("cpu").numpy())
Example #25
    for index, sample in enumerate(dataloader):

        img = Variable(sample['image'].float()).cuda()
        label = Variable(sample['label'].float()).cuda()
        label = label.unsqueeze(dim=1)
        #print(label.shape)

        #onehot = torch.FloatTensor(opts.batch_size, 2).cuda()
        #onehot.zero_()
        #onehot.scatter_(1,label,1)

        #one_hot = label.scatter_(1,label,1)
        #print(onehot)
        #one_hot = torch.FloatTensor(opts.batch_size, 2, n).zero_()

        prediction_prob = model.forward(img)
        #print(prediction_prob)
        #print(label)
        #print(prediction)

        loss = criterion(prediction_prob, label)

        loss.backward()
        optimizer.step()
        optimizer.zero_grad()

        if index % 500 == 0:
            print('prediction prob {}'.format(prediction_prob))
            prediction = prediction_prob > 0.5

            # print('Ground truth {}'.format(label))
Example #26
def train(conf, X_train, Y_train, X_devel, Y_devel):
    """Run training

    Args:
        conf: Configuration dictionary
        X_train: numpy array of floats with shape [input dimension, number of train examples]
        Y_train: numpy array of integers with shape [output dimension, number of train examples]
        X_devel: numpy array of floats with shape [input dimension, number of devel examples]
        Y_devel: numpy array of integers with shape [output dimension, number of devel examples]
    Returns:
        params: Dictionary with trained parameters
        train_progress: Dictionary with progress data, to be used in visualization.
        devel_progress: Dictionary with progress data, to be used in visualization.
    """
    print("Run training")

    # Preparation
    num_examples_in_epoch = X_train.shape[1]
    example_indices = np.arange(0, num_examples_in_epoch)
    np.random.shuffle(example_indices)

    # Initialisation
    params = model.initialization(conf)

    # For displaying training progress
    train_steps = []
    train_ccr = []
    train_cost = []
    devel_steps = []
    devel_ccr = []

    # Start training
    step = 0
    epoch = 0
    num_correct_since_last_check = 0
    batch_start_index = 0
    batch_end_index = conf['batch_size']
    print("Number of training examples in one epoch: ", num_examples_in_epoch)
    print("Start training")
    while True:
        start_time = time.time()
        batch_indices = get_batch_indices(example_indices, batch_start_index,
                                          batch_end_index)
        X_batch = X_train[:, batch_indices]
        Y_batch = model.one_hot(Y_train[batch_indices],
                                conf['output_dimension'])

        Y_proposal, features = model.forward(conf,
                                             X_batch,
                                             params,
                                             is_training=True)
        print("Finish Foward")
        cost_value, num_correct = model.cross_entropy_cost(Y_proposal, Y_batch)
        #print("Finish Cross Entropy")
        grad_params = model.backward(conf, Y_proposal, Y_batch, params,
                                     features)
        print("Finish Backward")
        params = model.gradient_descent_update(conf, params, grad_params)
        print("Finish Gradient Update")
        print("Finish Training Number" + repr(step))

        num_correct_since_last_check += num_correct

        batch_start_index += conf['batch_size']
        batch_end_index += conf['batch_size']
        if batch_start_index >= num_examples_in_epoch:
            epoch += 1
            np.random.shuffle(example_indices)
            batch_start_index = 0
            batch_end_index = conf['batch_size']

        step += 1

        if np.isnan(cost_value):
            print("ERROR: nan encountered")
            break

        if step % conf['train_progress'] == 0:
            elapsed_time = time.time() - start_time
            sec_per_batch = elapsed_time / conf['train_progress']
            examples_per_sec = conf['batch_size'] * conf[
                'train_progress'] / elapsed_time
            ccr = num_correct / conf['batch_size']
            running_ccr = (num_correct_since_last_check /
                           conf['train_progress'] / conf['batch_size'])
            num_correct_since_last_check = 0
            train_steps.append(step)
            train_ccr.append(running_ccr)
            train_cost.append(cost_value)
            if conf['verbose']:
                print(
                    "S: {0:>7}, E: {1:>4}, cost: {2:>7.4f}, CCR: {3:>7.4f} ({4:>6.4f}),  "
                    "ex/sec: {5:>7.3e}, sec/batch: {6:>7.3e}".format(
                        step, epoch, cost_value, ccr, running_ccr,
                        examples_per_sec, sec_per_batch))

        if step % conf['devel_progress'] == 0:
            num_correct, num_evaluated = evaluate(conf, params, X_devel,
                                                  Y_devel)
            devel_steps.append(step)
            devel_ccr.append(num_correct / num_evaluated)
            if conf['verbose']:
                print(
                    "S: {0:>7}, Test on development set. CCR: {1:>5} / {2:>5} = {3:>6.4f}"
                    .format(step, num_correct, num_evaluated,
                            num_correct / num_evaluated))

        if step >= conf['max_steps']:
            print("Terminating training after {} steps".format(step))
            break

    train_progress = {
        'steps': train_steps,
        'ccr': train_ccr,
        'cost': train_cost
    }
    devel_progress = {'steps': devel_steps, 'ccr': devel_ccr}

    return params, train_progress, devel_progress
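The model.one_hot helper used in this example is project-specific. Judging from the documented shapes ([output dimension, number of examples]), it plausibly behaves like the following sketch; this is an assumption, not the project's code:

import numpy as np

def one_hot(labels, num_classes):
    # Columns are examples: build a [num_classes, num_examples] indicator matrix.
    labels = np.asarray(labels).reshape(-1)
    out = np.zeros((num_classes, labels.size))
    out[labels, np.arange(labels.size)] = 1.0
    return out

# one_hot([0, 2, 1], 3) -> one 1.0 per column, in rows 0, 2 and 1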
Example #27
def bow_evaluate(x, y, model, cost, test_word_to_ix):
    predict = model.forward(x, model.lookuptable, test_word_to_ix)
    right, total = rightness(predict, y)  # returns (number of correct samples, total number of samples)
    loss = cost(predict, y)
    return right, total, loss
Example #28
def backward(train_data, train_labels, train_num):
    with tf.Graph().as_default() as g:
        with tf.name_scope('input'):
            x = tf.placeholder(dtype=tf.float32,shape=[BATCH_SIZE,IMG_SIZE[0],IMG_SIZE[1],IMG_CHANNEL])
            y_ = tf.placeholder(dtype=tf.float32,shape=[BATCH_SIZE,IMG_SIZE[0],IMG_SIZE[1],IMG_CHANNEL])
        # forward
        y,output_Stokes,output_Stokes_GT,grad_output,grad_output_GT = model.forward(x,y_,True)
        # learning rate
        global_step = tf.Variable(0,trainable=False)
        learning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE,global_step,
                                                   train_num//BATCH_SIZE,
                                                   LEARNING_RATE_DECAY, staircase=True)
        #loss function
        with tf.name_scope('loss'):
            mse = (1.0/BATCH_SIZE)*tf.nn.l2_loss(tf.subtract(y,y_))
            mse_stokes = (1.0/BATCH_SIZE)*tf.nn.l2_loss(tf.subtract(output_Stokes, output_Stokes_GT))
            mse_grad = (1.0/BATCH_SIZE)*tf.nn.l2_loss(tf.subtract(grad_output, grad_output_GT))
            loss_init = mse + mse_stokes
            loss = mse + mse_stokes + 0.1*mse_grad
        #Optimizer
        # GradientDescent
        with tf.name_scope('train'):
            # Adam
            optimizer = tf.train.AdamOptimizer(learning_rate)
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            train_op_init = optimizer.minimize(loss_init, global_step=global_step)
            train_op = optimizer.minimize(loss,global_step=global_step)
        
        # Save model

        variables = tf.contrib.framework.get_variables_to_restore()
        variables_to_restore = [v for v in variables if v.name.split('/')[0] != 'Gradient']
        saver = tf.train.Saver(variables_to_restore, max_to_keep=50)
        epoch = 0

        config = tf.ConfigProto()
        with tf.Session(config=config) as sess:
            tf.global_variables_initializer().run()

            ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess,ckpt.model_checkpoint_path)
                epoch = int(ckpt.model_checkpoint_path.split('/')[-1].split('_')[-1].split('-')[-2])
            
            while epoch<MAX_EPOCH:
                max_step = train_num//BATCH_SIZE
                listtmp = np.random.permutation(train_num)
                j = 0
                for i in range(max_step):
                    file =open("loss.txt",'a')
                    ind = listtmp[j:j+BATCH_SIZE]
                    j = j + BATCH_SIZE
                    xs = train_data[ind,:,:,:]
                    ys = train_labels[ind,:,:,:]
                    mode = np.random.permutation(8)
                    xs = DA.data_augmentation(xs,mode[0])
                    ys = DA.data_augmentation(ys,mode[0])
                    if epoch <50:
                        _, loss_v, step = sess.run([train_op_init, loss_init, global_step], feed_dict={x: xs, y_: ys})
                        file.write("Epoch: %d  Step is: %d After [ %d / %d ] training,  the batch loss is %g.\n" % (epoch + 1, step, i + 1, max_step, loss_v))
                        file.close()
                    else:
                        _,loss_v,step = sess.run([train_op,loss,global_step],feed_dict={x:xs, y_:ys})
                        file.write("Epoch: %d  Step is: %d After [ %d / %d ] training,  the batch loss is %g.\n" % (epoch + 1, step, i + 1, max_step, loss_v))
                        file.close()
                    #print("Epoch: %d  After [ %d / %d ] training,  the batch loss is %g." % (epoch + 1, i + 1, max_step, loss_v))
                saver.save(sess,os.path.join(MODEL_SAVE_PATH,MODEL_NAME+'_epoch_'+str(epoch+1)),global_step = global_step)
                epoch +=1
Example #29
def evaluate_specific(data, model, suffix=""):
    # Evaluate
    if args.threshold is None:
        print("direct predict task")
    else:
        print("threshold task,threshold={}".format(args.threshold))
    if not args.logits:
        print("use softmax")
    correct = 0
    correct2 = 0
    correct3 = 0
    correct4 = 0
    total = 0
    total2 = 0
    mrr = 0
    nota_fp = nota_fp2 = nota_tp = nota_tp2 = nota_fn = nota_fn2 = nota_tn = nota_tn2 = 0
    prob_nota = []
    prob_other = []
    num_batches = math.ceil(len(data) / args.batch_size)
    for i in range(num_batches):
        batch_rows = data[i * args.batch_size:(i + 1) * args.batch_size]
        is_nota = [e[1][0] == '_nota' for e in batch_rows]
        if len(batch_rows) == 0:
            continue
        if args.threshold is not None:  #remove _nota for threshold experiments
            batch_rows[:] = [
                list(filter((["_nota"]).__ne__, b)) for b in batch_rows
            ]
        ctx_seq, ctx_lens, resp_seq, resp_lens = model.prep_batch(batch_rows)
        ctx_seq = ctx_seq.to(device)
        resp_seq = resp_seq.to(device)
        # Forward pass
        proba = model.forward(ctx_seq, ctx_lens, resp_seq, resp_lens)
        if not args.logits:
            proba = F.softmax(proba.squeeze(1), dim=-1)
        proba = proba.squeeze(1)

        # Compute metrics
        alter_best = []
        for b in range(len(batch_rows)):
            if args.threshold is None:
                nota_pred = batch_rows[b][proba[b].argmax() + 1][0] == "_nota"
                nota_pred2 = batch_rows[b][proba[b, :2].argmax() +
                                           1][0] == "_nota"
            else:
                nota_pred = proba[b].max().item() < args.threshold
                nota_pred2 = proba[b, :2].max().item() < args.threshold
            if is_nota[b]:
                prob_nota.append(proba[b].cpu().detach())
                correct3 += int(nota_pred)
                correct4 += int(nota_pred2)
                nota_tp += int(nota_pred)
                nota_tp2 += int(nota_pred2)
                nota_fn += int(not nota_pred)
                nota_fn2 += int(not nota_pred2)
            else:
                prob_other.append(proba[b].cpu().detach())
                correct += int(proba[b].argmax() == 0 and not nota_pred)
                correct2 += int(proba[b, :2].argmax() == 0 and not nota_pred2)
                correct3 += int(not nota_pred)
                correct4 += int(not nota_pred2)
                nota_fp += int(nota_pred)
                nota_fp2 += int(nota_pred2)
                nota_tn += int(not nota_pred)
                nota_tn2 += int(not nota_pred2)
                total += 1
        total2 += proba.size(0)
    with open(
            args.model_path + "{}_{}_distri{}.pkl".format(
                "logits" if args.logits else "prob", 99, suffix), "wb") as f:
        pickle.dump(prob_nota, f)
        pickle.dump(prob_other, f)
    #print("max non-NOTA",max(alter_best))
    print('Current Test R@1/100:{:.4f}'.format(correct / total))
    #print('Current Test R@1/2:{:.4f}'.format(correct2/total))
    print('Current Test NOTA@100 ACC:{:.4f}'.format(correct3 / total2))
    #print('Current Test NOTA@2 ACC:{:.4f}'.format(correct4/total2))
    #print('NOTA percentage:'.format(total2/total))
    f1_nota, f1_other = calculate_f1_nota(nota_tp, nota_fp, nota_fn, nota_tn)
    #f1_nota2,f1_other2 = calculate_f1_nota(nota_tp2,nota_fp2,nota_fn2,nota_tn2)
    print('NOTA@100 F1:{:.4f}'.format(f1_nota))
    #print('NOTA@2 F1:{:.4f}'.format(f1_nota2))
    print('non-NOTA@100 F1:{:.4f}'.format(f1_other))
    #print('non-NOTA@2 F1:{:.4f}'.format(f1_other2))
    #print("weighted F1:{:.4f}".format((f1_nota+f1_nota2+f1_other+f1_other2)/4))

    return correct / total
Example #30
def backward():
    x = tf.placeholder(tf.float32, [batch_size, input_dim, 1, 2])
    y_ = tf.placeholder(tf.int32, [batch_size, n_class])
    y = model.forward(x, two_ch, 'add', False)
    global_step = tf.Variable(0, trainable=False)

    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))

    train_step = tf.train.AdamOptimizer(learning_rate=lr,
                                        beta1=0.9,
                                        beta2=0.999,
                                        epsilon=1e-8,
                                        name="Adam").minimize(loss)

    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    saver = tf.train.Saver()

    with tf.Session() as sess:

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)

        init_op = tf.group(tf.global_variables_initializer(),
                           tf.local_variables_initializer())
        sess.run(init_op)

        # Comment out the following two lines when training the model for the first time
        #model_path=os.path.join(MODEL_SAVE_PATH, MODEL_NAME)
        #tf.train.Saver().restore(sess,model_path)

        ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
        k = 0

        epochs = 10

        for epoch in range(epochs):
            print('#######################################################')
            doubel_file_name = get_doubel_file(data_dir)
            d = 0
            for doubel_file in doubel_file_name:
                print("Training round %d, person %d's data:" % (epoch, d))
                d = d + 1

                file_1 = doubel_file[0]
                file_2 = doubel_file[1]
                EEG_1, EEG_2, onset, duration, stage_code = get_data(
                    file_1, file_2, data_dir)
                EEG1_sequence, EEG2_sequence, stage_sequence = split_data_to_30s(
                    EEG_1, EEG_2, onset, duration, stage_code)
                if k != 0:
                    EEG1_sequence, EEG2_sequence, stage_sequence = K_EEG_together(
                        EEG1_sequence, EEG2_sequence, stage_sequence, k)

                batch_time = len(EEG1_sequence) / batch_size
                print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!', d)
                for i in range(int(batch_time)):
                    if two_ch:
                        batch_data, batch_label = two_ch_batch(
                            EEG1_sequence, EEG2_sequence, stage_sequence,
                            batch_size)
                        # returns batch_data, batch_label
                    else:
                        batch_data, batch_label = one_ch_batch(
                            EEG1_sequence, stage_sequence, batch_size)

                    _, loss_value, acc, y_pre = sess.run(
                        [train_step, loss, accuracy, y],
                        feed_dict={
                            x: batch_data,
                            y_: batch_label
                        })

                    print('loss is %g......acc is %f' % (loss_value, acc))
        saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME))