# Module-level imports these examples rely on. `datasets`, `persistence`, and
# `evaluation` are project-local modules; the Variable / .data[0] / volatile
# idioms below appear to target the legacy (pre-0.4) PyTorch API.
import gc
import operator as op
from functools import reduce

import numpy as np
import torch
from torch.autograd import Variable
from tqdm import tqdm

import datasets
import evaluation
import persistence


def test(model,
         epoch,
         batch_size,
         data_path,
         struc_feats,
         fold,
         gpu,
         dicts,
         model_dir,
         testing,
         test_frac=1,
         thresh=None):
    """
        Testing loop.
        Returns metrics
    """
    filename = data_path.replace('train', fold)
    filename = filename[:-4] + "_reversed.csv"
    print('\nfile for evaluation: %s' % filename)

    y, yhat, yhat_raw, hids, losses = [], [], [], [], []

    model.eval()
    gen = datasets.data_generator(filename, dicts, batch_size)
    for batch_idx, tup in enumerate(gen):

        data, target, hadm_ids = tup

        batch_size_safe = min(
            batch_size, struc_feats.shape[0] -
            batch_idx * batch_size)  # Avoiding going out of range

        if batch_idx * batch_size > test_frac * struc_feats.shape[0]:
            print("Reached {} of test/val set".format(test_frac))
            break

        # batch_size_safe only differs from batch_size on the final, possibly short, batch
        struc_data = struc_feats[batch_idx * batch_size:
                                 batch_idx * batch_size + batch_size_safe].todense()

        data, struc_data, target = Variable(torch.LongTensor(data)), Variable(
            torch.FloatTensor(struc_data)), Variable(torch.FloatTensor(target))

        if gpu:
            data = data.cuda()
            struc_data = struc_data.cuda()
            target = target.cuda()

        model.zero_grad()

        output, main_loss, _, _ = model(data, struc_data,
                                        target)  # Forward pass

        output = output.data.cpu().numpy()
        losses.append(main_loss.data[0])
        target_data = target.data.cpu().numpy()

        #save predictions, target, hadm ids
        yhat_raw.append(output)
        output = np.round(
            output)  # Rounds to 0 for <= 0.5, up to one for > 0.5
        yhat.append(output)

        y.append(target_data)
        hids.extend(hadm_ids)


#        del output, main_loss, data, target, struc_data

    y = np.concatenate(y, axis=0)
    yhat = np.concatenate(yhat, axis=0)
    yhat_raw = np.concatenate(yhat_raw, axis=0)

    print("\nMax Prediction This Epoch:")
    print(yhat_raw.max())

    #write the predictions
    persistence.write_preds(yhat, model_dir, hids, fold, yhat_raw)

    metrics = evaluation.all_metrics(yhat_raw, y, thresh)
    evaluation.print_metrics(metrics)
    metrics['loss_%s' % fold] = np.float64(
        np.mean(losses))  #float64 for json serialization

    return metrics
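

# A hypothetical usage sketch for the test() loop above (everything except the
# test() signature is an assumption): data_path points at the *train* CSV, and
# the fold name is substituted into it to pick the split to evaluate.
def evaluate_fold(model, epoch, batch_size, struc_feats, dicts, model_dir, gpu,
                  fold='dev'):
    data_path = 'mimicdata/train_full.csv'  # hypothetical path; 'train' is replaced by fold
    return test(model, epoch, batch_size, data_path, struc_feats, fold, gpu,
                dicts, model_dir, testing=(fold == 'test'))
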
def train(model,
          optimizer,
          epoch,
          batch_size,
          data_path,
          struc_feats,
          struc_labels,
          gpu,
          dicts,
          quiet,
          struc_aux_loss_wt,
          conv_aux_loss_wt,
          train_frac=1):  ### struc feats = full structured sparse matrix
    """
        Training loop.
        output: losses for each example for this iteration
    """
    losses = []

    #how often to print some info to stdout
    print_interval = 50

    model.train()  # PUTS MODEL IN TRAIN MODE

    gen = datasets.data_generator(data_path, dicts, batch_size)
    for batch_idx, tup in enumerate(gen):

        if batch_idx * batch_size > train_frac * struc_feats.shape[0]:
            print("Reached {} of train set".format(train_frac))
            break

        data, target, hadm = tup

        batch_size_safe = min(
            batch_size, struc_feats.shape[0] -
            batch_idx * batch_size)  # Avoiding going out of range

        struc_data = struc_feats[batch_idx *
                                 batch_size:batch_idx * batch_size +
                                 batch_size_safe].todense()
        struc_labels_batch = struc_labels[
            batch_idx * batch_size:batch_idx * batch_size +
            batch_size_safe]  ### CAN USE THIS TO CONFIRM THAT LABELS MATCH BW STRUC AND TEXT

        if np.sum(target == struc_labels_batch) != batch_size_safe:
            print("Labels wrong, mismatch indices")
            break

        optimizer.zero_grad()

        # `batch` is not shown in this listing; it is assumed to run the
        # forward and backward pass and return the loss (see the sketch after
        # this function).
        loss = batch(model, data, struc_data, target, gpu, struc_aux_loss_wt,
                     conv_aux_loss_wt)

        print(loss)

        optimizer.step()
        losses.append(loss)

        print(len(losses))

        #        if not quiet and batch_idx % print_interval == 0:
        #print the average loss of the last 100 batches
        #            print("Train epoch: {} [batch #{}, \tLoss: {:.6f}".format(
        #                epoch+1, np.mean(losses[-100:])))
        #            print(losses)

        gc.collect()

    return losses
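

# The `batch` helper used in the train() loop above is not included in this
# listing. The sketch below is an assumption, written to match the inlined
# forward/backward logic of the next train() example (Variable wrapping,
# weighted auxiliary losses, loss.backward() inside the helper so the caller
# only needs optimizer.step()).
def batch(model, data, struc_data, target, gpu, struc_aux_loss_wt,
          conv_aux_loss_wt):
    data, struc_data, target = Variable(torch.LongTensor(data)), Variable(
        torch.FloatTensor(struc_data)), Variable(torch.FloatTensor(target))

    if gpu:
        data = data.cuda()
        struc_data = struc_data.cuda()
        target = target.cuda()

    # Forward pass, then weighted sum of main and auxiliary losses
    output, main_loss, struc_aux_loss, conv_aux_loss = model(
        data, struc_data, target)
    loss = main_loss + struc_aux_loss * struc_aux_loss_wt + \
        conv_aux_loss * conv_aux_loss_wt

    loss.backward()
    return loss.data[0]
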
def train(model,
          optimizer,
          epoch,
          batch_size,
          data_path,
          struc_feats,
          struc_labels,
          gpu,
          dicts,
          quiet,
          struc_aux_loss_wt,
          conv_aux_loss_wt,
          train_frac=1):  ### struc feats = full structured sparse matrix
    """
        Training loop.
        output: losses for each example for this iteration
    """
    losses = []

    data_path = data_path[:-4] + "_reversed.csv"

    #how often to print some info to stdout
    print_interval = 50

    model.train()  # PUTS MODEL IN TRAIN MODE

    gen = datasets.data_generator(data_path, dicts, batch_size)
    for batch_idx, tup in enumerate(gen):

        if batch_idx * batch_size > train_frac * struc_feats.shape[0]:
            print("Reached {} of train set".format(train_frac))
            break

        data, target, hadm = tup

        batch_size_safe = min(
            batch_size, struc_feats.shape[0] -
            batch_idx * batch_size)  # Avoiding going out of range

        struc_data = struc_feats[batch_idx *
                                 batch_size:batch_idx * batch_size +
                                 batch_size_safe].todense()
        struc_labels_batch = struc_labels[
            batch_idx * batch_size:batch_idx * batch_size +
            batch_size_safe]  ### CAN USE THIS TO CONFIRM THAT LABELS MATCH BW STRUC AND TEXT

        if np.sum(target == struc_labels_batch) != batch_size_safe:
            print("Labels wrong, mismatch indices")
            break

        data, struc_data, target = Variable(torch.LongTensor(data)), Variable(
            torch.FloatTensor(struc_data)), Variable(torch.FloatTensor(target))

        if gpu:
            data = data.cuda()
            struc_data = struc_data.cuda()
            target = target.cuda()

        optimizer.zero_grad()

        output, main_loss, struc_aux_loss, conv_aux_loss = model(
            data, struc_data, target)  # FORWARD PASS

        loss = main_loss + struc_aux_loss * struc_aux_loss_wt + conv_aux_loss * conv_aux_loss_wt

        loss.backward()
        optimizer.step()
        losses.append(loss.data[0])

        if not quiet and batch_idx % print_interval == 0:
            #print the average loss of the last 100 batches
            print(
                "Train epoch: {} [batch #{}, batch_size {}, seq length {}]\tLoss: {:.6f}"
                .format(epoch + 1, batch_idx,
                        data.size()[0],
                        data.size()[1], np.mean(losses[-100:])))
            print("Main loss, struc loss, conv loss: " +
                  str((main_loss.data[0], struc_aux_loss.data[0],
                       conv_aux_loss.data[0])))


#        del output, loss, main_loss, struc_aux_loss, conv_aux_loss, data, target, struc_data, struc_labels_batch

    return losses

# Example #4

def test(model, epoch, batch_size, data_path, fold, gpu, dicts, model_dir,
         testing):
    """
        Testing loop.
        Returns metrics
    """
    filename = data_path.replace('train', fold)
    print('\nfile for evaluation: %s' % filename)

    y, yhat, yhat_raw, hids, losses = [], [], [], [], []

    model.eval()
    gen = datasets.data_generator(filename, dicts, batch_size)
    for batch_idx, tup in enumerate(gen):

        data, target, hadm_ids = tup

        data, target = Variable(torch.LongTensor(data),
                                volatile=True), Variable(
                                    torch.FloatTensor(target))

        if gpu:
            data = data.cuda()
            target = target.cuda()

        model.zero_grad()

        output, loss = model(data, target)  # Forward pass

        output = output.data.cpu().numpy()
        losses.append(loss.data[0])
        target_data = target.data.cpu().numpy()

        #save predictions, target, hadm ids
        yhat_raw.append(output)
        output = np.round(
            output)  # Rounds to 0 for <= 0.5, up to one for > 0.5
        yhat.append(output)

        y.append(target_data)
        hids.extend(hadm_ids)

    y = np.concatenate(y, axis=0)
    yhat = np.concatenate(yhat, axis=0)
    yhat_raw = np.concatenate(yhat_raw, axis=0)

    print("\nMax Prediction:")
    print(yhat_raw.max())

    #    print("y shape: " + str(y.shape))
    #    print("yhat shape: " + str(yhat.shape))

    #write the predictions
    persistence.write_preds(yhat, model_dir, hids, fold, yhat_raw)

    metrics = evaluation.all_metrics(yhat_raw, y)
    evaluation.print_metrics(metrics)
    metrics['loss_%s' % fold] = np.float64(
        np.mean(losses))  #float64 for json serialization

    return metrics
def test(model,
         epoch,
         batch_size,
         data_path,
         fold,
         gpu,
         dicts,
         model_dir,
         testing,
         thresh=None,
         obs_limit=None):
    """
        Testing loop. Returns metrics on desired fold (validation or test).
    """
    filename = data_path.replace('train', fold)
    print('\nfile for evaluation: %s' % filename)

    y, yhat, yhat_raw, hids, losses = [], [], [], [], []

    model.eval()
    gen = datasets.data_generator(filename, dicts, batch_size)
    for batch_idx, tup in enumerate(gen):

        if obs_limit:  # Early stopping for debugging
            obs_seen = batch_idx * batch_size
            if obs_seen > obs_limit:
                print("Reached {} of test/val set".format(obs_limit))
                break

        data, target, hadm_ids = tup

        data, target = Variable(torch.LongTensor(data),
                                volatile=True), Variable(
                                    torch.FloatTensor(target))

        if gpu:
            data = data.cuda()
            target = target.cuda()

        model.zero_grad()

        output, loss = model(data, target)  # Forward pass

        output = output.data.cpu().numpy()
        losses.append(loss.data[0])
        target_data = target.data.cpu().numpy()

        #save predictions, target, hadm ids
        yhat_raw.append(output)
        output = np.round(
            output)  # Rounds to 0 for <= 0.5, up to one for > 0.5
        yhat.append(output)

        y.append(target_data)
        hids.extend(hadm_ids)

    y = np.concatenate(y, axis=0)
    yhat = np.concatenate(yhat, axis=0)
    yhat_raw = np.concatenate(yhat_raw, axis=0)

    print("\nMax Prediction This Epoch:")
    print(yhat_raw.max())

    #write the predictions
    persistence.write_preds(yhat, model_dir, hids, fold, yhat_raw)

    metrics = evaluation.all_metrics(yhat_raw, y, thresh)
    evaluation.print_metrics(metrics)
    metrics['loss_%s' % fold] = np.float64(
        np.mean(losses))  #float64 for json serialization

    return metrics
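

# Note: in the test() above, `thresh` is only forwarded to
# evaluation.all_metrics; the hard predictions written to disk still come from
# np.round, i.e. a fixed 0.5 cut-off. If the saved yhat should honor the same
# threshold, a hypothetical helper like this one could replace the np.round
# call (an assumption about intent, not part of the original code):
def binarize(output, thresh=None):
    if thresh is not None:
        return (output > thresh).astype(float)
    return np.round(output)
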
def train(model,
          optimizer,
          epoch,
          batch_size,
          data_path,
          gpu,
          dicts,
          quiet,
          obs_limit=None):
    """
        Training loop.
        output: losses for each example for this iteration
    """
    losses = []

    #how often to print some info to stdout
    print_interval = 50

    #    data_path = data_path[:-4] + "_e1.csv"

    data_path = data_path[:-4] + "_reversed.csv"

    #    # Grabbing data set based on epoch number, allows for shuffling between epochs
    #    if epoch < 3:
    #        data_path = data_path[:-4] + "_e" + str(epoch + 1) + ".csv"
    #        print(data_path)
    #    else:
    #        data_path = data_path[:-4] + "_e" + str(epoch - 2) + ".csv"
    #        print(data_path)

    model.train()  # PUTS MODEL IN TRAIN MODE

    gen = datasets.data_generator(data_path, dicts, batch_size)
    for batch_idx, tup in enumerate(gen):

        if obs_limit:  # Early stopping for debugging
            obs_seen = batch_idx * batch_size
            if obs_seen > obs_limit:
                print("Reached {} of train set".format(obs_limit))
                break

        data, target, hadm = tup

        data, target = Variable(torch.LongTensor(data)), Variable(
            torch.FloatTensor(target))

        if gpu:
            data = data.cuda()
            target = target.cuda()

        optimizer.zero_grad()

        output, loss = model(data, target)  # FORWARD PASS

        loss.backward()
        optimizer.step()

        losses.append(loss.data[0])

        if not quiet and batch_idx % print_interval == 0:
            #print the average loss of the last 100 batches
            print(
                "Train epoch: {} [batch #{}, batch_size {}, seq length {}]\tLoss: {:.6f}"
                .format(epoch + 1, batch_idx,
                        data.size()[0],
                        data.size()[1], np.mean(losses[-100:])))

            ### Memory debugging: rough count of tensor elements currently alive
            total = 0
            for obj in gc.get_objects():

                if torch.is_tensor(obj):
                    obj_size = reduce(op.mul,
                                      obj.size()) if len(obj.size()) > 0 else 0
                    #                    print(reduce(op.mul, obj.size()) if len(obj.size()) > 0 else 0, type(obj), obj.size())
                    total += obj_size
            print("Total tensor elements alive: %d" % total)

        del output, loss, data, target

    return losses

# Example #7

def test(model, epoch, batch_size, data_path, fold, gpu, dicts, samples, model_dir, testing, debug):

    """
        Testing loop.
        Returns metrics
    """
    filename = data_path.replace('train', fold)
    print('\nfile for evaluation: %s' % filename)
    
    #initialize stuff for saving attention samples
    if samples:
        tp_file = open('%s/tp_%s_examples_%d.txt' % (model_dir, fold, epoch), 'w')
        fp_file = open('%s/fp_%s_examples_%d.txt' % (model_dir, fold, epoch), 'w')

    y, yhat, yhat_raw, hids, losses = [], [], [], [], []
    
    model.eval()
    gen = datasets.data_generator(filename, dicts, batch_size)
    for batch_idx, tup in tqdm(enumerate(gen)):
        if debug and batch_idx > 50:
            break
        
        data, target, hadm_ids = tup
        
        data, target = Variable(torch.LongTensor(data), volatile=True), Variable(torch.FloatTensor(target))
        if gpu:
            data = data.cuda()
            target = target.cuda()
            
        model.zero_grad()

        output, loss = model(data, target) # Forward pass

        output = output.data.cpu().numpy()
        losses.append(loss.data[0])
        target_data = target.data.cpu().numpy()
        
        #save predictions, target, hadm ids
        yhat_raw.append(output) 
        output = np.round(output) # Rounds to 0 for <= 0.5, up to one for > 0.5
        yhat.append(output)
        
        y.append(target_data)
        hids.extend(hadm_ids)

    if samples:
        tp_file.close()
        fp_file.close()
    
    y = np.concatenate(y, axis=0)
    yhat = np.concatenate(yhat, axis=0)
    yhat_raw = np.concatenate(yhat_raw, axis=0)
    
    print("\nMax Prediction:")
    print(yhat_raw.max())

#    print("y shape: " + str(y.shape))
#    print("yhat shape: " + str(yhat.shape))

    #write the predictions
    persistence.write_preds(yhat, model_dir, hids, fold, yhat_raw)
        
    metrics = evaluation.all_metrics(yhat, y, yhat_raw=yhat_raw)
    evaluation.print_metrics(metrics)
    metrics['loss_%s' % fold] = np.mean(losses)
    
    return metrics