import time

import torch
import torch.nn as nn

# `to_np`, `progress`, and `log_step` are assumed to be defined elsewhere in
# this package (a tensor-to-numpy helper, a progress formatter, and the
# logging interval, respectively).


def test(model, test_loader, criterion, metric, device=None):
    """Evaluate on a loader that yields flat (data, target) tensor batches."""
    model.eval()
    sigmoid = nn.Sigmoid()
    Loss = 0.0
    total = 0
    outputs = torch.zeros((test_loader.n_samples, model.num_classes))
    targets = torch.zeros((test_loader.n_samples, model.num_classes))

    with torch.no_grad():
        start = 0
        for batch_idx, (data, target) in enumerate(test_loader):
            end = len(data) + start
            data, target = data.to(device), target.to(device)
            output = model(data)
            loss = criterion(output, target)

            outputs[start:end, :] = output
            targets[start:end, :] = target
            start = end

            # Weight by batch size so Loss / total is a true per-sample mean
            # (assumes the criterion averages over the batch).
            Loss += loss.item() * target.size(0)
            total += target.size(0)

        cc = metric(to_np(sigmoid(outputs), device), to_np(targets, device))

    return Loss / total, cc
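# A minimal usage sketch for `test` (hypothetical object names; assumes the
# loader exposes `n_samples` and the model exposes `num_classes`, as used
# above):
#
#   device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#   criterion = nn.BCEWithLogitsLoss()
#   test_loss, test_cc = test(model, test_loader, criterion, metric, device=device)
#   print("test loss {:.4f}, metric {:.4f}".format(test_loss, test_cc))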
def test_grouped(model, test_loader, criterion, metric, indices, device=None):
    """List-batch variant of `test`: each loader batch is a list of per-sample
    tensors, and `indices`, when given, restricts the loss to a subset of
    label columns."""
    model.eval()
    sigmoid = nn.Sigmoid()
    Outputs = torch.zeros((test_loader.n_samples, model.num_classes))
    Targets = torch.zeros((test_loader.n_samples, model.num_classes))

    with torch.no_grad():
        start = 0
        for batch_idx, (data, target) in enumerate(test_loader):
            end = len(data) + start

            outputs = torch.zeros(len(target), model.num_classes)
            targets = torch.zeros(len(target), model.num_classes)

            for i in range(len(data)):
                data[i], target[i] = data[i].to(device), target[i].to(device)
                output = model(data[i])

                outputs[i:i + 1, :] = output
                targets[i:i + 1, :] = target[i]

            Outputs[start:end, :] = outputs
            Targets[start:end, :] = targets
            start = end

        if indices is not None:
            loss = criterion(Outputs[:, indices], Targets[:, indices])
        else:
            loss = criterion(Outputs, Targets)
        cc = metric(to_np(sigmoid(Outputs), device), to_np(Targets, device))

    return loss.item(), cc
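# How `indices` works above, on toy shapes (illustration only, not part of the
# pipeline): the loss is computed over a chosen subset of label columns.
#
#   outputs = torch.randn(4, 10)                      # 4 samples, 10 labels
#   targets = torch.randint(0, 2, (4, 10)).float()
#   indices = torch.tensor([0, 3, 7])                 # score only these labels
#   loss = nn.BCEWithLogitsLoss()(outputs[:, indices], targets[:, indices])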
def valid_grouped(model, valid_loader, criterion, metric, indices, device=None):
    """List-batch validation: per-batch average of loss and metric."""
    sigmoid = nn.Sigmoid()
    model.eval()
    cc = 0
    Loss = 0
    batchs = 0
    with torch.no_grad():
        for batch_idx, (data, target) in enumerate(valid_loader):
            loss = torch.tensor(0.0, device=device)
            outputs = torch.zeros(len(target), target[0].shape[1])
            targets = torch.zeros(len(target), target[0].shape[1])

            for i in range(len(data)):
                data[i], target[i] = data[i].to(device), target[i].to(device)
                output = model(data[i])
                if indices is not None:
                    loss_i = criterion(output[:, indices], target[i][:, indices])
                else:
                    loss_i = criterion(output, target[i])
                loss += loss_i
                outputs[i:i + 1, :] = output
                targets[i:i + 1, :] = target[i]

            loss /= len(data)
            c = metric(to_np(sigmoid(outputs), device), to_np(targets, device))
            cc += c
            Loss += loss.item()
            batchs += 1

    return Loss / batchs, cc / batchs
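# The grouped variants expect each loader batch to be a Python *list* of
# per-sample tensors: `data = [x_0, x_1, ...]`, `target = [y_0, y_1, ...]`,
# with each `y_i` of shape (1, num_classes). A minimal collate sketch that
# yields this format (hypothetical helper, shown only to document the
# contract):
#
#   def list_collate(batch):
#       xs, ys = zip(*batch)
#       return list(xs), [y.unsqueeze(0) for y in ys]
#
#   loader = torch.utils.data.DataLoader(dataset, batch_size=8,
#                                        collate_fn=list_collate)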
def train_grouped(model, optimizer, train_loader, criterion, metric, indices,
                  epoch, device=None):
    """List-batch training: one optimizer step per batch; the loss is averaged
    over the per-sample forward passes."""
    sigmoid = nn.Sigmoid()
    model.train()
    cc = 0
    Loss = 0
    batchs = 0
    for batch_idx, (data, target) in enumerate(train_loader):
        batch_start = time.time()

        loss = torch.tensor(0.0, device=device)
        outputs = torch.zeros(len(target), target[0].shape[1])
        targets = torch.zeros(len(target), target[0].shape[1])

        optimizer.zero_grad()
        for i in range(len(data)):
            data[i], target[i] = data[i].to(device), target[i].to(device)
            output = model(data[i])

            if indices is not None:
                loss_i = criterion(output[:, indices], target[i][:, indices])
            else:
                loss_i = criterion(output, target[i])
            loss += loss_i
            # Detach before buffering for the metric so each sample's autograd
            # graph is not kept alive for the whole batch.
            outputs[i:i + 1, :] = output.detach()
            targets[i:i + 1, :] = target[i]

        loss /= len(data)
        loss.backward()

        # Optional gradient clipping:
        # torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=20, norm_type=2)
        optimizer.step()

        c = metric(to_np(sigmoid(outputs), device), to_np(targets, device))
        cc += c
        Loss += loss.item()
        batchs += 1

        if batch_idx % log_step == 0:
            batch_end = time.time()
            print('Train Epoch: {} {} Loss: {:.6f}, 1 batch cost time {:.2f}'.format(
                epoch, progress(train_loader, batch_idx), loss.item(),
                batch_end - batch_start))

    return Loss / batchs, cc / batchs
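# Hypothetical epoch driver for the grouped variants (placeholder names):
#
#   for epoch in range(1, n_epochs + 1):
#       tr_loss, tr_cc = train_grouped(model, optimizer, train_loader, criterion,
#                                      metric, indices, epoch, device=device)
#       va_loss, va_cc = valid_grouped(model, valid_loader, criterion, metric,
#                                      indices, device=device)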
def valid(model, valid_loader, criterion, metric, device=None):
    """Flat-batch validation: per-sample average loss, per-batch average metric."""
    sigmoid = nn.Sigmoid()
    model.eval()
    cc = 0
    Loss = 0
    total = 0
    batchs = 0
    with torch.no_grad():
        for batch_idx, (data, target) in enumerate(valid_loader):
            data, target = data.to(device), target.to(device)
            output = model(data)
            loss = criterion(output, target)

            c = metric(to_np(sigmoid(output), device), to_np(target, device))
            cc += c
            # Weight by batch size so Loss / total is a true per-sample mean
            # (assumes the criterion averages over the batch).
            Loss += loss.item() * target.size(0)
            total += target.size(0)
            batchs += 1

    return Loss / total, cc / batchs
def train(model,
          optimizer,
          train_loader,
          criterion,
          metric,
          indices,
          epoch,
          device=None):
    """Flat-batch training; `indices`, when given, restricts the loss to a
    subset of label columns."""
    sigmoid = nn.Sigmoid()
    model.train()
    cc = 0
    Loss = 0
    total = 0
    batchs = 0
    for batch_idx, (data, target) in enumerate(train_loader):
        batch_start = time.time()
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        if indices is not None:
            loss = criterion(output[:, indices], target[:, indices])
        else:
            loss = criterion(output, target)
        loss.backward()
        optimizer.step()

        # `output` still carries grad history during training, so detach it
        # before the numpy conversion.
        c = metric(to_np(sigmoid(output.detach()), device), to_np(target, device))
        cc += c
        # Weight by batch size so Loss / total is a true per-sample mean
        # (assumes the criterion averages over the batch).
        Loss += loss.item() * target.size(0)
        total += target.size(0)
        batchs += 1

        if batch_idx % log_step == 0:
            batch_end = time.time()
            print('Train Epoch: {} {} Loss: {:.6f}, 1 batch cost time {:.2f}'.
                  format(epoch, progress(train_loader, batch_idx), loss.item(),
                         batch_end - batch_start))

    return Loss / total, cc / batchs
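# Hypothetical epoch driver for the flat-batch variants (placeholder names;
# checkpointing shown as one common pattern, not as this repo's convention):
#
#   best = float("inf")
#   for epoch in range(1, n_epochs + 1):
#       train(model, optimizer, train_loader, criterion, metric, indices,
#             epoch, device=device)
#       va_loss, va_cc = valid(model, valid_loader, criterion, metric,
#                              device=device)
#       if va_loss < best:
#           best = va_loss
#           torch.save(model.state_dict(), "best.pt")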