Example #1
def test(epoch):
    global best_acc
    model.eval()
    test_loss = 0
    correct = 0
    total = 0
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(testloader):
            inputs, targets = inputs.to(device), targets.to(device)
            outputs = model(inputs)
            loss = criterion(outputs, targets)
            test_loss += loss.item()
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()

            tools.progress_bar(
                batch_idx, len(testloader),
                'Loss: %.4f | Acc: %.4f%% (%d/%d)' %
                (test_loss / (batch_idx + 1), 100. * correct / total,
                 correct, total))

    # Save checkpoint
    acc = 100. * correct / total
    if acc > best_acc:
        print('Saving...')
        state = {
            'model': model.state_dict(),
            'acc': acc,
            'epoch': epoch,
        }
        if not os.path.isdir('checkpoint'):
            os.mkdir('checkpoint')
        torch.save(state, 'checkpoint/ckpt.pth')
        best_acc = acc
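Note: the tools.progress_bar helper that these training/evaluation snippets call is not shown on this page. A minimal sketch compatible with the (batch_index, total_batches, message) call style used above might look like the following; the parameter names and bar styling are assumptions, and later examples suggest the real helper also accepts keywords such as status= and start_time=.

import sys

def progress_bar(current, total, msg='', bar_len=40):
    """Render an in-place text progress bar (sketch, not the original helper).

    current is the zero-based index of the step just completed,
    total is the number of steps, msg is an optional status string.
    """
    filled = int(bar_len * (current + 1) / total)
    bar = '=' * filled + '-' * (bar_len - filled)
    sys.stdout.write('\r[%s] %d/%d %s' % (bar, current + 1, total, msg))
    if current + 1 == total:
        sys.stdout.write('\n')
    sys.stdout.flush()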
Example #2
def train(epoch):
    print('\nEpoch: %d' % epoch)
    model.train()
    train_loss = 0
    correct = 0
    total = 0
    for batch_idx, (inputs, targets) in enumerate(train_loader):
        inputs, targets = inputs.to(device), targets.to(device)

        # Forward pass
        outputs = model(inputs)
        loss = criterion(outputs, targets)

        train_loss += loss.item()
        _, pred_idx = torch.max(outputs.data, 1)

        total += targets.size(0)
        correct += pred_idx.eq(targets.data).cpu().sum().float()

        # Backward and optimize
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        progress_bar(
            batch_idx, len(train_loader),
            'Loss: %.3f | Acc: %.3f%% (%d/%d)' %
            (train_loss / (batch_idx + 1), 100. * correct / total,
             correct, total))

    # Average the loss over the number of batches, not the last batch index
    return train_loss / (batch_idx + 1), 100. * correct / total
Example #3
def test(epoch, test_loader):
    global globe_train
    globe_train = False
    net.eval()
    test_loss = 0
    correct = 0
    total = 0
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(test_loader):
            inputs, targets = inputs.to(device), targets.to(device)
            outputs, pert_inputs = net(inputs, targets)
            loss = criterion(outputs, targets)

            test_loss += loss.item()
            _, pred_idx = torch.max(outputs.data, 1)
            total += targets.size(0)
            correct += pred_idx.eq(targets.data).cpu().sum().float()

            # Bar visualization
            progress_bar(batch_idx, len(test_loader),
                         'Loss: %.3f | Acc: %.3f%% (%d/%d)'
                         % (test_loss/(batch_idx+1), 100.*correct/total, correct, total))

    return test_loss / (batch_idx + 1), 100. * correct / total
Example #4
0
def train(epoch, train_loader):
    print('\nEpoch: %d' % epoch)
    net.train()
    global globe_train, mask_train_cnt
    globe_train = True
    train_loss = 0
    correct = 0
    total = 0
    for batch_idx, (inputs, targets) in enumerate(train_loader):
        # 50000 = training-set size (e.g. CIFAR-10); track which segment of
        # the mask schedule the current batch falls into
        mask_train_cnt = math.ceil((batch_idx + 1) / (50000 / batch_size))
        inputs, targets = inputs.to(device), targets.to(device)

        optimizer.zero_grad()
        outputs, pert_inputs = net(inputs, targets)
        loss = criterion(outputs, targets)
        loss.backward()
        optimizer.step()

        train_loss += loss.item()
        _, pred_idx = torch.max(outputs.data, 1)

        total += targets.size(0)
        correct += pred_idx.eq(targets.data).cpu().sum().float()

        # Bar visualization
        progress_bar(batch_idx, len(train_loader),
                     'Loss: %.3f | Acc: %.3f%% (%d/%d)'
                     % (train_loss/(batch_idx+1), 100.*correct/total, correct, total))

    return train_loss / (batch_idx + 1), 100. * correct / total
Example #5
def MF_Layer(train_data, train=True):
    if train:
        for i in range(train_data.shape[0]):
            # Ramp the mask probability from args.startp toward args.endp
            # in args.mask_num steps of 50000 samples each
            maskp = args.startp + math.ceil(
                (i + 1) / 50000) * (args.endp - args.startp) / args.mask_num
            train_data[i] = globals()[args.me_type](train_data[i], maskp)
            # Bar visualization
            progress_bar(i, train_data.shape[0], ' | Training data')

    else:
        for i in range(train_data.shape[0]):
            # At test time, use the midpoint of the training mask range
            maskp = (args.startp + args.endp) / 2
            train_data[i] = globals()[args.me_type](train_data[i], maskp)
            # Bar visualization
            progress_bar(i, train_data.shape[0], ' | Testing data')

    return train_data
Example #6
    def iterate(self, n_iter):
        "loop for n_iter iterations with progress bar"

        # create the progress bar
        self.progress_bar = progress_bar(n_iter)

        for i in range(n_iter):
            self.update_clusters()
            self.update_centroids()
            self.progress_bar(i)
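Unlike the other snippets, this example assumes a factory-style API: progress_bar(n_iter) returns a callable that is then invoked with the current iteration index. A hypothetical sketch of such a factory:

def progress_bar(n_iter, bar_len=40):
    """Return a callable that redraws a progress bar for step i of n_iter (sketch)."""
    def update(i):
        filled = int(bar_len * (i + 1) / n_iter)
        print('\r[%s%s] %d/%d' % ('=' * filled, ' ' * (bar_len - filled),
                                  i + 1, n_iter), end='', flush=True)
        if i + 1 == n_iter:
            print()
    return update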
Example #7
def test(epoch):
    model.eval()
    test_loss = 0
    correct = 0
    total = 0
    # Disable gradient tracking during evaluation to save memory
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(test_loader):
            inputs, targets = inputs.to(device), targets.to(device)
            outputs = model(inputs)
            loss = criterion(outputs, targets)

            test_loss += loss.item()
            _, pred_idx = torch.max(outputs.data, 1)
            total += targets.size(0)
            correct += pred_idx.eq(targets.data).cpu().sum().float()

            progress_bar(
                batch_idx, len(test_loader),
                'Loss: %.3f | Acc: %.3f%% (%d/%d)' %
                (test_loss / (batch_idx + 1), 100. * correct / total,
                 correct, total))

    return test_loss / (batch_idx + 1), 100. * correct / total
Example #8
def train(epoch):
    print('Epoch: %d' % epoch)
    model.train()
    train_loss = 0
    correct = 0
    total = 0
    for batch_idx, (inputs, targets) in enumerate(trainloader):
        inputs, targets = inputs.to(device), targets.to(device)
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, targets)
        loss.backward()
        optimizer.step()

        train_loss += loss.item()
        _, predicted = outputs.max(1)
        total += targets.size(0)
        correct += predicted.eq(targets).sum().item()

        tools.progress_bar(
            batch_idx, len(trainloader),
            'Loss: %.4f | Acc: %.4f%% (%d/%d)' %
            (train_loss / (batch_idx + 1), 100. * correct / total,
             correct, total))
Example #9
    def download(self, url, dest_folder, filename=None, determine_filename=False):
        if self.working:
            self.logger.error('Downloader is busy.')
            return
        self.working = True

        dest_folder = os.path.abspath(dest_folder)
        if not os.path.exists(dest_folder):
            self.logger.warning('Destination path does not exist; creating it')
            os.makedirs(dest_folder, exist_ok=True)

        self.logger.info("Retrieving file metadata..")
        accepts_range, filesize, srv_filename = tools.check_httpfile_info(url)
        if accepts_range:
            ranges = tools.divide_range(filesize)
        else:
            ranges = tools.divide_range(filesize, max_threads=1)
        if dest_folder[-1] != '/':
            dest_folder = dest_folder+'/'
        dest = '%s%s' % (dest_folder, srv_filename if determine_filename else filename)

        if len(ranges) == 1:
            self.download_thread(url, dest, ranges[0][0], ranges[0][1], single_thread=True)
        else:
            for ran in ranges:
                self.thread_pool.add_pool(self.download_thread, url, dest, ran[0], ran[1])
            # self.thread_pool.wait_all()
            prev_size = 0
            while not self.thread_pool.iscomplete():
                time.sleep(1)
                pgsb = tools.progress_bar(self.downloaded_size / filesize, 40,
                                          self.downloaded_size, True)
                speed = tools.readable_filesize(self.downloaded_size - prev_size)
                print('%s %s/s' % (pgsb, speed))
                prev_size = self.downloaded_size
            tools.merge_files(dest)
            
        self.working = False
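Here tools.progress_bar is used differently again: it is called with a completion ratio and returns a string, which is then printed alongside a transfer-speed readout. A minimal sketch of a string-returning variant, with assumed parameter meanings (ratio, bar width, bytes downloaded, whether to show the byte count):

def progress_bar(ratio, bar_len, downloaded, show_size=False):
    """Return a progress bar string for a completion ratio in [0, 1] (sketch)."""
    filled = int(bar_len * ratio)
    bar = '[%s%s] %5.1f%%' % ('#' * filled, ' ' * (bar_len - filled),
                              ratio * 100)
    if show_size:
        bar += ' (%d bytes)' % downloaded
    return bar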
Example #10
def preprocess_vital_signs(vital_signs_raw_, save=None):
    # Filling missing data with patient median or median of all patients
    # vital_signs_raw_ = vital_signs_raw_.head(n=1200)  # Split into a smaller set

    # Group by patient
    patient_data = vital_signs_raw_.groupby(by="pid")
    median_all_patients = patient_data.median().median()

    # Fill NaNs with the median of the patient, or of all patients
    print("First patient before imputation: \n", patient_data.get_group(1))
    patient_data_impunated = patient_data.head(0)
    patient_features_list = []

    load = False  # Set to True to load previously imputed data from disk
    if load:
        # Load previously imputed data
        patient_data_impunated = pd.read_csv(
            'data/vital_signs_median_impunated_age100_removed_outliers_labeled.csv'
        )
    else:
        print("\nImpunating data...")
        starting_time, cts = time.time(), 0
        for _, patient in patient_data:
            # Replace age with median if it is 100
            # if patient["Age"].mean() == 100.0:
            #     patient["Age"] = median_all_patients["Age"]

            # Fill all NaNs with patient median of variable
            patient = patient.fillna(patient.median(skipna=True))

            # Fill with hospital median if a variable is all NaNs
            if patient.isnull().sum().sum():
                patient = patient.fillna(median_all_patients)

            # DataFrame.append was removed in pandas 2.0; use pd.concat instead
            patient_data_impunated = pd.concat([patient_data_impunated, patient])
            # print progress
            tools.progress_bar(cts,
                               len(patient_data),
                               start_time=starting_time)
            cts = cts + 1

        # print("First patient after impunation: \n", patient_data_impunated.head(12))
        # Get rid of outliers
    #     print("Fitting ellipse")
    #     fit_elliptic_envelope = EllipticEnvelope(contamination=0.2)
    #     labels_outliers = fit_elliptic_envelope.fit_predict(patient_data_impunated)
    #     patient_data_impunated.insert(1, "outlier", labels_outliers, True)
    #
    # # Go through patients and get rid of outlier patients
    # patient_data_impunated_no_outliers = patient_data_impunated.head(0)
    # starting_time, cts = time.time(), 0
    # for _, patient in patient_data_impunated.groupby(by="pid"):
    #     # print progress
    #     tools.progress_bar(cts, len(patient_data_impunated), start_time=starting_time)
    #     cts = cts + 1
    #
    #     # Cancel outliers
    #     if any(patient["outlier"] == -1):
    #         continue
    #     else:
    #         patient_data_impunated_no_outliers = patient_data_impunated_no_outliers.append(patient)
    patient_data_impunated_no_outliers = patient_data_impunated

    # Each patient contributes 12 hourly rows; add an explicit hour index
    one_to_twelve = np.tile(np.linspace(1, 12, 12, dtype=int),
                            int(len(patient_data_impunated_no_outliers) / 12))
    patient_data_impunated_no_outliers.insert(1, "hour", one_to_twelve, True)
    vital_signs = patient_data_impunated_no_outliers.set_index(['pid', 'hour'])

    # Save vital signs to file
    if save:
        filename = 'data/vital_signs_median_impunated_outliers_and_age100_removed.csv'
        vital_signs.to_csv(filename)
        print('Saved file to ', filename)

    return vital_signs, patient_data_impunated
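This snippet passes a start_time keyword, which suggests a variant of the helper that also reports elapsed time and perhaps an ETA. A hypothetical sketch of what such a signature could look like:

import time

def progress_bar(current, total, msg='', start_time=None, bar_len=40):
    """Progress bar that optionally appends elapsed time and a crude ETA (sketch)."""
    filled = int(bar_len * (current + 1) / total)
    line = '\r[%s%s] %d/%d %s' % ('=' * filled, '-' * (bar_len - filled),
                                  current + 1, total, msg)
    if start_time is not None:
        elapsed = time.time() - start_time
        eta = elapsed / (current + 1) * (total - current - 1)
        line += ' | %.0fs elapsed, ~%.0fs remaining' % (elapsed, eta)
    print(line, end='', flush=True)
    if current + 1 == total:
        print()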
Example #11
def make_all_pdf(in_dir='', out_dir=''):
    # Ensure the given output path ends with the path delimiter
    if out_dir[-1] != delimiter:
        out_dir += delimiter

    print("Searching directory...")

    # Collect the paths of all subdirectories (and their children) under the
    # given root directory; images placed directly in the root are not handled

    dirs: list = tools.get_all_dirs(in_dir)

    files_found = len(dirs)
    processed_files = 0
    finished_files = 0

    failed_process = []

    print("Found " + str(files_found) + " folders!")

    file = open(out_dir + "error_log.txt", "w", encoding='utf-8')

    for i in dirs:
        try:
            # Track how many folders have been processed so far
            processed_files += 1

            if make_pdf(i, out_dir):
                finished_files += 1

            else:
                # make_pdf reported a failure; move on to the next folder
                continue
            # progress = round(processed_files / files_found * 100, 1)
            tools.progress_bar(processed_files,
                               files_found,
                               status="converting images...")

        except Exception:
            log = "[Error]directory: " + i + "\n" + str(
                traceback.format_exc()) + "\n"
            failed_process.append(i.split(delimiter)[-1])
            file.write(log)
            print(log)
            continue

        except (KeyboardInterrupt, SystemExit):
            exit_program = input("Do you wish to exit the program? (yes/no) ")
            if exit_program == "yes" or exit_program == "y":
                print("shutting down...")
                exit(0)

            elif exit_program == "no" or exit_program == "n":
                print("continuing process")
                continue

    file.write("=====================failed folders======================\n")

    for i in failed_process:
        file.write(i + "\n")
    file.write("Total failed files: " + str(processed_files - finished_files))
    file.close()
    print("Converted " + str(finished_files) + " files!")