Example #1

from colorama import Fore

import helper as hp
import machine_learning as ml
import preprocessing as pp
import bayesian as bn

if __name__ == '__main__':
    print(Fore.GREEN + "--------------------------------------------------------")
    print("BAYESIAN NETWORK INTERFACING TOOL - Thomas Tiotto (2019)")
    print("--------------------------------------------------------")
    print(Fore.RESET)

    data_set_path = '../DBMedico/DBBCTI_20042014_VMMZ_GL.xls'

    data_set_path = input(f"Data set [{data_set_path}] : ") or data_set_path

    df = hp.read_dataset(data_set_path, "excel", sheet="DB3")

    print(f"Number of records in data set before cleaning: {len( df )}")

    # drop records according to specifications
    df = pp.drop_records(df)

    print(f"Number of records in data set after cleaning: {len( df )}")
    print("")

    # bin data in columns according to specifications
    df = pp.bin_records(df)

    NUM_VALUES = len(df.columns)

    # make dictionaries mapping categorical codes to original values and vice versa
    df_values, df_codes, code_to_value_map, value_to_code_map = pp.make_mappings(df, NUM_VALUES)
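The helper module that read_dataset comes from is not shown on this page. A minimal sketch of the Excel variant the first two examples rely on, assuming it simply dispatches on the format string to the matching pandas reader (the dispatch logic and the CSV fallback are assumptions, not the author's code):

import pandas as pd

def read_dataset(path, fmt, sheet=None):
    # hypothetical reconstruction: dispatch on the format string
    # used in the examples ("excel"); anything else falls back to CSV
    if fmt == "excel":
        return pd.read_excel(path, sheet_name=sheet)
    return pd.read_csv(path)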
Example #2

import datetime

from colorama import Fore

import helper as hp
import preprocessing as pp
import bayesian as bn

if __name__ == '__main__':
    print( Fore.GREEN + "-----------------------------------------" )
    print( f"{datetime.datetime.now()}" )
    print( "-----------------------------------------" )

    df = hp.read_dataset( '../DBMedico/DBBCTI_20042014_VMMZ_GL.xls', "excel", sheet="DB3" )

    print( f"Number of lines before cleaning: {len( df )}" )

    # drop records according to specifications
    df = pp.drop_records( df )

    print( f"Number of lines after cleaning: {len( df )}" )

    # bin data in columns according to specifications
    df = pp.bin_records( df )

    NUM_VALUES = len( df.columns )

    # make dictionaries mapping categorical codes to original values and vice versa
    df_values, df_codes, code_to_value_map, value_to_code_map = pp.make_mappings( df, NUM_VALUES )

    # slice original dataset into value and code datasets
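pp.make_mappings is likewise not shown here. A plausible sketch of the four objects the call unpacks, assuming it builds a pandas Categorical per column and records the code/value correspondence in both directions (all names and logic are assumptions, not the author's implementation):

import pandas as pd

def make_mappings(df, num_values):
    df_values = df.copy()  # original values
    df_codes = df.copy()   # integer category codes
    code_to_value_map = {}
    value_to_code_map = {}
    for col in df.columns[:num_values]:
        cat = pd.Categorical(df[col])
        df_codes[col] = cat.codes
        code_to_value_map[col] = dict(enumerate(cat.categories))
        value_to_code_map[col] = {v: c for c, v in enumerate(cat.categories)}
    return df_values, df_codes, code_to_value_map, value_to_code_map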
Example #3
elif args.dataset == 'cifar10':
    dataset = dset.CIFAR10(root=args.dataroot, download=True,
                           transform=transforms.Compose([
                               transforms.Resize(args.imageSize),  # Scale was renamed Resize in modern torchvision
                               transforms.ToTensor(),
                               transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
                           ]))
    testset = dset.CIFAR10(root=args.dataroot, train=False, download=True,
                           transform=transforms.Compose([
                               transforms.Resize(args.imageSize),
                               transforms.ToTensor(),
                               transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
                           ]))

elif args.dataset == 'ecg':
    trainx, trainy = helper.read_dataset(args.dataroot + "/training_data4.hdf5")
    testx, testy = helper.read_dataset(args.dataroot + "/test_data4.hdf5")
    # crop the raw arrays and convert them to tensors
    trainx = torch.from_numpy(trainx[:, 10:, 30:-20, :])
    testx = torch.from_numpy(testx[:, 10:, 30:-20, :])
    # labels arrive one-hot encoded; argmax recovers the class index
    trainy = torch.from_numpy(np.argmax(trainy, axis=1))
    testy = torch.from_numpy(np.argmax(testy, axis=1))
    dataset = torch.utils.data.TensorDataset(trainx, trainy)
    testset = torch.utils.data.TensorDataset(testx, testy)
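The 'ecg' branch uses a second read_dataset variant that takes only a path and returns (x, y) numpy arrays from an HDF5 file. That helper is also not shown; a minimal sketch assuming h5py, where the dataset key names "x" and "y" are assumptions:

import h5py
import numpy as np

def read_dataset(path):
    # hypothetical HDF5 loader; the key names "x" and "y" are assumptions
    with h5py.File(path, "r") as f:
        x = np.asarray(f["x"])  # images, e.g. shape (N, H, W, C)
        y = np.asarray(f["y"])  # one-hot labels, shape (N, num_classes)
    return x, y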
Example #4
elif args.dataset == 'cifar10':
    dataset = dset.CIFAR10(root=args.dataroot,
                           download=True,
                           transform=transforms.Compose([
                               transforms.Resize(args.imageSize),
                               transforms.ToTensor(),
                               transforms.Normalize((0.5, 0.5, 0.5),
                                                    (0.5, 0.5, 0.5)),
                           ]))
    testset = dset.CIFAR10(root=args.dataroot,
                           train=False,
                           download=True,
                           transform=transforms.Compose([
                               transforms.Resize(args.imageSize),
                               transforms.ToTensor(),
                               transforms.Normalize((0.5, 0.5, 0.5),
                                                    (0.5, 0.5, 0.5)),
                           ]))
elif args.dataset == 'ecg':
    trainx, trainy = helper.read_dataset(args.dataroot + "/FILE.hdf5")
    testx, testy = helper.read_dataset(args.dataroot + "/FILE.hdf5")

    # crop the raw arrays and convert them to tensors
    trainx = torch.from_numpy(trainx[:, 10:, 30:-20, :])
    testx = torch.from_numpy(testx[:, 10:, 30:-20, :])
    # labels arrive one-hot encoded; argmax recovers the class index
    trainy = torch.from_numpy(np.argmax(trainy, axis=1))
    testy = torch.from_numpy(np.argmax(testy, axis=1))
    dataset = torch.utils.data.TensorDataset(trainx, trainy)
    testset = torch.utils.data.TensorDataset(testx, testy)
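In every example the resulting dataset and testset are standard PyTorch datasets, so they would typically be consumed through a DataLoader. A short usage sketch referencing the objects built above (the batch size is an arbitrary choice, not taken from the source):

import torch

train_loader = torch.utils.data.DataLoader(dataset, batch_size=64, shuffle=True)
test_loader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=False)

for images, labels in train_loader:
    ...  # feed each mini-batch to the model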