Code example #1
def main(argv):
    ## Formatting the argv received from the resource manager
    for i in range(len(argv)):
        argv[i] = str(argv[i]).replace('[', '').replace(']',
                                                        '').replace(',', '')

    ## Formatting the worker IPs to build
    ## a TensorFlow cluster in the executor class
    temp_workers = []
    for i in range(len(argv) - 3):
        temp_workers.append(argv[i + 3])
    temp_workers = ','.join(temp_workers)
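
    ## Illustration (hypothetical values, not part of the original sample):
    ## a resource manager may pass worker entries as pieces of a stringified
    ## list, e.g. '[10.0.0.2,' and '10.0.0.3]'; the cleanup above strips the
    ## brackets and commas, and the join produces the comma-separated string
    ## '10.0.0.2,10.0.0.3' that is later passed as ip_workers.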

    ## 1) Define the stacked layers: activation function, number of layers and their neurons
    layer_1 = [Linear(1300, 4)]

    ## 2) Select the neural network architecture and pass the hyper-parameters
    cnn_model = CustomGraph(input_size_1=1300,
                            input_size_2=1,
                            output_size=4,
                            loss=CrossEntropy,
                            optimizer=Adam(lr=0.0001),
                            layers=layer_1)

    ## 3) Dataset configurations for splitting, batching and target selection
    data_config = Batching(dataset_name="ECG",
                           valid_size=0.1,
                           test_size=0.1,
                           batch_size=50,
                           devices_number=4)

    ## 4) Select the computational platform and pass the DNN and Dataset configurations
    if ON_ASTRO:
        testbed_path = "/home/mpiuser/cloud/0/diagnosenet/samples/1_customgraph/testbed"
    else:
        testbed_path = "/home/mpiuser/cloud/diagnosenet/samples/1_customgraph/testbed"
    platform = Distibuted_GRPC(model=cnn_model,
                               datamanager=data_config,
                               monitor=enerGyPU(testbed_path=testbed_path,
                                                machine_type="arm",
                                                file_path=workspace_path),
                               max_epochs=20,
                               early_stopping=3,
                               ip_ps=argv[2],
                               ip_workers=temp_workers)

    ## 5) Use the platform's training modes to train efficiently
    if ON_ASTRO:
        dataset_path = "/home/mpiuser/cloud/0/diagnosenet/samples/1_customgraph//dataset/"
    else:
        dataset_path = "/home/mpiuser/cloud/diagnosenet/samples/1_customgraph/dataset"
    platform.asynchronous_training(dataset_name="ECG",
                                   dataset_path=dataset_path,
                                   inputs_name="xdata.npy",
                                   targets_name="undim-ydata.npy",
                                   job_name=argv[0],
                                   task_index=int(argv[1]))
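
The sample above does not show how main() is invoked. Below is a minimal entry point, assuming the script is launched directly with the arguments in the order the code unpacks them (job name, task index, parameter-server IP, then worker IPs); this guard is a sketch and not part of the original sample.

if __name__ == "__main__":
    import sys
    ## Expected argument order (as unpacked by main() above):
    ## job_name, task_index, ip_ps, worker_ip_1, worker_ip_2, ...
    main(sys.argv[1:])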
Code example #2
                              optimizer=Adam(lr=0.001),
                              dropout=0.8)

## 3) Dataset configurations for splitting, batching and target selection
data_config = MultiTask(dataset_name="MCP-PMSI",
                        valid_size=0.05,
                        test_size=0.15,
                        batch_size=250,
                        target_name='Y11',
                        target_start=0,
                        target_end=14)

## 4) Select the computational platform and pass the DNN and Dataset configurations
platform = DesktopExecution(model=mlp_model_1,
                            datamanager=data_config,
                            monitor=enerGyPU(machine_type="arm",
                                             file_path=workspace_path),
                            max_epochs=10,
                            early_stopping=5)

### Read the PMSI dataset files with IO_Functions from diagnosenet.io_functions
X = IO_Functions()._read_file("dataset/patients_features.txt")
y = IO_Functions()._read_file("dataset/medical_targets_Y14.txt")

## 5) Use the platform's training modes to train efficiently
platform.training_memory(X, y)

print("Execution Time: {}".format((time.time() - execution_start)))

####################################################
## Path for Octopus Machine: Full Representation
#path = "/data_B/datasets/drg-PACA/healthData/sandbox-FULL-W1_x1_x2_x3_x4_x5_x7_x8_Y1/1_Mining-Stage/binary_representation/"
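
Code example #2 is an excerpt: the model definition is cut off, and the imports, workspace_path and execution timer it relies on are not shown. A minimal sketch of the assumed preamble follows; the module paths and the workspace_path value are assumptions and should be checked against the installed diagnosenet version.

import time
from diagnosenet.datamanager import MultiTask         # assumed module path
from diagnosenet.executors import DesktopExecution    # assumed module path
from diagnosenet.monitor import enerGyPU              # assumed module path
from diagnosenet.io_functions import IO_Functions     # assumed module path
from diagnosenet.optimizers import Adam               # assumed module path

workspace_path = "testbed"        # hypothetical: any writable directory
execution_start = time.time()     # used by the final print statement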
Code example #3
                            layers=layers,
                            loss=CrossEntropy,
                            optimizer=Adam(lr=0.001),
                            dropout=0.8)

## 3) Dataset configurations for splitting, batching and target selection
data_config_1 = Batching(dataset_name="MCP-PMSI",
                         valid_size=0.05,
                         test_size=0.10,
                         devices_number=7,
                         batch_size=40)

## 4) Select the computational platform and pass the DNN and Dataset configurations
platform = Distibuted_MPI(model=mlp_model,
                          datamanager=data_config_1,
                          monitor=enerGyPU(machine_type="arm",
                                           file_path=file_path),
                          max_epochs=20,
                          early_stopping=3)

## 5) Use the platform's training modes to train efficiently
platform.asynchronous_training(dataset_name="MCP-PMSI",
                               dataset_path="dataset/",
                               inputs_name="patients_features.txt",
                               targets_name="medical_targets_Y14.txt",
                               weighting=1)

# platform.synchronous_training(dataset_name="MCP-PMSI",
#                               dataset_path="dataset/",
#                               inputs_name="patients_features.txt",
#                               targets_name="medical_targets_Y14.txt")
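
The commented-out call above is the synchronous alternative to asynchronous_training. In the usual distributed-training sense, asynchronous mode lets workers push parameter updates independently, while synchronous mode aggregates updates across workers at each step; consult the diagnosenet documentation for the exact semantics. A small sketch for switching between the two modes, reusing only the calls shown in this example:

## Hypothetical toggle between the two training modes shown above.
USE_SYNCHRONOUS = False
if USE_SYNCHRONOUS:
    platform.synchronous_training(dataset_name="MCP-PMSI",
                                  dataset_path="dataset/",
                                  inputs_name="patients_features.txt",
                                  targets_name="medical_targets_Y14.txt")
else:
    platform.asynchronous_training(dataset_name="MCP-PMSI",
                                   dataset_path="dataset/",
                                   inputs_name="patients_features.txt",
                                   targets_name="medical_targets_Y14.txt",
                                   weighting=1)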
Code example #4
def main(argv):
    ## Formatting the argv received from the resource manager
    for i in range(len(argv)):
        argv[i] = str(argv[i]).replace('[', '').replace(']',
                                                        '').replace(',', '')

    ## Formatting the worker IPs to build
    ## a TensorFlow cluster in the executor class
    temp_workers = []
    for i in range(len(argv) - 3):
        temp_workers.append(argv[i + 3])
    temp_workers = ','.join(temp_workers)

    ## PMSI-ICU Dataset shapes
    X_shape = 10833  #14637
    y_shape = 381
    Y1_shape = 14
    Y2_shape = 239
    Y3_shape = 5

    ## 1) Define the stacked layers: activation function, number of layers and their neurons
    stacked_layers_1 = [
        Relu(X_shape, 1024),
        Relu(1024, 1024),
        Linear(1024, Y1_shape)
    ]

    stacked_layers_2 = [
        Relu(X_shape, 512),
        Relu(512, 512),
        Relu(512, 512),
        Relu(512, 512),
        Linear(512, Y1_shape)
    ]

    stacked_layers_3 = [
        Relu(X_shape, 256),
        Relu(256, 256),
        Relu(256, 256),
        Relu(256, 256),
        Relu(256, 256),
        Relu(256, 256),
        Relu(256, 256),
        Relu(256, 256),
        Linear(256, Y1_shape)
    ]

    ## 2) Select the neural network architecture and pass the hyper-parameters
    mlp_model = SequentialGraph(input_size=X_shape,
                                output_size=Y1_shape,
                                layers=stacked_layers_3,
                                loss=CrossEntropy,
                                optimizer=Adam(lr=0.001),
                                dropout=0.8)
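
    ## Note (not in the original sample): the shallower stacks defined above can
    ## be tried by changing only the `layers` argument, e.g.:
    # mlp_model = SequentialGraph(input_size=X_shape, output_size=Y1_shape,
    #                             layers=stacked_layers_1, loss=CrossEntropy,
    #                             optimizer=Adam(lr=0.001), dropout=0.8)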

    ## 3) Dataset configurations for splitting, batching and target selection
    data_config_1 = Batching(dataset_name="MCP-PMSI",
                             valid_size=0.05,
                             test_size=0.10,
                             devices_number=4,
                             batch_size=10)

    ## 4) Select the computational platform and pass the DNN and Dataset configurations
    if ON_ASTRO:
        testbed_path = "/home/mpiuser/cloud/0/diagnosenet/samples/0_sequentialgraph/testbed"
    else:
        testbed_path = "/home/mpiuser/cloud/diagnosenet/samples/0_sequentialgraph/testbed"
    platform = Distibuted_GRPC(model=mlp_model,
                               datamanager=data_config_1,
                               monitor=enerGyPU(testbed_path=testbed_path,
                                                machine_type="arm",
                                                file_path=workspace_path),
                               max_epochs=20,
                               early_stopping=3,
                               ip_ps=argv[2],
                               ip_workers=temp_workers)

    ## 5) Use the platform's training modes to train efficiently
    if ON_ASTRO:
        dataset_path = "/home/mpiuser/cloud/0/diagnosenet/samples/0_sequentialgraph/dataset/"
    else:
        dataset_path = "/home/mpiuser/cloud/PMSI-Dataset/"
    platform.asynchronous_training(dataset_name="MCP-PMSI",
                                   dataset_path=dataset_path,
                                   inputs_name="patients_features-full.txt",
                                   targets_name="medical_targets-full.txt",
                                   job_name=argv[0],
                                   task_index=int(argv[1]))