def main(argv):
    ## Format the argv received from the resource manager
    for i in range(len(argv)):
        argv[i] = str(argv[i]).replace('[', '').replace(']',
                                                        '').replace(',', '')

    ## Format the worker IPs to build
    ## a TensorFlow cluster in the executor class
    temp_workers = []
    for i in range(len(argv) - 3):
        temp_workers.append(argv[i + 3])
    temp_workers = ','.join(temp_workers)
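    ## Example with hypothetical values: if the resource manager passes the
    ## arguments as a stringified list, e.g.
    ##   argv = ['[worker,', '0,', '10.0.0.1:2222,', '10.0.0.2:2222,', '10.0.0.3:2222]']
    ## the loops above scrub the brackets/commas so that
    ##   argv == ['worker', '0', '10.0.0.1:2222', ...] and
    ##   temp_workers == '10.0.0.2:2222,10.0.0.3:2222'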

    ## 1) Define the stacked layers: activation functions, number of layers and their neurons
    layer_1 = [Linear(1300, 4)]

    ## 2) Select the neural network architecture and pass the hyper-parameters
    cnn_model = CustomGraph(input_size_1=1300,
                            input_size_2=1,
                            output_size=4,
                            loss=CrossEntropy,
                            optimizer=Adam(lr=0.0001),
                            layers=layer_1)

    ## 3) Dataset configurations for splitting, batching and target selection
    data_config = Batching(dataset_name="ECG",
                           valid_size=0.1,
                           test_size=0.1,
                           batch_size=50,
                           devices_number=4)
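    ## Batching splits the dataset into train/valid/test (80%/10%/10% here) and
    ## builds mini-batches of 50 records; devices_number presumably maps the
    ## batches onto the 4 distributed workers.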

    ## 4) Select the computational platform and pass the DNN and Dataset configurations
    if ON_ASTRO:
        testbed_path = "/home/mpiuser/cloud/0/diagnosenet/samples/1_customgraph/testbed"
    else:
        testbed_path = "/home/mpiuser/cloud/diagnosenet/samples/1_customgraph/testbed"
    platform = Distibuted_GRPC(model=cnn_model,
                               datamanager=data_config,
                               monitor=enerGyPU(testbed_path=testbed_path,
                                                machine_type="arm",
                                                file_path=workspace_path),
                               max_epochs=20,
                               early_stopping=3,
                               ip_ps=argv[2],
                               ip_workers=temp_workers)
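    ## The gRPC executor builds a TensorFlow cluster from one parameter server
    ## (ip_ps) plus the comma-separated worker list (ip_workers), while enerGyPU
    ## monitors the run and writes its records under testbed_path.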

    ## 5) Use the platform modes for training in an efficient way
    if ON_ASTRO:
        dataset_path = "/home/mpiuser/cloud/0/diagnosenet/samples/1_customgraph//dataset/"
    else:
        dataset_path = "/home/mpiuser/cloud/diagnosenet/samples/1_customgraph/dataset"
    platform.asynchronous_training(dataset_name="ECG",
                                   dataset_path=dataset_path,
                                   inputs_name="xdata.npy",
                                   targets_name="undim-ydata.npy",
                                   job_name=argv[0],
                                   task_index=int(argv[1]))
## 1) Define the stacked layers: the number of layers and their neurons
layers = [
    Relu(X_shape, 2048),
    Relu(2048, 2048),
    Relu(2048, 2048),
    Relu(2048, 2048),
    Linear(2048, y_shape)
]

## 2) Select the neural network architecture and pass the hyper-parameters
mlp_model = SequentialGraph(input_size=X_shape,
                            output_size=y_shape,
                            layers=layers,
                            loss=CrossEntropy(),
                            optimizer=Adam(lr=0.001),
                            dropout=0.8)

## 3) Dataset configurations for splitting, batching and target selection
data_config = MultiTask(dataset_name="W1-TEST_x1_x2_x3_x4_x5_x7_x8_Y1",
                        valid_size=0.10,
                        test_size=0.10,
                        batch_size=200,
                        target_name='Y11',
                        target_start=0,
                        target_end=14)
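## Illustrative note (assumption about the MultiTask semantics): target_start and
## target_end select the 'Y11' task as a column slice of the full target matrix,
## i.e. roughly y_task = Y[:, target_start:target_end] (columns 0..13).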

## 4) Select the computational platform and pass the DNN and Dataset configurations
platform = MultiGPU(model=mlp_model,
                    datamanager=data_config,
                    monitor=enerGyPU(file_path=file_path))
def main(argv):
    ## Format the argv received from the resource manager
    for i in range(len(argv)):
        argv[i] = str(argv[i]).replace('[', '').replace(']',
                                                        '').replace(',', '')

    ## Format the worker IPs to build
    ## a TensorFlow cluster in the executor class
    temp_workers = []
    for i in range(len(argv) - 3):
        temp_workers.append(argv[i + 3])
    temp_workers = ','.join(temp_workers)

    ## PMSI-ICU Dataset shapes
    X_shape = 10833  #14637
    y_shape = 381
    Y1_shape = 14
    Y2_shape = 239
    Y3_shape = 5
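    ## Y1_shape (14 outputs) is the target group used by the stacks below;
    ## Y2_shape and Y3_shape are listed for reference only in this sample.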

    ## 1) Define the stacked layers: activation functions, number of layers and their neurons
    stacked_layers_1 = [
        Relu(X_shape, 1024),
        Relu(1024, 1024),
        Linear(1024, Y1_shape)
    ]

    stacked_layers_2 = [
        Relu(X_shape, 512),
        Relu(512, 512),
        Relu(512, 512),
        Relu(512, 512),
        Linear(512, Y1_shape)
    ]

    stacked_layers_3 = [
        Relu(X_shape, 256),
        Relu(256, 256),
        Relu(256, 256),
        Relu(256, 256),
        Relu(256, 256),
        Relu(256, 256),
        Relu(256, 256),
        Relu(256, 256),
        Linear(256, Y1_shape)
    ]
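    ## Three candidate stacks trading width for depth; stacked_layers_3 is the
    ## one passed to SequentialGraph below.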

    ## 2) Select the neural network architecture and pass the hyper-parameters
    mlp_model = SequentialGraph(input_size=X_shape,
                                output_size=Y1_shape,
                                layers=stacked_layers_3,
                                loss=CrossEntropy,
                                optimizer=Adam(lr=0.001),
                                dropout=0.8)

    ## 3) Dataset configurations for splitting, batching and target selection
    data_config_1 = Batching(dataset_name="MCP-PMSI",
                             valid_size=0.05,
                             test_size=0.10,
                             devices_number=4,
                             batch_size=10)

    ## 4) Select the computational platform and pass the DNN and Dataset configurations
    if ON_ASTRO:
        testbed_path = "/home/mpiuser/cloud/0/diagnosenet/samples/0_sequentialgraph/testbed"
    else:
        testbed_path = "/home/mpiuser/cloud/diagnosenet/samples/0_sequentialgraph/testbed"
    platform = Distibuted_GRPC(model=mlp_model,
                               datamanager=data_config_1,
                               monitor=enerGyPU(testbed_path=testbed_path,
                                                machine_type="arm",
                                                file_path=workspace_path),
                               max_epochs=20,
                               early_stopping=3,
                               ip_ps=argv[2],
                               ip_workers=temp_workers)

    ## 5) Use the platform modes for training in an efficient way
    if ON_ASTRO:
        dataset_path = "/home/mpiuser/cloud/0/diagnosenet/samples/0_sequentialgraph/dataset/"
    else:
        dataset_path = "/home/mpiuser/cloud/PMSI-Dataset/"
    platform.asynchronous_training(dataset_name="MCP-PMSI",
                                   dataset_path=dataset_path,
                                   inputs_name="patients_features-full.txt",
                                   targets_name="medical_targets-full.txt",
                                   job_name=argv[0],
                                   task_index=int(argv[1]))
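
## Minimal launcher sketch (assumption: the resource manager forwards the job
## name, task index, parameter-server IP and worker IPs as CLI arguments):
if __name__ == "__main__":
    import sys
    main(sys.argv[1:])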