Example #1
    def clone(self):
        """Generate a clone of this GraphVertexData object.

           Each vertex layer is split in the DAG so that the original
           object and the clone can be consumed independently.
        """
        cloned_layers = []

        for i, node in enumerate(self.layers):
            # Split the vertex layer, then give each branch its own
            # Identity so the two graphs do not share nodes.
            temp = lbann.Split(node)
            self.layers[i] = lbann.Identity(temp)
            cloned_layers.append(lbann.Identity(temp))

        return GraphVertexData(cloned_layers, self.num_features)
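
A hedged usage sketch (GraphVertexData and its constructor are taken from the snippet above; the feature layers and the feature count are placeholders): after clone(), the original object and the copy hold separate Identity branches of the same Split, so both can feed downstream layers.

import lbann

# Placeholder per-vertex features; any list of LBANN layers works here.
features = [lbann.Identity(lbann.Input(name='samples'))]
vertices = GraphVertexData(features, 16)

# Both objects now read the same data through independent DAG nodes.
vertices_copy = vertices.clone()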
Example #2
    parser.add_argument('--keep-error-signals',
                        action='store_true',
                        help='Keep error signals (default: False)')

    parser.add_argument('--batch-job',
                        action='store_true',
                        help='Run as a batch job (default: false)')

    lbann.contrib.args.add_optimizer_arguments(
        parser,
        default_optimizer="adam",
        default_learning_rate=0.001,
    )
    args = parser.parse_args()

    # Construct layer graph
    input = lbann.Input(io_buffer='partitioned', target_mode='regression')
    universes = lbann.Split(input)
    secrets = lbann.Split(input)
    statistics_group_size = 1 if args.local_batchnorm else -1
    preds = CosmoFlow(
        input_width=args.input_width,
        output_size=args.num_secrets,
        use_bn=args.use_batchnorm,
        bn_statistics_group_size=statistics_group_size)(universes)
    mse = lbann.MeanSquaredError([preds, secrets])
    obj = lbann.ObjectiveFunction([mse])
    layers = list(lbann.traverse_layer_graph(input))

    # Set parallel_strategy
    parallel_strategy = get_parallel_strategy_args(
        sample_groups=args.mini_batch_size, depth_groups=args.depth_groups)
    pooling_id = 0
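
The listing cuts off here; a sketch of how such scripts typically continue, assumed by analogy with Example #6 below (the epoch count, metric name, and callbacks are placeholders):

    # Assumed continuation: attach the parallel strategy to every layer,
    # then assemble the model; see Example #6 for the same pattern.
    for layer in layers:
        layer.parallel_strategy = parallel_strategy

    model = lbann.Model(args.num_epochs,
                        layers=layers,
                        objective_function=obj,
                        metrics=[lbann.Metric(mse, name='MSE', unit='')],
                        callbacks=[lbann.CallbackPrint(), lbann.CallbackTimer()])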
Example #3
File: vae_mnist.py  Project: benson31/lbann
parser.add_argument(
    '--data-reader',
    action='store',
    default='default',
    type=str,
    help='Data reader options: "numpy_npz_int16" or "mnist" '
    '(default: data_reader_mnist.prototext)')
lbann.contrib.args.add_optimizer_arguments(parser, default_learning_rate=0.1)
args = parser.parse_args()

# Start of layers

# Construct layer graph
input_ = lbann.Input(name='data')
image = lbann.Split(input_, name='image')
dummy = lbann.Dummy(input_, name='dummy')

# Encoder
encode1 = lbann.FullyConnected(image,
                               name="encode1",
                               num_neurons=1000,
                               has_bias=True)

encode1neuron = lbann.Relu(encode1, name="encode1neuron")

encode2 = lbann.FullyConnected(encode1neuron,
                               name="encode2",
                               num_neurons=500,
                               has_bias=True)
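
The encoder is cut off here; purely to illustrate where the 'image' branch of the Split gets consumed, a hypothetical reconstruction tail (layer sizes are placeholders; 784 = 28x28 MNIST pixels):

# Hypothetical decoder tail: map back to pixel space and compare the
# reconstruction against the 'image' branch of the Split above.
decode1 = lbann.FullyConnected(encode2, name='decode1',
                               num_neurons=1000, has_bias=True)
decode1neuron = lbann.Relu(decode1, name='decode1neuron')
reconstruction = lbann.FullyConnected(decode1neuron, name='reconstruction',
                                      num_neurons=784, has_bias=True)
recon_error = lbann.MeanSquaredError([reconstruction, image])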
Example #4
parser.add_argument(
    '--data-reader',
    action='store',
    default='./data_readers/data_reader_candle_pilot1.prototext',
    type=str,
    help='Data reader prototext file '
    '(default: ./data_readers/data_reader_candle_pilot1.prototext)')
lbann.contrib.args.add_optimizer_arguments(parser, default_learning_rate=0.1)
args = parser.parse_args()

# Start of layers

# Construct layer graph
input_ = lbann.Input(name='input', target_mode="reconstruction")
data = lbann.Split(input_, name='data')
dummy = lbann.Dummy(input_, name='dummy')

# Encoder
encode1 = lbann.FullyConnected(data,
                               name="encode1",
                               data_layout="model_parallel",
                               num_neurons=2000,
                               has_bias=True)

relu1 = lbann.Relu(encode1, name="relu1", data_layout="model_parallel")

encode2 = lbann.FullyConnected(relu1,
                               name="encode2",
                               data_layout="model_parallel",
                               num_neurons=1000,
                               has_bias=True)
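
The snippet stops mid-encoder; a sketch, assumed from this script's structure, of how the target_mode="reconstruction" setup would eventually pair the network output with the 'data' branch of the Split (sizes are placeholders):

# Assumed continuation: decode and score the reconstruction against the
# 'data' branch of the Split.
decode1 = lbann.FullyConnected(encode2, name='decode1',
                               data_layout='model_parallel',
                               num_neurons=2000, has_bias=True)
recon_error = lbann.MeanSquaredError([decode1, data])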
Example #5
parser.add_argument(
    '--data-reader',
    action='store',
    default='data_readers/data_reader_candle_pilot1.prototext',
    type=str,
    help='Data reader prototext file '
    '(default: data_readers/data_reader_candle_pilot1.prototext)')
lbann.contrib.args.add_optimizer_arguments(parser, default_learning_rate=0.1)
args = parser.parse_args()

# Start of layers

# Construct layer graph
input_ = lbann.Input(name='data')
finetunedata = lbann.Split(input_, name='finetunedata')
label = lbann.Split(input_, name='label')

# Encoder
encode1 = lbann.FullyConnected(finetunedata,
                               name="encode1",
                               data_layout="model_parallel",
                               num_neurons=2000,
                               has_bias=True)

relu1 = lbann.Relu(encode1, name="relu1", data_layout="model_parallel")

encode2 = lbann.FullyConnected(relu1,
                               name="encode2",
                               data_layout="model_parallel",
                               num_neurons=1000,
                               has_bias=True)
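
This variant splits the input into a 'finetunedata' branch and a 'label' branch for fine-tuning; a hypothetical classification head that would consume the label branch (the class count of 10 is a placeholder):

# Hypothetical head: predict classes from the encoder output and train
# against the 'label' branch of the Split.
fc_out = lbann.FullyConnected(encode2, name='fc_out',
                              data_layout='model_parallel',
                              num_neurons=10, has_bias=True)
probs = lbann.Softmax(fc_out, name='probs')
ce = lbann.CrossEntropy([probs, label])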
Example #6
File: unet3d.py  Project: oyamay/lbann
    parser.add_argument('--batch-job',
                        action='store_true',
                        help='Run as a batch job (default: false)')

    lbann.contrib.args.add_optimizer_arguments(
        parser,
        default_optimizer="adam",
        default_learning_rate=0.001,
    )
    args = parser.parse_args()

    parallel_strategy = get_parallel_strategy_args(
        sample_groups=args.mini_batch_size, depth_groups=args.depth_groups)

    # Construct layer graph
    input = lbann.Input(io_buffer='partitioned',
                        target_mode='label_reconstruction')
    volume = lbann.Split(input)
    output = UNet3D()(volume)
    segmentation = lbann.Split(input)
    ce = lbann.CrossEntropy([output, segmentation], use_labels=True)
    obj = lbann.ObjectiveFunction([ce])
    layers = list(lbann.traverse_layer_graph(input))
    for l in layers:
        l.parallel_strategy = parallel_strategy

    # Setup model
    metrics = [lbann.Metric(ce, name='CE', unit='')]
    callbacks = [
        lbann.CallbackPrint(),
        lbann.CallbackTimer(),
        lbann.CallbackGPUMemoryUsage(),
        lbann.CallbackProfiler(skip_init=True),
    ]
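
The callback list is truncated in the listing; after closing it, the usual continuation (assumed, following the pattern of LBANN driver scripts) assembles the model and a trainer:

    # Assumed continuation: bundle the pieces above into a model and a
    # trainer; argument names follow LBANN's Python front end.
    model = lbann.Model(args.num_epochs,
                        layers=layers,
                        objective_function=obj,
                        metrics=metrics,
                        callbacks=callbacks)
    trainer = lbann.Trainer(mini_batch_size=args.mini_batch_size)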