def __init__(self, model_layers, supervised=True):
    """Build the network graph from a list of layer dictionaries.

    Relies on tensorflow (tf), collections.OrderedDict, and the package's
    utils module being imported at module level.
    """
    self.model_layers = model_layers
    self.placeholders = {}
    self.placeholders['inputs'] = []
    self.lastlayer = ''
    self.num_dropout = 0

    # hidden placeholders (e.g. the training-mode flag) and their default feed values
    self.hidden_feed_dict = {}
    self.is_training = tf.placeholder(tf.bool, name='is_training')
    self.hidden_feed_dict[self.is_training] = True

    # build the layers in order, keyed by name
    self.network = OrderedDict()
    self.build_layers()

    if supervised:
        # supervised model: targets match the number of units in the final layer
        targets = utils.placeholder(shape=(None, model_layers[-1]['num_units']), name='output')
        self.placeholders['targets'] = targets
        self.network['output'] = self.network[self.lastlayer]
    else:
        # unsupervised model (e.g. autoencoder): targets are the inputs themselves
        self.placeholders['targets'] = self.placeholders['inputs']
def model(input_shape, num_labels=None):
    # design a neural network model

    # placeholders
    inputs = utils.placeholder(shape=input_shape, name='input')
    is_training = tf.placeholder(tf.bool, name='is_training')
    keep_prob_conv = tf.placeholder(tf.float32, name='keep_prob_conv')
    keep_prob_dense = tf.placeholder(tf.float32, name='keep_prob_dense')
    targets = utils.placeholder(shape=(None, num_labels), name='output')

    # placeholder dictionary
    placeholders = {
        'inputs': inputs,
        'targets': targets,
        'keep_prob_conv': keep_prob_conv,
        'keep_prob_dense': keep_prob_dense,
        'is_training': is_training
    }

    # create model
    layer1 = {'layer': 'input', 'inputs': inputs, 'name': 'input'}
    layer2 = {
        'layer': 'conv2d',
        'num_filters': 32,
        'filter_size': (1, 5),
        'batch_norm': is_training,
        'activation': 'prelu',
        'dropout': keep_prob_conv,
        'name': 'conv1'
    }
    layer3 = {
        'layer': 'residual-conv2d',
        'function': 'prelu',
        'filter_size': (1, 5),
        'dropout': keep_prob_conv,
        'batch_norm': is_training,
        'name': 'resid1'
    }
    layer4 = {
        'layer': 'conv2d',
        'num_filters': 64,
        'filter_size': (4, 5),
        'batch_norm': is_training,
        'activation': 'prelu',
        'dropout': keep_prob_conv,
        'name': 'conv2'
    }
    layer5 = {
        'layer': 'residual-conv2d',
        'function': 'prelu',
        'filter_size': (1, 5),
        'batch_norm': is_training,
        'dropout': keep_prob_conv,
        'pool_size': (1, 10),
        'name': 'resid2'
    }
    layer6 = {
        'layer': 'conv2d',
        'num_filters': 128,
        'filter_size': (1, 1),
        'batch_norm': is_training,
        'activation': 'prelu',
        'dropout': keep_prob_conv,
        'name': 'conv3'
    }
    layer7 = {
        'layer': 'dense',
        'num_units': 256,
        'activation': 'prelu',
        'dropout': keep_prob_dense,
        'name': 'dense1'
    }
    layer8 = {
        'layer': 'residual-dense',
        'function': 'prelu',
        'batch_norm': is_training,
        'dropout': keep_prob_dense,
        'name': 'resid3'
    }
    layer9 = {
        'layer': 'dense',
        'num_units': num_labels,
        'activation': 'softmax',
        'name': 'dense2'
    }

    from tfomics import build_network
    model_layers = [
        layer1, layer2, layer3, layer4, layer5, layer6, layer7, layer8, layer9
    ]
    net = build_network(model_layers)

    # optimization parameters
    optimization = {
        "objective": "categorical",
        "optimizer": "adam",
        "learning_rate": 0.001,
        "l2": 1e-6,
        # "l1": 0,
    }

    return net, placeholders, optimization
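# --------------------------------------------------------------------------
# The layer dicts above hand batch normalization and dropout the 'is_training'
# and 'keep_prob_*' placeholders, so the same graph can be switched between
# training and evaluation purely through the feed dict. The standalone sketch
# below only illustrates that idea in plain TensorFlow 1.x: it does NOT use
# build_network or any tfomics internals, and the input shape, filter count,
# and relu stand-in for 'prelu' are assumptions chosen to mirror 'conv1' above.
# --------------------------------------------------------------------------
import numpy as np
import tensorflow as tf  # assumes TensorFlow 1.x, as in the snippets above

x = tf.placeholder(tf.float32, shape=(None, 4, 200, 1), name='demo_input')
is_training = tf.placeholder(tf.bool, name='demo_is_training')
keep_prob_conv = tf.placeholder(tf.float32, name='demo_keep_prob_conv')

# roughly what a spec like layer2 could expand to
h = tf.layers.conv2d(x, filters=32, kernel_size=(1, 5), padding='same')
h = tf.layers.batch_normalization(h, training=is_training)  # 'batch_norm': is_training
h = tf.nn.relu(h)                                           # stand-in for 'prelu'
h = tf.nn.dropout(h, keep_prob=keep_prob_conv)              # 'dropout': keep_prob_conv

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    batch = np.random.rand(2, 4, 200, 1).astype(np.float32)

    # training-time pass: batch norm in training mode, dropout active
    train_out = sess.run(h, {x: batch, is_training: True, keep_prob_conv: 0.8})

    # evaluation-time pass: same graph, dropout disabled through the feed dict
    test_out = sess.run(h, {x: batch, is_training: False, keep_prob_conv: 1.0})
    print(train_out.shape, test_out.shape)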
def model(input_shape, num_labels=None):
    # design a neural network model

    # placeholders
    inputs = utils.placeholder(shape=input_shape, name='input')
    is_training = tf.placeholder(tf.bool, name='is_training')
    keep_prob = tf.placeholder(tf.float32, name='keep_prob')
    targets = utils.placeholder(shape=(None, num_labels), name='output')

    # placeholder dictionary
    placeholders = {
        'inputs': inputs,
        'targets': targets,
        'keep_prob': keep_prob,
        'is_training': is_training
    }

    # create model
    layer1 = {'layer': 'input', 'inputs': inputs, 'name': 'input'}
    layer2 = {
        'layer': 'conv2d',
        'num_filters': 18,
        'filter_size': (2, 5),
        'batch_norm': is_training,
        'activation': 'leaky_relu',
        'name': 'conv1'
    }
    layer3 = {
        'layer': 'conv2d',
        'num_filters': 40,
        'filter_size': (2, 5),
        'batch_norm': is_training,
        'activation': 'leaky_relu',
        'pool_size': (1, 10),
        'name': 'conv2'
    }
    layer4 = {
        'layer': 'conv2d',
        'num_filters': 15,
        'filter_size': (1, 1),
        'batch_norm': is_training,
        'activation': 'leaky_relu',
        'name': 'conv3'
    }
    layer5 = {
        'layer': 'dense',
        'num_units': 100,
        'activation': 'leaky_relu',
        'dropout': keep_prob,
        'name': 'dense1'
    }
    layer6 = {
        'layer': 'dense',
        'num_units': num_labels,
        'activation': 'softmax',
        'name': 'dense2'
    }

    from tfomics import build_network
    model_layers = [layer1, layer2, layer3, layer4, layer5, layer6]
    net = build_network(model_layers)

    # optimization parameters
    optimization = {
        "objective": "categorical",
        "optimizer": "adam",
        "learning_rate": 0.001,
        "l2": 1e-6,
        # "l1": 0,
    }

    return net, placeholders, optimization
def model(input_shape, num_labels):

    # placeholders
    inputs = utils.placeholder(shape=input_shape, name='input')
    is_training = tf.placeholder(tf.bool, name='is_training')
    keep_prob = tf.placeholder(tf.float32, name='keep_prob')
    targets = utils.placeholder(shape=(None, num_labels), name='output')

    # placeholder dictionary
    placeholders = {
        'inputs': inputs,
        'targets': targets,
        'keep_prob': keep_prob,
        'is_training': is_training
    }

    # create model
    layer1 = {'layer': 'input', 'inputs': inputs, 'name': 'input'}
    layer2 = {
        'layer': 'conv1d',
        'num_filters': {
            'start': 20,
            'bounds': [1, 200],
            'scale': 20
        },
        'filter_size': {
            'start': 5,
            'bounds': [3, 19],
            'scale': 6,
            'multiples': 2,
            'offset': 0
        },
        'batch_norm': is_training,
        'padding': 'SAME',
        'activation': 'relu',
        'pool_size': {
            'start': 4,
            'bounds': [1, 10],
            'scale': 6,
            'multiples': 4
        },
        'name': 'conv1'
    }
    layer3 = {
        'layer': 'conv1d',
        'num_filters': {
            'start': 20,
            'bounds': [1, 200],
            'scale': 20
        },
        'filter_size': {
            'start': 5,
            'bounds': [3, 19],
            'scale': 6,
            'multiples': 2,
            'offset': 0
        },
        'batch_norm': is_training,
        'padding': 'SAME',
        'activation': 'relu',
        'pool_size': {
            'start': 4,
            'bounds': [1, 10],
            'scale': 6,
            'multiples': 4
        },
        'dropout': keep_prob,
        'name': 'conv2'
    }
    layer4 = {
        'layer': 'dense',
        'num_units': {
            'start': 120,
            'bounds': [20, 1000],
            'scale': 100,
            'multiples': 10
        },
        'activation': 'sigmoid',
        'dropout': keep_prob,
        'name': 'dense1'
    }
    layer5 = {
        'layer': 'dense',
        'num_units': num_labels,
        'activation': 'sigmoid',
        'name': 'dense2'
    }

    #from tfomics import build_network
    model_layers = [layer1, layer2, layer3, layer4, layer5]

    # optimization parameters
    optimization = {
        "objective": "binary",
        "optimizer": "adam",
        "learning_rate": {
            'start': -3,
            'bounds': [-4, -1],
            'scale': 1.5,
            'transform': 'log'
        },
        "l2": {
            'start': -6,
            'bounds': [-8, -2],
            'scale': 3,
            'transform': 'log'
        },
        # "l1": 0,
    }
    return model_layers, placeholders, optimization
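# --------------------------------------------------------------------------
# In this variant the numeric settings are replaced by search-specification
# dicts ('start', 'bounds', 'scale', plus optional 'multiples', 'offset',
# 'transform'), and model_layers is returned unbuilt so a hyperparameter
# search can instantiate candidate models. The actual sampling rules live in
# the search code and are not shown here; the toy sampler below is only one
# plausible reading of those fields (every rule in it is an assumption),
# included to make the spec format concrete.
# --------------------------------------------------------------------------
import numpy as np

def sample_param(spec, rng=np.random):
    """Toy sampler (assumed semantics): draw around 'start' with spread 'scale',
    clip to 'bounds', snap to the nearest multiple of 'multiples' (shifted by
    'offset'), and exponentiate base 10 when transform == 'log'."""
    if not isinstance(spec, dict):
        return spec  # plain numbers (e.g. num_labels) pass through unchanged

    value = rng.normal(loc=spec['start'], scale=spec['scale'])
    low, high = spec['bounds']
    value = float(np.clip(value, low, high))

    if 'multiples' in spec:
        m = spec['multiples']
        offset = spec.get('offset', 0)
        value = round((value - offset) / m) * m + offset
        value = float(np.clip(value, low, high))

    if spec.get('transform') == 'log':
        value = 10.0 ** value  # e.g. 'start': -3 -> learning rate near 1e-3
    return value

# example draws for two of the specs used above
filter_size_spec = {'start': 5, 'bounds': [3, 19], 'scale': 6, 'multiples': 2, 'offset': 0}
learning_rate_spec = {'start': -3, 'bounds': [-4, -1], 'scale': 1.5, 'transform': 'log'}
print(sample_param(filter_size_spec), sample_param(learning_rate_spec))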