Example No. 1
    def _create_network(self):
        self.vars = vardict()
        self.vars.x = tf.placeholder("float", shape=[None, self.xlen])
        self.vars.y = tf.placeholder("float", shape=[None, 1])

        #def fully_connected():
            
        # Create Model
        self.parameters["W1"] = tf.Variable(tf.truncated_normal([1, self.xlen], stddev=0.1), name="weight")
        self.parameters["b1"] = tf.Variable(tf.constant(0.1, shape=[1, 1]), name="bias")
                
        self.vars.y_predicted = tf.matmul(self.vars.x, tf.transpose(self.parameters["W1"])) + self.parameters["b1"]
        self.saver = tf.train.Saver()
        return self.vars.y_predicted
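For context, the snippet above builds a one-output linear model, y_predicted = x @ W1.T + b1, on a placeholder of width `xlen`. A minimal usage sketch, assuming an instance `model` of the surrounding class with `model.xlen` already set (the instance name and batch size are made up for illustration):

    import numpy as np
    import tensorflow as tf

    y_pred_op = model._create_network()          # builds the placeholders, W1, b1
    with tf.Session() as sess:
        sess.run(tf.initialize_all_variables())  # old-style TF initializer, as in the examples
        X_batch = np.random.randn(8, model.xlen).astype("float32")
        y_pred = sess.run(y_pred_op, feed_dict={model.vars.x: X_batch})
        print(y_pred.shape)                      # (8, 1): one prediction per input row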
Example No. 2
    def _create_network(self):
        self.vars = vardict()
        self.vars.x = tf.placeholder("float", shape=[None, self.xlen])
        self.vars.y = tf.placeholder("float", shape=[None, 1])

        #def fully_connected():
        # Create Model
        self.parameters["W1"] = tf.Variable(tf.truncated_normal([1, self.xlen],
                                                                stddev=0.1),
                                            name="weight")
        self.parameters["b1"] = tf.Variable(tf.constant(0.1, shape=[1, 1]),
                                            name="bias")

        self.vars.y_predicted = tf.matmul(self.vars.x, tf.transpose(
            self.parameters["W1"])) + self.parameters["b1"]
        self.saver = tf.train.Saver()
        return self.vars.y_predicted
Example No. 3
    def _create_network(self):
        self.vars = vardict()
        self.train_time = tf.placeholder(tf.bool, name='train_time')
        self.vars.x = tf.placeholder("float", shape=[None, self.xlen], name="x")
        self.vars.y = tf.placeholder("float", shape=[None, 3], name="y")

        self.vars.x = batch_norm(self.vars.x, self.train_time)

        # Create Model
        self.parameters["decay_rate"] = tf.Variable(tf.constant(0.1, shape=[1]),
                                                    name="decay_rate")
        self.parameters["synthesis_rate"] = tf.Variable(tf.constant(0.1, shape=[1, 1]),
                                                        name="synthesis_rate")

        self.parameters["z0"] = tf.Variable(tf.truncated_normal(
                                [self.n_samples, 1], stddev=0.1), name="z0")

        self.parameters["effect_size_act"] = tf.Variable(tf.truncated_normal(
                                [self.xlen, 1], stddev=0.1), name="effect_size")
        #self.parameters["ar_design_matrix"] = tf.concat( [np.zeros((self.N, self.N)), np.eye(self.N), np.eye(self.N)],)
        #tf.Variable(tf.truncated_normal([1, self.xlen], stddev=0.1), name="effect_size")

        # A: decay factors [1, exp(-d), exp(-2*d)] for lags 0, 1, 2 -> shape [3]
        A = tf.concat(0, [tf.Variable(tf.constant(1.0, shape=[1])),
                          tf.exp(-self.parameters["decay_rate"]),
                          tf.exp(-2 * self.parameters["decay_rate"])],
                      name="A")
        # decay: [ N x 3 ]
        carryover_amount = A * self.parameters["z0"]
        # synthesis: [ N x 3 ]
        print("x", self.vars.x.get_shape(), file=sys.stderr)
        print("beta", self.parameters["effect_size_act"].get_shape(), file=sys.stderr)
        new_amount = (1 - A) * tf.matmul(self.vars.x,
                                         self.parameters["effect_size_act"])

        self.vars.y_predicted = carryover_amount + new_amount
        self.saver = tf.train.Saver()
        return self.vars.y_predicted
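For reference, the prediction above is a carryover-plus-synthesis model: with decay factors A = [1, exp(-d), exp(-2d)] over three time points, y = A * z0 + (1 - A) * (X @ beta), where the [N, 1] terms broadcast against the length-3 vector A. A small NumPy sketch of the same arithmetic, with made-up sizes, just to make the broadcasting explicit:

    import numpy as np

    n_samples, xlen = 4, 7                         # illustrative sizes only
    decay_rate = 0.1
    X = np.random.randn(n_samples, xlen)           # covariates (vars.x)
    z0 = np.random.randn(n_samples, 1)             # initial amount per sample (z0)
    beta = np.random.randn(xlen, 1)                # effect sizes (effect_size_act)

    A = np.array([1.0, np.exp(-decay_rate), np.exp(-2 * decay_rate)])  # shape [3]
    carryover = A * z0                             # [N, 1] * [3] -> [N, 3]
    synthesis = (1 - A) * (X @ beta)               # [N, 1] * [3] -> [N, 3]
    y_predicted = carryover + synthesis            # [N, 3]: one column per time point
    print(y_predicted.shape)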
Example No. 4
    def _init_vars(self):
        if (not hasattr(self, "vars")) or len(self.vars.keys()) == 0:
            self.vars = vardict()
Example No. 5
    def fit(self, train_X, train_Y, test_X=None, test_Y=None, load=True,
            epochs=0, d_epochs=2, g_epochs=1):
        if epochs > 0:
            self.epochs = epochs

        self.last_ckpt_num = 0
        self.train = True
        #self.X = train_X
        self.xlen = train_X.shape[1]
        self.r2_progress = []
        self.train_summary = []
        self.test_summary = []
        yvar = train_Y.var()
        #print("variance(y) = ", yvar, file = sys.stderr)
        # n_samples = y.shape[0]
        g = tf.Graph()
        with g.as_default():
            self.vars = vardict()
            x_gen = self._create_network_g()
            y_predicted = self._create_network_d(x_gen)
            info(" created discr model ")

            """ Discriminative  training"""
            discriminative_tot_loss = self._create_loss()
            d_train_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "discriminative" )
            d_train_op = self.optimizer( self.learning_rate ).minimize( discriminative_tot_loss , var_list= d_train_vars)

            """ Generative training """
            "! loss is negative "
            g_total_loss = - self._create_loss() 
            g_train_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "generative")
            g_train_op = self.optimizer( self.learning_rate ).minimize( g_total_loss , var_list= g_train_vars)

            """ must be called from within a graph scope """
            sess_config = tf.ConfigProto(inter_op_parallelism_threads=self.NUM_CORES,
                                       intra_op_parallelism_threads= self.NUM_CORES)
            # Initializing the variables
            init = tf.initialize_all_variables()
            with tf.Session(config=sess_config) as sess:
                if load:
                    self._load_(sess)
                else:
                    sess.run(init)
                """ x_gen == self.vars.x_predicddted, """
                if not ("keep_prob" in self.vars or hasattr( self.vars, "keep_prob") ):
                    self.dropout = 0.0
                # Merge all the summaries and write them out
                summary_op = tf.merge_all_summaries()

                # Initializing the variables
                init = tf.initialize_all_variables()
                # training per se

                # write summaries out
                summary_writer = tf.train.SummaryWriter("./tmp/mnist_logs", sess.graph_def)
                summary_proto = tf.Summary()
                # Fit all training data
                print("training epochs: %u ... %u, saving each %u' epoch" % \
                        (self.last_ckpt_num, self.last_ckpt_num + self.epochs, self.display_step),
                        file = sys.stderr)
                for macro_epoch in tqdm(range(self.last_ckpt_num // self.display_step,
                                              (self.last_ckpt_num + self.epochs) // self.display_step)):
                    for subepoch in tqdm(range(self.display_step)):
                        self._train(sess, train_X,
                                    discriminative={"train_op": d_train_op, "epochs": d_epochs},
                                    generative={"train_op": g_train_op, "epochs": g_epochs})

                        """ END OF EPOCH """
                    epoch = macro_epoch * self.display_step

                    """ Display logs once in `display_step` epochs """
                    """
                    _sets_ = {"train" :  train_feed_dict }
                    summaries = {}
                    summaries_plainstr = []
                    if (test_feed_dict is not None):
                        if all( (type(x) is str for x  in test_feed_dict.keys()) ):
                            _sets_[ "test" ] = { self.vars[ kk ] : vv for kk, vv  in test_feed_dict }

                    for _set_, feed_dict in  _sets_.items():
                        if self.dropout:
                            feed_dict[ self.vars.keep_prob ] = self.dropout
                        summary_str = sess.run(summary_op, feed_dict=feed_dict)
                        summary_writer.add_summary(summary_str, epoch)
                        summary_d = summary_dict(summary_str, summary_proto)
                        summaries[_set_] = summary_d
                        #summary_d["epoch"] = epoch

                        summaries_plainstr.append(  "\t".join(["",_set_] +["{:s}: {:.4f}".format(k,v) if type(v) is float else "{:s}: {:s}".format(k,v) for k,v in summary_d.items() ]) )

                        self.train_summary.append( summaries["train"] )
                        if  "test" in summaries:
                            self.test_summary.append( summaries["test"] )

                        logstr = "Epoch: {:4d}\t".format(epoch) + "\n"+ "\n".join(summaries_plainstr)
                        print(logstr, file = sys.stderr )
                        """
                    self.saver.save(sess, self.checkpoint_dir + '/' + 'model.ckpt',
                                    global_step=epoch)
                    self.last_ckpt_num = epoch
                        
                print("Optimization finished!", file = sys.stderr)

        return
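For orientation, the loop above alternates `d_epochs` updates that minimize the loss over the "discriminative" variable collection with `g_epochs` updates that minimize its negation over the "generative" collection, i.e. an adversarial min-max arrangement. A framework-free sketch of that schedule, where `d_step` and `g_step` are stand-ins for running the two `minimize` ops (the names are illustrative, not part of the original class):

    def alternate_training(d_step, g_step, epochs, d_epochs=2, g_epochs=1):
        """Alternate discriminative and generative updates, one epoch at a time."""
        for _ in range(epochs):
            for _ in range(d_epochs):   # steps that minimize the loss
                d_step()
            for _ in range(g_epochs):   # steps that minimize the negated loss
                g_step()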
Example No. 6
    def _init_vars(self):
        if (not hasattr(self, "vars")) or len(self.vars.keys()) == 0:
            self.vars = vardict()
Example No. 7
    def fit(self,
            train_X,
            train_Y,
            test_X=None,
            test_Y=None,
            load=True,
            epochs=0,
            d_epochs=2,
            g_epochs=1):
        if epochs > 0:
            self.epochs = epochs

        self.last_ckpt_num = 0
        self.train = True
        #self.X = train_X
        self.xlen = train_X.shape[1]
        self.r2_progress = []
        self.train_summary = []
        self.test_summary = []
        yvar = train_Y.var()
        #print("variance(y) = ", yvar, file = sys.stderr)
        # n_samples = y.shape[0]
        g = tf.Graph()
        with g.as_default():
            self.vars = vardict()
            x_gen = self._create_network_g()
            y_predicted = self._create_network_d(x_gen)
            info(" created discr model ")
            """ Discriminative  training"""
            discriminative_tot_loss = self._create_loss()
            d_train_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                             "discriminative")
            d_train_op = self.optimizer(self.learning_rate).minimize(
                discriminative_tot_loss, var_list=d_train_vars)
            """ Generative training """
            "! loss is negative "
            g_total_loss = -self._create_loss()
            g_train_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                             "generative")
            g_train_op = self.optimizer(self.learning_rate).minimize(
                g_total_loss, var_list=g_train_vars)
            """ must be called from within a graph scope """
            sess_config = tf.ConfigProto(
                inter_op_parallelism_threads=self.NUM_CORES,
                intra_op_parallelism_threads=self.NUM_CORES)
            # Initializing the variables
            init = tf.initialize_all_variables()
            with tf.Session(config=sess_config) as sess:
                if load:
                    self._load_(sess)
                else:
                    sess.run(init)
                """ x_gen == self.vars.x_predicddted, """
                if not ("keep_prob" in self.vars
                        or hasattr(self.vars, "keep_prob")):
                    self.dropout = 0.0
                # Merge all the summaries and write them out
                summary_op = tf.merge_all_summaries()

                # Initializing the variables
                init = tf.initialize_all_variables()
                " training per se"

                # write summaries out
                summary_writer = tf.train.SummaryWriter(
                    "./tmp/mnist_logs", sess.graph_def)
                summary_proto = tf.Summary()
                # Fit all training data
                print("training epochs: %u ... %u, saving each %u' epoch" % \
                        (self.last_ckpt_num, self.last_ckpt_num + self.epochs, self.display_step),
                        file = sys.stderr)
                for macro_epoch in tqdm(
                        range(self.last_ckpt_num // self.display_step,
                              (self.last_ckpt_num + self.epochs) //
                              self.display_step)):
                    for subepoch in tqdm(range(self.display_step)):
                        self._train(sess,
                                    train_X,
                                    discriminative={
                                        "train_op": d_train_op,
                                        "epochs": d_epochs
                                    },
                                    generative={
                                        "train_op": g_train_op,
                                        "epochs": g_epochs
                                    })
                        """ END OF EPOCH """
                    epoch = macro_epoch * self.display_step
                    """ Display logs once in `display_step` epochs """
                    """
                    _sets_ = {"train" :  train_feed_dict }
                    summaries = {}
                    summaries_plainstr = []
                    if (test_feed_dict is not None):
                        if all( (type(x) is str for x  in test_feed_dict.keys()) ):
                            _sets_[ "test" ] = { self.vars[ kk ] : vv for kk, vv  in test_feed_dict }

                    for _set_, feed_dict in  _sets_.items():
                        if self.dropout:
                            feed_dict[ self.vars.keep_prob ] = self.dropout
                        summary_str = sess.run(summary_op, feed_dict=feed_dict)
                        summary_writer.add_summary(summary_str, epoch)
                        summary_d = summary_dict(summary_str, summary_proto)
                        summaries[_set_] = summary_d
                        #summary_d["epoch"] = epoch

                        summaries_plainstr.append(  "\t".join(["",_set_] +["{:s}: {:.4f}".format(k,v) if type(v) is float else "{:s}: {:s}".format(k,v) for k,v in summary_d.items() ]) )

                        self.train_summary.append( summaries["train"] )
                        if  "test" in summaries:
                            self.test_summary.append( summaries["test"] )

                        logstr = "Epoch: {:4d}\t".format(epoch) + "\n"+ "\n".join(summaries_plainstr)
                        print(logstr, file = sys.stderr )
                        """
                    self.saver.save(sess,
                                    self.checkpoint_dir + '/' + 'model.ckpt',
                                    global_step=epoch)
                    self.last_ckpt_num = epoch

                print("Optimization finished!", file=sys.stderr)

        return

    def _create_network(self):
        #print("conv1_channels", self.conv1_channels)
        #print("conv2_channels", self.conv2_channels)
        weight_decay = self.weight_decay
        conv_wd = self.weight_decay
        self.vars = vardict()
        self.train_time = tf.placeholder(tf.bool, name='train_time')
        self.vars.x = tf.placeholder("float", shape=[None, 1, self.xlen, self.xdepth], name="x")
        self.vars.y = tf.placeholder("float", shape=[None, self.xlen], name="y")

        self.vars.x = batch_norm(self.vars.x, is_training=self.train_time)
        print("x placeholder", self.vars.x.get_shape())
        # Need the batch size for the transpose layers.
        batch_size = tf.shape(self.vars.x)[0]

        # Create Model
        with tf.variable_scope('conv1') as scope:
            kernel = _variable_with_weight_decay('weights',
                            shape=[1, 5, self.xdepth, self.conv1_channels],
                            stddev=1e-4, wd=conv_wd)
            conv = tf.nn.conv2d(self.vars.x, kernel, [1, 1, 1, 1], padding='SAME')
            biases = tf.get_variable('biases', [self.conv1_channels],
                                     initializer=tf.constant_initializer(0.01))
            #biases = _variable_on_cpu('biases', [conv1_channels],
            #                          tf.constant_initializer(0.0))
            bias = tf.nn.bias_add(conv, biases)
            conv1 = tf.nn.relu(bias, name=scope.name)
            print("conv1", conv1.get_shape())
            _activation_summary(conv1)
            conv1 = tf.nn.dropout(conv1, 1 - self.dropout)
            if self.batch_norm:
                conv1 = batch_norm(conv1, is_training=self.train_time,
                                   n_out=self.conv1_channels, scope=scope)
        # pool1
        #pool1 = tf.nn.max_pool(conv1, ksize=[1, 1, 3, 1], strides=[1, 1, 2, 1],
        #                       padding='SAME', name='pool1')
        # norm1
        #norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
        #                  name='norm1')
        # conv2
        with tf.variable_scope('conv2') as scope:
            kernel = _variable_with_weight_decay('weights',
                                    shape=[1, 5, self.conv1_channels, self.conv2_channels],
                                    stddev=1e-4, wd=conv_wd)
            conv = tf.nn.conv2d(conv1, kernel, [1, 1, 1, 1], padding='SAME')
            #biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.1))
            biases = tf.get_variable('biases', [self.conv2_channels],
                                     initializer=tf.constant_initializer(0.01))
            bias = tf.nn.bias_add(conv, biases)
            conv2 = tf.nn.relu(bias, name=scope.name)
            print("conv2", conv2.get_shape())
            _activation_summary(conv2)
            conv2 = tf.nn.dropout(conv2, 1 - self.dropout)
            if self.batch_norm:
                conv2 = batch_norm(conv2, is_training=self.train_time,
                               n_out=self.conv2_channels, scope=scope)

        map_sparsity = tf.add(tf.reduce_mean(tf.abs(conv2)),
                              tf.reduce_mean(tf.square(conv2)) / 2,
                              name="map0_sparsity")
        tf.add_to_collection('losses', self.sparsity * map_sparsity)
        # norm2
        #norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
        #                  name='norm2')
        # pool2
        #pool2 = tf.nn.max_pool(norm2, ksize=[1, 1, 3, 1],
        #                       strides=[1, 1, 2, 1], padding='SAME', name='pool2')
        # tconv1 
        #print("tconv1", pool2.get_shape())
        with tf.variable_scope('tconv1') as scope:
            kernel_h = 1
            kernel_w = 5
            stride_h = 1
            stride_w = 1
            pad_h = 1
            pad_w = 1
            kernel = _variable_with_weight_decay('weights',
                            shape=[kernel_h, kernel_w, self.tconv1_channels, self.conv2_channels],
                            stddev=1e-4, wd=conv_wd)
            inpshape = tf.shape(conv2)
            #print(scope.name, "inpshape", inpshape) 
            h = ((inpshape[1] - 1) * stride_h) + kernel_h - 2 * pad_h
            w = ((inpshape[2] - 1) * stride_w) + kernel_w - 2 * pad_w
            #output_shape =  [batch_size, h, w, self.xlen]
            output_shape = [batch_size, (inpshape[1] + stride_h - 1),
                            (inpshape[2] + stride_w - 1), self.tconv1_channels]
            print(scope.name, output_shape)
            output_shape = tf.pack(output_shape)
            tconv1 = tf.nn.conv2d_transpose(conv2, kernel, output_shape,
                                            strides=[1, 1, 1, 1], padding='SAME')
            #tconv1 = batch_norm(tconv1, is_training=self.train_time,
            #                    n_out=self.tconv1_channels, scope=scope)

            _activation_summary(tconv1)

        map_sparsity = tf.add(tf.reduce_mean(tf.abs(tconv1)),
                              tf.reduce_mean(tf.square(tconv1)) / 2,
                              name="map1_sparsity")
        tf.add_to_collection('losses', self.sparsity * map_sparsity)

        with tf.variable_scope('tconv2') as scope:
            kernel_h = 1
            kernel_w = 5
            stride_h = 1
            stride_w = 1
            pad_h = 1
            pad_w = 1
            output_channels = 1
            kernel = _variable_with_weight_decay('weights',
                            shape=[kernel_h, kernel_w,
                                   output_channels, self.tconv1_channels],
                            stddev=1e-4, wd=conv_wd)
            inpshape = tf.shape(tconv1)
            h = ((inpshape[1] - 1) * stride_h) + kernel_h - 2 * pad_h
            w = ((inpshape[2] - 1) * stride_w) + kernel_w - 2 * pad_w
            #output_shape =  [batch_size, h, w, self.xlen]
            output_shape = [batch_size, (inpshape[1] + stride_h - 1),
                            (inpshape[2] + stride_w - 1), output_channels]
            print(scope.name, output_shape)
            output_shape = tf.pack(output_shape)
            tconv2 = tf.nn.conv2d_transpose(tconv1, kernel, output_shape,
                                            strides=[1, 1, 1, 1], padding='SAME')
            #tconv2 = batch_norm(tconv2, is_training=self.train_time)#, n_out=output_channels, scope=scope)
            tconv2 = tf.reshape(tconv2, [-1, self.xlen])
            _activation_summary(tconv2)

        self.vars.y_predicted = tconv2
        #self.vars.y_predicted = tf.reshape(self.vars.y_predicted, [-1, 1])

        #self.vars.y_predicted = gts * 1e-2
        self.saver = tf.train.Saver()
        return self.vars.y_predicted
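The two `map*_sparsity` terms added to the `'losses'` collection above are an elastic-net-style activation penalty, mean(|a|) + mean(a^2)/2, scaled by `self.sparsity`. A small NumPy sketch of that penalty on a dummy feature map (the `weight` argument stands in for `self.sparsity` and is illustrative only):

    import numpy as np

    def activation_sparsity(act, weight=1e-3):
        """Elastic-net-style penalty: mean absolute value plus half the mean square."""
        return weight * (np.mean(np.abs(act)) + np.mean(act ** 2) / 2.0)

    feature_map = np.random.randn(8, 1, 32, 16)    # dummy [batch, 1, width, channels] activations
    print(activation_sparsity(feature_map))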