Example #1
 def __init__(self, predit_funct=None):
     Callback.__init__(self)
     # output_notebook()
     self.loss = np.array([])
     self.psnrs = np.array([])
     output_server("line")
     self.imagew = 512
     self.min_loss = 10000
     self.predit_funct = predit_funct
     self.p = figure()
     self.p2 = figure()
     self.x = np.array([])
     self.y = np.array([])
     self.bx = np.array([])
     self.by = np.array([])
     self.cx = np.array([])
     self.epochNo = 0
     self.p.line(self.x, self.y, name='line', color="tomato", line_width=2)
     self.p.line(self.bx, self.by, name='batch_line', color="blue", line_width=2)
     self.p2.line(self.cx, self.psnrs, name='psnr', color="green", line_width=2)
     show(self.p)
     # show(self.p2)
     # self.p2 = figure(x_range=[0, self.imagew], y_range=[0, self.imagew])
     # self.p2.image_rgba(name='image', image=[np.array((self.imagew, self.imagew), dtype='uint32')], x=0, y=0, dw=self.imagew, dh=self.imagew)
     # show(self.p2)
     self.psnr = 0
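Example #1 depends on the pre-0.11 bokeh-server API (`output_server`, live `figure` updates), which no longer exists in current Bokeh releases. Below is a minimal sketch of the same loss-tracking idea using matplotlib instead; the class name `LossPlotCallback`, the matplotlib backend, and the callback method bodies are assumptions, not part of the original example.

# Minimal sketch of the same loss-tracking idea without the retired
# bokeh-server API; class name and method bodies are assumptions.
import numpy as np
import matplotlib.pyplot as plt
from keras.callbacks import Callback

class LossPlotCallback(Callback):
    def __init__(self, predit_funct=None):
        Callback.__init__(self)
        self.predit_funct = predit_funct
        self.batch_losses = []   # loss recorded after every batch
        self.epoch_losses = []   # loss recorded after every epoch
        self.min_loss = np.inf

    def on_batch_end(self, batch, logs=None):
        self.batch_losses.append((logs or {}).get('loss'))

    def on_epoch_end(self, epoch, logs=None):
        loss = (logs or {}).get('loss')
        self.epoch_losses.append(loss)
        if loss is not None and loss < self.min_loss:
            self.min_loss = loss

    def on_train_end(self, logs=None):
        # Draw the per-batch loss curve once training finishes.
        plt.plot(self.batch_losses, color='tomato', label='batch loss')
        plt.xlabel('batch')
        plt.ylabel('loss')
        plt.legend()
        plt.show()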
Example #2
    def __init__(self, name, fig_title, url):
        """
        fig_title: Figure Title
        url : str, optional
            Url of the bokeh-server. Ex: when starting the bokeh-server with
            ``bokeh-server --ip 0.0.0.0`` at ``alice``, server_url should be
            ``http://alice:5006``. When not specified the default configured
            by ``bokeh_server`` in ``.blocksrc`` will be used. Defaults to
            ``http://localhost:5006/``.

        Reference: mila-udem/blocks-extras
        """
        Callback.__init__(self)
        self.name = name
        self.fig_title = fig_title
        self.plots = []
        output_server(name, url=url)
        cursession().publish()
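Like Example #1, this constructor relies on the retired bokeh-server API (`output_server`, `cursession().publish()`), so it only works with old Bokeh releases. A usage sketch is shown below; `BokehPlot` is a placeholder name for the (unnamed) class this `__init__` belongs to, and a bokeh-server is assumed to be running at the given URL.

# Usage sketch only; BokehPlot stands in for the class above and a
# bokeh-server must already be running at the given URL.
import numpy as np
from keras.models import Sequential
from keras.layers import Dense

model = Sequential([Dense(1, input_shape=(4,))])
model.compile(optimizer='sgd', loss='mse')

plot_cb = BokehPlot(name='demo-run', fig_title='Training loss',
                    url='http://localhost:5006/')
model.fit(np.random.rand(32, 4), np.random.rand(32, 1),
          epochs=5, callbacks=[plot_cb])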
Example #3
File: callbacks.py Project: kelvict/seya
 def __init__(self, W, transpose=False):
     Callback.__init__(self)
     self.W = W
     self.W_shape = self.W.get_value().shape
     self.transpose = transpose
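Examples #3 and #9 only show the constructor of a callback that holds a shared weight tensor `W`. A plausible completion that snapshots the weights at the end of each epoch is sketched below; the class name `WeightSnapshot`, the `history` attribute, and the `on_epoch_end` body are assumptions, and the actual kelvict/seya implementation may differ.

# Plausible completion; WeightSnapshot and on_epoch_end are assumptions.
import numpy as np
from keras.callbacks import Callback

class WeightSnapshot(Callback):
    def __init__(self, W, transpose=False):
        Callback.__init__(self)
        self.W = W                               # shared variable, e.g. a layer kernel
        self.W_shape = self.W.get_value().shape  # works for Theano shared variables
        self.transpose = transpose
        self.history = []

    def on_epoch_end(self, epoch, logs=None):
        value = self.W.get_value()
        if self.transpose:
            value = value.T
        self.history.append(np.copy(value))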
Example #4
 def __init__(self, val=False):
     Callback.__init__(self)
     self.val = val
     self.losses = []
     self.batch_losses = []
     self.batch_accs = []
Example #5
 def __init__(self, batch_size, **kwargs):
     Callback.__init__(self, **kwargs)
     self.batch_size = batch_size
Example #6
 def __init__(self, cv_number):
     Callback.__init__(self)
     self.cv_number = cv_number
Example #7
	def __init__(self, X_test, Y_test):
		Callback.__init__(self)
		self.X_test = X_test
		self.Y_test = Y_test
Example #8
 def __init__(self):
     Callback.__init__(self)
     self.losses = []
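Example #8 is the start of the classic loss-history callback from the Keras documentation. The usual completion plus an illustrative fit call look like this (the toy model and random data are illustrative only):

# Typical completion of a loss-history callback plus an illustrative fit call.
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.callbacks import Callback

class LossHistory(Callback):
    def __init__(self):
        Callback.__init__(self)
        self.losses = []

    def on_batch_end(self, batch, logs=None):
        # Record the training loss after every batch.
        self.losses.append((logs or {}).get('loss'))

model = Sequential([Dense(1, input_shape=(8,))])
model.compile(optimizer='sgd', loss='mse')

history_cb = LossHistory()
model.fit(np.random.rand(64, 8), np.random.rand(64, 1),
          epochs=3, callbacks=[history_cb], verbose=0)
print(history_cb.losses[:5])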
Example #9
 def __init__(self, W, transpose=False):
     Callback.__init__(self)
     self.W = W
     self.W_shape = self.W.get_value().shape
     self.transpose = transpose
Example #10
 def __init__(self, outputDir, model):
     Callback.__init__(self)
     self.djmodel = model
     self.outputDir = outputDir
Example #11
 def __init__(self, print_fcn=print):
     Callback.__init__(self)
     self.print_fcn = print_fcn
Example #12
 def __init__(self, dir_name):
     Callback.__init__(self)
     # Create a saver.
     self.saver = tf.train.Saver()
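Example #12 builds a `tf.train.Saver`, which is graph-mode (TF1-style) TensorFlow and requires the model's variables to already exist when the callback is constructed. A hedged completion that checkpoints into `dir_name` at the end of each epoch could look like the sketch below; the class name `CheckpointSaver`, storing `dir_name`, and the `on_epoch_end` body are assumptions.

# Hedged TF1-style sketch; CheckpointSaver and its on_epoch_end are assumptions.
import os
import tensorflow as tf
from keras import backend as K
from keras.callbacks import Callback

class CheckpointSaver(Callback):
    def __init__(self, dir_name):
        Callback.__init__(self)
        self.dir_name = dir_name
        # Create a saver (the graph and its variables must already exist).
        self.saver = tf.train.Saver()

    def on_epoch_end(self, epoch, logs=None):
        path = os.path.join(self.dir_name, 'model-{:04d}.ckpt'.format(epoch))
        # Save the current session's variables as a TF1 checkpoint.
        self.saver.save(K.get_session(), path)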
Example #13
    instrument_test_spec_2 = np.abs(instrument_test_2)

    instrument_spec = np.concatenate((instrument_spec_1, instrument_spec_2),
                                     axis=1)

    instrument_dev_spec = np.concatenate(
        (instrument_dev_spec_1, instrument_dev_spec_2), axis=1)

    instrument_test_spec = np.concatenate(
        (instrument_test_spec_1, instrument_test_spec_2), axis=1)

    #fit
    batch_size = 256
    nb_epoch = 200

    Callback()

    model = Sequential()
    model.add(Dense(100, input_shape=(257, )))
    #model.add(Dense(50, input_shape=(257,)))
    model.add(Activation('relu'))
    #model.add(Dropout(0.2))

    model.add(Dense(100))
    model.add(Activation('relu'))
    # model.add(Dropout(0.2))

    model.add(Dense(200))
    model.add(Activation('relu'))

    model.add(Dense(514))
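Example #13 cuts off after the last `Dense(514)` layer, before the activation, compile, and fit calls that the `batch_size`/`nb_epoch` settings are preparing for. A hedged continuation is sketched below; the optimizer, loss, the `LossHistory` callback, and the random placeholder arrays are assumptions standing in for the script's real spectrogram features.

    # Hedged continuation; optimizer, loss, callback, and placeholder data are
    # assumptions, not the original script's choices.
    import numpy as np
    from keras.callbacks import Callback

    class LossHistory(Callback):
        def on_train_begin(self, logs=None):
            self.losses = []

        def on_epoch_end(self, epoch, logs=None):
            self.losses.append((logs or {}).get('loss'))

    model.compile(loss='mean_squared_error', optimizer='adam')

    x_dummy = np.random.rand(1024, 257)   # placeholder input spectra
    y_dummy = np.random.rand(1024, 514)   # placeholder stacked target spectra
    model.fit(x_dummy, y_dummy,
              batch_size=batch_size, epochs=nb_epoch,
              callbacks=[LossHistory()], verbose=1)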
Example #14
 def __init__(self, logger: Logger) -> None:
     Callback.__init__(self)
     self.logger = logger
     self.format_epoch = 'Epoch: {} - {}'
     self.format_keyvalue = '{}: {:0.4f}'
     self.format_separator = ' - '
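Example #14 only stores a `Logger` and three format strings; how they are used is not shown. A plausible `on_epoch_end` that joins the metric key/value pairs with these formats is sketched below (the class name `LoggerCallback` and the method body are assumptions):

# Illustrative completion; LoggerCallback and on_epoch_end are assumptions.
from logging import Logger
from keras.callbacks import Callback

class LoggerCallback(Callback):
    def __init__(self, logger: Logger) -> None:
        Callback.__init__(self)
        self.logger = logger
        self.format_epoch = 'Epoch: {} - {}'
        self.format_keyvalue = '{}: {:0.4f}'
        self.format_separator = ' - '

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        # Produces e.g. "Epoch: 3 - loss: 0.1234 - acc: 0.9876"
        metrics = self.format_separator.join(
            self.format_keyvalue.format(key, value) for key, value in logs.items())
        self.logger.info(self.format_epoch.format(epoch, metrics))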
Example #15
 def __init__(self, model_handler):
     Callback.__init__(self)
     self.model_handler = model_handler
Example #16
    def train(self, X, Y, epoch_ypred=False, epoch_xtest=None):
        """ Fit the neural network model, save additional stats (as attributes) and return Y predicted values.

        Parameters
        ----------
        X : list of two array-likes, each of shape [n_samples, n_features_i]
            Predictor variables split into two blocks (X[0] and X[1]), where n_samples is the number of samples and n_features_i is the number of predictors in block i.

        Y : array-like, shape = [n_samples, 1]
            Response variables, where n_samples is the number of samples.

        Returns
        -------
        y_pred_train : array-like, shape = [n_samples, 1]
            Predicted y score for samples.
        """

        # If batch-size is None:
        if self.batch_size is None:
            self.batch_size = len(X)

        X1 = X[0]
        X2 = X[1]

        # Layer for X1
        input_X1 = Input(shape=(len(X1.T), ))
        layer1_X1 = Dense(self.n_neurons_l1, activation="sigmoid")(input_X1)
        layer1_X1 = Model(inputs=input_X1, outputs=layer1_X1)

        # Layer for X2
        input_X2 = Input(shape=(len(X2.T), ))
        layer1_X2 = Dense(self.n_neurons_l1, activation="sigmoid")(input_X2)
        layer1_X2 = Model(inputs=input_X2, outputs=layer1_X2)

        # Concatenate
        concat = concatenate([layer1_X1.output, layer1_X2.output])
        #model_concat = Dense(self.n_neurons_l2, activation="sigmoid")(concat)
        model_concat = Dense(1, activation="sigmoid")(concat)

        self.model = Model(inputs=[layer1_X1.input, layer1_X2.input],
                           outputs=model_concat)
        self.model.compile(optimizer=self.optimizer,
                           loss=self.loss,
                           metrics=["accuracy"])

        # If epoch_ypred is True, calculate ypred for each epoch
        if epoch_ypred is True:
            self.epoch = YpredCallback(self.model, X, epoch_xtest)
        else:
            self.epoch = Callback()

        # Fit
        self.model.fit([X1, X2],
                       Y,
                       epochs=self.n_epochs,
                       batch_size=self.batch_size,
                       verbose=self.verbose,
                       callbacks=[self.epoch])

        # Not sure about the naming scheme (trying to match PLS)
        y_pred_train = self.model.predict(X).flatten()

        # Storing X, Y, and Y_pred
        self.Y_pred = y_pred_train
        self.X = X
        self.Y = Y
        return y_pred_train
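The `train` method above expects `X` to be a list of two predictor blocks (it indexes `X[0]` and `X[1]`). A hedged usage sketch with random data is shown below; the wrapper class name `TwoBlockNN` and its constructor arguments are assumptions about the surrounding class, which is not shown here.

# Usage sketch only; TwoBlockNN and its hyperparameter names are assumptions.
import numpy as np

X1 = np.random.rand(100, 20)            # first block of predictors
X2 = np.random.rand(100, 15)            # second block of predictors
Y = np.random.randint(0, 2, (100, 1))   # binary response

nn = TwoBlockNN(n_neurons_l1=8, n_epochs=50, batch_size=32,
                optimizer='adam', loss='binary_crossentropy', verbose=0)
y_pred_train = nn.train([X1, X2], Y)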