Code example #1
    def __call__(self, input_pl):
        """Callable object: applies this layer to an input placeholder.
        Args:
            input_pl: input placeholder
        """
        if not isinstance(input_pl, Input):
            raise ValueError('input_pl must be an instance of Input')

        # take the placeholder's shape as this layer's input shape
        self.set_input_shape(input_pl.shape)
        # the layer's output shape is derived from the input shape
        output_shape = self.get_output_shape()
        # create an output placeholder wired back to this layer
        return Input(output_shape, [input_pl], self)
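The __call__ above turns a layer into a function over placeholders, so a network can be built by chaining calls. The following is a minimal, self-contained sketch of that contract; the Dense layer and the exact Input constructor are assumptions for illustration, not the project's actual classes.

class Input:
    def __init__(self, shape, parents=None, layer=None):
        self.shape = shape              # shape this placeholder stands for
        self.parents = parents or []    # upstream placeholders
        self.layer = layer              # layer that produced this placeholder

class Dense:  # hypothetical layer, only the __call__ contract comes from the example
    def __init__(self, units):
        self.units = units
        self.input_shape = None

    def set_input_shape(self, shape):
        self.input_shape = shape

    def get_output_shape(self):
        # keep the batch dimension, replace the feature dimension
        return (self.input_shape[0], self.units)

    def __call__(self, input_pl):
        if not isinstance(input_pl, Input):
            raise ValueError('input_pl must be an instance of Input')
        self.set_input_shape(input_pl.shape)
        return Input(self.get_output_shape(), [input_pl], self)

x = Input((None, 32))
h = Dense(16)(x)   # h.shape == (None, 16), h.parents == [x]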
Code example #2
File: importhandler.py  Project: pybender/cloudml
    def load_inputs(self, config):
        """
        Loads the dictionary of input parameters
        from the import handler configuration.
        """
        if not hasattr(config, "inputs"):
            logging.debug("No input parameters declared")
            return

        inputs_conf = config.inputs
        if inputs_conf is not None:
            for param_conf in inputs_conf.xpath("param"):
                inp = Input(param_conf)
                self.inputs[inp.name] = inp
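load_inputs only requires that config.inputs answer an xpath("param") query. Below is a hedged sketch, using lxml.objectify, of a configuration shape that would satisfy that call; the actual cloudml schema, and the attributes Input(param_conf) reads, are not shown here and may differ.

from lxml import objectify

# hypothetical config fragment with an <inputs> element holding <param> children
config = objectify.fromstring(
    "<plan>"
    "<inputs>"
    "<param name='start' type='date'/>"
    "<param name='end' type='date'/>"
    "</inputs>"
    "</plan>"
)

# mirrors the lookup performed by load_inputs above
for param_conf in config.inputs.xpath("param"):
    print(param_conf.get("name"), param_conf.get("type"))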
Code example #3
    def __call__(self, input_pl1, input_pl2):
        """Callable method that adds two layers element-wise.
        """
        if not isinstance(input_pl1, Input) or not isinstance(input_pl2, Input):
            raise ValueError('input_pl1 and input_pl2 must both be Input instances')

        if input_pl1.shape != input_pl2.shape:
            raise ValueError('input_pl1 and input_pl2 must have the same shape')

        # the shared shape of both placeholders is this layer's input shape
        self.set_input_shape(input_pl1.shape)
        output_shape = self.get_output_shape()

        # create an output placeholder with both inputs as parents
        return Input(output_shape, [input_pl1, input_pl2], self)
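Combined with the single-input version from example #1, this two-input __call__ lets a merge layer record both parents. A compact, self-contained sketch of how it would be used (the Add class is hypothetical; only the calling convention comes from the example):

class Input:
    def __init__(self, shape, parents=None, layer=None):
        self.shape = shape
        self.parents = parents or []
        self.layer = layer

class Add:  # hypothetical element-wise merge layer
    def set_input_shape(self, shape):
        self.input_shape = shape

    def get_output_shape(self):
        # element-wise addition preserves the operand shape
        return self.input_shape

    def __call__(self, input_pl1, input_pl2):
        if not isinstance(input_pl1, Input) or not isinstance(input_pl2, Input):
            raise ValueError('input_pl1 and input_pl2 must both be Input instances')
        if input_pl1.shape != input_pl2.shape:
            raise ValueError('input_pl1 and input_pl2 must have the same shape')
        self.set_input_shape(input_pl1.shape)
        return Input(self.get_output_shape(), [input_pl1, input_pl2], self)

a = Input((None, 16))
b = Input((None, 16))
s = Add()(a, b)   # s.shape == (None, 16), s.parents == [a, b]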
Code example #4
from perceptron import Perceptron, BIAS
from inputs import Input
from random import randint

perceptron = Perceptron()

# training inputs: points around 2*i + 2 with small random noise
train_datas = [Input(2 * i + 2 + randint(-10, 10) / 100) for i in range(10)]

expected = 1.0

EPOCHS = 1
for epoch in range(EPOCHS):
    for train_data in train_datas:
        print(f'result: {perceptron.run(train_data, BIAS)}')
        perceptron.train(expected, train_data, BIAS)
Code example #5
File: keyboard_input.py  Project: devgr/asciicave
    def get_input(self):
        return Input(player_up=False,
                     player_down=False,
                     player_left=False,
                     player_right=False)
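Here Input is just a record of directional flags rather than a tensor placeholder. One plausible definition, assuming a namedtuple-style container (the real asciicave class may differ):

from collections import namedtuple

# hypothetical definition matching the keyword arguments used above
Input = namedtuple('Input', ['player_up', 'player_down', 'player_left', 'player_right'])

idle = Input(player_up=False, player_down=False,
             player_left=False, player_right=False)
print(idle.player_left)   # False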
Code example #6
    def __init__(self, log, conf=None):
        Input.__init__(self, log)
        self.es = {}
        # register every configured source, if any were given
        if conf:
            for c in conf:
                self.add_source(c)
Code example #7
File: doLearning.py  Project: ice871117/python_all
RESTORE = False

# if True, train; otherwise restore the cached model and skip training
TRAIN = True

# publish result or just tuning
PUBLISH = not TRAIN

# remember when we start
start_time = time.time()
print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))

sess = tf.InteractiveSession()
train_data_source = Input(WORKING_PATH,
                          TRAIN_FILE,
                          shuffle=True,
                          loop=True, temp_file_path=TEMP_FILE,
                          n_mfcc=FLATTEN_SIZE_W,
                          fixed_sample=FLATTEN_SIZE_H)
CATEGORY_NUM = train_data_source.get_category_num()
# tf Graph Input
x = tf.placeholder(tf.float32, [None, FLATTEN_SIZE_W, FLATTEN_SIZE_H])
y_ = tf.placeholder(tf.float32, [None, CATEGORY_NUM])
keep_prob = tf.placeholder(tf.float32)

x_audio = tf.reshape(x, [-1, FLATTEN_SIZE_W, FLATTEN_SIZE_H, 1])

# conv layer 1
conv_1 = model.conv2d_layer("cnn_1", x_audio, [5, 5], 1, 64, use_BN=True, training=TRAIN)
# conv layer 2
conv_2 = model.conv2d_layer("cnn_2", conv_1, [3, 3], 64, 128, pool_size=[2, 2], use_BN=True, training=TRAIN)
# conv layer 3
Code example #8
            weight = weight - self.learning_rate * self.error_fn_deriv(
                inputs[i])
            new_weights.append(weight)
        self.weights = new_weights

    def train(self, expected, *inputs):
        '''
        The train function is a shorthand for xtrain;
        it applies to all activation functions that do not need extra arguments.
        '''
        return self.xtrain([], expected, *inputs)

    def xtrain(self, activation_args: list, expected, *inputs):
        '''
        Takes extra arguments for the activation function as well as the input signals;
        if no extra arguments are needed, use the shorthand function train instead.
        Input signals should be objects of the Input class, which exposes a numeric value parameter.
        Trains the perceptron.
        '''
        self._run(inputs)
        print(f'Error before correction: {self.sqr_error(expected)}')
        self.activation_deriv = activation_args
        self.error_fn_deriv = expected
        self.update_weights(self._inputs)
        # sanity check: rerun with the updated weights
        self._run(inputs)
        print(f'Error after correction: {self.sqr_error(expected)}')


BIAS = Input(1)
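For the perceptron code in examples #4 and #8, the xtrain docstring says input signals are Input objects exposing a numeric value. A minimal sketch under that assumption (the real inputs.py may add more behaviour):

class Input:
    def __init__(self, value):
        self.value = float(value)   # numeric signal fed to the perceptron

sample = Input(0.5)
print(sample.value)   # 0.5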