def Model_Structure_click(self):
    self.get_current_path()
    net, device = model_build(resume=True)
    for i in net._modules.items():
        self.textBrowser_image_3.append(str(i))
    self.textBrowser_image_3.append(' ')

    # Counters for the total, trainable, and non-trainable parameter counts
    Total_params = 0
    Trainable_params = 0
    NonTrainable_params = 0
    # Walk the global parameter list returned by model.parameters()
    for param in net.parameters():
        mulValue = np.prod(param.size())  # product of all dimensions via numpy prod
        Total_params += mulValue  # total parameters
        if param.requires_grad:
            Trainable_params += mulValue  # trainable parameters
        else:
            NonTrainable_params += mulValue  # non-trainable parameters
    self.textBrowser_image_3.append('Total params:        ' +
                                    str(Total_params))
    self.textBrowser_image_3.append('Trainable params:    ' +
                                    str(Trainable_params))
    self.textBrowser_image_3.append('NonTrainable params: ' +
                                    str(NonTrainable_params))
    self.textBrowser_image_3.moveCursor(
        self.textBrowser_image_3.textCursor().End)  # scroll the text box to the bottom
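The counting loop above works for any torch.nn.Module. A minimal self-contained sketch of the same idea, using param.numel() in place of np.prod(param.size()); the toy model here is illustrative, not from the project:

import torch.nn as nn

net = nn.Sequential(nn.Linear(10, 32), nn.ReLU(), nn.Linear(32, 2))

total = sum(p.numel() for p in net.parameters())
trainable = sum(p.numel() for p in net.parameters() if p.requires_grad)
print('Total params:       ', total)
print('Trainable params:   ', trainable)
print('NonTrainable params:', total - trainable)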
Example No. 2
def predict(vocab_size, char2idx, idx2char):
    model = model_build(vocab_size, embedding_dim, rnn_units, batch_size=1)

    model.load_weights(tf.train.latest_checkpoint(checkpoint_dir))

    model.build(tf.TensorShape([1, None]))
    print(generate_text(model, start_string='星 '))
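generate_text is project code not shown in this snippet. A plausible sketch in the style of the TensorFlow character-RNN tutorial, with the vocabulary maps passed explicitly (the project version presumably closes over char2idx/idx2char); num_generate and temperature are illustrative defaults, not taken from the snippet:

import tensorflow as tf

def generate_text(model, start_string, char2idx, idx2char,
                  num_generate=300, temperature=1.0):
    # Seed the model with the start string, one character id per step.
    input_eval = tf.expand_dims([char2idx[c] for c in start_string], 0)
    generated = []
    model.reset_states()  # clear the stateful RNN between generations
    for _ in range(num_generate):
        predictions = model(input_eval)            # (1, seq_len, vocab_size)
        predictions = predictions[:, -1, :] / temperature
        predicted_id = int(tf.random.categorical(predictions, num_samples=1)[0, 0])
        input_eval = tf.expand_dims([predicted_id], 0)  # feed the sample back in
        generated.append(idx2char[predicted_id])
    return start_string + ''.join(generated)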
Example No. 3
def train(dataset, vocab_size):
    model = model_build(vocab_size=vocab_size, embedding_dim=embedding_dim, rnn_units=rnn_units, batch_size=BATCH_SIZE)

    # Configure training
    model.compile(optimizer='adam', loss=loss)

    checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_prefix,
                                                             save_weights_only=True)

    model.fit(dataset, epochs=EPOCHS, callbacks=[checkpoint_callback])
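loss and checkpoint_prefix are defined elsewhere in the project. A plausible sketch, following the usual TensorFlow text-generation setup (the checkpoint directory name is an assumption):

import os
import tensorflow as tf

checkpoint_dir = './training_checkpoints'  # assumed location
checkpoint_prefix = os.path.join(checkpoint_dir, 'ckpt_{epoch}')

def loss(labels, logits):
    # The sequence model emits raw logits, hence from_logits=True.
    return tf.keras.losses.sparse_categorical_crossentropy(
        labels, logits, from_logits=True)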
def Test_click(self):
    self.get_current_path()
    # print('self.index= ' + str(self.index))
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2023, 0.1994, 0.2010)),
    ])
    testset = datasets.CIFAR10(root='./data',
                               train=False,
                               download=True,
                               transform=transform_test)
    # print('testset= ' + str(len(testset)))
    testloader = DataLoader(testset,
                            batch_size=100,
                            shuffle=False,
                            num_workers=2)
    # testloader = DataLoader(testset, batch_size=100, sampler=None, shuffle=False, num_workers=2)
    # print('testloader= ' + str(len(testloader)))
    classes_dict = {
        0: 'airplane',
        1: 'automobile',
        2: 'bird',
        3: 'cat',
        4: 'deer',
        5: 'dog',
        6: 'frog',
        7: 'horse',
        8: 'ship',
        9: 'truck'
    }
    net, device = model_build(resume=True)
    predict(net, testloader, device, classes_dict, self.index)
    img = cv2.imread(self.current_path + "/predict.png")
    # print(img.shape)
    cropped = img[0:1000, 240:730]  # crop coordinates are [y0:y1, x0:x1]
    img = cv2.resize(cropped, (331, 621))
    io.imsave(self.current_path + "/predictPixmap.png", img)
    pix = QPixmap(self.current_path + "/predictPixmap.png")
    self.label_inputimage_17.setPixmap(pix)
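As an aside, the round-trip through predictPixmap.png can be avoided by converting the OpenCV BGR array straight into a QPixmap; a sketch, not from the project:

import cv2
from PyQt5.QtGui import QImage, QPixmap

def cv2_to_qpixmap(img_bgr):
    # Convert BGR (OpenCV channel order) to RGB and wrap the buffer in a QImage.
    rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)
    h, w, _ = rgb.shape
    qimg = QImage(rgb.data, w, h, 3 * w, QImage.Format_RGB888)
    # QPixmap.fromImage copies the data, so rgb does not need to outlive the pixmap.
    return QPixmap.fromImage(qimg)

self.label_inputimage_17.setPixmap(cv2_to_qpixmap(img)) would then replace the imsave/QPixmap pair.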
DIGITS = 4
REVERSE = True
BATCH_SIZE = 128

# Maximum length of input is 'int + int' (e.g., '345+678'). Maximum length of
# int is DIGITS.
MAXLEN = DIGITS + 1 + DIGITS

# All the numbers, plus sign and space for padding.
chars = '0123456789+ '
ctable = CharacterTable(chars)

x_train, y_train, x_val, y_val = read_additions(DIGITS, MAXLEN, chars, ctable)

print('Build model...')
model = model_build(DIGITS, MAXLEN, chars)
model.summary()

# Train the model each generation and show predictions against the validation
# dataset.
for iteration in range(1, 20):
    print()
    print('-' * 50)
    print('Iteration', iteration)
    model.fit(x_train,
              y_train,
              batch_size=BATCH_SIZE,
              epochs=1,
              validation_data=(x_val, y_val))
    # Select 10 samples from the validation set at random so we can visualize
    # errors.
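    # (A sketch of the step described above, modeled on the classic Keras
    # addition_rnn example; rowx/rowy and the predict_classes usage are that
    # example's, assumed here rather than taken from this project.)
    for i in range(10):
        ind = np.random.randint(0, len(x_val))
        rowx, rowy = x_val[np.array([ind])], y_val[np.array([ind])]
        preds = model.predict_classes(rowx, verbose=0)
        q = ctable.decode(rowx[0])
        correct = ctable.decode(rowy[0])
        guess = ctable.decode(preds[0], calc_argmax=False)
        print('Q', q[::-1] if REVERSE else q, '| T', correct, '| guess', guess)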
Example No. 6
# Parameters for the model and dataset.
TRAINING_SIZE = 400000
DIGITS = 4
REVERSE = True
BATCH_SIZE = 128

# Maximum length of input is 'int + int' (e.g., '345+678'). Maximum length of
# int is DIGITS.
MAXLEN = DIGITS + 1 + DIGITS

# All the numbers, plus sign and space for padding.
chars = '0123456789+ '
ctable = CharacterTable(chars)

print('Load model...')
checkpoint = "addition_model.hdf5"
model = model_build(DIGITS, MAXLEN, chars, checkpoint=checkpoint)
model.summary()

# Predict addition provided by user
while True:
    print('-' * 50)
    print('ATTENTION: ONLY {0} DIGITS MAX FOR EACH NUMBER, EXAMPLE: 4444+10'.format(DIGITS))
    sentence = input('sentence: ')
    _sentence = sentence + ' ' * (MAXLEN - len(sentence))  # pad to MAXLEN
    _sentence = _sentence[::-1]  # reverse, matching REVERSE = True at training time
    x = ctable.encode(_sentence, MAXLEN)
    preds = model.predict_classes(np.array([x]), verbose=0)
    guess = ctable.decode(preds[0], calc_argmax=False)
    print(sentence + ' = ' + guess)
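Both addition snippets rely on a CharacterTable helper that is not shown. A minimal sketch in the spirit of the classic Keras addition example (one-hot encoding over the chars vocabulary):

import numpy as np

class CharacterTable:
    """One-hot encode/decode strings over a fixed character vocabulary."""

    def __init__(self, chars):
        self.chars = sorted(set(chars))
        self.char_indices = {c: i for i, c in enumerate(self.chars)}
        self.indices_char = {i: c for i, c in enumerate(self.chars)}

    def encode(self, C, num_rows):
        # One row per character position, one column per vocabulary entry.
        x = np.zeros((num_rows, len(self.chars)))
        for i, c in enumerate(C):
            x[i, self.char_indices[c]] = 1
        return x

    def decode(self, x, calc_argmax=True):
        if calc_argmax:
            x = x.argmax(axis=-1)
        return ''.join(self.indices_char[i] for i in x)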
Example No. 7
when = str(args[3])

x = pd.read_csv(path + '/' + when + '_features_' + file)
y = pd.read_csv(path + '/' + when + '_target_' + file)\
        ['Survived'].values
print('FILES NAME')
print('----> ' + path + '/' + when + '_features_' + file + ' <----')
print('----> ' + path + '/' + when + '_target_' + file + '   <----')

features = list(x.columns)
cat = to_list_categorical(x, do_print=False)
num = filter_list(raw=features, nasty=cat)

sectionizer('MODELING ', 65)
#----------------------------------------------------------------------
model = model_build(categorical_feat=cat, numerical_feat=num)

sectionizer(' - Train/Test Splitting ', 65)
#----------------------------------------------------------------------
x[num] = x[num].astype(float)
x_train, x_test,\
y_train, y_test = train_test_split(x, y,
                                   test_size=0.25,
                                   random_state=26)

sectionizer(' -- Training Model ', 65)
#----------------------------------------------------------------------
model.fit(x_train, y_train)

sectionizer('EVALUATING ', 65)
y_prob = model.predict_proba(x_test)
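The snippet stops at predict_proba; a sketch of how the probabilities might be scored (the metric choice is an assumption, not from the project):

from sklearn.metrics import log_loss, roc_auc_score

print('ROC AUC :', roc_auc_score(y_test, y_prob[:, 1]))  # probability of class 1
print('Log loss:', log_loss(y_test, y_prob))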
Example No. 8
File: knn.py Project: leugaux/KSPEU
# Import packages and dependencies
import mnist  # MNIST dataset
# Load the dataset
train_images = mnist.train_images()  # training images
train_labels = mnist.train_labels()  # training labels
test_images = mnist.test_images()  # test images
test_labels = mnist.test_labels()  # test labels

import train

train_images, test_images = train.reshape_images(train_images, test_images)

import model

model = model.model_build(train_images, train_labels, test_images, test_labels)

import inference

inference.test_show(model, test_images, test_labels, 9500, 9505)
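train.reshape_images and model.model_build are project modules not shown here. A plausible sketch of reshape_images (flattening the 28x28 images and scaling to [0, 1] is an assumption about what the project does):

import numpy as np

def reshape_images(train_images, test_images):
    # Flatten each 28x28 image into a 784-vector and scale pixels to [0, 1].
    train = train_images.reshape((-1, 28 * 28)) / 255.0
    test = test_images.reshape((-1, 28 * 28)) / 255.0
    return train, test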
Example No. 9
    wikidata = data.Dataset(params)
    wikidata.read(wikipath)
    print('wiki loaded')
    traindata = data.Dataset(params)
    traindata.read(trainpath, wikidata.char_id)
    testdata = data.Dataset(params)
    testdata.read(testpath, wikidata.char_id)
    webdata = data.Dataset(params)
    webdata.read(webpath, wikidata.char_id)
    print('data loaded')

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    model = model.Model(params)
    model.model_build(sess)
    print('model built')
    model.model_initialize(sess)
    print('model initialized')


    # model.lattice_train(wikidata, sess, 300000)
    # print('wiki_lattice finished')
    # model.train(traindata, sess, 40000)
    # print('all_train finished')
    # model.evaluate(testdata, sess)
    
    # model.train(traindata, sess, 40000)
    # print('train finished')
    # model.evaluate(testdata, sess)
    # print('lattice and text at the same time')
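tf.ConfigProto and tf.Session are TensorFlow 1.x APIs; under TensorFlow 2.x the same session setup needs the compat layer (a sketch, assuming one wants to run this snippet on TF 2.x):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

config = tf.ConfigProto()
config.gpu_options.allow_growth = True  # grow GPU memory on demand
sess = tf.Session(config=config)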
Example No. 10
from tensorflow import keras
from model import model_build

# load data
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images,
                               test_labels) = fashion_mnist.load_data()

train_images = train_images / 255.0

# train
model = model_build()
model.fit(train_images, train_labels, epochs=5)
model.save_weights('../models/weight.hdf5')
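To reuse the saved weights later, rebuild the model and load them back; a sketch (note the test images need the same / 255.0 scaling applied to the training images above):

model = model_build()
model.load_weights('../models/weight.hdf5')
predictions = model.predict(test_images / 255.0)
print(predictions[0].argmax())  # predicted class index for the first test image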