# --- third-party ---
from keras.callbacks import ModelCheckpoint
from keras.layers.core import Reshape
from keras.optimizers import Adadelta, RMSprop
from sklearn.utils import shuffle
import imageio
import numpy as np

# --- project-local ---
from model import model_final
from data_load import load

# Pre-split dataset (train/test) comes straight from the project loader.
x_train, y_train, x_test, y_test = load()

##### Hyperparameters ##############
####################################
shape = (28, 28, 1)   # input image shape fed to the model builder
epoch = 300
batch_size = 64
CheckDir = "sample/"
####################################

model = model_final(shape)

# Two candidate optimizers are constructed; only RMSprop is compiled in.
# `ada` is kept — presumably referenced later in the file; confirm.
ada = Adadelta(lr=5.0, rho=0.95, epsilon=1e-08, decay=0.001)
rms = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.001)

model.compile(loss="mean_squared_error", optimizer=rms)
model.summary()
def choose_rate(call):
    """Route inline-keyboard callbacks for the currency-rate bot.

    Three independent dispatch stages run in order on ``call.data``:
    the top-level menu ('current rate' / 'archive'), the currency
    choice, and navigation ('start' / 'back_*' — the back buttons
    rewrite ``call.data`` and re-dispatch recursively).
    """
    # NOTE(review): the return value is discarded — presumably load()
    # refreshes cached rate data as a side effect; confirm in data_load.
    data_load.load()

    button = telebot.types.InlineKeyboardButton

    # Stage 1: top-level menu -> show the currency keyboard.
    if call.data == 'current rate':
        keyboard = telebot.types.InlineKeyboardMarkup()
        keyboard.row(button(text='Рубль', callback_data='rur_cur'),
                     button(text='Доллар', callback_data='usd_cur'))
        keyboard.row(button(text='Евро', callback_data='eur_cur'),
                     button(text='Биткойн', callback_data='btc'))
        bot.edit_message_text(chat_id=call.message.chat.id,
                              message_id=call.message.message_id,
                              text="Выберите валюту",
                              reply_markup=keyboard)
    elif call.data == 'archive':
        keyboard = telebot.types.InlineKeyboardMarkup()
        keyboard.row(button(text='Рубль', callback_data='rub_arch'),
                     button(text='Доллар', callback_data='usd_arch'))
        keyboard.add(button(text='Евро', callback_data='eur_arch'))
        bot.edit_message_text(chat_id=call.message.chat.id,
                              message_id=call.message.message_id,
                              text="Выберите валюту",
                              reply_markup=keyboard)

    # Stage 2: currency picked -> show the rate plus navigation buttons.
    # The first three characters of the callback id name the currency.
    if call.data in ('rur_cur', 'usd_cur', 'eur_cur', 'btc'):
        keyboard = telebot.types.InlineKeyboardMarkup()
        keyboard.row(button(text='В начало', callback_data='start'),
                     button(text='Назад', callback_data='back_cur'))
        bot.edit_message_text(chat_id=call.message.chat.id,
                              message_id=call.message.message_id,
                              text=out_rate(call.data[:3].upper(), 'cur'),
                              reply_markup=keyboard)
    elif call.data in ('rub_arch', 'usd_arch', 'eur_arch'):
        keyboard = telebot.types.InlineKeyboardMarkup()
        keyboard.row(button(text='В начало', callback_data='start'),
                     button(text='Назад', callback_data='back_arch'))
        bot.edit_message_text(chat_id=call.message.chat.id,
                              message_id=call.message.message_id,
                              text=out_rate(call.data[:3].upper(), 'arch'),
                              reply_markup=keyboard)

    # Stage 3: navigation — restart, or rewrite call.data and recurse.
    if call.data == 'start':
        bot.delete_message(chat_id=call.message.chat.id,
                           message_id=call.message.message_id)
        start_message(call.message)
    elif call.data == 'back_cur':
        call.data = 'current rate'
        choose_rate(call)
    elif call.data == 'back_arch':
        call.data = 'archive'
        choose_rate(call)
from NN import NeuralNet
from data_load import load
import matplotlib.pyplot as plt
import numpy as np

# Load and shuffle the dataset so the 60/20/20 split is random.
x, y = load()
shuffle_indices = np.random.permutation(len(y))
x = x[shuffle_indices]
y = y[shuffle_indices]

# 60% train / 20% validation; the test slice takes whatever remains so
# no sample is dropped by integer truncation.  (The original computed an
# unused `test_len` — removed.)
train_len = int(len(x) * 0.6)
val_len = int(len(x) * 0.2)

x_train = x[:train_len]
y_train = y[:train_len]
x_val = x[train_len:train_len + val_len]
y_val = y[train_len:train_len + val_len]
x_test = x[train_len + val_len:]
y_test = y[train_len + val_len:]

print(f"x_train len : [ {len(x_train)} ]")
print(f"x_val len : [ {len(x_val)} ]")
print(f"x_test len : [ {len(x_test)} ]")

# Training hyperparameters.
train_size = x_train.shape[0]
batch_size = 20
iters_num = 10000
learning_rate = 0.01
train_loss_list = []
""" import tensorflow as tf import numpy as np import matplotlib.pyplot as plt from data_load import load BATCH_START = 0 TIME_STEPS = 70 #500 #20 BATCH_SIZE = 50 INPUT_SIZE = 255#1 OUTPUT_SIZE = 1 CELL_SIZE = 50 LR = 0.001 feature = 'feature1_short' label_file = 'label1_short' train_data, val_data, test_data, train_label, val_lable, test_label = load() after = 70 def avaliable_time(after, data): ava_list = [] startime = np.arange(100, data.shape[0] - 70, 170) for i in startime: tmp = list(range(i, i + after)) ava_list.append(tmp) ava_list = np.asarray(ava_list).flatten() return ava_list.tolist() def get_batch(data, label, ava_list):
sess.run(tf.assign(global_step, 0)) if current_epoch > 0: print("Restoring Checkpoint") saver.restore(sess, checkpoint_path + "/model-%d" % current_epoch) print('global step:', sess.run(global_step), 'learning_rate:', sess.run(learning_rate)) current_epoch += 1 train_iterations = current_epoch * config.net_params[ 'total_train'] / batch_size # 训练总数24000 validation_iterations = current_epoch * config.net_params[ 'total_validation'] / batch_size for epoch in range(current_epoch, n_epochs): # dl.shuffle(1) for part in range(total_partitions_train): cam1_container, cam2_container, cam3_container, depth1_container, depth2_container, depth3_container, \ expected_transform_container, velo_pose_container, match_points_container, decalib_mat_container, p_rects, trs = dl.load(part, mode = "train") for img1, img2, img3, depth_img1, depth_img2, depth_img3, exp_vector, velo_mat, pair_matches, decalib, p_rect, tr in zip( cam1_container, cam2_container, cam3_container, depth1_container, depth2_container, depth3_container, expected_transform_container, velo_pose_container, match_points_container, decalib_mat_container, p_rects, trs): random_disp = np.random.randint(batch_size) outputs = sess.run( [ expected_pose, pre_points_2d, predicted_pose, proj_img, exp_proj_img, train_loss, sift_match_mask, photometric_loss2, photometric_loss1, vector_loss, train_step, merge_train, exp_points_2d ],
AP.ToTensor() ]) test_transform = A.Compose([ A.Resize(224, 224, interpolation=1, always_apply=True, p=1), A.Normalize(mean=mean, std=std), AP.ToTensor() ]) train_set, test_set = form_data(unzip=True, data_path=LOCAL_ZIP_NAME, label_file=LABEL_FILE, img_dir=IMAGE_DIR, train_transform=train_transform, test_transform=test_transform) trainloader, testloader = load(train_set, test_set, batch_size=128) use_cuda = torch.cuda.is_available() device = torch.device("cuda" if use_cuda else "cpu") print(device) if MODEL_NAME == 'mobilenetv2': print('model:mobilenetv2') model = torch.hub.load('pytorch/vision', 'mobilenet_v2', pretrained=True).to(device) model.classifier[1] = nn.Sequential( nn.Linear(1280, 256), nn.ReLU(), nn.Linear(256, CLASSES), ).to(device) elif MODEL_NAME == 'resnet18':
tf.summary.scalar('eval_loss', loss) return tf.estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops=eval_metric_ops) #%% if __name__ == "__main__": root_dir = '/home/pohsuanh/Documents/Computer_Vision/HW6' # Load training and eval data train_data, eval_data, test_data, gt = data_load.load() # Flags TRAIN = False PREDICT = True DRAW_SAMPLE = False # Construct model if DRAW_SAMPLE == True: # pic = np.random.randint((test_data['x']).shape[0]) pic = np.random.randint(len(test_data['x']))
from data_load import clear_data, load, binarize_data, train_test_split
from id3 import ID3
from stat import tree_prune_stat

# NOTE(review): `stat` shadows the standard-library module of the same
# name — this import only resolves because a local stat.py precedes the
# stdlib on sys.path; consider renaming that module.

# Load the mushroom dataset, then swap the first and last column of
# every row (moves the class label into the position ID3 expects).
m_data = clear_data(load("./mushroom.txt"))
for row in m_data:
    row[0], row[-1] = row[-1], row[0]

m_binary = binarize_data(m_data)
m_train, m_test = train_test_split(m_binary, 0.8)

m_tree = ID3(m_train)
tree_prune_stat(m_tree, m_train, m_test)
from sklearn.preprocessing import LabelEncoder, OneHotEncoder

from dice_net import DiceNet
from data_load import load

from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession

# Let TensorFlow grow GPU memory on demand instead of reserving it all.
config = ConfigProto()
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)

print("Num GPUs Available: ",
      len(tf.config.experimental.list_physical_devices('GPU')))
print("gpu:", tf.test.gpu_device_name())

# Each label is a (type, value) pair; split into two parallel lists.
X, y = load('data')
type_labels = [pair[0] for pair in y]
val_labels = [pair[1] for pair in y]
X = np.array(X)

# NOTE(review): the same LabelEncoder/OneHotEncoder instances are
# re-fitted for the value labels, so the type-label mappings are lost
# afterwards — confirm nothing downstream calls inverse_transform for
# the type labels on these encoders.
label_encoder = LabelEncoder()
type_encoded = label_encoder.fit_transform(type_labels)
val_encoded = label_encoder.fit_transform(val_labels)

onehot_encoder = OneHotEncoder(sparse=False)
type_onehot = onehot_encoder.fit_transform(type_encoded.reshape(-1, 1))
val_onehot = onehot_encoder.fit_transform(val_encoded.reshape(-1, 1))
from data_load import load
from id3 import ID3
from stat import tree_prune_stat

# NOTE(review): `stat` shadows the standard-library module — this works
# only because a local stat.py is found first on sys.path.

# Train an ID3 tree on the first dataset, then report pruning
# statistics against the held-out test file.
dane = load("./data1.txt", cast_to_int=True)
test = load("./test1.txt", cast_to_int=True)

tree = ID3(dane)
tree_prune_stat(tree, dane, test)