def plot_nor_ms(songs):
    songs_num = len(songs)
    sum_play = np.array([0 for i in range(DAYS)])
    sum_download = np.array([0 for i in range(DAYS)])
    sum_collect = np.array([0 for i in range(DAYS)])
    with open(SONG_P_D_C, 'r') as fr:
        songs_id = fr.readline().strip("\n")
        while songs_id and songs_num > 0:
            # each song record spans three lines: per-day play, download and collect counts
            play = list(map(int, fr.readline().strip("\n").split(",")))
            download = list(map(int, fr.readline().strip("\n").split(",")))
            collect = list(map(int, fr.readline().strip("\n").split(",")))
            if songs_id in songs:
                # accumulate daily play counts for the selected songs
                play = np.array(play)
                sum_play += play
                songs_num -= 1
            songs_id = fr.readline().strip("\n")
    p = plt.plot(sum_play, "bo", sum_play, "b-", marker="o")
    #d = plt.plot(download, "ro", download, "r-", marker="o")
    #c = plt.plot(collect, "go", collect, "g-", marker="o")
    #plt.legend([p[1], d[1], c[1]], ["play", "download", "collect"])
    plt.legend([p[1]], ["play"])
    plt.title('SUM OF THE NORMAL MUSIC')
    plt.xlabel('days')
    plt.ylabel('times')
    #plt.savefig(os.path.join(self.SONG_PLAY_FOLDER, songs_id + ".png"))
    plt.show()
def on_epoch_end(self, epoch, logs=None):
    logs = logs or {}
    self.logs.append(logs)
    self.x.append(self.i)
    self.losses.append(logs.get("loss"))
    self.val_losses.append(logs.get("val_loss"))
    self.i = self.i + 1
    # redraw the loss curves after every epoch
    clear_output(wait=True)
    plt.plot(self.x, self.losses, label="loss")
    plt.plot(self.x, self.val_losses, label="val_loss")
    plt.legend()
    plt.show()
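# A minimal sketch of how the hook above might be wired into a Keras training run;
# the class name PlotLosses, the on_train_begin initializer and the toy model/data
# are assumptions, not taken from the original snippet.
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from IPython.display import clear_output

class PlotLosses(tf.keras.callbacks.Callback):
    def on_train_begin(self, logs=None):
        # containers that on_epoch_end above expects to exist
        self.i = 0
        self.x, self.losses, self.val_losses, self.logs = [], [], [], []

    on_epoch_end = on_epoch_end  # reuse the hook defined above as a method

# toy regression data, only to exercise the callback
x = np.random.rand(256, 4)
y = x.sum(axis=1, keepdims=True)
model = tf.keras.Sequential([tf.keras.layers.Dense(8, activation="relu"),
                             tf.keras.layers.Dense(1)])
model.compile(optimizer="adam", loss="mse")
model.fit(x, y, validation_split=0.2, epochs=5, callbacks=[PlotLosses()])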
def plotRainDrops(dropInCircle, dropsOutOfCircle, lengthOfField=1, format='pdf'):
    numberOfDropsInCircle = len(dropInCircle)
    numberOfDropsOutCircle = len(dropsOutOfCircle)
    numberOfDrops = numberOfDropsInCircle + numberOfDropsOutCircle
    plt.figure()
    plt.xlim(-lengthOfField / 2, lengthOfField / 2)
    plt.ylim(-lengthOfField / 2, lengthOfField / 2)
    plt.scatter([e[0] for e in dropInCircle], [e[1] for e in dropInCircle],
                color='black', label='Drops inside circle')
    plt.scatter([e[0] for e in dropsOutOfCircle], [e[1] for e in dropsOutOfCircle],
                color='red', label='Drops outside circle')
    plt.legend(loc='center')
    plt.title(r"%s drops: %s landed in circle, estimating $\pi$ as %.4f"
              % (numberOfDrops, numberOfDropsInCircle,
                 4 * numberOfDropsInCircle / numberOfDrops))
    plt.savefig("%s_drops.%s" % (numberOfDrops, format))
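# A minimal sketch of how the two drop lists could be generated before calling
# plotRainDrops; the helper name rainDrops and the uniform-sampling scheme are
# assumptions, not taken from the original snippet.
import random

def rainDrops(numberOfDrops, lengthOfField=1):
    dropsInCircle, dropsOutOfCircle = [], []
    for _ in range(numberOfDrops):
        # sample a point uniformly in the square [-L/2, L/2] x [-L/2, L/2]
        x = random.uniform(-lengthOfField / 2, lengthOfField / 2)
        y = random.uniform(-lengthOfField / 2, lengthOfField / 2)
        # does it fall inside the inscribed circle of radius L/2?
        if x ** 2 + y ** 2 <= (lengthOfField / 2) ** 2:
            dropsInCircle.append((x, y))
        else:
            dropsOutOfCircle.append((x, y))
    return dropsInCircle, dropsOutOfCircle

inside, outside = rainDrops(10000)
plotRainDrops(inside, outside)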
def plot_acc(history):
    print("plot starts")
    acc = history.history['acc']
    val_acc = history.history['val_acc']
    epochs = range(1, len(acc) + 1)
    # "bo" plots blue dots
    plt.plot(epochs, acc, 'bo', label="Training acc")
    # "b" plots a solid blue line
    plt.plot(epochs, val_acc, "b", label="Validation acc")
    plt.title('Training and validation acc')
    plt.xlabel('Epochs')
    plt.ylabel('ACC')
    plt.legend()
    plt.show()
    print("plot finished!")
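# A minimal sketch of where the history object comes from; the toy model and data
# are assumptions, and newer Keras versions report 'accuracy'/'val_accuracy' unless
# the metric is registered as 'acc' as below.
import numpy as np
import tensorflow as tf

x_train = np.random.rand(200, 8)
y_train = (x_train.sum(axis=1) > 4).astype("float32")

model = tf.keras.Sequential([tf.keras.layers.Dense(16, activation="relu"),
                             tf.keras.layers.Dense(1, activation="sigmoid")])
model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["acc"])
history = model.fit(x_train, y_train, validation_split=0.2, epochs=10, verbose=0)

plot_acc(history)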
avg_set = []
epoch_set = []
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    for epoch in range(training_epochs):
        avg_cost = 0.0
        total_batch = int(mnist.train.num_examples / batch_size)
        for i in range(total_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys})
            # accumulate the average cost over all batches in this epoch
            avg_cost += sess.run(cost, feed_dict={x: batch_xs, y: batch_ys}) / total_batch
        if epoch % display_step == 0:
            print("Epoch:", '%04d' % (epoch + 1), "cost=", '{:.9f}'.format(avg_cost))
        avg_set.append(avg_cost)
        epoch_set.append(epoch + 1)
    print('Training phase finished')

    plt.plot(epoch_set, avg_set, 'o', label='MLP Training Phase')
    plt.ylabel('cost')
    plt.xlabel('epoch')
    plt.legend()
    plt.show()

    # Test model
    correct_prediction = tf.equal(tf.argmax(output_layer, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    print('Model Accuracy:', accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))
loss.backward()
# update the parameters
optimizer.step()

import matplotlib.pyplot as plt

# plot the loss across all iterations
x = np.arange(max_epoch)
y = np.array(iter_loss)
plt.plot(x, y)
plt.title('Loss Value over all iterations')
plt.xlabel('Iteration')
plt.ylabel('Mean loss value')
plt.show()

# testing
output = model(text_x)
predict_list = output.detach().numpy()
print(predict_list)

# scatter plot of real vs. predicted values
x = np.arange(text_x.shape[0])
y1 = predict_list
y2 = text_y
line1 = plt.scatter(x, y1, c='red', label='predict')
line2 = plt.scatter(x, y2, c='yellow', label='real')
plt.legend(loc='best')
plt.title('Prediction vs. Real')
plt.ylabel('House Price')
plt.show()
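# A minimal sketch of the training loop assumed to precede the snippet above; the
# model, data and hyperparameters here are placeholders, not taken from the source.
import numpy as np
import torch
import torch.nn as nn

max_epoch = 100
iter_loss = []

model = nn.Linear(13, 1)                               # placeholder regression model
criterion = nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
train_x = torch.randn(64, 13)
train_y = torch.randn(64, 1)

for epoch in range(max_epoch):
    optimizer.zero_grad()                              # clear gradients from the last step
    output = model(train_x)
    loss = criterion(output, train_y)
    iter_loss.append(loss.item())                      # record the loss for the plot above
    loss.backward()
    optimizer.step()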
# plt.hist(train_transformed[:, i], alpha=0.3, label="Latent User " + str(i + 1), range=(0, 1), bins=20)
# plt.xlabel("User Proportion from Latent User i", fontsize=20)
# plt.ylabel("Count", fontsize=20)
# plt.tick_params(labelsize=15)
# plt.legend()
# plt.show(block=False)

plt.figure()
targets = [1, 0]
colors = ['r', 'b']
for target, color in zip(targets, colors):
    indicesToKeep = train['sex_m'] == target
    plt.scatter(train_transformed[indicesToKeep, 0],
                train_transformed[indicesToKeep, 1], c=color)
plt.legend(targets)
plt.xlabel('component 1')
plt.ylabel('component 2')
plt.show(block=False)

if n['name'] == 'PCA':
    kmeans = KMeans().fit(train_transformed)  # n_clusters=2
    # cluster_transformed = kmeans.transform()
    centroids = kmeans.cluster_centers_
    print("Train - Average Log-Likelihood with Kmeans: ",
          kmeans.score(train_transformed) / train.shape[0])
    print("Test - Average Log-Likelihood with Kmeans: ",
          kmeans.score(test_transformed) / test.shape[0])
    plt.figure()
    plt.plot(train_transformed[:, 0], train_transformed[:, 1],
def main():
    global args, start_epoch, best_acc1
    args = config()

    if args.cuda and not torch.cuda.is_available():
        raise Exception('No GPU found, please run without --cuda')

    print('\n=> Build ResNet..')
    model = mo.ResNet50()
    print(model)
    print('==> Complete build')

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=args.lr,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay,
                          nesterov=True)
    start_epoch = 0
    n_retrain = 0

    if args.cuda:
        torch.cuda.set_device(args.gpuids[0])
        with torch.cuda.device(args.gpuids[0]):
            model = model.cuda()
            criterion = criterion.cuda()
        model = nn.DataParallel(model, device_ids=args.gpuids,
                                output_device=args.gpuids[0])
        cudnn.benchmark = True

    # checkpoint file
    ckpt_dir = pathlib.Path('checkpoint')
    ckpt_file = ckpt_dir / args.dataset / args.ckpt

    # for resuming training
    if args.resume:
        if isfile(ckpt_file):
            print('\n==> Loading Checkpoint \'{}\''.format(args.ckpt))
            checkpoint = load_model(model, ckpt_file, args)
            start_epoch = checkpoint['epoch']
            optimizer.load_state_dict(checkpoint['optimizer'])
            print('==> Loaded Checkpoint \'{}\' (epoch {})'.format(
                args.ckpt, start_epoch))
        else:
            print('==> no checkpoint found \'{}\''.format(args.ckpt))
            return

    # Data loading
    print('\n==> Load data..')
    train_loader, val_loader = DataLoader(
        args.batch_size, args.workers, args.datapath, args.cuda)

    # for evaluation
    if args.evaluate:
        if isfile(ckpt_file):
            print('\n==> Loading Checkpoint \'{}\''.format(args.ckpt))
            checkpoint = load_model(model, ckpt_file, args)
            print('==> Loaded Checkpoint \'{}\' (epoch {})'.format(
                args.ckpt, start_epoch))

            # evaluate on validation set
            print('\n===> [ Evaluation ]')
            start_time = time.time()
            acc1, acc5 = validate(val_loader, model, criterion)
            elapsed_time = time.time() - start_time
            print('====> {:.2f} seconds to evaluate this model\n'.format(
                elapsed_time))
            return
        else:
            print('==> no checkpoint found \'{}\''.format(args.ckpt))
            return

    # train...
    train_time = 0.0
    validate_time = 0.0
    lr = args.lr
    list_Acc1 = []
    list_Acc5 = []
    list_epoch = []

    for epoch in range(start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch, lr)
        print('\n==> Epoch: {}, lr = {}'.format(
            epoch, optimizer.param_groups[0]["lr"]))

        # train for one epoch
        print('===> [ Training ]')
        start_time = time.time()
        acc1_train, acc5_train = train(train_loader, epoch=epoch, model=model,
                                       criterion=criterion, optimizer=optimizer)
        elapsed_time = time.time() - start_time
        train_time += elapsed_time
        print('====> {:.2f} seconds to train this epoch\n'.format(elapsed_time))

        # evaluate on validation set
        print('===> [ Validation ]')
        start_time = time.time()
        acc1_valid, acc5_valid = validate(val_loader, model, criterion)
        elapsed_time = time.time() - start_time
        validate_time += elapsed_time
        print('====> {:.2f} seconds to validate this epoch\n'.format(
            elapsed_time))

        # remember best Acc@1 and save checkpoint
        is_best = acc1_valid > best_acc1
        best_acc1 = max(acc1_valid, best_acc1)
        state = {
            'epoch': epoch + 1,
            'model': model.state_dict(),
            'optimizer': optimizer.state_dict()
        }
        save_model(state, epoch, is_best, args)

        list_Acc1.append(acc1_valid)
        list_Acc5.append(acc5_valid)
        list_epoch.append(epoch)

    plt.plot(list_epoch, list_Acc1)
    plt.plot(list_epoch, list_Acc5)
    plt.legend(['ACC1', 'ACC5'])

    avg_train_time = train_time / (args.epochs - start_epoch)
    avg_valid_time = validate_time / (args.epochs - start_epoch)
    total_train_time = train_time + validate_time
    print('====> average training time per epoch: {:,}m {:.2f}s'.format(
        int(avg_train_time // 60), avg_train_time % 60))
    print('====> average validation time per epoch: {:,}m {:.2f}s'.format(
        int(avg_valid_time // 60), avg_valid_time % 60))
    print('====> training time: {}h {}m {:.2f}s'.format(
        int(train_time // 3600), int((train_time % 3600) // 60),
        train_time % 60))
    print('====> validation time: {}h {}m {:.2f}s'.format(
        int(validate_time // 3600), int((validate_time % 3600) // 60),
        validate_time % 60))
    print('====> total training time: {}h {}m {:.2f}s'.format(
        int(total_train_time // 3600), int((total_train_time % 3600) // 60),
        total_train_time % 60))
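# A minimal sketch of the adjust_learning_rate helper referenced above; the
# step-decay schedule (divide the base lr by 10 every 30 epochs) is an assumption,
# not taken from the original code.
def adjust_learning_rate(optimizer, epoch, lr):
    new_lr = lr * (0.1 ** (epoch // 30))
    for param_group in optimizer.param_groups:
        param_group['lr'] = new_lr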
        df[abbv] = (df[abbv] - df[abbv][0]) / df[abbv][0] * 100.0

        if main_df.empty:
            main_df = df
        else:
            main_df = main_df.join(df)

    print(main_df.head())

    # pickle the combined DataFrame
    pickle_out = open('fiddy_states.pickle', 'wb')
    pickle.dump(main_df, pickle_out)
    pickle_out.close()


def HPI_Benchmark():
    df = quandl.get("FMAC/HPI_USA", authtoken=api_key)
    df["United States"] = ((df["United States"] - df["United States"][0])
                           / df["United States"][0] * 100.0)
    return df


fig = plt.figure()
ax1 = plt.subplot2grid((1, 1), (0, 0))

HPI_data = pd.read_pickle('fiddy_states.pickle')
HPI_data.plot(ax=ax1)
plt.legend().remove()
plt.show()
import matplotlib.pyplot as plt
import pickle
import pandas as pd

#grab_initial_state_data()

fig = plt.figure()
ax1 = plt.subplot2grid((1, 1), (0, 0))

HPI_data = pd.read_pickle('fiddy_states.pickle')
# resample Texas HPI to annual frequency (the how= argument is no longer supported by pandas)
HPI_data['TX1yr'] = HPI_data['TX'].resample('A').mean()
print(HPI_data[['TX', 'TX1yr']].head())

HPI_data[['TX', 'TX1yr']].plot(ax=ax1)
plt.legend(loc=4)
plt.show()