def train_model_chunks(model, loader, batch_size=8, num_epoch=100,
                       print_every=10, epoch_done=0):
    # Running averages over the last 100 batches for smoother logging.
    loss_avg = running_average(100)
    acc_avg = running_average(100)
    for i in range(num_epoch):
        for j in range(loader.num_chunks):
            print("\nEpoch no. %s, loading chunk no. %s\n"
                  % (epoch_done + i + 1, j + 1))
            X, Y = loader.next_chunk()
            # Integer division: train only on the full batches in this chunk.
            for k in range(len(Y) // batch_size):
                # Assumes the model was compiled with metrics=['accuracy'],
                # so train_on_batch returns (loss, accuracy).
                loss, accuracy = model.train_on_batch(
                    X[k * batch_size:(k + 1) * batch_size],
                    Y[k * batch_size:(k + 1) * batch_size])
                curr_avg_loss = loss_avg.upsert(loss)
                curr_avg_acc = acc_avg.upsert(accuracy)
                if (k + 1) % print_every == 0:
                    print("Epoch: %d, chunk: %d, batch: %d, loss=%.4f, accuracy=%.4f"
                          % (epoch_done + i + 1, j + 1, k + 1,
                             curr_avg_loss, curr_avg_acc))
        # if (i + 1) % 5 == 0:
        #     model.save_weights('model_weights_cnn_' + str(epoch_done + i + 1) +
        #                        '_' + str(curr_avg_acc) + '.h5', overwrite=True)
    return model
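# `running_average` is not defined in this file. What follows is a minimal
# sketch of the class that train_model_chunks above appears to assume: a
# streaming mean over the last `size` values, where upsert() pushes a new
# value and returns the current average. The deque-based implementation is
# an assumption, not the original code.
from collections import deque


class running_average:
    def __init__(self, size):
        # Bounded buffer: appending beyond `size` drops the oldest value.
        self.window = deque(maxlen=size)

    def upsert(self, value):
        self.window.append(value)
        return sum(self.window) / len(self.window)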
import sys

import numpy as np


def main():
    # Parse command-line arguments: output file, 1-based column index, window width.
    outfile = sys.argv[1] if len(sys.argv) > 1 else "average.out"
    col = int(sys.argv[2]) - 1 if len(sys.argv) > 2 else 0
    width = int(sys.argv[3]) if len(sys.argv) > 3 else 1

    # Read whitespace-separated data from stdin.
    data = sys.stdin.readlines()

    # Extract the selected column as floats; keep the tokenized lines so the
    # other columns can be written back out unchanged.
    values = np.zeros(len(data), dtype=np.float64)
    for i, line in enumerate(data):
        data[i] = line.split()
        values[i] = float(data[i][col])

    # Compute the running average with a gaussian window.
    average = running_average(values, width, "gaussian")

    # Write the input back out with the selected column replaced by its average.
    with open(outfile, 'w') as of:
        for i, tokens in enumerate(data):
            for tok in tokens[:col]:
                of.write(tok)
                of.write("\t")
            of.write(str(average[i]))
            of.write("\t")
            for tok in tokens[col + 1:]:
                of.write(tok)
                of.write("\t")
            of.write("\n")

    print("Computed running average of column {} of input data "
          "({} datapoints)".format(col + 1, len(average)))
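# The scripts in this file use a second, unrelated `running_average`: a
# windowed smoothing function over a whole array, called as
# running_average(values, width, win). Below is a minimal numpy-only sketch
# under that assumption, covering the "gaussian" window used here plus a
# flat fallback; it is not the original implementation.
import numpy as np


def running_average(values, width, win="flat"):
    if win == "gaussian":
        # Gaussian kernel with standard deviation `width`, truncated at 3 sigma.
        x = np.arange(-3 * width, 3 * width + 1)
        kernel = np.exp(-0.5 * (x / float(width)) ** 2)
    else:
        # Flat (simple moving-average) window.
        kernel = np.ones(width)
    kernel /= kernel.sum()
    # 'same' keeps the output the same length as the input.
    return np.convolve(values, kernel, mode="same")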
def get_data(filename, width, wintype):
    # Read a two-column file: time in the first column, value in the second.
    with open(filename, "r") as datafile:
        data = datafile.readlines()
    times = np.zeros(len(data), dtype=float)
    values = np.zeros(len(data), dtype=float)
    for i, line in enumerate(data):
        fields = line.split()
        times[i] = float(fields[0])
        values[i] = float(fields[1])
    # Convert time from ps to ns.
    times = times / 1000.0
    average = running_average(values, width, win=wintype)
    print("Running average computed")
    return (average, times)
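# Hypothetical usage of get_data; the filename "dihedral.dat" and the
# two-column layout (time in ps, value) are illustrative assumptions, not
# taken from the original scripts.
average, times = get_data("dihedral.dat", width=100, wintype="gaussian")
print("Smoothed %d points spanning %.2f ns" % (len(average), times[-1] - times[0]))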
    # Parse time and angle columns from the raw lines read earlier.
    for i, line in enumerate(boundData):
        boundTime[i] = float(line.split()[0])
        boundAngle[i] = float(line.split()[1])
    for i, line in enumerate(freeData):
        freeAngle[i] = float(line.split()[1])

    # Convert time from ps to ns.
    boundTime = boundTime / 1000.0
    freeTime = freeTime / 1000.0

    # Compute running averages with a gaussian window.
    width = 100
    wintype = "gaussian"
    print("Starting average computation")
    boundAngleAverage = running_average(boundAngle, width, win=wintype)
    freeAngleAverage = running_average(freeAngle, width, win=wintype)
    print("Average computation done")

    # Cache everything so later runs can skip the computation.
    with open(pickleDataFilename, 'wb') as pickleDataFile:
        pickle.dump([boundAngle, freeAngle, boundTime, freeTime,
                     boundAngleAverage, freeAngleAverage], pickleDataFile)

# If the pickle file is newer than the data files, load the cached results.
else:
    with open(pickleDataFilename, 'rb') as pickleDataFile:
        [boundAngle, freeAngle, boundTime, freeTime,
         boundAngleAverage, freeAngleAverage] = pickle.load(pickleDataFile)
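# The fragment above sits inside a staleness check whose `if` header is not
# shown (only its `else:` branch survives). A sketch of the assumed
# condition, based on the fragment's own comment about the pickle file
# being newer than the data files; the helper name and its arguments are
# hypothetical.
import os


def pickle_is_stale(pickle_path, data_paths):
    # Recompute if the pickle is missing or older than any input file.
    if not os.path.exists(pickle_path):
        return True
    pickle_mtime = os.path.getmtime(pickle_path)
    return any(os.path.getmtime(p) > pickle_mtime for p in data_paths)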