def halo_annotations(code, redshift, multiple_widths='yes', all_axis='no'):
    data_pf = ld.load_data(code, redshift)

    # Build the path to the pre-computed halo catalog for this code/redshift.
    catalog_dir = '/home/nmiller/THE/halo_catalogs/catalog/'
    code_catalog = catalog_dir + str(code)
    redshift_dir = code_catalog + '/redshift' + str(redshift)
    file_dir = redshift_dir + '/redshift' + str(redshift) + '.0.h5'

    hpf = load(file_dir)
    hc = HaloCatalog(halos_pf=hpf, output_dir="halo_catalogs/catalog")
    hc.load()

    # Projection center (code units) used for all plots.
    EnzoC = (0.3861618, 0.46086884, 0.49156952)

    if multiple_widths == 'yes':
        ProjectionPlot(data_pf, 'z', 'density', center=EnzoC).annotate_halos(hc).annotate_title(str(code)+str(redshift)+'Halos').save(str(code)+str(redshift)+'Halos.png')
        ProjectionPlot(data_pf, 'z', 'density', center=EnzoC, width=(10000, 'kpc')).annotate_halos(hc).annotate_title(str(code)+str(redshift)+'Halos').save(str(code)+str(redshift)+'Halos_width10k.png')
        ProjectionPlot(data_pf, 'z', 'density', center=EnzoC, width=(3000, 'kpc')).annotate_halos(hc).annotate_title(str(code)+str(redshift)+'Halos').save(str(code)+str(redshift)+'Halos_width3k.png')
        ProjectionPlot(data_pf, 'z', 'density', center=EnzoC, width=(600, 'kpc')).annotate_halos(hc).annotate_title(str(code)+str(redshift)+'Halos').save(str(code)+str(redshift)+'Halos_width600.png')
    else:
        ProjectionPlot(data_pf, 'z', 'density', center=EnzoC).annotate_halos(hc).annotate_title(str(code)+str(redshift)+'Halos').save(str(code)+str(redshift)+'Halos.png')

    if all_axis == 'yes':
        ProjectionPlot(data_pf, 'z', 'density', center=EnzoC).annotate_halos(hc).annotate_title(str(code)+str(redshift)+'Halos').save(str(code)+str(redshift)+'Halos_z.png')
        ProjectionPlot(data_pf, 'x', 'density', center=EnzoC).annotate_halos(hc).annotate_title(str(code)+str(redshift)+'Halos').save(str(code)+str(redshift)+'Halos_x.png')
        ProjectionPlot(data_pf, 'y', 'density', center=EnzoC).annotate_halos(hc).annotate_title(str(code)+str(redshift)+'Halos').save(str(code)+str(redshift)+'Halos_y.png')
    return
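# Usage sketch for halo_annotations (hedged): the code name and redshift below are illustrative,
# mirroring the 'ramses' / 1.0 pair used in Example #25, and assume ld.load_data and the
# halo catalog directory above are available.
halo_annotations('ramses', 1.0, multiple_widths='yes', all_axis='no')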
Example #2
def star_creation(code, redshift):
    ds = ld.load_data(code, redshift)
    dd = ds.all_data()

    pm = dd[('io', 'particle_mass')]
    pi = dd[('io', 'particle_index')]
    if code == 'ramses':
        # RAMSES exposes star particles through 'particle_age' rather than 'creation_time'.
        ct = dd[('all', 'particle_age')]
        stars = (ct != 0)
        print('done')
    else:
        ct = dd[('io', 'creation_time')]
        stars = (ct > 0)

    # Keep only the star particles.
    pm = pm[stars]
    pi = pi[stars]
    ct = ct[stars]

    plt.figure()
    plt.plot(pi, ct, '.')
    plt.title('Particle Index vs. Creation Time for %s %s' % (code, redshift))
    plt.savefig(str(code)+str(redshift)+"index_vs_creation.png")

    # Start a new figure so the mass plot is not drawn on top of the index plot.
    plt.figure()
    plt.plot(pm, ct, '.')
    plt.title('Particle Mass vs. Creation Time for %s %s' % (code, redshift))
    plt.savefig(str(code)+str(redshift)+"particlemass_vs_creation.png")
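# Usage sketch (illustrative code/redshift pair; saves the two scatter plots as PNG files):
star_creation('ramses', 1.0)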
def show(args):
    params = vars(args)
    train_data, train_label, test_data, test_label = load_data(
        params["datadir"], 1, 1)
    indices = np.arange(params["start"], train_data.shape[0] - 1)
    show_data(train_data[params["start"]:], train_label[params["start"]:],
              indices)
def property_file(code, redshift, property_attribute):
    # If this is run on its own, the property must be in the form of
    # those listed in possible_properties.txt.
    data_pf = ld.load_data(code, redshift)
    halo_list = HaloFinder(data_pf)
    # make the file
    halofile = open(property_attribute + '.txt', 'w')
    if property_attribute == 'center_of_mass':
        for halo in halo_list:
            halofile.write(str(halo.center_of_mass()) + "\n")
    elif property_attribute == 'total_mass':
        for halo in halo_list:
            halofile.write(str(halo.total_mass()) + "\n")
    elif property_attribute == 'maximum_density':
        for halo in halo_list:
            halofile.write(str(halo.maximum_density()) + "\n")
    elif property_attribute == 'particle_size':
        for halo in halo_list:
            halofile.write(str(halo.get_size()) + "\n")
    elif property_attribute == 'max_radius':
        for halo in halo_list:
            halofile.write(str(halo.maximum_radius()) + "\n")
    elif property_attribute == 'virial_mass':
        for halo in halo_list:
            halofile.write(str(halo.virial_mass()) + "\n")
    elif property_attribute == 'virial_radius':
        for halo in halo_list:
            halofile.write(str(halo.virial_radius()) + "\n")
    elif property_attribute == 'maximum_density_location':
        for halo in halo_list:
            halofile.write(str(halo.maximum_density_location()) + "\n")
    halofile.close()
    return
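# Usage sketch (illustrative values; property_attribute must be one of the names handled above):
property_file('ramses', 1.0, 'total_mass')   # writes one value per halo to total_mass.txt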
Example #5
def star_creation(code, redshift):
    ds = ld.load_data(code, redshift)
    dd = ds.all_data()
    pm, pi, ct = star_prop(dd)

    plt.figure()
    plt.plot(pi, ct, '.')
    plt.title('Particle Index vs. Creation Time for %s %s' % (code, redshift))
    plt.savefig(str(code)+str(redshift)+"index_vs_creation.png")

    # New figure so the mass plot does not overlay the index plot.
    plt.figure()
    plt.plot(pm, ct, '.')
    plt.title('Particle Mass vs. Creation Time for %s %s' % (code, redshift))
    plt.savefig(str(code)+str(redshift)+"particlemass_vs_creation.png")
Example #6
    def load(self, img_rows=IMAGE_SIZE, img_cols=IMAGE_SIZE,
             img_channels=3, nb_classes=3):
        # Load the dataset into memory.
        images, labels = load_data(self.path_name)

        train_images, valid_images, train_labels, valid_labels = train_test_split(images, labels, test_size=0.3,
                                                                                  random_state=random.randint(0, 100))
        _, test_images, _, test_labels = train_test_split(images, labels, test_size=0.5,
                                                          random_state=random.randint(0, 100))

        # If the current dimension ordering is 'th', images are fed as (channels, rows, cols),
        # otherwise as (rows, cols, channels). Reshape the data to the ordering Keras expects.
        if K.image_dim_ordering() == 'th':
            train_images = train_images.reshape(train_images.shape[0], img_channels, img_rows, img_cols)
            valid_images = valid_images.reshape(valid_images.shape[0], img_channels, img_rows, img_cols)
            test_images = test_images.reshape(test_images.shape[0], img_channels, img_rows, img_cols)
            self.input_shape = (img_channels, img_rows, img_cols)
        else:
            train_images = train_images.reshape(train_images.shape[0], img_rows, img_cols, img_channels)
            valid_images = valid_images.reshape(valid_images.shape[0], img_rows, img_cols, img_channels)
            test_images = test_images.reshape(test_images.shape[0], img_rows, img_cols, img_channels)
            self.input_shape = (img_rows, img_cols, img_channels)

        # Print the number of training, validation and test samples.
        print(train_images.shape[0], 'train samples')
        print(valid_images.shape[0], 'valid samples')
        print(test_images.shape[0], 'test samples')

        # The model uses categorical_crossentropy as its loss function, so the class labels
        # must be one-hot encoded (vectorized) according to nb_classes.
        train_labels = np_utils.to_categorical(train_labels, nb_classes)
        valid_labels = np_utils.to_categorical(valid_labels, nb_classes)
        test_labels = np_utils.to_categorical(test_labels, nb_classes)

        # Convert pixel data to float so it can be normalized.
        train_images = train_images.astype('float32')
        valid_images = valid_images.astype('float32')
        test_images = test_images.astype('float32')

        # Normalize pixel values to the 0-1 range.
        train_images /= 255
        valid_images /= 255
        test_images /= 255

        self.train_images = train_images
        self.valid_images = valid_images
        self.test_images = test_images
        self.train_labels = train_labels
        self.valid_labels = valid_labels
        self.test_labels = test_labels
def generate_cluster(name=None):

    #Load Dataset with inputs: node, dataset, transpose_flag
    node = "ABBV_sym"
    transpose_flag = 0
    dataset = 'dataset/pharma_pharma_dataset.csv'
    args = load_data(node, dataset, transpose_flag)

    #Extract Clusters
    cluster = extract_cluster(args)
    print(cluster)



    #visualize that cluster on index.html
    return render_template('index.html', name=name)
def mstar_mhalo(code, redshift, st=0, end=10):
    ds = ld.load_data(code, redshift)
    result = []
    for halo in range(int(st), int(end)):
        print(halo)
        sphere_center = nh._center_of_mass(code, redshift, halo)
        radius = nh._radius(code, redshift, halo)
        sp = ds.sphere(sphere_center, (int(radius), "kpc"))
        stars = s.find_stars(sp)
        m_star = s.find_M_star(sp)
        star_mass = sum(int(x) for x in m_star)
        baryon_mass, particle_mass = sp.quantities.total_quantity(["cell_mass", "particle_mass"])
        baryon_mass = baryon_mass.in_units('Msun')
        particle_mass = particle_mass.in_units('Msun')
        total_mass = baryon_mass + particle_mass
        result.append([halo, total_mass, baryon_mass, particle_mass, star_mass])
    return result
def mstar_mhalo(code, redshift, halos='all'):
    ds = ld.load_data(code, redshift)
    print('loaded')
    result = []

    if halos == 'all':
        print('in loop')
        # Count the halos listed in the center-of-mass output file.
        file_name = cd.get_output_file_name(code, 'com', redshift)
        file_com = open(file_name, 'r')
        number_halos = []
        for line in file_com:
            number_halos.append(line.split())
        file_com.close()
        print(len(number_halos))
        halo_range = range(0, len(number_halos))
    else:
        halo_range = range(int(halos[0]), int(halos[1]))

    for halo in halo_range:
        print(halo)
        sphere_center = nh._center_of_mass(code, redshift, halo)
        radius = nh._radius(code, redshift, halo)
        sp = ds.sphere(sphere_center, (int(radius), "kpc"))
        m_star = s.star_prop(sp, 'pm')
        m_star = m_star.in_units('Msun')
        star_mass = sum(int(x) for x in m_star)
        baryon_mass, particle_mass = sp.quantities.total_quantity(["cell_mass", "particle_mass"])
        baryon_mass = baryon_mass.in_units('Msun')
        particle_mass = particle_mass.in_units('Msun')
        total_mass = baryon_mass + particle_mass
        result.append([halo, total_mass, baryon_mass, particle_mass, star_mass])
    return result
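# Usage sketch for this variant (illustrative values; a two-element list selects a halo index range):
all_rows = mstar_mhalo('ramses', 1.0, halos='all')
some_rows = mstar_mhalo('ramses', 1.0, halos=[0, 10])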
def total_mass_sphere(code, redshift, sphere_center, radius):
    # Load the dataset.
    ds = ld.load_data(code, redshift)

    # Create a sphere of the requested radius (in kpc) around the given center.
    sp = ds.sphere(sphere_center, (int(radius), "kpc"))

    # Use the total_quantity derived quantity to sum up the
    # values of the cell_mass and particle_mass fields
    # within the sphere.
    baryon_mass, particle_mass = sp.quantities.total_quantity(["cell_mass", "particle_mass"])
    massfile_name = str(code) + str(redshift) + 'spheremass.txt'
    ld.check_file(massfile_name)
    massfile = open(massfile_name, 'w')
    massfile.write("Total mass in sphere is %0.3e Msun (gas = %0.3e Msun, particles = %0.3e Msun)" %
                   ((baryon_mass + particle_mass).in_units('Msun'),
                    baryon_mass.in_units('Msun'), particle_mass.in_units('Msun')))
    massfile.close()
    return
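# Usage sketch (illustrative center and radius in kpc; any center accepted by ds.sphere works):
total_mass_sphere('ramses', 1.0, [0.5, 0.5, 0.5], 1000)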
Example #11
def display_data():

    flags, atmospheric_pressure = load_data()
    sst = load_data_custom('TS')
    at = load_data_custom('T')
    #print(at)
    pressure_flags = [
        value for sublist in flags for counter, value in enumerate(sublist)
        if counter == 10
    ]

    sst_flags = [
        value for sublist in flags for counter, value in enumerate(sublist)
        if counter == 13
    ]

    at_flags = [
        value for sublist in flags for counter, value in enumerate(sublist)
        if counter == 11
    ]

    side_by_side = list(zip(at, atmospheric_pressure, at_flags,
                            pressure_flags))
    print(len(side_by_side))
    """
  df_flags = pd.DataFrame(flags, columns=['time', 'lat', 'lon', 'PL_HD', 'PL_CRS',
  'DIR', 'PL_WDIR', 'PL_SPD', 'SPD', 'PL_WSPD', 'P', 'T', 'RH', 'TS', 'SSPS',])
  """

    df_flag_and_obs = pd.DataFrame(side_by_side,
                                   columns=[
                                       'air temp', 'pressure observation',
                                       'at flag', 'pressure flag'
                                   ])

    with pd.option_context('display.max_rows', None, 'display.max_columns',
                           None):
        print(df_flag_and_obs)
    #print(df_flag_and_obs)
    return df_flag_and_obs
Example #12
def evaluate_data():
    # Load dataset
    dataset = load_dataset.load_data()

    # Split-out validation dataset
    array = dataset.values
    X = array[:, 0:4]
    Y = array[:, 4]
    validation_size = 0.20
    seed = 7
    X_train, X_validation, Y_train, Y_validation = model_selection.train_test_split(
        X, Y, test_size=validation_size, random_state=seed)

    # Test options and evaluation metric
    scoring = 'accuracy'

    # Spot Check Algorithms
    models = []
    models.append(('LR', LogisticRegression()))
    models.append(('LDA', LinearDiscriminantAnalysis()))
    models.append(('KNN', KNeighborsClassifier()))
    models.append(('CART', DecisionTreeClassifier()))
    models.append(('NB', GaussianNB()))
    models.append(('SVM', SVC()))
    # evaluate each model in turn
    results = []
    names = []
    for name, model in models:
        kfold = model_selection.KFold(n_splits=10, random_state=seed)
        cv_results = model_selection.cross_val_score(model,
                                                     X_train,
                                                     Y_train,
                                                     cv=kfold,
                                                     scoring=scoring)
        results.append(cv_results)
        names.append(name)
        msg = "%s: %f (%f)" % (name, cv_results.mean(), cv_results.std())
        print(msg)

    return X_train, X_validation, Y_train, Y_validation
def setup():

	global X_train, X_test, Y_train, Y_test, input_shape
	X, y = load_data(loc_)
	X_train = X
	y_train = y

	# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

	if K.image_dim_ordering() == 'th':
		X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
		# X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
		input_shape = (1, img_rows, img_cols)
	else:
		X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
		# X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
		input_shape = (img_rows, img_cols, 1)

	X_train = X_train.astype('float32')
	X_train = X_train / 255

	# convert class vectors to binary class matrices
	Y_train = np_utils.to_categorical(y_train, nb_classes)
Example #14
PATCH_WIDTH = 100
PATCH_HEIGHT = 100
PATCH_SIZE = PATCH_WIDTH * PATCH_HEIGHT * 3

# processing command arguments

phone, batch_size, train_size, learning_rate, num_train_iters, \
w_content, w_color, w_texture, w_tv, \
dped_dir, vgg_dir, eval_step = utils.process_command_args(sys.argv)

np.random.seed(0)

# loading training and test data

print("Loading test data...")
test_data, test_answ, train_data, train_answ = load_data(dataset, 0.1)
print("data was loaded\n")

TEST_SIZE = test_data.shape[0]
num_test_batches = int(test_data.shape[0] / batch_size)

# defining system architecture

with tf.Graph().as_default(), tf.Session() as sess:

    # placeholders for training data

    phone_ = tf.placeholder(tf.float32, [None, PATCH_SIZE])
    phone_image = tf.reshape(phone_, [-1, PATCH_HEIGHT, PATCH_WIDTH, 3])

    dslr_ = tf.placeholder(tf.float32, [None, PATCH_SIZE])
Example #15
def training():

	batch_size = 10
	noise_type = 'gaussian'
	noise_proportion = 0.2
	noise_mean = 0
	noise_std = 1
	noise_lam = 1
	noise_std_range = [1,5]
	noise_lam_range = [1,5]
	loss_type = 'l2_loss'
	study_rate = 1e-5

	current_dir = Path('.')

	train_true,__,test_true=load_data(dataset='mias',
		DIR=current_dir)

	train_true = train_true.repeat().batch(batch_size)
	train_true = train_true.prefetch(buffer_size =
		tf.data.experimental.AUTOTUNE)

	test_true = test_true.repeat().batch(batch_size)
	test_true = test_true.prefetch(buffer_size =
		tf.data.experimental.AUTOTUNE)

	iter_train_handle = train_true.make_one_shot_iterator().string_handle()
	iter_val_handle = test_true.make_one_shot_iterator().string_handle()

	handle = tf.placeholder(tf.string, shape=[])
	iterator = Iterator.from_string_handle(handle,
		train_true.output_types,
		train_true.output_shapes)

	next_batch = iterator.get_next()

	noise_args = {'proportion':noise_proportion}

	if noise_type == 'random':
		noise_fn = random_noise
		noise_args['std_range'] = noise_std_range
		noise_args['lam_range'] = noise_lam_range
	elif noise_type == 'poisson':
		noise_fn = poisson_noise
		noise_args['lam'] = noise_lam
	else:
		noise_fn = gaussian_noise
		noise_args['mean'] = noise_mean
		noise_args['std'] = noise_std


	true_img = tf.placeholder(tf.uint8, 
		shape=[batch_size, 64, 64, 1])

	noised_img = noise_fn(**noise_args,
		image=true_img)

	model_input = tf.cast(noised_img,
		dtype=tf.float32)

	denoised_img = QAE.build_QAE(model_input)

	if loss_type == 'l2_loss':
		train_loss = l2_loss(
			tf.cast(true_img,
				dtype=tf.float32),
			denoised_img)
		# val_loss = l2_loss(
		# 	tf.cast(test_true_img,
		# 		dtype=tf.float32),
		# 	test_denoised_img)

	total_train_loss = train_loss

	optimizer = tf.train.AdamOptimizer(\
		learning_rate=study_rate).minimize(
		total_train_loss)

	tf.summary.scalar('train_l2_loss',train_loss)
	# tf.summary.scalar('val_l2_loss',val_loss)


	tf.summary.scalar('total_train_loss',
		total_train_loss)

	merged_summary = tf.summary.merge_all()
	train_writer = tf.summary.FileWriter(
		current_dir/'train_data')


	init_vars = tf.group(
		tf.global_variables_initializer(),
		tf.local_variables_initializer())

	saver = tf.train.Saver()


	with tf.Session().as_default() as sess:
		global_step = tf.train.get_global_step()

		handle_train, handle_val = sess.run(
			[iter_train_handle, iter_val_handle])

		sess.run(init_vars)

		for step in range(500):
			train_true_img = sess.run(next_batch,
				feed_dict={handle: handle_train})
			test_true_img = sess.run(next_batch,
				feed_dict={handle: handle_val})

			_ = sess.run(optimizer, 
				feed_dict={true_img:train_true_img})

			t_summ = sess.run(merged_summary,
				feed_dict={true_img:train_true_img})

			t_loss = sess.run(total_train_loss,
				feed_dict={true_img:train_true_img})

			train_writer.add_summary(t_summ,step)

			print('Iter:{}, Training Loss {}'.format(
				step, t_loss))

			if step % 20 == 0:
				# Visualization code is truncated in the source ("fig,axes = ...");
				# left as a no-op so the loop still runs.
				pass

		print('done')
import numpy as np
from stradimod.utils import sigmoid, relu, tanh
from stradimod.netwok import Network
from stradimod.layers import Dense
from load_dataset import load_data

np.random.seed(1)

x_train, y_train, x_test, y_test, classes = load_data()

# === TEST 1 =====

print("====== TEST 1 =====")
model = Network()
model.add(Dense(5, relu))
model.add(Dense(5, relu))
model.add(Dense(1, sigmoid))
model.build(12288)
model.train(x_train, y_train, epochs=3000, learning_rate=0.03)
accuracy = model.predict(x_test, y_test)
print(f"Accuracy: {accuracy*100}%")
print("")

# === TEST 2 ====

print("====== TEST 2 =====")
model = Network()
model.add(Dense(5, tanh))
model.add(Dense(5, tanh))
model.add(Dense(1, sigmoid))
model.build(12288)
Example #17
            print("batch_time: %dms - batch_loss: %.4f" % ((batch_end - batch_start)*1000, batch_loss))

        epoch_end = time.time()
        # Print this epoch's information.
        print("batchs: %d - epoch_time: %ds %dms/batch - loss: %.4f" % (batchs, epoch_end - epoch_start, (epoch_end-epoch_start)*1000/batchs, total_loss/batchs))
        
        # Save a checkpoint according to the save_interval value in the JSON config file.
        if epoch % save_interval == 0:
            manager.save()


if __name__ == "__main__":
    configs = get_config()

    epochs = configs["train"]["train_epochs"]
    data_path = configs["train"]["data_path"]
    num_examples = configs["train"]["num_examples"]
    dataset_name = configs["preprocess"]["dataset_name"]
    batch_size = configs["train"]["batch_size"]

    # Load the data and build the data generator.
    train_data = load_data(dataset_name, data_path, "train", num_examples)
    batchs = ceil(len(train_data[0]) / batch_size)
    train_data_generator = data_generator(train_data, "train", batchs, batch_size)

    # Load the model.
    model = get_ds2_model()
    optimizer = tf.keras.optimizers.Adam()
    # Train the model.
    train(model, optimizer, train_data_generator, batchs, epochs)
    
Example #18
def simulation_plot(code,redshift):
    ds=ld.load_data(code, redshift)

    ProjectionPlot(ds, 2, 'density', width=(25.0, 'mpc'),weight_field=None).save(str(code)+str(redshift)+'complete-density.png')
    ProjectionPlot(ds, 2, 'temperature', width=(25.0, 'mpc'),weight_field=None).save(str(code)+str(redshift)+'complete-temperature.png')
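# Usage sketch (illustrative code/redshift pair; saves full-box density and temperature projections):
simulation_plot('ramses', 1.0)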
Example #19
    AL, cache = forward_propagation(A, parameters['W2'], parameters['b2'], activation = "sigmoid")
    
    # convert probabilities to 0 or 1 
    for i in range(0, AL.shape[1]):
        if AL[0,i] > 0.5:
            p[0,i] = 1
        else:
            p[0,i] = 0
    
    print("Accuracy: "  + str(np.sum((p == y)/m)))
    
    return AL

# Load the very popular cat vs. non-cat dataset for testing purposes.
from load_dataset import load_data
train_x_orig, train_y, test_x_orig, test_y, classes = load_data()

train_x_flatten = train_x_orig.reshape(train_x_orig.shape[0], -1).T  
test_x_flatten = test_x_orig.reshape(test_x_orig.shape[0], -1).T

# Standardize data to have feature values between 0 and 1.
train_x = train_x_flatten / 255.
test_x = test_x_flatten / 255.

### CONSTANTS DEFINING THE MODEL ####
n_x = 12288    # size of the input layer (num_px * num_px * 3)
n_h = 7        # size of the hidden layer
n_y = 1        # size of the output layer
layer_dimention = (n_x, n_h, n_y)

#Build the model

# Function used to compute accuracy.
def calc_accuracy(prediction, actual):
    counter = 0
    index = 0
    while (index < len(prediction)):
        if (prediction[index] == actual[index]):
            counter += 1
        index += 1

    return counter / len(prediction)
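# Illustrative (hypothetical) use of calc_accuracy on equal-length label sequences:
# calc_accuracy([1, 0, 1, 1], [1, 1, 1, 1]) returns 0.75 (3 of 4 predictions match).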


# Load the dataset.
x_train, x_test, train_label, test_labels = load_data()

# Convert the classes to one-hot encoding.
train_label = to_categorical(train_label, 10)
test_labels = to_categorical(test_labels, 10)

print("Data is ready...")
"""
Modelin tanımlandığı yer.
Bizim ilk girişi ve çıkışı vermemiz yeterli. Ara adımlar keras tarafından hesaplanır.
"""
model = models.Sequential()
# 64 filters, ReLU activation, 3x3 kernels. No padding and stride = 1.
model.add(
    layers.Conv2D(64, (3, 3), activation='relu', input_shape=(200, 200, 3)))
model.add(layers.MaxPooling2D((2, 2)))
Example #21
    model = get_ds2_model()
    checkpoint = tf.train.Checkpoint(model=model)
    manager = tf.train.CheckpointManager(
        checkpoint,
        directory=configs["checkpoint"]['directory'],
        max_to_keep=configs["checkpoint"]['max_to_keep'])
    if manager.latest_checkpoint:
        checkpoint.restore(manager.latest_checkpoint)

    test_data_path = configs["test"]["data_path"]
    num_examples = configs["test"]["num_examples"]
    dataset_name = configs["preprocess"]["dataset_name"]
    batch_size = configs["test"]["batch_size"]

    # Load the test-set data generator.
    test_data = load_data(dataset_name, test_data_path, "test", num_examples)
    batchs = ceil(len(test_data[0]) / batch_size)
    test_data_generator = data_generator(test_data, "test", batchs, batch_size)

    aver_wers = 0
    aver_lers = 0
    aver_norm_lers = 0

    # Build the character-set object (index-to-word mapping).
    index_word = get_index_word()

    for batch, (input_tensor, labels_list) in zip(range(1, batchs + 1),
                                                  test_data_generator):
        originals = labels_list
        results = []
        y_pred = model(input_tensor)
Example #22
# -*- coding: utf-8 -*-
from mlp import NeuralNetMLP
from load_dataset import load_data
import matplotlib.pyplot as plt
import os
import pickle
import numpy as np

# Load the training and test datasets
path = os.getcwd()
# Load the 60000 training instances
X_train, y_train = load_data(path, tipo='train')
print('Rows: %d, columns: %d' % (X_train.shape[0], X_train.shape[1]))

# Load the 10000 test instances
X_test, y_test = load_data(path, tipo='t10k')
print('Rows: %d, columns: %d' % (X_test.shape[0], X_test.shape[1]))

# Create a neural network instance with the following parameters
neural_network = NeuralNetMLP(n_output=10,
                              n_features=X_train.shape[1],
                              n_hidden=50,
                              l2=0.1,
                              l1=0.0,
                              epochs=2000,
                              eta=0.001,
                              alpha=0.001,
                              decrease_const=0.00001,
                              shuffle=True,
                              minibatches=50,
                              random_state=1)
            fp.write('{}\t{}\t{}\t{}\t{}\n'.format(
                i, loss[i], acc[i], val_loss[i], val_acc[i]))


if __name__ == '__main__':
    epochs = 50
    nb_classes = 6
    batch_size = 32
    original_size_image_path = '/home/deep/datasets/kth/images/original/'
    crop_size_image_path = '/home/deep/datasets/kth/images/crop/'
    output_path = 'results/'


    print('training with original size image')
    model_ori,_ = model_original_size()
    x_train, y_train, x_val, y_val = load_data(original_size_image_path,'original')
    y_train = keras.utils.to_categorical(y_train,nb_classes)
    y_val = keras.utils.to_categorical(y_val)

    model_ori.summary()
    lr = 0.005
    sgd = keras.optimizers.SGD(lr=lr, nesterov=True, momentum=0.9)
    model_ori.compile(loss=keras.losses.categorical_crossentropy,
                      optimizer=sgd,
                      metrics=['accuracy'])
    history = model_ori.fit(x_train,y_train,
                  batch_size=batch_size,
                  epochs=epochs,
                  verbose=1,
                  callbacks=[onetenth_20_30_40(lr=lr)],
                  validation_data=(x_val,y_val),
from tensorflow.keras import models
from tensorflow.keras.layers import Conv2D,MaxPooling2D,Flatten,Dense,Dropout,BatchNormalization
from tensorflow.keras.utils import to_categorical, normalize
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.regularizers import l1,l2
import numpy as np
from load_dataset import load_data, preprocess_data
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from time import process_time
import matplotlib.pyplot as plt


data_dir = '/home/kangle/dataset/PedBicCarData'
train_data, train_label, test_data, test_label = load_data(data_dir, 2, 2)
train_data, train_label, test_data, test_label = preprocess_data(train_data, train_label, test_data, test_label, 'cnn')

train_data, val_data, train_label, val_label = train_test_split(train_data, train_label, test_size=0.1, random_state=42)
print("Split training data into training and validation data:\n")
print("training data: %d" % train_data.shape[0])
print("validation data: %d" % val_data.shape[0])

model = models.Sequential()
model.add(Conv2D(16, [5, 5], input_shape=train_data.shape[1:], activation='relu', kernel_initializer='he_uniform', kernel_regularizer='l2', name='conv_1'))
model.add(MaxPooling2D())
model.add(Conv2D(32, [5, 5], activation='relu', kernel_initializer='he_uniform', kernel_regularizer='l2', name='conv_2'))
model.add(MaxPooling2D())
model.add(Flatten())
model.add(Dense(120, activation='relu', kernel_initializer='he_uniform', name='dense_1'))
#model.add(BatchNormalization())
model.add(Dense(84, activation='relu', kernel_initializer='he_uniform', name='dense_2'))
Example #25
from yt import *
import load_dataset as ld


code = 'ramses'
redshift = 1.0
ds = ld.load_data(code, redshift)
dd = ds.all_data()
pa = dd[('all', 'particle_age')]
print(pa)
Example #26
def main():
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)

    np.random.seed(args.seed)

    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)

    logging.info("args = %s", args)

    # Load the data.
    data_loaders, dataset_sizes, dataset = load_data(args.batch_size)

    # Loss function.
    criterion = nn.CrossEntropyLoss()
    # criterion = torch.nn.DataParallel(criterion)
    criterion = criterion.cuda()

    # Initialize the PC-DARTS model.
    model = Network(args.init_channels, CLASSES, args.layers, criterion)
    model = model.cuda().half()
    # Arcface
    margin = ArcMarginProduct(512, CLASSES)
    # margin = torch.nn.DataParallel(margin)
    margin = margin.cuda().half()

    if MULTI_GPU:
        model = torch.nn.DataParallel(model)
        margin = torch.nn.DataParallel(margin)
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

    # Optimizers for PC-DARTS and the ArcFace margin.
    if MULTI_GPU:
        optimizer = torch.optim.SGD(
            # [{'params': model.module.parameters(), 'weight_decay': args.weight_decay},
            [{
                'params': margin.module.parameters(),
                'weight_decay': args.weight_decay
            }],
            args.learning_rate,
            momentum=args.momentum)
        # Optimizer for the architecture parameters.
        optimizer_a = torch.optim.Adam(model.module.arch_parameters(),
                                       lr=args.arch_learning_rate,
                                       betas=(0.5, 0.999),
                                       weight_decay=args.arch_weight_decay)
    else:
        optimizer = torch.optim.SGD([{
            'params': model.parameters(),
            'weight_decay': args.weight_decay
        }, {
            'params': margin.parameters(),
            'weight_decay': args.weight_decay
        }],
                                    args.learning_rate,
                                    momentum=args.momentum)
        # Optimizer for the architecture parameters.
        optimizer_a = torch.optim.Adam(model.arch_parameters(),
                                       lr=args.arch_learning_rate,
                                       betas=(0.5, 0.999),
                                       weight_decay=args.arch_weight_decay)

    if resume:
        checkpoint = torch.load(
            '/data/face_recognition/PC-DARTS-master/checkpoints_ms1m/search-try-20200423-094408/PC-DARTS_FACE.pth.tar'
        )
        model.load_state_dict(checkpoint['model_state_dict'])
        # margin.load_state_dict(checkpoint['margin_state_dict'])
        # optimizer.load_state_dict(checkpoint['optimizer'])
        # # optimizer_a.load_state_dict(checkpoint['optimizer_a'])
        # start_epoch = checkpoint['epoch']
        # optimizer.param_groups[0]['lr'] = 0.5
        lr = args.learning_rate
    else:
        lr = args.learning_rate
        start_epoch = 0
        # sys.exit()
    if fineturn:
        checkpoint = torch.load(
            '/data/face_recognition/PC-DARTS-master/checkpoints_ms1m/search-try-20200423-094408/PC-DARTS_FACE.pth.tar'
        )
        model.load_state_dict(checkpoint['model_state_dict'])
        margin.load_state_dict(checkpoint['margin_state_dict'])
        lr = args.learning_rate
        print(lr)
        start_epoch = 0

    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, float(args.epochs), eta_min=args.learning_rate_min)

    for epoch in range(args.epochs):
        scheduler.step()
        current_lr = scheduler.get_lr()[0]
        logging.info('Epoch: %d lr: %e', epoch, current_lr)

        if epoch < 5 and args.batch_size > 256 and not fineturn:
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr * (epoch + 1) / 5.0
            logging.info('Warming-up Epoch: %d, LR: %e', epoch,
                         lr * (epoch + 1) / 5.0)
            # print(optimizer)
        if MULTI_GPU:
            genotype = model.module.genotype()
            logging.info('genotype = %s', genotype)
            arch_param = model.module.arch_parameters()
        else:
            genotype = model.genotype()
            logging.info('genotype = %s', genotype)
            arch_param = model.arch_parameters()

        # training
        train_acc, train_obj = train(data_loaders['train_subset'],
                                     data_loaders['valid_subset'], model,
                                     margin, optimizer, optimizer_a, criterion,
                                     lr, epoch)
        logging.info('Train_acc %f', train_acc)

        # validation
        if epoch >= args.begin:
            infer(data_loaders, dataset, model, criterion, epoch)

        # utils.save(model, os.path.join(args.save, 'weights.pt'))
        # utils.save(margin, os.path.join(args.save, 'margin_weights.pt'))

        state = {
            'epoch': epoch + 1,
            'args': args,
            'margin_state_dict': margin.state_dict(),
            'model_state_dict': model.state_dict(),
            'optimizer': optimizer.state_dict(),
            'optimizer_a': optimizer_a.state_dict()
        }
        # filename = 'checkpoints/' + args.output_filename + '.pth.tar'
        filename = os.path.join(args.save, 'PC-DARTS_FACE') + '.pth.tar'
        torch.save(state, filename)
Example #27
# Summarize the Dataset

import load_dataset

# Main program
if __name__ == '__main__':
    # Load dataset
    dataset = load_dataset.load_data()

    # shape
    # print(dataset.shape)

    # head
    # print(dataset.head(20))

    # descriptions
    # print(dataset.describe())

    # class distribution
    print(dataset.groupby('class').size())
def train(args):
    params = vars(args)
    with open(params["config"], mode='r') as f:
        paramset = json.load(f)
    data_dir = paramset["root_dir"]
    samp_rate_t = paramset["sample_rate"]["sample_rate_t"]
    samp_rate_f = paramset["sample_rate"]["sample_rate_f"]

    train_data_raw, train_label_raw, test_data_raw, test_label_raw = load_data(
        data_dir, samp_rate_t, samp_rate_f)

    if "dimension_reduction" in paramset:
        dim_reducer = paramset["dimension_reduction"]["method"]
        num_components = paramset["dimension_reduction"]["n_components"]
        algo_map[dim_reducer]["parameters"]["n_components"] = num_components
        print('\nbegin dimensionality reduction process.')
        module = importlib.import_module(algo_map[dim_reducer]["module"])
        reducer = getattr(module, algo_map[dim_reducer]["function"])(
            **algo_map[dim_reducer]["parameters"])
        train_data, train_label, test_data, test_label = preprocess_data(
            train_data_raw, train_label_raw, test_data_raw, test_label_raw,
            dim_reducer)
        reducer.fit(train_data)
        train_data = reducer.transform(train_data)
        test_data = reducer.transform(test_data)
        print('\nafter dimensionality reduction:')
        print(train_data.shape)
        print(test_data.shape)

    print('\nbegin training process.')
    classify_method = paramset["classifier"]["method"]
    classify_parameter = paramset["classifier"]["parameter"]
    if "dimension_reduction" not in paramset:
        train_data, train_label, test_data, test_label = preprocess_data(
            train_data_raw, train_label_raw, test_data_raw, test_label_raw,
            classify_method)
    module = importlib.import_module(algo_map[classify_method]["module"])
    if algo_map[classify_method]["module"] == "nnet_lib":
        #classifier = getattr(module, algo_map[classify_method]["function"])(train_data, train_label)
        classifier, history = getattr(module,
                                      "nnet_training")(train_data, train_label,
                                                       classify_method,
                                                       **classify_parameter)
        plot_learncurve(classify_method, history=history)
    else:
        classifier = getattr(
            module,
            algo_map[classify_method]["function"])(**classify_parameter)
        classifier.fit(train_data, train_label)
        plot_learncurve(classify_method,
                        estimator=classifier,
                        data=train_data,
                        label=train_label,
                        train_sizes=np.linspace(0.05, 0.2, 5))

    print('\npredict for test data.')
    test_pred = classifier.predict(test_data)
    train_pred = classifier.predict(train_data)

    if len(test_pred.shape) > 1:
        test_pred = np.argmax(test_pred, axis=1)
        train_pred = np.argmax(train_pred, axis=1)
        test_label = np.argmax(test_label, axis=1)
        train_label = np.argmax(train_label, axis=1)

    print('\nevaluate the prediction(train data).')
    train_conf = confusion_matrix(train_label, train_pred)
    train_precision = precision_score(train_label, train_pred, average=None)
    train_recall = recall_score(train_label, train_pred, average=None)
    print(train_conf)
    print(train_precision)
    print(train_recall)

    print('\nevaluate the prediction(test data).')
    test_conf = confusion_matrix(test_label, test_pred)
    test_precision = precision_score(test_label, test_pred, average=None)
    test_recall = recall_score(test_label, test_pred, average=None)
    print(test_conf)
    print(test_precision)
    print(test_recall)

    pred_result = {
        "train_conf": train_conf,
        "train_precision": train_precision,
        "train_recall": train_recall,
        "test_conf": test_conf,
        "test_precision": test_precision,
        "test_recall": test_recall
    }

    print('\ngenerate report file')
    #logFile = write_log(paramset, pred_result)
    if algo_map[classify_method]["module"] == "nnet_lib":
        logFile = write_log(paramset, pred_result, classifier, history)
    else:
        logFile = write_log(paramset, pred_result)

    print(logFile)

    if params["show_misclassified"]:
        indices = [
            i for i in range(len(test_label)) if test_pred[i] != test_label[i]
        ]
        show_data(test_data_raw[indices], test_label[indices], indices,
                  test_pred[indices])
def plot_halo_sphere(code, redshift, halo_number, width=400):
    ds = ld.load_data(code, redshift)
    radius = nh._radius(code, redshift, halo_number)
    center = nh._center_of_mass(code, redshift, halo_number)
    halo_sphere = ds.sphere(center, (float(radius), "kpc"))
    # Note: the original call omitted the projection axis; 'z' is assumed here to match the other examples.
    ProjectionPlot(ds, 'z', 'density', width=(int(width), "kpc"), center='max', weight_field=None, data_source=halo_sphere).save('Halo'+str(halo_number)+'.png')
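# Usage sketch (illustrative halo number and width in kpc, assuming the nh helper module is available):
plot_halo_sphere('ramses', 1.0, 0, width=400)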
batch_size = 128
nb_classes = 5
nb_epoch = 20

# Directory location of Train and Test Datasets
loc_ = "/home/samiran/Desktop/suhitCS726/Dataset-CS726"

img_rows, img_cols = 96, 128
# number of convolutional filters to use
nb_filters = 64
# size of pooling area for max pooling
pool_size = (2, 2)
# convolution kernel size
kernel_size = (3, 3)

X, y = load_data(loc_)
X_train, X_test, y_train, y_test = train_test_split(X,
                                                    y,
                                                    test_size=0.2,
                                                    random_state=42)

if K.image_dim_ordering() == 'th':
    X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
    X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
    X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)

X_train = X_train.astype('float32')
Example #31
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from load_dataset import load_data_custom, load_data

flags, atmospheric_pressure = load_data()
sst = load_data_custom('TS')
at = load_data_custom('T')
#print(at)
pressure_flags = [
    value for sublist in flags for counter, value in enumerate(sublist)
    if counter == 10
]

sst_flags = [
    value for sublist in flags for counter, value in enumerate(sublist)
    if counter == 13
]

at_flags = [
    value for sublist in flags for counter, value in enumerate(sublist)
    if counter == 11
]

side_by_side = list(zip(at, atmospheric_pressure, at_flags, pressure_flags))

#print(side_by_side)
#Z cleared data
cleared_p_data = []
cleared_at_data = []
def inference(data_dir, regu, training):
    with tf.variable_scope("input"):
        meta, train_data, test_data = load_data(data_dir)
        CAPATCHA_SIZE = meta["captcha_size"]
        IMAGE_WIDTH = meta["width"]
        IMAGE_HEIGHT = meta["height"]
        IMAGE_SIZE = IMAGE_WIDTH * IMAGE_HEIGHT

    x = tf.placeholder(dtype=tf.float32,
                       shape=[None, IMAGE_HEIGHT, IMAGE_WIDTH])
    y_ = tf.placeholder(dtype=tf.float32, shape=[None, CAPATCHA_SIZE])

    input = tf.reshape(x, [-1, IMAGE_HEIGHT, IMAGE_WIDTH, 1])
    tf.summary.image("input", input, max_outputs=10)

    with tf.variable_scope("layer1_conv"):
        weight = tf.get_variable("weight",
                                 shape=[3, 3, 1, 64],
                                 initializer=tf.truncated_normal_initializer())
        bias = tf.get_variable("bias",
                               shape=[64],
                               initializer=tf.constant_initializer(0.1))
        conv = tf.nn.conv2d(input,
                            weight,
                            strides=[1, 1, 1, 1],
                            padding="SAME")
        activated_conv = tf.nn.relu(tf.nn.bias_add(
            conv, bias))  # Plain addition cannot be used directly: every position in the matrix must receive the same bias.

    with tf.variable_scope("layer2-pool"):
        pool = tf.nn.max_pool(activated_conv,
                              ksize=[1, 2, 2, 1],
                              strides=[1, 2, 2, 1],
                              padding="SAME")

    with tf.variable_scope("layer3_conv"):
        weight = tf.get_variable("weight",
                                 shape=[3, 3, 64, 128],
                                 initializer=tf.truncated_normal_initializer())
        bias = tf.get_variable("bias",
                               shape=[128],
                               initializer=tf.constant_initializer(0.1))
        conv = tf.nn.conv2d(pool, weight, strides=[1, 1, 1, 1], padding="SAME")
        activated_conv = tf.nn.relu(tf.nn.bias_add(conv, bias))

    with tf.variable_scope("layer4-pool"):
        pool = tf.nn.max_pool(activated_conv,
                              ksize=[1, 2, 2, 1],
                              strides=[1, 2, 2, 1],
                              padding="SAME")

    pool_shape = pool.get_shape().as_list()
    nodes = pool_shape[1] * pool_shape[2] * pool_shape[3]
    reshaped = tf.reshape(pool, [-1, nodes])

    with tf.variable_scope("layer5-fc1"):
        weight = tf.get_variable(
            "weight",
            shape=[nodes, 512],
            initializer=tf.truncated_normal_initializer(stddev=0.1))
        bias = tf.get_variable("bias",
                               shape=[512],
                               initializer=tf.constant_initializer(0.1))
        if regu is not None:
            regularizer = tf.contrib.layers.l2_regularizer(0.001)
            tf.add_to_collection("loss", regularizer(weight))
        fc = tf.nn.relu(tf.matmul(reshaped, weight) + bias)
        if training:
            fc = tf.nn.dropout(fc, keep_prob=0.5)
    with tf.variable_scope("layer6-fc2"):
        weight = tf.get_variable(
            "weight",
            shape=[512, CAPATCHA_SIZE],
            initializer=tf.truncated_normal_initializer(stddev=0.1))
        bias = tf.get_variable("bias",
                               shape=[CAPATCHA_SIZE],
                               initializer=tf.constant_initializer(0.1))
        if regu is not None:
            regularizer = tf.contrib.layers.l2_regularizer(0.001)
            tf.add_to_collection("loss", regularizer(weight))
        logit = tf.matmul(fc, weight) + bias

    with tf.variable_scope("loss"):
        cross_entropy = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=logit))
        # Add any regularization terms collected under "loss" to the cross entropy.
        loss = tf.add_n([cross_entropy] + tf.get_collection("loss"))
        train_step = tf.train.AdamOptimizer(0.001).minimize(loss)
        variable_summary(loss, "loss")

    with tf.variable_scope("accuracy"):
        pred_correct = tf.equal(tf.argmax(logit, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(pred_correct, tf.float32))
        variable_summary(accuracy, "accuracy")

    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        train_writer = tf.summary.FileWriter("./log/train", sess.graph)
        test_writer = tf.summary.FileWriter("./log/test", sess.graph)

        merged = tf.summary.merge_all()
        for i in range(max_step):
            xs, ys = train_data.next_batch(batch_size)
            summary_step, _ = sess.run([merged, train_step],
                                       feed_dict={
                                           x: xs,
                                           y_: ys
                                       })
            train_writer.add_summary(summary_step, i)
            if i % 10 == 0:
                valid_summary, train_accuracy = sess.run([merged, accuracy],
                                                         feed_dict={
                                                             x: xs,
                                                             y_: ys
                                                         })
                train_writer.add_summary(valid_summary, i)
                # test_x, test_y = test_data.images, test_data.labels  # raises ResourceExhaustedError (out of memory), although in theory the full test set could be used for evaluation
                test_x, test_y = test_data.next_batch(2000)
                test_summary, test_accuracy = sess.run([merged, accuracy],
                                                       feed_dict={
                                                           x: test_x,
                                                           y_: test_y
                                                       })
                test_writer.add_summary(test_summary, i)
                print(
                    'step %s, training accuracy = %.2f, testing accuracy = %.2f'
                    % (i, train_accuracy, test_accuracy))
        train_writer.close()
        test_writer.close()
        test_x, test_y = test_data.next_batch(200)
        test_accuracy = accuracy.eval(feed_dict={x: test_x, y_: test_y})
        print("test_accuracy:%.2f" % test_accuracy)