Example 1
def load_data(self, source=1):
    # Load the "Category and Attribute Prediction Benchmark" splits via
    # preprocess.processing; when source == 1 the attribute-label and
    # ground-truth files are passed explicitly, otherwise the defaults are used.
    if source == 1:
        self.numCategory, self.label_map, self.inv_label_map, self.x_train, self.y_train, self.x_val, self.y_val, self.x_test, self.y_test = \
            preprocess.processing(
                input_dir='../input/Category and Attribute Prediction Benchmark/Img',
                labels_dir=r'../list_attr_cloth.txt',
                gt_dir=r'../list_attr_img.txt'
            )
    else:
        self.numCategory, self.label_map, self.inv_label_map, self.x_train, self.y_train, self.x_val, self.y_val, self.x_test, self.y_test = \
            preprocess.processing(
                input_dir='../input/Category and Attribute Prediction Benchmark/Img'
            )
Example 2
def main(arg=None):
    # Load the pretrained VGG16 weights from the .npz file.
    weights = np.load('./vgg16_weights.npz')

    # Weights_Tranined wraps the contents of vgg16_weights.npz (downloaded from
    # the internet); for example, its member 'conv11_w' holds the weights of the
    # first conv3-64 layer and 'conv11_b' holds that layer's bias.
    w_trained = Weights_Tranined(weights)

    # Build the training and validation sets.
    data_path = os.path.join(os.getcwd(), 'fmnist')
    print(data_path)
    imgs, labels = pre.processing(data_path, CLASS_NUM)
    train_img, train_label, validation_img, validation_label = pre.split(
        imgs, labels)

    print(train_img.shape)
    print(train_label.shape)

    train_model(t_x=train_img,
                t_y=train_label,
                weights=w_trained,
                dprate=DROPOUT_RATE,
                imgsize=IMG_SIZE,
                imgchannel=IMG_CHANNEL,
                batchsize=BATCH_SIZE,
                train_step=TRAINING_STEP,
                learningrate=LEARNING_RATE_BASE,
                learningdecay=LEARNING_RATE_DECAY,
                regurate=REGULARIZATION_RATE)
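
The comments in this example describe Weights_Tranined as a thin wrapper that exposes the entries of vgg16_weights.npz under short names such as 'conv11_w' and 'conv11_b'. A minimal sketch of such a wrapper is shown below; the npz key names ('conv1_1_W', 'conv1_1_b', ...) are an assumption about the downloaded weight file, not something taken from this example.

import numpy as np

class Weights_Tranined:
    """Hypothetical wrapper mapping npz entries to short attribute names."""

    def __init__(self, weights):
        # First conv3-64 layer of VGG16 (assumed key names in the npz file).
        self.conv11_w = weights['conv1_1_W']
        self.conv11_b = weights['conv1_1_b']
        self.conv12_w = weights['conv1_2_W']
        self.conv12_b = weights['conv1_2_b']
        # ...the remaining convolutional and fully connected layers would
        # follow the same naming pattern.

weights = np.load('./vgg16_weights.npz')
w_trained = Weights_Tranined(weights)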
Example 3
def build_single_index(temp_files, document, docID):
	"""This method controls the processing of a document and the adding of the terms
	to the single index."""
	terms = preprocess.processing(document, 'single')
	append_to_index(terms, docID)
	temp_files = check_mem_constraint(temp_files)
	return temp_files
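
append_to_index and check_mem_constraint are not shown in this example. Below is a minimal sketch of what the in-memory single-index update might look like, assuming a plain dictionary from term to per-document term frequencies; both the structure and the names are assumptions, not taken from the project above.

# Hypothetical in-memory single index: term -> {docID: term frequency}.
index = {}

def append_to_index(terms, docID):
    # Record how often each term occurs in this document.
    for term in terms:
        postings = index.setdefault(term, {})
        postings[docID] = postings.get(docID, 0) + 1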
Example 4
def build_positional_index(temp_files, document, docID):
	"""This method controls the processing of a document and the adding
	of the terms to the phrase index."""

	tokens = preprocess.processing(document, 'positional')
	append_to_index_position(tokens, docID)
	
	global count
	global memory_constraint

	# Flush the in-memory postings to a temporary file once the memory limit is hit.
	if count > memory_constraint:
		temp_files = write_to_temp_position(temp_files)
		count = 0
	return temp_files
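
The positional variant stores token positions so that phrase queries can be answered. A minimal sketch of what append_to_index_position might do, assuming a dictionary from term to per-document position lists and the global count that the flush check above relies on; the structure is an assumption, not taken from the project above.

# Hypothetical positional index: term -> {docID: [token positions]}.
positional_index = {}
count = 0

def append_to_index_position(tokens, docID):
    global count
    # Record every position at which a token occurs in this document.
    for position, token in enumerate(tokens):
        postings = positional_index.setdefault(token, {})
        postings.setdefault(docID, []).append(position)
        count += 1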
Example 5
def processImage(imgFileName):

    path = os.path.join('C:/Users/prashant/Desktop/third/minorProject/images/',
                        imgFileName)
    # Hand the image off to the preprocessing module.
    processed = preprocess.processing(path)
    print(path)
    print('\n--------------PREDICTIONS------------------\n')
    x = image.img_to_array(processed)
    x = np.expand_dims(x, axis=0)
    # Run the prediction in the graph and session the model was loaded with.
    with graph.as_default():
        set_session(sess)
        prediction = model.predict(x, batch_size=None)

    print("Risk factor from image = ", prediction, type(prediction))
    print('\n')
    value = prediction[0][0]
    print(type(value))
    return value
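
Here preprocess.processing is used as an image preprocessing step before Keras' img_to_array. A plausible sketch of such a step, assuming it only loads, converts, and resizes the image; the (224, 224) target size is an assumption, not taken from this example.

from PIL import Image

def processing(path, target_size=(224, 224)):
    # Load the image, force RGB, and resize it to the network's input size.
    img = Image.open(path).convert('RGB')
    return img.resize(target_size)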
Example 6
end = time.time()
print('Preprocessing finished in {} seconds'.format(end-start))

# Build and train the network on the training and dev splits.
net = dan.Net(hiddenDim=52)
net.train(train, dev, verbose=False)

torch.save(net.state_dict(),
           'C:/Users/jack1/Documents/CS5304-Final-Project-Data-Science/net_dict.pt')
# Alternatively, reload a previously saved model instead of retraining:
'''
net = dan.Net(hiddenDim=52)
net.load_state_dict(
    torch.load(
        'C:/Users/jack1/Documents/CS5304-Final-Project-Data-Science/net_dict.pt'
    ))
'''

print(
    '\n\t REAL OR FAKE: a proof-of-concept\n\tby Irene, Aayushi and Jack\n\n')
print('Enter "exit" at any time to shut down the application.')
while True:
    response = input('Please enter a tweet: ')
    if response == 'exit':
        break
    text = [response]
    proc_text = [preprocess.processing(i) for i in text]
    vec = [preprocess.tweet_vec(tweet, word2vec) for tweet in proc_text]

    _, y_stars = net.get_eval_data(vec, mode='test')
    if y_stars[0] == 0:
        print('non-Disaster')
    elif y_stars[0] == 1:
        print('Disaster')

print('Thank you for using REAL OR FAKE!')
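
preprocess.tweet_vec is not defined in this example. For a network like dan.Net that consumes one fixed-size vector per tweet, a common choice is to average the word2vec embeddings of the tokens. Below is a minimal sketch under that assumption; the 300-dimensional size and the dict-like word2vec lookup are assumptions, not taken from this example.

import numpy as np

def tweet_vec(tokens, word2vec, dim=300):
    # Average the embeddings of tokens the model knows; zeros if none match.
    vectors = [word2vec[token] for token in tokens if token in word2vec]
    if not vectors:
        return np.zeros(dim)
    return np.mean(vectors, axis=0)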