Example #1
def game_details(id):
    with con:
        cur = con.cursor()
        cur.execute("SELECT * FROM game_details WHERE Steam_ID=?", (id,))
        row = cur.fetchone()
        # System requirements are stored as "$"-separated "key:value" pairs;
        # keep only the well-formed pairs.
        pairs = [item.split(":") for item in row[9].split("$")]
        system_requirements = dict(pair for pair in pairs if len(pair) == 2)
        details = {
            "Name": row[1],
            "Genre": row[2],
            "Release Date": row[3],
            "Publisher": row[4],
            "Languages": row[6],
            "Description": row[7],
            "images": get_images(id),
            "graphs": get_graphs(id)
        }
    return render_template('game-details.html',
                           details=details,
                           sr=system_requirements,
                           reviews=get_reviews(id))
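
The parsing above assumes the requirements column stores "key:value" pairs joined with "$". A tiny round-trip sketch with made-up data (the real stored format is not shown in the snippet):

raw = "OS:Windows 10$Processor:Intel Core i5$Memory:8 GB RAM"  # hypothetical value
pairs = [item.split(":") for item in raw.split("$")]
print(dict(pair for pair in pairs if len(pair) == 2))
# {'OS': 'Windows 10', 'Processor': 'Intel Core i5', 'Memory': '8 GB RAM'}
# Note: a value containing ":" splits into 3+ parts and is filtered out.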
Example #2
def render_pic():
    # Start from an empty images/ directory.
    if os.path.exists('images'):
        remove_contents('images')
    else:
        os.mkdir('images')
    # Pick keywords out of the submitted text and fetch images for each one.
    text = request.form['text']
    key_words = select_key_words(text)
    list_of_all_img = []
    for word in key_words:
        list_of_all_img.extend(get_images(word))
    return render_template('test.html', list=list_of_all_img)
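
The snippet relies on a `remove_contents` helper that is not shown; a minimal sketch of what it might look like, with the name taken from the call above and the behavior an assumption:

import os
import shutil

def remove_contents(directory):
    # Empty `directory` without deleting the directory itself.
    for entry in os.listdir(directory):
        path = os.path.join(directory, entry)
        if os.path.isdir(path):
            shutil.rmtree(path)
        else:
            os.remove(path)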
Example #3
def project_structure(text_music_name, text_artist_name, image_type,
                      op_lyric, op_deepDream, deepDream_format):
    global current_job
    # Start from a clean slate.
    shutil.rmtree('/code/flask/music', ignore_errors=True)
    shutil.rmtree('/code/flask/imagens', ignore_errors=True)

    json_code = get_lyric_videoLink(text_music_name, text_artist_name)

    json_code['MusicPath'] = download_song(json_code['VideoID'])
    print('\n\nsong downloaded\n\n')

    json_code['Subtitle'] = get_images(json_code['Subtitle'], image_type)
    print('\n\nimages fetched\n\n')

    json_code['Subtitle'] = improve_subtitle(json_code['Subtitle'])
    print('\n\ntimestamps adjusted\n\n')

    if op_deepDream:
        # The branches differ only in which dreamImage variant runs and in
        # the flag passed to the video builder (True only for format '1').
        if deepDream_format == '1':
            json_code['Subtitle'] = dreamImage(json_code['Subtitle'])
            is_format_1 = True
        elif deepDream_format == '5':
            json_code['Subtitle'] = dreamImage_5(json_code['Subtitle'])
            is_format_1 = False
        else:
            json_code['Subtitle'] = dreamImage_10(json_code['Subtitle'])
            is_format_1 = False
        if op_lyric:
            video_name = make_videoDeep_lyric(json_code, text_music_name, is_format_1)
        else:
            video_name = make_videoDeep(json_code, text_music_name, is_format_1)
    else:
        if op_lyric:
            video_name = make_video_lyric(json_code, text_music_name)
        else:
            video_name = make_video(json_code, text_music_name)

    print('\n\nclip finished\n\n')

    return video_name
Example #4
# imgs_rotated = []
# imgs_shape = []

# for image in images:
#     print('Straightening {}'.format(image))
#     img_rotated_cut = cut_out_corrected_img.cut_out_corrected_img(image)
#     cv2.imwrite(r'./corrected_after/'+image.rpartition('/')[-1].rpartition('.')[-3][-1]+'.jpg', img_rotated_cut)
#     imgs_rotated.append(img_rotated_cut)
#     imgs_shape.append(img_rotated_cut.shape)

# column = 5
# img_joined = show_images.show_images(imgs_rotated, imgs_shape, column, alignment='left')
# img_ori = cv2.imread(leaf_split_before)
# # Get Corrected_after Leaves Begin

images = sorted(get_images.get_images(r'./corrected_after/'))

edges_canny = []
edges_equalized = []
edges_canny_shape = []
edges_equalized_shape = []

for image in images:
    # local_enhancement returns the image, an equalized version, and an edge
    # map for each; only the plain Canny edges are collected here.
    img, img_equalized, edge_canny, edge_equalized = local_enhancement.local_enhancement(image)
    edges_canny.append(edge_canny)
    # edges_equalized.append(edge_equalized)
    edges_canny_shape.append(edge_canny.shape)
    # edges_equalized_shape.append(edge_equalized.shape)
    # plt.imshow(edge_canny, plt.cm.gray)
    # plt.show()
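
Mirroring the commented-out tiling code at the top of this example, the collected edge maps could be joined into a single contact sheet. A sketch that reuses the `show_images` call with the same signature it has in that commented block (and assumes `cv2` is imported, as that block does):

# Tile the Canny edge maps five per row, then write the joined image out.
column = 5
edges_joined = show_images.show_images(edges_canny, edges_canny_shape,
                                       column, alignment='left')
cv2.imwrite(r'./edges_canny_joined.jpg', edges_joined)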
Example #5
# 2/9/2017                                                                 #
#                                                                          #
# Data: ImageNet                                                           #
# ------------------------------------------------------------------------ #
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np

from get_images import get_images
from DeepLearning.deep_learning import learning_rate
from Binary_Network import ResNet, train_loss

import _init_

cnn = ResNet()
data = get_images()
# tf.reset_default_graph()
x = tf.placeholder(tf.float32, [
    None, _init_.input_image[0], _init_.input_image[1], _init_.input_image[2]
])
y = tf.placeholder(tf.float32, [None, _init_.classes_numbers])
# y = tf.placeholder(tf.int32, [None])   # y_5
fc_out, conv5 = cnn(x, scope='resnet')
train_step, acc_1 = train_loss(fc_out, y)

config = tf.ConfigProto(
    allow_soft_placement=True,
    gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=0.7))
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
    sess.run(tf.global_variables_initializer())
    Acc_1 = []
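    # --- Hedged continuation: the source snippet ends here. -----------------
    # A minimal sketch of the TF1 training loop that would typically follow;
    # next_batch() and _init_.batch_size are assumptions, not the project's API.
    for step in range(1000):
        batch_x, batch_y = data.next_batch(_init_.batch_size)
        _, acc = sess.run([train_step, acc_1],
                          feed_dict={x: batch_x, y: batch_y})
        Acc_1.append(acc)
        if step % 100 == 0:
            print('step {}: batch accuracy {:.3f}'.format(step, acc))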
Example #6
import numpy as np
import time
import pickle
from sklearn.svm import LinearSVC
#from sklearn.tree import DecisionTreeClassifier
#from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split  # sklearn.cross_validation was removed in 0.20
from sklearn.preprocessing import StandardScaler
from sklearn.utils import shuffle
from functions import extract_features
from get_images import get_images

# load images
cars, notcars = get_images()

#Feature Parameters
colorspace = 'YCrCb' # Can be RGB, HSV, LUV, HLS, YUV, YCrCb
orient = 9
pix_per_cell = 8
cell_per_block = 1
hog_channel = "ALL" # Can be 0, 1, 2, or "ALL"
spatial = 32
histbin = 32
spatial_feat=True
hist_feat=True
hog_feat=True

# Extract image features
t = time.time()
X = []
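
The snippet stops just as feature extraction begins. A plausible continuation, sketched from the parameter names defined above; the keyword signature of `extract_features` is an assumption, not the project's confirmed API:

# Hedged continuation: extract features per class, scale, shuffle, and split.
for file_list in (cars, notcars):
    X.append(extract_features(file_list, color_space=colorspace, orient=orient,
                              pix_per_cell=pix_per_cell, cell_per_block=cell_per_block,
                              hog_channel=hog_channel, spatial_size=(spatial, spatial),
                              hist_bins=histbin, spatial_feat=spatial_feat,
                              hist_feat=hist_feat, hog_feat=hog_feat))
car_features, notcar_features = X
print(round(time.time() - t, 2), 'seconds to extract features')

X = np.vstack((car_features, notcar_features)).astype(np.float64)
y = np.hstack((np.ones(len(car_features)), np.zeros(len(notcar_features))))
X_scaler = StandardScaler().fit(X)  # per-column normalization
scaled_X, y = shuffle(X_scaler.transform(X), y, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(scaled_X, y,
                                                    test_size=0.2, random_state=0)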
Example #7
def get_image_class(path):
    get_images(path)
    path = get_path(path)
    images_with_tags = get_prediction(model, imagenet_class_mapping, path)
    print(images_with_tags)
    generate_html(images_with_tags)
Example #8
''' Imports '''
import get_images
import get_landmarks
from sklearn.neighbors import KNeighborsClassifier
import numpy as np
import matplotlib.pyplot as plt
from sklearn.naive_bayes import GaussianNB
''' Load the data and their labels '''
image_directory = 'H:\\Fall 2020\\biometrics\\proj\\RonKauerTestFrames'
X, y = get_images.get_images(image_directory)
''' Get distances between face landmarks in the images '''
# get_landmarks(images, labels, save_directory="", num_coords=5, to_save=False)
X, y = get_landmarks.get_landmarks(X, y, 'landmarks/', 5, False)
''' kNN classification treating every sample as a query '''
# initialize the classifier
knn_accuracy = []
NB_accuracy = []

print()
print("KNN")
for a in [1, 3, 5, 7]:
    knn = KNeighborsClassifier(n_neighbors=a, metric='euclidean')
    num_correct = 0
    labels_correct = []
    num_incorrect = 0
    labels_incorrect = []

    for i in range(0, len(y)):
        query_img = X[i, :]
        query_label = y[i]
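        # --- Hedged continuation: the source snippet is cut off here. -------
        # A minimal leave-one-out sketch (an assumption about how the loop
        # ends): fit on all other samples, classify the query, tally the result.
        template_imgs = np.delete(X, i, axis=0)
        template_labels = np.delete(y, i)
        knn.fit(template_imgs, template_labels)
        prediction = knn.predict(query_img.reshape(1, -1))[0]
        if prediction == query_label:
            num_correct += 1
            labels_correct.append(query_label)
        else:
            num_incorrect += 1
            labels_incorrect.append(query_label)

    knn_accuracy.append(num_correct / (num_correct + num_incorrect))
    print('k = {}: accuracy = {:.3f}'.format(a, knn_accuracy[-1]))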
Example #9
    return client


if __name__ == '__main__':
    client = get_client(
        project_id='sacred-reality-201417',
        registry_id='robocar-ai',
        device_id='donkey',
        private_key_file='keys/rsa_private.pem',
        algorithm='RS256',
        mqtt_bridge_hostname='mqtt.googleapis.com',
        mqtt_bridge_port=443,
        cloud_region='us-central1',
        ca_certs='keys/roots.pem')
    client.loop_start()
    image_file_names = get_images()
    for file_name in image_file_names:
        with open(file_name, 'rb') as f:
            # Build the protobuf message and publish it over the MQTT bridge.
            image = Image()
            image.data = f.read()
            image.name = 'test'
            res = client.publish('/devices/donkey/events', image.SerializeToString())
            print('{} is published {}'.format(file_name, res.is_published()))
            res.wait_for_publish()


    sleep(5)  # Time in seconds.
    # run = True
    # while run:
    #     client.loop()
    #     client.
Example #10
File: model.py  Project: tarasovp/udacity
from get_images import get_images
from sklearn.model_selection import train_test_split

datadirs = ['/notebooks/udacity/new_training/map1_backward/',
            '/notebooks/udacity/new_training/map1_forward/',
            '/notebooks/udacity/new_training/map1_recovery_backward/',
            '/notebooks/udacity/new_training/map1_recovery_forward/',
            '/notebooks/udacity/new_training/map2_forward/',
            '/notebooks/udacity/new_training/map2_backward/',
            '/notebooks/udacity/new_training/map2_recovery_forward/',
            '/notebooks/udacity/new_training/map2_recovery_backward/',
            '/notebooks/udacity/new_training/map1_error_correction/',
            '/notebooks/udacity/new_training/map2_error_correction/']

images = get_images(datadirs, 0.08)
image_names_full, y_data_full = images.img.values, images.real.values

# Preprocessing function
def proc_img(img):  # input: 160x320x3
    img = img[59:138:2, 0:-1:2, :]  # crop the relevant vertical region and keep every second pixel
    img = (img / 127.5) - 1.0       # normalize colors from 0-255 to -1.0..1.0
    return img                      # output: 40x160x3

# Generate train/validation sets.
names_train, names_valid, y_train, y_valid = train_test_split(
    image_names_full, y_data_full, test_size=0.02, random_state=0)

# For every training image, add a 0/1 flag so an inverse (flipped) copy can
# also be used in the train set.
inverse_train = [0 for i in y_train] + [1 for i in y_train]
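
The snippet ends with the inverse flags. A minimal sketch of a generator that could consume them; the doubling of the name/label arrays, the left-right flip, the negated steering label, and the `batch_generator` name are all assumptions, not the project's confirmed code:

import cv2
import numpy as np
from sklearn.utils import shuffle

def batch_generator(names, flags, y, batch_size=128):
    # Double names/labels so they line up with the 0/1 inverse flags; a
    # flipped image is assumed to need a mirrored (negated) steering label.
    names = np.concatenate([names, names])
    y = np.concatenate([y, -np.asarray(y)])
    while True:
        names_s, flags_s, y_s = shuffle(names, flags, y)
        for offset in range(0, len(names_s), batch_size):
            imgs = []
            for name, flag in zip(names_s[offset:offset + batch_size],
                                  flags_s[offset:offset + batch_size]):
                img = proc_img(cv2.imread(name))
                imgs.append(np.fliplr(img) if flag else img)
            yield np.array(imgs), y_s[offset:offset + batch_size]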