def generator():
    """Endless Keras training generator.

    Each step draws BATCH random (modality, user) pairs, loads a clip of
    frames (every 8th image over a 128-image window, wrapping around the
    user's frame list — 16 frames; assumes NUM_FRAMES == 16, TODO confirm),
    applies one random per-clip augmentation, and yields (X, Y) where X has
    shape (BATCH, NUM_FRAMES, 112, 112, 3) and Y holds the depression labels
    read from the Training/Development CSV files.

    Fixes vs. original: removed the unused local Y2 and the redundant
    lower-bound guards in the elif chain (behavior unchanged).
    """
    while True:
        modality = np.random.randint(4, size=BATCH)   # which modality sub-directory of dirl
        usuario = np.random.randint(50, size=BATCH)   # which user inside that directory
        X = np.zeros((BATCH, NUM_FRAMES, 112, 112, 3))
        Y = []
        for i in range(BATCH):
            users = os.listdir(directory + dirl[modality[i]])
            # wsort: project helper — presumably returns the frame filenames in order; TODO confirm
            images = wsort(directory + dirl[modality[i]] + users[usuario[i]] + '/')  # all the images
            numImages = len(images)
            imagens = np.random.randint(numImages)  # random starting frame
            indice = 0
            # One augmentation per clip:
            # 1 = horizontal flip, 2 = 30-degree rotation, 3 = vertical flip, 4 = identity.
            valor = np.random.random()
            if valor < 0.25:
                flagFlip = 1
            elif valor < 0.50:
                flagFlip = 2
            elif valor < 0.75:
                flagFlip = 3
            else:
                flagFlip = 4
            for j in range(imagens, imagens + 128, 8):
                # j % numImages wraps around when the window passes the last frame.
                imagem = image.load_img(
                    directory + dirl[modality[i]] + users[usuario[i]] + '/' + images[j % numImages],
                    target_size=(112, 112))
                imagem = image.img_to_array(imagem)
                # here you put your function to subtract the mean of vggface2 dataset
                imga = utils.preprocess_input(imagem, version=2)  # subtract the mean of vggface dataset
                if flagFlip == 1:
                    X[i, indice, :, :, :] = np.flip(imga, axis=1)
                elif flagFlip == 2:
                    X[i, indice, :, :, :] = image.apply_affine_transform(
                        imga, theta=30, channel_axis=2, fill_mode='nearest', cval=0., order=1)
                elif flagFlip == 3:
                    X[i, indice, :, :, :] = np.flip(imga, axis=0)
                else:
                    X[i, indice, :, :, :] = imga
                indice = indice + 1
            # The label file location depends on whether the clip came from the
            # Training or the Development split (encoded in the directory name).
            sets = dirl[modality[i]].split('/')[1]
            # You can train the model using Training and Development sets
            if sets == 'Training':
                label = readCSV(dirLabels[0] + '/' + users[usuario[i]] + '_Depression.csv')
            else:
                label = readCSV(dirLabels[1] + '/' + users[usuario[i]] + '_Depression.csv')
            Y.append(label)
        Y = np.array(Y)
        yield X, Y
def loadData(): con = util.getConnection() cf = ColumnFamily(con, 'videos') tagCF = ColumnFamily(con, 'tag_videos_composite') movies = util.readCSV('data/movies') for movie in movies: title = movie[0] uploader = movie[1] runtime = int(movie[2]) #convert to match column validator tags = movie[3] rowKey = title+":"+uploader print "Inserting in videos: {}.".format(str(movie)) row = \ { 'title':title, 'user_name':uploader, 'runtime_in_sec':runtime, 'tags_csv': tags } cf.insert(rowKey, row) print 'inserting tags: {}'.format(tags) for tag in tags.split(','): tagCF.insert( tag.strip().lower(), #row-key = tag name { (uploader, rowKey): title #(uploader,videoId)=title } ); print 'finishished insertion.' con.dispose()
def val_generator():
    """Endless Keras validation generator over the Development set.

    Mirrors generator(): draws BATCH random (modality, user) pairs, loads
    every 8th frame over a 128-image window (wrapping), subtracts the
    VGGFace2 mean, and yields (X, Y) with X of shape
    (BATCH, NUM_FRAMES, 112, 112, 3). No augmentation is applied.

    Bug fix: the original built the image path with
    dirDevelopment[modality[m]] — indexing the dirDevelopment *string* and
    producing a single character — instead of dirDev[modality[m]], which the
    os.listdir/wsort calls above use. The path now uses dirDev consistently.
    """
    while True:
        X = np.zeros((BATCH, NUM_FRAMES, 112, 112, 3))
        Y = []
        modality = np.random.randint(2, size=BATCH)   # which Development modality
        usuario = np.random.randint(50, size=BATCH)   # which user
        for m in range(BATCH):
            users = os.listdir(dirDevelopment + dirDev[modality[m]])
            images = wsort(dirDevelopment + dirDev[modality[m]] + users[usuario[m]] + '/')  # all the images
            numImages = len(images)
            imagens = np.random.randint(numImages)  # random starting frame
            indice = 0
            for j in range(imagens, imagens + 128, 8):
                # Fixed path: dirDev[modality[m]], not dirDevelopment[modality[m]].
                imagem = image.load_img(
                    dirDevelopment + dirDev[modality[m]] + users[usuario[m]] + '/' + images[j % numImages],
                    target_size=(112, 112))
                imagem = image.img_to_array(imagem)
                # here you put your function to subtract the mean of vggface2 dataset
                imga = utils.preprocess_input(imagem, version=2)  # subtract the mean of vggface dataset
                X[m, indice, :, :, :] = imga
                indice = indice + 1
            label = readCSV(dirLabels + users[usuario[m]] + '_Depression.csv')
            Y.append(label)
        Y = np.array(Y)
        yield X, Y
def loadData(): con = util.getConnection() cf = ColumnFamily(con, 'videos') tagCF = ColumnFamily(con, 'tag_videos_sup') movies = util.readCSV('data/movies') for movie in movies: title = movie[0] uploader = movie[1] runtime = int(movie[2]) #convert to match column validator tags = movie[3] rowKey = title+":"+uploader print "Inserting in videos: {}.".format(str(movie)) cf.insert( rowKey, { 'title':title, 'user_name':uploader, 'runtime_in_sec':runtime, 'tags_csv': tags }) for tag in tags.split(','): print 'adding tag: {0} for movie: {1}'.format(tag, title) tagCF.insert( tag.strip().lower(), # row-key = tag name { uploader: { # level 1 nesting = uploader name rowKey: title # level 2 nesting = videoId, value = title } } ); print 'finishished insertion.' con.dispose()
def loadData(): con = util.getConnection() cf = ColumnFamily(con, 'videos_denorm') tagCF = ColumnFamily(con, 'tag_videos_composite') movies = util.readCSV('data/movies') for movie in movies: title = movie[0] uploader = movie[1] runtime = int(movie[2]) #convert to match column validator tags = movie[3] rowKey = title+":"+uploader print "Inserting in videos: {}.".format(str(movie)) row = \ { 'title':title, 'user_name':uploader, 'runtime_in_sec':runtime, } for tag in tags.split(','): print 'adding tag: {0} for movie: {1}'.format(tag, title) row['tag:{}'.format(tag.strip().lower())] = tag.strip() print 'inserting denorm: {}'.format(row) cf.insert(rowKey, row) print 'finishished insertion.' con.dispose()
def loadData():
    """Load click events from the CSV fixture and start two worker threads
    that replay random events from the list."""
    eventsList = [
        {
            'user': row[0],
            'page': row[1],
            'event': row[2],
            'element': row[3]
        }
        for row in util.readCSV('data/events.csv')
    ]
    for workerId in range(2):
        worker = Thread(target=randomEvent, args=(workerId, eventsList))
        worker.start()
def loadData(): con = util.getConnection() cf = ColumnFamily(con, 'videos') movies = util.readCSV('data/movies') for movie in movies: title = movie[0] uploader = movie[1] runtime = int(movie[2]) #convert to match column validator tags = movie[3] rowKey = title+":"+uploader print "Inserting in videos: {}.".format(str(movie)) cf.insert( rowKey, { 'title':title, 'user_name':uploader, 'runtime_in_sec':runtime, 'tags_csv': tags }) print 'finishished insertion.' con.dispose()
def loadData():
    """Load click events from the CSV fixture, record a reference user and
    start time for later get() lookups, and start two worker threads that
    replay random events."""
    global userId
    global startTime
    global THREAD_DONE
    eventsList = [
        {
            'user': row[0],
            'page': row[1],
            'event': row[2],
            'element': row[3]
        }
        for row in util.readCSV('data/events.csv')
    ]
    # set a user, some starttime to lookup during get
    userId = eventsList[0]['user']
    startTime = datetime.datetime.utcnow()
    for workerId in range(2):
        THREAD_DONE[workerId] = False #set as running
        worker = Thread(target=randomEvent, args=(workerId, eventsList))
        worker.start()
#!/usr/bin/env python # encoding: utf-8 import os import cv2 import numpy as np from six.moves import xrange from util import readCSV, normalize csvpath = './csv_file/ggo.csv' datapath = '../data/ggo/train/' count = 1 lines = readCSV(csvpath) for line in lines[1:]: series_uid = line[0] center_z = int(np.round(eval(line[1])[2])) num_z = int(line[-1]) #print series_uid, center_z, num_z absolute_path = datapath + series_uid images = normalize(np.load(absolute_path + "/image.npy")) * 255 print images.shape masks = np.load(absolute_path + "/ggo_mask.npy").astype(np.int) print masks.shape if num_z <= 3: seed_image = images[:, :, center_z] seed_mask = masks[:, :, center_z] np.save("./data/image/{}".format(count), seed_image) np.save("./data/mask/{}".format(count), seed_mask) count = count + 1 else: for i in xrange(center_z - int(num_z / 2) + 1,
# Regression head on top of the (pre-built) rgb_model backbone:
# two dropout-regularized FC layers, then a single linear output unit.
hidden1 = Dense(512,activation='relu',name='hidden1')(last_layer)
hidden1 = Dropout(0.5)(hidden1)
#--FC Layer
hidden2 = Dense(512,activation='relu',name='hidden2')(hidden1)
hidden2 = Dropout(0.5)(hidden2)
#--Regression Layer
out = Dense(1,activation='linear',name='classifier')(hidden2)
custom_vgg_model = Model(rgb_model.input,out)
# MSE regression; legacy Keras Adam signature (lr/decay).
custom_vgg_model.compile(loss='mse',optimizer=keras.optimizers.Adam(lr=0.0001,decay=0.0005))
custom_vgg_model.fit_generator(generator(),samples_per_epoch=1000,validation_data=val_generator(),validation_steps=10,epochs=2)
#--Here you read the label
Y=readCSV(dirLabelsTest+'_Depression.csv')
# Test-time prediction: collect NUM_FRAMES preprocessed frames into X and
# run the model once.
buf=0
numberOfFrames = NUM_FRAMES
# while (buf < numberOfFrames):
# NOTE(review): the frame-collection loop above appears to be commented out,
# and buf is never incremented anywhere visible — as written only a single
# frame is loaded below. Confirm whether the while loop should be active.
#Insert here the directory of the images of test set
imagem = image.load_img(dirTesting,target_size=(FRAME_HEIGHT,FRAME_WIDTH))
imagem = image.img_to_array(imagem)
#Subtract the mean of VGGFace2 dataset
#---put your function here
imga = utils.preprocess_input(imagem,version=2) #here it is the mean value of VGGFace dataset
# NOTE(review): X is assumed to be initialized as a list earlier in the file
# (not visible in this chunk) — verify before running.
X.append(imga)
X = np.array(X)
X = np.expand_dims(X,axis=0)   # add the batch dimension: (1, frames, H, W, 3)
prediction = custom_vgg_model.predict(X)