def three_videos_demo():
    if "image" not in request.files:
        return "there is no image in form!"
    # if "sound" not in request.files:
    #     return "there is no sound in form!"
    image = request.files["image"]
    # sound = request.files["sound"]
    scale = int(request.form["webcamScale"], 10)
    imageFilename = image.filename + ".webm"
    # soundFilename = sound.filename + ".wav"
    path = os.path.join(app.config["UPLOAD_FOLDER"], imageFilename)
    image.save(path)
    # path = os.path.join(app.config["UPLOAD_FOLDER"], soundFilename)
    # sound.save(path)
    main(
        "./checkpoints/wav2lip_gan.pth",
        "./sample_data/" + imageFilename,
        "./sample_data/" + "2.wav",
        resize_factor=scale,
        outfile="./static/result_voice.mp4",
    )
    with open("./static/result_voice.mp4", "rb") as bites:
        response = send_file(
            io.BytesIO(bites.read()),
            attachment_filename="result.mp4",
            mimetype="video/mp4",
            as_attachment=True,
        )
    response.headers.add("Access-Control-Allow-Origin", "*")
    return response

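# A hypothetical client for the handler above, as a sketch only: the route
# path, host, and file name are assumptions, not from the source. The form
# field names ("image", "webcamScale") match what the handler reads.
import requests

with open("face.webm", "rb") as f:
    resp = requests.post(
        "http://localhost:5000/three_videos",  # assumed route
        files={"image": f},
        data={"webcamScale": "2"},
    )
with open("result.mp4", "wb") as out:
    out.write(resp.content)
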
def main(file_path):  # original snippet spelled this "mian"; typo fixed
    inference.main(file_path)
    try:
        cutout(file_path, "trimap.png", "test.png")
        print('=' * 40 + '>Successfully!')
        os.remove('trimap.png')
        os.remove('pha.png')
    except Exception:  # bare except narrowed to Exception
        print('Error')

def main():
    script_dir = path.dirname(path.realpath(__file__))
    checkers_base = path.join(script_dir, '..', 'src')

    checker_to_settings = {
        'Hardcoded': {
            'checker': 'hardcoded.HardcodedChecker',
            'stubs': path.realpath(path.join(checkers_base, 'hardcoded', 'jdk.astub'))
        },
        'Nullness': {
            'checker': 'nninf.NninfChecker'
        },
        'OsTrusted': {
            'checker': 'ostrusted.OsTrustedChecker',
            'stubs': path.realpath(path.join(checkers_base, 'ostrusted', 'jdk.astub'))
        },
        'Sink': {
            'checker': 'sparta.checkers.SpartaSinkChecker',
            'solver': 'sparta.checkers.SpartaSinkSolver',
            'stubs': path.realpath(path.join(checkers_base, 'sparta', 'checkers', 'information_flow.astub'))
        },
        'Source': {
            'checker': 'sparta.checkers.SpartaSourceChecker',
            'solver': 'sparta.checkers.SpartaSourceSolver',
            'stubs': path.realpath(path.join(checkers_base, 'sparta', 'checkers', 'information_flow.astub'))
        },
        'Trusted': {
            'checker': 'trusted.TrustedChecker'
        }
    }

    parser = argparse.ArgumentParser(description=description(), epilog=epilog())
    parser.add_argument("checker_to_run",
                        help='Checker specifies which of the built-in checkers you would like to use.',
                        choices=checker_to_settings.keys())
    add_parser_args(parser, False)
    args = parser.parse_args()

    checker_settings = checker_to_settings[args.checker_to_run]

    if checker_settings == checker_to_settings['Sink'] or checker_settings == checker_to_settings['Source']:
        if args.json_file is not None:
            error("Sparta checkers do not have serializable constraints. -json-file is an invalid option\n")

    # Fill in defaults from the selected checker's settings for any
    # argument the user did not supply on the command line.
    for key in checker_settings:
        if getattr(args, key) is None:
            setattr(args, key, checker_settings[key])

    inference.main(args)

def start_plot(self):
    print('stream started')
    frame_count = 0
    start_time = time.time()

    while not self.pause:
        data = self.stream.read(self.CHUNK)
        data_int = struct.unpack(str(2 * self.CHUNK) + 'B', data)
        data_np = np.array(data_int, dtype='b')[::2] + 128

        predictions = inference.main(data_np)
        print(predictions)

        self.line.set_ydata(data_np)

        # compute FFT and update line
        yf = fft(data_int)
        self.line_fft.set_ydata(np.abs(yf[0:self.CHUNK]) / (128 * self.CHUNK))

        # update figure canvas
        self.fig.canvas.draw()
        self.fig.canvas.flush_events()
        frame_count += 1
    else:
        # while-else: runs once the pause flag ends the loop
        self.fr = frame_count / (time.time() - start_time)
        print('average frame rate = {:.0f} FPS'.format(self.fr))
        self.exit_app()

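# The method above assumes an already-open audio stream and matplotlib
# artists on self, plus `fft` (presumably scipy's). A minimal setup sketch
# under those assumptions -- the PyAudio parameter values are guesses, not
# from the source; a 16-bit mono stream is consistent with unpacking
# 2 * CHUNK bytes per read above.
import numpy as np
import pyaudio
import matplotlib.pyplot as plt
from scipy.fftpack import fft

CHUNK = 1024
audio = pyaudio.PyAudio()
stream = audio.open(format=pyaudio.paInt16, channels=1, rate=44100,
                    input=True, frames_per_buffer=CHUNK)
fig, (ax_wave, ax_fft) = plt.subplots(2)
line, = ax_wave.plot(np.arange(CHUNK), np.zeros(CHUNK))
line_fft, = ax_fft.plot(np.arange(CHUNK), np.zeros(CHUNK))
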
def test_main():
    from inference import main
    from model import find_best

    weights = find_best()[0]
    print(weights)
    args = ('--weights %s --batch-size 1' % weights).split()
    score = main(args)
    assert score > 0

def upload_file():
    if "image" not in request.files:
        return "there is no image in form!"
    if "sound" not in request.files:
        return "there is no sound in form!"

    image = request.files["image"]
    sound = request.files["sound"]

    path = os.path.join(app.config["UPLOAD_FOLDER"], image.filename)
    image.save(path)
    path = os.path.join(app.config["UPLOAD_FOLDER"], sound.filename)
    sound.save(path)

    main(
        "./checkpoints/wav2lip_gan.pth",
        "./sample_data/" + image.filename,
        "./sample_data/" + sound.filename,
        resize_factor=1,
        outfile="./static/result_voice.mp4",
    )
    return redirect(request.referrer)

def generateSpeech(party):
    if party == 'Republican':  # "is" tests identity; use == for string equality
        folder = 'data/republican'
        modelP = 'models/republican.hdf5'
    elif party == 'Democrat':
        folder = 'data/democrate'
        modelP = 'models/democrate.hdf5'
    else:
        raise ValueError('Party must be Republican or Democrat.')

    # get raw text
    rawText = inference.main(folder, modelP, party)
    # spell check
    cleaned = nlp_util.spellCheck(rawText)
    # get polarity
    wordPolarity, sentencePolarity = nlp_util.emotion(cleaned)

    print('=========')
    print(cleaned)
    print('=========')
    print(wordPolarity)
    print('=========')
    # print(sentencePolarity)
    return (cleaned, wordPolarity, sentencePolarity)

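# Hypothetical call, assuming the data/ and models/ paths referenced above
# exist on disk (output values depend on the trained model):
# text, word_polarity, sentence_polarity = generateSpeech('Democrat')
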
import inference

if __name__ == "__main__":
    inference.main()

def train(self):
    '''Train the model.

    :return:
    '''
    start_time = time.time()
    curr_interval = 0
    for epoch_n in range(self.epoch):  # xrange -> range (Python 3)
        for interval_i in trange(self.batch_idxs):
            batch_image = np.zeros(
                [self.batch_size * self.gpus_count, self.input_size,
                 self.input_size, self.input_channel], np.float32)
            batch_label = np.zeros(
                [self.data_loader_train.labels_nums,
                 self.batch_size * self.gpus_count], np.float32)
            # fill one slice of the batch per GPU
            for b_i in range(self.gpus_count):
                (batch_image[b_i * self.batch_size:(b_i + 1) * self.batch_size, :, :, :],
                 batch_label[:, b_i * self.batch_size:(b_i + 1) * self.batch_size],
                 ) = self.data_loader_train.read_data_batch()

            # D step
            _, loss_d = self.sess.run(
                [self.train_d_op, self.d_loss],
                feed_dict={self.batch_data: batch_image,
                           self.batch_label: batch_label[0]})
            # G steps
            for _ in range(self.g_loop):
                _ = self.sess.run(
                    self.train_g_op,
                    feed_dict={self.batch_data: batch_image,
                               self.batch_label: batch_label[0]})

            (sample_data, loss_fr, loss_g, train_summary, data, step,
             encode_real, encode_syn) = self.sess.run(
                [self.output_syn, self.g_loss_fr, self.g_loss,
                 self.summary_train, self.input_data, self.global_step,
                 self.cosine_real, self.cosine_syn],
                feed_dict={self.batch_data: batch_image,
                           self.batch_label: batch_label[0]})
            self.summary_write.add_summary(train_summary, global_step=step)

            logging.info(
                'Epoch [%4d/%4d] [gpu%s] [global_step:%d] time:%.2f h, '
                'd_loss:%.4f, g_loss:%.4f, lossfr:%.4f'
                % (epoch_n, self.epoch, self.gpus_list, step,
                   (time.time() - start_time) / 3600.0,
                   loss_d, loss_g, loss_fr))

            if curr_interval % int(self.sample_interval * self.batch_idxs) == 0:
                # log training data
                score_train = np.concatenate([encode_syn, encode_real], axis=0)
                logging.info('[score_train] {:08} {}'.format(step, score_train))
                batch_image = np.split(batch_image, 2, axis=0)
                print(sample_data.shape)
                utils.write_batch(self.result_path, 0, sample_data,
                                  batch_image[1], curr_interval,
                                  othersample=batch_image[0],
                                  reverse_other=False, ifmerge=True,
                                  score_f_id=score_train)
                self.validation(curr_interval, epoch_n, step)
                # self.slerp_interpolation(batch_image[1], batch_label, epoch_n, curr_interval)
                if self.ifsave:
                    modelname = self.model_name + '-' + str(curr_interval)
                    self.saver.save(
                        self.sess,
                        os.path.join(self.check_point_path, self.model_name),
                        global_step=curr_interval)
                    # save_path = '/world/data-gpu-58/wangyuequan/data_sunkejia/lfw_synthesis_temp/lfw_synthesis//' + self.version + self.gpus_list + '/'
                    # self.mkdir_result(save_path)
                    # save_path = '/world/data-gpu-90/rex/lfw/data/lfw_lightcnn_96_rgb/'
                    test_phase.main(
                        str(curr_interval),
                        os.path.join(self.check_point_path, self.model_name),
                        self.gpu)
                    # synthesis.main(os.path.join(self.check_point_path, modelname), save_path + '/synlfw' + str(curr_interval) + '/', self.gpu)
                    print('*' * 20 + 'synthesis image finished!!!~~~~')
                    print('*' * 20 + 'save model succeeded!!!!~~~~')
            curr_interval += 1

def step(setting, experiment):
    tic = time.time()
    if setting.step == 'data':
        import prepareDataNpy
        prepareDataNpy.step(setting, experiment)
    if setting.step == 'presence' and setting.sensor != 'all':
        sys.path.append('../specialization')
        from inference import main
        config = types.SimpleNamespace()
        config.rnn = True
        config.sensorData = True
        config.modelName = 'train_scene_source_lorient'
        config.modelPath = (experiment.path.output +
                            '../specialization/model_' + setting.typology + '/')
        config.datasetName = (experiment.path.input +
                              setting.id(sort=False)
                              .replace('step_presence', 'step_data')
                              .replace('_typology_' + setting.typology, '') +
                              '_spec.npy')
        config.outputPath = ''
        config.test = False
        config.classes = list(setting.typology)
        config.debug = experiment.status.debug
        presence, timeOfPresence = main(config)
        if experiment.status.debug:
            print(presence.shape)
            print(timeOfPresence.shape)
        np.save(experiment.path.output + setting.id() + '_presence.npy', presence)
        # np.save(experiment.path.output + setting.id() + '_timeOfPresence.npy', timeOfPresence)
    if setting.step == 'energy':
        en.energyIndicators(setting, experiment)
    if setting.step == 'part':
        # print(setting.source)
        if setting.sensor != 'all':
            presence = getData(setting, experiment)
            # restored from a commented-out line: the np.save below needs this defined
            timeOfPresence = getData(setting, experiment, type='timeOfPresence')
        else:
            presence = np.zeros(0)
            timeOfPresence = np.zeros(0)
            for k in range(len(experiment.factor.sensor) - 1):
                presence = np.concatenate(
                    (presence,
                     getData(setting.replace('sensor', value=k), experiment)))
                timeOfPresence = np.concatenate(
                    (timeOfPresence,
                     getData(setting.replace('sensor', value=k), experiment,
                             'timeOfPresence')))
        # print(presence)
        presenceName = experiment.path.output + setting.id() + '_presence.npy'
        # print(presence.shape)
        np.save(presenceName, presence)
        timeOfPresenceName = (experiment.path.output + setting.id() +
                              '_timeOfPresence.npy')
        # print(timeOfPresence.shape)
        np.save(timeOfPresenceName, timeOfPresence)
    if setting.step == 'partEnergy':
        # print(setting.source)
        if setting.sensor != 'all':
            (energy, tim) = getData(setting, experiment, type='energy')
        else:
            energy = np.zeros(0)
            tim = np.zeros(0)
            for k in range(len(experiment.factor.sensor) - 1):
                # renamed from (en, ti): assigning "en" here would make it a
                # local and break en.energyIndicators in the 'energy' branch
                (e_k, t_k) = getData(setting.replace('sensor', value=k),
                                     experiment, type='energy')
                energy = np.concatenate((energy, e_k))
                tim = np.concatenate((tim, t_k))
        name = experiment.path.output + setting.id()
        # print(energy.shape)
        np.save(name + '_energy.npy', energy)
        # print(tim.shape)
        np.save(name + '_time.npy', tim)
    if setting.step in ['data', 'presence']:
        duration = time.time() - tic
        np.save(experiment.path.output + setting.id() + '_duration.npy', duration)

def eval_op():
    tf.logging.info("Evaluate model on dev set...")
    inference.main(eval_args, verbose=False)
    return eval_args.output, evaluate(pred_file=eval_args.output,
                                      ref_file=args.references)

# Python 2 print statements converted to Python 3 calls
print(var[1].name)
print(sess.run(var[0][0][:10]))
print('\n')
print(var[-2].name)
print(sess.run(var[-2][0][:10]))
print('\n')
print(var[-4].name)
print(sess.run(var[-4][0][:10]))
print('\n')
print(var[-6].name)
print(sess.run(var[-6][0][:10]))
print('\n')

model_saver = tf.train.Saver()
model_saver.save(sess, "model_e%d_i%d" % (i, k))

with tf.Session() as sess1:
    model_saver = tf.train.Saver()
    model_saver.restore(sess1, "model_e%d_i%d" % (i, k))
    beam_seqs = inference.main(train_doc2id[299][:147], vocab, sess1)
    for seq in beam_seqs:
        temp = []
        for word in seq:
            temp.append(vocab[word[0]])
        print(" ".join(temp))
        print("\n")

def run_inference():
    with HiddenPrints():
        import inference
        inference.main(3)

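# HiddenPrints is not defined in the snippet above. A minimal sketch of such
# a stdout-suppressing context manager (an assumption, not necessarily the
# source's implementation):
import os
import sys

class HiddenPrints:
    def __enter__(self):
        self._original_stdout = sys.stdout
        sys.stdout = open(os.devnull, 'w')  # discard anything printed inside

    def __exit__(self, exc_type, exc_val, exc_tb):
        sys.stdout.close()
        sys.stdout = self._original_stdout  # restore normal printing
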
import inference
import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("cv", help="Cross Validation")
    args = parser.parse_args()
    cv = args.cv
    inference.main(cv)

# Too many images to load them all, so we are doing it in batches
start = 0
end = 50000
increase = 50000
result = True
batch_index = 0

# Get a cursor from the database
db = sqlite3.connect("<Path_to_the_full_census_database>")
cur = db.cursor()

# Exclusion set
training_db = sqlite3.connect("<Path_to_the_dugnad_database>")
exclusion_names = training_db.cursor().execute(
    "SELECT Name FROM cells").fetchall()
exclusion_set = [x[0] for x in exclusion_names]

# Prediction model
prediction_model = tf.keras.models.load_model("<Path_to_saved_model>",
                                              compile=False)

# While we still have images to classify; main() is assumed to return
# False once no batch remains
while result:
    result = main(batch_index, start, end, cur, prediction_model,
                  exclusion_set)
    start += increase
    end += increase
    batch_index += 1

from seed import *
import train
import train_KNH
import inference
from argument import get_args

seed_everything()
args = get_args()

# train.main(args)
# train_KNH.main(args)
inference.main(args)