def generate_summary(text):
    # Preprocessor, Postprocessor, and SummaRiser are assumed to be
    # imported from the project's own modules; MAX_TOKENS and MIN_TOKENS
    # are assumed to be module-level constants.
    preprocessor = Preprocessor()
    postprocessor = Postprocessor()
    summariser = SummaRiser('./path/to/data/vocab', './')

    sentences = sent_tokenize(text)
    total_sentences = len(sentences)
    tokens = 0  # token count for the current chunk
    tokenized = []
    summaries = ''
    for idx, sentence in enumerate(sentences):
        tokenized += preprocessor.tokenize(sentence)
        tokens = len(tokenized)  # size of the accumulated chunk
        # Summarize once the chunk is full, or flush a large-enough
        # remainder on the last sentence.
        if tokens >= MAX_TOKENS or (idx == total_sentences - 1 and tokens >= MIN_TOKENS):
            joined = ' '.join(tokenized)
            preprocessed_text = preprocessor.preprocess_text(joined.split('*N*'))
            summary = summariser.summarize([preprocessed_text])
            summaries += postprocessor.postprocess_text(summary[0]) + ' '
            tokens = 0
            tokenized = []
    return summaries
def infer_one_frame():
    num_landmarks = 96

    img = cv2.imread('E:/DB_FaceLandmark/300VW_frame/train/001_0000.jpg')
    # img = cv2.imread('E:/DB_FaceLandmark/300W/01_Indoor/indoor_048.png')
    img = cv2.resize(img, (416, 416))
    # HWC uint8 image -> normalized NCHW float tensor
    x = torch.from_numpy(img / 255).float().unsqueeze(0).permute(0, -1, 1, 2).cuda()

    model = YoloV3(num_landmarks=num_landmarks, backbone_network='darknet53')
    # state = torch.load('C:/Users/th_k9/Desktop/pytorch_Yolov3/model/ubuntu_606_0.0308.pth')
    state = torch.load('E:/models/309_0.0099.pth')
    model.load_state_dict(state['model_state_dict'])
    model.eval()
    model.cuda()

    y_pred = model(x, training=False)
    postprocessor = Postprocessor(max_detection=3,
                                  iou_threshold=0.5,
                                  score_threshold=0.5).cuda()
    boxes, scores, landmarks, num_detection = postprocessor(y_pred)
    landmarks = landmarks * img.shape[0]  # scale normalized landmarks to pixels

    num_img = num_detection.shape[0]
    for img_i in range(num_img):
        # based on train data
        # h, w, d = x[img_i].shape
        # based on original image
        h, w, d = img.shape
        for i in range(num_detection[img_i].item()):
            box = boxes.cpu()[img_i][i]
            xmin = int(box[0] * w)
            ymin = int(box[1] * h)
            xmax = int(box[2] * w)
            ymax = int(box[3] * h)
            img = cv2.rectangle(img, (xmin, ymin), (xmax, ymax), (0, 0, 255), 2)
        # draw the 48 (x, y) landmark pairs
        for i in range(0, num_landmarks, 2):
            img = cv2.circle(
                img,
                (int(landmarks[0][img_i][i]), int(landmarks[0][img_i][i + 1])),
                2, (0, 0, 255), -1)

    cv2.imshow('t', img)
    # cv2.imwrite('C:/Users/th_k9/Desktop/fl_model_test.jpg', img)
    cv2.waitKey()
    cv2.destroyAllWindows()
import numpy as np

from postprocess import Postprocessor
from srom import SROM
from target import SampleRandomVector

'''
Generate SROM to model input distribution (samples)
'''

#Specify input/output files and SROM optimization parameters
dim = 3
srom_size = 20

samplesfile = "mc_data/input_samples_MC.txt"
outfile = "srom_data_tmp/srom_m" + str(srom_size) + ".txt"

#Define target random variable from samples
MCsamples = np.genfromtxt(samplesfile)
target = SampleRandomVector(MCsamples)

#Define SROM, determine optimal parameters, store parameters
srom = SROM(srom_size, dim)
srom.optimize(target, weights=[1, 1, 1], error="SSE")

#NOTE - commented out to not overwrite paper data files:
#srom.save_params(outfile)

#Check out the CDFs
pp = Postprocessor(srom, target)
pp.compare_CDFs(variablenames=[r'log$C$', r'$y_{0}$', r'$n$'])
#generate SROM for random vector of stiffness & mass
sromsize = 25
dim = 2

#Assume we only have access to samples in this example and want SROM from them:
km_samples = np.array([k_samples, m_samples]).T
km_random_vector = SampleRandomVector(km_samples)

srom = SROM(sromsize, dim)
srom.optimize(km_random_vector)
(samples, probs) = srom.get_params()

#Run model to get max disp for each SROM stiffness sample
srom_disps = np.zeros(sromsize)
for i in range(sromsize):
    k = samples[i, 0]
    m = samples[i, 1]
    srom_disps[i] = model.get_max_disp(k, m)

#Form new SROM for the max displacement solution using samples from the model
srom_solution = SROM(sromsize, 1)
srom_solution.set_params(srom_disps, probs)

#----------------------------------------
#Compare solutions
pp = Postprocessor(srom_solution, mc_solution)
pp.compare_CDFs()
from flask import Flask, jsonify, request

app = Flask(__name__)

def generate_summary(text):
    sentences = sent_tokenize(text)
    total_sentences = len(sentences)
    tokens = 0  # token count for the current chunk
    tokenized = []
    summaries = ''
    for idx, sentence in enumerate(sentences):
        tokenized += preprocessor.tokenize(sentence)
        tokens = len(tokenized)  # size of the accumulated chunk
        if tokens >= MAX_TOKENS or idx == total_sentences - 1:
            joined = ' '.join(tokenized)
            preprocessed_text = preprocessor.preprocess_text(joined.split('*N*'))
            summary = summariser.summarize([preprocessed_text])
            summaries += postprocessor.postprocess_text(summary[0]) + ' '
            tokens = 0
            tokenized = []
    return summaries

@app.route("/summary", methods=['POST'])
def get_summary():
    summary = generate_summary(request.json['text'])
    return jsonify({"Summary": summary})

if __name__ == "__main__":
    preprocessor = Preprocessor()
    postprocessor = Postprocessor()
    summariser = SummaRiser('./path/to/data/vocab', './')
    app.run(debug=True, port=3002)
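# A minimal client sketch for the /summary endpoint above, assuming the
# server is running locally on port 3002 as configured; the sample text
# is purely illustrative.
import requests

response = requests.post(
    "http://localhost:3002/summary",
    json={"text": "Some long article text to be summarized ..."})
print(response.json()["Summary"])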
def infer_cam():
    num_landmarks = 96

    model = YoloV3(num_landmarks=num_landmarks, backbone_network='darknet53')
    # state = torch.load('C:/Users/th_k9/Desktop/pytorch_Yolov3/model/ubuntu_606_0.0308.pth')
    state = torch.load('E:/models/309_0.0099.pth')
    model.load_state_dict(state['model_state_dict'])
    model.eval()
    model.cuda()

    # Build the postprocessor once, outside the capture loop.
    postprocessor = Postprocessor(max_detection=3,
                                  iou_threshold=0.5,
                                  score_threshold=0.5).cuda()

    # cap = cv2.VideoCapture(0)
    # 113, 143
    cap = cv2.VideoCapture('E:/DB_FaceLandmark/300VW/007/vid.avi')
    # fourcc = cv2.VideoWriter_fourcc(*'DIVX')
    # out = cv2.VideoWriter('C:/Users/th_k9/Desktop/143.avi', fourcc, 30.0, (416, 416))

    while True:
        ret, frame = cap.read()
        if not ret:
            break

        img = cv2.resize(frame, (416, 416))
        # HWC uint8 frame -> normalized NCHW float tensor
        x = torch.from_numpy(img / 255).float().unsqueeze(0).permute(0, -1, 1, 2).cuda()

        y_pred = model(x, training=False)
        boxes, scores, landmarks, num_detection = postprocessor(y_pred)
        landmarks = landmarks * img.shape[0]  # scale normalized landmarks to pixels

        num_img = num_detection.shape[0]
        for img_i in range(num_img):
            # based on train data
            # h, w, d = x[img_i].shape
            # based on original image
            h, w, d = img.shape
            for i in range(num_detection[img_i].item()):
                box = boxes.cpu()[img_i][i]
                xmin = int(box[0] * w)
                ymin = int(box[1] * h)
                xmax = int(box[2] * w)
                ymax = int(box[3] * h)
                img = cv2.rectangle(img, (xmin, ymin), (xmax, ymax), (0, 0, 255), 2)
            # draw the 48 (x, y) landmark pairs
            for i in range(0, num_landmarks, 2):
                img = cv2.circle(img,
                                 (int(landmarks[0][img_i][i]),
                                  int(landmarks[0][img_i][i + 1])),
                                 1, (255, 255, 255), -1)

        cv2.imshow('t', img)
        # out.write(img)
        if cv2.waitKey(1) == 27:  # Esc to quit
            break

    cv2.destroyAllWindows()
    cap.release()
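# Hypothetical entry point, assuming the two inference helpers above live
# in the same script; uncomment whichever you want to run.
if __name__ == '__main__':
    infer_one_frame()
    # infer_cam()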
MC_eols = np.genfromtxt(mc_eol_file)

#Get SROM EOL samples, FD samples and input SROM from file
srom_eols = np.genfromtxt(srom_eol_file)
srom_fd_eols = np.genfromtxt(srom_fd_eol_file)
input_srom = SROM(sromsize, dim)
input_srom.load_params(srom_input_file)

#Get FD step sizes from file (the same for all samples, just pull the first)
#Step sizes chosen as approximately 2% of the median sample value of inputs
stepsizes = [0.083, 0.0065, 0.025]

#Calculate gradient from FiniteDifference class:
gradient = FD.compute_gradient(srom_eols, srom_fd_eols, stepsizes)

#Create SROM surrogate, sample, and create random variable solution
surrogate_PWL = SROMSurrogate(input_srom, srom_eols, gradient)
srom_eol_samples = surrogate_PWL.sample(MC_inputs)
solution_PWL = SampleRandomVector(srom_eol_samples)

#Store EOL samples for plotting later:
eolfile = "srom_data/srom_eol_samples_m" + str(sromsize) + ".txt"
np.savetxt(eolfile, srom_eol_samples)

#Make MC random variable solution
eol_mc = SampleRandomVector(MC_eols)

#Compare solutions
pp = Postprocessor(solution_PWL, eol_mc)
pp.compare_CDFs()
import numpy as np

from postprocess import Postprocessor
from srom import SROM
from target import SampleRandomVector

'''
Ex3 - unimodal 3D
Script to generate PW constant SROM approximation to EOL and compare it
with the Monte Carlo solution (w/ surrogate model)
'''

mc_eol_file = "mc_data/eol_samples_MC.txt"

sromsize = 20
srom_eol_file = "srom_data/srom_eol_m" + str(sromsize) + ".txt"
srom_input_file = "srom_data/srom_m" + str(sromsize) + ".txt"

#Get MC EOL samples
MC_eols = np.genfromtxt(mc_eol_file)

#Get SROM EOL samples & probabilities from input srom
srom_eols = np.genfromtxt(srom_eol_file)
srom_probs = np.genfromtxt(srom_input_file)[:, -1]  #probs in last column

#Make MC random variable & SROM to compare
eol_srom = SROM(sromsize, dim=1)
eol_srom.set_params(srom_eols, srom_probs)
eol_mc = SampleRandomVector(MC_eols)

pp = Postprocessor(eol_srom, eol_mc)
pp.compare_CDFs(variablenames=["EOL"])
import numpy
from os import path

from postprocess import Postprocessor
from srom import SROM
from target import SampleRandomVector

#Define target random vector from samples
monte_carlo_input_samples_filename = path.join("mc_data", "input_samples_MC.txt")
monte_carlo_input_samples = numpy.genfromtxt(monte_carlo_input_samples_filename)
target_vector = SampleRandomVector(monte_carlo_input_samples)

#Define SROM and determine optimal parameters
srom_size = 20
input_srom = SROM(size=srom_size, dim=3)
input_srom.optimize(target_vector)

#Compare the input CDFs (produces Figure 6)
post_processor = Postprocessor(input_srom, target_vector)
post_processor.compare_CDFs(variablenames=[r'log$C$', r'$y_{0}$', r'$n$'])

#Run the model for each input SROM sample:
srom_results = numpy.zeros(srom_size)
(srom_samples, srom_probs) = input_srom.get_params()

# TODO: define model here.
model = None
if model is None:
    raise ValueError("model has not been defined.")

for i, sample in enumerate(srom_samples):
    srom_results[i] = model.evaluate(sample)
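# For illustration only: a hypothetical stand-in exposing the
# evaluate(sample) interface the script above expects. The real model is
# left as a TODO there; this toy formula is invented just to show the
# shape of the call, not any physics.
class ToyModel:
    def evaluate(self, sample):
        # sample is one SROM point, here a length-3 vector (logC, y0, n)
        return float(sum(x * x for x in sample))

# model = ToyModel()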
#Specify input/output files and SROM optimization parameters
dim = 3
srom_size = 20

mc_input_file = "mc_data/input_samples_MC.txt"
mc_eol_file = "mc_data/eol_samples_MC.txt"

#Define target random variable from samples
MCsamples = np.genfromtxt(mc_input_file)
target = SampleRandomVector(MCsamples)

#Define SROM, determine optimal parameters, store parameters
input_srom = SROM(srom_size, dim)
input_srom.optimize(target, weights=[1, 1, 1], error="SSE")

#Compare the CDFs
pp = Postprocessor(input_srom, target)
pp.compare_CDFs(saveFig=False)

#Run the model for each input SROM sample:
srom_eols = np.zeros(srom_size)
(srom_samples, srom_probs) = input_srom.get_params()
for i, sample in enumerate(srom_samples):
    srom_eols[i] = model.evaluate(sample)

#Generate SROM surrogate for the output
eol_srom = SROMSurrogate(input_srom, srom_eols)

#Make random variable with MC eol solution
MC_eols = np.genfromtxt(mc_eol_file)
eol_mc = SampleRandomVector(MC_eols)
legendfontsize = 24
cdfylabel = True  #Label y axis as "CDF"

plot_dir = "plots"
plot_suffix = "SROM_pwlin_eol_CDF_m"
for m in sromsizes:
    plot_suffix += "_" + str(m)

#Load / initialize target random variable from samples:
samples = np.genfromtxt(targetsamples)
target = SampleRandomVector(samples)

#Build up sromsize-to-SROM object map for plotting routine
sroms = OrderedDict()

for sromsize in sromsizes:
    #Get EOL SROM Surrogate samples to make SampleRandomVector representation of CDF
    eolsamplefile = "srom_eol_samples_m" + str(sromsize) + ".txt"
    eolsamplefile = os.path.join(srom_dir, eolsamplefile)
    eolsamples = np.genfromtxt(eolsamplefile)
    sroms[sromsize] = SampleRandomVector(eolsamples)

Postprocessor.compare_srom_CDFs(sroms, target,
                                plotdir=plot_dir,
                                plotsuffix=plot_suffix,
                                variablenames=varz,
                                xlimits=xlimits,
                                ylimits=ylimits,
                                xticks=xticks,
                                cdfylabel=cdfylabel,
                                xaxispadding=xaxispadding,
                                axisfontsize=axisfontsize,
                                labelfontsize=labelfontsize,
                                legendfontsize=legendfontsize)
#Load / initialize target random variable from samples:
samples = np.genfromtxt(targetsamples)
target = SampleRandomVector(samples)

#Set x limits for each variable based on target:
xlimits = []
for i in range(target._dim):
    lims = [np.min(samples[:, i]), np.max(samples[:, i])]
    xlimits.append(lims)

#Build up sromsize-to-SROM object map for plotting routine
sroms = OrderedDict()

for sromsize in sromsizes:
    #Generate SROM from file:
    srom = SROM(sromsize, target._dim)
    sromfile = "srom_m" + str(sromsize) + ".txt"
    sromfile = os.path.join(srom_dir, sromfile)
    srom.load_params(sromfile)
    sroms[sromsize] = srom

Postprocessor.compare_srom_CDFs(sroms, target,
                                plotdir="plots",
                                plotsuffix=plot_suffix,
                                variablenames=varz,
                                xlimits=xlimits,
                                xticks=xticks,
                                cdfylabel=cdfylabel)
from postprocess import Postprocessor
from srom import SROM
from target import NormalRandomVariable

#Initialize Normal random variable object to be modeled by SROM:
normal = NormalRandomVariable(mean=3., std_dev=1.5)

#Initialize SROM & optimize to model the normal random variable:
srom = SROM(size=10, dim=1)
srom.optimize(normal)

#Compare the CDF of the SROM & target normal variable:
pp = Postprocessor(srom, normal)
pp.compare_CDFs()
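# A small follow-on sketch: get_params() (used the same way in the other
# scripts here) returns the SROM's discrete samples and their
# probabilities, which is a quick way to sanity-check the optimization.
(samples, probs) = srom.get_params()
for sample, prob in zip(samples, probs):
    print("sample = %s, probability = %s" % (sample, prob))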
for i, stiff in enumerate(stiffness_samples):
    disp_samples[i] = model.get_max_disp(stiff)

#Get Monte Carlo solution as a sample-based random variable:
mc_solution = SampleRandomVector(disp_samples)

#-------------SROM-----------------------

#generate SROM for random stiffness
sromsize = 10
dim = 1

input_srom = SROM(sromsize, dim)
input_srom.optimize(stiffness_rv)

#Compare SROM vs target stiffness distribution:
pp_input = Postprocessor(input_srom, stiffness_rv)
pp_input.compare_CDFs()

#Run model to get max disp for each SROM stiffness sample
srom_disps = np.zeros(sromsize)
(samples, probs) = input_srom.get_params()
for i, stiff in enumerate(samples):
    srom_disps[i] = model.get_max_disp(stiff)

#Form new SROM for the max disp. solution using samples from the model
output_srom = SROM(sromsize, dim)
output_srom.set_params(srom_disps, probs)

#Compare solutions
pp_output = Postprocessor(output_srom, mc_solution)
pp_output.compare_CDFs()