Example 1
def new_run(X_train, y_train, X_val, y_val, model_savename):
    """Trains and saves a model with given training data."""
    tf.reset_default_graph()
    batches = batch_generator((X_train, y_train), batch_size=128)

    with tf.Session() as sess:
        # Create the model
        X = tf.placeholder(tf.float32,
                           (None, IMAGE_SHAPE[0], IMAGE_SHAPE[1], 3))
        target = tf.placeholder(tf.float32, (None, NUM_CLASSES))
        model = Fishmodel(X, num_classes=NUM_CLASSES)

        # Cross entropy loss
        cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
            logits=model.logits, labels=target, name="cross_entropy")
        loss = tf.reduce_mean(cross_entropy, name="cross_entropy_mean")

        # Accuracy
        corrects = tf.equal(tf.argmax(model.softmax, 1), tf.argmax(target, 1))
        accuracy = tf.reduce_mean(tf.cast(corrects, tf.float32))

        # Summary reports for tensorboard
        tf.summary.scalar("mean_cross_entropy_loss", loss)
        tf.summary.scalar("accuracy", accuracy)
        merged_summary = tf.summary.merge_all()
        summary_writer = tf.summary.FileWriter(SUMMARY_DIR, sess.graph)

        global_step = tf.Variable(0, name='global_step', trainable=False)
        train_step = tf.train.AdamOptimizer(1e-3).minimize(
            loss, global_step=global_step)

        # Build the saver once every variable (including global_step) exists
        saver = tf.train.Saver(tf.global_variables())
        sess.run(tf.global_variables_initializer())

        print("Starting training...")
        for _ in range(int(1e7)):
            X_batch, y_batch = next(batches)
            _, summary, i = sess.run([train_step, merged_summary, global_step],
                                     feed_dict={
                                         X: X_batch,
                                         target: y_batch,
                                         model.keep_prob: 0.5
                                     })
            summary_writer.add_summary(summary, i)
            if i > 100000 and i % 1000 == 0:
                probs_val = infer(sess, model, X_val)
                # TODO: compare with y_val to see if we should stop early

        # TODO run accuracy on whole validation set
        probs_val = infer(sess, model, X_val)
        # TODO: define tf ops for this total accuracy

        saver.save(sess, model_savename)
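Note: the snippet relies on a batch_generator helper that is not shown. A minimal sketch, assuming it shuffles (X, y) and yields fixed-size batches indefinitely (which matches the endless next(batches) loop above):

import numpy as np

def batch_generator(data, batch_size=128):
    # Assumption: cycle over the data forever, reshuffling each epoch.
    X, y = data
    n = len(X)
    while True:
        idx = np.random.permutation(n)
        for start in range(0, n - batch_size + 1, batch_size):
            batch = idx[start:start + batch_size]
            yield X[batch], y[batch]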
Example 2
def main(_):
    hparams = create_hparams(FLAGS)

    if hparams.mode == 'cut_data':
        data_helper.cut_file(hparams)
        return

    data_info = data_helper.init_data(hparams)
    model = def_model(hparams)

    if hparams.mode == 'train':
        train.train(hparams, model, data_info)
    elif hparams.mode == 'inference':
        question = hparams.question
        cut_sentence = [' '.join(data_helper.cut_sentence(hparams, question))]

        if len(cut_sentence[0].split()) > hparams.enc_sentence_length:
            raise Exception(
                "question too long; retrain your model with a larger `enc_sentence_length`."
            )

        result, loss_value = inference.infer(hparams, model, data_info,
                                             cut_sentence)
        print("question: ", question, "result: ", result.strip(), "loss:",
              loss_value)
    elif hparams.mode == 'export':
        export_helper.export_model(hparams, model)
    else:
        raise Exception("mode error, must in (train, inference, export)")
Example 3
    def run(self):
        """
        list of (command uris, rank) with most relevant first
        """
        fileParsing = 0
        httpReading = 0

        fileParsing -= time.time()
        self.makeRuleGraph()
        fileParsing += time.time()

        (f2, h2) = yield self.makeFactGraph()
        fileParsing += f2
        httpReading += h2

        self.target = infer(self.factGraph, self.ruleStore)

        rankCmd = {}
        for cmd, rank in self.target.query(
                "SELECT DISTINCT ?cmd ?rank WHERE { ?cmd a cl:available . OPTIONAL { ?cmd cl:ranking ?rank } }",
                initNs=dict(cl=CL)):
            rankCmd[cmd] = rankCmd.get(cmd, 0) + float(rank or 0)

        ret = sorted(rankCmd.items(),
                     key=lambda item: (item[1], item[0]),
                     reverse=True)

        self.timeReport = ("spent %.1fms parsing files, %.1fms fetching http" %
                           (1000 * fileParsing, 1000 * httpReading))

        returnValue(ret)
Example 4
    def comparable_validation(self,
                              gen,
                              style_avails,
                              target_fonts,
                              target_chars,
                              n_max_match=3,
                              compare_inputs=False):
        """Compare horizontally for target fonts and chars"""
        # infer
        loader = get_val_loader(self.data,
                                target_fonts,
                                target_chars,
                                style_avails,
                                B=self.batch_size,
                                n_max_match=n_max_match,
                                transform=self.transform,
                                content_font=self.content_font,
                                language=self.language,
                                n_workers=self.n_workers)
        out = infer(gen, loader)  # [B, 1, 128, 128]

        # ref original chars
        refs = self.get_charimages(target_fonts, target_chars)

        compare_batches = [refs, out]
        if compare_inputs:
            compare_batches += self.get_inputimages(loader)

        nrow = len(target_chars)
        comparable_grid = utils.make_comparable_grid(*compare_batches,
                                                     nrow=nrow)

        return comparable_grid
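Note: infer(gen, loader) is imported from elsewhere. A plausible minimal sketch, assuming each batch from the loader is a tuple of generator inputs already on the right device (both assumptions):

import torch

@torch.no_grad()
def infer(gen, loader):
    # Run the generator over every batch and stack the outputs on CPU.
    gen.eval()
    outs = [gen(*batch).cpu() for batch in loader]
    return torch.cat(outs)  # e.g. [B, 1, 128, 128] as noted above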
Example 5
def detect_spoof():
    data = request.json

    images = data['images']
    images = np.array(images)

    result = {}

    try:
        prediction, score = infer(images=images)
        result['predicted_class'] = prediction
        result['score'] = str(score)
        result['exception'] = None
        # print(result)

        return Response(json.dumps(result, indent=4),
                        mimetype='application/json')

    except Exception as exp:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        print(exc_type, fname, exc_tb.tb_lineno)

        result = {'exception': str(exp)}
        return Response(json.dumps(result, indent=4),
                        mimetype='application/json')
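Note: a minimal client for this endpoint, assuming it is registered under a route such as /detect_spoof on a local Flask server (URL, port, and input shape are assumptions):

import numpy as np
import requests

images = np.zeros((1, 224, 224, 3), dtype=np.uint8)  # dummy batch; real shape depends on the model
resp = requests.post("http://localhost:5000/detect_spoof",
                     json={"images": images.tolist()})
print(resp.json())  # e.g. {'predicted_class': ..., 'score': ..., 'exception': None}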
Example 6
def post():
    dic = {}
    if request.method == "POST":
        if request.files["file-submit"].filename != "":
            img_file = request.files["file-submit"]
            f = img_file.stream.read()
            bin_data = io.BytesIO(f)
            file_bytes = np.asarray(bytearray(bin_data.read()), dtype=np.uint8)
            img = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR)

            raw_img_url = os.path.join(
                UPLOAD_FOLDER, "raw_" + secure_filename(img_file.filename))
            cv2.imwrite(raw_img_url, img)

            prob = inference.infer(raw_img_url)
            dic["img_ans"] = prob

            ref_img_url = os.path.join(
                REF_FOLDER, "raw_" + secure_filename(img_file.filename))
            dic["img_url"] = ref_img_url

    return jsonify(dic)
Example 7
    def infer_call_type(self, func):
        cls = self.curr_scope.class_context()
        if isinstance(func, nodes.Name):
            if self.curr_scope.check_builtin_usage(func.name):
                if func.name in Scope.BUILTINS_FUNC:
                    return "Function"
                else:
                    return "Class"
        elif isinstance(func, nodes.Getattr):
            if cls is not None and isinstance(func.expr,
                                              nodes.CallFunc) and isinstance(
                                                  func.expr.func, nodes.Name):
                if func.expr.func.name == "super":
                    return "Function"

        is_class = False
        is_func = False
        for inferred in inference.infer(func):
            qname = inferred.qname()
            if isinstance(qname, str):
                if qname.startswith("Module."):
                    qname = "%s.%s" % (self.modname, qname[len("Module."):])
                if qname in self.overridden_types:
                    return self.overridden_types[qname]
                if isinstance(inferred, nodes.Class):
                    is_class = True
                elif isinstance(inferred, nodes.Function):
                    is_func = True
        if is_class and is_func:
            return None
        if is_class:
            return "Class"
        if is_func:
            return "Function"
        return None
Example 9
    def _unwrap_process_rewrap(self, lower_bound, upper_bound, line_objects):
        s = [obj['raw_date_string'] for obj in line_objects]
        datetime_list = inference.infer(lower_bound, upper_bound, s)
        for idx, line_object in enumerate(line_objects):
            line_object['datetime'] = datetime_list[idx]
            del line_object['raw_date_string']
        return line_objects
Example 10
    def run(self):
        """ Method that runs forever """

        while True:
            print('Updating...')
            self.globalVar = infer()
Example 11
def annotate():
    file = extract_image(request)
    if file and is_picture(file.filename):
        # The image file seems valid! Detect faces and return the result.
        image = infer(file, ort_session)
        _, buffer = cv2.imencode(file_extension(file.filename), image)
        return make_response(buffer.tobytes())
    else:
        raise BadRequest("Given file is invalid!")
Example 12
def yolo_video(video_file, result_path, model_path, context_norm, body_norm, ind2cat, ind2vad, args):
  ''' Perform inference on a video. First, the YOLO model is used to obtain bounding boxes of persons in every frame.
  After that, the emotic model is used to obtain categorical and continuous emotion predictions.
  :param video_file: Path of video file. 
  :param result_path: Directory path to save the results (output video).
  :param model_path: Directory path to load models and val_thresholds to perform inference.
  :param context_norm: List containing mean and std values for context images. 
  :param body_norm: List containing mean and std values for body images. 
  :param ind2cat: Dictionary converting integer index to categorical emotion. 
  :param ind2vad: Dictionary converting integer index to continuous emotion dimension (Valence, Arousal and Dominance).
  :param args: Runtime arguments.
  '''  
  device = torch.device("cuda:%s" %(str(args.gpu)) if torch.cuda.is_available() else "cpu")
  yolo = prepare_yolo(model_path)
  yolo = yolo.to(device)
  yolo.eval()

  thresholds = torch.FloatTensor(np.load(os.path.join(result_path, 'val_thresholds.npy'))).to(device) 
  model_context = torch.load(os.path.join(model_path,'model_context1.pth')).to(device)
  model_body = torch.load(os.path.join(model_path,'model_body1.pth')).to(device)
  emotic_model = torch.load(os.path.join(model_path,'model_emotic1.pth')).to(device)
  model_context.eval()
  model_body.eval()
  emotic_model.eval()
  models = [model_context, model_body, emotic_model]

  video_stream = cv2.VideoCapture(video_file)
  writer = None

  print ('Starting testing on video')
  while True:
    (grabbed, frame) = video_stream.read()
    if not grabbed:
      break
    image_context = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

    try: 
      bbox_yolo = get_bbox(yolo, device, image_context)
      for pred_idx, pred_bbox in enumerate(bbox_yolo):
        pred_cat, pred_cont = infer(context_norm, body_norm, ind2cat, ind2vad, device, thresholds, models, image_context=image_context, bbox=pred_bbox, to_print=False)
        write_text_vad = list()
        for continuous in pred_cont:
          write_text_vad.append(str('%.1f' %(continuous)))
        write_text_vad = 'vad ' + ' '.join(write_text_vad) 
        image_context = cv2.rectangle(image_context, (pred_bbox[0], pred_bbox[1]),(pred_bbox[2] , pred_bbox[3]), (255, 0, 0), 3)
        cv2.putText(image_context, write_text_vad, (pred_bbox[0], pred_bbox[1] - 5), cv2.FONT_HERSHEY_PLAIN, 1, (0, 0, 255), 2)
        for i, emotion in enumerate(pred_cat):
          cv2.putText(image_context, emotion, (pred_bbox[0], pred_bbox[1] + (i+1)*12), cv2.FONT_HERSHEY_PLAIN, 1, (0, 0, 255), 2)
    except Exception:
      pass
    if writer is None:
      fourcc = cv2.VideoWriter_fourcc(*"MJPG")
      writer = cv2.VideoWriter(os.path.join(result_path, 'result_vid.avi'), fourcc, 30, (image_context.shape[1], image_context.shape[0]), True)  
    writer.write(cv2.cvtColor(image_context, cv2.COLOR_RGB2BGR))
  if writer is not None:
    writer.release()
  video_stream.release() 
  print ('Completed video')
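Note: a sketch of how yolo_video might be driven from the command line. Only args.gpu is actually read inside the function; every other flag name and the placeholder stats below are assumptions:

import argparse

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--video_file", required=True)
    parser.add_argument("--result_path", default="./results")
    parser.add_argument("--model_path", default="./models")
    parser.add_argument("--gpu", type=int, default=0)
    args = parser.parse_args()

    # Placeholder normalization stats and label maps; the real values come from training.
    context_norm = [[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]]
    body_norm = [[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]]
    ind2cat = {}
    ind2vad = {0: 'Valence', 1: 'Arousal', 2: 'Dominance'}

    yolo_video(args.video_file, args.result_path, args.model_path,
               context_norm, body_norm, ind2cat, ind2vad, args)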
Example 13
    def _makeInferred(self, inputGraph):
        t1 = time.time()

        out = infer(inputGraph, self.ruleStore)
        for p, n in NS.iteritems():
            out.bind(p, n, override=True)

        inferenceTime = time.time() - t1
        out.add((ROOM['reasoner'], ROOM['inferenceTime'],
                 Literal(inferenceTime)))
        return out, inferenceTime
Example 14
    def _makeInferred(self, inputGraph: ConjunctiveGraph):
        t1 = time.time()

        out = infer(inputGraph, self.ruleStore)
        for p, n in NS.items():
            out.bind(p, n, override=True)

        inferenceTime = time.time() - t1
        out.add(
            (ROOM['reasoner'], ROOM['inferenceTime'], Literal(inferenceTime)))
        return out, inferenceTime
Example 15
    def is_js_noop(self, dec):
        inferred = list(inference.infer(dec))
        if len(inferred) == 1:
            inferred_dec = inferred[0]
            if isinstance(inferred_dec, nodes.Function) \
                    and inferred_dec.decorators is not None:
                for dec_dec in inferred_dec.decorators.nodes:
                    if inference.infer_qname(dec_dec) == "prambanan.JS_noop_marker":
                        return True
        return False
Example 16
def predict():
    pred_imgs = []
    pred_label = []
    result = []
    files = [x[2] for x in os.walk(TEST_PATH)]
    for file in files[0]:
        file_path = os.path.join(TEST_PATH, file)
        new_img = Image.open(file_path)
        new_img = ImageOps.fit(new_img, (96, 96), Image.ANTIALIAS).convert('RGB')
        pred_imgs.append(np.array(new_img))

    x = tf.placeholder(tf.float32,
                       [len(pred_imgs), preprocessing.IMAGE_SIZE,
                        preprocessing.IMAGE_SIZE, preprocessing.IMAGE_CHANNELS],
                       'x-input')
    y = inference.infer(x, False, None)
    saver = tf.train.Saver()
    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
        ckpt = tf.train.get_checkpoint_state(train.MODEL_SAVE_PATH, "checkpoint")
        if ckpt and ckpt.model_checkpoint_path:
            save_path = ckpt.model_checkpoint_path
            saver.restore(sess, save_path)
            pred = sess.run(y, feed_dict={x: pred_imgs})
            pred_idx = np.argmax(pred, axis=1)

            # Map class indices to species names (order follows the training labels)
            species = [
                "Black-grass", "Cleavers", "Common wheat", "Loose Silky-bent",
                "Scentless Mayweed", "Small-flowered Cranesbill", "Charlock",
                "Common Chickweed", "Fat Hen", "Maize", "Shepherds Purse",
                "Sugar beet"
            ]
            pred_label = [species[i] for i in pred_idx]

            df = pd.DataFrame(data={'file': files[0], 'species': pred_label})
            print(df)
            df_sort = df.sort_values(by=['file'])
            df_sort.to_csv('/home/administrator/PengXiao/plant/results.csv', index=False, sep=',')
Example 17
def pred(img):
	seg=segment_inference.segment()
	img=seg.converter(img)
	# cv2.imshow('a',img)
	# cv2.waitKey(0)
	# cv2.destroyAllWindows()
	infer_=inference.infer()
	flag=(infer_.predict(img))

	### closing 
	infer_.close()
	seg.close()

	return flag
Example 18
def recommend(user_id: int):
    try:
        user_recommendations = ",".join(map(str, infer(user_id)))
    except Exception:
        shuffle(movie_ids)
        user_recommendations = ",".join(map(str, movie_ids[:10]))
    # Add recommendation to list of recommendations to write to DB
    recommendations.append({
        "timestamp": datetime.now(),
        "user_id": user_id,
        "recommendations": user_recommendations,
        "model_version": model_version,
    })
    return user_recommendations
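Note: the snippet buffers rows in a module-level recommendations list "to write to DB". A minimal sketch of such a flush step, assuming an SQLite sink (the table schema is an assumption):

import sqlite3

def flush_recommendations(rows, db_path="recommendations.db"):
    # Write the buffered rows and clear the in-memory buffer.
    with sqlite3.connect(db_path) as conn:
        conn.execute("""CREATE TABLE IF NOT EXISTS recommendations
                        (timestamp TEXT, user_id INTEGER,
                         recommendations TEXT, model_version TEXT)""")
        conn.executemany(
            "INSERT INTO recommendations VALUES "
            "(:timestamp, :user_id, :recommendations, :model_version)",
            [{**r, "timestamp": str(r["timestamp"])} for r in rows])
    rows.clear()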
Example 19
    def infer(self, prompts_text):
        """
		:param list(str) prompts_text: Prompts to give the model

		:returns: A dataframe of the prompts, their corrresponding responses, and any other metadata the model provides.
		:rtype: pd.DataFrame
		"""
        if not self.inference:
            raise ValueError(
                "Can only call infer() if model is constructed in inference mode"
            )

        unk_int = self.data.unk_int
        vocab2int = self.data.text2int

        cleaned_prompts = [seq.strip() for seq in prompts_text]
        prompts_int = [[
            vocab2int.get(token, unk_int) for token in seq.split()
        ] for seq in cleaned_prompts]
        pad_int = self.data.pad_int

        with tf.Session() as sess:
            self.infer_checkpoint.restore(
                self.model_load).assert_consumed().run_restore_ops()
            sys.stderr.write("Restored model from {}\n".format(
                self.model_load))
            beam_outputs = inference.infer(
                sess,
                self.model,
                prompts_int,
                self.infer_feeds,
                self.model.beams,
                pad_int,
                batch_size=self.config.infer_batch_size)

        int2vocab = self.data.int2text
        beam_width = len(beam_outputs[0][0])

        out_frame = pd.DataFrame({"prompts": prompts_text})
        for j in range(beam_width):
            label = "beams_" + str(j)
            column = []
            for i in range(len(beam_outputs)):
                beam = beam_outputs[i][:, j]  #jth beam for the ith sample
                beam_text = " ".join(
                    [int2vocab[token] for token in beam if token != pad_int])
                column.append(beam_text)
            out_frame[label] = column
        return out_frame
Example 20
def run_infer(weights_file, labels_file, image_path, out_filename):

    model = YOLOv4Model()
    model.load_weights(weights_file)

    img, net_input = read_img(image_path, 608)

    with open(labels_file, "r") as f:
        cls_names = f.read().split("\n")

    boxes, scores, labels = inference.infer(model, cls_names, net_input)

    pixels = add_bboxes(img, boxes, scores, labels)
    if out_filename:
        save_img(out_filename, pixels)
    else:
        draw_img(pixels)
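Note: read_img(image_path, 608) is imported from elsewhere. A plausible minimal sketch that returns both the original image and a [1, size, size, 3] float input; the exact preprocessing YOLOv4 expects here is an assumption:

import cv2
import numpy as np

def read_img(path, size):
    # Load as RGB, resize to the network input size, scale to [0, 1], add batch axis.
    img = cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2RGB)
    resized = cv2.resize(img, (size, size)).astype(np.float32) / 255.0
    return img, resized[np.newaxis, ...]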
Example 21
def image():
    payload = request.json
    imageUrl = payload['imageUrl']
    quadKey = payload['quadKey']

    temp = inference.infer(imageUrl)

    filename = "/Users/danielblackburn/space-apps-2019/beautiful_earth/app/static/" + quadKey + ".png"
    cv2.imwrite(filename, temp)

    response = make_response(
        send_file(filename,
                  mimetype='image/jpeg',
                  as_attachment=True,
                  attachment_filename=quadKey))
    response.headers['X-quadKey'] = quadKey
    return response
Example 22
def yolo_infer(images_list, result_path, model_path, context_norm, body_norm, ind2cat, ind2vad, args):
  ''' Infer on a list of images defined in images_list text file to obtain bounding boxes of persons in the images using yolo model.
  :param images_list: Text file specifying the images to conduct inference. A row in the file is Path_of_image. 
  :param result_path: Directory path to save the results (images with the predicted emotion categories and continuous emotion dimensions).
  :param model_path: Directory path to load models and val_thresholds to perform inference.
  :param context_norm: List containing mean and std values for context images. 
  :param body_norm: List containing mean and std values for body images. 
  :param ind2cat: Dictionary converting integer index to categorical emotion. 
  :param ind2vad: Dictionary converting integer index to continuous emotion dimension (Valence, Arousal and Dominance).
  :param args: Runtime arguments.
  '''
  device = torch.device("cuda:%s" %(str(args.gpu)) if torch.cuda.is_available() else "cpu")
  yolo = prepare_yolo(model_path)
  yolo = yolo.to(device)
  yolo.eval()

  thresholds = torch.FloatTensor(np.load(os.path.join(result_path, 'val_thresholds.npy'))).to(device) 
  model_context = torch.load(os.path.join(model_path,'model_context1.pth')).to(device)
  model_body = torch.load(os.path.join(model_path,'model_body1.pth')).to(device)
  emotic_model = torch.load(os.path.join(model_path,'model_emotic1.pth')).to(device)
  models = [model_context, model_body, emotic_model]

  with open(images_list, 'r') as f:
    lines = f.readlines()
  
  for idx, line in enumerate(lines):
    image_context_path = line.split('\n')[0].split(' ')[0]
    image_context = cv2.cvtColor(cv2.imread(image_context_path), cv2.COLOR_BGR2RGB)
    try:
      bbox_yolo = get_bbox(yolo, device, image_context)
      for pred_bbox in bbox_yolo:
        pred_cat, pred_cont = infer(context_norm, body_norm, ind2cat, ind2vad, device, thresholds, models, image_context=image_context, bbox=pred_bbox, to_print=False)
        write_text_vad = list()
        for continuous in pred_cont:
          write_text_vad.append(str('%.1f' %(continuous)))
        write_text_vad = 'vad ' + ' '.join(write_text_vad) 
        image_context = cv2.rectangle(image_context, (pred_bbox[0], pred_bbox[1]),(pred_bbox[2] , pred_bbox[3]), (255, 0, 0), 3)
        cv2.putText(image_context, write_text_vad, (pred_bbox[0], pred_bbox[1] - 5), cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255), 1)
        for i, emotion in enumerate(pred_cat):
          cv2.putText(image_context, emotion, (pred_bbox[0], pred_bbox[1] + (i+1)*12), cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255), 1)
    except Exception as e:
      print ('Exception for image ',image_context_path)
      print (e)
    cv2.imwrite(os.path.join(result_path, 'img_%r.jpg' %(idx)), cv2.cvtColor(image_context, cv2.COLOR_RGB2BGR))
    print ('completed inference for image %d'  %(idx))
Example 23
def get_invite():
    result = {"error": 1}
    try:
        query = json.loads(request.data.decode('utf-8'))['query']
        group = infer(query, kmeans, vectorizer)

        resultDf = manager.get_emails(group[0])

        data = []
        for index, row in resultDf.iterrows():
            data.append(dict(row))

        result['data'] = data
        result["error"] = 0

        return make_response(json.dumps(result), 200)
    except Exception as err:
        result["message"] = str(err)
        return make_response(json.dumps(result), 404)
Example 24
def evaluate(dataset):
    f1 = exact_match = total = 0
    for paragraph in dataset:
        corpus = paragraph['context'].replace('\n', ' ')
        print(corpus)
        for qa in paragraph['qas']:
            total += 1
            question = qa['question']
            answer = inference.infer(corpus, question)
            sample_f1 = metric_max_over_ground_truths(f1_score, answer, qa['answer'])
            f1 += sample_f1
            exact_match += metric_max_over_ground_truths(
                exact_match_score, answer, qa['answer'])
            if sample_f1 != 1.0:
                print("\nQUESTION: {} \nCORRECT_answer: {} \nPREDICTED_answer: {} \nF1 = {}"
                      .format(qa['question'], qa['answer'], answer, sample_f1))

    exact_match = 100.0 * exact_match / total
    f1 = 100.0 * f1 / total

    return {'exact_match': exact_match, 'f1': f1}
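Note: metric_max_over_ground_truths follows the standard SQuAD evaluation pattern; a minimal sketch, assuming qa['answer'] is a list of acceptable answer strings:

def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    # Score the prediction against every reference answer and keep the best match.
    return max(metric_fn(prediction, gt) for gt in ground_truths)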
Example 25
def image():
    payload = request.json
    imageUrl = payload['imageUrl']
    quadKey = payload['quadKey']

    temp = inference.infer(imageUrl)

    # Rescale the network output back to pixel values and drop the batch axis
    temp = (temp.detach().numpy() + 0.5) * 512
    temp = np.squeeze(temp, axis=0)
    temp = np.transpose(temp, (1, 2, 0))  # CHW -> HWC for cv2

    filename = "/Users/parismorgan/space-apps-2019/beautiful_earth/app/static/" + quadKey + ".png"
    cv2.imwrite(filename, temp)

    response = make_response(
        send_file(filename,
                  mimetype='image/jpeg',
                  as_attachment=True,
                  attachment_filename=quadKey))
    response.headers['X-quadKey'] = quadKey
    return response
Example 26
def eval(testX, testY):
    x = tf.placeholder(tf.float32,
                       [len(testX), preprocessing.IMAGE_SIZE,
                        preprocessing.IMAGE_SIZE, preprocessing.IMAGE_CHANNELS],
                       'x-input')
    y_ = tf.placeholder(tf.float32, [len(testX), preprocessing.OUTPUT_NODE],
                        'y-input')
    y = inference.infer(x, False, None)
    accuracy = tf.reduce_mean(
        tf.cast(tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1)), tf.float32))
    saver = tf.train.Saver()
    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
        xs_reshaped = tf.reshape(testX, [
            len(testX), preprocessing.IMAGE_SIZE, preprocessing.IMAGE_SIZE,
            preprocessing.IMAGE_CHANNELS
        ])
        test_feed = {x: sess.run(xs_reshaped), y_: testY}
        ckpt = tf.train.get_checkpoint_state(train.MODEL_SAVE_PATH,
                                             "checkpoint")
        if ckpt and ckpt.model_checkpoint_path:
            save_path = ckpt.model_checkpoint_path
            saver.restore(sess, save_path)
            acc = sess.run(accuracy, test_feed)
            step = save_path.split("/")[-1].split("-")[-1]
            print("After {} steps, the accuracy on test is {}".format(
                step, acc))
Example 27
        # decoder_states = [state]

decoder_dense = keras.layers.Dense(num_decoder_characters, activation="softmax")
decoder_outputs = decoder_dense(decoder_outputs)

# Define the model that will turn
# encoder_input_data & decoder_input_data into decoder_output_data
model = keras.Model([encoder_inputs, decoder_inputs], decoder_outputs)

model.compile(
    optimizer="rmsprop", loss="categorical_crossentropy", metrics=["accuracy"]
)

model.fit(
    [encoder_train_input_data, decoder_train_input_data],
    decoder_train_target_data,
    batch_size=batch_size,
    epochs=epochs,
    validation_data=([encoder_val_input_data, decoder_val_input_data],
                     decoder_val_target_data),
)

# Save model
model.save("seq2seq")

# Inference Call for Validation Data
val_accuracy = inference.infer(encoder_val_input_data, val_input_words, val_target_words, num_decoder_characters, max_decoder_seq_length, target_characters_index, inverse_target_characters_index, enc_latent_dims, dec_latent_dims, cell_type, beam_size)
print("Val Accuracy: ", val_accuracy)

# Inference Call for Test Data
# test_accuracy = inference.infer(encoder_test_input_data, test_input_words, test_target_words, num_decoder_characters, max_decoder_seq_length, target_characters_index, inverse_target_characters_index, enc_latent_dims, dec_latent_dims, cell_type, beam_size)
# print("Test Accuracy: ", test_accuracy)
Example 28
def train(args):
	# build model
	model = Tacotron2()
	mode(model, True)
	optimizer = torch.optim.Adam(model.parameters(), lr = hps.lr,
								betas = hps.betas, eps = hps.eps,
								weight_decay = hps.weight_decay)
	criterion = Tacotron2Loss()
	
	# load checkpoint
	iteration = 1
	if args.ckpt_pth != '':
		model, optimizer, iteration = load_checkpoint(args.ckpt_pth, model, optimizer)
		iteration += 1 # next iteration is iteration+1
	
	# get scheduler
	if hps.sch:
		lr_lambda = lambda step: hps.sch_step**0.5*min((step+1)*hps.sch_step**-1.5, (step+1)**-0.5)
		if args.ckpt_pth != '':
			scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda, last_epoch = iteration)
		else:
			scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda)
	
	# make dataset
	train_loader = prepare_dataloaders(args.data_dir)
	
	# get logger ready
	if args.log_dir != '':
		if not os.path.isdir(args.log_dir):
			os.makedirs(args.log_dir)
			os.chmod(args.log_dir, 0o775)
		logger = Tacotron2Logger(args.log_dir)

	# get ckpt_dir ready
	if args.ckpt_dir != '' and not os.path.isdir(args.ckpt_dir):
		os.makedirs(args.ckpt_dir)
		os.chmod(args.ckpt_dir, 0o775)
	
	model.train()
	# ================ MAIN TRAINING LOOP! ===================
	while iteration <= hps.max_iter:
		for batch in train_loader:
			if iteration > hps.max_iter:
				break
			start = time.perf_counter()
			x, y = model.parse_batch(batch)
			y_pred = model(x)

			# loss
			loss, item = criterion(y_pred, y, iteration)
			
			# zero grad
			model.zero_grad()
			
			# backward, grad_norm, and update
			loss.backward()
			grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), hps.grad_clip_thresh)
			optimizer.step()
			if hps.sch:
				scheduler.step()
			
			# info
			dur = time.perf_counter()-start
			print('Iter: {} Loss: {:.2e} Grad Norm: {:.2e} {:.1f}s/it'.format(
				iteration, item, grad_norm, dur))
			
			# log
			if args.log_dir != '' and (iteration % hps.iters_per_log == 0):
				learning_rate = optimizer.param_groups[0]['lr']
				logger.log_training(item, grad_norm, learning_rate, iteration)
			
			# sample
			if args.log_dir != '' and (iteration % hps.iters_per_sample == 0):
				model.eval()
				output = infer(hps.eg_text, model)
				model.train()
				logger.sample_training(output, iteration)
			
			# save ckpt
			if args.ckpt_dir != '' and (iteration % hps.iters_per_ckpt == 0):
				ckpt_pth = os.path.join(args.ckpt_dir, 'ckpt_{}'.format(iteration))
				save_checkpoint(model, optimizer, iteration, ckpt_pth)

			iteration += 1
	if args.log_dir != '':
		logger.close()
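Note: the lr_lambda above is the Transformer-style warmup factor: it rises linearly for roughly the first sch_step steps, peaks at 1.0, then decays as step^-0.5. A standalone sketch to inspect it (the sch_step value is arbitrary):

sch_step = 4000  # warmup steps; illustrative value only

def lr_factor(step):
    return sch_step ** 0.5 * min((step + 1) * sch_step ** -1.5, (step + 1) ** -0.5)

for s in (0, 1000, 3999, 16000):
    print(s, lr_factor(s))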
Example 29
                          loss_fn,
                          device,
                          n_epochs,
                          f'{model_name}_head_fold_{fold_id + 1}',
                          checkpoint_dir,
                          scheduler=None,
                          metric_spec={'spearmanr': spearmanr_torch},
                          monitor_metric=True,
                          minimize_score=False,
                          logger=fold_logger,
                          grad_accum=grad_accum,
                          batch_step_scheduler=False,
                          eval_at_start=True)
        learner.train()

        oofs[valid_index] = infer(learner.model, valid_loader,
                                  learner.best_checkpoint_file, device)

        # Save tuned model in half precision (reduces memory making it easier to upload to Kaggle)
        head_checkpoint_file = f'{checkpoint_dir}{model_name}_head_fold_{fold_id+1}_best.pth'
        checkpoint = torch.load(head_checkpoint_file)
        model.head.load_state_dict(checkpoint['model_state_dict'])
        model.half()
        tuned_checkpoint_file = f'{checkpoint_dir}{model_name}_tuned_fold_{fold_id+1}_best.pth'
        torch.save({'model_state_dict': model.state_dict()},
                   tuned_checkpoint_file)

    main_logger.info(f'Finished tuning {model_name}')

    # Print CV scores
    ix = np.where(
        train.groupby("question_body")["host"].transform("count") == 1)[
Example 30
def train(trainX, trainY, testX, testY):
    print("Start to train")
    print("The length of training data is {}".format(len(trainX)))
    x = tf.placeholder(tf.float32, [
        BATCH_SIZE, preprocessing.IMAGE_SIZE, preprocessing.IMAGE_SIZE,
        preprocessing.IMAGE_CHANNELS
    ], "x-input")
    y_ = tf.placeholder(tf.float32, [BATCH_SIZE, preprocessing.OUTPUT_NODE],
                        "y-input")
    global_step = tf.Variable(0.0, dtype=tf.float32, trainable=False)

    # regularization
    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    y = inference.infer(x, True, None, False)
    # cross entropy loss
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=tf.argmax(y_, axis=1), logits=y)
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    loss = cross_entropy_mean
    # loss = cross_entropy_mean + tf.add_n(tf.get_collection("loss"))
    # learning rate
    learning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE, global_step,
                                               len(trainX) / BATCH_SIZE,
                                               LEARNING_RATE_DECAY)
    # save the model
    saver = tf.train.Saver()

    train_op = tf.train.AdamOptimizer(learning_rate=0.001).minimize(
        loss, global_step)
    # train_op = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step)

    train_len = len(trainX)

    # The following is accuracy on test data.
    test = tf.placeholder(tf.float32, [
        1, preprocessing.IMAGE_SIZE, preprocessing.IMAGE_SIZE,
        preprocessing.IMAGE_CHANNELS
    ], 'test-input')
    test_y_ = tf.placeholder(tf.float32, [1, preprocessing.OUTPUT_NODE],
                             'test-y-input')
    test_y = inference.infer(test, False, None, True)
    accuracy = tf.reduce_sum(
        tf.cast(tf.equal(tf.argmax(test_y, 1), tf.argmax(test_y_, 1)),
                tf.float32))
    test_loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(labels=tf.argmax(
            test_y_, axis=1),
                                                       logits=test_y))
    # accuracy end

    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
        sess.run(tf.global_variables_initializer())
        for i in range(TRAINING_STEP):
            indices = list(range(train_len))
            for _ in range(int(train_len / BATCH_SIZE) + 1):
                rdn = random.sample(indices, BATCH_SIZE)
                xs = []
                ys = []
                for r in rdn:
                    xs.append(trainX[r])
                    ys.append(trainY[r])
                _, loss_value, step = sess.run([train_op, loss, global_step],
                                               feed_dict={
                                                   x: xs,
                                                   y_: ys
                                               })

            if i % 10 == 0:
                acc_result = 0
                for j in range(len(testX)):
                    testX_feed = testX[j].reshape(1, preprocessing.IMAGE_SIZE,
                                                  preprocessing.IMAGE_SIZE, 3)
                    testY_feed = testY[j].reshape(1, 12)
                    acc_result += sess.run(accuracy,
                                           feed_dict={
                                               test: testX_feed,
                                               test_y_: testY_feed
                                           })
                acc = acc_result / len(testX)
                print(
                    "After %d epoch(s), loss on training batch is %f, accuracy on test is %f"
                    % (i, loss_value, acc))
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME),
                           global_step)
Example 31
def login():
    if request.method == "POST":

        company_code = "aapl"
        company_code_ = request.form.get("company", None)
        print(company_code)
        if (company_code_ is not None):
            company_code = company_code_
        if company_code != None:

            code = stock_code[company.index(company_code)]
            print(code)
            profile = requests.get(
                "https://financialmodelingprep.com/api/v3/company/profile/" +
                str(code))
            history = requests.get(
                "https://financialmodelingprep.com/api/v3/historical-price-full/"
                + str(code) + "/?timeseries=50")
            history = json.loads(history.text)

            profile = json.loads(profile.text)
            exchange = profile['profile']['exchange']
            price = profile['profile']['price']
            image = profile['profile']['image']
            industry = profile['profile']['industry']
            changes = profile['profile']['changes']
            changesPercentage = profile['profile']['changesPercentage']

            data = history['historical']
            data_ = []
            for item in reversed(data):
                # print(item)
                data_.append([
                    item['open'] / 1000, item['close'] / 1000,
                    item['volume'] / 500000000
                ])

            # print(data_)
            code = code.lower()
            # print(code)
            predicted_price = inference.infer(code, data_) * 1000

            display_data = []
            for item in data:
                display_data.append({
                    "date": item['date'],
                    "open": item['open'],
                    "high": item['high'],
                    "low": item['low'],
                    "close": item['close']
                })

            # print(display_data)

            if len(display_data) == 0:
                print("running")

                return render_template(
                    "home.html",
                    company_names=company,
                    company_code=code,
                    company_name=company[company.index(company_code)],
                    current_price=price,
                    predicted_price=predicted_price,
                    image=image,
                    exchange=exchange,
                    industry=industry,
                    changes=changes,
                    changesPercentage=changesPercentage)

            display_data = json.dumps(display_data)

            return render_template(
                "home.html",
                company_names=company,
                company_code=code,
                company_name=company[company.index(company_code)],
                current_price=price,
                predicted_price=predicted_price,
                image=image,
                exchange=exchange,
                industry=industry,
                changes=changes,
                changesPercentage=changesPercentage,
                display_data=display_data)

    print("running")
    return render_template("home.html",
                           company_names=company,
                           company_name=company[0])
Example 32
from vertical_horizontal_simplecut import Cut
from inference import infer, cv2ImgAddText
import cv2
'''pic_path = input("Enter the image path")
minThred = input("Enter the threshold")'''
pic_path = './testimages/6.png'
minThred = 0.3
img = cv2.imread(pic_path)
part_imgs = Cut(pic_path)
translate = infer(part_imgs, minThred)
img = cv2ImgAddText(img, translate, 10, 10)
cv2.imshow("the translated image:", img)
cv2.waitKey(0)
cv2.destroyAllWindows()