def message_loop():
    source_dir = config.options.data_dir / "source"
    last_image = latest_image()
    last_status = predict(last_image)
    last_change = image_date(last_image)
    print(f"Current status: {last_status} at {last_change:%Y-%m-%d %H:%M}")

    while True:
        try:
            get_ftp_files(**config.ftp, source_dir=source_dir)
            last_image = latest_image()
            current_status = predict(last_image)  # reuse the image fetched above instead of re-reading the directory
            if (last_status == "OPEN"
                    and (datetime.datetime.now() - last_change).seconds >
                    config.open_warning_seconds):
                print(
                    f"Warning: Door has been open since {last_change:%Y-%m-%d %H:%M}"
                )
                send_warning_message(last_change)
                last_change = datetime.datetime.now()  # reset so the warning repeats after another interval

            if current_status != last_status:
                last_change = image_date(last_image)
                print(
                    f"Status changed to {current_status} at {datetime.datetime.now():%Y-%m-%d %H:%M}"
                )
                send_message(current_status)
            last_status = current_status

            time.sleep(293)  # poll just under every five minutes
        except Exception:
            traceback.print_exc()
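# The loop above assumes helpers that the snippet does not show. A minimal
# sketch of what latest_image and image_date might look like; the glob pattern
# and the use of file mtimes are assumptions for illustration only.
import datetime

def latest_image():
    # newest JPEG under the configured source directory
    source_dir = config.options.data_dir / "source"
    return max(source_dir.glob("*.jpg"), key=lambda p: p.stat().st_mtime)

def image_date(image_path):
    # capture time approximated by the file's modification timestamp
    return datetime.datetime.fromtimestamp(image_path.stat().st_mtime)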
Example #2
def main_with_class_explore():
    class_uri = 'http://dbpedia.org/ontology/Person'
    properties = easysparql.get_numerical_properties_for_class_tbox(endpoint=RAW_ENDPOINT, class_uri=class_uri)
    if properties is None:
        return
    # pair the class uri with each of its numerical properties
    class_property_combinations = [(class_uri, p) for p in properties]
    data, meta_data = data_extraction.data_and_meta_from_class_property_uris(
        class_property_uris=class_property_combinations)
    # data_extraction.save_data_and_meta_to_files(data=data, meta_data=meta_data)
    model = learning.train_with_data_and_meta(data=data, meta_data=meta_data)
    meta_with_clusters = learning.get_cluster_for_meta(training_meta=meta_data, testing_meta=meta_data)  # self-test on the training meta
    learning.test_with_data_and_meta(model=model, data=data, meta_data=meta_with_clusters)
    # now testing some files
    test_data, test_meta_data = data_extraction.data_and_meta_from_files(
        ['person_waist.csv', 'person_hipsize.csv', 'person_bustsize.csv'])
    learning.predict(model, data=test_data, meta_data=test_meta_data)
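# A hedged sketch of the kind of SPARQL query a tbox lookup such as
# easysparql.get_numerical_properties_for_class_tbox might issue against the
# endpoint; the query shape is an assumption, not the library's actual code.
NUMERIC_PROPERTIES_QUERY = """
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
SELECT DISTINCT ?p WHERE {
    ?p rdfs:domain <%s> .
    ?p rdfs:range ?range .
    FILTER(?range IN (xsd:integer, xsd:double, xsd:decimal, xsd:float))
}
"""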
Example #3
def main():
    class_property_combinations = [
        ('http://xmlns.com/foaf/0.1/Person', 'http://dbpedia.org/ontology/numberOfMatches'),
        # ('http://schema.org/Place', 'http://dbpedia.org/property/longew'),
        # ('http://schema.org/Place', 'http://dbpedia.org/property/latns'),
        ('http://schema.org/Place', 'http://www.georss.org/georss/point'),
        # ('http://schema.org/Place', 'http://dbpedia.org/property/latm'),
        # ('http://schema.org/Place', 'http://dbpedia.org/property/longm'),
        ('http://schema.org/Place', 'http://dbpedia.org/property/latd'),
        ('http://schema.org/Place', 'http://dbpedia.org/property/longd'),
    ]
    class_property_combinations_test = [
        # ('http://schema.org/Place', 'http://dbpedia.org/property/latm'),
        # ('http://schema.org/Place', 'http://dbpedia.org/property/longm'),
        ('http://schema.org/Place', 'http://dbpedia.org/property/latd'),
        ('http://schema.org/Place', 'http://dbpedia.org/property/longd'),
    ]

    data1, meta_data1 = data_extraction.data_and_meta_from_class_property_uris(class_property_combinations)
    data2, meta_data2 = data_extraction.data_and_meta_from_files(['novHighC.csv'])
    data, meta_data = data_manipulation.merge_data_and_meta_naive(data1=data1, meta_data1=meta_data1, data2=data2,
                                                                  meta_data2=meta_data2)
    for clus, md in enumerate(meta_data):
        print("cluster %d => type: %s" % (clus, md["type"]))
    model = learning.train_with_data_and_meta(data=data, meta_data=meta_data)

    test_data1, test_meta_data1 = data_extraction.data_and_meta_from_class_property_uris(
        class_property_combinations_test)

    test_data2, test_meta_data2 = data_extraction.data_and_meta_from_files(['mayHighC.csv'])
    # merge the two data sets
    test_data, test_meta_data = data_manipulation.merge_data_and_meta_naive(
        data1=test_data1, meta_data1=test_meta_data1, data2=test_data2, meta_data2=test_meta_data2)

    # test_meta_data_with_clusters = learning.get_cluster_for_meta(training_meta=meta_data, testing_meta=test_meta_data)
    # learning.test_with_data_and_meta(model=model, data=test_data, meta_data=test_meta_data_with_clusters)
    learning.predict(model=model, data=test_data, meta_data=test_meta_data)
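# A minimal sketch of what a naive merge such as merge_data_and_meta_naive
# might do, assuming data are numpy arrays stacked row-wise and meta_data are
# plain lists of dicts; the real data_manipulation implementation may differ.
import numpy as np

def merge_data_and_meta_naive_sketch(data1, meta_data1, data2, meta_data2):
    data = np.concatenate([data1, data2])
    meta_data = list(meta_data1) + list(meta_data2)
    return data, meta_data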
Example #4
File: core.py  Project: ssameerr/tada
def predict_files(predictionrun_id=None,
                  model_dir=None,
                  files=(),
                  original_uploaded_filenames=(),
                  has_header=False):
    """
    :param predictionrun_id:
    :param model_dir: the dir of the FCM model csv file abs dir
    :param files: list of files to be predicted
    :return:
    """
    if predictionrun_id is None:
        print("predict_files> predictionrun_id should not be None")
        return
    if model_dir is None:
        print("predict_files> model_dir should not be None")
        return
    if len(files) != len(original_uploaded_filenames):
        print("predict_files> number of files (%d) does not match the number "
              "of original_uploaded_filenames (%d)" %
              (len(files), len(original_uploaded_filenames)))
        return
    print("original uploaded files:")
    print(original_uploaded_filenames)
    update_func = partial(update_predictionrun_progress_for_partial,
                          predictionrun_id)
    update_predictionrun_state(predictionrun_id=predictionrun_id,
                               new_progress=0,
                               new_state=PredictionRun.RUNNING)
    model, types = learning.load_model(model_dir)
    num_of_files = len(files)
    for idx, fname in enumerate(files):
        update_predictionrun_state(predictionrun_id=predictionrun_id,
                                   new_notes='predicting columns in file: ' +
                                   fname.split('/')[-1].strip()[:-4])  # basename minus the 4-char extension
        data, meta_data = data_extraction.data_and_meta_from_a_mixed_file(
            file_name=fname,
            has_header=has_header,
            original_file_name=original_uploaded_filenames[idx])
        print "predict_files> extracted data shape is %s " % str(data.shape)
        u = learning.predict(model=model,
                             data=data,
                             meta_data=meta_data,
                             update_func=update_func)
        predictionrun = PredictionRun.objects.filter(id=predictionrun_id)
        if len(predictionrun) == 1:
            predictionrun = predictionrun[0]
            file_column_list = [{
                "file_name": fc["type"].split(' , ')[0],
                "column_no": fc["type"].split(' , ')[1]
            } for fc in meta_data]
            predictionrun.add_memberships(u, file_column_list)
        else:
            update_predictionrun_state(
                predictionrun_id=predictionrun_id,
                new_notes="predictionrun_id no longer exists",
                new_state=PredictionRun.STOPPED)
            return
    predictionrun = PredictionRun.objects.filter(id=predictionrun_id)
    if len(predictionrun) == 1:
        predictionrun = predictionrun[0]
        predictionrun.set_types(types)
        print "setting types"
        print types
    else:
        update_predictionrun_state(
            predictionrun_id=predictionrun_id,
            new_notes="predictionrun_id no longer exists",
            new_state=PredictionRun.STOPPED)
        return
    update_predictionrun_state(predictionrun_id=predictionrun_id,
                               new_progress=100,
                               new_state=PredictionRun.COMPLETE,
                               new_notes='')
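# A hedged sketch of the progress callback that functools.partial binds above;
# the actual update_predictionrun_progress_for_partial in this project may
# differ, but partial() pins predictionrun_id so learning.predict only needs
# to call update_func(progress).
def update_predictionrun_progress_for_partial(predictionrun_id, progress):
    update_predictionrun_state(predictionrun_id=predictionrun_id,
                               new_progress=progress)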
Example #5
File: main.py  Project: Ghanyy/Protein_DB
    # the first column has an empty header (it is actually an id), so skip it
    data_columns = list(test_df.columns.values)[1:]
    data_frame_training_data = data_frame.loc[:, data_columns].values

    data_frame_target = data_frame.loc[:, "res_name"].tolist()
    data_frame_2_target = data_frame_2.loc[:, "res_name_group"].tolist()

    data_frame_training_data = lrn.clean_data_values(data_frame_training_data)
    # verbose output: UserWarning: Deleting features without observed values:
    # [690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707
    #  708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725
    #  726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743
    #  744 745 746 747 748 749 750 751 752 753 754 755 756 757 758]
    data_columns_exclude = [690, 691, 692, 693, 694, 695, 696, 697, 698, 699, 700, 701, 702, 703, 704, 705, 706, 707,
                            708, 709, 710, 711, 712, 713, 714, 715, 716, 717, 718, 719, 720, 721, 722, 723, 724, 725,
                            726, 727, 728, 729, 730, 731, 732, 733, 734, 735, 736, 737, 738, 739, 740, 741, 742, 743,
                            744, 745, 746, 747, 748, 749, 750, 751, 752, 753, 754, 755, 756, 757, 758, 0]

    simple_model = lrn.model_rfc(data_frame_training_data, data_frame_target, 50, 50, "data/tmp/simple_model.pkl")
    grouped_model = lrn.model_rfc(data_frame_training_data, data_frame_2_target, 50, 50, "data/tmp/grouped_model.pkl")

    test_df.drop(test_df.columns[data_columns_exclude], axis=1, inplace=True)
    test_data = test_df.values
    test_data = lrn.clean_data_values(test_data)

    simple_prediction = lrn.predict(simple_model, test_data)
    grouped_prediction = lrn.predict(grouped_model, test_data)

    with open("data/tmp/simple_prediction.p", "wb") as fh:
        pickle.dump(simple_prediction, fh)
    with open("data/tmp/grouped_prediction.p", "wb") as fh:
        pickle.dump(grouped_prediction, fh)
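# Loading the pickled predictions back is symmetric; a minimal usage sketch:
import pickle

with open("data/tmp/simple_prediction.p", "rb") as fh:
    simple_prediction = pickle.load(fh)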
Example #6
parser.add_argument("checkpoint_path", type=str, help="path to the checkpoint")
parser.add_argument("--mapping_path",
                    type=str,
                    default="",
                    help="path to the checkpoint (Default: No_Mapping)")
parser.add_argument("--top_k",
                    type=int,
                    default="3",
                    help="path to the checkpoint (Default: 3)")
parser.add_argument('--gpu',
                    action='store_true',
                    help="Use GPU for training (Default: False)")
args = parser.parse_args()

# load name mapping from json file
cat_to_name = utility.load_mapping(
    args.mapping_path) if args.mapping_path != "" else {}

# load model from checkpoint
model = learning.load_model(args.checkpoint_path)

# predict classes and probabilities
ps, preds = learning.predict(args.img_path, model, args.top_k, args.gpu)

# map classes to names
preds = [cat_to_name[pred] for pred in preds] if cat_to_name else preds

# print results
for name, prob in zip(preds, ps[-1, :]):
    print("Class: {}, Propability: {}%".format(name, prob * 100))
Example #7
def answer_per_question(Question, model, tokenizer, all_events,
                        all_event_keys):
    answer, method = find(Question, model, tokenizer, all_events,
                          all_event_keys)
    answer_sentence = ""
    try:
        answer["type"] = str(predict(Question))
    except Exception as exc:
        raise ValueError("Type Predict Error!") from exc

    if answer["type"] == '1':
        # HANDLED BY ARGUMENTS
        # method = "temp"
        time_len = len(answer["time"])
        if answer["religious_time"]:
            time_len = 1
            time, answer["api_url"] = find_time_from_religious(answer)
            answer["time"].extend(time)
        en_datetime = dateparser.parse('امروز', settings={'TIMEZONE': '+0330'})  # 'امروز' = "today"
        naive_dt = JalaliDate(en_datetime)
        d1 = datetime.datetime.strptime(answer['date'][0], "%Y-%m-%d")
        difference_days = d1.day - naive_dt.day
        if not answer["time"]:
            hour = datetime.datetime.now().hour
            if hour < 12 or difference_days > 0:
                answer["time"] = ["12:00"]
            else:
                if (hour + 1) == 24:
                    hour = 0
                time = "{}:{}".format(
                    str(hour + 1).zfill(2),
                    str(datetime.datetime.now().minute).zfill(2))
                answer["time"] = [time]

        try:

            if "اختلاف" in Question or "تفاوت" in Question:
                temps, urls, logic1, logic2 = weather_difference(
                    Question, answer, ' و ')
                temp = round(abs(temps[1] - temps[0]), 2)
                answer_mode = 1
            elif "سردتر " in Question or "سرد تر " in Question:
                temps, urls, logic1, logic2 = weather_difference(
                    Question, answer, ' یا ')
                if temps[1] < temps[0]:
                    answer_number = 1
                    temp = find_fit_word(answer, True)
                else:
                    answer_number = 0
                    temp = find_fit_word(answer, False)
                answer_mode = 2
            elif "گرم‌تر " in Question or "گرمتر " in Question or "گرم تر " in Question:
                temps, urls, logic1, logic2 = weather_difference(
                    Question, answer, ' یا ')
                if temps[1] > temps[0]:
                    answer_number = 1
                    temp = find_fit_word(answer, True)
                else:
                    answer_number = 0
                    temp = find_fit_word(answer, False)
                answer_mode = 3
            else:
                greg_date = convert_date(answer["date"][0], "shamsi",
                                         "greg") + " " + answer["time"][0]
                temp, cond, url, logic1 = find_weather_from_city_date(
                    Question, answer["city"][0], greg_date)
                urls = [url]
                answer_mode = 4
            answer["api_url"].extend(urls)
            if method == "temp":
                answer["result"] = str(temp)
            elif method == "cond":
                answer["result"] = cond
            if time_len == 0:
                answer["time"] = []
            else:
                answer["time"] = answer["time"][:time_len]
            if answer_mode == 0:
                pass
            elif answer_mode == 1:
                answer_sentence = weather_logical_sentence(
                    answer, logic1, logic2, 'اختلاف')  # "difference"
            elif answer_mode == 2:
                answer_sentence = weather_logical_sentence(
                    answer, logic1, logic2, 'سردتر', answer_number)  # "colder"
            elif answer_mode == 3:
                answer_sentence = weather_logical_sentence(
                    answer, logic1, logic2, 'گرمتر', answer_number)  # "warmer"
            elif answer_mode == 4:
                answer_sentence = weather_sentence(answer, logic1)
            if answer["religious_time"]:
                answer["time"] = []

        except Exception:
            # raise ValueError("Type 1 Error!")
            pass

    elif answer["type"] == '2':
        try:
            result, answer["api_url"] = find_time_from_religious(answer)
            answer["result"] = result[0]
            answer_sentence = religion_sentence(answer)
        except Exception:
            # raise ValueError("Type 2 Error!")
            pass
    elif answer["type"] == '3':
        try:
            t = Time(answer["city"][0])
            res = t.send_request()
            answer["result"] = res
            answer["api_url"] = [t.url]
            answer["date"] = []
            answer["time"] = []
            answer_sentence = time_sentence(answer)
        except Exception:
            # raise ValueError("Type 3 Error!")
            pass
    elif answer["type"] == '4':
        answer["city"] = []
        try:
            answer["api_url"] = ["https://www.time.ir/"]
            if 'مناسبت' in Question:  # "occasion"
                answer["result"] = answer["event"][0]
                answer["event"] = []
            else:
                if answer["calendar_type"] and answer["date"]:
                    target = answer["calendar_type"][0]
                    if target == "شمسی":
                        answer["result"] = convert_date(
                            answer["date"][0], "shamsi", "shamsi")
                    elif target == "قمری" or target == "هجری":
                        answer["result"] = convert_date(
                            answer["date"][0], "shamsi", "hijri")
                    elif target == "میلادی":
                        answer["result"] = convert_date(
                            answer["date"][0], "shamsi", "greg")
                elif answer["date"]:
                    answer["result"] = answer["date"][0]
            answer_sentence = date_sentence(answer)
        except Exception:
            # raise ValueError("Type 4 Error!")
            pass
    elif answer["type"] == '-1':
        answer = {
            'type': '-1',
            'city': [],
            'date': [],
            'time': [],
            'religious_time': [],
            'calendar_type': [],
            'event': [],
            'api_url': [],
            'result': ''
        }
        answer_sentence = unknown_sentence()

    return answer, answer_sentence
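# A hedged usage sketch for the function above: the caller supplies a type
# classifier (model, tokenizer) and the event tables; question_text is a
# placeholder name for the incoming Persian question.
answer, sentence = answer_per_question(question_text, model, tokenizer,
                                       all_events, all_event_keys)
print(answer["type"], answer["result"], sentence)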
def predict_image(image):
    # wrap the single image in a batch of one before predicting
    array = convert_to_ndarray(image, [56, 56])
    predict([array])
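# A minimal sketch of the convert_to_ndarray helper this snippet assumes,
# using Pillow; the 56x56 size matches the call above, everything else is
# an assumption for illustration.
import numpy as np
from PIL import Image

def convert_to_ndarray(image_path, size):
    with Image.open(image_path) as img:
        return np.asarray(img.resize(tuple(size)))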