Example #1
def list_identifiers(status=(), first=0, max_size=0, order_by=None):
    db = get_db_instance()
    models = Models(db)
    model_identifiers = models.getIdentifiers()
    items = model_identifiers.all(status=status,
                                  skip=first,
                                  limit=max_size,
                                  order_by=order_by or ('updated', 1))

    def show(x, alt, f=lambda a: a):
        return f(x) if x is not None else alt

    def isodate(t):
        return datetime.datetime.fromtimestamp(t).isoformat()

    for i, item in enumerate(items):
        ean = show(item.ean, '-')
        asin = show(item.asin, '-')
        status = show(item.status, '-', str)
        updated = show(item.updated, '-', isodate)
        print(
            '#{i}\tasin:{asin}\tean:{ean}\tstatus:{status}\tupdated:{updated}'.
            format(i=i, asin=asin, ean=ean, status=status, updated=updated))
Example #2
def test_create_model(make_stubber, error_code):
    lookoutvision_client = boto3.client('lookoutvision')
    lookoutvision_stubber = make_stubber(lookoutvision_client)
    project_name = 'test-project_name'
    model_version = 'test-model'
    model_arn = 'test-arn'
    out_buck = 'doc-example-bucket'
    out_folder = 'test-results'
    training_results = f's3://{out_buck}/{out_folder}'
    status = 'TRAINED'

    lookoutvision_stubber.stub_create_model(project_name, out_buck, out_folder,
                                            model_arn, model_version)
    lookoutvision_stubber.stub_describe_model(project_name,
                                              model_version,
                                              model_arn,
                                              status,
                                              error_code=error_code)
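
    # With no stubbed error, create_model should succeed; with an error code
    # stubbed, it should raise ClientError.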

    if error_code is None:
        got_status, got_version = Models.create_model(lookoutvision_client,
                                                      project_name,
                                                      training_results)
        assert got_status == status
        assert got_version == model_version
    else:
        with pytest.raises(ClientError) as exc_info:
            Models.create_model(lookoutvision_client, project_name,
                                training_results)
        assert exc_info.value.response['Error']['Code'] == error_code
Example #3
	def outside_images(self):

		models = Models()

		number_of_files = 0

		print("Retrieving all online avisos. Please, wait...")
		array_avisos_online = models.get_all_avisos_online()
		print('[Ok]')
		print("Retrieving all histogramed avisos. Please, wait...")
		array_avisos_with_hist = models.get_all_avisos_with_histogram()
		print('[Ok]')

		# A set gives O(1) membership tests instead of the original nested loop.
		avisos_with_hist = set(array_avisos_with_hist)

		for aviso_online_id in array_avisos_online:

			if aviso_online_id not in avisos_with_hist:
				aviso_json = {"id_aviso": aviso_online_id}
				models.save_id_aviso_not_histogram_table(aviso_json)

			if number_of_files % 100 == 0:
				print(number_of_files)

			number_of_files += 1
Example #4
    def initialise(self, param):
        self.parameters = param
        self.intrinsic_motivation = IntrinsicMotivation(param)
        self.models = Models(param)

        self.exp_iteration = param.get('exp_iteration')
        self.iteration = 0

        self.pos = []
        self.cmd = []
        self.img = []

        self.goal_code = []

        self.current_goal_x = -1
        self.current_goal_y = -1
        self.current_goal_idx = -1
        self.prev_goal_idx = -1

        self.goal_image = np.zeros(
            (1, param.get('image_size'), param.get('image_size'),
             param.get('image_channels')), np.float32)

        np.random.seed()  # change the seed

        self.prev_pos = self.get_starting_pos()
Example #5
    def process(self):
        try:
            self.progress.emit('Checking username and password')
            if not self.checkAuth():
                self.progress.emit(
                    '<font color=red>Invalid username and/or password</font>'
                )
                self.finished.emit()
                return

            self.progress.emit(
                'Synchronizing the local store with the server...')
            for client in Options.get().local_clients:
                Models.get().clients.saveItem(client)
            for application in Options.get().local_applications:
                Models.get().applications.saveItem(application)
            Options.get().local_clients = []
            Options.get().local_applications = []

            self.progress.emit('Loading application types')
            Models.get().application_types.loadData()
            self.progress.emit('Loading clients')
            Models.get().clients.loadData()
            self.progress.emit('Loading specialists')
            Models.get().specialists.loadData()
            self.progress.emit('')
            self.result.emit(True)
        except requests.exceptions.ConnectionError:
            self.result.emit(False)
        self.finished.emit()
Example #6
    def train(self, token_lists, model_file):
        self.model_file = model_file or "./trained_model/random3/randomhole.tfl"
        self.prepare_data(token_lists)

        xs, ys = self.getTrainData(token_lists)

        with tf.Graph().as_default():
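            # Building and training inside a fresh default graph keeps repeated
            # calls from colliding with earlier TensorFlow graph state.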
            self.model = Models().create_network(
                self.in_max_int,
                self.out_max_int,
                model_name="bidirectional_attention_rnn",
                in_seq_len=self.in_seq_len,
                out_seq_len=self.out_seq_len,
                num_layers=2,
                memory_size=64,
                embedding_size=128,
                num_heads=6,
                scope="randomhole")
            self.model.load(self.model_file)
            self.model.fit(xs,
                           ys,
                           n_epoch=1,
                           batch_size=256,
                           shuffle=True,
                           show_metric=False,
                           run_id="Random Hole Completion")
            self.model.save(self.model_file)
Example #7
	def create_images_histogram_collection(self):

		models = Models()

		number_of_files = 0

		for dir_name, dir_names, file_names in os.walk(Constants.LOCAL_DIR_SAVE_PHOTO):

			# walking inside all ads
			for subdir_name in dir_names:

				aviso_json = {"id_aviso": subdir_name, "photos": []}

				for dir_name_, dir_names_, file_names_ in os.walk(os.path.join(dir_name, subdir_name)):

					# walking inside all photos of the specific ad
					for file_name_ in file_names_:

						try:
							# generating the histogram and adding it to the json to be added to mongo
							hist = self.get_histogram(os.path.join(dir_name, subdir_name, file_name_))
							hist_json = {"photo_path": subdir_name + "/" + file_name_, "histogram": json.dumps(hist.tolist())}
							aviso_json["photos"].append(hist_json)
						except Exception:
							# skip unreadable or non-image files
							pass

						if number_of_files % 1000 == 0:
							print(number_of_files)

						number_of_files += 1

					# calling model to add item to mongo
					models.add_image_histogram(aviso_json)

		print("[OK] Created histograms for " + str(number_of_files) + " photos.")
Example #8
def train_model(lookoutvision_client, bucket, project_name):
    """
    Trains a model.

    :param lookoutvision_client: A Boto3 Lookout for Vision client.
    :param bucket: The bucket where the training output is stored.
    :param project_name: The project that you want to train.
    """
    print("Training model...")
    training_results = f"{bucket}/{project_name}/output/"
    status, version = Models.create_model(lookoutvision_client, project_name,
                                          training_results)

    Models.describe_model(lookoutvision_client, project_name, version)
    if status == "TRAINED":
        print(
            "\nCheck the performance metrics and decide if you need to improve "
            "the model performance.")
        print(
            "\nMore information: "
            "https://docs.aws.amazon.com/lookout-for-vision/latest/developer-guide/improve.html"
        )
        print("If you are satisfied with your model, you can start it.")
        start_model(lookoutvision_client, project_name, version)
    else:
        print("Model training failed.")
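
A minimal usage sketch (standard Boto3 client creation; the bucket and project names here are placeholders):

import boto3

lookoutvision_client = boto3.client("lookoutvision")
train_model(lookoutvision_client, "doc-example-bucket", "my-project")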
Example #9
def save_models():
    all_auto = model_auto()
    for auto in all_auto:
        for brand in Brands.select().where(Brands.brand == auto['brand']):
            #  print(brand.brand)
            for model in auto['models']:
                Models.create(brand=brand, models=model)
Example #10
def delete(num):
    db = Models()
    if not db.view():
        return render_template('success.html', not_delete=True)
    else:
        if request.method == 'GET':
            db.delete(str(num))
            return render_template('success.html', delete=True)
Example #11
	def __init__(self, json=None):
		self.fields = ['buyoutPrice', 'reservePrice', 'imgUrl', 'status', 'user', 'categorie', 'lat', 'long']
		self.unique = [] # Unique fields are also mandatory
		self.mandatory = ['title', 'description', 'dateStart', 'dateLength', 'startPrice'] # Mandatory fields // status: biddable, being shipped, shipped?
		self.editable_fields = ['title', 'description', 'imgUrl', 'lat', 'long'] # Fields the user can edit
		self.belongs_to = ['user', 'categorie'] # Attached to a user; creates a 'user_id' field
		self.has_many = ['bids']
		Models.__init__(self, json)
Example #12
    def train(self, token_lists, model_file):
        self.total = 0
        self.correct = 0
        self.incorrect = 0
        self.top2 = 0
        self.top3 = 0
        self.prefix_model_file = "./trained_model/prefix_final/prefix.tfl"
        self.suffix_model_file = "./trained_model/suffix_final/suffix.tfl"
        self.prepare_data(token_lists)
        # xs = xs[:40960]
        # ys = ys[:40960]

        (pxs, pys, sxs, sys) = self.getTrainData(token_lists)

        with tf.Graph().as_default():
            self.prefix_model = Models().create_network(
                len(self.string_to_number),
                len(self.string_to_number),
                model_name="bidirectional_attention_rnn",
                in_seq_len=self.in_seq_len,
                out_seq_len=self.out_seq_len,
                num_layers=2,
                memory_size=64,
                embedding_size=128,
                num_heads=8,
                scope="prefix")

            self.prefix_model.load(self.prefix_model_file)
            self.prefix_model.fit(pxs,
                                  pys,
                                  n_epoch=1,
                                  batch_size=512,
                                  shuffle=True,
                                  show_metric=False,
                                  run_id="Prefix Completion")
            self.prefix_model.save(self.prefix_model_file)

        with tf.Graph().as_default():
            self.suffix_model = Models().create_network(
                len(self.string_to_number),
                len(self.string_to_number),
                model_name="bidirectional_attention_rnn",
                in_seq_len=self.in_seq_len,
                out_seq_len=self.out_seq_len,
                num_layers=2,
                memory_size=64,
                embedding_size=128,
                num_heads=8,
                scope="suffix")
            self.suffix_model.load(self.suffix_model_file)
            self.suffix_model.fit(sxs,
                                  sys,
                                  n_epoch=1,
                                  batch_size=512,
                                  shuffle=True,
                                  show_metric=False,
                                  run_id="Suffix Completion")
            self.suffix_model.save(self.suffix_model_file)
Example #13
    def cross_test(self, rfclf, feature_models, evalTag=True):
        #load data
        [tt_idx, x_te, y_te] = np.load(self.data_file_test)
        if self.model_dict['lrTModel']:
            [x_te_title, y_te] = np.load(self.data_file_title_test)
        if self.model_dict['scoreModel']:
            [x_te_score] = np.load(self.data_file_score_test)
        if self.model_dict['scoreTModel']:
            [x_te_score_title] = np.load(self.data_file_score_title_test)
        if self.model_dict['dictModel']:
            dic_result = np.array(np.load(self.dict_result)[0])[tt_idx]
#            dic_result[dic_result > 0] = 1
#            dic_result[dic_result < 0] = -1

        #get features
        n_features = []

        for key in self.model_dict.keys():
            if self.model_dict[key]:
                x_test = x_te
                if key == 'dictModel':
                    n_features.append(dic_result)
                    continue
                elif key == 'scoreModel':
                    n_features.append(Models().select_demo(key, 0, 0).predict(x_te_score))
                    continue
                elif key == 'scoreTModel':
                    n_features.append(Models().select_demo(key, 0, 0).predict(x_te_score_title))
                    continue
                elif key == 'lrTModel':
                    x_test = x_te_title

                if key == 'lrModel' or key == 'lrTModel':
                    n_features.append(feature_models[key].predict_proba(x_test))
                else:
                    n_features.append(feature_models[key].predict(x_test))

        #start the second RF model
        x_second_te = np.column_stack(tuple(n_features))
        rf_result = rfclf.predict(x_second_te)
        rf_result_proba = rfclf.predict_proba(x_second_te)

        #evaluate the precision of model
        rf_rp = np.copy(rf_result)
        rf_rp[rf_result_proba.max(axis=1) < 0.5] = 0

        if evalTag:
            self.evaluate(rf_result, y_te)

#        self.DT.write_data('../data/negative.xls', self.origin_data_file,
#                          tt_idx[rf_result == -1], y_te[rf_result == -1])
#        self.DT.write_data('../data/positive.xls', self.origin_data_file,
#                          tt_idx[rf_result == 1], y_te[rf_result == 1])
#        self.DT.write_data('../data/zeros.xls', self.origin_data_file,
#                          tt_idx[rf_result == 0], y_te[rf_result == 0])
        return rf_result, y_te, rf_rp
Example #14
 def __init__(self, user_name=None, user_password=None, session=None):
     '''
     This constructor takes at least a session object used to issue requests
     against the database.
     '''
     if session is not None:
         self.session = session
     elif user_name is not None and user_password is not None:
         self.session = Models.get_session(user_name, user_password, sqlite=False)
     else:
         self.session = Models.get_session()
Example #15
    def _init_model(self):

        M = Models()
        model = M.FPN(img_ch=3, output_ch=1)
        # model = U_Net(img_ch=3, output_ch=1)

        if torch.cuda.device_count() > 1 and self.args.mgpu:
            print("Let's use", torch.cuda.device_count(), "GPUs!")
            # dim = 0 [30, xxx] -> [10, ...], [10, ...], [10, ...] on 3 GPUs
            model = nn.DataParallel(model)

        self.model = model.to(self.device)
Example #16
    def creating_dataframe(self, dictionary):
        final_words = []
        final_words1 = []
        documents = []
        docs = {}
        # Pull every 8-digit id out of the dictionary keys.
        ids = []
        for key in dictionary.keys():
            ids.extend(re.findall(r'\d{8}', str(key)))
        for key in ids:
            # if key == '19234329':
            print(
                "###################### Generating topic labels for {} ############################"
                .format(key))
            df = pd.DataFrame(dictionary[key])
            df.columns = ['Text']
            df_ = df['Text'].apply(lambda x: ''.join(x))
            df_ = df_.str.lower()
            df_ = df_.apply(self.tokenize)
            df_ = df_.apply(self.replace)
            df_ = df_.apply(self.split)
            df_ = df_.apply(self.terms_only)
            df_ = df_.apply(lambda x: ' '.join(x))
            df_ = df_.apply(lambda x: re.sub(r' +', ' ', x))
            for i in df_:
                final_words.append("".join(i).strip().split())
            for i in final_words:
                if len(i) >= 5:
                    final_words1.append(i)
            for i in final_words1:
                documents.append(re.sub(r' +', " ", ' '.join(i)))

            if key in docs:
                docs[key].append(documents)
            else:
                docs[key] = documents

            mm = Models(5, 10, **docs)
            terms_to_wiki = mm.calling_methods('LDA')
            ll = Labels(terms_to_wiki)
            wiki_titles = ll.get_titles_wiki()
            equal_length = ll.remove_all_null_dicts_returned_from_wiki(
                **wiki_titles)
            frq = ll.calculating_word_frequency(**equal_length)
            results = ll.predicting_label(**frq)

            print(key, results)
        print('########### FINAL FILE EXECUTED ##################')
Example #17
def search(query):
    assert isinstance(query, str)
    # Model
    db = get_db_instance()
    models = Models(db)
    model_caches = models.getCaches()
    # Yahoo shopping API
    appid = get_yahoo_appid()
    assert appid is not None
    # Run
    y = YahooShopping(appid=appid, cache=model_caches)
    content = y.query(query=query, ignoreCache=False)
    print(content)
Example #18
    def load(self, token_lists, model_file):
        self.model_file = model_file or "./trained_model/random4/randomhole.tfl"
        self.prepare_data(token_lists)

        with tf.Graph().as_default():
            self.model = Models().create_network(self.in_max_int,
                                                 self.out_max_int,
                                                 model_name="bidirectional_attention_rnn",
                                                 in_seq_len=self.in_seq_len, out_seq_len=self.out_seq_len,
                                                 num_layers=2, memory_size=128,
                                                 embedding_size=128, num_heads=8, scope="randomhole")

            self.model.load(self.model_file)
Example #20
    def start(self):
        bag_of_words, words = TermFrequency(self.trained).create_vocabulary()

        v = Vectorizer(self.trained, self.classify, words, bag_of_words)

        tfidf_trained = v.tfidf_for_tweets_trained
        evaluations = v.evaluations
        tfidf_to_classify = v.tfidf_for_tweets_to_classify

        models = Models(tfidf_trained, evaluations, tfidf_to_classify)
        prediction = models.svm_linear()

        return prediction
Example #21
    def saveModels():
        '''
        Opens the tar file, preprocesses the data, trains models on it, and
        saves them in the Models directory.
        '''
        tar = tarfile.open('Data/babi_tasks_1-20_v1-2.tar.gz')

        challenges = {
          # QA1 with 10,000 samples
          'single_supporting_fact_10k': 'tasks_1-20_v1-2/en-10k/qa1_single-supporting-fact_{}.txt',
          # QA2 with 10,000 samples
          'two_supporting_facts_10k': 'tasks_1-20_v1-2/en-10k/qa2_two-supporting-facts_{}.txt',
        }
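        # The '{}' in each path is a str.format placeholder, presumably filled
        # with 'train' / 'test' when Preprocess.getData reads the archive.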

        ## Single Supporting Fact Challenge
        ss_train_stories, ss_test_stories, \
            ss_stories_train, ss_questions_train, ss_answers_train, \
            ss_stories_test, ss_questions_test, ss_answers_test, \
            ss_story_maxlen, ss_story_maxsents, ss_question_maxlen, \
            ss_vocab, ss_vocab_size, ss_word2idx = \
            Preprocess.getData(challenges['single_supporting_fact_10k'], tar)

        ss_idx2word = {value : key for key, value in ss_word2idx.items()}

        single_model, single_debug_model = \
            Models.singleModel(ss_story_maxlen, ss_story_maxsents, ss_question_maxlen, ss_vocab_size, \
                            ss_stories_train, ss_questions_train, ss_answers_train, \
                            ss_stories_test, ss_questions_test, ss_answers_test, \
                            EMBEDDING_DIM, NUM_EPOCHS, BATCH_SIZE)

        Utilities.saveModel(single_model, 'single_model')
        Utilities.saveModel(single_debug_model, 'single_debug_model')

        ## Two Supporting Fact challenge
        ts_train_stories, ts_test_stories, \
            ts_stories_train, ts_questions_train, ts_answers_train, \
            ts_stories_test, ts_questions_test, ts_answers_test, \
            ts_story_maxlen, ts_story_maxsents, ts_question_maxlen, \
            ts_vocab, ts_vocab_size, ts_word2idx = \
            Preprocess.getData(challenges['two_supporting_facts_10k'], tar)

        ts_idx2word = {value : key for key, value in ts_word2idx.items()}

        double_model, double_debug_model = \
            Models.doubleModel(ts_story_maxlen, ts_story_maxsents, ts_question_maxlen, ts_vocab_size, \
                            ts_stories_train, ts_questions_train, ts_answers_train, \
                            ts_stories_test, ts_questions_test, ts_answers_test, \
                            EMBEDDING_DIM, NUM_EPOCHS_2, BATCH_SIZE)

        Utilities.saveModel(double_model, 'double_model')
        Utilities.saveModel(double_debug_model, 'double_debug_model')
Example #22
def train():
    embeddings = np.load('text_embedding.npy', allow_pickle=True)
    sentiments = np.load('sentiments.npy', allow_pickle=True)
    texts = np.load('texts.npy', allow_pickle=True)
    all_texts = np.load('text_cache.npy', allow_pickle=True)
    categorical_sentiments = to_categorical(sentiments, num_classes=5)
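    # Fit the tokenizer on the full text cache so the train and test splits
    # share one vocabulary.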
    tokenizer = Tokenizer(num_words=300000, oov_token=None)
    tokenizer.fit_on_texts(all_texts)
    X_train, X_test, Y_train, Y_test = train_test_split(texts,
                                                        categorical_sentiments,
                                                        test_size=0.2)
    np.save("text_train.npy", X_train)
    np.save("sentiment_train.npy", Y_train)
    models = Models()
    logdir = "logs/scalars/" + datetime.now().strftime("%Y%m%d-%H%M%S")
    tensorboard_callback = TensorBoard(log_dir=logdir)
    filepath = "ensemble_bgru.h5"
    models.buil_pre_model(embeddings)
    model = models.model
    if os.path.isfile(filepath):
        model = load_model(filepath)

    checkpoint = ModelCheckpoint(filepath,
                                 monitor='loss',
                                 verbose=1,
                                 save_best_only=True,
                                 mode='min')
    callbacks_list = [checkpoint, tensorboard_callback]

    model.fit(pad_sequences(tokenizer.texts_to_sequences(X_train[:500000]),
                            maxlen=75),
              Y_train[:500000],
              batch_size=512,
              epochs=50,
              validation_data=(pad_sequences(tokenizer.texts_to_sequences(
                  X_test[:5000]),
                                             maxlen=75), Y_test[:5000]),
              callbacks=callbacks_list,
              shuffle=True)

    result = model.predict_on_batch(
        pad_sequences(tokenizer.texts_to_sequences([
            " What happened 2 ur vegan food options?! At least say on ur site so i know I won't be able 2 eat anything for next 6 hrs #fail",
            " I sleep hungry and It gets harder everyday",
            "everything is great, i have lost some weight",
            "awesome, really cool", "should I play cards",
            "I am full and inshape", "is it okay to be that hungry at night?"
        ]),
                      maxlen=75))
    print("result: ", np.argmax(result, axis=-1), "\n")
Example #23
def update(num):
    form = NoteUpdate(request.form)  # instantiating form to use the forms defined from the Form class in form.py
    if request.method == 'GET':
        return render_template('update.html', update=True, form=form, num=num)
    else:
        if not form.validate_on_submit():  # making sure that the form is validated before submission
            return render_template('update.html', update=True, not_validate=True, form=form, num=num)
        else:
            db = Models()
            name = request.form['note_name']
            subject = request.form['note_subject']
            content = request.form['note_content']
            db.update(str(num), name, subject, content)
            return render_template('success.html', update=True, form=form)
Example #24
 def __init__(self, logger=None):
   if logger is None:
     self.logger = logging.getLogger()
   else:
     self.logger = logger
   # Model
   db = get_db_instance()
   models = Models(db)
   model_caches = models.getCaches()
   # Proxy
   proxies = get_amazon_scraping_proxy()
   # Run
   self.amazon_scraper = AmazonScraper(cache=model_caches, proxies=proxies,
                                       logger=self.logger)
Example #25
 def __init__(self, json=None):
     self.fields = ["name"]  # Default fields, can be blank in the request.
     self.unique = [
         "email",
         "username",
         "rate",
         "rateNumber",
     ]  # Unique fields are also mandatory // rate = the user's rating & rateNumber = number of votes (for the client-side calculation)
     self.mandatory = ["password"]  # Mandatory fields.
     self.intern_fields = ["token"]
     self.editable_fields = ["name", "password", "email"]
     self.has_many = ["products", "bids", "addresses", "payments"]
     self.__password = None  # Special field with a callback
     Models.__init__(self, json)
Example #26
    def predict(self, pred_file=None):
        # load models
        rfclf = joblib.load(self.rfmodel_file)

        feature_models = dict()
        disable_models = ['dictModel', 'scoreModel', 'scoreTModel']
        for key in self.model_dict.keys():
            if self.model_dict[key] and (key not in disable_models):
                clf = joblib.load(self.model_dir + '/' + key)
                feature_models.setdefault(key, clf)
        
        if pred_file is not None:
            test_data = self.pre_data_treate(pred_file)
            [x_te, x_te_title, x_te_score, x_te_score_title] = test_data
            
            #get features
            n_features = []
    
            for key in self.model_dict.keys():
                if self.model_dict[key]:
                    x_test = x_te
                    if key == 'scoreModel':
                        n_features.append(Models().select_demo(key, 0, 0).predict(x_te_score))
                        continue
                    elif key == 'scoreTModel':
                        n_features.append(Models().select_demo(key, 0, 0).predict(x_te_score_title))
                        continue
                    elif key == 'lrTModel':
                        x_test = x_te_title
    
                    if key == 'lrModel' or key == 'lrTModel':
                        n_features.append(feature_models[key].predict_proba(x_test))
                    else:
                        n_features.append(feature_models[key].predict(x_test))
            
            #start the second RF model
            x_second_te = np.column_stack(tuple(n_features))
            rf_result = rfclf.predict(x_second_te)
            rf_result_proba = rfclf.predict_proba(x_second_te)
    
            #evaluate the precision of model
            rf_rp = np.copy(rf_result)
            rf_rp[rf_result_proba.max(axis=1) < 0.5] = 0
    
            self.DT.write_data('../data/negative.xls', pred_file,
                              range(rf_result.shape[0]), rf_result)
            self.DT.write_data('../data/negative1.xls', pred_file,
                              range(rf_result.shape[0]), rf_rp)
        else:
            return self.cross_test(rfclf, feature_models)
Example #27
    def _init_model(self):

        criterion = nn.BCELoss()
        self.criterion = criterion.to(self.device)
        M = Models()
        model = M.PSP(img_ch=3, output_ch=1)

        self.model = model.to(self.device)
        # init_weights(self.model, 'kaiming', gain=1)
        # summary(self.model, input_size=(4, 448, 448))
        self.model_optimizer = optim.Adamax(model.parameters(),
                                            lr=1e-3,
                                            weight_decay=0.01)
        self.scheduler = optim.lr_scheduler.CosineAnnealingLR(
            self.model_optimizer, T_max=len(self.train_queue))
Example #28
 @classmethod
 def fromDict(cls, data):
     obj = cls()
     obj.id = data['id']
     obj.date = QDate().fromString(data['date'], 'yyyy-MM-dd')
     obj.type = Models.get().application_types.getItemById(data['type'])
     obj.client = Models.get().clients.getItemById(data['client'])
     for item in data['entries']:
         obj.entries.append(ApplicationEntry.fromDict(item))
     if data['contract']:
         obj.contract = Contract.fromDict(data['contract'])
     if data['account']:
         obj.account = Account.fromDict(data['account'])
     if data['order']:
         obj.order = Order.fromDict(data['order'])
     return obj
Example #29
    def initialise(self, param):
        self.parameters = param

        if not param.get('goal_selection_mode') == 'som':
            print('wrong goal selection mode, exit!')
            sys.exit(1)

        self.intrinsic_motivation = IntrinsicMotivation(param)

        if (self.parameters.get('train_cae_offline')) or (
                self.parameters.get('train_som_offline')):
            self.models = Models(param, train_images=self.train_images)
        else:
            self.models = Models(param)

        plot_encoded = self.models.encoder.predict(
            np.asarray([self.test_images[0:5]
                        ]).reshape(5, self.parameters.get('image_size'),
                                   self.parameters.get('image_size'),
                                   self.parameters.get('image_channels')))
        plots.plots_cae_decoded(
            self.models.decoder,
            plot_encoded,
            self.test_images[0:5],
            image_size=self.parameters.get('image_size'),
            directory=self.parameters.get('directory_pretrained_models'))

        self.experiment_id = param.get('experiment_id')
        self.iteration = 0

        self.pos = []
        self.cmd = []
        self.img = []

        self.goal_code = []

        self.current_goal_x = -1
        self.current_goal_y = -1
        self.current_goal_idx = -1
        self.prev_goal_idx = -1

        self.goal_image = np.zeros(
            (1, param.get('image_size'), param.get('image_size'),
             param.get('image_channels')), np.float32)

        np.random.seed()  # change the seed

        self.prev_pos = self.get_starting_pos()
Example #30
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-i",
        "--eval-dir",
        required=True,
        help="List of problem instances, i.e., [filename].txt files.")
    parser.add_argument(
        "-o",
        "--output-dir",
        required=True,
        help="List of truth files, one for each txt file in our --eval-dir.")
    args = parser.parse_args()

    if os.path.exists(args.output_dir):
        logging.warning(
            '[%s] already exists! Will possibly override files in it!' %
            args.output_dir)
    else:
        logging.debug('Creating [%s].' % args.output_dir)
        os.makedirs(args.output_dir)

    texts = load_texts(args.eval_dir)
    scalers = load_scalers()
    models = Models()

    splitted_texts = split_texts(texts)

    # NOTE using standard scaler
    splitted_texts_vectors = to_vectors(splitted_texts,
                                        scalers['standard'].transform)

    logging.debug('Making predictions...')
    for i, chunks_vectors_tuple in enumerate(splitted_texts_vectors, 1):
        chunks_vectors_pairs = itertools.combinations(chunks_vectors_tuple, 2)
        confidences = [0, 0]  # (for false, for true)
        for a, b in chunks_vectors_pairs:
            pred_for_false, pred_for_true = models.classify_proba(a, b)
            # TODO if very confident about style change - break or something?
            confidences[0] += pred_for_false
            confidences[1] += pred_for_true

        y_pred = bool(np.argmax(confidences))
        with open(os.path.join(args.output_dir, 'problem-%d.truth' % i),
                  'w') as f:
            json.dump({'changes': y_pred}, f)

    logging.debug('Done.')
Example #31
    def post(self, request, *args, **kwargs):
        """
        Form submit.
        """

        if not request.is_ajax():
            return HttpResponseBadRequest()
        model_name = request.POST.get('model_name', '')
        model = Models.get_model(model_name)
        if model is None:
            return HttpResponseBadRequest()
        form_class = DynamicModelForm(model_name).get_form()
        form = form_class(request.POST)
        if form.is_valid():
            form.save()
            response = {
                'result': 'success'
            }
        else:
            errors = {}
            for k in form.errors:
                errors[k] = form.errors[k][0]

            response = {
                'result': 'error',
                'errors': errors
            }
        return HttpResponse(json.dumps(response, cls=DateTimeEncoder), mimetype='application/json')
Example #32
    def post(self, request, *args, **kwargs):
        """
        Changing the model field via table.
        """

        required = ('model_name', 'field_index', 'model_pk', 'value')
        if any(key not in request.POST for key in required):
            return HttpResponseBadRequest()
        else:
            try:
                model = Models.get_model(request.POST.get('model_name'))
                obj = model.objects.get(pk=request.POST.get('model_pk'))
                field = [f.name for f in model._meta.fields][int(request.POST.get('field_index'))]
                try:
                    setattr(obj, field, cgi.escape(request.POST.get('value')))
                    obj.full_clean()
                    obj.save()
                    response = {
                        'result': 'success'
                    }
                except ValidationError:
                    response = {
                        'result': 'error',
                        'error': u"The field is filled in incorrectly"
                    }
                return HttpResponse(json.dumps(response, cls=DateTimeEncoder), mimetype='application/json')
            except model.DoesNotExist:
                raise Http404
Example #33
def evaluate_model(trainX, trainy, testX, testy):
    epochs, batch_size = 15, 64
    verbose, n_steps, n_length = 0, 4, 32
    n_timesteps, n_features, n_outputs = trainX.shape[1], trainX.shape[
        2], trainy.shape[1]
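    # Choose the architecture here; the 'cnnlstm' and 'convlstm' variants need
    # the inputs reshaped into (samples, steps, [1,] length, features) first.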
    model_name = 'lstm'
    if model_name == 'cnnlstm':
        trainX = trainX.reshape(
            (trainX.shape[0], n_steps, n_length, n_features))
        testX = testX.reshape((testX.shape[0], n_steps, n_length, n_features))
    elif model_name == 'convlstm':
        trainX = trainX.reshape(
            (trainX.shape[0], n_steps, 1, n_length, n_features))
        testX = testX.reshape(
            (testX.shape[0], n_steps, 1, n_length, n_features))
    model = Models(model_name, n_timesteps, n_features, n_outputs, n_steps,
                   n_length)
    model.model.fit(trainX,
                    trainy,
                    epochs=epochs,
                    batch_size=batch_size,
                    verbose=verbose)
    _, accuracy = model.model.evaluate(testX,
                                       testy,
                                       batch_size=batch_size,
                                       verbose=verbose)
    print('Accuracy: {:.4f}'.format(accuracy))
Example #34
 def __init__(self, model):
     model = Models.get_model(model)
     meta = type('Meta', (), {
         "model": model,
     })
     self.form_object = type('DynamicModelForm', (forms.ModelForm, ),
                             {"Meta": meta})
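
For comparison, a minimal sketch of the same idea using Django's built-in factory (SomeModel stands in for a concrete model class):

from django.forms.models import modelform_factory

# Builds the same kind of ModelForm subclass as the type() construction above.
FormClass = modelform_factory(SomeModel, fields='__all__')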
Example #35
def main():
	if len(sys.argv) < 2:
		print(OutputError.no_arguments)
		print("Using default sample.json")
		input_file = "sample.json"
	else:
		input_file = sys.argv[1]

	print("Processing : " + input_file)
	with open(input_file) as json_file:
		json_data = json.load(json_file)

	data_out = { 
		"meshes":[],
		"textures": json_data["textures"],
		"materials": json_data["materials"],
		"nodes": json_data["nodes"]
	} 

	# Processing meshes, that will be split into submeshes and all the data
	# will be merged into one single file
	with open(input_file.replace(".json", ".data"), "wb") as file_stream:
		file_size = 0
		for mesh_element in json_data[Models.json_tag]:
			json_element, file_size = Models.process_meshes(mesh_element, file_stream, file_size)
			data_out["meshes"].append(json_element)

	# Writing metadata file out
	with open(input_file.replace(".json", ".metadata"), "w") as file_stream:
		json.dump(data_out, file_stream, indent=4, separators=(',', ': '))
Example #36
    def setupUi(self):
        self.ui.edtDate.setDate(QDate().currentDate())

        self.ui.tblElevatorsData.horizontalHeader().setVisible(True)
        self.ui.cmbApplicationType.setModel(Models.get().application_types)
        self.ui.tblElevatorsData.setModel(DataTableModel())
        self.ui.tblElevatorsData.setItemDelegate(DataTableDelegate())
Example #37
def make_model(train_image, train_label, test_image, test_label, name):
    try:
        m = Models(shape=train_image.shape[1:], name=name)
        # Look the model-builder method up by name and call it; exec() returns
        # None and cannot be used to capture the result.
        model = getattr(m, bp.USE_MODEL)(train_image, train_label, test_image, test_label)
        print('info: input_shape:{}'.format(train_image.shape[1:]))
    except Exception as e:
        print(e)
        exit()
Example #38
 def insert_test_clients(self):
     Models.insert_test(self.session)
Example #39
	def validate_grouped_equals(self):
		models = Models()
		models.validate_grouped_equals()
Example #40
 def __init__(self, json=None):
     self.belongs_to = ["user"]
     self.fields = ["number", "security", "date", "name"]
     self.editable_fields = ["number", "security", "date", "name"]
     Models.__init__(self, json)
Example #41
	def __init__(self, json=None):
		self.belongs_to = ['user']
		self.fields = ['street1', 'street2', 'country', 'zipcode', 'city']
		self.editable_fields = ['street1', 'street2', 'country', 'zipcode', 'city']
		Models.__init__(self, json)
Example #42
    def build_model(self, save_tag=True):
        #load data
        tr_data = np.load(self.data_file_train)
        [tt_idx, x_tr, y_tr] = tr_data

        if self.model_dict['lrTModel']:
            [x_tr_title, y_tr] = np.load(self.data_file_title_train)
        if self.model_dict['dictModel']:
            dicResult = np.array(np.load(self.dict_result)[0], dtype=float)[tt_idx]
#            dicResult[dicResult > 0] = 1
#            dicResult[dicResult < 0] = -1
        if self.model_dict['scoreModel']:
            [x_tr_score] = np.load(self.data_file_score_train)
        if self.model_dict['scoreTModel']:
            [x_tr_score_title] = np.load(self.data_file_score_title_train)

        mid = int(x_tr.shape[0]/2)
        #train model
        models = Models()

        #get the features
        n_features = []
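        # Two-fold stacking: each base model is trained on one half of the data
        # and predicts the other half, so the second-level model is fit on
        # out-of-fold predictions.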
        for key in self.model_dict.keys():
            #remove the models unused
            if not self.model_dict[key]:
                continue

            x_train = x_tr
            if key == 'dictModel':
                n_features.append(dicResult)
                continue
            elif key == 'lrTModel':
                x_train = x_tr_title
            elif key == 'scoreModel':
                x_train = x_tr_score
            elif key == 'scoreTModel':
                x_train = x_tr_score_title

            clf = models.select_demo(key, x_train[:mid, :], y_tr[:mid])
            if key == 'lrModel' or key == 'lrTModel':
                mid_to_end = list(clf.predict_proba(x_train[mid:,:]))
            else:
                mid_to_end = list(clf.predict(x_train[mid:,:]))
            clf = models.select_demo(key, x_train[mid:, :], y_tr[mid:])
            if key == 'lrModel' or key == 'lrTModel':
                top_to_mid = list(clf.predict_proba(x_train[:mid, :]))
            else:
                top_to_mid = list(clf.predict(x_train[:mid, :]))
            n_features.append(np.array(top_to_mid + mid_to_end))

            #get the true model
            clf = models.select_demo(key, x_train, y_tr)
            if save_tag and key != 'scoreModel' and key != 'scoreTModel':
                joblib.dump(clf, self.model_dir + '/' + key)

        if len(n_features) > 1:
            x_tr_second = np.column_stack(tuple(n_features))
        else:
            print('Error: not enough models')
            sys.exit(1)

        self.GBDT.fit(x_tr_second, y_tr)
        print(self.GBDT.feature_importances_)
        #train the second model
        rfclf = models.rfdemo(x_tr_second, y_tr)
        if save_tag:
            joblib.dump(rfclf, self.rfmodel_file)
Example #43
	def create_images_histogram_from_images_backup_iw(self):

		models = Models()

		self.number_of_files = 0

		print("Retrieving all online avisos. Please, wait...")
		self.array_avisos_online = models.get_all_avisos_online()
		print("[Ok]")

		def step(args, dir_name, files):
			# os.path.walk-style callback; unpack the (extension, instance)
			# tuple explicitly, since Python 3 dropped tuple parameters.
			ext, self = args

			download_100x75 = True

			aviso_id = dir_name[dir_name.rfind(Constants.LOCAL_DIR_SAVE_PHOTO) + len(Constants.LOCAL_DIR_SAVE_PHOTO) + 2:dir_name.rfind("/")]
			aviso_id = aviso_id.replace("/", "")
			
			if "100x75" in dir_name or "1200x1200" in dir_name:
				dir100x75 = dir_name[:dir_name.rfind("/")] + "/100x75"
				dir1200x1200 = dir_name[:dir_name.rfind("/")] + "/1200x1200"

				try: 
					aviso_id_int = int(aviso_id)
				except ValueError:
					aviso_id_int = 0
				
				if aviso_id_int in self.array_avisos_online:
					
					if "1200x1200" in dir_name:
						if os.path.isdir(dir100x75):
							download_100x75 = False

					if download_100x75:

						aviso_json = {"id_aviso":aviso_id, "photos":[]}

						for file_name in files:

							if file_name.lower().endswith(ext):

								try:
									#generating the histogram and adding it to the json to be added to mongo
									hist = self.get_histogram(os.path.join(dir_name, file_name)) 
									hist_json = {"photo_path":dir_name + "/" + file_name, "histogram":json.dumps(hist.tolist())}
									aviso_json["photos"].append(hist_json)
								except Exception:
									pass

						models.add_image_histogram(aviso_json)
				

				if self.number_of_files % 100 == 0:
					print(self.number_of_files)

				self.number_of_files += 1
			else:

				if self.number_of_files % 100 == 0:
					print(self.number_of_files)

				self.number_of_files += 1

				print(aviso_id)
Example #44
	def save_compressed_histogram_online(self):
		models = Models()
		models.save_compressed_histogram_online()
Example #45
	def create_tuples_equals_avisos_collection(self):
		models = Models()
		models.create_tuples_equals_avisos_collection()
Example #46
	def create_raw_equal_avisos(self):
		models = Models()
		models.create_raw_equal_avisos()
Example #47
	def validate_arr(self):
		models = Models()
		models.validate_arr()
Example #48
	def create_duplicateds_group_collection_new(self):
		models = Models()
		models.create_duplicateds_group_collection_new()
Example #49
	def __init__(self, json=None):
		self.fields = ['name']
		self.has_many = ['products'] 
		self.editable_fields = ['name']
		Models.__init__(self, json)
Example #50
	def create_images_histogram_from_online_ads(self):
		models = Models()

		number_of_files = 0

		print("Retrieving all online avisos. Please, wait...")
		self.array_avisos_online = models.get_all_avisos_online()
		print("[Ok]")

		for aviso_id in self.array_avisos_online:

			if number_of_files % 100 == 0:
				print(number_of_files)

			number_of_files +=1

			aviso_json = {"id_aviso":aviso_id, "photos":[]}

			# print os.walk(Constants.LOCAL_DIR_SAVE_PHOTO + complete_folder)
			try:

				if len(str(aviso_id))<10:
					aviso_id = format(int(aviso_id), "010")

				aviso_id_splitted = re.findall(r'.{1,2}',str(aviso_id),re.DOTALL)

				complete_folder = ""

				for folder_name in aviso_id_splitted:
					complete_folder +=  folder_name + "/"
				
				for root, dirs, files in os.walk(Constants.LOCAL_DIR_SAVE_PHOTO + complete_folder):

					folder_to_download = ""
					for folder in dirs:

						if folder == "100x75":

							folder_to_download = "100x75"
							break

						elif folder == "1200x1200":

							folder_to_download = "1200x1200"
							break


					folder_name = Constants.LOCAL_DIR_SAVE_PHOTO + complete_folder + folder_to_download

					for file in os.listdir(folder_name):
						
						if file.endswith(".jpg"):
							hist = self.get_histogram(os.path.join(folder_name, file))
							hist_json = {"photo_path":folder_name + "/" + file, "histogram":json.dumps(hist.tolist())}
							aviso_json["photos"].append(hist_json)

					if len(os.listdir(folder_name))>0:
						models.add_image_histogram(aviso_json)

					break

			except Exception:
				# skip ads whose photo folders are missing or unreadable
				pass
Example #51
	def create_similar_photos_collection(self):
		models = Models()
		models.create_similar_photos_collection()
Example #52
File: bids.py Project: Erowlin/Ubid
	def __init__(self, json=None):
		self.fields = ['price', 'date']
		self.belongs_to = ['user', 'product'] # Attached to a user and a product
		self.editable_fields = ['price', 'date']
		Models.__init__(self, json)