Example #1
 def user_login(user_info, addition_header=None):
     try:
         header = API.get_header(addition_header)
         http = API.http
         url = API.url + "login?%s"
     except AttributeError:
         print(
             "[error] can't find header in API (user_login function in API class)"
         )
     try:
         user_info_decode = urlencode(user_info)
         r = http.request("GET", url % user_info_decode, headers=header)
         checkRequest = DATA.convert_bytes_to_json(r.data)
         if "error" in list(checkRequest.keys()):
             print(
                 "[error] ",
                 checkRequest["code"],
                 ", data:",
                 checkRequest["error"],
                 "(delete_object function in API class)",
             )
         else:
             return checkRequest
     except Exception:
         print(
             "[Error] bad request, check user info (user_login function in API class)"
         )
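A hypothetical call, assuming the backing service is a Parse-style REST API whose login endpoint takes the credentials as query parameters (the field names and the "sessionToken" key below are illustrative, not taken from the source):

 result = user_login({"username": "alice", "password": "secret"})
 if result is not None:
     print(result.get("sessionToken"))  # hypothetical response field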
Example #2
 def signing_up(user_info, addition_header=None):
     try:
         header = API.get_header(addition_header)
         http = API.http
         url = API.url + "users"
         role_control = {
             "ACL": {"role:user": {"read": True, "write": True}},
         }
     except AttributeError:
         print(
             "[error] can't find header in API (signing_up function in API class)"
         )
     try:
         user_info = dict(user_info)
         user_info.update(role_control)
         user_info_decoded = json.dumps(user_info)
         r = http.request("POST", url, body=user_info_decoded, headers=header)
         checkRequest = DATA.convert_bytes_to_json(r.data)
         if "error" in list(checkRequest.keys()):
             print(
                 "[error] ",
                 checkRequest["code"],
                 ", data:",
                 checkRequest["error"],
                 "(delete_object function in API class)",
             )
         else:
             return checkRequest
     except Exception:
         print(
             "[Error] bad request, check user info (signing_up function in API class)"
         )
Example #3
 def dojob(self):
     self.obj.write(0x2, 0x5)
     v1 = self.obj.read(0x2)
     print('V1=' + str(v1))
     fmt = DATA()  # renamed from "format" to avoid shadowing the built-in
     v2 = fmt.dedata(v1)
     print('V2=' + str(v2))
     return True
Example #4
def wrapper(parameters, nr, name):
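        # NOTE: dataLimits, _abs, degree, peakLimits and skipRegion are not
        # parameters of this function; they appear to be module-level globals
        # in the source project, as does the bound() helper used below.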
        data = DATA()
        data.loadData(name) #load the data
        data.setLimits(dataLimits) #set data limits
        if _abs:
                data.fitBaseline(degree, peakLimits, skipRegion, abs=_abs) #fit the baseline (first time)
        else:
                data.fitBaseline(degree, peakLimits, skipRegion) #fit the baseline (first time)
        fit, parguess, bounds = bound(nr, parameters)
        fit.deconvolute(data, parguess, bounds, True)
        return name, fit.pars
Example #5
 def upload_model_detail(
     model_id,
     athm,
     dataName,
     modelName,
     description,
     col_feature,
     col_label,
     col_feature_name,
     col_label_name,
     addition_header=None,
 ):
     try:
         http = API.http
         header = API.get_header(addition_header)
         url = API.url + "classes/ModelDetail"
     except AttributeError:
         print(
             "[error] can't find header in API (upload_model_detail function in API class)"
         )
     data = {
         "colLabel": col_label,
         "colFeature": col_feature,
         "algorithm": athm,
         "modelName": modelName,
         "modelId": {
             "__type": "Pointer",
             "className": "Model",
             "objectId": model_id,
         },
         "description": description,
         "dataName": dataName,
         "colFeatureName": col_feature_name,
         "colLabelName": col_label_name,
     }
     model_detail = json.dumps(data)
     r = http.request("POST", url, body=model_detail, headers=header)
     checkRequest = DATA.convert_bytes_to_json(r.data)
     if "error" in list(checkRequest.keys()):
         print(
             "[error] ",
             checkRequest["code"],
             ", data:",
             checkRequest["error"],
             "(upload_model_file function in API class)",
         )
     else:
         return checkRequest
Example #6
 def delete_object(class_name, object_id, addition_header=None):
     try:
         header = API.get_header(addition_header)
         http = API.http
         url = API.url + "classes/" + str(class_name) + "/" + str(object_id)
     except AttributeError:
         print(
             "[error] can't find header in API (delete_object function in API class)"
         )
     try:
         r = http.request("DELETE", url, headers=header)
         checkRequest = DATA.convert_bytes_to_json(r.data)
         if "error" in list(checkRequest.keys()):
             print(
                 "[error] ",
                 checkRequest["code"],
                 ", data:",
                 checkRequest["error"],
                 "(delete_object function in API class)",
             )
         else:
             return r.data
     except Exception:
         print(
             "[Error] bad request, check object id and class name (delete_object function in API class)"
         )
Example #7
def download_dataset():
    try:
        # Get the dataId and className query parameters
        data_id = request.args.get("dataId")
        class_name = request.args.get("className")
        r = API.get_data_create_model(class_name, data_id)
        file_name = str(uuid.uuid4())[:8] + "_data.csv"
        data_json = json.loads(r["jsonData"])
        dataFrame = pd.DataFrame(data_json)
        dataFrame = DATA.check_columns_name(dataFrame)
        os.makedirs(app.config["DOWNLOAD_FOLDER"], exist_ok=True)
        file_path_download = os.path.join(app.config["DOWNLOAD_FOLDER"],
                                          file_name)
        # file_path = "./temp/" + file_name
        # file_path_download = "./temp/" + file_name
        dataFrame.to_csv(file_path_download, index=None, header=True)
        return send_file(
            file_path_download,
            mimetype="test/csv",
            attachment_filename="data.csv",
            as_attachment=True,
        )
    except Exception as ex:
        print("[error] (createModel function app.py)")
        data = {"error": "can't  download data"}
        print(ex)
        return data
Example #8
    def post(class_name, data, addition_header=None):
        try:
            header = API.get_header(addition_header)
            data_encoded = json.dumps(data)
            http = API.http
            url = API.url + "classes/" + str(class_name)

        except AttributeError:
            print("[error] can't find header in API (post function in API class)")
        try:
            r = http.request("POST", url, body=data_encoded, headers=header)
            checkRequest = DATA.convert_bytes_to_json(r.data)
            if "error" in list(checkRequest.keys()):
                print(
                    "[error] ",
                    checkRequest["code"],
                    ", data:",
                    checkRequest["error"],
                    "(post function in API class)",
                )
            else:
                return r.data
        except Exception:
            print(
                "[Error] bad request, check class name and data (post function in API class)"
            )
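A minimal usage sketch for the post helper above, assuming the API class has already been configured with its Parse-style url, http pool and credential headers (the class name and payload are hypothetical):

    new_object = {"playerName": "Sean Plott", "score": 1337}  # illustrative only
    created = API.post("GameScore", new_object)
    if created is not None:
        print("created object:", created)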
Example #9
    def load(cls, id):
        """Parse content of PGI info page, e.g. http://jaskiniepolski.pgi.gov.pl/Details/Information/406"""
        data_path = cls.DATA_PATH.format(id=id)

        if not os.path.exists(data_path):
            cls._preload_html(id)
            time.sleep(2)

        with open(data_path) as file:
            doc = lxml.html.parse(file)
            data = OrderedDict()
            for e in doc.xpath('//tr'):
                assert len(e.getchildren()) == 2
                key, value = e.getchildren()
                key = key.text_content()

                paragraphs = value.xpath('.//p')
                if paragraphs:
                    value = '\n\n'.join(
                        [clear_text(e.text_content()) for e in paragraphs])
                else:
                    value = clear_text(value.text_content())

                data[clear_text(key)] = value

            # Read attachments
            file.seek(0)
            text = file.read()

            if id in PRELOAD_IMAGES:
                attachments = list(
                    map(int, re.findall(r'showImageInfo\((\d+)\)', text)))
                logging.info('Preloading %d images for %s', len(attachments),
                             data['Nazwa'])
            else:
                attachments = None

            # Get Links
            links = [
                Link(
                    'Pokaż oryginał',
                    f"http://jaskiniepolski.pgi.gov.pl/Details/Information/{id}"
                )
            ]
            if 'geostanowiska.pgi.gov.pl' in text:
                geostanowisko = re.search(
                    r'https?://geostanowiska.pgi.gov.pl/[^"\']+', text)
                assert geostanowisko
                # print(geostanowisko, geostanowisko.group())
                links.append(Link('Geostanowisko', geostanowisko.group()))

            for content in DATA.get(id, []):
                if isinstance(content, Link):
                    links.append(content)

        return PGIRecord(id=id,
                         description=data,
                         attachments=attachments,
                         links=links)
Example #10
def get_formatted():
    with DATA.lock:
        init = DATA.get()[KEY]
    header = "**Initiative**\n"
    roll_list = [(init[name], name) for name in init]
    roll_list.sort(reverse=True)
    contents = "\n".join(f"{roll}: {name}" for roll, name in roll_list)
    return header + contents
Example #11
def train():
    # load data
    train_path = './tfrecord'
    num_epochs = FLAGS.num_epochs
    batch_size = FLAGS.batch_size
    # load network
    num_classes = 2
    checkfile = 'checkpoint/model_{:03d}.ckpt'.format(FLAGS.checkpoint)
    with tf.Graph().as_default():
        data = DATA(train_path, batch_size, num_epochs)
        img = data.data[0]
        label = data.data[1]
        network = Alexnet(img, 2, 0.5)

        learning_rate = 0.001
        loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits_v2(logits=network.fc_8,
                                                       labels=label))
        #cross_entropy = -tf.reduce_mean(y_ * tf.log(tf.clip_by_value(y, 1e-10, 1.0)))
        train = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)

        with tf.Session() as sess:
            # init
            init_op = tf.group(tf.global_variables_initializer(),
                               tf.local_variables_initializer())
            sess.run(init_op)
            coord = tf.train.Coordinator()
            thread = tf.train.start_queue_runners(sess=sess, coord=coord)
            if FLAGS.checkpoint >= 0 and os.path.exists(checkfile):
                network.loadModel(sess, checkfile)
            for epoch in range(FLAGS.checkpoint + 1, num_epochs):
                num_batch = 25000 // batch_size
                if 25000 % batch_size != 0:
                    num_batch += 1
                for batch in range(num_batch):
                    start_time = datetime.datetime.now()
                    if coord.should_stop():
                        break
                    _, loss_print = sess.run([train, loss])
                    time = (datetime.datetime.now() - start_time)
                    print(
                        '[TRAIN] Epoch[{}]({}/{});  Loss: {:.6f};  Backpropagation: {} sec; '
                        .format(epoch, batch + 1, num_batch, loss_print, time))
                if not os.path.exists('checkpoint'):
                    os.mkdir('checkpoint')
                saver = tf.train.Saver()
                saver.save(sess, 'checkpoint/model_{:03d}.ckpt'.format(epoch))
            coord.request_stop()
            coord.join(thread)
Example #12
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch-size', default=50, type=int)
    parser.add_argument(
        '--dataset',
        default='TREC',
        help="available datasets: MR, TREC, SST-1, SST-2, SUBJ")
    parser.add_argument('--dropout', default=0.5, type=float)
    parser.add_argument('--epoch', default=300, type=int)
    parser.add_argument('--gpu', default=0, type=int)
    parser.add_argument('--learning-rate', default=0.1, type=float)
    parser.add_argument('--word-dim', default=300, type=int)
    parser.add_argument('--norm-limit', default=3.0, type=float)
    parser.add_argument(
        "--mode",
        default="non-static",
        help="available models: rand, static, non-static, multichannel")
    parser.add_argument('--num-feature-maps', default=100, type=int)

    args = parser.parse_args()

    print('loading', args.dataset, 'data...')
    data = DATA(args)
    vectors = getVectors(args, data)

    setattr(args, 'word_vocab_size', len(data.TEXT.vocab))
    setattr(args, 'class_size', len(data.LABEL.vocab))
    setattr(args, 'model_time', strftime('%H:%M:%S', gmtime()))
    setattr(args, 'FILTER_SIZES', [3, 4, 5])

    if args.gpu > -1:
        setattr(args, 'device', "cuda:0")
    else:
        setattr(args, 'device', "cpu")

    print('training start!')
    best_model = train(args, data, vectors)

    if not os.path.exists('saved_models'):
        os.makedirs('saved_models')
    torch.save(
        best_model.state_dict(),
        f'saved_models/CNN_Sentence_{args.dataset}_{args.model_time}.pt')

    print('training finished!')
Example #13
 def get_object_data(class_name, object, addition_header=None):
     data = ""
     try:
         header = API.get_header(addition_header)
         http = API.http
         url = API.url + "classes/" + str(class_name) + "/" + str(object)
     except AttributeError:
         print(
             "[error] can't find header in API (get_object_data function in API class)"
         )
     try:
         r = http.request("GET", url, headers=header)
         data = DATA.convert_dataframe(r.data)
     except Exception:
         print("[error] can't find data (get_object_data function in API class)")
     return data
Example #14
def BatchWorker(*params):
    f, names, shape, bsParams, crop, peakLimits, parguess, bounds, index, folder = params
    dt = DATA()
    result = pd.Series(name = os.path.basename(f), index = index, dtype = object)
    try:
        dt.loadData(f)
        dt.crop(crop.min, crop.max)
        dt.fitBaseline(bsParams, peakLimits, abs = True)

        text = ''
        for deg in np.arange(0, dt.bsDegree+1):
            if dt.bsCoef[deg]>=0 and deg!=0:
                text += '+'
            text += '{:.4E}*x^{}'.format(dt.bsCoef[deg], dt.bsDegree-deg)
        result.loc['baseline'] = text
    except Exception as e:
        print("Error {}".format(e))
    try:
        fit = FIT(shape, names)
        fit.deconvolute(dt, parguess, bounds, batch=True)
        for i, pars in enumerate(zip(names, shape)):
            result.loc[pars[0]+'_'+'Position'] = fit.pars[int(np.sum(fit.args[:i])+2)]
            result.loc[pars[0]+'_'+'Amplitude'] = fit.pars[int(np.sum(fit.args[:i]))]
            result.loc[pars[0]+'_'+'FWHM'] = fit.fwhm[i]
            result.loc[pars[0]+'_'+'Area'] = fit.area[i]
            if pars[1] == 'V':
                result.loc[pars[0]+'_'+'L/G'] = fit.pars[int(np.sum(fit.args[:i])+3)]
            elif pars[1] == 'B':
                result.loc[pars[0]+'_'+'1/q'] = fit.pars[int(np.sum(fit.args[:i])+3)]
        # result.iloc[1::4] = fit.pars[2::3]
        # result.iloc[2::4] = fit.pars[::3]
        # result.iloc[3::4] = fit.fwhm
        # result.iloc[4::4] = fit.area
    except Exception as e:
        print('Could not deconvolute the {} file'.format(f))
        result.iloc[1:len(index)] = -1*np.ones(len(index)-1)
        print(e)
    path = os.path.join(folder, os.path.basename(f) + '.png')
    fig = plt.figure(figsize=(12,8))
    fit.plot(figure = fig, path = path)
    return result
Example #15
def main():
    batch_size = 128
    epoch = 15

    data = DATA()
    model = LeNet(data.input_shape, data.num_classes)

    hist = model.fit(data.x_train,
                     data.y_train,
                     batch_size=batch_size,
                     epochs=epoch,
                     validation_split=0.2)
    score = model.evaluate(data.x_test, data.y_test, batch_size=batch_size)

    print()
    print('Test Loss= ', score)

    plot_loss(hist)
    plt.show()
Example #16
 def delete_data(class_name, object_id, addition_header=None):
     try:
         header = API.get_header(addition_header)
         http = API.http
         url_delete = API.url + "classes/" + str(class_name) + "/" + str(object_id)
     except AttributeError:
         print(
             "[error] can't find header in API (delete_data function in API class)"
         )
     try:
         r = http.request("DELETE", url_delete, headers=header)
         request_delete_data = DATA.convert_bytes_to_json(r.data)
         return request_delete_data
     except Exception:
         print(
             "[Error] bad request, check object id and class name (delete_data function in API class)"
         )
Example #17
def do_mail():
    print "Initiating x-mass"
    people = list(DATA.keys())

    secret_chunks = cipher_and_split(SECRET_MESSAGE, len(DATA))
    print(secret_chunks)

    targets = shuffle_list(people)

    for source, target, secret_chunk in zip(people, targets, secret_chunks):
        source = DATA[source]
        target = DATA[target]

        message = message_template.format(from_name=source['full_name'],
                                          target_name=target['full_name'],
                                          secret_chunk=secret_chunk)
        send_mail(source['full_name'], source['email'], subject_template, message)

        print "Message sent to " + source['full_name']

    print "All presents are prepared!"
Example #18
    def previewBaseline(self, dialog):
        dt = DATA()
        dt.setData(self.data.X, self.data.current)
        try:
            _min = int(dialog.lineEdit_min.text())
            _max = int(dialog.lineEdit_max.text())
        except ValueError:
            self.statusbar.showMessage("Wrong value...setting to default", 3000)
            _min = self.peakLimits.min
            _max = self.peakLimits.max
        if self.baseline != 0:
            self.limit_low.remove()
            self.limit_high.remove()
            self.baseline.remove()
        self.limit_low,  = self.subplot.plot([_min, _min], [np.min(self.data.current), np.max(self.data.current)], color = 'red', label = 'Exclude region')
        self.limit_high, = self.subplot.plot([_max, _max], [np.min(self.data.current), np.max(self.data.current)], color = 'red')

        peakLimits = Limits(_min, _max)
        dt.fitBaseline(dialog.spinBox.value(), peakLimits, abs = True)
        self.baseline, = self.subplot.plot(dt.X, dt.baseline, 'r--', label = "Baseline")
        self.subplot.legend()
        self.canvas.draw()
        del dt
Example #19
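# Excerpt: the validation half of a training loop; the enclosing function,
# the session setup and the batch iteration that yields (x, y) are not shown.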
                step2 += 1
                loss, accuracy = sess.run([model.loss, model.accuracy],
                                          feed_dict={
                                              model.x: x,
                                              model.y: y
                                          })
                loss2 += loss
                accuracy2 += accuracy
            print(' Valid', epoch, 'Loss', loss2 / step2, accuracy2 / step2)
            if epoch > 10 and accuracy2 > last_accuracy:
                saver.save(sess,
                           '/home/z/Models/digit/mnist',
                           global_step=model.global_step.eval())
                print(' - Model saved - ')
                last_accuracy = accuracy2


if __name__ == '__main__':
    n_row = 28
    n_col = 28
    n_dep = 1
    n_out = 10
    epoches = 100
    batch_size = 32
    learning_rate = 0.001

    mnist_train_path = '/home/z/DATA/mnist/train.csv'

    model = CNN(n_row, n_col, n_dep, n_out, learning_rate)
    data = DATA(mnist_train_path, None, batch_size, n_out)
    train(model, data, epoches)
Example #20
def create_api_model():
    if request.headers["CONTENT_TYPE"] == "application/json":
        try:
            data_request = request.json
            modelId = data_request["modelId"]
            if modelId is None:
                return "[error] modelId not found check (keys) modelId and values "
            else:
                data_test_json = data_request["data"]
                data_test_dataFrame = pd.DataFrame(data_test_json)
                r = API.get_model("Model", modelId)
                modelUrl = r["modelFile"]["url"]
                Nu_SVC_classifier = joblib.load(urlopen(modelUrl))
                KQ = np.array(Nu_SVC_classifier.predict(data_test_dataFrame))
                data_transform = {
                    "0": "Thiếu cân (Insufficient weight)",
                    "1": "Bình thường (Normal weight)",
                    "2": "Thừa cân loại 1 (Overweight level 1)",
                    "3": "Thừa cân loại 2 (Overweight level 2)",
                    "4": "Béo phì loại 1 (Obesity type I)",
                    "5": "git Béo phì loại 2 (Obesity type II)",
                    "6": "Béo phì loại 3 (Obesity type III)",
                }
                dataReturn = {
                    "result": [],
                }
                for rs in KQ:
                    dataReturn["result"].append(data_transform[str(rs)])
                return dataReturn
        except Exception:
            print("[error] check key (inputColumns) and value")
            return (
                "[error] check key (inputColumns) and value (check type inputColumns)"
            )
    else:
        modelId = request.form.get("modelId")
        if modelId is None:
            return "[error] modelId not found check (keys) modelId and values "
        else:
            file_test = request.files.getlist("data")[0]
            file_name = secure_filename(file_test.filename)
            if file_name == "":
                return "[error] Can't find data, check keys 'data' and values"
            else:
                try:
                    filename_random = str(uuid.uuid4())[:8] + "_" + file_name
                    if os.path.exists(app.config["DATA_API_FOLDER"]):
                        file_path_test = os.path.join(
                            app.config["DATA_API_FOLDER"], filename_random)
                    else:
                        os.makedirs(app.config["DATA_API_FOLDER"])
                        file_path_test = os.path.join(
                            app.config["DATA_API_FOLDER"], filename_random)
                    file_test.save(file_path_test)
                    df_test, columns, n, m = DATA.read("csv", file_path_test,
                                                       ",")
                except Exception as e:
                    print(e)
                    return "[error] can't save data, request fail"
                try:
                    col_feature_test_string = request.form.getlist(
                        "inputColumns")[0]
                    col_feature_test_list = ast.literal_eval(
                        col_feature_test_string)
                    col_feature_test_array = np.array(col_feature_test_list)
                    r = API.get_model("Model", modelId)
                    modelUrl = r["modelFile"]["url"]
                    Nu_SVC_classifier = joblib.load(urlopen(modelUrl))
                except Exception:
                    print("[error] request fail")
                    notification = (
                        "[error] request fail check key 'modelId', model " +
                        str(modelId) + " not found")
                    return notification
                try:
                    data_test = df_test.iloc[:, col_feature_test_array]
                    KQ = np.array(Nu_SVC_classifier.predict(data_test))
                    data_transform = {
                        "0": "Thiếu cân (Insufficient weight)",
                        "1": "Bình thường (Normal weight)",
                        "2": "Thừa cân loại 1 (Overweight level 1)",
                        "3": "Thừa cân loại 2 (Overweight level 2)",
                        "4": "Béo phì loại 1 (Obesity type I)",
                        "5": "Béo phì loại 2 (Obesity type II)",
                        "6": "Béo phì loại 3 (Obesity type III)",
                    }
                    dataReturn = {
                        "result": [],
                    }
                    for rs in KQ:
                        dataReturn["result"].append(data_transform[str(rs)])
                    os.remove(file_path_test)
                    return dataReturn
                except IndexError:
                    print("[error] check key (inputColumns) and value")
                    return "[error] check key (inputColumns) and value check (number inputColumns)"
                except ValueError:
                    print("[error] check key (inputColumns) and value")
                    return "[error] check key (inputColumns) and value (check type inputColumns)"
Example #21
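# Excerpt: the tail of a network-builder function (yake_net) followed by the
# module-level graph setup; x, y and learning_rate are defined elsewhere in
# the source script.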
    blockD = upscale2("blockD", blockC,  64)             #n,128,128, 64
    blockE = res_net1("blockE", blockD,2, 32,  64)
    blockF = upscale2("blockF", blockE,  32)             #n,256,256, 32
    blockG = res_net1("blockG", blockF,2, 16,  32)


    blockZ = conv2d_1('blockZ', blockG, [1, 1, 32,1]) 

    out = tf.sigmoid(blockZ, "sigmoid")

    tf.summary.image("input0", inputs[:1,:,:,:])
    tf.summary.image("output" , (y[:1,:,:,:]+out[:1,:,:,:])/2.0*255)
    return out


data = DATA()
pred = yake_net(x)



cast = tf.reduce_mean(tf.abs((pred-y)*(y+0.004)))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cast)
l2 = tf.add_n(tf.get_collection('l2_losses'))


tf.summary.scalar('loss',cast)
tf.summary.scalar('l2',l2)



init = tf.global_variables_initializer()
Example #22
def create_model():
    try:
        # Get dataName, className, label, feature, algorithm and parameters
        data_name = request.args.get("dataName")
        class_name = request.args.get("className")
        model_name = request.args.get("modelname")
        col_label = int(request.args.get("label"))
        col_feature_str = (request.args.get("feature")).split(",")
        col_feature = []
        for col in col_feature_str:
            col_feature.append(int(col))
        athm = request.args.get("algorithm")
        athm_id = request.args.get("algorithmId")
        params = json.loads(request.args.get("params"))
        # print(params)
        if params == {}:
            params = None
            test_size = 0.3
        else:
            test_size = float(params["testSize"])
            if test_size >= 1.0 or test_size <= 0.0:
                data = {"error": "0.0 < test size < 1.0"}
                return data
        # get data
        r = API.get_class(class_name)
        arr = str(r.data, "utf-8")
        r_json = json.loads(arr)
        data = r_json["results"]
        dataFrame = pd.DataFrame(data)
        if class_name == "DatasetSurveyBalance":
            dataFrame = dataFrame.iloc[:, 3:]
        elif class_name == "DatasetObesity":
            dataFrame = dataFrame.iloc[:, 3:]
        if "yearOfBirth" in list(dataFrame.columns):
            del dataFrame["yearOfBirth"]
        # dataFrame = dataFrame.dropna(axis="1",how = "any")
        col_feature_name = np.array((dataFrame.iloc[:, col_feature]).columns)
        col_feature_name_str = col_feature_name[0]
        col_feature_name = list(col_feature_name)
        col_feature_name.pop(0)
        col_feature_name = np.array(col_feature_name)
        for col in col_feature_name:
            col_feature_name_str = col_feature_name_str + "," + col
        col_label_name = str(
            np.array(
                pd.DataFrame(np.matrix(dataFrame.columns)).iloc[0, col_label]))
        # get data train, test
        X_train, X_test, y_train, y_test = DATA.get_data_train(
            dataFrame, col_feature, col_label, test_size)
        model, evalution, error, params = get_athm(athm, X_train, X_test,
                                                   y_train, y_test, params)
        if error != "":
            data = {"error": error}
            return data
        else:
            #   Create random id for file name
            folder_model = "./upload_model"
            randomId = str(uuid.uuid4())[:8]
            file_name_model = (randomId + "_" + str(athm) + "_" +
                               str(class_name) + str(".pkl"))
            pkl_filename = folder_model + "/" + file_name_model
            joblib.dump(model, str(file_name_model))
            custom_header = {}
            custom_header[
                "X-Parse-Application-Id"] = API.X_Parse_Application_Id
            custom_header["X-Parse-REST-API-Key"] = API.X_Parse_REST_API_Key
            custom_header["Content-Type"] = "application/x-binary"
            description = (
                "Model uses " + str(athm) + " algorithm. " +
                "Dataset for model is " + str(data_name) +
                ", columns label is " + str(col_label_name) +
                " and columns feature is " + str(col_feature_name))
            r_upload = API.upload_model_file(
                file_name_model,
                model_name,
                data_name,
                athm_id,
                params,
                col_label,
                col_label_name,
                col_feature,
                col_feature_name_str,
                description,
                evalution,
            )
            return r_upload
    except Exception:
        print("[error] (create_model function app.py)")
        data = {"error": "can't create model"}
        return data
Example #23
def create_model_system_mx():
    try:
        # Hard-coded dataName, className, label, feature, algorithm and parameters
        data_name = 'DatasetMX'
        class_name = 'DatasetObesity'
        model_name = "MODEL SYSTEM"
        col_label = 7
        col_feature_str = [
            '0', '1', '2', '3', '4', '5', '6', '8', '9', '10', '11', '12',
            '13', '14', '15', '16'
        ]
        col_feature = []
        for col in col_feature_str:
            col_feature.append(int(col))
        athm = 'SupportVectorMachine'
        athm_id = 'ccn7ofeacm'
        params = {
            'C': '100000',
            'degree': '3',
            'gamma': '0.3',
            'kernel': 'linear',
            'testSize': '0.3'
        }
        if params == {}:
            params = None
            test_size = 0.3
        else:
            test_size = float(params["testSize"])
            if test_size >= 1.0 or test_size <= 0.0:
                data = {"error": "0.0 < test size < 1.0"}
                return data
        # get data
        r = API.get_class(class_name)
        arr = str(r.data, "utf-8")
        r_json = json.loads(arr)
        data = r_json["results"]
        dataFrame = pd.DataFrame(data)
        if class_name in ("DatasetSurveyBalance", "DatasetObesity"):
            dataFrame = dataFrame.iloc[:, 3:]
        if "yearOfBirth" in list(dataFrame.columns):
            del dataFrame["yearOfBirth"]
        # dataFrame = dataFrame.dropna(axis="1",how = "any")
        col_feature_name = np.array((dataFrame.iloc[:, col_feature]).columns)
        col_feature_name_str = col_feature_name[0]
        col_feature_name = list(col_feature_name)
        col_feature_name.pop(0)
        col_feature_name = np.array(col_feature_name)
        for col in col_feature_name:
            col_feature_name_str = col_feature_name_str + "," + col
        col_label_name = str(
            np.array(
                pd.DataFrame(np.matrix(dataFrame.columns)).iloc[0, col_label]))
        # get data train, test
        X_train, X_test, y_train, y_test = DATA.get_data_train(
            dataFrame, col_feature, col_label, test_size)
        model, evalution, error, params = get_athm(athm, X_train, X_test,
                                                   y_train, y_test, params)
        if error != "":
            data = {"error": error}
            return data
        else:
            #   Create random id for file name
            folder_model = "./upload_model"
            randomId = str(uuid.uuid4())[:8]
            file_name_model = (randomId + "_" + str(athm) + "_" +
                               str(class_name) + str(".pkl"))
            pkl_filename = folder_model + "/" + file_name_model
            joblib.dump(model, str(file_name_model))
            custom_header = {}
            custom_header[
                "X-Parse-Application-Id"] = API.X_Parse_Application_Id
            custom_header["X-Parse-REST-API-Key"] = API.X_Parse_REST_API_Key
            custom_header["Content-Type"] = "application/x-binary"
            description = (
                "Model uses " + str(athm) + " algorithm. " +
                "Dataset for model is " + str(data_name) +
                ", columns label is " + str(col_label_name) +
                " and columns feature is " + str(col_feature_name))
            r_upload_2 = API.upload_model_file_system_mx(
                file_name_model,
                model_name,
                data_name,
                athm_id,
                params,
                col_label,
                col_label_name,
                col_feature,
                col_feature_name_str,
                description,
                evalution,
            )
            return r_upload_2
    except Exception:
        print("[error] (create_model_system_mx function app.py)")
        data = {"error": "can't create model"}
        return data
Example #24
def upload_file_url():
    try:
        #   Get file
        url = request.args.get("urlData")
        data_name = request.args.get("dataName")
        separator = request.args.get("separator")
        if data_name == "":
            data_name = "dataset_not_name"
        user_id = request.args.get("userId")
    except AttributeError:
        print("[error] can't find file_upload (upload_file_url function app.py)")
    try:
        #   Create random id
        random_id = str(uuid.uuid4())[:8]
        #   Random file_name
        filename_upload_random = str(random_id) + "_" + "upload.csv"
        #   get file_path
        if os.path.exists(app.config["UPLOAD_FOLDER"]):
            file_path_upload = os.path.join(app.config["UPLOAD_FOLDER"],
                                            filename_upload_random)
        else:
            os.makedirs(app.config["UPLOAD_FOLDER"])
            file_path_upload = os.path.join(app.config["UPLOAD_FOLDER"],
                                            filename_upload_random)
        # Save the file locally
        urlretrieve(url, file_path_upload)
    except UnboundLocalError:
        print(
            "[error] local variable referenced before assignment (upload_file_url function app.py)"
        )
    except ValueError:
        data_return_err = {"error": "unknown url"}
        return data_return_err
    try:
        data, col, n, m = DATA.read("csv", file_path_upload, separator)
        file_name_csv = data_name + ".csv"
        file_path_save_csv = os.path.join(app.config["UPLOAD_FOLDER"],
                                          file_name_csv)
        data.to_csv(file_path_save_csv, index=None, header=True)
        data_str = DATA.convert_str(file_path_save_csv)
        data_str = str(data_str)
        data_post = {
            "jsonData": data_str,
            "dataName": data_name,
            "userUpload": {
                "__type": "Pointer",
                "className": "_User",
                "objectId": user_id,
            },
            "delimiter": separator,
            "uploadFrom": "url",
        }
        class_name = "Data"
        data = API.post(class_name, data_post)
        print(data)
        return data
    except UnboundLocalError:
        print("[error] can't upload dataset (upload_file_url function app.py)")
        return "fail, can't upload dataset"
Example #25
def get_data_charts_obesity():
    try:
        class_name = request.args.get("className")
        # class_name = "Dataset"
        r = API.get_class(class_name)
        arr = str(r.data, "utf-8")
        r_json = json.loads(arr)
        data = r_json["results"]
        df = pd.DataFrame(data)
        columns = list(df.columns)[4:]
        columns.remove("weight")
        for i in range(len(df)):
            for column in list(columns):
                try:
                    df[column][i] = int(round(df[column][i]))
                except (TypeError, ValueError):
                    df[column][i] = str(df[column][i])
        data_FAVC = DATA.get_data_chart(df, "FAVC")
        data_gender = DATA.get_data_chart(df, "gender")
        data_NCP = DATA.get_data_chart(df, "NCP")
        data_FHWO = DATA.get_data_chart(df, "FHWO")
        data_CAEC = DATA.get_data_chart(df, "CAEC")
        data_CH2O = DATA.get_data_chart(df, "CH2O")
        data_SMOKE = DATA.get_data_chart(df, "SMOKE")
        data_FCVC = DATA.get_data_chart(df, "FCVC")
        data_SCC = DATA.get_data_chart(df, "SCC")
        data_FAF = DATA.get_data_chart(df, "FAF")
        data_TUE = DATA.get_data_chart(df, "TUE")
        data_CALC = DATA.get_data_chart(df, "CALC")
        data_MTRANS = DATA.get_data_chart(df, "MTRANS")
        data_NObeyesdad = DATA.get_data_chart(df, "NObeyesdad")
        df1 = df.sort_values(by=["weight"],
                             ascending=True).reset_index(drop=True)
        weight_values = {
            "duoi_40": [],
            "duoi_50": [],
            "duoi_60": [],
            "duoi_70": [],
            "duoi_80": [],
            "duoi_90": [],
            "duoi_100": [],
            "tren_100": [],
        }
        for i in range(len(df1)):
            w = float(df1["weight"][i])
            h = float(df1["height"][i])
            if w < 40:
                weight_values["duoi_40"].append(h)
            elif w < 50:
                weight_values["duoi_50"].append(h)
            elif w < 60:
                weight_values["duoi_60"].append(h)
            elif w < 70:
                weight_values["duoi_70"].append(h)
            elif w < 80:
                weight_values["duoi_80"].append(h)
            elif w < 90:
                weight_values["duoi_90"].append(h)
            elif w < 100:
                weight_values["duoi_100"].append(h)
            else:
                weight_values["tren_100"].append(h)
        data_height_weight = [
            {
                "name": "Ít hơn 40 kg",
                "value": DATA.trung_binh(weight_values["duoi_40"]),
            },
            {
                "name": "Từ 40 - 50 kg",
                "value": DATA.trung_binh(weight_values["duoi_50"]),
            },
            {
                "name": "Từ 50 - 60 kg",
                "value": DATA.trung_binh(weight_values["duoi_60"]),
            },
            {
                "name": "Từ 60 - 70 kg",
                "value": DATA.trung_binh(weight_values["duoi_70"]),
            },
            {
                "name": "Từ 70 - 80 kg",
                "value": DATA.trung_binh(weight_values["duoi_80"]),
            },
            {
                "name": "Từ 80 - 90 kg",
                "value": DATA.trung_binh(weight_values["duoi_90"]),
            },
            {
                "name": "Từ 90 - 100 kg",
                "value": DATA.trung_binh(weight_values["duoi_100"]),
            },
            {
                "name": "Trên 100 kg",
                "value": DATA.trung_binh(weight_values["tren_100"]),
            },
        ]
        data_age = [
            {
                "name": "Dưới 20 tuổi",
                "value": 0
            },
            {
                "name": "Từ 20 - dưới 30 tuổi",
                "value": 0
            },
            {
                "name": "Từ 30 - dưới 40 tuổi",
                "value": 0
            },
            {
                "name": "Từ 40 - dưới 50 tuổi",
                "value": 0
            },
            {
                "name": "Từ 50 - dưới 60 tuổi",
                "value": 0
            },
            {
                "name": "Từ 60 trở lên",
                "value": 0
            },
        ]
        data_result = {
            "chart_gender": data_gender,
            "chart_FAVC": data_FAVC,
            "chart_NCP": data_NCP,
            "chart_FHWO": data_FHWO,
            "chart_CAEC": data_CAEC,
            "chart_CH2O": data_CH2O,
            "chart_SMOKE": data_SMOKE,
            "chart_FCVC": data_FCVC,
            "chart_SCC": data_SCC,
            "chart_FAF": data_FAF,
            "chart_TUE": data_TUE,
            "chart_CALC": data_CALC,
            "chart_MTRANS": data_MTRANS,
            "chart_age": data_age,
            "chart_obesity": data_NObeyesdad,
        }
        # data_result = json.dumps(data_result)
        return data_result
    except Exception:
        print("[Error] (get_data_charts_obesity function app.py)")
        return "[Error] BAD REQUEST can't get chart data"
Example #26
def get_data_charts():
    try:
        class_name = request.args.get("className")
        r = API.get_class(class_name)
        arr = str(r.data, "utf-8")
        r_json = json.loads(arr)
        data = r_json["results"]
        df = pd.DataFrame(data)
        data_mealOfTheDay = DATA.get_data_chart(df, "mealOfTheDay")
        data_breakfastOfTheWeek = DATA.get_data_chart(df, "breakfastOfTheWeek")
        data_dinnerOfTheWeek = DATA.get_data_chart(df, "dinnerOfTheWeek")
        data_fastfoodOfTheWeek = DATA.get_data_chart(df, "fastFoodOfTheWeek")
        data_vegetableInMeal = DATA.get_data_chart(df, "vegetableInMeal")
        data_proteinOfMeal = DATA.get_data_chart(df, "proteinOfMeal")
        data_waterOfTheDay = DATA.get_data_chart(df, "waterOfTheDay")
        data_timeDoExcerciseForWeek = DATA.get_data_chart(
            df, "timeDoExcerciseForWeek")
        data_sportTimeForWeek = DATA.get_data_chart(df, "sportTimeForWeek")
        data_alcohol = DATA.get_data_chart(df, "alcohol")
        data_nicotine = DATA.get_data_chart(df, "nicotine")
        data_requireOfJob = DATA.get_data_chart(df, "requireOfJob")
        data_park = DATA.get_data_chart(df, "park")
        data_depression = DATA.get_data_chart(df, "depression")
        data_result = {
            "chart_meal_of_theday": data_mealOfTheDay,
            "chart_breakfast_of_theweek": data_breakfastOfTheWeek,
            "chart_dinner_of_theweek": data_dinnerOfTheWeek,
            "chart_fastfood_of_theweek": data_fastfoodOfTheWeek,
            "chart_vegetable_in_meal": data_vegetableInMeal,
            "chart_protein_of_meal": data_proteinOfMeal,
            "chart_water_of_the_day": data_waterOfTheDay,
            "chart_time_doexcercise_for_week": data_timeDoExcerciseForWeek,
            "chart_sporttime_for_week": data_sportTimeForWeek,
            "chart_alcohol": data_alcohol,
            "chart_nicotine": data_nicotine,
            "chart_requireOfJob": data_requireOfJob,
            "chart_park": data_park,
            "chart_depression": data_depression,
        }
        # data_result = json.dumps(data_result)
        return data_result
    except Exception:
        print("[Error] (get_data_charts function app.py)")
        return "[Error] BAD REQUEST can't get chart data"
Example #27

batch_size = 4
P_channels = (3, 48, 96, 192)
I_channels = (3, 48, 96, 192)
R_channels = (48, 96, 192)
extrap_start_time = 20

DATA_DIR = './KTH_data'
test_file = os.path.join(DATA_DIR, 'X_test.hkl')
test_sources = os.path.join(DATA_DIR, 'sources_test.hkl')

nt = 8
nc = 5

DATA_test = DATA(test_file, test_sources, nt * nc)

test_loader = DataLoader(DATA_test, batch_size=batch_size, shuffle=False)

model = HPNet(P_channels,
              I_channels,
              R_channels,
              nc,
              output_mode='prediction',
              extrap_start_time=extrap_start_time)
model = nn.DataParallel(model)
model.load_state_dict(torch.load('models/training_weights.pt'))

if torch.cuda.is_available():
    print('Using GPU.')
    model.cuda()
Example #28
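# Excerpt: argument parsing and test-time setup; the parser construction and
# the enclosing main() are not shown, and --block-size is added elsewhere.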
    parser.add_argument(
        '--dataset',
        default='TREC',
        help="available datasets: MR, TREC, SST-1, SST-2, SUBJ, CR")
    parser.add_argument('--dropout', default=0.5, type=float)
    parser.add_argument('--gpu', default=0, type=int)
    parser.add_argument('--word-dim', default=300, type=int)
    parser.add_argument(
        "--mode",
        default="non-static",
        help="available models: rand, static, non-static, multichannel")
    parser.add_argument('--num-feature-maps', default=5, type=int)

    args = parser.parse_args()

    print('loading SNLI data...')
    data = DATA(args)

    setattr(args, 'word_vocab_size', len(data.TEXT.vocab))
    setattr(args, 'class_size', len(data.LABEL.vocab))

    # if block size is lower than 0, a heuristic for block size is applied.
    if args.block_size < 0:
        args.block_size = data.block_size

    print('loading model...')
    model = load_model(args, data)

    _, acc = test(model, data)

    print(f'test acc: {acc:.3f}')
Example #29
if __name__ == '__main__':
    # Create a root logger:
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.INFO)

    # Instantiating logging handler and record format:
    root_handler = logging.FileHandler("main.log")
    rec_format = "%(asctime)s:%(levelname)s:%(name)s:%(funcName)s:%(message)s"
    formatter = logging.Formatter(rec_format, datefmt="%Y-%m-%d %H:%M:%S")
    root_handler.setFormatter(formatter)

    # Send logging handler to root logger:
    root_logger.addHandler(root_handler)

    example = 1
    my_data = DATA()
    if example == 1:
        # Example 1: read CSV file:
        my_file = '../../../data/example_data.csv'
        my_data.read_csv(my_file)
    elif example == 2:
        # Example 2: read TXT files:
        my_sf_file = 'daily_sf_2000_cruts.txt'
        my_pn_file = 'daily_pn_2000_wfdei.txt'
        my_tair_file = 'daily_tair_2000_wfdei.txt'
        my_data.read_txt(my_sf_file, 'sf')
        my_data.read_txt(my_pn_file, 'pn')
        my_data.read_txt(my_tair_file, 'tair')

    # Consistency Test #4: Spin-Up
    my_lat = 37.7
Example #30
def reset():
    with DATA.lock:
        DATA.get()[KEY] = {}
        DATA.write()
Example #31
def add(name: str, value: int):
    with DATA.lock:
        DATA.get()[KEY][name] = value
        DATA.write()
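Taken together with get_formatted (Example #10) and reset (Example #30), these helpers form a small lock-guarded initiative tracker around the shared DATA store. A hypothetical session:

add("Alice", 17)
add("Bob", 12)
print(get_formatted())
# **Initiative**
# 17: Alice
# 12: Bob
reset()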
Example #32
from data import DATA
from seat import Seat

cleaned_data = DATA.splitlines()

if __name__ == "__main__":
    seats = [Seat.from_string(data) for data in cleaned_data]
    max_id = max([seat.id for seat in seats])

    print("PART ONE\n=======")
    print('Greatest seat ID: {}'.format(max_id))

    sorted_ids = [seat.id for seat in sorted(seats, key=lambda seat: seat.id)]
    expected_id = None
    for id in sorted_ids:
        if expected_id is None:
            expected_id = id + 1
            continue

        if id != expected_id:
            break

        expected_id = id + 1

    print('Missing Seat ID: {}'.format(expected_id))
    print("\nPART TWO\n=======")
Example #33
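# Excerpt from a training script: device selection and data loading; the opt
# namespace comes from the script's argument parser, which is not shown.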
    device = "cuda"
    job = 10  #Can change this to multi-process dataloading
else:
    device = "cpu"
    job = 0

##################################################################
# Load Data
##################################################################
if opt.use_MNIST:
    ### MNIST
    folder_name = folder_name + "MNIST"
if not opt.icdf and not opt.motion_samples:
    train_batch_size = opt.train_batch_size
    test_batch_size = opt.test_batch_size
    data = DATA("h3.6m_3d", "h3.6m/dataset/")
    out_of_distribution = data.get_poses(input_n=1,
                                         output_n=1,
                                         sample_rate=2,
                                         dct_n=2,
                                         out_of_distribution_action=None,
                                         val_categorise=True)
    train_loader, val_loader, OoD_val_loader, test_loader = data.get_dataloaders(
        train_batch=train_batch_size,
        test_batch=test_batch_size,
        job=job,
        val_categorise=True)

##################################################################
# Instantiate model, and methods used for training and validation
##################################################################