Example #1
0
def learning(weight):
    """Perceptron-style online pass: update weights on every misclassified stdin line."""
    for line in iter(sys.stdin.readline, ""):
        gold, sentence = get_label_and_sentence(line)
        features = cf.create_features(sentence)
        predicted = po.predict_one(weight, features)
        # Only adjust the weights when the prediction disagrees with the gold label.
        if int(predicted) != int(gold):
            update_weights(weight, features, int(gold))
def predict_all(network):
    """Classify each stdin line with the network and print +1 or -1."""
    for sentence in iter(sys.stdin.readline, ""):
        features = cf.create_features(sentence)
        score, _ = predict_nn(network, features)
        # copysign(1, score) maps the score to +/-1.0; cast to int for printing.
        print(int(copysign(1, score)))
def predict_all(network):
    """Read sentences from stdin, run the network on each, and print the sign."""
    for raw_line in iter(sys.stdin.readline, ""):
        raw_score, _hidden = predict_nn(network, cf.create_features(raw_line))
        # Emit +1 for non-negative scores, -1 for negative ones.
        print(int(copysign(1, raw_score)))
def learning(network):
    """Train the network for FLAGS.iteration epochs over all lines read from stdin."""
    training_lines = sys.stdin.readlines()
    for epoch in range(FLAGS.iteration):
        # In-place progress indicator (carriage return keeps it on one line).
        sys.stdout.write("\rIteration:%d" % (epoch + 1))
        sys.stdout.flush()
        for training_line in training_lines:
            gold, sentence = get_label_and_sentence(training_line)
            update_nn(network, cf.create_features(sentence), gold)
def margin_learning(weight, margin):
    """Margin-based online training: update whenever score*label is within the margin."""
    last = defaultdict(lambda: 0)
    for idx, line in enumerate(iter(sys.stdin.readline, "")):
        gold, sentence = get_label_and_sentence(line)
        features = cf.create_features(sentence)
        # Signed confidence: positive when the model agrees with the gold label.
        confidence = float(gold) * po.get_score(weight, features,
                                                FLAGS.l1_value, idx, last)
        if confidence <= margin:
            update_weights(weight, features, int(gold))
Example #6
0
def learning(network):
    """Run FLAGS.iteration training epochs of the network over all stdin input."""
    corpus = sys.stdin.readlines()
    for iteration_idx in range(FLAGS.iteration):
        # Overwrite the same console line with the current epoch number.
        sys.stdout.write("\rIteration:%d" % (iteration_idx + 1))
        sys.stdout.flush()
        for example in corpus:
            label_value, text = get_label_and_sentence(example)
            feature_vec = cf.create_features(text)
            update_nn(network, feature_vec, label_value)
def margin_learning(weight, margin):
    """Online learner that updates on every example scored inside the margin."""
    last_update = defaultdict(lambda: 0)
    for position, raw in enumerate(iter(sys.stdin.readline, "")):
        label_value, text = get_label_and_sentence(raw)
        feature_vec = cf.create_features(text)
        score = po.get_score(weight, feature_vec, FLAGS.l1_value,
                             position, last_update)
        # A correct, confident prediction has score*label above the margin.
        if score * float(label_value) <= margin:
            update_weights(weight, feature_vec, int(label_value))
Example #8
0
def make_df(evals, facs, dropcols=to_drop):
    """Merge evaluation records with engineered facility features on ID_NUMBER.

    Mutates `evals` in place (drops metadata columns, splits the start
    date) and returns the left-merged frame.
    """
    feature_frame = cf.create_features(facs, evals)
    metadata_cols = ['ACTIVITY_LOCATION', 'EVALUATION_IDENTIFIER',
                     'EVALUATION_TYPE', 'EVALUATION_DESC',
                     'EVALUATION_AGENCY', 'FOUND_VIOLATION']
    evals.drop(metadata_cols, inplace=True, axis=1)
    # Split the '/'-separated start date into month/day/year columns.
    date_parts = evals['EVALUATION_START_DATE'].str.split('/', expand=True)
    evals[['month', 'day', 'year']] = date_parts
    feature_frame.drop(dropcols, inplace=True, axis=1)
    return pd.merge(evals, feature_frame, on='ID_NUMBER', how='left')
def average_learning(weight):
    """Averaged perceptron: train on stdin, then replace weights with their running mean."""
    steps = 0.0
    running_avg = defaultdict(lambda: 0.0)
    for line in iter(sys.stdin.readline, ""):
        gold, sentence = get_label_and_sentence(line)
        features = cf.create_features(sentence)
        if int(po.predict_one(weight, features)) != int(gold):
            update_weights(weight, features, int(gold))
        steps += 1.0
        # Fold the current weight vector into the incremental mean.
        for name, value in weight.items():
            running_avg[name] = (running_avg[name] * (steps - 1.0) + value) / steps
    # Overwrite the live weights with their averaged values.
    for name in weight.keys():
        weight[name] = running_avg[name]
def average_learning(weight):
    """Train a perceptron over stdin and finish by averaging the weight trajectory."""
    seen = 0.0
    mean_weight = defaultdict(lambda: 0.0)
    for raw in iter(sys.stdin.readline, ""):
        label_value, text = get_label_and_sentence(raw)
        feature_vec = cf.create_features(text)
        guess = po.predict_one(weight, feature_vec)
        if int(guess) != int(label_value):
            update_weights(weight, feature_vec, int(label_value))
        seen += 1.0
        # Running average over every time step, updated or not.
        for feat, val in weight.items():
            mean_weight[feat] = (mean_weight[feat] * (seen - 1.0) + val) / seen
    # Replace the final weights with the averaged ones.
    for feat in weight.keys():
        weight[feat] = mean_weight[feat]
def predict_all(model_file):
    """Load a "name value" weight model, then classify every stdin line.

    Each model line is a whitespace-separated feature name (which may
    itself contain spaces) followed by a float weight; the raw prediction
    is printed for each input sentence.
    """
    weight = defaultdict(lambda: 0.0)

    # Load model_file; `with` guarantees the handle closes even if a
    # malformed line raises (the original leaked it on error).
    with open(model_file) as fin:
        for line in fin:
            parts = line.rstrip("\n").split()
            value = parts.pop()  # last token is the weight
            name = " ".join(parts)  # the rest is the feature name
            weight[name] = float(value)

    # Predict every sentence read from stdin.
    for line in iter(sys.stdin.readline, ""):
        phi = cf.create_features(line)
        y = po.predict_one(weight, phi)
        print(y)
Example #12
0
def predict_all(model_file):
    """Load a "name value" weight model, then print an int label for each stdin line.

    Model lines hold a whitespace-separated feature name (possibly with
    embedded spaces) followed by its float weight.
    """
    weight = defaultdict(lambda: 0.0)

    # Load model_file; the context manager closes the file even when a
    # bad line raises mid-parse (the original leaked the handle then).
    with open(model_file) as fin:
        for line in fin:
            parts = line.rstrip("\n").split()
            value = parts.pop()  # final token: the weight
            name = " ".join(parts)  # remaining tokens: the feature name
            weight[name] = float(value)

    # Predict every sentence read from stdin.
    for line in iter(sys.stdin.readline, ""):
        phi = cf.create_features(line)
        y = po.predict_one(weight, phi)
        print(int(y))
Example #13
0
    return data


def scan_for_images(source_directory: str, project_name: str) -> None:
    """Index images under *source_directory* into <project>.info and <project>.hdf5.

    Writes a JSON .info file recording the absolute source path, then
    creates one HDF5 group per discovered image with its relative path
    stored in the 'path' attribute.
    """
    image_path_list = create_image_path_list(source_directory)
    # Hoisted: both output files share the same project base path.
    project_base = os.path.join(GloabalDir.projects, project_name)

    # Record where the images originally came from.
    with open(f"{project_base}.info", "w") as info:
        data = {"initial_absolute_path": os.path.abspath(source_directory)}
        info.write(json.dumps(data))

    # `with` guarantees the HDF5 file is closed even if create_group
    # raises (the original leaked the handle on error).
    with h5py.File(f"{project_base}.hdf5", "w") as hdf:
        for file_name, relative_path in image_path_list:
            group = hdf.create_group(file_name)
            group.attrs['path'] = relative_path


if __name__ == "__main__":
    # Parse CLI arguments, index the images, then derive features and neighbours.
    args = vars(parser.parse_args())

    scan_for_images(args['source'], args['project'])
    create_features.create_features(args['project'])
    create_neighbours.create_neighbours(args['project'], args['k_nearest'])