Example #1
                        <link rel="stylesheet" href="css/style.css">
                        <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/font-awesome/4.5.0/css/font-awesome.min.css">
                        <link rel='stylesheet prefetch' href='https://maxcdn.bootstrapcdn.com/bootstrap/3.3.5/css/bootstrap.min.css'>
                         <link rel="stylesheet" href="css/style.css">
                    </head>

                    <body>
                        <section class="title container">
                            <div class="row">
                                <div class="col-md-12">
                                    <h1>Code<fade>X</fade> Blog</h1>
                                    <div class="seperator"></div>
                                    <p style="padding-bottom:50px;"></p>
                                </div>
                            </div>
                        </section>

                        <!-- Start Blog Layout -->
                        <div class="container">
                            %s
                        </div>

                    </body>

                    </html>''' % '\n'.join(rows)
    with open('index.html', 'w') as index_file:
        index_file.write(index_html)
    console.info("Wrote index HTML.")
    make_sitemap([blog_post['url'] for blog_post in blog_posts])
console.success("Finished blog iteration.")
Example #2
console.log("Criando TASK TRAIN ")
import os
data_root = os.path.join(root, filename)
dataset_train = task.Dataset(data_root, classes=('motorbike', ))

console.info("TRAINING DATA MODEL...")
time_limits = 5 * 60 * 60  # 5 hours
epochs = 30
detector = task.fit(dataset_train,
                    num_trials=2,
                    epochs=epochs,
                    lr=ag.Categorical(5e-4, 1e-4),
                    ngpus_per_trial=1,
                    time_limits=time_limits)
console.success("TRAINING DONE !")
console.log("START TEST MODEL ")
dataset_test = task.Dataset(data_root,
                            index_file_name='test',
                            classes=('motorbike', ))

test_map = detector.evaluate(dataset_test)
console.log("mAP on test dataset: {}".format(test_map[1][1]))

console.success("SAVE MODEL")
savefile = 'model.pkl'
detector.save(savefile)

from autogluon import Detector
new_detector = Detector.load(savefile)
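
To use the reloaded model, the AutoGluon 0.x tutorials call predict on an image path; a sketch, with test.jpg as a stand-in filename:

image = 'test.jpg'  # stand-in path for any test image
ind, prob, loc = new_detector.predict(image)  # class indices, confidences, bounding boxes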
Example #3
import json

import requests
from console_logging.console import Console
from lxml import html

console = Console()

with open('jobs.json') as f:
    job_data = json.load(f)
    console.info("Crawling %d career pages." % len(job_data))
    i = 0
    for job_entry in job_data:
        try:
            url = job_entry['link']
            page = requests.get(url)
            tree = html.fromstring(page.content)
            links = tree.xpath('//a')
            job_postings = []
            for link in links:
                job_title = link.text_content().strip()
                if 'intern' in job_title.lower():  # only classify internship postings
                    res = requests.post(
                        'http://127.0.0.1:8000/predict', json={'title': job_title})
                    prediction = res.text.strip()
                    if prediction in ['IT/Software Development', 'Engineering']:
                        job_postings.append(job_title)
            job_entry['positions'] = job_postings
        except Exception as e:
            console.error(e)
        i = i + 1
        if i % 20 == 0:
            console.log("Processed %d pages." % i)
console.success("Finished crawling.")

with open('jobs.json', 'w') as f:
    json.dump(job_data, f)

console.success("Dumped data.")
Example #4

import os
import pickle as pkl
from console_logging.console import Console

console = Console()

# `dataset_path` and `_clean` are defined earlier in the original script.
def load_data():
    with open(dataset_path, 'r', encoding='utf8') as f:
        lines = f.readlines()
    data = [{
        'emotion': line.split(',')[1][1:-1],
        'raw': _clean(','.join(line.split(',')[3:]))
    } for line in lines]
    return data


data_save_path = os.path.join(os.getcwd(), 'data/data.sav')
if os.path.exists(data_save_path):
    console.log("Reading from save file...")
    data = pkl.load(open(data_save_path, 'rb'))
    console.success("Finished reading data from save.")
else:
    console.log("Did not find a save file.")
    data = load_data()
    pkl.dump(data, open(data_save_path, 'wb'))
    console.success("Created save file.")

console.info("First data is sentence \"%s\" with emotion \'%s\'" %
             (data[0]['raw'], data[0]['emotion']))


def make_wordlists(data):
    wordlist = set()
    mentions = set()
    uppercase = set()
    for datapoint in data:
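
For reference, load_data assumes comma-separated rows where the second field is the emotion wrapped in quotes and everything from the fourth field onward is the raw text. A quick check of that slicing on a hypothetical row:

line = 'id123,"joy",extra,I passed the exam, finally!'
emotion = line.split(',')[1][1:-1]   # 'joy' (surrounding quotes stripped)
raw = ','.join(line.split(',')[3:])  # 'I passed the exam, finally!'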
Example #5
import utils
from classifiers import JobTitle
from console_logging.console import Console

console = Console()

train = utils.load_dataset('features')
console.info("Loaded training dataset.")
test = utils.load_dataset('test')
console.info("Loaded testing dataset.")
pipe = JobTitle.pipe(train)
console.success("Finished training pipe.")

t = [_['title'] for _ in test]  # test inputs: raw job titles
e = [_['categories'][0] for _ in test]  # expected labels: first category of each posting

accuracy = utils.evaluate(pipe, t, e)
console.success("%f accuracy" % accuracy)


def get_analytics():
    analytics = utils.analyze(pipe, t, e, utils.categories(test))
    # console.log('\n'+str(analytics))
    return analytics
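
JobTitle and utils are project-local modules. If the returned pipe follows the usual scikit-learn estimator convention (an assumption this snippet does not confirm), classifying a fresh title would look like:

print(pipe.predict(['Software Engineering Intern'])[0])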
Example #6
import json as j

from console_logging.console import Console
from sanic import Sanic
from sanic.response import json

console = Console()
app = Sanic(__name__)  # app instance assumed; it is not shown in the source snippet

routing_table = dict()
with open('paths.json') as f:
    for d in j.load(f):
        routing_table[d["passkey"]] = d["url"]

console.info("Compiled routing table of %d routes." % len(routing_table))


@app.middleware('response')
async def all_cors(r, s):
    s.headers['Access-Control-Allow-Origin'] = '*'
    s.headers['Access-Control-Allow-Headers'] = '*'


@app.route("/knock", methods=['POST', 'OPTIONS'])
async def whos_there(r):
    if r.method == 'OPTIONS':
        return json({}, status=200)
    if 'name' not in r.json:
        return json({}, status=500)
    console.log("%s@%s is knocking." % (r.json['name'], r.ip))
    if r.json['name'] in routing_table:
        p = routing_table[r.json['name']]
        console.log("%s is answering." % p)
        return json({"url": p}, status=200)
    return json({}, status=401)


if __name__ == "__main__":
    console.success("Starting server.")
    app.run(host="0.0.0.0", port=7734)
Example #7
def get_test_inputs():
    # The opening of this input_fn is an assumed reconstruction; `test_set`
    # is built earlier in the script, mirroring the target line below.
    x = tf.constant(test_set.data)
    y = tf.constant(test_set.target)
    return x, y


##

print("How many steps should we train for?")
maxsteps = int(input('> '))

# Create the classifier. Take maxsteps steps.
classifier.fit(input_fn=get_train_inputs, steps=maxsteps)

# Evaluate loss.
results = classifier.evaluate(input_fn=get_test_inputs, steps=1)
print(results)
console.success('\nFinished with loss {0:f}'.format(results['loss']))

print("\nPlease provide a GPA and test score to chance.")
cur_gpa = float(input('GPA: '))
print("Given " + str(cur_gpa))
test_score = int(input('Test Score: '))


def new_samples():
    # maxgpa / maxtest are scale ceilings defined earlier in the script;
    # rows 0 and 2 bracket the user's (GPA, score) pair in row 1.
    return np.array([[0.0, 0], [cur_gpa, test_score], [maxgpa, maxtest]],
                    dtype=np.float32)


predictions = list(classifier.predict(input_fn=new_samples))
console.success("Made predictions:")