Example #1
import cloud
from numpy import array, cumsum, nonzero


def multimap(fun, args, naccounts=None):
    if naccounts is None:
        naccounts = len(api_keys)

    max_parallel = sum(array(parallelism[:naccounts]))
    if len(args) <= max_parallel:
        naccounts = nonzero(cumsum(parallelism) - len(args) >= 0)[0][0] + 1
        size = parallelism[:naccounts]
    else:
        size = [len(args) / naccounts for _ in xrange(naccounts)]
    if naccounts > 1:
        size[-1] = len(args) - sum(array(size[:-1]))
    else:
        size[0] = len(args)

    # jids[i] contains the job indices for account i
    jids = [None for _ in xrange(naccounts)]

    # Launches the jobs
    k = 0
    for i in xrange(naccounts):
        api_key = api_keys[i]
        api_secretkey = api_secretkeys[i]
        n = size[i]

        args_tmp = args[k:k + n]
        if len(args_tmp) > 0:
            print "Launching %d jobs with account %d..." % (len(args_tmp), i + 1)
            cloud.setkey(api_key=api_key, api_secretkey=api_secretkey)
            jids[i] = cloud.map(fun, args_tmp, _high_cpu=True)
            print "    Jobs:", jids[i]
            k += n
    return jids
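Note that multimap assumes module-level api_keys, api_secretkeys, and parallelism lists (one entry per PiCloud account) defined elsewhere in the original file.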
Example #2
import time

import cloud
import numpy as np


def main():
    cloud.setkey(2329, '270cb3cccb9beb65d2f424b24ccbd5a920c5ccef')
    try:
        fn = raw_input()
        f = open(fn)
        L = float(f.readline())
        line = f.readline()
        data = []
        while line:
            d = map(eval, line.split())
            data.append(d)
            line = f.readline()
        f.close()
        n = len(data)
        Gs = np.array(data[0:n / 2])
        ls = np.array(data[n / 2::])
        outstr = ''
        outstr += 'submitting cross validation to picloud\n'
        cloud.config.max_transmit_data = 12000000
        start = time.time()
        jid = cloud.call(traintst, Gs, ls, L)
        outstr += 'submission time: %s\n' % str(time.time() - start)
        start = time.time()
        result = cloud.result(jid)
        outstr += 'cloud execution time: %s\n' % str(time.time() - start)
        outstr += 'misclassification rate: %f\n' % np.mean(result)
        outstr += 'standard deviation:     %f\n' % np.std(result)
        outstr += '--------done---------------------'
        print outstr
    except Exception as e:
        print 'an error occurred:', e
Example #3
def status(jids):
    naccounts = len(jids)
    statuses = []
    # Retrieves the job statuses account by account
    for i in xrange(naccounts):
        api_key = api_keys[i]
        api_secretkey = api_secretkeys[i]
        cloud.setkey(api_key=api_key, api_secretkey=api_secretkey)
        print "Retrieving status for account %d..." % (i + 1)
        statuses.extend(cloud.status(jids[i]))

    return statuses
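status() only polls job state; collecting the actual outputs across accounts would follow the same account-switching pattern. A minimal sketch (hypothetical, assuming the same api_keys/api_secretkeys globals and the jids layout produced by multimap in Example #1):

def results(jids):
    out = []
    for i in xrange(len(jids)):
        # switch the active account, then fetch all of its job results
        cloud.setkey(api_key=api_keys[i], api_secretkey=api_secretkeys[i])
        out.extend(cloud.result(jids[i]))
    return out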
Example #4
def setup_cloud_queue():
    """
    Attaches an instance of CrawlerWorker to a Queue on PiCloud.
    Worker jobs will run using the s1 core-type (AWS t1.micro) in order to
    avoid being rate-limited. We can scale horizontally by increasing the
    max_parallel_jobs value.
    """
    cloud.setkey(PICLOUD_API_KEY, PICLOUD_API_SECRETKEY)

    url_queue = cloud.queue.get('url_queue')

    url_queue.attach(
        CrawlerWorker(), output_queues=[url_queue], iter_output=True,
        _type='s1', _env='dotcloud_testapp', max_parallel_jobs=20,
    )
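Nothing in this snippet feeds url_queue; a producer sketch (hypothetical, assuming PiCloud's queue push API) could seed it with start URLs:

def seed_cloud_queue(seed_urls):
    # the attached CrawlerWorker jobs pop these URLs and, via
    # output_queues above, re-enqueue any links they discover
    url_queue = cloud.queue.get('url_queue')
    url_queue.push(seed_urls)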
Example #5
def init_cloud():
    cloud.config.force_serialize_debugging = False
    cloud.config.force_serialize_logging = False
    cloud.config.commit()
    cloud.setkey(2579, "f228c0325cf687779264a0b0698b0cfe40148d65")
Example #6
#!/usr/bin/python
#
import cloud

api_key='3355'
api_secretkey='212ed160e3f416fdac8a3b71c90f3016722856b9'
cloud.setkey(api_key, api_secretkey) 

def PRZM_EXAMS(input_list): 

    import os, sys
    lib_path = os.path.abspath('/home/picloud/PRZM_EXAMS_Picloud')
    sys.path.append(lib_path)

    chem_name = input_list[0]
    noa = input_list[1]
    scenarios = input_list[2]
    unit = input_list[3]
    met = input_list[4]
    inp = input_list[5]
    run = input_list[6]
    exam = input_list[7]
    MM = input_list[8]
    DD = input_list[9]
    YY = input_list[10]
    CAM_f = input_list[11]
    DEPI = input_list[12]
    Ar = input_list[13]
    EFF = input_list[14]
    Drft = input_list[15]
    farm = input_list[16]
Example #7
import boto
from ssl import SSLError
from socket import error as SocketError
from httplib import IncompleteRead
import socket
import time
import cloud
import fnmatch
import os
import random
import h5py
import cPickle

PICLOUD_ID = 2579
PICLOUD_SECRET_KEY = 'f228c0325cf687779264a0b0698b0cfe40148d65'
cloud.setkey(PICLOUD_ID, PICLOUD_SECRET_KEY)

AWS_ACCESS_KEY = 'AKIAJSCF3K3HKREPYE6Q'
AWS_SECRET_KEY = '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'


def get_s3_cxn():
    # wait two minutes before retrying S3 operations
    socket.setdefaulttimeout(120)
    s3_cxn = boto.connect_s3(AWS_ACCESS_KEY, AWS_SECRET_KEY)
    if s3_cxn is None:
        raise RuntimeError("Couldn't connect to S3")
    else:
        return s3_cxn
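The SSLError, SocketError, and IncompleteRead imports suggest retry handling elsewhere in this file; a retry wrapper consistent with them (hypothetical names, not from the original source) might look like:

def s3_retry(op, attempts=3):
    # retry a flaky S3 operation on the transient errors imported above
    for i in xrange(attempts):
        try:
            return op()
        except (SSLError, SocketError, IncompleteRead):
            if i == attempts - 1:
                raise
            time.sleep(2 ** i)  # simple exponential backoff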

Example #8
import features
from featurePipeline import FeaturePipeline
from optparse import OptionParser
import os, os.path
import h5py, datetime
import boto
import fnmatch 
import progressbar  
import cloud

cloud.setkey(2579, 'f228c0325cf687779264a0b0698b0cfe40148d65')

extractor = FeaturePipeline()
extractor.add_feature('t', features.millisecond_timestamp)
extractor.add_feature('bid', features.best_bid)
extractor.add_feature('offer', features.best_offer)
extractor.add_feature('bid_range', features.bid_range)
extractor.add_feature('offer_range', features.offer_range)

extractor.add_feature('spread', features.spread)

extractor.add_feature('locked', features.locked)
extractor.add_feature('crossed', features.crossed)

extractor.add_feature('midprice', features.midprice)
extractor.add_feature('bid_vwap', features.bid_vwap)
extractor.add_feature('offer_vwap', features.offer_vwap)

extractor.add_feature('bid_slope', features.bid_slope)
extractor.add_feature('offer_slope', features.offer_slope)
Example #9
'''
Created on Dec 17, 2012

@author: Kevin
'''

from pprint import pprint
import time
import pickle

import cloud
from blackJackWorkers import workerHandEVByCount

from pycloud_config import pycloudKey


cloud.setkey(3220, pycloudKey)


def cloudHandEVByCount(players=6, TOTAL_SHOES=100000000, CHUNKING=100000):
    '''
    Accumulate the calcHandEVByCount of __many__ games using pycloud.
    '''
    CHUNKS = TOTAL_SHOES / CHUNKING
    totEV = {}
    totEVName = "HandEVByCount-%d-p%d.p" % (TOTAL_SHOES, players)
    
    # benchmarking
    start = time.clock()
    
    cloudIds = cloud.map(workerHandEVByCount, (players,)*CHUNKS, (CHUNKING,)*CHUNKS)
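    # The snippet is cut off after the map call. A plausible continuation
    # (hypothetical, not from the original source) would join the jobs,
    # merge the per-chunk EV dictionaries, and pickle the total:
    for chunk in cloud.result(cloudIds):  # blocks until all chunks finish
        for count, ev in chunk.iteritems():
            totEV[count] = totEV.get(count, 0) + ev
    print "elapsed:", time.clock() - start
    pickle.dump(totEV, open(totEVName, 'wb'))
    return totEV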
    
Example #10
    print time.time() - begin,
    print "seconds"


if __name__ == '__main__':
    # Calculate baseline using Intel Core 2 Duo @ 2.33Ghz

    # Find the 10 millionth prime locally
    t = timeit.Timer("get_prime(10000000)", "from __main__ import get_prime")
    print t.timeit(1)  # ~4.6 seconds

    # Read in our keys from config file
    config = ConfigParser.ConfigParser()
    config.read("cloud.config")
    api_key = config.get("PiCloud", "api_key")
    api_secretkey = config.get("PiCloud", "api_secretkey")

    cloud.setkey(int(api_key), api_secretkey)

    # Regular
    test_picloud(cloud, 100000, False)  # ~2.7s
    test_picloud(cloud, 1000000, False)  # ~1.2s
    test_picloud(cloud, 10000000, False)  # ~3.2s
    test_picloud(cloud, 100000000, False)  # ~22s

    # Try high-cpu!
    test_picloud(cloud, 100000, True)  # ~2.7s
    test_picloud(cloud, 1000000, True)  # ~1.2s
    test_picloud(cloud, 10000000, True)  # ~3.2s
    test_picloud(cloud, 100000000, True)  # ~22s
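test_picloud itself is not shown in this snippet; a minimal sketch consistent with the calls and the timing tail above (hypothetical, assuming the get_prime worker that is timed locally) is:

def test_picloud(cloud_mod, n, high_cpu):
    begin = time.time()
    jid = cloud_mod.call(get_prime, n, _high_cpu=high_cpu)
    cloud_mod.result(jid)  # block until the job finishes
    print time.time() - begin,
    print "seconds"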
Example #11
import csv
import pickle

import cloud
from settings_alternative import *


def done_jobs(jobs):
    '''
    Retrieve the results of finished jobs from PiCloud.
    '''
    statuses = cloud.status(list(jobs))
    return list(
        cloud.iresult([
            y[0] for y in filter(lambda x: x[1] == 'done',
                                 zip(list(jobs), statuses))
        ]))


if __name__ == "__main__":

    # connect to PiCloud
    cloud.setkey(KEY_ID, SECRET_KEY)

    # read list of job ids
    if TEST:
        fp = open('jobs_test.%s.pickle' % PARAMETER, 'r')
    else:
        fp = open('jobs.%s.pickle' % PARAMETER, 'r')
    jobs = pickle.load(fp)

    stats = dict()
    stats[PARAMETER] = dict()

    # walk through all parameter values
    for value in ALTERNATIVES[PARAMETER]:
        stats[PARAMETER][value] = dict()
        for stat in STATNAMES:
Example #12
import time
import socket
import urllib
import urllib2
import re
from urllib2 import URLError
import cloud
import os
import redis

# This (horribly written) script reads in a list of domains (one per line, in the form example.com), creates jobs out of those domains, and sends them to PiCloud for processing. You must sign up for PiCloud and put your API key below.

# Current issues:
#  Does not support HTTPS (change it manually)

cloud.setkey("#####", "####################")
set = "dvcspwns"
redis_host = "example.com"
host_list = "dvcshostlist"
#cloud.start_simulator()

# Size of job to run
chunksize = 1000
start = 0
timeout = 10

def chunks(start, size):
    f = open(host_list)
    patterns = []
    count = 0
    lines = []
Example #13
#!/usr/bin/env python

import cloud
import os
cloud.setkey(os.environ['PICLOUD_KEY'], os.environ['PICLOUD_SECRET'])

def sys(cmd) :
  from subprocess import Popen, PIPE

  return Popen(cmd, stdout=PIPE, shell=True).communicate()[0]

def cloudsys(cmd) :
  print cloud.result(cloud.call(sys, cmd)),

from sys import argv
if len(argv) > 1 :
  cloudsys(' '.join(argv[1:]))
  exit()

from sys import stdin
while True :
  print "$ ",
  line = stdin.readline()
  cloudsys(line)
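Invoked with arguments (e.g. python cloudsys.py uname -a), the script runs the command as a single PiCloud job and prints its stdout; with no arguments it falls through to the simple prompt loop at the bottom.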
Example #14
import etl
import cloud
from etl.config.drivers.picloud import config
from etl.drivers.picloud.ebay import PiCloudEBayETLDriver
from etl.loaders.dumps import DumpLoader, get_dump_location
import os
import pickle

class TestPiCloudETLDriver(PiCloudEBayETLDriver):
    
    def __init__(self, *args, **kwargs):
        PiCloudEBayETLDriver.__init__(self, *args, **kwargs)
        self.loader = DumpLoader()
        


def loadfile(filename):
    dumplocation = os.path.abspath(os.path.dirname(os.path.abspath(__file__)))
    path = os.path.join(dumplocation, filename)
    f = open(path, 'r')
    data = f.read()
    data = pickle.loads(data)
    f.close()
    return data




sellerid = 'officeshoes'
cloud.setkey(config['keyid'], config['key'])
Example #15
import sys
if (sys.version_info[0] != 2) or (sys.version_info[1] != 6
                                  and sys.version_info[1] != 7):
    sys.exit(
        "Sorry, this environment currently supports only Python 2.6 and 2.7"
    )

import cloud
cloud.setkey(5529, api_secretkey='d492275c1761470b0add06d6ece2c89d406482d7')
cloud_environment = 'venture-2-' + str(sys.version_info[1])
import time


def get_random_random_seed():
    import urllib2
    usock = urllib2.urlopen(
        "http://www.yuraperov.com/MIT.PCP/get_random_seed.php")
    data = usock.read()
    usock.close()
    return int(data)


NumberOfSamples = 100
IntermediateMHIterations = 100


def sample_once(void_argument):
    import venture.engine
    MyRIPL = venture.engine
    MyRIPL.set_seed(get_random_random_seed())
    # Tricky coin
Example #16
from flask.ext.cache import Cache

import cloud
import boto

app = Flask(__name__)
app.config.from_object('config')

# Configure Jinja2 templating engine
for key in jinja_env:
  setattr(app.jinja_env, key, jinja_env[key])

# Login Setup
lm = LoginManager()
lm.init_app(app)
lm.login_view = 'login'

# MongoDB Setup
mongo = PyMongo(app)

# Cache Setup
cache = Cache(app)

# PiCloud Setup
cloud.setkey(**cloudcreds)

# AWS Setup
s3 = boto.connect_s3()

from whiskey import views, models, forms, gfm
Example #17
    #Access the site, get the raw html
    req = urlopen(reqs[i])
    data = req.read()

    #Parse html, the only thing we care about is the HotelListContainer
    soup = BeautifulSoup.BeautifulSoup(data)
    subset_data = soup.find("div", {'class': 'HotelListContainer'})

    #Now tell me if Curry Village is in the Search Results
    is_curry_village_available = unicode(subset_data).find('Curry Village')
    if is_curry_village_available >= 0: 
      voice = Voice()
      voice.login()
      voice.send_sms(PHONE_NUMBER_TO_TEXT, canonical_names[i] + ': Curry Village is Available')

    #And do the same for Housekeeping Camp
    is_housekeeping_camp_available = unicode(subset_data).find('Housekeeping Camp')
    if is_housekeeping_camp_available >= 0: 
      voice = Voice()
      voice.login()
      voice.send_sms(PHONE_NUMBER_TO_TEXT, canonical_names[i] + ': Housekeeping Camp is Available')

#For testing purposes, run main locally to see if the script works 
#main()

#Set up the job on PiCloud as a recurring cron job
cloud.setkey(PICLOUD_API_KEY_NUMBER, PICLOUD_API_KEY_PASSWORD)
cloud.cron.register(main, 'yosemite_job', '*/10 17-23 * * *', _env='sameep')
cloud.cron.register(main, 'yosemite_job2', '*/10 0-4 * * *', _env='sameep')
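The two schedules fire every ten minutes during 17:00-23:59 and 00:00-04:59 respectively; PiCloud cron times were specified in UTC, so together they cover a roughly 9am-9pm Pacific polling window.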
Example #18
from collections import Counter
from collections import defaultdict
import string
from english_stoplist import stoplist
import cloud
import os
import time
import requests
from StringIO import StringIO

from gutenberg import urls

### don't forget to set your own API key and secret in cloud_config.py
from cloud_config import *
cloud.setkey(key, secret)

def cloud_status(jids):
    s = Counter(cloud.status(jids))
    return s


def url_chunker(url, chunksize=1024):
    """Returns an iterator over contents of a file
        *Params*
        #file - an open FILE object
        #chunksize - how many lines to read at once?
    """
    #url=book[0]
    #bookname=book[1]
    
    user_agent = {'User-agent': 'Mozilla/5.0'}
Example #19
from collections import Counter
import string
from english_stoplist import stoplist
import cloud
import os
import time

### don't forget to set your own API key and secret in cloud_config.py
from cloud_config import *

cloud.setkey(key, secret)

#filename = "../data/hamlet.txt"
#filename = "../data/bacon_the_advancement_of_learning.txt"
filename = "../data/sir_thomas_more.txt"
f = open(filename, 'rb')


def cloud_status(jids):
    stati = cloud.status(jids)
    s = Counter()
    for st in stati:
        s[st] += 1
    return s


def chunker(file, chunksize=1024):
    """Returns an iterator over contents of a file
        *Params*
        #file - an open FILE object
        #chunksize - how many lines to read at once?
Example #20
import cloud
import flask
import bson
import json
import pymongo

from jinja2 import Environment, FileSystemLoader

PICLOUD_API_KEY = 0000
PICLOUD_API_SECRETKEY = '****************************'
MONGO_HOST = 'testapp-jlhawn-data-0.azva.dotcloud.net'
MONGO_PORT = 2778
MONGO_WEB_USERNAME = '******'
MONGO_WEB_PASSWORD = '******'

cloud.setkey(PICLOUD_API_KEY, PICLOUD_API_SECRETKEY)

mongo_client = pymongo.MongoClient(MONGO_HOST, MONGO_PORT)

crawlerdb = mongo_client.crawlerdb
crawlerdb.authenticate(MONGO_WEB_USERNAME, MONGO_WEB_PASSWORD)

app = flask.Flask('testapp')

template_dirs = ['templates','/home/dotcloud/current/app/templates']
template_env = Environment(loader=FileSystemLoader(template_dirs))


def still_processing(job):
    """
    Returns whether a job is still processing which is determined by the fact
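The docstring above is cut off mid-sentence; a minimal body consistent with the function name (hypothetical, assuming PiCloud's terminal status strings, of which 'done' appears in Example #11) would be:

def still_processing(job):
    """A job is in flight until it reaches a terminal status."""
    return cloud.status(job) not in ('done', 'error', 'killed', 'stalled')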
Example #21
from pyevolve_rastrigin import *
import cloud

# Please change these
cloud.setkey('4027', 'xxxx')

# List of Random seeds and run-Ids
# assuming 10 runs
seed_list = [100 * (i + 1) for i in range(10)]
runid_list = [i + 1 for i in range(10)]

jids = cloud.map(run_ga, seed_list, runid_list)
cloud.join(jids)

for i in range(10):
    cloud.files.get('stats_' + str(i + 1) + '.csv', 'stats_' + str(i + 1) + '.csv')

Example #22
from static_ctm import MinTTTLagrangianCTMProblem
from demand import RouteDemand, ODDemand
import cloud
cloud.setkey(1441)
cloud.files.put('../networks/exps/exp8/netd.json')
def runner():
  MinTTTLagrangianCTMProblem.load("netd.json", cloud=True).get_program().cr_solve()
cloud.call(runner, _env='science', _fast_serialization=2)
Example #23
import sys
import pickle

import cloud
cloud.setkey(7513, api_secretkey='ca43a3535fa17e28b687f0f1691c67db261392ae')
cloud_environment = 'Julia'

"""
number_of_clusters = int(sys.argv[1])
if_zero_shortlearning = sys.argv[2] # Should be "yes" or "no"
experiment_name = sys.argv[3]"""

# Usage: python picloud_runner.py 200 10 20


TRIALS = int(sys.argv[1])
NUM_PARTICLES = int(sys.argv[2])
REPETITIONS = int(sys.argv[3])
DATASET = int(sys.argv[4])


def run_on_instance(trial_id):
  global number_of_clusters
  global if_zero_shortlearning
  global experiment_name
  import subprocess
  import os
  os.environ['DISPLAY'] = ":1"
  print "Starting"
  ls_output = subprocess.Popen(["/home/picloud/julia/julia", "putative_runner.jl", str(NUM_PARTICLES), str(trial_id), str(REPETITIONS), str(DATASET)], \
Example #24
    return outdict



if __name__ == '__main__':

    parser = OptionParser()

    parser.add_option('-o', '--outfile', dest='outfile',
                      help='The destination file to write data')
    parser.add_option('-c', '--use-cloud', default=False, action='store_true',
                      dest='usecloud', help='Use PiCloud computing')

    (options, args) = parser.parse_args()

    cloud.setkey(CLOUD_KEY, CLOUD_SECRET)
    if not options.usecloud:
        cloud.start_simulator()

    outdata = ProcessFile(args[0])
    print 'writing data'
    counter = 0
    with open(options.outfile, 'w') as handle:
        l = outdata.keys()
        l.sort()
        for key in l:
            count, frac = outdata[key]
            elm, spec = key

            #print key, val
            counter += 1
            #print key[0]
Example #25
#!/usr/bin/python
# Simple demo to show how to run the Pyevolve
# Evolutionary Algorithms framework on PiCloud
# Pyevolve: http://sourceforge.net/projects/pyevolve/ 
# PiCloud: https://www.picloud.com/

# Amit Saha
# http://echorand.me
# For more details, please refer to this URL:

from pyevolve_rastrigin import *
import cloud

# Please change these
cloud.setkey('4027', 'd7277db0e5846403fa15a13a1dc4776ac1245b92')


# List of Random seeds and run-Ids
# assuming 10 runs
seed_list = [100 * (i + 1) for i in range(10)]
runid_list = [i + 1 for i in range(10)]

# calls the method defined in pyevolve_rastrigin.py
# which initiates the GA execution.
# Execute the code on PiCloud
jids = cloud.map(run_ga, seed_list, runid_list)

# check if the jobs are complete, if yes
# pull the stat files 
cloud.join(jids)
print cloud.files.list()
Example #26
import sys
import pickle

import cloud
cloud.setkey(7513, api_secretkey='ca43a3535fa17e28b687f0f1691c67db261392ae')
cloud_environment = 'Julia'

"""
number_of_clusters = int(sys.argv[1])
if_zero_shortlearning = sys.argv[2] # Should be "yes" or "no"
experiment_name = sys.argv[3]"""

# Usage: python picloud_runner.py 100 50 10 2


TRIALS = int(sys.argv[1])
NUM_PARTICLES = int(sys.argv[2])
DELTA = int(sys.argv[3])
INTEGRAL_PATHS = int(sys.argv[4])


def run_on_instance(trial_id):
  global number_of_clusters
  global if_zero_shortlearning
  global experiment_name
  import subprocess
  import os
  os.environ['DISPLAY'] = ":1"
  print "Starting"
  ls_output = subprocess.Popen(["/home/picloud/julia/julia", "runner.jl", str(NUM_PARTICLES), str(DELTA), str(INTEGRAL_PATHS)], \
Example #27
import cloud

apikey = "mykey"
cloud.setkey(apikey)
Example #28
             'content' : items[url]['content'],

             'final_url' : urls_shares[url]['final_url'],
             'shares': urls_shares[url]['shares_count'],
             'twitter': urls_shares[url]['shares']['twitter'],
             'facebook': urls_shares[url]['shares']['facebook']

             } for (url,s) in related if not url in ret_urls]

        for article in articles:
            ret_urls.add(article['url'])

        if len(articles) > 0:
            cluster_shares = sum([a['shares'] for a in articles])
            ret.append({'articles' : articles, 'cluster_shares' : cluster_shares })

    ret.sort(key=lambda c: -c['cluster_shares'])
    return ret

if __name__ == '__main__':
    import logging
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
    cloud.setkey(keys.picloud_key, keys.picloud_secret)

#    Test
#    ret = feed_cluster(keys.username,keys.password,[])
#    print ret

    print cloud.rest.publish(shares.get_shares_bulk, "get_shares", _type='c1', _env="feedcluster02")
    print cloud.rest.publish(feed_cluster, "feed_cluster", _type='f2', _env="feedcluster02")
Example #29
import cloud
import sys 
import os
lib_path = os.path.abspath('../../..')
sys.path.append(lib_path)
from ubertool_src import keys_Picloud_S3

cloud.setkey(keys_Picloud_S3.picloud_api_key, keys_Picloud_S3.picloud_api_secretkey)   

def pfam(input_list): 
    import os, sys
    lib_path = os.path.abspath('/home/picloud/pfam_picloud')
    sys.path.append(lib_path)
    
    wat_hl = input_list[0]
    wat_t = input_list[1]
    ben_hl = input_list[2]
    ben_t = input_list[3]
    unf_hl = input_list[4]
    unf_t = input_list[5]
    aqu_hl = input_list[6]
    aqu_t = input_list[7]
    hyd_hl = input_list[8]
    mw = input_list[9]
    vp = input_list[10]
    sol = input_list[11]
    koc = input_list[12]
    hea_h = input_list[13]
    hea_r_t = input_list[14]
    noa = input_list[15]
    dd_out = input_list[16]