Example #1
    def train(self, global_t, sess, train_feed):
        losses = []
        local_t = 0
        total_word_num = 0
        start_time = time.time()
        while True:
            batch = train_feed.next_batch()
            if batch is None:
                break
            inputs, input_lens, outputs = batch
            total_word_num += np.sum(input_lens)
            feed_dict = {
                self.inputs: inputs,
                self.input_lens: input_lens,
                self.labels: outputs,
                self.keep_prob: 0.5
            }
            _, loss, summary = sess.run(
                [self.train_ops, self.loss, self.summary_op], feed_dict)
            self.train_summary_writer.add_summary(summary, global_t)
            losses.append(loss)
            global_t += 1
            local_t += 1
            if local_t % 200 == 0:
                utils.progress(local_t / float(train_feed.num_batch))
        # finish epoch!
        utils.progress(1.0)
        epoch_time = time.time() - start_time
        train_loss = np.sum(losses) / total_word_num * train_feed.batch_size
        print("Train loss for %f and perplexity %f step time %.4f" %
              (train_loss, np.exp(train_loss),
               epoch_time / train_feed.num_batch))

        return global_t, train_loss
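
Every snippet on this page reports status through some project-specific progress helper, and the signatures vary: a bare fraction in the example above, (count, total, status) in most of the examples below, and a Kodi progress dialog in the add-on examples near the end. For orientation only, here is a minimal console implementation of the common progress(count, total, status='') form; it is an illustrative sketch, not the actual utils.progress of any project quoted here.

import sys

def progress(count, total, status=''):
    # Minimal carriage-return progress bar (illustrative sketch only).
    bar_len = 40
    filled_len = int(round(bar_len * count / float(total)))
    percents = round(100.0 * count / float(total), 1)
    bar = '=' * filled_len + '-' * (bar_len - filled_len)
    sys.stdout.write('[%s] %s%% ...%s\r' % (bar, percents, status))
    sys.stdout.flush()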
Example #2
def grade(q, task, verbose, callback):
  '''Set up a file-based submission for grading, then call a grading callback.

  Creates a sandbox directory, compiles the team's program, and invokes the
  callback to grade the submission.'''

  correct = False
  metadata = {}

  try:
    sandbox_dir = tempfile.mkdtemp(prefix='proco')
    os.chdir(sandbox_dir)
    
    if verbose:
      utils.progress('Using temporary directory: %s' % sandbox_dir)

    payload = task['payload']
    team_filebase =  task['alias']
    team_extension = task['run_metadata']['extension']
    team_filename = team_filebase + '.' + team_extension
    utils.compile(payload, team_filebase, team_extension, team_filename)
    
    correct = callback(task, team_filebase, team_extension, team_filename, metadata, verbose)

  except GradingException, e:
    utils.progress(e.message)
    metadata['error'] = e.message
Example #3
def knn(trainData, trainLabel, testData, testLabel, k):
    predictions = []
    counter = 1
    t1 = time()
    log = []

    for x in range(len(testData)):
        temp = []
        t0 = time()
        neighbors = getNeighbors(trainData, trainLabel, testData[x], k)
        result = getResponse(neighbors)
        predictions.append(result)
        progress(x + 1, len(testData))
        timepredict = ("%0.3fs" % (time() - t0))
        temp.append(testLabel[x])
        temp.append(result)
        if testLabel[x] == result:
            temp.append("1")
        else:
            temp.append("0")
        log.append(temp)
        counter += 1

    timeextract_feature = ("%0.5fs" % ((time() - t1) / len(testData)))
    accuracy = getAccuracy(testLabel, predictions)
    return accuracy, round(((time() - t1) / len(testData)), 5)
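
The getNeighbors and getResponse helpers are not included in this snippet. A plausible majority-vote getResponse, under the assumption that each neighbor entry carries its label in the last position (the actual helpers are not shown):

from collections import Counter

def getResponse(neighbors):
    # Majority vote over neighbor labels; assumes each entry's last
    # element is the label (an assumption, since the helper is omitted).
    labels = [n[-1] for n in neighbors]
    return Counter(labels).most_common(1)[0][0]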
Example #4
def plotOutErrors(samplesH, samplesSBQ, samplesIID, gm, kernel):
    """
        Compute functions outside the RKHS (Gaussian densities with random covariances) 
        and their integrals with respect to the distribution
    """

    N = 20
    nfuncs = 250
    D = 2 # dim
    
    nSBQ = len(samplesSBQ)                         # plot for as many samples as SBQ
    gramH = np.zeros((nSBQ,nSBQ))
    zH = np.zeros((nSBQ))
    fillGram(gramH,zH,kernel,gm,samplesH)          # BQ weights for kernel herding

    gramSBQ = np.zeros((nSBQ,nSBQ))
    zSBQ = np.zeros((nSBQ))
    fillGram(gramSBQ,zSBQ,kernel,gm,samplesSBQ)    # BQ weights for Bayesian quadrature
    
    weightsH = [email protected](gramH)
    weightsSBQ = [email protected](gramSBQ)
    
    errorsIID = 0; errorsH = 0; errorsH_BQ = 0; errorsSBQ = 0

    stdsig = 2      # parameter for covariance matrix
    ratio = [.1,2]  # parameter for covariance matrix

    for k in range(nfuncs):
        progress(k,nfuncs,'Plot Out Errors')
        beta = 2 * (np.random.rand(N) + 1)
        c = 4 * 2 * (np.random.rand(N, 2) - 1/2)   # mean for kernel
        covs = np.zeros((N, D, D))
        
        for i in range(N):  # renamed from k, which shadowed the outer loop variable
            sig = stdsig * (np.random.rand(D)*(ratio[1] - ratio[0]) + ratio[0])
            covs[i] = np.diag(sig**(-2))
            U, _ = np.linalg.qr(np.random.randn(D,D))
            covs[i] = U @ covs[i] @ U.T

        f = lambda x : np.sum([beta[i] * multivariate_normal.pdf(x, c[i], covs[i]) for i in range(len(beta))], axis=0)

        target = targetOutRKHS(beta, c, gm, covs)
        errorsIID += np.abs(np.cumsum(f(samplesIID)) / np.arange(1, len(samplesIID)+1) - target[None])
        errorsH += np.abs(np.cumsum(f(samplesH)) / np.arange(1, len(samplesH)+1) - target[None])
        errorsH_BQ +=  np.abs(np.cumsum(f(samplesH) * weightsH) - target[None]) 
        errorsSBQ += np.abs(np.cumsum(f(samplesSBQ) * weightsSBQ) - target[None]) 

    errorsIID /= nfuncs; errorsH /= nfuncs; errorsH_BQ /= nfuncs; errorsSBQ /= nfuncs

    plt.figure(figsize=(10, 7))
    plt.plot(errorsSBQ,label="SBQ with BQ weights")
    # plt.plot(errorsH_BQ,label="Herding with BQ weights")
    plt.plot(errorsH,label="Herding with 1/N weights")
    plt.plot(errorsIID,label="iid sampling")
    plt.legend()
    plt.xscale("log")
    plt.yscale("log")
    plt.title("Mean Absolute Error averaged over %s functions outside the RKHS" % nfuncs)
    plt.savefig('figures/Mean Out Error')
    plt.show()
Example #5
def train(self, epoch):
    cost = 0
    target = np.zeros([self.batch_size, self.seq_length])
    N = self.loader.sizes[0]
    for idx in xrange(N):
        target.fill(0)
        x, y, x_char = self.loader.next_batch(0)
        for b in xrange(self.batch_size):
            for t, w in enumerate(y[b]):
                target[b][t] = w

        feed_dict = {
            self.word_inputs: x,
            self.char_inputs: x_char,
            self.true_outputs: target,
        }

        _, loss, step, summary_str = self.sess.run(
            [self.optim, self.loss, self.global_step, self.merged_summary],
            feed_dict=feed_dict)

        self.writer.add_summary(summary_str, step)

        if idx % 50 == 0:
            progress(
                idx / float(N),  # xrange above implies Python 2: avoid integer division
                "epoch: [%2d] [%4d/%4d] loss: %2.6f" % (epoch, idx, N, loss))

        cost += loss

    return cost / N
Example #6
def image_resized(path, path_save=None, size=None, test=None):
    dirs = os.listdir(path)
    clearing_files(path_save)

    imgCount = int(len(dirs))
    count = 1
    if test is None:
        for item in dirs:
            if os.path.isfile(path+item):
                im = Image.open(path+item)
                f, e = os.path.splitext(path+item)
                # print (f)
                imResize = im.resize((size,size), Image.ANTIALIAS)
                progress(count, imgCount, "Resizing Image")
                imResize.save(path_save + item, 'JPEG', quality=100)
                count+=1
    else:
        # for item in dirs:
            # if os.path.isfile(path+item):
        im = Image.open(path)
        # f, e = os.path.splitext(path+item)
        imResize = im.resize((size,size), Image.ANTIALIAS)
        # print (f)
        # progress(count, imgCount)
        # imResize.save(path_save + item, 'JPEG', quality=100)
        # count+=1
        return imResize
Example #7
def get_missing_package( packagesNeed, packagesOrigin, repodata_list, verbose = False ):
    msg                     = "Resolving dependencies"
    packagesToDownload      = packagesNeed.difference( packagesOrigin )
    index                   = 0
    packagesNotFound        = set()

    for package in packagesToDownload:
        repodata        = find_package( package, repodata_list )
        if repodata is not None:
            packagesNeed   |= repodata.get_package_dependencies( package )
        else:
            packagesNotFound.add( package )
        if verbose: print(progress( index,  len(packagesToDownload),msg )),
        index += 1
    max_value = len(packagesToDownload)
    if max_value == 0:
        if index != 0:
            max_value = index
        else:
            max_value   = 1
            index       = 1
    if verbose: print(progress( index,  max_value, msg ))

    packagesToDownload = packagesNeed.difference( packagesOrigin )
    packagesToDownload = packagesToDownload.difference( packagesNotFound )

    return packagesToDownload
Example #8
def SBQ(num_samples, kernel, gm, area, samples=None):
    """
        Sequential Bayesian quadrature: at each step, select the point that
        maximizes the score

        Inputs:
        - num_samples: the number of new samples to select
        - kernel: a Gaussian kernel
        - gm: a Gaussian mixture
        - area: the bounds where to sample
        - samples: the existing samples in the herd (None means start a new
          herd; this avoids the mutable-default pitfall)
    """

    if samples is None:
        samples = []
    n = len(samples)
    gram = np.eye(n + num_samples) * kernel.pdf([0, 0], [0, 0])
    z = np.zeros(n + num_samples)
    fillGram(gram, z, kernel, gm, samples)
    for k in range(n, num_samples + len(samples)):
        progress(k - n, num_samples, status='Bayesian Quadrature')
        samples.append(generate_SBQ(kernel, gm, area, samples, gram, z, k))
        update(k, gram, samples[-1], kernel, gm, samples)

        z[k] = E_Gaussian(np.array([samples[-1]]), gm, kernel)

    return np.array(samples), gram, z
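
The returned Gram matrix and kernel-mean vector give the Bayesian quadrature weights z K^{-1}, exactly as computed for weightsH and weightsSBQ in the plotting example above. A usage sketch, assuming f is a vectorized integrand like the ones built there:

import numpy as np

samples, gram, z = SBQ(100, kernel, gm, area)
weights = z @ np.linalg.inv(gram)    # BQ weights, as in plotOutErrors
estimate = weights @ f(samples)      # quadrature estimate of the integral of f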
Example #9
    def test_folder_wav(self, folder_name):
        """
        Function to extract multi pitch from all .wav files in a folder.
        """
        sess = tf.Session()
        self.load_model(sess, log_dir=config.log_dir)

        file_list = [x for x in os.listdir(folder_name) if x.endswith('.wav') and not x.startswith('.')]

        count = 0

        unprocessable = []

        for file_name in file_list:
            try:
                mel = self.read_wav_file(os.path.join(folder_name, file_name))
                out_mel, out_f0, out_vuv = self.process_file(mel, sess)
                out_featss = np.concatenate((out_mel, out_f0, out_vuv), axis = -1)

                audio_out = utils.feats_to_audio(out_featss) 

                sf.write(os.path.join(config.output_dir,'./{}_output.wav'.format(file_name.split('/')[-1][:-4])), audio_out, config.fs)

                np.save(os.path.join(config.output_dir_np,file_name[:-4]), out_featss)

            except:
                unprocessable.append(file_name)

            count+=1

            utils.progress(count, len(file_list), "Files processed")
Example #10
 def validate_model(self, sess):
     """
     Function to validate the model for each epoch
     """
     # feed_dict = {self.input_placeholder: ins, self.output_placeholder: outs, self.is_train: False}
     #
     # step_loss= sess.run(self.loss, feed_dict=feed_dict)
     # summary_str = sess.run(self.summary, feed_dict=feed_dict)
     # return step_loss, summary_str
     val_list = config.val_list
     start_index = randint(
         0,
         len(val_list) - (config.batches_per_epoch_val + 1))
     pre_scores = []
     acc_scores = []
     rec_scores = []
     count = 0
     for file_name in val_list[start_index:start_index +
                               config.batches_per_epoch_val]:
         pre, acc, rec = self.validate_file(file_name, sess)
         pre_scores.append(pre)
         acc_scores.append(acc)
         rec_scores.append(rec)
         count += 1
         utils.progress(count,
                        config.batches_per_epoch_val,
                        suffix='validation done')
     pre_score = np.array(pre_scores).mean()
     acc_score = np.array(acc_scores).mean()
     rec_score = np.array(rec_scores).mean()
     return pre_score, acc_score, rec_score
Example #11
def generateMeta(inFolder, data, sys):
   root = inFolder
   print("META-INF folder not detected...")
   print("Generating tree structure...")
   print("Generating directories...")
   root = inFolder+de()+"META-INF" 
   progress((lambda: os.mkdir(root)), "Creating "+root+"...") # Validate progress function, wide-use later
   root += de()+"com"
   print "Creating "+root+"...", 
   os.mkdir(root)
   print "   Done!"
   root += de()+"google"
   print "Creating "+root+"...",
   os.mkdir(root)
   print "   Done!"
   root += de()+"android"
   print "Creating "+root+"...",
   os.mkdir(root)
   print "   Done!"
   print "Generating update-script..."   
   generateUpdateScript(root, data, sys, inFolder)
   print "update-script generation complete!"
   response = raw_input("Would you like to inspect the generated script? (y/n): ")
   if response == 'y':
      genScript = open(root+de()+"update-script", 'rb')
      print("###GENERATED SCRIPT (line numbers not written to file):###\n\n")
      i = 1
      for line in genScript.readlines():
         print str(i)+": "+line,
         i += 1
      print("\n\n###END GENERATED SCRIPT###\n")
Example #12
    def mix_model(self):
        sess = tf.Session()
        self.load_model(sess, log_dir=self.config.log_dir)
        val_generator = data_gen(self.config)
        count_batch = 0
        for batch_count, [
                out_audios, out_envelopes, out_features, total_count
        ] in enumerate(val_generator):
            out_features_copy = np.copy(out_features)
            for j in range(int(len(out_features) / 2) - 1):
                out_features[j] = out_features_copy[-1 - j]
                out_features[-1 - j] = out_features_copy[j]

            feed_dict = {self.input_placeholder: out_envelopes[:,:,:self.config.rhyfeats], self.cond_placeholder: out_features,\
             self.output_placeholder: out_audios, self.is_train: False}
            output_full = sess.run(self.output_wav, feed_dict=feed_dict)

            for count in range(self.config.batch_size):
                if self.config.model == "spec":
                    out_audio = utils.griffinlim(
                        np.exp(output_full[count]) - 1, self.config)
                else:
                    out_audio = output_full[count]
                output_file = os.path.join(
                    self.config.output_dir,
                    'output_{}_{}_{}.wav'.format(batch_count, count,
                                                 self.config.model))
                sf.write(output_file, np.clip(out_audio, -1, 1),
                         self.config.fs)
                sf.write(
                    os.path.join(self.config.output_dir,
                                 'gt_{}_{}.wav'.format(batch_count, count)),
                    out_audios[count], self.config.fs)
            utils.progress(batch_count, total_count)
Example #13
def inver_cqt(cqt, dcs, nys, h_size):
    CQIStand = standard.NSGIConstantQ(**kwargs)
    recFrame = []

    for j, i in enumerate(range(0,cqt.shape[0], h_size)):
        cqt_frame = cqt[i:i+h_size]
        # import pdb;pdb.set_trace()
        inv_cqt_frame = CQIStand(cqt_frame.T, dcs[j], nys[j])
        recFrame.append(inv_cqt_frame)
        utils.progress(j, int(cqt.shape[0]/h_size), "Inverse Done")
    frameSize = kwargs['inputSize']

    y = recFrame[0]

    invWindow = Windowing(type='triangular',normalized=False, zeroPhase=False)(standard.essentia.array(np.ones(frameSize)))


    for i in range(1,len(recFrame)):
        y = np.hstack([y,np.zeros(int(frameSize/2))])
        y[-frameSize:] = y[-frameSize:] + recFrame[i] 
        utils.progress(i, len(recFrame), "Overlap Done")
        
    y = y[int(frameSize/2):]

    return y
Example #14
def adaknn(trainData, trainLabel, testData, testLabel, k):
    predictions = []
    log = []

    t3 = time()
    radius = calculateRadius(trainData, trainLabel)
    time_radius = ("%0.3fs" % (time() - t3))
    # print("All calculate radius done in %s" % time_radius)

    t1 = time()
    for x in range(len(testData)):
        temp = []
        t0 = time()
        neighbors = getNeighbors(trainData, trainLabel, testData[x], k, radius)
        result = getResponse(neighbors)
        predictions.append(result)
        progress(x + 1, len(testData))
        timepredict = ("%0.3fs" % (time() - t0))
        temp.append(testLabel[x])
        temp.append(result)
        if testLabel[x] == result:
            temp.append("1")
        else:
            temp.append("0")
        log.append(temp)

    timeextract_feature = ("%0.5fs" % ((time() - t1) / len(testData)))
    accuracy = getAccuracy(testLabel, predictions)
    return accuracy, round(((time() - t1) / len(testData)), 5), time_radius
Example #15
def _news_scrapper():
    for n in config()['news_sites']:
        news_site = config()['news_sites'][n]

        host = news_site['url']
        homepage = news.HomePage(news_site, host)

        total = len(homepage.article_links)
        index = 1
        for link in homepage.article_links:
            article = _fetch_article(news_site, host, link)

            if article and article.title is not None:
                if (db.news.find_one({"title": article.title}) is None):
                    db.news.insert_one({
                        "title": article.title,
                        "content": article.body,
                        "category": article.category,
                        "image": build_link(host, article.image),
                        "date": datetime.datetime.utcnow()
                    })
                    progress(
                        index, total, 'Num of articles: {}'.format(
                            db.news.count_documents({})))
                else:
                    progress(index, total, 'Article already exists!')
            index += 1
        client.close()
Example #16
def feature_extraction():
    model = load_model(CHECKPOINT)
    model = Model(inputs=model.input,
                  outputs=model.get_layer('avg_pool').output)
    model.summary()
    image_labels = []
    resnet_feature_list = []
    image_paths = []
    filenames = glob.glob(os.path.join(TRAIN, '*.jpg'))
    random.shuffle(filenames)
    total = len(filenames)
    i = 0
    print('-----Start feature extraction----')
    for fname in filenames:
        img_path = fname
        filename = os.path.basename(fname)
        img = image.load_img(img_path, target_size=(224, 224))
        img_data = image.img_to_array(img)
        img_data = np.expand_dims(img_data, axis=0)
        img_data = preprocess_input(img_data)
        resnet_feature = model.predict(img_data)
        resnet_feature_np = np.array(resnet_feature)
        resnet_feature_list.append(resnet_feature_np.flatten())
        image_labels.append(filename)
        image_paths.append(img_path)
        utils.progress(i, total=total, status='Feature extraction')
        i = i + 1

    # Save the extracted feature
    np.savez('/home/paul/clustering/viper/features.npz', resnet_feature_list,
             image_paths)
    print('----End of feature extraction----')
Example #17
def _run_test(grader_executer_cmd, team_input, desired_return_code, verbose):
  '''Run an individual test and return whether it was accepted by the grader.'''
  time_limit = 5

  stdin = tempfile.TemporaryFile(bufsize=10485760)
  stdin.write(team_input)
  stdin.flush()
  stdin.seek(0)
  
  if verbose:
    print 'Team input'
    print '----'
    print team_input
    print '----'
  
  grader_executer = subprocess.Popen(grader_executer_cmd, stdin=stdin, stdout=open(os.devnull, 'w'), stderr=open(os.devnull, 'w'), preexec_fn=os.setsid, close_fds=True)    
  start_time = time.time()
  while grader_executer.poll() is None and time.time() - start_time <= time_limit:
    time.sleep(0.5)
  grader_finished = grader_executer.poll() is not None
  if grader_executer.poll() is None:
    if verbose:
      utils.progress('Grader executable did not finish; killing PID %d' % grader_executer.pid)
    os.killpg(grader_executer.pid, signal.SIGKILL)
    return False
  elif grader_executer.returncode != desired_return_code:
    if verbose:
      utils.progress('Grader executable returned %d, not desired %d' % (grader_executer.returncode, desired_return_code))
    return False
  else:
    return True
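
The poll-and-sleep timeout pattern above predates built-in subprocess timeouts; in Python 3 the same check is usually written with subprocess.run. A sketch under that assumption (not the grader's actual code; the helper name is hypothetical):

import subprocess

def run_test_py3(grader_executer_cmd, stdin, desired_return_code, time_limit):
    # Run the grader with a hard timeout and compare return codes.
    try:
        result = subprocess.run(grader_executer_cmd, stdin=stdin,
                                stdout=subprocess.DEVNULL,
                                stderr=subprocess.DEVNULL,
                                timeout=time_limit)
    except subprocess.TimeoutExpired:
        return False
    return result.returncode == desired_return_code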
Example #18
  def train(self, epoch):
    cost = 0
    target = np.zeros([self.batch_size, self.seq_length]) 

    N = self.loader.sizes[0]
    for idx in xrange(N):
      target.fill(0)
      x, y, x_char = self.loader.next_batch(0)
      for b in xrange(self.batch_size):
        for t, w in enumerate(y[b]):
          target[b][t] = w

      feed_dict = {
          self.word_inputs: x,
          self.char_inputs: x_char,
          self.true_outputs: target,
      }

      _, loss, step, summary_str = self.sess.run(
          [self.optim, self.loss, self.global_step, self.merged_summary], feed_dict=feed_dict)

      self.writer.add_summary(summary_str, step)

      if idx % 50 == 0:
        if self.use_progressbar:
          progress(idx/N, "epoch: [%2d] [%4d/%4d] loss: %2.6f" % (epoch, idx, N, loss))
        else:
          print("epoch: [%2d] [%4d/%4d] loss: %2.6f" % (epoch, idx, N, loss))

      cost += loss
    return cost / N
Example #19
def run_sentiment_alignment():
    query_5_result = pd.read_csv("Results/query_5_result.csv")
    user_ratings = query_5_result[['restaurantID', 'rating_y']]
    avg_user_ratings = user_ratings.groupby(['restaurantID']).mean()
    max_sentiment_labels = []
    avg_user_ratings.to_csv("Data/averaged_ratings.csv")
    avg_user_ratings = pd.read_csv("Data/averaged_ratings.csv")

    end = len(avg_user_ratings.index)
    for i in range(end):
        temp1 = query_5_result.loc[
            (query_5_result['restaurantID'] == avg_user_ratings.iloc[i, 0])
            & (query_5_result['Review Sentiment'] == 'positive')]
        temp2 = query_5_result.loc[
            (query_5_result['restaurantID'] == avg_user_ratings.iloc[i, 0])
            & (query_5_result['Review Sentiment'] == 'negative')]
        if len(temp1.index) > len(temp2.index):
            max_sentiment_labels.append('positive')
        else:
            max_sentiment_labels.append('negative')
        utils.progress(i, end - 1, status='Assigning sentiment to restaurants')

    avg_user_ratings['average sentiments'] = max_sentiment_labels
    avg_user_ratings.to_csv("Results/query_6_result.csv", index=False)
    print("The output is generated in Results/query_6_result.csv")
Example #20
def it2fknn(trainData, trainLabel, testData, testLabel, k):
    membership = []
    referenceMembership = []
    testMembership = []
    m = 2

    labTrainSet, labTestSet, legend = remapLabels(rlabTrainSet=trainLabel,
                                                  rlabTestSet=testLabel)
    nClasses = len(legend)
    t0 = time()
    membership = assignMembership(trainData, labTrainSet, k, nClasses)
    timeextract = ("%0.5fs" % ((time() - t0)))
    # print "assign membership >" + timeextract
    correct = 0
    t1 = time()
    for x in range(len(testData)):
        testMembership = computeMembership(testData[x], trainData, membership,
                                           k, m, nClasses)
        predict = computeClass(testMembership)
        progress(x + 1, len(testData))
        if legend[predict] == legend[labTestSet[x] - 1]:
            correct += 1
    timeextract_feature = ("%0.5fs" % ((time() - t1) / len(testData)))
    accuracy = round(((float(correct) / float(len(testData))) * 100.0), 2)
    return accuracy, round(((time() - t1) / len(testData)), 5), timeextract
Example #21
    def train(self):
        """
        Function to train the model, and save Tensorboard summary, for N epochs. 
        """
        sess = tf.Session()

        self.loss_function()
        self.get_optimizers()
        self.load_model(sess, config.log_dir_sep)
        self.get_summary(sess, config.log_dir_sep)
        start_epoch = int(
            sess.run(tf.train.get_global_step()) /
            (config.batches_per_epoch_train))

        print("Start from: %d" % start_epoch)

        for epoch in range(start_epoch, config.num_epochs):
            data_generator = sep_gen()
            start_time = time.time()

            batch_num = 0
            epoch_train_loss = 0

            with tf.variable_scope('Training'):
                for ins, f0s, feats in data_generator:

                    step_loss, summary_str = self.train_model(
                        ins, f0s, feats, sess)
                    if np.isnan(step_loss):
                        import pdb
                        pdb.set_trace()
                    epoch_train_loss += step_loss

                    self.train_summary_writer.add_summary(summary_str, epoch)
                    self.train_summary_writer.flush()

                    utils.progress(batch_num,
                                   config.batches_per_epoch_train,
                                   suffix='training done')

                    batch_num += 1
                    # import pdb;pdb.set_trace()

                epoch_train_loss = epoch_train_loss / batch_num
                print_dict = {"Training Loss": epoch_train_loss}

            # if (epoch + 1) % config.validate_every == 0:
            # pre, acc, rec = self.validate_model(sess)
            # print_dict["Validation Precision"] = pre
            # print_dict["Validation Accuracy"] = acc
            # print_dict["Validation Recall"] = rec

            end_time = time.time()
            if (epoch + 1) % config.print_every == 0:
                self.print_summary(print_dict, epoch, end_time - start_time)
            if (epoch + 1) % config.save_every == 0 or (
                    epoch + 1) == config.num_epochs:
                self.save_model(sess, epoch + 1, config.log_dir_sep)
Example #22
 def _update_progress_bar(to_do, i, N_batches, loss_sum):
     if to_do == 'train':
         status_string = "Training | (Batch " + str(i+1) + "/" + str(N_batches) + ")" + " | L:" + str(round(loss_sum.cpu().item()/(i+1), 3))
         if i == N_batches-1:
             status_string = "Training | (Batch " + str(i+1) + "/" + str(N_batches) + ")"
     elif to_do == 'valid':
         status_string = "Validating | (Batch " + str(i+1) + "/" + str(N_batches) + ")"
     elif to_do == 'forward':
         status_string = "Forwarding | (Batch " + str(i+1) + "/" + str(N_batches) + ")"
     progress(i, N_batches, status=status_string)
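
A usage sketch for the helper above, with hypothetical loader and model_step names standing in for the surrounding training code:

loss_sum = 0.0
for i, batch in enumerate(loader):            # loader is hypothetical
    loss = model_step(batch)                  # hypothetical step returning a torch scalar
    loss_sum = loss_sum + loss.detach()
    _update_progress_bar('train', i, len(loader), loss_sum)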
Example #23
 def make_wallets(amount):
     data = []
     for i in range(amount):
         progress(i,amount-1,'making wallets...')
         pub, pri = ppk_keygen()
         wallet = {'public':pub,\
                   'private':pri}
         data.append(wallet)
     log_info('[client.Client.make_wallets] writing ({}) wallets to pickle...'.format(amount))
     utils.save_pickle(data,WALLETS_DIR)
Example #24
 def mine(self):
     while True:
         progress(self.nounce,0,'[node.block.Block.mine] Mining block with nounce...')
         guess_hash = self.hash_block()
         if guess_hash[:self.mining_difficulty] == '0'*self.mining_difficulty:
             progress(self.nounce,self.nounce,'[node.block.Block.mine] Block mined @ {}'.format(guess_hash))
             break
         self.nounce += 1
     self.block_hash = guess_hash
     return self
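
hash_block is not part of the snippet. A plausible version, assuming the block hashes its serialized header fields (the field names below are hypothetical, and the original 'nounce' spelling is kept):

import hashlib
import json

def hash_block(self):
    # Serialize assumed header fields deterministically and hash them.
    header = json.dumps({'previous_hash': self.previous_hash,
                         'transactions': self.transactions,
                         'nounce': self.nounce}, sort_keys=True)
    return hashlib.sha256(header.encode()).hexdigest()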
Example #25
def extract(filename, secret=123456, length=64, strength=100, partitions=-1):

    print
    print '########## Retrieval started ##########'

    mesh = pymesh.load_mesh(filename)

    if partitions == -1:
        partitions = mesh.num_vertices / 500

    # Partition the watermarked mesh into patches
    patches, _ = partitioning.mesh_partitioning(filename, mesh, partitions)

    print 'Step 1: Mesh patched'

    data = []

    processed = 0
    utils.progress(processed, len(patches), 'Reading data from patches...')

    # Extract the data from all the patches
    for i, patch in enumerate(patches):

        # Get the eigenvectors, either by computing them or by reading them from a file if they have already been computed
        npy_file = 'saved_eig/' + str(partitions) + '/retrieval_' + str(
            i) + '.npy'

        if os.path.exists(npy_file):
            B = np.load(npy_file)
        else:
            B = compute_eigenvectors(patch.num_vertices, patch.faces)
            np.save(npy_file, B)

        # Get the spectral coefficients
        P = np.matmul(B, patch.vertices[:, 0])
        Q = np.matmul(B, patch.vertices[:, 1])
        R = np.matmul(B, patch.vertices[:, 2])

        # Read the data and add it to the list of data retrieved
        data.append(retrieval.read(P, Q, R, strength))

        # Update the progress bar
        processed += 1
        utils.progress(processed, len(patches), 'Reading data from patches...')

    print
    print 'Step 2: Data retrieved'

    # Use a majority vote to get the data then unscramble it
    final_data = unscramble(data_majority(data, length), secret)

    print '########## Retrieval finished ##########'
    print

    return final_data
Example #26
def main():

    # maximus=np.zeros(66)
    # minimus=np.ones(66)*1000
    wav_files = [
        x for x in os.listdir(config.wav_dir_mus)
        if x.endswith('.stem.mp4') and not x.startswith(".")
    ]

    count = 0

    for lf in wav_files:

        # lf = "Actions - One Minute Smile.stem.mp4"
        # print(lf)
        audio, fs = stempeg.read_stems(os.path.join(config.wav_dir_mus, lf),
                                       stem_id=[0, 1, 2, 3, 4])

        mixture = audio[0]

        drums = audio[1]

        bass = audio[2]

        acc = audio[3]

        vocals = audio[4]

        # out_feats = utils.stft_to_feats((vocals[:,0]+vocals[:,1])/2,fs)

        # utils.feats_to_audio(out_feats,lf,fs=fs)

        # import pdb;pdb.set_trace()

        backing = np.clip(drums + bass + acc, 0.0, 1.0)

        if len(backing.shape) == 2:
            backing = (backing[:, 0] + backing[:, 1]) / 2

        # import pdb;pdb.set_trace()

        back_stft = abs(utils.stft(backing))

        hdf5_file = h5py.File(config.backing_dir + 'mus_' + lf[:-9] + '.hdf5',
                              mode='w')

        hdf5_file.create_dataset("back_stft", back_stft.shape, np.float32)

        hdf5_file["back_stft"][:, :] = back_stft

        hdf5_file.close()

        count += 1

        utils.progress(count, len(wav_files))
Example #27
def equalize(i, img_name, path, processed_path, img_names):
    # print('{} out of {}'.format(i, len(img_names)))
    progress("Equalizing", i, len(img_names))
    dicom = pydicom.dcmread(os.path.join(path, img_name))
    img_array, img_processed = equalize_and_convert(dicom)

    data = Image.fromarray(img_array)
    data.convert("L").save(os.path.join(path, img_name + ".bmp"))

    data = Image.fromarray(img_processed)
    data.convert("L").save(os.path.join(processed_path, img_name + ".bmp"))
Example #28
def _run_tests(task, team_filebase, team_extension, team_filename, metadata, verbose):
  '''Execute judge test cases.'''

  time_limit = utils.languages[team_extension]['executer_time_limit'] * task['problem_metadata']['time_multiplier'] 
  num_test_cases = len(task['problem_metadata']['judge_io'])
  
  check_changes(task['problem_metadata']['judge_bugs'][team_extension], task['payload'], team_extension)

  for index, test_case in enumerate(task['problem_metadata']['judge_io']):
    executer_cmd = utils.languages[team_extension]['executer'].substitute(src_filebase=team_filebase, src_filename=team_filename).split()
    
    stdin = tempfile.TemporaryFile(bufsize=52428800)
    stdin.write(test_case['input'])
    stdin.flush()
    stdin.seek(0)

    stdout = tempfile.TemporaryFile(bufsize=52428800)
    
    if verbose:
      stderr = tempfile.TemporaryFile(bufsize=52428800)
    else:
      stderr = open(os.devnull, 'w')
    
    executer = subprocess.Popen(executer_cmd, stdin=stdin, stdout=stdout, stderr=stderr, preexec_fn=os.setsid, close_fds=True)
    start_time = time.time()
    while executer.poll() is None and (time.time() - start_time <= time_limit):
      time.sleep(0.5)
    if executer.poll() is None:
      if verbose:
        utils.progress('Team executable did not finish; killing PID %d after %d seconds' % (executer.pid, time.time() - start_time))
      os.killpg(executer.pid, signal.SIGKILL)
      raise GradingException('Time limit exceeded')
    if executer.returncode != 0:
      if verbose:
        utils.progress('Team executable gave non-zero return code: %d' % executer.returncode)
        stderr.seek(0)
        print stderr.read()
      raise GradingException('Run time error')
    
    stdout.seek(0)
    team_output = stdout.read()
   
    team_output_lines = map(lambda line: line.strip(), team_output.splitlines())
    judge_output_lines = map(lambda line: line.strip(), test_case['output'].splitlines())
    if team_output_lines != judge_output_lines:
      if verbose:
        utils.progress('Failed %2d / %2d' % (index + 1, num_test_cases))
        diff = difflib.Differ()
        sys.stdout.writelines(list(diff.compare(map(lambda line: line + '\n', team_output_lines), map(lambda line: line + '\n', judge_output_lines))))
      raise GradingException('Incorrect output')
    utils.progress('Passed %2d / %2d' % (index + 1, num_test_cases))
  utils.progress('Correct')
  return True
Example #29
def plotRKHSErrors(samplesH, samplesSBQ, samplesIID, gm, kernel):
    """
        Compute functions in the span of the kernel functions
        and their integrals with respect to the distribution
    """

    N = 20
    nfuncs = 250
    
    nSBQ = len(samplesSBQ)                         # plot for as many samples as SBQ
    gramH = np.zeros((nSBQ,nSBQ))
    zH = np.zeros((nSBQ))
    fillGram(gramH,zH,kernel,gm,samplesH)          # BQ weights for kernel herding

    gramSBQ = np.zeros((nSBQ,nSBQ))
    zSBQ = np.zeros((nSBQ))
    fillGram(gramSBQ,zSBQ,kernel,gm,samplesSBQ)    # BQ weights for Bayesian quadrature
    
    weightsH = [email protected](gramH)
    weightsSBQ = [email protected](gramSBQ)
    
    errorsIID = 0; errorsH = 0; errorsH_BQ = 0; errorsSBQ = 0

    for k in range(nfuncs):
        progress(k,nfuncs,'Plot RKHS Errors')
        beta = 10 * (np.random.rand(N) + 1)
        c = 4 * 2 * (np.random.rand(N, 2) - 1/2)   # mean for kernel 
        factor = 0
        for i in range(N):  # renamed from k, which shadowed the outer loop variable
            for l in range(N):
                factor += beta[i] * beta[l] * kernel.pdf(c[i], c[l])
        beta /= (factor)**(1/2)                # normalize to the unit ball of the RKHS
        f = lambda x : np.sum([beta[i] * kernel.pdf(x, c[i]) for i in range(len(beta))], axis=0)
        target = targetRKHS(beta, c, kernel, gm)
        errorsIID += np.abs(np.cumsum(f(samplesIID)) / np.arange(1, len(samplesIID)+1) - target[None])
        errorsH += np.abs(np.cumsum(f(samplesH)) / np.arange(1, len(samplesH)+1) - target[None])
     
        errorsH_BQ +=  np.abs(np.cumsum(f(samplesH) * weightsH) - target[None]) 
        errorsSBQ += np.abs(np.cumsum(f(samplesSBQ) * weightsSBQ) - target[None]) 

    errorsIID /= nfuncs; errorsH /= nfuncs; errorsH_BQ /= nfuncs; errorsSBQ /= nfuncs

    plt.figure(figsize=(10, 7))
    plt.plot(errorsSBQ,label="SBQ with BQ weights")
    # plt.plot(errorsH_BQ,label="Herding with BQ weights")
    plt.plot(errorsH,label="Herding with 1/N weights")
    plt.plot(errorsIID,label="iid sampling")
    plt.legend()
    plt.xscale("log")
    plt.yscale("log")
    plt.title("Mean Absolute Error averaged over %s functions in the RKHS" % nfuncs)
    plt.savefig('figures/Mean Absolute Error')
    plt.show()
Example #30
def clues():
    if not g_all_clues:
        for xd in corpus():  # r in parse_tsv("clues.tsv", "AnswerClue"):
            progress(xd.filename)
            pubid = xd.publication_id()
            dt = xd.date() or ""
            for pos, clue, answer in xd.iterclues():
                ca = ClueAnswer(pubid, dt, answer, clue)
                g_all_clues.append(ca)

        progress()

    return g_all_clues
Example #31
def main():
    if not os.path.exists(SAVE_DIR):
        os.makedirs(SAVE_DIR)
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    print('Using PyTorch device : {}'.format(device.upper()))

    model = ResNet18().to(device)
    train_loader, val_loader, test_loader = utils.get_cifar10_data_loaders(n_train=N_TRAIN, \
        n_val=N_VAL, n_test=N_TEST)
    optimizer = torch.optim.SGD(model.parameters(), lr=1e-1, momentum=0.9)
    xent = nn.CrossEntropyLoss()
    step = 0
    n_epochs = 50
    f = open(SAVE_DIR + LOSS_NAME, 'w')
    f.truncate(0)
    f.write('train_step, train_loss\n')
    f.close()
    MODEL_SAVE_PATH = SAVE_DIR + MODEL_NAME
    assert model.training is True

    for i in range(n_epochs):
        for j, (images, labels) in enumerate(train_loader):
            images, labels = images.to(device), labels.to(device)

            optimizer.zero_grad()
            outputs = model(images)
            loss = xent(outputs, labels)
            loss.backward()
            optimizer.step()
            if step % 16 == 0:
                with open(SAVE_DIR + LOSS_NAME, 'a') as f:
                    f.write('{}, {}\n'.format(step, loss.item()))
            step += 1
            utils.progress(
                j + 1, len(train_loader),
                'Batch [{}/{}] Epoch [{}/{}]'.format(j + 1, len(train_loader),
                                                     i + 1, n_epochs))
        torch.save(model.state_dict(), MODEL_SAVE_PATH.format(i))
    print('Training Complete')
    model.eval()
    correct = 0
    for j, (images, labels) in enumerate(test_loader):
        images, labels = images.to(device), labels.to(device)
        logits = model(images)
        correct += (torch.max(logits, 1)[-1] == labels).sum().item()
        utils.progress(j + 1, len(test_loader),
                       'Batch [{}/{}]'.format(j + 1, len(test_loader)))
    print('Test Accuracy = {}%'.format(float(correct) * 100.0 / N_TEST))
    print('Done')
Example #32
	def go_all_in(self):
		'''
		Function to allow player to go all in
		'''
		all_in_value = 0  # ensure the name is bound even if no bet is placed
		for index, player in enumerate(self.table.players):
			if self == player:
				if player.chips != 0:
					all_in_value = player.chips
					self.table.game.bets[index] += player.chips
					player.chips = 0
					self.all_in = True
					self.talked = True
		self.turn_bet = {'action': 'all_in', 'player_name': self.player_name, 'amount' : all_in_value}
		# Attempt to progress the game
		progress(self.table)
Example #33
def outlierScore(k, tree, data):
    """
        Calculates the outlier score for every data point as the sum of
        distances between the point and its k nearest neighbours.
    """
    scores = []
    for index, pair in enumerate(data):
        utils.progress(index, len(data), status="Calculate the outlierScore")
        neirestNeighbors = tree.nearest((pair[3], pair[4]), num_results=k + 1)
        # tree returns indexes shifted by +1, plus the element itself
        distances = [
            distance2Pairs(pair, data[x - 1]) for x in neirestNeighbors
        ]
        scores.append([index, sum(sorted(distances[:k + 1]))])
    return sorted(scores, key=lambda x: x[1], reverse=True)
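
The tree.nearest((x, y), num_results=...) call and the shifted indexes suggest an rtree-style spatial index. A setup sketch under that assumption (the rtree package, with coordinates in columns 3 and 4 of each row, as the snippet implies):

from rtree import index

tree = index.Index()
for i, pair in enumerate(data):
    # ids are shifted by +1, matching the comment in outlierScore
    tree.insert(i + 1, (pair[3], pair[4], pair[3], pair[4]))
scores = outlierScore(5, tree, data)    # highest scores = strongest outliers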
Example #34
def frnn_vqrs(trainData, trainLabel, testData, testLabel,k):
    labTrainSet, labTestSet, legend = remapLabels(rlabTrainSet=trainLabel, rlabTestSet=testLabel)
    nClasses = len(legend)
    index_testLabel = 0
    t1 = time()
    correct = 0
    for testDataInstance in testData:
        predict = clasifyClass(trainData, labTrainSet, testDataInstance, k, nClasses)
        progress(index_testLabel + 1, len(testData))
        if legend[predict] == legend[labTestSet[index_testLabel]]:
            correct += 1
        index_testLabel += 1
    timeextract_feature = ("%0.5fs" % ((time() - t1) / len(testData)))
    accuracy = round(((float(correct) / float(len(testData))) * 100.0), 2)
    return accuracy, round(((time() - t1) / len(testData)), 5)
Example #35
def herding(num_samples, kernel, gm, area, samples=None):
    """
        Kernel herding: at each step, choose the point that maximizes the
        herding score

        Inputs:
        - num_samples: the number of herding samples
        - kernel: a Gaussian kernel
        - gm: a Gaussian mixture
        - area: the bounds where to do herding
        - samples: the existing samples in the herd (None means start a new
          herd; this avoids the mutable-default pitfall)
    """
    if samples is None:
        samples = []
    for k in range(num_samples):
        progress(k, num_samples, 'Kernel Herding')
        samples.append(generate_herding(kernel, gm, area, samples))
    return np.array(samples)
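
generate_herding itself is not shown. A sketch of the standard herding selection rule, assuming E_Gaussian evaluates the kernel mean embedding mu_p(x) (as it does in the SBQ example) and that area holds [lower, upper] bounds for a uniform candidate draw:

import numpy as np

def generate_herding(kernel, gm, area, samples, num_queries=1000):
    # Score(x) = mu_p(x) - 1/(n+1) * sum_i k(x, x_i); keep the best candidate.
    candidates = np.random.uniform(area[0], area[1], size=(num_queries, 2))
    scores = E_Gaussian(candidates, gm, kernel)          # assumed helper, see SBQ above
    if len(samples) > 0:
        penalty = np.sum([kernel.pdf(candidates, s) for s in samples], axis=0)
        scores = scores - penalty / (len(samples) + 1)
    return candidates[np.argmax(scores)]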
Example #36
def download_packages(workdir, packagesToDownload, repodata_list ):
    msg                 = "Downloading rpm files to resolve dependencies"
    packages_dir        = path.join( workdir, "Packages" )
    index               = 0
    repodata            = None
    packagesNotFound    = set()
    for package in packagesToDownload:
        if package is not None and package != "":
            repodata    = find_package( package, repodata_list )
            if repodata is not None:
                for url in repodata.get_package_url( package ):
                    u = repodata.repo.baseurl + url
                    download_file( u, packages_dir )
        print(progress( index,  len(packagesToDownload), msg )),
        index += 1
    print(progress( index,  len(packagesToDownload), msg ))
Example #37
def get_existing_package( workdir ):
    msg = "Listing rpm files"
    cmd = "rpm -qp --queryformat '%{NAME}'"
    packages            = set()
    index               = 0
    rpm_list            = glob( workdir + "/*.rpm" )
    stdout              = None

    for rpm in rpm_list:
        stdout = command( cmd + " " + rpm, pass_exception = True )
        packages.add( stdout )
        print(progress( index,  len(rpm_list), msg )),
        index += 1
    print(progress( index,  len(rpm_list), msg ))

    return packages
Example #38
def download_file( url, outputdir, force = False, verbose = False ):
    file_name           = path.join( outputdir, url.split('/')[-1] )
    allow_downloading   = False
    status              = ""

    if not path.exists( file_name ) or force == True:
        allow_downloading = True

    if allow_downloading:
        outfile     = open( file_name, 'wb')
        response    = urllib2.urlopen( url )
        meta        = response.info()
        file_size   = int(meta.get("Content-Length"))
        msg         = "Downloading: %s Bytes: %s" % (file_name, file_size)
        file_size_dl    = 0
        block_sz        = 8192
        while True:
            buffer = response.read(block_sz)
            if not buffer:
                break
            file_size_dl += len(buffer)
            outfile.write(buffer)
            if verbose:
                status = progress(file_size_dl, file_size, msg)
                print(status),
        outfile.close()
        if verbose:
            print("")
    return file_name
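
For reference, a rough Python 3 counterpart of the download loop above (urllib2 replaced by urllib.request), reusing the same progress helper; a sketch, not part of the original module:

import urllib.request

def download_file_py3(url, file_name, block_sz=8192):
    # Stream the response in chunks, reporting progress as above.
    with urllib.request.urlopen(url) as response, open(file_name, 'wb') as outfile:
        file_size = int(response.getheader("Content-Length") or 0)
        msg = "Downloading: %s Bytes: %s" % (file_name, file_size)
        file_size_dl = 0
        while True:
            buffer = response.read(block_sz)
            if not buffer:
                break
            file_size_dl += len(buffer)
            outfile.write(buffer)
            print(progress(file_size_dl, file_size, msg), end='')
    return file_name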
Example #39
def _run_tests(task, team_filebase, team_extension, team_filename, metadata, verbose):
  '''Compile interactive grader, and execute judge test cases.'''
  
  if not task['problem_metadata']['grader']['valid']:
    utils.progress('No grader found.')
    raise Exception('No interactive grader found!')
  
  try:
    payload = task['problem_metadata']['grader']['src']
    grader_filebase =  task['problem_metadata']['grader']['filebase']
    grader_extension = task['problem_metadata']['grader']['extension']
    grader_filename = grader_filebase + '.' + grader_extension
    utils.compile(payload, grader_filebase, grader_extension, grader_filename)
  except Exception, e:
    utils.progress('Internal error when compiling grader')
    raise
Example #40
async def tsh(event):
    if event.reply_to_msg_id:
        start = time.time()
        url = await event.get_reply_message()
        ilk = await event.respond("Downloading...")
        try:
            file_path = await url.download_media(progress_callback=lambda d, t: asyncio.get_event_loop().create_task(
                    progress(d, t, ilk, start, "Downloading...")
                ))
        except Exception as e:
            traceback.print_exc()
            print(e)
            await event.respond(f"Downloading Failed\n\n**Error:** {e}")
        
        await ilk.delete()

        try:
            orta = await event.respond("Uploading to TransferSh...")
            download_link, final_date, size = await send_to_transfersh_async(file_path)

            zaman = str(time.time() - start)
            await orta.edit(f"File Successfully Uploaded to TransferSh.\n\nLink:{download_link}\nExpired Date:{final_date}")
        except Exception as e:
            traceback.print_exc()
            print(e)
            await event.respond(f"Uploading Failed\n\n**Error:** {e}")

    raise events.StopPropagation
Example #41
def install(silent=False):
    if utils.ADDON.getSetting('OS') == 'Windows':
        return
    
    if utils.ADDON.getSetting('OS') == 'MacOS':
        installMacOS()
        return


    cmdLine  = utils.getSudo()
    cmdLine +='apt-get update;'
    cmdLine +='sudo apt-get -y install openvpn;'
    cmdLine +='sudo apt-get -y install psmisc'

    dp = None
    if not silent:
        dp = utils.progress('Installing VPN application', 'Please be patient this may take a few minutes')

    p = subprocess.Popen(cmdLine, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

    (stdout, stderr) = p.communicate()

    if silent:
        return

    import xbmc
    xbmc.sleep(100)
    dp.close()

    success = path.getPath(utils.ADDON.getSetting('OS'), silent=True)

    if success:
        utils.dialogOK('VPN application successfully installed')
    else:
        utils.dialogOK('VPN application installation failed', 'Please try again later')
Example #42
	def bet(self,amount):
		'''
		Function to allow player to place a bet of a given amount
		'''
		if self.chips > amount:
			for index,player in enumerate(self.table.players):
				if self == player:
					self.table.game.bets[index] += amount
					player.chips -= amount
					self.talked = True

			self.turn_bet = {'action': 'bet', 'player_name': self.player_name, 'amount' : amount}
			# Attempt to progress the game
			progress(self.table)
		else:
			print 'You don\'t have enough chips --> ALL IN !!!'
			self.go_all_in()
Example #43
	def check(self):
		'''
		Function to allow player to check
		'''
		check_allow = True
		player_index = get_player_index(self)
		player_bet = self.table.game.bets[player_index]

		for bet in self.table.game.bets:
			if bet > player_bet:
				check_allow = False

		if check_allow:
			self.talked = True
			self.turn_bet = {'action': 'check', 'player_name': self.player_name}
			# Attempt to progress the game
			progress(self.table)
		else:
			print 'Check not allowed, replay please'
Example #44
	def fold(self):
		'''
		Function to allow player to fold
		'''
		for index, player in enumerate(self.table.players):
			if self == player:
				if self.table.game.bets[index]:
					bet = self.table.game.bets[index]
				else:
					bet = 0
				self.table.game.bets[index] = 0
				self.table.game.pot += bet
				self.talked = True

		# Mark the player as folded
		self.folded = True
		self.turn_bet = {'action': 'fold', 'player_name': self.player_name}
		# Attempt to progress the game
		progress(self.table)
Example #45
	def call(self):
		'''
		Function to allow player to call
		'''
		max_bet = get_max_bet(self.table.game.bets)
		if self.chips > max_bet:
			# Match the highest bet
			for index, player in enumerate(self.table.players):
				if self == player:
					if self.table.game.bets[index] >= 0:
						self.chips += self.table.game.bets[index]
					self.chips -= max_bet
					self.table.game.bets[index] = max_bet
					self.talked = True
			self.turn_bet = {'action': 'call', 'player_name': self.player_name, 'amount' : max_bet}
			# Attempt to progress the game
			progress(self.table)
		else:
			print 'You don\'t have enough chips --> ALL IN !!!'
			self.go_all_in()
Example #46
  def test(self, split_idx, max_batches=None):
    if split_idx == 1:
      set_name = 'Valid'
    else:
      set_name = 'Test'

    N = self.loader.sizes[split_idx]
    if max_batches is not None:
      N = min(max_batches, N)

    self.loader.reset_batch_pointer(split_idx)
    target = np.zeros([self.batch_size, self.seq_length]) 

    cost = 0
    for idx in xrange(N):
      target.fill(0)

      x, y, x_char = self.loader.next_batch(split_idx)
      for b in xrange(self.batch_size):
        for t, w in enumerate(y[b]):
          target[b][t] = w

      feed_dict = {
          self.word_inputs: x,
          self.char_inputs: x_char,
          self.true_outputs: target,
      }

      loss = self.sess.run(self.loss, feed_dict=feed_dict)

      if idx % 50 == 0:
        if self.use_progressbar:
          progress(idx/N, "> %s: loss: %2.6f, perplexity: %2.6f" % (set_name, loss, np.exp(loss)))
        else:
          print(" > %s: loss: %2.6f, perplexity: %2.6f" % (set_name, loss, np.exp(loss)))

      cost += loss

    cost = cost / N
    return cost
Example #47
def _run_tests(task, team_select, team_correct, team_wrong, verbose):
  '''Compile judge and run both test cases on the judge. '''
  
  payload = task['problem_metadata']['grader']['src']
  grader_filebase = task['problem_metadata']['grader']['filebase']
  grader_extension = task['problem_metadata']['grader']['extension']
  grader_filename = grader_filebase + '.' + grader_extension
  utils.compile(payload, grader_filebase, grader_extension, grader_filename)
  grader_executer_cmd = utils.languages[grader_extension]['executer'].substitute(src_filebase=grader_filebase, src_filename=grader_filename).split()
  
  actual_type = task['division_metadata']['type']

  if actual_type == 'correct' or actual_type == 'sometimes':
    utils.progress('Testing team good input')
    if not _run_test(grader_executer_cmd, team_correct, 100, verbose):
      return False

  if actual_type == 'wrong' or actual_type == 'sometimes':
    utils.progress('Testing team bad input')
    if not _run_test(grader_executer_cmd, team_wrong, 200, verbose):
      return False
  
  return True
Example #48
def performUpdate(response, silent):
    try:
        version = response['Version']
        link    = response['Link']
        md5     = response['MD5']
    except:
        return

    path = getDownloadPath()

    if utils.generateMD5(path) != md5:
        if (not silent) and (not utils.yesno(1, 10, 11)):
            return

        dp = None
    
        if not silent:
            dp = utils.progress(1, 14, 15)

        hash   = 0
        count  = 0
        nTries = 3

        if not silent:
            nTries = 1
    
        while (count < nTries) and (hash != md5):
            count += 1
            try:        
                download(link,path,version,dp)
                hash = utils.generateMD5(path)
            except Exception, e:
                utils.deleteFile(path)
                if str(e) == 'Canceled':                    
                    return

        if hash != md5:
            utils.unflagUpdate()
            utils.deleteFile(path)
            utils.setSetting('dVersion', '0.0.0')
            if not silent:
                utils.ok(1, 24, 13)
            return
Example #49
    def doUpgradeFW(self, filename):
        utils.log('doupgradeFW %s' % filename)        

        dp = utils.progress(1, 0, 11)
        
        FUNC     = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_int, ctypes.POINTER(ctypes.c_void_p))      
        theFunc  = FUNC(callback)
        theFile  = ctypes.c_char_p(filename) 
        theDP    = ctypes.c_void_p(id(dp))
        response = self.lib.fl_upgrade_fw(theFile, VID, MANUFACTURER, ctypes.cast(theDP, ctypes.POINTER(ctypes.c_void_p)), theFunc)

        dp.close()

        utils.log('UPGRADE_FW Response = %d' % response)

        check = self.checkResponse(response)
        if check != utils.FLIRC_OK:
            #UPGRADE_FAILED
            return check        

        return utils.UPGRADE_OK
Example #50
    def build_model(self, forward_only, is_copy=True):
        print(" [*] Building a NTM model")

        with tf.variable_scope(self.scope):
            # present start symbol
            if is_copy:
                _, prev_state = self.cell(self.start_symbol, state=None)
                self.save_state(prev_state, 0, self.max_length)

            zeros = np.zeros(self.cell.input_dim, dtype=np.float32)

            tf.get_variable_scope().reuse_variables()
            for seq_length in xrange(1, self.max_length + 1):
                progress(seq_length/float(self.max_length))

                input_ = tf.placeholder(tf.float32, [self.cell.input_dim],
                                        name='input_%s' % seq_length)
                true_output = tf.placeholder(tf.float32, [self.cell.output_dim],
                                             name='true_output_%s' % seq_length)

                self.inputs.append(input_)
                self.true_outputs.append(true_output)

                # present inputs
                _, prev_state = self.cell(input_, prev_state)
                self.save_state(prev_state, seq_length, self.max_length)

                # present end symbol
                if is_copy:
                    _, state = self.cell(self.end_symbol, prev_state)
                    self.save_state(state, seq_length)

                self.prev_states[seq_length] = state

                if not forward_only:
                    # present targets
                    outputs = []
                    for _ in xrange(seq_length):
                        output, state = self.cell(zeros, state)
                        self.save_state(state, seq_length, is_output=True)
                        outputs.append(output)

                    self.outputs[seq_length] = outputs

            if not forward_only:
                for seq_length in xrange(self.min_length, self.max_length + 1):
                    print(" [*] Building a loss model for seq_length %s" % seq_length)

                    loss = sequence_loss(logits=self.outputs[seq_length],
                                        targets=self.true_outputs[0:seq_length],
                                        weights=[1] * seq_length,
                                        average_across_timesteps=False,
                                        average_across_batch=False,
                                        softmax_loss_function=\
                                            binary_cross_entropy_with_logits)

                    self.losses[seq_length] = loss 

                    if not self.params:
                        self.params = tf.trainable_variables()

                    #grads, norm = tf.clip_by_global_norm(
                    #                  tf.gradients(loss, self.params), 5)

                    grads = []
                    for grad in tf.gradients(loss, self.params):
                        if grad is not None:
                            grads.append(tf.clip_by_value(grad,
                                                          self.min_grad,
                                                          self.max_grad))
                        else:
                            grads.append(grad)

                    self.grads[seq_length] = grads
                    self.optims[seq_length] = self.opt.apply_gradients(
                                                  zip(grads, self.params),
                                                  global_step=self.global_step)

        self.saver = tf.train.Saver()
        print(" [*] Build a NTM model finished")
Example #51
def performManualUpdate(response, silent):
    try:
        import xbmcgui
        path = getDownloadPath()
        select_name=['Cancel']
        select_url=['Cancel']
        
        for i in json.loads(response):
        
           cVersion = utils.getSetting('cVersion')
           
           if not cVersion in i['Version']:
               select_name.append(i['Version'])
               select_url.append(i['Link']+'*'+i['Version']+'*'+i['MD5'])
               
        link = select_url[xbmcgui.Dialog().select('Your Current Firmware '+ cVersion , select_name)]
        
        if 'Cancel' in link:
            return
        url = link.split('*')[0]
        version = link.split('*')[1]
        md5 = link.split('*')[2]
        
        if utils.generateMD5(path) != md5:
            if (not silent) and (not utils.yesno(1, 11, 0)):
                return

            dp = None

            if not silent:
                dp = utils.progress(1, 14, 15)
    
            hash   = 0
            count  = 0
            nTries = 3
    
            if not silent:
                nTries = 1
        
            while (count < nTries) and (hash != md5):
                count += 1
                try:        
                    download(url,path,version,dp)
                    hash = utils.generateMD5(path)
                except Exception, e:
                    utils.deleteFile(path)
                    if str(e) == 'Canceled':                    
                        return
    
            if hash != md5:
                utils.unflagUpdate()
                utils.deleteFile(path)
                utils.setSetting('dVersion', '0.0.0')
                if not silent:
                    utils.ok(1, 24, 13)
                return
            
        utils.setSetting('dVersion', version)
        
        
        if not utils.okReboot(1, 23, 16, 18, delay = 15):
            return
    
        reboot()
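
The retry loop above re-downloads until the file's MD5 matches the published digest. utils.generateMD5 is not shown in this example; a minimal sketch of such a helper, assuming it simply streams the file through hashlib:

import hashlib

def md5_of_file(path, chunk_size=8192):
    # Stream the file in chunks so large firmware images never sit in memory.
    h = hashlib.md5()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), b''):
            h.update(chunk)
    return h.hexdigest()

Comparing the hexdigest() string against the expected MD5 mirrors the hash != md5 check in the loop.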
Exemplo n.º 52
0
 
 if verbose:
   stderr = tempfile.TemporaryFile(bufsize=10485760)
 else:
   stderr = open(os.devnull, 'w')
 
 team_executer = subprocess.Popen(team_executer_cmd, stderr=stderr, preexec_fn=team_init)
 grader_executer = subprocess.Popen(grader_executer_cmd, stderr=open(os.devnull, 'w'), preexec_fn=grader_init)
 start_time = time.time()
 while grader_executer.poll() is None and time.time() - start_time <= time_limit:
   time.sleep(0.5)
 
 grader_finished = grader_executer.poll() is not None
 if not grader_finished:
   if verbose:
     utils.progress('Grader executable did not finish; killing PID %d after %d seconds' % (grader_executer.pid, time.time() - start_time))
   os.killpg(grader_executer.pid, signal.SIGKILL)
 if team_executer.poll() is None:
   if verbose:
     utils.progress('Team executable did not finish; killing PID %d after %d seconds' % (team_executer.pid, time.time() - start_time))
   os.killpg(team_executer.pid, signal.SIGKILL)
   if not grader_finished:
     raise GradingException('Time limit exceeded')
   else:
     raise GradingException('Incorrect output')
 if team_executer.returncode != 0:
   if verbose:
      utils.progress('Team executable gave non-zero return code: %d' % team_executer.returncode)
     stderr.seek(0)
     print stderr.read()
   raise GradingException('Run time error')
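
This grader kills runaway programs with os.killpg rather than Popen.kill(), so any processes the team or grader executable forked die with them. A self-contained sketch of the pattern, assuming a POSIX system (preexec_fn and process groups do not exist on Windows):

import os
import signal
import subprocess
import time

# Start the child in its own process group so killpg reaps the child and
# any grandchildren it spawned.
child = subprocess.Popen(['sleep', '60'], preexec_fn=os.setpgrp)
time.sleep(0.5)
if child.poll() is None:
    os.killpg(child.pid, signal.SIGKILL)   # pid == pgid after setpgrp
child.wait()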
Exemplo n.º 53
0
  correct = False
  metadata = {}

  try:
    sandbox_dir = tempfile.mkdtemp(prefix='proco')
    os.chdir(sandbox_dir)
    
    if verbose:
      utils.progress('Using temporary directory: %s' % sandbox_dir)

    payload = task['payload']
    team_filebase =  task['alias']
    team_extension = task['run_metadata']['extension']
    team_filename = team_filebase + '.' + team_extension
    utils.compile(payload, team_filebase, team_extension, team_filename)
    
    correct = callback(task, team_filebase, team_extension, team_filename, metadata, verbose)

  except GradingException, e:
    utils.progress(e.message)
    metadata['error'] = e.message
  except Exception, e:
    utils.progress('Internal error: ' + str(e))
    raise
  finally:
    if verbose:
      utils.progress('NOT removing temporary directory %s; please clean up manually!' % sandbox_dir)
    else:
      shutil.rmtree(sandbox_dir)

  q.put({'correct' : correct, 'metadata' : metadata, 'division_id' : int(task['division_id']), 'team_id' : int(task['team_id']), 'problem_id' : int(task['problem_id'])})
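
Each submission is graded inside a throwaway directory from tempfile.mkdtemp, and in verbose mode the directory is deliberately left behind for inspection. A minimal sketch of that sandbox lifecycle, with the grading work elided:

import os
import shutil
import tempfile

sandbox_dir = tempfile.mkdtemp(prefix='proco')
try:
    os.chdir(sandbox_dir)
    # ... compile and run the submission here ...
finally:
    os.chdir(tempfile.gettempdir())   # step out before deleting the sandbox
    shutil.rmtree(sandbox_dir)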
Exemplo n.º 54
0
	sample_name = sys.argv[3]
	filename = 'samples/%s.txt' % sample_name
	sample_kmers = kmer_store()
	with open(filename) as f:
		for read in f:
			read_kmers = kmers(read.strip(), k)
			for kmer in read_kmers:
				sample_kmers.update(kmer)

	output_filename = 'pickles/%s_kmers_%d.pickle' % (os.path.basename(os.path.normpath(filename)).replace('.txt',''), k)
	with open(output_filename, 'w') as f:
		cPickle.dump(sample_kmers.kmers, f)

elif version == 'genomes':
	full = True if (len(sys.argv) == 4 and sys.argv[3] == 'full') else False
	kmer_spectra = defaultdict(lambda:[0]*20)
	for index, genome_filename in enumerate(progress(filter(lambda x: x.endswith('.fna'), os.listdir('genomes')))):
		kmer_spectrum = {} if full else kmer_store()
		for kmer in kmers(nucleotides_fna('genomes/'+genome_filename), k):
			if full:
				kmer_spectrum[kmer] = kmer_spectrum[kmer]+1 if kmer in kmer_spectrum else 1
			else:
				kmer_spectrum.update(kmer)
		for kmer in kmer_spectrum:
			kmer_spectra[kmer][index] = kmer_spectrum[kmer]

	full_string = 'full_' if full else ''
	with open('pickles/%skmer_spectra_%d.pickle' % (full_string, k), 'w') as f:
		cPickle.dump(dict(kmer_spectra), f)
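
Both branches above rely on a kmers() helper that this example does not show. A plausible, purely hypothetical stand-in is a sliding window over the sequence:

def kmers(seq, k):
    # Hypothetical stand-in for the kmers() helper used above: yield every
    # overlapping length-k substring of seq.
    for i in xrange(len(seq) - k + 1):
        yield seq[i:i + k]

For seq = 'ACGT' and k = 2 this yields 'AC', 'CG', 'GT'.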
Exemplo n.º 55
0
  def attach_experiment(self, experiment_dir, error_key='dev_mae', verbose=True):
    """
    Override this to suit your purposes!
    This method walks each experiment under the supplied experiment_dir and
    aggregates the best result from each into a dictionary. Override it in
    your own implementation if your experiment structure differs;
    alternatively, if an aggregation can be supplied to attach_aggregation,
    your life will be easier.

    :param experiment_dir: String pointing to a valid experiment directory as defined by experimentGenerator
    :returns: 0 on success, <0 for any error.

    .. side-effects:: Upon success, the class variable results will be populated.
    .. note:: A valid experiment directory (if using this as is) should have sufficient permissions and contain a var directory. The var directory should hold one sub-folder per experiment, named like alpha_1__gamma_2/. Each sub-folder should contain a logs directory with a runtime.txt file, i.e. logs/runtime.txt.
    """

    results_dir = '%s/%s' %(experiment_dir,'results')
    var_dir     = '%s/%s' %(experiment_dir,'var')
    if not os.path.isdir(var_dir):
      print('There is no var directory. This directory should have all of the sub experiments beneath it.')
      return -1
    if not os.path.isdir(results_dir):
      if ensure_dir(results_dir) < 0:
        print('There is no results directory. Failed attempt to create directory here: %s.' % (results_dir))
        return -2

    print('Building list of directories...')
    dirs = list()
    num_sep = var_dir.count(os.path.sep)
    level = 1
    # Keep only the immediate sub-directories of var_dir: prune every branch
    # of the walk that sits more than `level` separators below it.
    for root, ds, fs in os.walk(var_dir):
      num_sep_this = root.count(os.path.sep)
      if num_sep + level <= num_sep_this:
        del ds[:]
        continue
      dirs = ds
    
    num_experiments = len(dirs)
    results         = {}

    # ---------------------------------
    # Go over each experiment directory
    # -----------------------
    print('Aggregating results.') 
    num_dirs = len(dirs) 
    param_count = None
    last_params = None
    for p,d in enumerate(dirs):
      runtime_fn = '%s/var/%s/logs/runtime.txt' % (experiment_dir,d) 
      if not os.path.isfile(runtime_fn):
        continue
            
      d_end = d.split('/')[-1] 
      # Hyper parameters are in the name of a directory
      hyper_params = self.parse_key_values(d_end,assignment='_',delimeter='__')
      
      runtime = open(runtime_fn,'r').read().split('\n')    
      min_error = float('inf')
      min_error_exp= None
      
      # -----------------------------------------
      # Gather the results for a given experiment
      # -------------------
      for line in runtime:
        if not line:
          continue
        rt = self.parse_key_values(line)
        # ----
        # See if this current line in the runtime log had the best error.
        try: 
          error = float(rt[error_key])
          if error < min_error:
            min_error = error
            min_error_exp = copy.deepcopy(rt)
        except ValueError:
          raise ValueError('Expecting error key %s to evaluate to a float.' % (error_key))
        except KeyError:
          print('ERROR: Parsing file:', runtime_fn)
          print('- Error key %s does not appear on all lines.' % error_key)
          break
        # -------
      
      # ------- 
      if not min_error_exp:
        if verbose:
          print('WARNING: Unable to find best result in experiment %s.' % (runtime_fn))
        continue

      # Add all the hyper params to the best experiment found
      for key in hyper_params:
        min_error_exp[key] = float(hyper_params[key])
      # Update the values pointed to by every key in min_error_exp
      if not param_count:
        param_count = len(min_error_exp)
        last_params = min_error_exp
      elif len(min_error_exp) != param_count:
        print("Error in file:",runtime_fn)
        print("Irregular number of outputs per line.")
        continue
      else:
        param_count = len(min_error_exp)  
        last_params = min_error_exp

      for key in min_error_exp:
        if not key in results:
          results[key] = dict()
          results[key]['values'] = list()
          results[key]['__longest_sequence__'] = -1  
        val = min_error_exp[key] 
        try:
          val = float(val)
        except (TypeError, ValueError):
          pass
        max_ = max(len(str(val)),len(key),results[key]['__longest_sequence__'])
        results[key]['__longest_sequence__'] = max_
        results[key]['values'].append(val)

      if p % 200 == 0:
        progress(p, num_experiments)
    
    # -------

    progress(num_experiments,num_experiments)
    print('(%d) results' % (len(results)))
    self.results = results
    return 0
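
attach_experiment leans on a parse_key_values helper whose implementation is not shown. A hypothetical sketch consistent with the call sites above (the default assignment and delimeter values are guesses, and the misspelled keyword delimeter is kept to match the call site):

def parse_key_values(s, assignment='=', delimeter=' '):
    # Hypothetical: split s on delimeter, then split each piece on the first
    # occurrence of assignment, collecting the results into a dict.
    out = {}
    for pair in s.split(delimeter):
        if assignment in pair:
            key, _, value = pair.partition(assignment)
            out[key] = value
    return out

With the directory-name arguments used above, parse_key_values('alpha_1__gamma_2', assignment='_', delimeter='__') returns {'alpha': '1', 'gamma': '2'}.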
Exemplo n.º 56
0
def grade(q, task, verbose):
  '''Grades a debug submission.'''
  
  correct = False
  metadata = {}
  
  try:
    payload = json.loads(task['payload'])
    team_select = payload['type']
    team_correct = payload['good']
    team_wrong = payload['bad']
    
    if task['division_metadata']['type'] == team_select:
      try:
        sandbox_dir = tempfile.mkdtemp(prefix='proco')
        os.chdir(sandbox_dir)       
        
        if verbose:
          utils.progress('Using temporary directory: %s' % sandbox_dir)

        correct = _run_tests(task, team_select, team_correct, team_wrong, verbose)
        if correct:
          utils.progress('Correct')
        else:
          utils.progress('Wrong')
      finally:        
        if verbose:
          utils.progress('NOT removing temporary directory %s; please clean up manually!' % sandbox_dir)
        else:
          shutil.rmtree(sandbox_dir)
    else:
      if verbose:
        utils.progress('Team selected %s, should be %s' % (team_select, task['division_metadata']['type']))
      utils.progress('Wrong (solution type mismatch)')
  except Exception, e:
    utils.progress('Internal error: ' + str(e))
    raise
Exemplo n.º 57
0
f = open('assets/tw_ht_corpus_2.txt', 'a')
p = MDB('tweets')
cols = p.client['tweets'].collection_names()
cols.remove('SPB')
cols.remove('EKB')
cols.remove('Moscow')
print cols
i = 0

counts = []
for c in cols:
    ml = p.client['tweets'][c].find()
    counts.append(ml.count())
    
total = sum(counts)
print 'total:', total, 'documents'

for c in cols:
    ml = p.client['tweets'][c].find()
    for t in ml:
        try:
            dt = text_process(t)[0]
            progress(i, total)
            if dt:
                f.write(dt + '\n')
        except Exception as e:
            print e
        finally:
            i += 1
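
The two passes above exist so that progress(i, total) has a denominator before any document is processed. A minimal sketch of the same pattern against a raw pymongo client (MDB is this example's own wrapper; a local mongod holding a 'tweets' database is assumed):

from pymongo import MongoClient

client = MongoClient()
cols = [c for c in client['tweets'].collection_names()
        if c not in ('SPB', 'EKB', 'Moscow')]

# First pass: count everything so the progress bar has a total.
total = sum(client['tweets'][c].find().count() for c in cols)

# Second pass: process every document, advancing one global counter.
i = 0
for c in cols:
    for t in client['tweets'][c].find():
        i += 1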
Exemplo n.º 58
0
 def __iter__(self):
     for line in open(self.filename, 'r'):
         if self.logging:
             progress(self.count, 0, skip=1000, mode=2)
         self.count += 1
         yield unicode(line).split()
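
This __iter__ makes the object a restartable streaming corpus: every iteration reopens the file, so consumers such as gensim's Word2Vec can make several passes without holding the corpus in memory. A stripped-down sketch of the same pattern without the progress logging:

class LineCorpus(object):
    def __init__(self, filename):
        self.filename = filename

    def __iter__(self):
        # Reopening the file on each pass is what makes the corpus restartable.
        for line in open(self.filename, 'r'):
            yield line.split()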
Exemplo n.º 59
0
 judge = AutoJudge()
 print time.strftime('[%H:%M:%S]:', time.localtime()),
 print 'Initialized judge to %s' % judge
 
 while True:
   print time.strftime('[%H:%M:%S]:', time.localtime()),
   task = judge.fetch_task()
   task_type = task['task_type']
   if task_type == 'grade':
     task['run_metadata'] = json.loads(task['run_metadata'])
     print 'Grading run_id %s (team %s, problem %s) of type %s... ' % (task['run_id'], task['team_username'], task['alias'], task['problem_type']),
     utils.reset_progress(False)
     
     problem_metadata, division_metadata = judge.get_cached_metadata(task['problem_id'], task['division_id'], task['problem_metadata_hash'], task['division_metadata_hash'])
     if problem_metadata is None or division_metadata is None:
       utils.progress('Refreshing metadata')
       problem_metadata, division_metadata = judge.update_cached_metadata(task['problem_id'], task['division_id'], task['problem_metadata_hash'], task['division_metadata_hash'])
     
     task['problem_metadata'] = problem_metadata
     task['division_metadata'] = division_metadata
     
     module = utils.import_module(judge.contest_type, task['problem_type'])
     q = multiprocessing.Queue()
     grader = multiprocessing.Process(target=module.grade, args=(q, task, False))
     grader.start()
     result = q.get()
     grader.join()
     print
     judge.submit_judgment(judgment_id=int(task['judgment_id']), **result)
   elif task_type == 'reset':
     judge = AutoJudge()
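
The judge loop runs each grader in a child process and reads the verdict back over a multiprocessing.Queue, calling q.get() before grader.join(). A minimal sketch of that hand-off; the ordering matters, because a child still blocked on writing a large result into the queue's pipe would deadlock a bare join():

import multiprocessing

def _work(q):
    q.put({'correct': True, 'metadata': {}})

if __name__ == '__main__':
    q = multiprocessing.Queue()
    p = multiprocessing.Process(target=_work, args=(q,))
    p.start()
    result = q.get()   # drain the queue first ...
    p.join()           # ... then reap the child
    print(result)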