def test_github_repo(self):
    """Import 'covertutils' straight from its GitHub repository."""
    repo_ctx = httpimport.github_repo(
        'operatorequals',
        'covertutils',
    )
    with repo_ctx:
        import covertutils
    # Reaching this point means the module was imported successfully.
    author = covertutils.__author__
    self.assertTrue(author)
def test_github_repo(self):
    """Import 'test_package' from the httpimport-test GitHub repo, branch 'main'."""
    print("[+] Importing from GitHub")
    repo_ctx = httpimport.github_repo('operatorequals', 'httpimport-test',
                                      module='test_package', branch='main')
    with repo_ctx:
        import test_package
    # Reaching this point means the package was imported successfully.
    self.assertTrue(test_package)
    # Clean up so later tests start from a pristine module cache.
    del sys.modules['test_package']
from flask import Flask, render_template, request, redirect, url_for, session
from httpimport import github_repo

app = Flask(__name__)
# SECURITY NOTE(review): hard-coded secret key checked into source;
# should be loaded from configuration or an environment variable.
app.secret_key = 'w845n#9V!#!!9C!n%6iY'

# Importing Encrypter remotely from GitHub at startup.
with github_repo(username='******', repo='Encrypter', module='ED'):
    import ED


@app.route('/')
def index():
    """Render the landing page, defaulting new sessions to the light theme."""
    if 'dark_theme' not in session:
        session['dark_theme'] = False
    return render_template('index.html', version=ED.__version__,
                           dark_theme=session['dark_theme'])


@app.route('/about')
def about():
    """Render the about page.

    BUGFIX: use session.get() with a default so visiting /about before /
    (when 'dark_theme' is not yet in the session) no longer raises KeyError.
    """
    return render_template('about.html',
                           dark_theme=session.get('dark_theme', False))


@app.route('/light_theme', methods=['POST'])
def light_theme():
    """Switch the session to the light theme and redirect to the index."""
    session['dark_theme'] = False
    return redirect(url_for('index'))
import httpimport
import requests
import time

# WARNING(review): allows httpimport to fetch code over plain HTTP (no TLS) —
# anyone on the network path can inject arbitrary Python.
httpimport.INSECURE = True

# One-shot guard: hot-load 'new_schedule' only on the first loop iteration.
flag = True


def job():
    # Periodic task run by the remotely-imported 'schedule' package.
    print("I'm working...")


# Import the 'schedule' package straight from its GitHub repository.
with httpimport.github_repo('dbader', 'schedule', 'schedule'):
    import schedule

# Run job() every 10 seconds.
schedule.every(10).seconds.do(job)

while True:
    schedule.run_pending()
    time.sleep(1)
    if flag:
        # Load an additional scheduler module from a local HTTP server
        # (presumably serving new_schedule.py on port 5002 — not shown here).
        new_schedule = httpimport.load('new_schedule', 'http://127.0.0.1:5002')
        print(dir(new_schedule))
        new_schedule.add_schedule()
        flag = False
from httpimport import github_repo, bitbucket_repo with github_repo('operatorequals', 'covertutils'): import covertutils print covertutils with bitbucket_repo('atlassian', 'python-bitbucket', module='pybitbucket'): from pybitbucket import auth # import pybitbucket print pybitbucket
import os import requests import json import httpimport with httpimport.github_repo('DigiBP', 'digibp-camunda-external-python-task', 'cam'): import cam class RiskClient: def __init__(self): self.camunda_rest_url = os.environ.get( "CAMUNDA_REST_URL", "https://digibp.herokuapp.com/engine-rest") self.camunda_tenant_id = os.environ.get("CAMUNDA_TENANT_ID", "showcase") self.worker = cam.Client(self.camunda_rest_url) self.worker.subscribe("DetermineRisks", self.determine_risks_callback, self.camunda_tenant_id) self.worker.subscribe("CalculateRetention", self.calculate_retention_callback, self.camunda_tenant_id) self.worker.polling() def determine_risks_callback(self, taskid, body): age = body[0]['variables']['age']['value'] kw = body[0]['variables']['kw']['value'] license_revocation = body[0]['variables']['licenseRevocation']['value'] request = { "variables": { "age": {
def rosenbrock_v(x):
    """Returns the value of Rosenbrock's function at x"""
    return (1 - x[0])**2 + 100*(x[1]- x[0]**2)**2


def rosenbrock(x):
    """Returns the value of rosenbrock's function and its gradient at x """
    val = rosenbrock_v(x)
    # Analytic gradient: [d/dx0, d/dx1] of (1-x0)^2 + 100*(x1-x0^2)^2.
    # NOTE: relies on 'np' (numpy) being imported elsewhere in this file.
    dVdX= np.array([2*(200*x[0]**3 - 200*x[0]*x[1] + x[0]-1),200*(x[1]-x[0]**2)])
    return val, dVdX


import httpimport

# Pull the course helper 'common' package from GitHub (branch 'nn18').
with httpimport.github_repo('janchorowski', 'nn_assignments',
                            module='common', branch='nn18'):
    from common.gradients import check_gradient

# Smoke calls (results discarded — likely notebook-cell residue):
# minimum of Rosenbrock is at (1, 1); also evaluate at the origin.
rosenbrock_v([1,1])
rosenbrock([0,0])

import scipy.optimize as sopt

# Iterate history recorded by the save_hist callback below.
lbfsg_hist = []


def save_hist(x):
    # scipy.optimize callback: store a copy of each iterate.
    lbfsg_hist.append(np.array(x))


# Duplicate of rosenbrock_v as a lambda (same expression).
rosen = lambda x: (1 - x[0])**2 + 100*(x[1]- x[0]**2)**2
rosen([1,1])

# Starting point for the optimization runs (presumably used below this view).
x_start = [0.,2.]
import os.path import torch from torch import nn import torch.nn.functional as F import httpimport with httpimport.github_repo('fxia22', 'pointnet.pytorch', module='pointnet', branch='master'): import pointnet.model MODELS_DIR = 'models' MODELS_EXT = '.dms' MODEL_DEFAULT_NAME = 'trained' def cd(S, T): S = S.permute(0, 2, 1).unsqueeze(2) T = T.permute(0, 2, 1).unsqueeze(1) d = torch.sum(torch.pow(S - T, 2), dim=3) d1 = torch.sum(torch.min(d, dim=1)[0], dim=1) d2 = torch.sum(torch.min(d, dim=2)[0], dim=1) return d1+d2 def elbo_loss(x, reconstruction, z_mean, z_log_sigma2, beta=1.0): N = x.size(0) rec_loss = torch.mean(cd(x, reconstruction)) KL_loss = (-0.5 / N) * ( torch.sum(1.0 + z_log_sigma2 - z_mean.pow(2) - z_log_sigma2.exp())) return (rec_loss + beta * KL_loss,