Example #1
def compute_trajectory_prediction_mse(args, ground_truth, predictions):
    """
		inputs:
			args: model parameters
			ground_truth: list of groudn truth velocities in absolute frame
			predictions: list of predicted velocities 		"""
    avg_mse = 0
    cnt = 0
    mse_list = []
    for pred, gt in zip(predictions, ground_truth):
        avg_mse = 0
        cnt = 0
        # compute average per trajectory
        # tbp_len
        # path from vel for vel
        for t in range(len(pred)):
            # real trajectory global frame
            real_vel_global_frame = gt.vel_vec[t + args.prev_horizon + 1:t +
                                               args.prediction_horizon +
                                               args.prev_horizon + 1, :2]
            real_traj_global_frame = sup.path_from_vel(
                initial_pos=np.array([0, 0]),
                pred_vel=real_vel_global_frame,
                dt=args.dt)
            # pred_horizon = 10
            vel_pred = np.zeros((args.prediction_horizon, args.output_dim))
            error = 0
            # [1,144]
            pred_t = pred[t][0]
            min_error = np.zeros((pred_t.shape[0]))
            # 144 -> prediction_horizon*output_state_dim = 4*n_mixture
            for sample_id in range(pred_t.shape[0]):
                error = np.zeros((args.n_mixtures))
                for mix_idx in range(args.n_mixtures):
                    # plot predicted trajectory global frame
                    for i in range(args.prediction_horizon):
                        idx = i * args.output_pred_state_dim * args.n_mixtures + mix_idx
                        idy = i * args.output_pred_state_dim * args.n_mixtures + mix_idx + args.n_mixtures
                        if args.normalize_data:
                            mu_x = pred_t[sample_id,
                                          idx] / args.sx_vel + args.min_vel_x
                            mu_y = pred_t[sample_id,
                                          idy] / args.sy_vel + args.min_vel_y
                        else:
                            mu_x = pred_t[sample_id, idx]
                            mu_y = pred_t[sample_id, idy]
                        vel_pred[i, :] = [mu_x, mu_y]

                    traj_pred = sup.path_from_vel(initial_pos=np.array([0, 0]),
                                                  pred_vel=vel_pred,
                                                  dt=args.dt)
                    for pred_step in range(args.prediction_horizon):
                        error[mix_idx] += np.linalg.norm(
                            real_traj_global_frame[pred_step, :] -
                            traj_pred[pred_step, :]) / args.prediction_horizon
                min_error[sample_id] = min(error)
            avg_mse = (avg_mse * cnt + min(min_error)) / (cnt + 1)
            cnt += 1
        mse_list.append(avg_mse)
    return avg_mse, mse_list
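Note that avg_mse and cnt are reset at the start of every trajectory, so the first return value is only the running average of the last trajectory, while mse_list carries one average per trajectory. A minimal caller-side sketch for aggregating over all trajectories (the call itself is illustrative):

# Hypothetical aggregation over all trajectories using the second return value
_, mse_list = compute_trajectory_prediction_mse(args, ground_truth, predictions)
overall_mse = float(np.mean(mse_list)) if mse_list else 0.0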
Example #2
    def plot(self, ax, x_scale=1, y_scale=1):
        """
        Plots the trajectories and the static occupancy grid of all agents in this container.
        """
        colormap = pl.get_cmap('rainbow')
        c_norm = pl.matplotlib.colors.Normalize(vmin=0,
                                                vmax=len(
                                                    self.agent_data.keys()))
        scalar_color_map = pl.cm.ScalarMappable(norm=c_norm, cmap=colormap)

        for cnt, id in enumerate(self.agent_data.keys()):
            color_value = scalar_color_map.to_rgba(cnt)
            self.agent_data[id].plot(ax,
                                     color=color_value,
                                     x_scale=1,
                                     y_scale=1)

        #sup.plot_grid(ax, np.array([self.occupancy_grid.center[0], self.occupancy_grid.center[1]]), self.occupancy_grid.gridmap, self.occupancy_grid.resolution, self.occupancy_grid.map_size)
        sup.plot_grid(ax, np.array([0, 0]), self.occupancy_grid.gridmap,
                      self.occupancy_grid.resolution,
                      self.occupancy_grid.map_size)
        ax.set_xlim(
            [-self.occupancy_grid.center[0], self.occupancy_grid.center[0]])
        ax.set_ylim(
            [-self.occupancy_grid.center[1], self.occupancy_grid.center[1]])

        ax.set_aspect('equal')
Example #3
 def delete(self, start, size, allele):
     if allele.lower() == "m":
         if start > self.maternalHaplotypeLength:
             raise ValueError(
                 "The starting point of a mutation cannot be over maternal-haplotype-chromosome length!"
             )
         else:
             startbin, endbin = Support.startendbins(
                 bins=self.maternalHaplotype, start=start, size=size)
             self.maternalHaplotype = self.maternalHaplotype[
                 0:startbin] + self.maternalHaplotype[endbin:]
             self.maternalHaplotypeLength = sum(
                 seg[1] - seg[0] for seg in self.maternalHaplotype)
     elif allele.lower() == "p":
         if start > self.paternalHaplotypeLength:
             raise ValueError(
                 "The starting point of a mutation cannot be over paternal-haplotype-chromosome length!"
             )
         else:
             startbin, endbin = Support.startendbins(
                 bins=self.paternalHaplotype, start=start, size=size)
             self.paternalHaplotype = self.paternalHaplotype[
                 0:startbin] + self.paternalHaplotype[endbin:]
             self.paternalHaplotypeLength = sum(
                 seg[1] - seg[0] for seg in self.paternalHaplotype)
     else:
         raise ValueError(
             "The specified allele should be equal to either M or P (non-case sensitive)!"
         )
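A hedged usage sketch: the object construction follows the __init__ shown in Example #17 below, but the class name and the concrete values here are illustrative assumptions only.

# Hypothetical usage of delete(): remove 5 kb starting at position 10000 on the
# maternal haplotype (the class name "Chromosome" and all values are assumptions)
chromosome = Chromosome(name="chr1", length=1000000, binsize=1000)
chromosome.delete(start=10000, size=5000, allele="M")
chromosome.delete(start=20000, size=5000, allele="p")  # the allele check is case-insensitive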
Example #4
def parse_data(path, verbose=1):
    first = True
    second = True
    k = 0
    counter = 0
    with open(path, "r") as inputFile:
        for line in inputFile:
            if first:
                first = False
                tokens = line.split(",")
                input_size = int(tokens[0])
                output_size = int(tokens[1])
                samples_size = int(tokens[2])
                inputs = numpy.zeros((samples_size, input_size))
                outputs = numpy.zeros((samples_size, output_size))
            elif second:
                second = False
                if verbose:
                    Support.colored_print("Parameters: ", "blue")
                    Support.colored_print(line, "blue")
            else:
                counter += 1
                if counter < samples_size:
                    input, output = line.split('=')
                    for i, e in enumerate(input.split()):
                        inputs[k][i] = float(e.strip())

                    for i, e in enumerate(output.split()):
                        outputs[k][i] = float(e.strip())
                    k += 1
                else:
                    break
    return inputs, outputs, input_size, output_size
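The parser implies a simple text layout: a header line with input_size, output_size and samples_size separated by commas, one free-form parameters line, then one sample per line with space-separated inputs and outputs split by '='. Note that the counter < samples_size check appears to stop before the last declared sample is filled, so its row stays zero. A small round-trip sketch under these assumptions:

# Hypothetical round-trip with a tiny file in the format expected by parse_data
with open("example_data.txt", "w") as f:
    f.write("2,1,3\n")               # input_size, output_size, samples_size
    f.write("learning_rate=0.01\n")  # free-form parameters line
    f.write("0.5 1.2 = 0.7\n")
    f.write("0.1 0.4 = 0.9\n")
    f.write("0.3 0.8 = 0.2\n")
inputs, outputs, input_size, output_size = parse_data("example_data.txt", verbose=0)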
Example #5
 def generateControl(self, gameData, playerId):
     act = Support.randomFromList(self.actionName)
     fr = Support.randomFromList(self.firstName)
     sc = Support.randomFromList(self.secondName)
     th = Support.randomFromList(self.thirdName)
     
     fullName = act + " " + fr + " " + sc + " " + th
     print "Generated control with name " + fullName
     return Control(playerId, fullName, str(gameData.getFreeControlId()))
Example #6
def pipeline(img, isVideo=False):
    # Image Preprocessing
    undst, binary, binary_warped = PreProcessing.preprocess_image(img)

    # Lane Detection Code Start
    lanes, leftx, lefty, rightx, righty, ploty = LaneFinding.get_lane_lines(
        binary_warped, isVideo)

    lcurve, rcurve = Support.get_real_lanes_curvature(ploty, leftx, lefty,
                                                      rightx, righty)

    output = draw_lane_area(undst, binary_warped, ploty, leftx, lefty, rightx,
                            righty, isVideo)

    left_fit, right_fit, dummy = Support.fit_polylines(binary_warped.shape[0],
                                                       leftx,
                                                       lefty,
                                                       rightx,
                                                       righty,
                                                       x_scale_factor=1,
                                                       y_scale_factor=1)

    left_fitx, right_fitx = Support.get_polylines_points(
        ploty, left_fit, right_fit)

    if (isVideo is True):
        lcurve, rcurve = getSmoothedCurveData(lcurve, rcurve)
        left_fitx, right_fitx = getSmoothedLanesData(left_fitx, right_fitx)

    shiftFromLaneCenter_m, side = calculate_shift_from_lane_center(
        binary_warped, left_fitx, right_fitx)

    Font = cv2.FONT_HERSHEY_SIMPLEX
    color = (255, 255, 255)
    cv2.putText(output, 'curve = ' + str((lcurve + rcurve) / 2) + ' m',
                (10, 100), Font, 1, color, 2, cv2.LINE_AA)

    cv2.putText(
        output, 'Vehicle is ' + str(shiftFromLaneCenter_m) + ' (m) ' + side +
        ' of lane center', (10, 150), Font, 1, color, 2, cv2.LINE_AA)
    # Lane Detection Code End

    # Vehicle Detection Code Start
    cars_boxs = get_classified_cars_boxs(undst)
    classified_boxs = Visualisation.draw_boxes(undst,
                                               cars_boxs,
                                               color=(0, 0, 255),
                                               thick=6)
    filtered_boxs, heat_map = get_heat_map_boxs(cars_boxs, undst, isVideo)
    output = Visualisation.draw_boxes(output,
                                      filtered_boxs,
                                      color=(0, 0, 255),
                                      thick=6)
    # Vehicle Detection Code End

    return undst, classified_boxs, heat_map, output
Example #7
async def search(message, args):
    """
        Send a GitHub search result
        https://github.com/nosv1/jc/search?q=&type=wikis
    """

    jc = Support.get_jc_from_channel(message.channel)
    

    query = " ".join(args[2:]).strip()
    results = None

    if query:
        results = Support.search_github(query)

        results_description = ""
        outputted = 0
        for result in results:
            if len(results_description) < 1000:
                outputted += 1

                # **title** - `command`
                title = result['title'].split("`") # title should be >> title `command` >> [title, command, '']
                command = title[1] if len(title) > 1 else ""
                results_description += f"**{title[0]}** - `{command}`\n"
                # results_description += f"**[{title[0]}]({result['link']})**" + (f" - `{command}`\n" if command else "\n")

                # p = result['p'].split("\n") + [" "] # [@jc command help, snippet]
                # results_description += f"`{p[0].strip().replace('**', '')}`\n\n"

                # \/ old, used to be Title \n Command \n Body
                # results_description += f"`{p[0].strip().replace('**', '')}`\n{p[1].strip()}\n\n"


        if not results_description: # no results
            results_description += f"{jc} help\n"


        await Support.simple_bot_response(message.channel,
            title = f"{len(results)} Result{'s' if outputted != 1 else ''}",
            description=results_description
        )

        Logger.log("search", results_description)


    else:
        description = f"`@{jc} ? <search_words>`\n"
        description += f"`@{jc} ? custom embeds`"
        await Support.simple_bot_response(message.channel,
            title="No Search Words Provided",
            description=description,
            reply_message=message
        )

        Logger.log("Bot Reponse", "Simple Help Search")
Example #8
 def plot(self):
     # draw the two end nodes
     x=[self.a1,self.a2]
     y=[self.b1,self.b2]
     plt.scatter(x,y,c='w',marker='o', s=60,edgecolors=self.pc,linewidths=self.c1*self.l, zorder=100)  # s is the marker size
     # draw the member
     plt.plot(x,y,c=self.tc,linewidth=self.c2*self.l,zorder=1)
     # draw the supports
     sp1=Support.Support(self.sx1,self.sy1,self.a1,self.b1,self.l,self.sc)
     sp1.display()
     sp2=Support.Support(self.sx2,self.sy2,self.a2,self.b2,self.l,self.sc)
     sp2.display()
     plt.tight_layout()
Example #9
def logArgs(args):
    Support.log('\n'.join(["Arguments:"] + [
        '\t{}:\t{}'.format(arg, args[arg])
        for arg in args if arg != 'mutations'
    ] + [""]),
                level="INFO")
    Support.log('\n'.join(["Mutations:"] + [
        '\t{}:\t{}'.format(mut, args['mutations'][mut])
        for mut in args['mutations']
        if mut != 'clonalfocal' and mut != 'subclonalfocal'
    ] + [""]),
                level="INFO")
    if 'clonalfocal' in args['mutations']:
        Support.log('\n'.join(['Clonal focal CNAs:'] + [
            '\tmean_len= {}, standard_deviation= {}, quantity= {}'.format(
                key[0], key[1], args['mutations']['clonalfocal'][key])
            for key in args['mutations']['clonalfocal']
        ] + [""]),
                    level="INFO")
    if 'subclonalfocal' in args['mutations']:
        Support.log('\n'.join(['Subclonal focal CNAs:'] + [
            '\tmean_len= {}, standard_deviation= {}, quantity= {}'.format(
                key[0], key[1], args['mutations']['subclonalfocal'][key])
            for key in args['mutations']['subclonalfocal']
        ] + [""]),
                    level="INFO")
Example #10
def draw_hangman(lives):
    if lives == 5:
        sup.draw("Lives 5.txt")
    if lives == 4:
        sup.draw("Lives 4.txt")

    if lives == 3:
        sup.draw("Lives 3.txt")

    if lives == 2:
        sup.draw("Lives 2.txt")

    if lives == 1:
        sup.draw("Lives 1.txt")
Example #11
def DisplaySupportType():
    """
        Purpose: List all Support type for products.
        Argument(s): None
        Return: list of Suppport types.


   """
    #SUPPORT MODULE FOR THE SUPPORT CLASS ( TABLE )
    #----------------------------------------------
    import Support

    #SUPPORT OBJECT
    #--------------
    NewSupport = Support.Support()
    Supports = NewSupport.getAllSupport()

    count = 1

    if (len(Supports) != 0):
        print "No.   REFERENCE NO.   START DATE   END DATE   SUPPORT LEVEL"
        print "---   -------------   ----------   --------   -------------"
        for support in Supports:
            print "{0}   {1:15}          {2:15}       {3:15}     {4}".format(
                count, support[1], support[2], support[3], support[4])
            count = count + 1
Example #12
  def gradients(self, x, loss_function, y_target):
    '''
    Inputs: 
      x: network input 
      loss_function: Since the gradients of the loss function need to be computed, this has to be provided.
      y_target: Target values for the network output. 
    Return value:
      gradients: Gradients of the loss function w.r.t. all weights and biases of the network. 
                 Gradients have a weights and biases member, the indexing starts with 0 for the first hidden layer (W_1, b_1)
                 and ends with the output layer (W_out, b_out)
    '''
    gradients = sup.Variables()
    
    # Outputs of each layer (layer_evaluations[0] is input x)
    layer_evaluations = []
    for layer_idx, layer in enumerate(self.layers):
      layer_evaluations.append(self.evaluateLayer(layer_idx, x))

    # Output equals the evaluation of the last layer
    network_output = self.output(x)

    # Derivative of cost w.r.t. the network output
    # (left as a TODO in the original; one plausible completion assuming a squared-error cost)
    dCost_dy = network_output - y_target
    # Element-wise multiplication with sigmoid derivative (sigmoid is applied element-wise)
    delta_fused = dCost_dy * network_output * (1.0 - network_output)
    
    # Gradient backpropagation
    ## Start from last layer and propagate error gradient through until first layer
    ## Attention!!!: layer_evaluations[0] is the network input while self.layers[0] is the first hidden layer
    for layer_idx in np.arange(len(self.layers)-1, -1, -1):
      logger.debug('Computing the gradient for layer {}'.format(layer_idx))
      # If layer is not last layer, update delta_fused (which is accumulating the back-propagated gradient)
      # TODO: implement backpropagation of gradient for arbitrary number of layers
    return gradients
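The final loop above is left as a TODO. Below is a minimal sketch of the missing backpropagation step, written as a free function; it assumes sigmoid activations throughout, that gradients exposes weights and biases lists (as sup.Variables() appears to), and that each layer's weight matrix is available. None of these assumptions are confirmed by the snippet.

import numpy as np

def backprop_sketch(layer_weights, layer_evaluations, delta_fused, gradients):
    # Hypothetical completion of the loop above: layer_weights[i] is the weight
    # matrix of layer i, layer_evaluations[i] the activation feeding layer i,
    # delta_fused the error at the output layer, and gradients a container with
    # empty weights/biases lists.
    for layer_idx in range(len(layer_weights) - 1, -1, -1):
        layer_input = layer_evaluations[layer_idx]
        gradients.weights.insert(0, np.dot(layer_input.T, delta_fused))
        gradients.biases.insert(0, np.sum(delta_fused, axis=0, keepdims=True))
        if layer_idx > 0:
            # propagate the error through the weights and the sigmoid derivative
            delta_fused = np.dot(delta_fused, layer_weights[layer_idx].T) \
                          * layer_input * (1.0 - layer_input)
    return gradients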
Example #13
def runL1000CDS2(infile, outfile):

    # Read infile
    cdDataframe = pd.read_table(infile, index_col='gene_symbol').fillna(0)

    # Initialize dataframes
    linkDataframe = pd.DataFrame()
    signatureDataframe = pd.DataFrame()

    # Loop through timepoints
    for comparison in cdDataframe.columns:

        # Run L1000CDS2
        resultDict = S.getL1000CDS2Results(cdDataframe, comparison)

        # Add comparison labels
        resultDict['links']['comparison'] = comparison
        resultDict['signatures']['comparison'] = comparison

        # Append dataframes
        linkDataframe = pd.concat([linkDataframe, resultDict['links']])
        signatureDataframe = pd.concat(
            [signatureDataframe, resultDict['signatures']])

    # Write files
    linkDataframe.to_csv(outfile, sep='\t', index=False)
    signatureDataframe.to_csv(outfile.replace('links', 'signatures'),
                              sep='\t',
                              index=False)
Example #14
    def build_new(self):
        logger.info("Build New")
        self.allunits = []
        self.factions = {}
        self.allreinforcements = {}
        self.prefabs = []
        self.objective = None
        self.phase_music = None
        self.map = None
        self.game_constants = Counter()
        self.game_constants['level'] = 0
        self.game_constants['money'] = 0
        self.convoy = []
        self.play_time = 0
        self.support = Support.Support_Graph(
            'Data/support_nodes.txt',
            'Data/support_edges.txt') if cf.CONSTANTS['support'] else None
        self.unlocked_lore = []
        self.statistics = []
        self.market_items = set()

        self.sweep()
        self.generic()

        # Turn tutorial mode off if the difficulty does not start with a tutorial
        if not int(self.mode['tutorial']):
            cf.OPTIONS['Display Hints'] = 0
Example #15
 def plot(self):
     # draw the two end nodes
     x = [self.a1, self.a2]
     y = [self.b1, self.b2]
     plt.scatter(x,
                 y,
                 c='w',
                 marker='o',
                 s=self.c8 * self.l,
                 edgecolors=self.pc,
                 linewidths=self.c7 * self.l,
                  zorder=100)  # s is the marker size
     # draw the member
     plt.plot(x, y, c=self.tc, linewidth=self.c6 * self.l, zorder=1)
     # draw the supports
     sp1 = Support.Support(self.sx1, self.sy1, self.a1, self.b1, self.l,
                           self.sc)
     sp1.display()
     sp2 = Support.Support(self.sx2, self.sy2, self.a2, self.b2, self.l,
                           self.sc)
     sp2.display()
     # draw forces (placeholder)
     pass
     # label the result value
     l1 = np.array([self.a1, self.b1])
     l2 = np.array([self.a2, self.b2])
     # compute the rotation angle of the label text
     if self.a1 == self.a2:
         if self.b1 > self.b2:
             angle = -90
         else:
             angle = 90
     else:
         angle = math.atan((self.b2 - self.b1) /
                           (self.a2 - self.a1)) / (2 * math.pi) * 360
     plt.text((l1[0] + l2[0]) / 2, (l1[1] + l2[1]) / 2,
              str(self.result),
              fontsize=self.c5 * self.l,
              rotation=angle,
              rotation_mode='anchor',
              zorder=1000)
     # draw external forces
     f1 = Force.Force(self.a1, self.b1, self.fx1, self.fy1, self.l, self.ax)
     f1.ForcePlot()
     f2 = Force.Force(self.a2, self.b2, self.fx2, self.fy2, self.l, self.ax)
     f2.ForcePlot()
Example #16
def getSendDataHandler(participant,topic):
	localInterface = Support.doSubnetTranslation(topic.localInterface)

	if topic.transport == TRANSPORT_MC:
		return McSendDataHandler(localInterface,topic,topic.timeToLive)
	if topic.transport == TRANSPORT_UDP:
		return UdpSendDataHandler(topic)

	return None
Example #17
 def __init__(self, name, length, binsize):
     self.name = name
     self.length = length
     self.binsize = binsize
     self.reference = Support.bins(size=self.length, step=self.binsize)
     self.maternalHaplotype = copy.deepcopy(self.reference)
     self.maternalHaplotypeLength = copy.deepcopy(self.length)
     self.paternalHaplotype = copy.deepcopy(self.reference)
     self.paternalHaplotypeLength = copy.deepcopy(self.length)
Example #18
def draw_lane_area(undist,
                   warped,
                   ploty,
                   leftx,
                   lefty,
                   rightx,
                   righty,
                   isVideo=False):
    left_fit, right_fit, dummy = Support.fit_polylines(warped.shape[0],
                                                       leftx,
                                                       lefty,
                                                       rightx,
                                                       righty,
                                                       x_scale_factor=1,
                                                       y_scale_factor=1)

    left_fitx, right_fitx = Support.get_polylines_points(
        ploty, left_fit, right_fit)

    if (isVideo is True):
        left_fitx, right_fitx = getSmoothedLanesData(left_fitx, right_fitx)
    # Create an image to draw the lines on
    warp_zero = np.zeros_like(warped).astype(np.uint8)
    color_warp = np.dstack((warp_zero, warp_zero, warp_zero))

    # Recast the x and y points into usable format for cv2.fillPoly()
    pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
    pts_right = np.array(
        [np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
    pts = np.hstack((pts_left, pts_right))

    # Draw the lane onto the warped blank image
    cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))

    # Warp the blank back to original image space using inverse perspective matrix (Minv)
    newwarp = cv2.warpPerspective(color_warp, Minv,
                                  (undist.shape[1], undist.shape[0]))
    # Combine the result with the original image
    result = cv2.addWeighted(undist, 1, newwarp, 0.3, 0)

    return result
Example #19
 def __init__(self, reference, snplist, snpratio, HEHOratio, ignorelist):
     self.reference = reference
     self.snplist = Support.parseSNPList(snplist)
     self.snpratio = snpratio
     self.HEHOratio = HEHOratio
     self.ignorelist = ignorelist
     self.chromosomes = []
     self.lengths = {}
     self.maternalfa = None
     self.paternalfa = None
     self.numsnps = 0
     self.hetsnps = 0
Example #20
def evaluate(path_network, input):
    Support.colored_print("Loading neural network...", "blue")
    neural_network = NeuralNetwork.NeuralNetwork()
    neural_network.load(path_network)
    Support.colored_print("Evaluating...", "blue")
    result = neural_network.evaluate(input)
    Support.colored_print(result, "pink")
Example #21
def Find_Line(filtered_mask):
    im2, contours, hierarchy = cv2.findContours(filtered_mask, cv2.RETR_LIST,
                                                cv2.CHAIN_APPROX_SIMPLE)

    r, c = 400, 400
    dp = cv2.cvtColor(filtered_mask, cv2.COLOR_GRAY2BGR)
    cv2.line(dp, (c // 2, 1), (c // 2, r), (255, 0, 0), 5)

    midPoint = c // 2

    maxArea = 0
    offset = 0.0
    angle = 0.0

    for cntrs in contours:
        mu = cv2.moments(cntrs, False)
        x, y, w, h = cv2.boundingRect(cntrs)

        #if mu['m00'] > 120.0:
        if True:
            cx = x + (w // 2)  # centre point
            area = mu['m00']
            if area > maxArea:
                maxArea = area
                offset = midPoint - cx
                #offset = cx
                angle = math.atan2(w if offset >= 0 else -w, h)
            cv2.rectangle(dp, (x, y), (x + w, y + h), (0, 255, 0), 2)
        else:
            cv2.rectangle(dp, (x, y), (x + w, y + h), (0, 0, 255), 2)

        #print(area, cx, maxArea)

    cv2.imshow("frame", dp)

    angle = int(Support.remap(angle, -math.pi / 2, math.pi / 2, 0, 255))
    offset = int(127 - Support.remap(offset, 0, c, 0, 255))

    return offset, angle
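cv2.findContours returns three values in OpenCV 3.x but only two in OpenCV 4.x, so the first line of this example is version-dependent. A version-agnostic unpacking, should the snippet need to run on either:

# Works on both OpenCV 3.x (3 return values) and 4.x (2 return values)
ret = cv2.findContours(filtered_mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
contours = ret[0] if len(ret) == 2 else ret[1]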
Example #22
def is_lane_curvature_accepted(ploty, leftx, lefty, rightx, righty):
    left_curverad, right_curverad = Support.get_real_lanes_curvature(
        ploty, leftx, lefty, rightx, righty)

    print(left_curverad, right_curverad)
    if left_curverad > 2000 and right_curverad > 2000:
        return True  # Almost straight lanes
    elif (left_curverad < 80 or right_curverad < 80):
        return False
    # elif (left_fit[0] > 0 and right_fit[0] < 0) or (right_fit[0] > 0 and left_fit[0] < 0):
    #     return False # same curvature wit opposite direction
    else:
        return True
Example #23
async def set_invite_link(message, args, author_perms):
    """
    """

    jc = Support.get_jc_from_channel(message.channel)

    guild = message.guild if message.guild else message.author
    jc_guild = get_jc_guild(guild.id)

    if not jc_guild:  # if not in db, create new one
        jc_guild = Guild(
            guild.id,
            prefix=f"@{Support.get_jc_from_channel(message.channel)}")

    jc_guild.name = guild.name  # set some attrs
    jc_guild.guild = guild if message.guild else None

    jc_guild.edit_guild()

    if validators.url(args[2]):  # link provided

        if message.guild and not author_perms.create_instant_invite:  # missing permission
            await Support.missing_permission("Create Invite", message)
            return

        jc_guild.invite_link = args[2]

        await simple_bot_response(
            message.channel,
            description=
            f"**{jc_guild.name}'s Default Event Invite Link:** {jc_guild.invite_link}"
        )

    elif args[2]:  # invalid link
        await simple_bot_response(
            message.channel,
            title="Invalid Link",
            description=f"`@{jc} {args[1]} <invite_link>`",
            reply_message=message)

    else:  # no link provided
        description = f"**{jc_guild.name}'s Default Event Invite Link:** {jc_guild.invite_link if jc_guild.invite_link else '`None Provided`'}\n\n"

        description += f"`@{jc} {args[1]} <invite_link>`"

        await simple_bot_response(message.channel,
                                  description=description,
                                  reply_message=message)

    jc_guild.edit_guild()
Example #24
def basesize(isize):
    size = 0
    try:
        if isize[-2:] == "kb":
            size = int(isize[:-2]) * 1000
        elif isize[-2:] == "Mb":
            size = int(isize[:-2]) * 1000000
        else:
            size = int(isize)
        return size
    except:
        raise ValueError(
            sp.error(
                "Size must be a number, optionally ending with either \"kb\" or \"Mb\"!"
            ))
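Illustrative behaviour, derived directly from the branches above; note the suffix check is case-sensitive, so only the exact strings "kb" and "Mb" are recognised:

# Examples of basesize() following the parsing rules above
assert basesize("50kb") == 50000
assert basesize("2Mb") == 2000000
assert basesize("1500") == 1500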
Example #25
def guess_character(selected_word, guessed_chars):
    "Prompts the user to make a guess"
    indexes = []
    char = input("Try and guess a character: ")
    char = char.lower()
    selected_word = selected_word.lower()
    if char in selected_word and not(char in guessed_chars):
        print("YOu guessed correctly!")
        indexes = sup.find_in_string(selected_word, char)
        return indexes, [char]
    elif char in guessed_chars:
        print("You already guessed that dummy")
        return [-2], []
    else:
        print("You guessed incorrectly ;(")
        return [-1], [char]
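A hedged caller sketch interpreting the sentinel return values used above ([-2] for an already guessed character, [-1] for a wrong guess); the surrounding game-loop variables are illustrative:

# Hypothetical game-loop step built around guess_character()
indexes, chars = guess_character(selected_word, guessed_chars)
if indexes == [-2]:
    pass                           # character was already guessed, no penalty
elif indexes == [-1]:
    lives -= 1                     # wrong guess costs a life
    guessed_chars.extend(chars)
else:
    guessed_chars.extend(chars)    # correct guess; indexes mark the matched positions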
Example #26
 def generateTasks (self , gameData, fixedTask=None, fixedTime=None):
     for (pid, pdata) in gameData.player.iteritems():
         if not gameData.hasTasks (pid):
             print "Generating task for player " + pid
             
             acts = gameData.getPossibleActions()
             if len(acts) > 0:
                 if fixedTask == None: 
                     ourAct = Support.randomFromList(acts)
                 else:
                     ourAct = acts[fixedTask]
                     
                 if fixedTime == None:
                     runtime = random.randint(self.taskTime[0], self.taskTime[1])
                 else:
                     runtime = fixedTime
                     
                 gameData.currentTasks += [ Task(ourAct[1], ourAct[0], pid, runtime) ]
                 print "> Task added"
Example #27
    def testBatchGradient(self):
        # Manually build a gradient list
        gradient_list = []
        n_layers = 2
        batch_size = 3
        w = np.ones([3, 5])
        b = np.ones([1, 5])
        grad = sup.Variables()
        for i in range(n_layers):
            grad.weights.append(w)
            grad.biases.append(b)

        for j in range(batch_size):
            gradient_list.append(grad)

        # Manually compute batch gradient
        batch_gradient_manual = grad * batch_size

        batch_gradient = self.optimizer.computeBatchGradient(gradient_list)

        self.assertTrue(batch_gradient_manual == batch_gradient)
Example #28
    def testVariables(self):
        var = sup.Variables()
        var.weights.append(np.ones([2, 2]))
        var.weights.append(np.ones([2, 2]))
        var.biases.append(np.ones([1, 2]))
        var.biases.append(np.ones([1, 2]))

        # Multiplication
        var_neg = var * (-1)
        for i in range(len(var)):
            self.assertTrue(
                np.all(var.weights[i] +
                       var_neg.weights[i] == np.zeros_like(var.weights[i])))
            self.assertTrue(
                np.all(var.biases[i] +
                       var_neg.biases[i] == np.zeros_like(var.biases[i])))

        # Addition
        var_add = var + var_neg
        for i in range(len(var_add)):
            self.assertTrue(
                np.all(var_add.weights[i] == np.zeros_like(var.weights[i])))
            self.assertTrue(
                np.all(var_add.biases[i] == np.zeros_like(var.biases[i])))

        # Subtraction
        var_sub = var - var
        for i in range(len(var_sub)):
            self.assertTrue(
                np.all(var_sub.weights[i] == np.zeros_like(var.weights[i])))
            self.assertTrue(
                np.all(var_sub.biases[i] == np.zeros_like(var.biases[i])))

        # Equality
        self.assertTrue(var == var)
        self.assertFalse(var == var_sub)

        # Inequality
        self.assertFalse(var != var)
        self.assertTrue(var != var_sub)
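The two tests above exercise arithmetic and comparison operators on sup.Variables. A minimal sketch of a container that would satisfy them (this is not the actual sup implementation, only an illustration of the implied interface):

import numpy as np

class Variables:
    # Hypothetical minimal container matching the operations exercised in the tests
    def __init__(self):
        self.weights = []
        self.biases = []

    def __len__(self):
        return len(self.weights)

    def __mul__(self, scalar):
        out = Variables()
        out.weights = [w * scalar for w in self.weights]
        out.biases = [b * scalar for b in self.biases]
        return out

    def __add__(self, other):
        out = Variables()
        out.weights = [w + ow for w, ow in zip(self.weights, other.weights)]
        out.biases = [b + ob for b, ob in zip(self.biases, other.biases)]
        return out

    def __sub__(self, other):
        return self + (other * (-1))

    def __eq__(self, other):
        return (len(self) == len(other)
                and all(np.array_equal(w, ow) for w, ow in zip(self.weights, other.weights))
                and all(np.array_equal(b, ob) for b, ob in zip(self.biases, other.biases)))

    def __ne__(self, other):
        return not self.__eq__(other)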
Example #29
def submitEnrichrGenesets(infile, outfile):

    # Read infile
    cdDataframe = pd.read_table(infile, index_col='gene_symbol').fillna(0)

    # Initialize link dataframe
    resultDataframe = pd.DataFrame()

    # Loop through timepoints
    for comparison in cdDataframe.columns:

        # Get Enrichr links
        enrichrLinkDataframe = S.uploadToEnrichr(cdDataframe, comparison)

        # Add comparison label
        enrichrLinkDataframe['comparison'] = comparison

        # Concatenate
        resultDataframe = pd.concat([resultDataframe, enrichrLinkDataframe])

    # Save data
    resultDataframe.to_csv(outfile, sep='\t', index=False)
Example #30
def getReceiveDataHandler(participant,topic):
	key = __makeKey(topic)

	rdh = None
	if key in __ReceiveDataHandler:
		rdh = __ReceiveDataHandler[key]
		if max(rdh.topic.sampleMaxSize,topic.sampleMaxSize) > PACKET_MAX_SIZE:
			message = "Warning: "
			if topic.transport == TRANSPORT_UDP:
				message += "UDP Transport"
			else:
				message += "Same port (%s)" % topic.port
			message += " is used with Topics with 'sampleMaxSize' > %s" % PACKET_MAX_SIZE
			print message
	else:
		localInterface = Support.doSubnetTranslation(topic.localInterface)
		if topic.transport == TRANSPORT_MC:
			rdh = McReceiveDataHandler(localInterface,topic)
		if topic.transport == TRANSPORT_UDP:
			rdh = UdpReceiveDataHandler(localInterface,topic)
		rdh.start()
		__ReceiveDataHandler[key] = rdh
	return rdh
Example #31
    def build_new(self):
        logger.info("Build New")
        self.allunits = []
        self.groups = {}
        self.allreinforcements = {}
        self.prefabs = []
        self.objective = None
        self.map = None
        self.counters = {}
        self.counters['level'] = 0
        self.convoy = []
        self.counters['money'] = 0
        self.play_time = 0
        self.game_constants = []
        self.support = Support.Support_Graph(
            'Data/support_nodes.txt',
            'Data/support_edges.txt') if cf.CONSTANTS['support'] else None
        self.modify_stats = cf.read_growths_file()
        self.unlocked_lore = []
        self.statistics = []
        self.market_items = set()

        self.sweep()
        self.generic()
Example #32
def getEnrichrResults(infile, outfile):

    # Read infile
    enrichrLinkDataframe = pd.read_table(infile,
                                         index_col=['geneset', 'comparison'])

    # Initialize result dataframe
    resultDataframe = pd.DataFrame()

    # Set libraries
    libraries = [
        'ChEA_2016', 'KEGG_2016', 'GO_Biological_Process_2015',
        'GO_Cellular_Component_2015', 'GO_Molecular_Function_2015', 'VirusMINT'
    ]

    # Loop through timepoints, genesets and libraries
    for geneset in enrichrLinkDataframe.index.levels[0]:
        for comparison in enrichrLinkDataframe.index.levels[1]:
            for library in libraries:

                # Get enrichment results
                enrichmentResultDataframe = S.getEnrichmentResults(
                    enrichrLinkDataframe.loc[(geneset, comparison),
                                             'userListId'], library)

                # Add labels
                enrichmentResultDataframe['comparison'] = comparison
                enrichmentResultDataframe['geneset'] = geneset
                enrichmentResultDataframe['library'] = library

                # Concatenate
                resultDataframe = pd.concat(
                    [resultDataframe, enrichmentResultDataframe])

    # Write file
    resultDataframe.to_csv(outfile, sep='\t', index=False)
Example #33
    def load(self, load_info):
        logger.info("Load")
        # Rebuild gameStateObj
        self.allunits = [
            UnitObject.UnitObject(info) for info in load_info['allunits']
        ]
        self.factions = load_info['factions'] if 'factions' in load_info else (
            load_info['groups'] if 'groups' in load_info else {})
        self.allreinforcements = load_info['allreinforcements']
        self.prefabs = load_info['prefabs']
        self.triggers = load_info.get('triggers', dict())
        map_info = load_info['map']
        self.playtime = load_info['playtime']
        self.convoy = [
            ItemMethods.deserialize(item_dict)
            for item_dict in load_info['convoy']
        ]
        self.convoy = [item for item in self.convoy if item]
        self.turncount = load_info['turncount']
        self.game_constants = load_info['game_constants']
        self.level_constants = load_info['level_constants']
        self.objective = CustomObjects.Objective.deserialize(
            load_info['objective']) if load_info['objective'] else None
        self.phase_music = CustomObjects.PhaseMusic.deserialize(
            load_info['phase_music']) if load_info['phase_music'] else None
        support_dict = load_info['support']
        self.talk_options = load_info['talk_options']
        self.base_conversations = load_info['base_conversations']
        self.stateMachine = StateMachine.StateMachine(
            load_info['state_list'][0], load_info['state_list'][1])
        self.statistics = load_info['statistics']
        # self.message = [Dialogue.Dialogue_Scene(scene) for scene in load_info['message']]
        self.message = []
        self.unlocked_lore = load_info['unlocked_lore']
        self.market_items = load_info.get('market_items', set())
        self.mode = load_info.get('mode', self.default_mode())

        # Map
        self.map = SaveLoad.create_map('Data/Level' +
                                       str(self.game_constants['level']))
        if map_info:
            self.map.replay_commands(map_info['command_list'],
                                     self.game_constants['level'])
            self.map.command_list = map_info['command_list']
            for position, current_hp in map_info['HP']:
                self.map.tiles[position].set_hp(current_hp)

        # Statuses
        for index, info in enumerate(load_info['allunits']):
            for s_dict in info['status_effects']:
                if isinstance(s_dict, dict):
                    StatusObject.deserialize(s_dict, self.allunits[index],
                                             self)
                else:
                    self.allunits[index].status_effects.append(s_dict)

        # Support
        if cf.CONSTANTS['support']:
            self.support = Support.Support_Graph('Data/support_nodes.txt',
                                                 'Data/support_edges.txt')
            self.support.deserialize(support_dict)
        else:
            self.support = None

        # Set up blitting surface
        if self.map:
            mapSurfWidth = self.map.width * GC.TILEWIDTH
            mapSurfHeight = self.map.height * GC.TILEHEIGHT
            self.mapSurf = Engine.create_surface((mapSurfWidth, mapSurfHeight))

            self.grid_manager = AStar.Grid_Manager(self.map)
            self.boundary_manager = CustomObjects.BoundaryManager(self.map)

            for unit in self.allunits:
                if unit.position:
                    self.grid_manager.set_unit_node(unit.position, unit)

        self.generic()
        if 'phase_info' in load_info:
            self.phase.current, self.phase.previous = load_info['phase_info']
Example #34
def gradient_descent(train_elements,
                     alpha,
                     numIterations,
                     k,
                     verbose=0,
                     jump_enabled=0):
    inputs = []
    outputs = []
    for e in range(0, len(train_elements)):
        inputs.append(train_elements[e].input)
        outputs.append(train_elements[e].output)

    x = numpy.asarray(inputs)
    y = numpy.asarray(outputs)
    m, n = numpy.shape(x)
    theta = numpy.ones(n)
    x_trans = x.transpose()
    counter_for_jump = 0
    previous_cost = 0
    for i in range(0, numIterations):
        results = []
        for j in range(len(train_elements)):
            results.append(
                knn.get_error_estimation_weighted_on_input(
                    train_elements[j].input, theta,
                    train_elements[j].neighbors_i,
                    train_elements[j].neighbors_o, k, False))

        hypothesis = numpy.asarray(results)
        loss = hypothesis - y
        cost = numpy.sum(loss**2) / (2 * m)
        if verbose:
            Support.colored_print("Iteration %d | Cost: %f" % (i, cost), "red")

        if jump_enabled:
            if previous_cost == cost:
                counter_for_jump += 1
                if counter_for_jump > 10:
                    counter_for_jump = 0
                    if cost > 10:
                        # making jump
                        # selecting random indexes to perturbate
                        indexes_to_perturbate = numpy.random.choice(
                            range(len(theta)),
                            int(float(len(theta)) * 0.4),
                            replace=False)
                        for j in range(len(indexes_to_perturbate)):
                            # selecting random percentage perturbation
                            perturbation_value = random.randint(1, 6) * 0.1
                            perturbated = theta[
                                indexes_to_perturbate[j]] * perturbation_value
                            if random.randint(0, 2) == 0:
                                perturbated *= -1
                            theta[indexes_to_perturbate[j]] = perturbated
                        i -= 1
                        continue
            else:
                previous_cost = cost

        # avg gradient per example
        gradient = numpy.dot(x_trans, loss) / m
        # update
        theta = theta - alpha * gradient

    cost = numpy.sum(loss**2) / (2 * m)
    return theta, cost
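One caveat about the jump logic above: in a Python for loop over range(), reassigning i with i -= 1 does not repeat the iteration, because i is rebound on the next pass, so each perturbation simply consumes one of the numIterations passes. A minimal self-contained demonstration:

# Demonstration that modifying the loop variable does not change the iteration count
count = 0
for i in range(5):
    i -= 1        # has no effect on the next value produced by range()
    count += 1
assert count == 5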
Example #35
#!/usr/bin/python
#coding=utf-8
# import the module
import Support
# call a function from the module
Support.print_func("Test module.")
Example #36
'''
Created on Mar 27, 2014

@author: Mark.E.Frymire
'''
import Support
Support.print_func("#1")
    
from Support import print_func
print_func("#2")


Money = 2000
print Money

def incrementMoney():
    aLocalVar = "Hi, I'm a local string."
    print "Locals:", locals()
    print "Globals:", globals()    
    # Python assumes any variable assigned a value in a function is local, so you can't do this...
    # Money = Money + 1
    # We have to first specify that we're using the global version.
    global Money    
    Money = Money + 1

incrementMoney()
print Money


# Use dir to get the names defined by a module
import math
Example #37
    my_game_tree[curr_node_id].print_node_details()
    curr_gamestate = my_game_tree[curr_node_id].node_gamestate
    empty_found = False
    """Find index of first empty (i.e. 0) in the current gamestate"""
    for game_row_cntr in range(game_config.GAME_DIMENSION * game_config.GAME_DIMENSION):
        for game_column_cntr in range(game_config.GAME_DIMENSION * game_config.GAME_DIMENSION):
            if curr_gamestate[game_row_cntr][game_column_cntr] == 0:
                empty_found = True
                empty_row_idx = game_row_cntr
                empty_col_idx = game_column_cntr
                break
        if empty_found:
            break

    """get list of possible values for empty position"""
    possible_vals = Support.compute_choices(curr_gamestate, empty_row_idx, empty_col_idx)
    print(possible_vals)
    """check if any possible values returned"""
    if len(possible_vals) > 0:
        game_node_level = game_node_level + 1
        print("New tree level:", game_node_level)
        """for each possible value , create a MCTS node with appropriate values"""
        for new_val in possible_vals:
            """tmp_MCTSnode.clear_node()"""
            print("New value:", new_val)
            tmp_gamestate = curr_gamestate
            tmp_gamestate[empty_row_idx][empty_col_idx] = new_val
            game_node_count = game_node_count + 1
            my_game_tree[curr_node_id].node_children.append(game_node_count)
            my_game_tree.append(
                MCTS_node.MCTSnode(