datetime.datetime.now

Here are examples of the Python API datetime.datetime.now taken from open source projects.

159 Examples
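
Before the project examples, here is a minimal standalone sketch (not taken from any of the projects below) of the two patterns that recur throughout: obtaining a timezone-aware current timestamp, as in Example 51, and measuring elapsed time between two calls, as in Example 52. It assumes pytz is installed.

import datetime

import pytz

# Timezone-aware current time in UTC (the pattern used in Example 51)
now_utc = datetime.datetime.now(pytz.UTC)
print(now_utc.isoformat())

# Naive local time plus elapsed-time measurement (the pattern used in Example 52)
start = datetime.datetime.now()
# ... work to be timed goes here ...
elapsed = (datetime.datetime.now() - start).total_seconds()
print("elapsed: {:.3f} seconds".format(elapsed))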

Example 51

Project: edx-platform Source File: provider.py
@transaction.atomic
def create_credit_request(course_key, provider_id, username):
    """
    Initiate a request for credit from a credit provider.

    This will return the parameters that the user's browser will need to POST
    to the credit provider.  It does NOT calculate the signature.

    Only users who are eligible for credit (have satisfied all credit requirements) are allowed to make requests.

    A provider can be configured either with *integration enabled* or not.
    If automatic integration is disabled, this method will simply return
    a URL to the credit provider and method set to "GET", so the student can
    visit the URL and request credit directly.  No database record will be created
    to track these requests.

    If automatic integration *is* enabled, then this will also return the parameters
    that the user's browser will need to POST to the credit provider.
    These parameters will be digitally signed using a secret key shared with the credit provider.

    A database record will be created to track the request with a 32-character UUID.
    The returned dictionary can be used by the user's browser to send a POST request to the credit provider.

    If a pending request already exists, this function should return a request description with the same UUID.
    (Other parameters, such as the user's full name, may differ from those in the original request.)

    If a completed request (either accepted or rejected) already exists, this function will
    raise an exception.  Users are not allowed to make additional requests once a request
    has been completed.

    Arguments:
        course_key (CourseKey): The identifier for the course.
        provider_id (str): The identifier of the credit provider.
        username (str): The user initiating the request.

    Returns: dict

    Raises:
        UserIsNotEligible: The user has not satisfied eligibility requirements for credit.
        CreditProviderNotConfigured: The credit provider has not been configured for this course.
        RequestAlreadyCompleted: The user has already submitted a request and received a response
            from the credit provider.

    Example Usage:
        >>> create_credit_request(course.id, "hogwarts", "ron")
        {
            "url": "https://credit.example.com/request",
            "method": "POST",
            "parameters": {
                "request_uuid": "557168d0f7664fe59097106c67c3f847",
                "timestamp": 1434631630,
                "course_org": "HogwartsX",
                "course_num": "Potions101",
                "course_run": "1T2015",
                "final_grade": "0.95",
                "user_username": "ron",
                "user_email": "[email protected]",
                "user_full_name": "Ron Weasley",
                "user_mailing_address": "",
                "user_country": "US",
                "signature": "cRCNjkE4IzY+erIjRwOQCpRILgOvXx4q2qvx141BCqI="
            }
        }

    """
    try:
        user_eligibility = CreditEligibility.objects.select_related('course').get(
            username=username,
            course__course_key=course_key
        )
        credit_course = user_eligibility.course
        credit_provider = CreditProvider.objects.get(provider_id=provider_id)
    except CreditEligibility.DoesNotExist:
        log.warning(
            u'User "%s" tried to initiate a request for credit in course "%s", '
            u'but the user is not eligible for credit',
            username, course_key
        )
        raise UserIsNotEligible
    except CreditProvider.DoesNotExist:
        log.error(u'Credit provider with ID "%s" has not been configured.', provider_id)
        raise CreditProviderNotConfigured

    # Check if we've enabled automatic integration with the credit
    # provider.  If not, we'll show the user a link to a URL
    # where the user can request credit directly from the provider.
    # Note that we do NOT track these requests in our database,
    # since the state would always be "pending" (we never hear back).
    if not credit_provider.enable_integration:
        return {
            "url": credit_provider.provider_url,
            "method": "GET",
            "parameters": {}
        }
    else:
        # If automatic credit integration is enabled, then try
        # to retrieve the shared signature *before* creating the request.
        # That way, if there's a misconfiguration, we won't have requests
        # in our system that we know weren't sent to the provider.
        shared_secret_key = get_shared_secret_key(credit_provider.provider_id)
        if shared_secret_key is None:
            msg = u'Credit provider with ID "{provider_id}" does not have a secret key configured.'.format(
                provider_id=credit_provider.provider_id
            )
            log.error(msg)
            raise CreditProviderNotConfigured(msg)

    # Initiate a new request if one has not already been created
    credit_request, created = CreditRequest.objects.get_or_create(
        course=credit_course,
        provider=credit_provider,
        username=username,
    )

    # Check whether we've already gotten a response for this request.
    # If so, we're not allowed to issue any further requests.
    # Skip checking the status if we know that we just created this record.
    if not created and credit_request.status != "pending":
        log.warning(
            (
                u'Cannot initiate credit request because the request with UUID "%s" '
                u'exists with status "%s"'
            ), credit_request.uuid, credit_request.status
        )
        raise RequestAlreadyCompleted

    if created:
        credit_request.uuid = uuid.uuid4().hex

    # Retrieve user account and profile info
    user = User.objects.select_related('profile').get(username=username)

    # Retrieve the final grade from the eligibility table
    try:
        final_grade = CreditRequirementStatus.objects.get(
            username=username,
            requirement__namespace="grade",
            requirement__name="grade",
            requirement__course__course_key=course_key,
            status="satisfied"
        ).reason["final_grade"]

        # NOTE (CCB): Limiting the grade to seven characters is a hack for ASU.
        if len(unicode(final_grade)) > 7:
            final_grade = u'{:.5f}'.format(final_grade)
        else:
            final_grade = unicode(final_grade)

    except (CreditRequirementStatus.DoesNotExist, TypeError, KeyError):
        msg = 'Could not retrieve final grade from the credit eligibility table for ' \
              'user [{user_id}] in course [{course_key}].'.format(user_id=user.id, course_key=course_key)
        log.exception(msg)
        raise UserIsNotEligible(msg)

    # Getting the student's enrollment date
    course_enrollment = CourseEnrollment.get_enrollment(user, course_key)
    enrollment_date = course_enrollment.created if course_enrollment else ""

    # Getting the student's course completion date
    completion_date = get_last_exam_completion_date(course_key, username)

    parameters = {
        "request_uuid": credit_request.uuid,
        "timestamp": to_timestamp(datetime.datetime.now(pytz.UTC)),
        "course_org": course_key.org,
        "course_num": course_key.course,
        "course_run": course_key.run,
        "enrollment_timestamp": to_timestamp(enrollment_date) if enrollment_date else "",
        "course_completion_timestamp": to_timestamp(completion_date) if completion_date else "",
        "final_grade": final_grade,
        "user_username": user.username,
        "user_email": user.email,
        "user_full_name": user.profile.name,
        "user_mailing_address": "",
        "user_country": (
            user.profile.country.code
            if user.profile.country.code is not None
            else ""
        ),
    }

    credit_request.parameters = parameters
    credit_request.save()

    if created:
        log.info(u'Created new request for credit with UUID "%s"', credit_request.uuid)
    else:
        log.info(
            u'Updated request for credit with UUID "%s" so the user can re-issue the request',
            credit_request.uuid
        )

    # Sign the parameters using a secret key we share with the credit provider.
    parameters["signature"] = signature(parameters, shared_secret_key)

    return {
        "url": credit_provider.provider_url,
        "method": "POST",
        "parameters": parameters
    }

Example 52

Project: pycog Source File: sgd.py
Function: train
    def train(self, gradient_data, validation_data, savefile):
        """
        Train the RNN.

        Parameters
        ----------

        gradient_data : pycog.Dataset
                        Gradient dataset.

        validation_data : pycog.Dataset
                          Validation dataset.

        savefile : str
                   File to save network information in.

        """
        checkfreq = self.p['checkfreq']
        if checkfreq is None:
            checkfreq = int(1e4)//gradient_data.minibatch_size

        patience = self.p['patience']
        if patience is None:
            patience = 100*checkfreq

        alpha        = self.p['dt']/self.p['tau']
        lambda_Omega = self.p['lambda_Omega']
        lr           = self.p['learning_rate']
        maxnorm      = self.p['max_gradient_norm']
        bound        = self.p['bound']
        save_exclude = ['callback', 'performance', 'terminate']

        #---------------------------------------------------------------------------------
        # Continue previous run if we can
        #---------------------------------------------------------------------------------

        if os.path.isfile(savefile):
            with open(savefile) as f:
                save = pickle.load(f)
            best          = save['best']
            init_p        = save['current']
            first_iter    = save['iter']
            costs_history = save['costs_history']
            Omega_history = save['Omega_history']

            # Restore RNGs for datasets
            gradient_data.rng   = save['rng_gradient']
            validation_data.rng = save['rng_validation']

            # Restore parameter values
            for i, j in zip(self.trainables, init_p):
                i.set_value(j)

            print(("[ {}.SGD.train ] Recovered saved model,"
                   " continuing from iteration {}.").format(THIS, first_iter))
        else:
            best = {
                'iter':        1,
                'cost':        np.inf,
                'other_costs': [],
                'params':      SGD.get_values(self.save_values)
                }
            first_iter    = best['iter']
            costs_history = []
            Omega_history = []

            # Save initial conditions
            save = {
                'params':         {k: v for k, v in self.p.items()
                                   if k not in save_exclude},
                'varlist':        self.trainable_names,
                'iter':           1,
                'current':        SGD.get_values(self.trainables),
                'best':           best,
                'costs_history':  costs_history,
                'Omega_history':  Omega_history,
                'rng_gradient':   gradient_data.rng,
                'rng_validation': validation_data.rng
                }
            base, ext = os.path.splitext(savefile)
            dump(base + '_init' + ext, save)

        #---------------------------------------------------------------------------------
        # Updates
        #---------------------------------------------------------------------------------

        performance = self.p['performance']
        terminate   = self.p['terminate']
        tr_Omega    = None
        tr_gnorm    = None
        try:
            tstart = datetime.datetime.now()
            for iter in xrange(first_iter, 1+self.p['max_iter']):
                if iter % checkfreq == 1:
                    #---------------------------------------------------------------------
                    # Timestamp
                    #---------------------------------------------------------------------

                    tnow      = datetime.datetime.now()
                    totalsecs = (tnow - tstart).total_seconds()

                    hrs  = int(totalsecs//3600)
                    mins = int(totalsecs%3600)//60
                    secs = int(totalsecs%60)

                    timestamp = tnow.strftime('%b %d %Y %I:%M:%S %p').replace(' 0', ' ')
                    print('{} updates - {} ({} hrs {} mins {} secs elapsed)'
                          .format(iter-1, timestamp, hrs, mins, secs))

                    #---------------------------------------------------------------------
                    # Validate
                    #---------------------------------------------------------------------

                    # Validation cost
                    costs = self.f_cost(*validation_data(best['other_costs']))
                    z     = costs[-1] # network outputs
                    costs = [float(i) for i in costs[:-1]]
                    s0    = "| validation loss / RMSE"
                    s1    = ": {:.6f} / {:.6f}".format(costs[0], costs[1])

                    # Dashes
                    nfill = 70

                    # Compute task-specific performance
                    if performance is not None:
                        costs.append(performance(validation_data.get_trials(),
                                                 SGD.get_value(z)))
                        s0    += " / performance"
                        s1    += " / {:.2f}".format(costs[-1])
                    s = s0 + s1

                    # Callback
                    if self.p['callback'] is not None:
                        callback_results = self.p['callback'](
                            validation_data.get_trials(), SGD.get_value(z)
                            )
                    else:
                        callback_results = None

                    # Keep track of costs
                    costs_history.append((gradient_data.ntrials, costs))

                    # Record the value of the regularization term in the last iteration
                    if tr_Omega is not None:
                        Omega_history.append(
                            (gradient_data.ntrials, lambda_Omega*tr_Omega)
                            )

                    # New best
                    if costs[0] < best['cost']:
                        s += ' ' + '-'*(nfill - len(s))
                        s += " NEW BEST (prev. best: {:.6f})".format(best['cost'])
                        best = {
                            'iter':        iter,
                            'cost':        costs[0],
                            'other_costs': costs[1:],
                            'params':      SGD.get_values(self.save_values)
                            }
                    print(s)

                    # Spectral radius
                    rho = RNN.spectral_radius(self.Wrec_.eval())

                    # Format
                    Omega = ('n/a' if tr_Omega is None
                             else '{:.8f}'.format(float(tr_Omega)))
                    gnorm = ('n/a' if tr_gnorm is None
                             else '{:.8f}'.format(float(tr_gnorm)))

                    # Info
                    print("| Omega      (last iter) = {}".format(Omega))
                    print("| grad. norm (last iter) = {}".format(gnorm))
                    print("| rho                    = {:.8f}".format(rho))
                    sys.stdout.flush()

                    #---------------------------------------------------------------------
                    # Save progress
                    #---------------------------------------------------------------------

                    save = {
                        'params':         {k: v for k, v in self.p.items()
                                           if k not in save_exclude},
                        'varlist':        self.trainable_names,
                        'iter':           iter,
                        'current':        SGD.get_values(self.trainables),
                        'best':           best,
                        'costs_history':  costs_history,
                        'Omega_history':  Omega_history,
                        'rng_gradient':   gradient_data.rng,
                        'rng_validation': validation_data.rng
                        }
                    dump(savefile, save)

                    if costs[1] <= self.p['min_error']:
                        print("Reached minimum error of {:.6f}"
                              .format(self.p['min_error']))
                        break

                    # This termination criterion assumes that performance is not None
                    if terminate(np.array([c[-1] for _, c in costs_history])):
                        print("Termination criterion satisfied -- we\'ll call it a day.")
                        break

                if iter - best['iter'] > patience:
                    print("We've run out of patience -- time to give up.")
                    break

                #-------------------------------------------------------------------------
                # Training step
                #-------------------------------------------------------------------------

                tr_cost, tr_gnorm, tr_Omega, tr_nelems, tr_x = self.train_step(
                    *(gradient_data(best['other_costs'], callback_results)
                      + [alpha, lambda_Omega, lr, maxnorm, bound])
                     )

                #-------------------------------------------------------------------------
        except KeyboardInterrupt:
            print("[ {}.SGD.train ] Training interrupted by user during iteration {}."
                  .format(THIS, iter))

Example 53

Project: dpa-pipe Source File: houdini.py
    def _render_to_product(self):

        # get render node reference
        render_node = self.session.hou.node(self._node_to_render)

        # ---- progress dialog
        num_ops = 8
        cur_op = 0
        progress_dialog = QtGui.QProgressDialog(
            "Product render...", "", cur_op, num_ops, self)
        progress_dialog.setWindowTitle("Dark Knight is busy...")
        progress_dialog.setAutoReset(False)
        progress_dialog.setLabelText("Preparing nuke file for rendering...")
        progress_dialog.show()

        #########################################
        # ensure the product has been created
        #########################################
        progress_dialog.setLabelText("Creating product...")

        if not render_node.type().name()=='ifd' or not self._version_note:
            raise Exception("The supplied node is not a WriteProduct node.")

        print "Creating product for node... " + str(render_node)

        ptask_area = PTaskArea.current()
        ptask = PTask.get(ptask_area.spec)

        if ptask_area.version:
            ptask_version = ptask.version(ptask_area.version)
        else:
            ptask_version = ptask.latest_version

        category = 'imgseq'
        file_type = 'exr'

        product_name = render_node.name()
        product_desc = render_node.name() + " mantra render"
        product_ver_note = self._version_note

        camera_node = self.session.hou.node(render_node.evalParm('camera'))
        if not camera_node:
            raise Exception("Camera specified is not valid.")
        width = camera_node.evalParm("resx")
        height = camera_node.evalParm("resy")
        resolution = "%sx%s" % (width, height)
            
        create_action_cls = ActionRegistry().get_action('create', 'product')
        if not create_action_cls:
            raise Exception("Unable to find product creation action.")

        create_action = create_action_cls(
            product=product_name,
            ptask=ptask.spec,
            version=ptask_version.number,
            category=category,
            description=product_desc,
            file_type=file_type,
            resolution=resolution,
            note=product_ver_note,
        )

        try:
            create_action()
        except ActionError as e:
            raise Exception("Unable to create product: " + str(e))

        # provision the ifd directory
        try:
            create_action.product_repr.area.provision('ifd')
        except Exception as e:
            raise Exception(
                "Unable to create ifd file directory: " + str(e))

        ifd_dir = os.path.join(create_action.product_repr.area.path,
            'ifd', product_name + '.$F4.ifd')
        out_path = os.path.join(create_action.product_repr.area.path,
            product_name + '.$F4.' + file_type)

        # by default, the mantra frame range has an expression on frame numbers
        render_node.parm('f1').deleteAllKeyframes()
        render_node.parm('f2').deleteAllKeyframes()

        # set frange
        render_node.parm('trange').set(1)
        render_node.parm('f1').set(self._frange.start)
        render_node.parm('f2').set(self._frange.end)
        render_node.parm('f3').set(self._frange.step)

        # set output
        render_node.parm('soho_outputmode').set(1)
        render_node.parm('soho_diskfile').set(ifd_dir)
        render_node.parm('soho_diskfile').disable(0)
        render_node.parm('vm_picture').set(out_path)
        render_node.parm('soho_mkpath').set(1)

        product_repr = create_action.product_repr
        product_repr_area = product_repr.area

        cur_op += 1
        progress_dialog.setValue(cur_op)

        #########################################
        # create ifd files
        #########################################
        progress_dialog.setLabelText("Generating ifd files...")
        render_node.parm('execute').pressButton()
        ifd_file_list = glob.glob(
                            os.path.join(
                                create_action.product_repr.area.path,
                                'ifd', '*.ifd')
                            )
        for ifd_file in ifd_file_list:
            os.chmod(ifd_file, 0770)

        cur_op += 1
        progress_dialog.setValue(cur_op)

        #########################################
        # sync current work area to version snapshot to render from
        #########################################
        progress_dialog.setLabelText("Sync'ing the latest work...")

        try:
            self.session.save() 
            self._sync_latest()
        except Exception as e:
            self._show_error("Unable to save & sync the latest work: " + str(e))
            self.setEnabled(True)
            progress_dialog.close()
            return

        cur_op += 1
        progress_dialog.setValue(cur_op)

        #########################################
        # ensure queue directory exists
        #########################################
        progress_dialog.setLabelText("Provisioning the queue directory...")

        try:
            product_repr_area.provision('queue')
        except Exception as e:
            raise DarkKnightError(
                "Unable to create queue scripts directory: " + str(e))

        cur_op += 1
        progress_dialog.setValue(cur_op)

        out_dir = product_repr_area.path
        ifd_dir = product_repr_area.dir(dir_name='ifd')
        queue_dir = product_repr_area.dir(dir_name='queue')
        tasks_info_file = os.path.join(queue_dir, 'tasks_info.cfg')
        tasks_info_config = Config()

        cur_op += 1
        progress_dialog.setValue(cur_op)


        #########################################
        # building queue scripts
        #########################################
        progress_dialog.setLabelText("Building the queue script...")

        # dpaset command to run
        dpaset_cmd = 'eval "`dpa env ptask {pt}@{vn}`"'.format(
            pt=ptask.spec, vn=ptask_version.number)

        # write out queue shell scripts
        frame_scripts = []
        for frame in self._frame_list:

            frame_padded = str(frame).zfill(4)

            ifd_file = os.path.join(ifd_dir, 
                "{pn}.{fn}.ifd".format(pn=product_name, fn=frame_padded))

            script_path = os.path.join(queue_dir, 
                "{pn}.{fn}.sh".format(pn=product_name, fn=frame_padded))

            out_file = os.path.join(out_dir, 
                "{pn}.{fn}.{ft}".format(pn=product_name, fn=frame_padded, ft=file_type) )

            render_cmd = "/opt/hfs14/bin/mantra -f {ifd} -V 2a".\
                format(
                    ifd=ifd_file
                )

            with open(script_path, "w") as script_file:
                script_file.write("#!/bin/bash\n\n")

                # XXX these should happen automatically in the queue...
                script_file.write("source /DPA/wookie/dpa/bash/startup.bash\n")
                script_file.write("pipeup\n\n")

                script_file.write("# set the ptask version to render\n")
                script_file.write(dpaset_cmd + "\n\n")

                script_file.write("# render!\n")
                script_file.write(render_cmd + "\n\n")

            frame_scripts.append((frame_padded, script_path, out_file))

            os.chmod(script_path, 0770)

        cur_op += 1
        progress_dialog.setValue(cur_op)


        ################################################
        # submit to the queue
        ################################################
        now = datetime.datetime.now()
        task_id_base = get_unique_id(product_repr_area.spec, dt=now)

        frame_tasks = []
        # create frame tasks
        for (frame, frame_script, out_file) in frame_scripts:

            progress_dialog.setLabelText(
                "Submitting frame: " + frame_script)

            task_id = task_id_base + "_" + frame

            if not self._debug_mode:

                # create tasks, don't actually submit yet
                create_queue_task(self._render_queue, frame_script, task_id,
                    output_file=out_file, submit=False, 
                    log_path=frame_script + '.log')

                frame_tasks.append((frame, task_id))
                #
                #  resubmit frame-by-frame because 
                #  group submit seems to be occasionally
                #  having problems.
                os.system("cqresubmittask {qn} {tid}".format(
                    qn=self._render_queue, tid=task_id))

        cur_op += 1
        progress_dialog.setValue(cur_op)

        ################################################
        # task info stuff, allows task ids to 
        # be retrieved with product spec
        ################################################
        progress_dialog.setLabelText("Creating task info file...")

        tasks_info_file = os.path.join(queue_dir, 'tasks_info.cfg')
        tasks_info_config = Config()
        tasks_info_config.add('base_id', task_id_base)

        frame_info = Config()
        for (frame, task_id) in frame_tasks:
            frame_info.add(str(frame), task_id)
        tasks_info_config.add('frame_ids', frame_info)

        tasks_info_config.write(tasks_info_file)
        os.chmod(tasks_info_file, 0660)

Example 54

Project: EventMonkey Source File: WindowsEventManager.py
def HandleRecords(filename,options,eventfile_type,record_list,recovered,dbHandler,elastic_actions,progressBar):
    pid = os.getpid()
    sql_records = []
    
    recovered_flag = 0
    if recovered:
        recovered_flag = 1
    
    for i in range(len(record_list)):
        progressBar.Increment(1)
        try:
            record = record_list[i]
        except Exception as error:
            WINEVENT_LOGGER.error("[PID: {}][{}] record index {}\tERROR: {}-{}\tRecovered: {}\tNot able to get record.".format(
                pid,
                filename,
                i,
                str(type(error)),
                str(error),
                str(recovered)
            ))
            continue
        
        #Get task id if it exists, for debugging#
        taskid = None
        
        #########################################################################################################
        ## XML Handling
        #########################################################################################################
        #if evtx, check xml string#
        xml_string = None
        jrec = None
        drec = None
        if eventfile_type == 'evtx':
            try:
                xml_string = record.xml_string
                #Strip null values just in case
                xml_string = xml_string.strip(b'\0')
            except Exception as error:
                WINEVENT_LOGGER.warn("[PID: {}][{}] record index {}, event_id {}\tWARN: {}-{}\tRecovered: {}\tNot able to get xml string.".format(
                    pid,
                    filename,
                    i,
                    record.identifier,
                    str(type(error)),
                    str(error),
                    str(recovered)
                ))
                xml_string = None
                
            if xml_string is not None:
                list_names = [
                    'Event.EventData.Data',
                    'Event.EventData.Binary',
                ]
                drec = XmlHandler.GetDictionary(xml_string,force_list=list_names)['Event']
                jrec = json.dumps(drec)
                try:
                    taskid = drec['System']['Task']
                except:
                    WINEVENT_LOGGER.debug('[PID: {}][{}] No Task ID for record at index {} (Recovered: {})'.format(pid,filename,i,str(recovered)))
        elif eventfile_type == 'evtxtract':
            # xml string is escaped, we need to decode it #
            xml_string = record['xml']
            list_names = [
                'Event.EventData.Data',
                'Event.EventData.Binary',
            ]
            # xml string is escaped, we need to decode it #
            drec = XmlHandler.GetDictionary(record['xml'].decode('string_escape'),force_list=list_names)['Event']
            jrec = json.dumps(drec)
        #########################################################################################################
        
        rdic = {}
        rdic['eventfile_type']=eventfile_type
        
        if recovered:
            # If the record is recovered but corrupt, we should try getting as many
            # attributes as possible
            try:
                rdic['computer_name']=getattr(record,'computer_name',None)
            except:
                rdic['computer_name']=None
            try:
                rdic['creation_time']=getattr(record,'creation_time',None)
            except:
                rdic['creation_time']=None
            try:
                rdic['data']=getattr(record,'data',None)
            except:
                rdic['data']=None
            try:
                rdic['event_category']=getattr(record,'event_category',None)
            except:
                rdic['event_category']=None
            try:
                rdic['event_identifier']=getattr(record,'event_identifier',None)
            except:
                rdic['event_identifier']=None
            try:
                rdic['event_identifier_qualifiers']=getattr(record,'event_identifier_qualifiers',None)
            except:
                rdic['event_identifier_qualifiers']=None
            try:
                rdic['event_level']=getattr(record,'event_level',None)
            except:
                rdic['event_level']=None
            try:
                rdic['identifier']=getattr(record,'identifier',None)
            except:
                rdic['identifier']=None
            try:
                rdic['offset']=getattr(record,'offset',None)
            except:
                rdic['offset']=None
            try:
                rdic['source_name']=getattr(record,'source_name',None)
            except:
                rdic['source_name']=None
            try:
                rdic['user_security_identifier']=getattr(record,'user_security_identifier',None)
            except:
                rdic['user_security_identifier']=None
            try:
                rdic['written_time']=getattr(record,'written_time',None)
            except:
                rdic['written_time']=None
        else:
            rdic['computer_name']=getattr(record,'computer_name',None)
            rdic['creation_time']=getattr(record,'creation_time',None)
            
            rdic['data']=getattr(record,'data',None)
            
            rdic['event_category']=getattr(record,'event_category',None)
            rdic['event_identifier']=getattr(record,'event_identifier',None)
            rdic['event_identifier_qualifiers']=getattr(record,'event_identifier_qualifiers',None)
            rdic['event_level']=getattr(record,'event_level',None)
            rdic['identifier']=getattr(record,'identifier',None)
            rdic['offset']=getattr(record,'offset',None)
            rdic['source_name']=getattr(record,'source_name',None)
            rdic['user_security_identifier']=getattr(record,'user_security_identifier',None)
            rdic['written_time']=getattr(record,'written_time',None)
        
        rdic['strings'] = ''
        rdic['xml_string'] = xml_string
        
        c = 0
        
        if eventfile_type == 'evtx' or eventfile_type == 'evt':
            rdic['strings']=[]
            try:
                for rstring in record.strings:
                    try:
                        rdic['strings'].append(rstring)
                    except Exception as error:
                        WINEVENT_LOGGER.info("[PID: {}][{}] record index {}, id {}\tINFO: {}-{}\tRecovered: {}\tNot able to get string at index {}.".format(
                            pid,
                            filename,
                            i,
                            record.identifier,
                            str(type(error)),
                            str(error),
                            str(recovered),
                            c
                        ))
                    c+=1
                rdic['strings'] = unicode(rdic['strings'])
            except Exception as error:
                WINEVENT_LOGGER.info("[PID: {}][{}] record index {}, id {}\tINFO: {}-{}\tRecovered: {}\tNot able to iterate strings.".format(
                    pid,
                    filename,
                    i,
                    record.identifier,
                    str(type(error)),
                    str(error),
                    str(recovered)
                ))
                rdic['strings'] = None
        
        #Create unique hash#
        md5 = hashlib.md5()
        md5.update(str(rdic))
        hash_id = md5.hexdigest()
        
        we_description = None
        we_tags = None
        
        if drec is not None:
            if drec['System']['EventID']['#text'] is not None:
                if drec['System']['Channel']['#text'] is not None:
                    try:
                        we_description = EVENT_ID_DESCRIPTIONS[unicode(drec['System']['Channel']['#text'])][int(drec['System']['EventID']['#text'])]['description']
                        we_tags = EVENT_ID_DESCRIPTIONS[unicode(drec['System']['Channel']['#text'])][int(drec['System']['EventID']['#text'])]['tags']
                        pass
                    except:
                        pass
        
        sql_insert = {
            'we_hash_id':hash_id,
            'we_source':filename,
            'we_jrec':jrec,
            'we_recovered':recovered,
            'we_index':i,
            'we_description':we_description,
            'we_tags':str(we_tags),
            'recovered':recovered_flag
        }
        
        sql_insert.update(rdic)
        
        sql_records.append(sql_insert)
        
        #Add Elastic Records#
        if options.eshost is not None:
            #Add Timestamp#
            timestamp = datetime.datetime.now()
            
            # If event type is evt, make drec = rdic
            # This is because evt has no xml to make
            # into a dictionary
            if eventfile_type == 'evt' or drec is None:
                # This contains binary, which is not supported by elastic, thus
                # we need to remove it. We will encode it as base64
                dvalue = rdic.pop("data", None)
                rdic['data_printable']=getattr(record,'data',None)
                if rdic['data_printable'] is not None:
                    rdic['data_printable'] = rdic['data_printable'].decode('ascii','replace')
                if dvalue is not None:
                    rdic['data_base64']=base64.b64encode(getattr(record,'data',None))
                else:
                    rdic['data_base64']=None
                drec = rdic
            
            drec.update({
                'index_timestamp': timestamp,
                'recovered':recovered,
                'source_filename':filename,
                'index':i,
                'tags':we_tags,
                'description':we_description
            })
            
            action = {
                "_index": options.index_name,
                "_type": 'winevent',
                "_id": hash_id,
                "_source": drec
            }
            
            elastic_actions.append(action)
        
    dbHandler.InsertFromListOfDicts(
        'winevent',
        sql_records,
        WINEVENT_COLUMN_ORDER
    )

Example 55

Project: million-dollar-curve Source File: 04_generate_curve_using_bbs.py
Function: main
def main():

    # Test local versions of libraries

    utils.test_python_version()
    utils.test_gmpy2_version()
    utils.test_pari_version()
    utils.test_pari_seadata()
    
    now = datetime.now()
    
    # Parse command line arguments

    parser = argparse.ArgumentParser(description="Generate an Edwards curve over a given prime field, suited for cryptographic purposes.")
    parser.add_argument("input_file",
                        help="""JSON file containing the BBS parameters and the prime of the underlying field (typically, the output of
                        03_generate_prime_field_using_bbs.py).
                        """)
    parser.add_argument("output_file", help="Output file where this script will write the parameter d of the curve and the current BBS parameters.")
    parser.add_argument("--start",
                        type=int,
                        help="Number of the candidate to start with (default is 1).",
                        default=1)
    parser.add_argument("--max_nbr_of_tests",
                        type=int,
                        help="Number of candidates to test before stopping the script (default is to continue until success).")
    parser.add_argument("--fast",
                        help=""" While computing a the curve cardinality with SAE, early exit when the cardinality will obviously be divisible by
                        a small integer > 4. This reduces the time required to find the final curve, but the
                        cardinalities of previous candidates are not fully computed.
                        """,
                        default=False,
                        action="store_true")

    args = parser.parse_args()

    
    # Check arguments

    print("Checking inputs...")
    
    output_file = args.output_file
    if os.path.exists(output_file):
        utils.exit_error("The output file '%s' already exists. Exiting."%(output_file))

    input_file = args.input_file
    with open(input_file, "r") as f:
        data = json.load(f)

        
    # Declare a few important variables
        
    bbs_p = int(data["bbs_p"])
    bbs_q = int(data["bbs_q"])
    bbs_n = bbs_p * bbs_q
    bbs_s = int(data["bbs_s"]) % bbs_n
    p = int(data["p"])

    start = max(int(args.start),1)

    max_nbr_of_tests = None
    if args.max_nbr_of_tests:
        max_nbr_of_tests = int(args.max_nbr_of_tests)
        
    if not subroutines.is_strong_strong_prime(bbs_p):
        utils.exit_error("bbs_p is not a strong strong prime.")
    if not subroutines.is_strong_strong_prime(bbs_q):
        utils.exit_error("bbs_q is not a strong strong prime.")
    if not (subroutines.deterministic_is_pseudo_prime(p) and p%4 == 3):
        utils.exit_error("p is not a prime congruent to 3 modulo 4.")

        
    # Initialize BBS

    print("Initializing BBS...")
    bbs = bbsengine.BBS(bbs_p, bbs_q, bbs_s)

    
    # Info about the prime field
    
    utils.colprint("Prime of the underlying prime field:", "%d (size: %d)"%(p, gmpy2.bit_length(p)))    
    size = gmpy2.bit_length(p) # total number of bits queried to bbs for each test

    
    # Skip the first "start" candidates
    
    candidate_nbr = start-1
    bbs.skipbits(size * (start-1))


    # Start looking for "d"
    
    while True:
        
        if max_nbr_of_tests and candidate_nbr >= start + max_nbr_of_tests - 1:
            print("Did not find an adequate parameter, starting at candidate %d (included), limiting to %d candidates."%(start, max_nbr_of_tests))
            utils.exit_error("Last candidate checked was number %d."%(candidate_nbr))

        candidate_nbr += 1

        bits = bbs.genbits(size)
        d = 0
        for bit in bits:
            d = (d << 1) | bit
        print("The candidate number %d is d = %d (ellapsed time: %s)"%(candidate_nbr, d, str(datetime.now()-now)))

        
        # Test 1
        
        if not utils.check(d != 0 and d < p, "d != 0 and d < p", 1):
            continue

        # Test 2
        
        if not utils.check(gmpy2.legendre(d, p) == -1, "d is not a square modulo p", 2):
            continue
        
        # Test 3
        
        if args.fast:
            cardinality = subroutines.sea_edwards(1, d, p, 4)
        else:
            cardinality = subroutines.sea_edwards(1, d, p)
        assert(cardinality % 4 == 0)
        q = cardinality>>2
        if not utils.check(subroutines.deterministic_is_pseudo_prime(q), "The curve cardinality / 4 is prime", 3):
            continue

        # Test 4
        
        trace = p+1-cardinality
        cardinality_twist = p+1+trace
        assert(cardinality_twist % 4 == 0)
        q_twist = cardinality_twist>>2
        if not utils.check(subroutines.deterministic_is_pseudo_prime(q_twist), "The twist cardinality / 4 is prime", 4):
            continue
        
        # Test 5

        if not utils.check(q != p and q_twist != p, "Curve and twist are safe against additive transfer", 5):
            continue
        
        # Test 6

        embedding_degree = subroutines.embedding_degree(p, q)
        if not utils.check(embedding_degree > (q-1) // 100, "Curve is safe against multiplicative transfer", 6):
            continue

        # Test 7

        embedding_degree_twist = subroutines.embedding_degree(p, q_twist)
        if not utils.check(embedding_degree_twist > (q_twist-1) // 100, "Twist is safe against multiplicative transfer", 7):
            continue

        # Test 8

        D = subroutines.cm_field_discriminant(p, trace)
        if not utils.check(abs(D) >= 2**100, "Absolute value of the discriminant is larger than 2^100", 8):
            continue

        break

    
    # Find a base point

    while True:
    
        bits = bbs.genbits(size)
        y = 0
        for bit in bits:
            y = (y<<1) | bit
        u = int((1 - y**2) * gmpy2.invert(1 - d*y**2, p)) % p
        if gmpy2.legendre(u, p) == -1:
            continue
        x = gmpy2.powmod(u, (p+1) // 4, p)
        (x,y) = subroutines.add_on_edwards(x, y, x, y, d, p)
        (x,y) = subroutines.add_on_edwards(x, y, x, y, d, p)
        if (x, y) == (0, 1):
            continue

        assert((x**2 + y**2) % p == (1 + d*x**2*y**2) % p)
        
        break

    
    # Print some information
    
    utils.colprint("Number of the successful candidate:", str(candidate_nbr))
    utils.colprint("Edwards elliptic curve parameter d is:", str(d))
    utils.colprint("Number of points:", str(cardinality))
    utils.colprint("Number of points on the twist:", str(cardinality_twist))
    utils.colprint("Embedding degree of the curve:", "%d"%embedding_degree)
    utils.colprint("Embedding degree of the twist:", "%d"%embedding_degree_twist)
    utils.colprint("Discriminant:", "%d"%D)
    utils.colprint("Trace:", "%d"%trace)
    utils.colprint("Base point coordinates:", "(%d, %d)"%(x, y))

    
    # Save p, d, x, y, etc. to the output_file

    print("Saving the parameters to %s"%output_file)
    bbs_s = bbs.s
    with open(output_file, "w") as f:
        json.dump({"p": int(p),
                   "bbs_p": int(bbs_p),
                   "bbs_q": int(bbs_q),
                   "bbs_s": int(bbs_s),
                   "candidate_nbr": int(candidate_nbr),
                   "d": int(d),
                   "cardinality": cardinality,
                   "cardinality_twist": cardinality_twist,
                   "embedding_degree": embedding_degree,
                   "embedding_degree_twist": embedding_degree_twist,
                   "discriminant": D,
                   "trace": trace,
                   "base_point_x": x,
                   "base_point_y": y},
                  f,
                  sort_keys=True)

Example 56

Project: ceph-scripts Source File: ceph-sls.py
Function: write_xml
def write_xml(slsid='Ceph'):
  osd_states = cephinfo.get_osd_states()
  osd_stats_sum = cephinfo.get_osd_stats_sum()
  pg_stats_sum = cephinfo.get_pg_stats_sum()['stat_sum']
  pg_map = cephinfo.stat_data['pgmap']
  try:
    latency = cephinfo.get_write_latency()
    read_latency = cephinfo.get_read_latency()
    cephinfo.rados_cleanup(latency[0])
  except IndexError:
    latency = ['',[0,0,0]]
    read_latency = [0,0,0]
  pg_states = cephinfo.get_pg_states()
  osd_df = cephinfo.osd_df_data['nodes']
  activity = cephinfo.get_smooth_activity(10)
  status, availabilityinfo = get_status(pg_stats_sum, latency[1][0]*1000)
  context = {
    "slsid"              : slsid,
    "timestamp"          : datetime.strftime(datetime.now(), '%Y-%m-%dT%H:%M:%S'),
    "status"             : status,
    "availabilityinfo"   : availabilityinfo,
    "n_mons"             : cephinfo.get_n_mons(),
    "n_quorum"           : cephinfo.get_n_mons_quorum(),
    "n_pools"            : cephinfo.get_n_pools(),
    "n_osds"             : cephinfo.get_n_osds(),
    "n_osds_up"          : osd_states['up'],
    "n_osds_in"          : osd_states['in'],
    "n_pgs"              : cephinfo.get_n_pgs(),
    "n_osd_gb_total"     : osd_stats_sum['kb'] / 1024 / 1024,
    "n_osd_gb_used"      : osd_stats_sum['kb_used'] / 1024 / 1024,
    "n_osd_gb_avail"     : osd_stats_sum['kb_avail'] / 1024 / 1024,
    "n_pg_gbytes"        : pg_stats_sum['num_bytes'] / 1024 / 1024 / 1024,
    "n_objects"          : pg_stats_sum['num_objects'],
    "n_object_copies"    : pg_stats_sum['num_object_copies'],
    "n_objects_degraded" : pg_stats_sum['num_objects_degraded'],
    "n_objects_unfound"  : pg_stats_sum['num_objects_unfound'],
    "n_objects_misplaced": pg_stats_sum['num_objects_misplaced'],
    "n_read_gb"          : pg_stats_sum['num_read_kb'] / 1024 / 1024,
    "n_write_gb"         : pg_stats_sum['num_write_kb'] / 1024 / 1024,
    "latency_ms"         : latency[1][0]*1000,
    "latency_max_ms"     : latency[1][1]*1000,
    "latency_min_ms"     : latency[1][2]*1000,
    "read_latency_ms"    : read_latency[0]*1000,
    "read_latency_max_ms": read_latency[1]*1000,
    "read_latency_min_ms": read_latency[2]*1000,
    "n_openstack_volumes": cephinfo.get_n_openstack_volumes(),
    "n_openstack_images" : cephinfo.get_n_openstack_images(),
    "op_per_sec"         : activity[0],
    "read_mb_sec"        : activity[1],
    "write_mb_sec"       : activity[2],
    "graphite_prefix"    : slsid.replace('_','.').lower() + '.sls',
    "graphite_osd_prefix": slsid.replace('_','.').lower() + '.osds',
    "graphite_timestamp" : int(time.time()),
  }

  for state in pg_states.keys():
    context['n_pgs_%s' % state] = pg_states[state]

  template = """
<?xml version="1.0" encoding="utf-8"?>

<serviceupdate xmlns="http://sls.cern.ch/SLS/XML/update">
    <id>{slsid}</id>

    <contact>[email protected]</contact>
    <webpage>https://twiki.cern.ch/twiki/bin/viewauth/DSSGroup/CephProject</webpage>

    <availabilitydesc>Status is available, degraded, or unavailable when the Ceph status is HEALTH_OK, HEALTH_WARN, or HEALTH_ERR, respectively.</availabilitydesc>

    <timestamp>{timestamp}</timestamp>

    <status>{status}</status>

    <availabilityinfo>{availabilityinfo}</availabilityinfo>

    <data>
        <numericvalue name="n_mons" desc="Num Mons">{n_mons}</numericvalue>
        <numericvalue name="n_quorum" desc="Num Mons in Quorum">{n_quorum}</numericvalue>
        <numericvalue name="n_pools" desc="Num Pools">{n_pools}</numericvalue>
        <numericvalue name="n_osds" desc="Num OSDs">{n_osds}</numericvalue>
        <numericvalue name="n_osds_up" desc="Num OSDs Up">{n_osds_up}</numericvalue>
        <numericvalue name="n_osds_in" desc="Num OSDs In">{n_osds_in}</numericvalue>
        <numericvalue name="n_pgs" desc="Num PGs">{n_pgs}</numericvalue>
"""

  for state in pg_states.keys():
    template = template + '        <numericvalue name="n_pgs_%s" desc="Num PGs %s">{n_pgs_%s}</numericvalue>\n' % (state, state, state)

  template = template + """        <numericvalue name="n_osd_gb_total" desc="OSD Gigabytes Total">{n_osd_gb_total}</numericvalue>
        <numericvalue name="n_osd_gb_used" desc="OSD Gigabytes Used">{n_osd_gb_used}</numericvalue>
        <numericvalue name="n_osd_gb_avail" desc="OSD Gigabytes Avail">{n_osd_gb_avail}</numericvalue>
        <numericvalue name="n_pg_gbytes" desc="PG Gigabytes">{n_pg_gbytes}</numericvalue>
        <numericvalue name="n_objects" desc="Num Objects">{n_objects}</numericvalue>
        <numericvalue name="n_object_copies" desc="Num Object Copies">{n_object_copies}</numericvalue>
        <numericvalue name="n_objects_degraded" desc="Num Objects Degraded">{n_objects_degraded}</numericvalue>
        <numericvalue name="n_objects_unfound" desc="Num Objects Unfound">{n_objects_unfound}</numericvalue>
        <numericvalue name="n_objects_misplaced" desc="Num Objects Misplaced">{n_objects_misplaced}</numericvalue>
        <numericvalue name="n_read_gb" desc="Total Read (GB)">{n_read_gb}</numericvalue>
        <numericvalue name="n_write_gb" desc="Total Write (GB)">{n_write_gb}</numericvalue>
        <numericvalue name="latency_ms" desc="Average">{latency_ms}</numericvalue>
        <numericvalue name="latency_max_ms" desc="Max">{latency_max_ms}</numericvalue>
        <numericvalue name="latency_min_ms" desc="Min">{latency_min_ms}</numericvalue>
        <numericvalue name="read_latency_ms" desc="Average">{read_latency_ms}</numericvalue>
        <numericvalue name="read_latency_max_ms" desc="Max">{read_latency_max_ms}</numericvalue>
        <numericvalue name="read_latency_min_ms" desc="Min">{read_latency_min_ms}</numericvalue>
        <numericvalue name="n_openstack_volumes" desc="Num OpenStack Volumes">{n_openstack_volumes}</numericvalue>
        <numericvalue name="n_openstack_images" desc="Num OpenStack Images">{n_openstack_images}</numericvalue>
        <numericvalue name="read_mb_sec" desc="Read MB/s">{read_mb_sec}</numericvalue>
        <numericvalue name="write_mb_sec" desc="Write MB/s">{write_mb_sec}</numericvalue>
        <numericvalue name="op_per_sec" desc="Operations Per Second">{op_per_sec}</numericvalue>
    </data>
</serviceupdate>
"""
  print template.format(**context)

  # generate Graphite update
  graphite = """
{graphite_prefix}.n_mons {n_mons} {graphite_timestamp}
{graphite_prefix}.n_quorum {n_quorum} {graphite_timestamp}
{graphite_prefix}.n_pools {n_pools} {graphite_timestamp}
{graphite_prefix}.n_osds {n_osds} {graphite_timestamp}
{graphite_prefix}.n_osds_up {n_osds_up} {graphite_timestamp}
{graphite_prefix}.n_osds_in {n_osds_in} {graphite_timestamp}
{graphite_prefix}.n_pgs {n_pgs} {graphite_timestamp}
"""

  for state in pg_states.keys():
    graphite = graphite + "{graphite_prefix}.n_pgs_%s {n_pgs_%s} {graphite_timestamp}\n" % (state, state)

  graphite = graphite + """{graphite_prefix}.n_osd_gb_total {n_osd_gb_total} {graphite_timestamp}
{graphite_prefix}.n_osd_gb_used {n_osd_gb_used} {graphite_timestamp}
{graphite_prefix}.n_osd_gb_avail {n_osd_gb_avail} {graphite_timestamp}
{graphite_prefix}.n_pg_gbytes {n_pg_gbytes} {graphite_timestamp}
{graphite_prefix}.n_objects {n_objects} {graphite_timestamp}
{graphite_prefix}.n_object_copies {n_object_copies} {graphite_timestamp}
{graphite_prefix}.n_objects_degraded {n_objects_degraded} {graphite_timestamp}
{graphite_prefix}.n_objects_unfound {n_objects_unfound} {graphite_timestamp}
{graphite_prefix}.n_objects_misplaced {n_objects_misplaced} {graphite_timestamp}
{graphite_prefix}.n_read_gb {n_read_gb} {graphite_timestamp}
{graphite_prefix}.n_write_gb {n_write_gb} {graphite_timestamp}
{graphite_prefix}.latency_ms {latency_ms} {graphite_timestamp}
{graphite_prefix}.latency_max_ms {latency_max_ms} {graphite_timestamp}
{graphite_prefix}.latency_min_ms {latency_min_ms} {graphite_timestamp}
{graphite_prefix}.read_latency_ms {read_latency_ms} {graphite_timestamp}
{graphite_prefix}.read_latency_max_ms {read_latency_max_ms} {graphite_timestamp}
{graphite_prefix}.read_latency_min_ms {read_latency_min_ms} {graphite_timestamp}
{graphite_prefix}.n_openstack_volumes {n_openstack_volumes} {graphite_timestamp}
{graphite_prefix}.n_openstack_images {n_openstack_images} {graphite_timestamp}
{graphite_prefix}.read_mb_sec {read_mb_sec} {graphite_timestamp}
{graphite_prefix}.write_mb_sec {write_mb_sec} {graphite_timestamp}
{graphite_prefix}.op_per_sec {op_per_sec} {graphite_timestamp}
"""

  for osd in osd_df:
    graphite = graphite + "{graphite_osd_prefix}.%s.crush_weight %s {graphite_timestamp}\n" % (osd['id'], osd['crush_weight'])
    graphite = graphite + "{graphite_osd_prefix}.%s.reweight %s {graphite_timestamp}\n" % (osd['id'], osd['reweight'])
    graphite = graphite + "{graphite_osd_prefix}.%s.kb %s {graphite_timestamp}\n" % (osd['id'], osd['kb'])
    graphite = graphite + "{graphite_osd_prefix}.%s.kb_used %s {graphite_timestamp}\n" % (osd['id'], osd['kb_used'])
    graphite = graphite + "{graphite_osd_prefix}.%s.kb_avail %s {graphite_timestamp}\n" % (osd['id'], osd['kb_avail'])
    graphite = graphite + "{graphite_osd_prefix}.%s.utilization %s {graphite_timestamp}\n" % (osd['id'], osd['utilization'])
    graphite = graphite + "{graphite_osd_prefix}.%s.var %s {graphite_timestamp}\n" % (osd['id'], osd['var'])

  update = graphite.format(**context)
  sock = socket.socket()
  sock.connect((CARBON_SERVER, CARBON_PORT))
  sock.sendall(update)
  sock.close()

Example 57

Project: feedhq Source File: models.py
    def update_feed(self, url, etag=None, last_modified=None, subscribers=1,
                    backoff_factor=1, previous_error=None, link=None,
                    title=None, hub=None):
        url = URLObject(url)
        # Check if this domain has rate-limiting rules
        ratelimit_key = 'ratelimit:{0}'.format(
            url.netloc.without_auth().without_port())
        retry_at = cache.get(ratelimit_key)
        if retry_at:
            retry_in = (epoch_to_utc(retry_at) - timezone.now()).seconds
            schedule_job(url, schedule_in=retry_in,
                         connection=get_redis_connection())
            return

        if subscribers == 1:
            subscribers_text = '1 subscriber'
        else:
            subscribers_text = '{0} subscribers'.format(subscribers)

        headers = {
            'User-Agent': USER_AGENT % subscribers_text,
            'Accept': feedparser.ACCEPT_HEADER,
        }

        if last_modified:
            headers['If-Modified-Since'] = force_bytes(last_modified)
        if etag:
            headers['If-None-Match'] = force_bytes(etag)
        if last_modified or etag:
            headers['A-IM'] = force_bytes('feed')

        if settings.TESTS:
            # Make sure requests.get is properly mocked during tests
            if str(type(requests.get)) != "<class 'unittest.mock.MagicMock'>":
                raise ValueError("Not Mocked")

        auth = None
        if url.auth != (None, None):
            auth = url.auth

        start = datetime.datetime.now()
        error = None
        try:
            response = requests.get(
                six.text_type(url.without_auth()), headers=headers, auth=auth,
                timeout=UniqueFeed.request_timeout(backoff_factor))
        except (requests.RequestException, socket.timeout, socket.error,
                IncompleteRead, DecodeError) as e:
            logger.debug("Error fetching %s, %s" % (url, str(e)))
            if isinstance(e, IncompleteRead):
                error = UniqueFeed.CONNECTION_ERROR
            elif isinstance(e, DecodeError):
                error = UniqueFeed.DECODE_ERROR
            else:
                error = UniqueFeed.TIMEOUT
            self.backoff_feed(url, error, backoff_factor)
            return
        except LocationParseError:
            logger.debug(u"Failed to parse URL for %s", url)
            self.mute_feed(url, UniqueFeed.PARSE_ERROR)
            return

        elapsed = (datetime.datetime.now() - start).seconds

        ctype = response.headers.get('Content-Type', None)
        if (response.history and
            url != response.url and ctype is not None and (
                ctype.startswith('application') or
                ctype.startswith('text/xml') or
                ctype.startswith('text/rss'))):
            redirection = None
            for index, redirect in enumerate(response.history):
                if redirect.status_code != 301:
                    break
                # Actual redirection is next request's url
                try:
                    redirection = response.history[index + 1].url
                except IndexError:  # next request is final request
                    redirection = response.url

            if redirection is not None and redirection != url:
                self.handle_redirection(url, redirection)

        update = {'last_update': int(time.time())}

        if response.status_code == 410:
            logger.debug(u"Feed gone, %s", url)
            self.mute_feed(url, UniqueFeed.GONE)
            return

        elif response.status_code in [400, 401, 403, 404, 500, 502, 503]:
            self.backoff_feed(url, str(response.status_code), backoff_factor)
            return

        elif response.status_code not in [200, 204, 226, 304]:
            logger.debug(u"%s returned %s", url, response.status_code)

            if response.status_code == 429:
                # Too Many Requests
                # Prevent next jobs from fetching the URL before retry-after
                retry_in = int(response.headers.get('Retry-After', 60))
                retry_at = timezone.now() + datetime.timedelta(
                    seconds=retry_in)
                cache.set(ratelimit_key,
                          int(retry_at.strftime('%s')),
                          retry_in)
                schedule_job(url, schedule_in=retry_in)
                return

        else:
            # Avoid going back to 1 directly if it isn't safe given the
            # actual response time.
            if previous_error and error is None:
                update['error'] = None
            backoff_factor = min(backoff_factor, self.safe_backoff(elapsed))
            update['backoff_factor'] = backoff_factor

        if response.status_code == 304:
            schedule_job(url,
                         schedule_in=UniqueFeed.delay(backoff_factor, hub),
                         connection=get_redis_connection(), **update)
            return

        if 'etag' in response.headers:
            update['etag'] = response.headers['etag']
        else:
            update['etag'] = None

        if 'last-modified' in response.headers:
            update['modified'] = response.headers['last-modified']
        else:
            update['modified'] = None

        try:
            if not response.content:
                content = ' '  # chardet won't detect encoding on empty strings
            else:
                content = response.content
        except socket.timeout:
            logger.debug(u'%s timed out', url)
            self.backoff_feed(url, UniqueFeed.TIMEOUT, backoff_factor)
            return

        parsed = feedparser.parse(content)

        if not is_feed(parsed):
            self.backoff_feed(url, UniqueFeed.NOT_A_FEED,
                              UniqueFeed.MAX_BACKOFF)
            return

        if 'link' in parsed.feed and parsed.feed.link != link:
            update['link'] = parsed.feed.link

        if 'title' in parsed.feed and parsed.feed.title != title:
            update['title'] = parsed.feed.title

        if 'links' in parsed.feed:
            for link in parsed.feed.links:
                if link.rel == 'hub':
                    update['hub'] = link.href
        if 'hub' not in update:
            update['hub'] = None
        else:
            subs_key = u'pshb:{0}'.format(url)
            enqueued = cache.get(subs_key)
            if not enqueued and not settings.DEBUG:
                cache.set(subs_key, True, 3600 * 24)
                enqueue(ensure_subscribed, args=[url, update['hub']],
                        queue='low')

        schedule_job(url,
                     schedule_in=UniqueFeed.delay(
                         update.get('backoff_factor', backoff_factor),
                         update['hub']),
                     connection=get_redis_connection(), **update)

        entries = list(filter(
            None,
            [self.entry_data(entry, parsed) for entry in parsed.entries]
        ))
        if len(entries):
            enqueue(store_entries, args=[url, entries], queue='store')
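
Example 57 brackets the HTTP fetch with two datetime.datetime.now() calls and reads .seconds off the resulting timedelta. A minimal sketch of that timing pattern, with time.sleep standing in for the network request; note that timedelta.seconds holds only the seconds component, while total_seconds() returns the full duration:

import datetime
import time

start = datetime.datetime.now()
time.sleep(0.25)                          # stand-in for the network request
elapsed = datetime.datetime.now() - start
print(elapsed.seconds)                    # 0 for sub-second durations
print(round(elapsed.total_seconds(), 2))  # ~0.25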

Example 58

Project: Chatbot Source File: hotel.py
	def time_extract(self, keywords):
		while True:
			target_week = 0
			target_month = 0
			target_day = 0
			for key in keywords:
				if key == '一月' or key == '1月':
					self.month = '一月' 
					target_month = 1
				elif key == '二月' or key == '2月':
					self.month = '二月' 
					target_month = 2
				elif key == '三月' or key == '3月':
					self.month = '三月'
					target_month = 3
				elif key == '四月' or key == '4月':
					self.month = '四月' 
					target_month = 4
				elif key == '五月' or key == '5月':
					self.month = '五月' 
					target_month = 5
				elif key == '六月' or key == '6月':
					self.month = '六月'
					target_month = 6
				elif key == '七月' or key == '7月':
					self.month = '七月' 
					target_month = 7
				elif key == '八月' or key == '8月':
					self.month = '八月' 
					target_month = 8
				elif key == '九月' or key == '9月':
					self.month = '九月' 
					target_month = 9
				elif key == '十月' or key == '10月':
					self.month = '十月' 
					target_month = 10
				elif key == '十一月' or key == '11月':
					self.month = '十一月' 
					target_month = 11
				elif key == '十二月' or key == '12月':
					self.month = '十二月'
					target_month = 12
				elif key == '一日' or key == '一號':
					self.day = '一日'
					target_day = 1
				elif key == '二日' or key == '二號':
					self.day = '二日'
					target_day = 2
				elif key == '三日' or key == '三號':
					self.day = '三日'
					target_day = 3
				elif key == '四日' or key == '四號':
					self.day = '四日'
					target_day = 4	
				elif key == '五日' or key == '五號':
					self.day = '五日'
					target_day = 5
				elif key == '六日' or key == '六號':
					self.day = '六日'
					target_day = 6
				elif key == '七日' or key == '七號':
					self.day = '七日'
					target_day = 7
				elif key == '八日' or key == '八號':
					self.day = '八日'
					target_day = 8
				elif key == '九日' or key == '九號':
					self.day = '九日'
					target_day = 9
				elif key == '十日' or key == '十號':
					self.day = '十日'
					target_day = 10
				elif key == '十一日' or key == '十一號':
					self.day = '十一日'
					target_day = 11
				elif key == '十二日' or key == '十二號':
					self.day = '十二日'
					target_day = 12
				elif key == '十三日' or key == '十三號':
					self.day = '十三日'
					target_day = 13
				elif key == '十四日' or key == '十四號':
					self.day = '十四日'
					target_day = 14
				elif key == '十五日' or key == '十五號':
					self.day = '十五日'
					target_day = 15
				elif key == '十六日' or key == '十六號':
					self.day = '十六日'
					target_day = 16
				elif key == '十七日' or key == '十七號':
					self.day = '十七日'
					target_day = 17
				elif key == '十八日' or key == '十八號':
					self.day = '十八日'
					target_day = 18
				elif key == '十九日' or key == '十九號':
					self.day = '十九日'
					target_day = 19
				elif key == '二十日' or key == '二十號':
					self.day = '二十日'
					target_day = 20
				elif key == '二十一日' or key == '二十一號':
					self.day = '二十一日'
					target_day = 21
				elif key == '二十二日' or key == '二十二號':
					self.day = '二十二日'
					target_day = 22
				elif key == '二十三日' or key == '二十三號':
					self.day = '二十三日'
					target_day = 23
				elif key == '二十四日' or key == '二十四號':
					self.day = '二十四日'
					target_day = 24
				elif key == '二十五日' or key == '二十五號':
					self.day = '二十五日'
					target_day = 25
				elif key == '二十六日' or key == '二十六號':
					self.day = '二十六日'
					target_day = 26
				elif key == '二十七日' or key == '二十七號':
					self.day = '二十七日'
					target_day = 27
				elif key == '二十八日' or key == '二十八號':
					self.day = '二十八日'
					target_day = 28
				elif key == '二十九日' or key == '二十九號':
					self.day = '二十九日'
					target_day = 29
				elif key == '三十日' or key == '三十號':
					self.day = '三十日'
					target_day = 30
				elif key == '三十一日' or key == '三十一號':
					self.day = '三十一日'
					target_day = 31
				elif key == '星期一' or key == '禮拜一':
					self.week = '星期一'
					target_week = 0
				elif key == '星期二' or key == '禮拜二':
					self.week = '星期二' 
					target_week = 1
				elif key == '星期三' or key == '禮拜三':
					self.week = '星期三' 
					target_week = 2
				elif key == '星期四' or key == '禮拜四':
					self.week = '星期四' 
					target_week = 3
				elif key == '星期五' or key == '禮拜五':
					self.week = '星期五' 
					target_week = 4
				elif key == '星期六' or key == '禮拜六':
					self.week = '星期六' 
					target_week = 5
				elif key == '星期日' or key == '禮拜日' or key == '星期天':
					self.week = '星期日'
					target_week = 6
				elif key == '明天':
					self.add_day = '明天' 
				elif key == '後天':
					self.add_day = '後天'
				
			now = datetime.datetime.now()
				
			if self.add_day != "":
				self.hotel_dic["time"] = 1
				if self.add_day == '明天':
					now += datetime.timedelta(days=1)
					self.date = str(now.year) + "-" + str(now.month) + "-" + str(now.day)
					now += datetime.timedelta(days=1)
					self.end = str(now.year) + "-" + str(now.month) + "-" + str(now.day)
				elif self.add_day == '後天':
					now += datetime.timedelta(days=2)
					self.date = str(now.year) + "-" + str(now.month) + "-" + str(now.day)
					now += datetime.timedelta(days=1)
					self.end = str(now.year) + "-" + str(now.month) + "-" + str(now.day)
				self.hotel_dic["time"] = self.date
				self.hotel_dic["end"] = self.end
					
			elif self.month != "" and self.day != "":
				if now < datetime.datetime(now.year, target_month, target_day):
					if target_month == 4 or target_month == 6 or target_month == 9 or target_month == 11:
						if target_day > 30:
							self.date = "ERROR!! Invaild Day!!"
							return
					if target_month == 2:
						if now.year % 4000 == 0:
							if target_day > 28:
								self.date = "ERROR!! Invaild Day!!"
								return
						elif now.year % 400 == 0:
							if target_day > 29:
								self.date = "ERROR!! Invaild Day!!"
								return
						elif now.year % 100 == 0:
							if target_day > 28:
								self.date = "ERROR!! Invaild Day!!"
								return
						elif now.year % 4 == 0:
							if target_day > 29:
								self.date = "ERROR!! Invaild Day!!"
								return
					now = datetime.date(now.year, target_month, target_day)
					self.date = str(now.year) + "-" + str(now.month) + "-" + str(now.day)
					now += datetime.timedelta(days=1)
					self.end = str(now.year) + "-" + str(now.month) + "-" + str(now.day)
				else:
					if target_month == 4 or target_month == 6 or target_month == 9 or target_month == 11:
						if target_day > 30:
							self.date = "ERROR!! Invaild Day!!"
							return
					if target_month == 2:
						if (now.year + 1) % 4000 == 0:
							if target_day > 28:
								self.date = "ERROR!! Invaild Day!!"
								return
						elif (now.year + 1) % 400 == 0:
							if target_day > 29:
								self.date = "ERROR!! Invaild Day!!"
								return
						elif (now.year + 1) % 100 == 0:
							if target_day > 28:
								self.date = "ERROR!! Invaild Day!!"
								return
						elif (now.year + 1) % 4 == 0:
							if target_day > 29:
								self.date = "ERROR!! Invaild Day!!"
								return
					now = datetime.date(now.year + 1, target_month, target_day)
					self.date = str(now.year) + "-" + str(now.month) + "-" + str(now.day)
					now += datetime.timedelta(days=1)
					self.end = str(now.year) + "-" + str(now.month) + "-" + str(now.day)
				self.hotel_dic["time"] = self.date
				self.hotel_dic["end"] = self.end
				
			elif self.month == "" and self.day != "":
				if now.day < target_day:
					if now.month == 4 or now.month == 6 or now.month == 9 or now.month == 11:
						if target_day > 30:
							self.date = "ERROR!! Invaild Day!!"
							return
					if now.month == 2:
						if now.year % 4000 == 0:
							if target_day > 28:
								self.date = "ERROR!! Invaild Day!!"
								return
						elif now.year % 400 == 0:
							if target_day > 29:
								self.date = "ERROR!! Invaild Day!!"
								return
						elif now.year % 100 == 0:
							if target_day > 28:
								self.date = "ERROR!! Invaild Day!!"
								return
						elif now.year % 4 == 0:
							if target_day > 29:
								self.date = "ERROR!! Invaild Day!!"
								return
					now = datetime.date(now.year, now.month, target_day)
					self.date = str(now.year) + "-" + str(now.month) + "-" + str(now.day)
					now += datetime.timedelta(days=1)
					self.end = str(now.year) + "-" + str(now.month) + "-" + str(now.day)
				else:
					if now.day < target_day:
						if now.month == 3 or now.month == 5 or now.month == 7 or now.month == 10:
							if target_day > 30:
								self.date = "ERROR!! Invaild Day!!"
								return
						if now.month == 1:
							if now.year % 4000 == 0:
								if target_day > 28:
									self.date = "ERROR!! Invaild Day!!"
									return
							elif now.year % 400 == 0:
								if target_day > 29:
									self.date = "ERROR!! Invaild Day!!"
									return
							elif now.year % 100 == 0:
								if target_day > 28:
									self.date = "ERROR!! Invaild Day!!"
									return
							elif now.year % 4 == 0:
								if target_day > 29:
									self.date = "ERROR!! Invaild Day!!"
									return
					now = datetime.date(now.year, now.month + 1, target_day)
					self.date = str(now.year) + "-" + str(now.month) + "-" + str(now.day)
					now += datetime.timedelta(days=1)
					self.end = str(now.year) + "-" + str(now.month) + "-" + str(now.day)
				self.hotel_dic["time"] = self.date
				self.hotel_dic["end"] = self.end
				
			elif self.week != "":
				if now.weekday() < target_week:
					now += datetime.timedelta(days=target_week - now.weekday())
					self.date = str(now.year) + "-" + str(now.month) + "-" + str(now.day)
					now += datetime.timedelta(days=1)
					self.end = str(now.year) + "-" + str(now.month) + "-" + str(now.day)
				else:
					now += datetime.timedelta(days=7 - now.weekday() + target_week)
					self.date = str(now.year) + "-" + str(now.month) + "-" + str(now.day)
					now += datetime.timedelta(days=1)
					self.end = str(now.year) + "-" + str(now.month) + "-" + str(now.day)
				self.hotel_dic["time"] = self.date
				self.hotel_dic["end"] = self.end
			break
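
The '明天'/'後天' branch above derives a check-in date from datetime.datetime.now() plus an offset and a check-out date one day later. A minimal sketch of that calculation with a hypothetical booking_dates helper, using strftime instead of manual string concatenation (strftime zero-pads month and day, unlike str()):

import datetime

def booking_dates(days_ahead):
    # Check-in is now() shifted by the requested number of days,
    # check-out is one day after check-in.
    checkin = datetime.datetime.now() + datetime.timedelta(days=days_ahead)
    checkout = checkin + datetime.timedelta(days=1)
    return checkin.strftime("%Y-%m-%d"), checkout.strftime("%Y-%m-%d")

print(booking_dates(1))  # 明天: tomorrow / day after
print(booking_dates(2))  # 後天: day after tomorrow / two days out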

Example 59

Project: ggrc-core Source File: 20140722203407_4b3316aa1acf_move_existing_workflows_to_new_contexts.py
def upgrade():
  current_datetime = datetime.now()

  # Get the roles we'll need later
  workflow_owner_role = get_role('WorkflowOwner')
  workflow_member_role = get_role('WorkflowMember')

  # Get all current workflows
  connection = op.get_bind()
  workflows = connection.execute(
      select([workflows_table.c.id])
      .where(workflows_table.c.context_id == None))  # noqa

  for workflow in workflows:
    workflow_id = workflow.id

    # Create the Workflow context
    connection.execute(
        contexts_table.insert().values(
            context_id=None,
            description='',
            related_object_id=workflow_id,
            related_object_type='Workflow',
            modified_by_id=1,
            created_at=current_datetime,
            updated_at=current_datetime,
        ))

    # Get the context id
    context = connection.execute(
        select([contexts_table.c.id]).where(
            and_(
                contexts_table.c.related_object_id == workflow_id,
                contexts_table.c.related_object_type == 'Workflow')
        )).fetchone()
    context_id = context.id

    # Move the Workflow into the new context
    op.execute(workflows_table.update().values(context_id=context_id)
               .where(workflows_table.c.id == workflow_id))

  # Now, select *all* workflows, since the rest applies to all equally
  workflows = connection.execute(
      select([workflows_table.c.id, workflows_table.c.context_id]))

  for workflow in workflows:
    workflow_id = workflow.id
    context_id = workflow.context_id

    # Create the Context Implications to/from the Workflow context
    op.execute(context_implications_table.insert().values(
        source_context_id=context_id,
        source_context_scope='Workflow',
        context_id=None,
        context_scope=None,
        modified_by_id=1,
        created_at=current_datetime,
        updated_at=current_datetime
    ))

    op.execute(context_implications_table.insert().values(
        source_context_id=None,
        source_context_scope=None,
        context_id=context_id,
        context_scope='Workflow',
        modified_by_id=1,
        created_at=current_datetime,
        updated_at=current_datetime
    ))

    # Add role assignments for owners and delete the object_owner relationships
    owners = connection.execute(
        select([object_owners_table.c.id, object_owners_table.c.person_id])
        .where(
            and_(
                object_owners_table.c.ownable_id == workflow_id,
                object_owners_table.c.ownable_type == 'Workflow')
        )).fetchall()

    for owner in owners:
      connection.execute(
          user_roles_table.insert().values(
              context_id=context_id,
              role_id=workflow_owner_role.id,
              person_id=owner.person_id,
              modified_by_id=1,
              created_at=current_datetime,
              updated_at=current_datetime,
          ))
      connection.execute(
          object_owners_table.delete().where(
              object_owners_table.c.id == owner.id))

    # Add role assignments for WorkflowPerson objects
    members = connection.execute(
        select([workflow_people_table.c.person_id])
        .where(workflow_people_table.c.workflow_id == workflow_id)
    ).fetchall()

    for member in members:
      connection.execute(
          user_roles_table.insert().values(
              context_id=context_id,
              role_id=workflow_member_role.id,
              person_id=member.person_id,
              modified_by_id=1,
              created_at=current_datetime,
              updated_at=current_datetime,
          ))

    '''
    directly_connected_tables = [
        workflow_objects_table,
        workflow_people_table,
        workflow_tasks_table,
        task_groups_table,
        cycles_table,
        ]

    polymorphically_connected_tables = [
        object_files_table,
        object_folders_table,
        object_owners_table,
        ]

    cycle_connected_tables = [
        cycle_task_groups_table,
        cycle_task_entries_table,
        cycle_task_group_objects_table,
        cycle_task_group_object_tasks_table,
        ]
    '''

    # Update rows for directly-connected tables
    op.execute(workflow_objects_table.update().values(context_id=context_id)
               .where(workflow_objects_table.c.workflow_id == workflow_id))

    op.execute(workflow_people_table.update().values(context_id=context_id)
               .where(workflow_people_table.c.workflow_id == workflow_id))

    op.execute(workflow_tasks_table.update().values(context_id=context_id)
               .where(workflow_tasks_table.c.workflow_id == workflow_id))

    op.execute(task_groups_table.update().values(context_id=context_id)
               .where(task_groups_table.c.workflow_id == workflow_id))

    op.execute(
        task_group_objects_table.update()
        .values(context_id=context_id)
        .where(task_group_objects_table.c.task_group_id.in_(
            select([task_groups_table.c.id])
            .where(task_groups_table.c.workflow_id == workflow_id))))

    op.execute(
        task_group_tasks_table.update()
        .values(context_id=context_id)
        .where(task_group_tasks_table.c.task_group_id.in_(
            select([task_groups_table.c.id])
            .where(task_groups_table.c.workflow_id == workflow_id))))

    op.execute(cycles_table.update().values(context_id=context_id)
               .where(cycles_table.c.workflow_id == workflow_id))

    # Update rows for polymorphically-connected tables
    op.execute(object_files_table.update().values(context_id=context_id)
               .where(
        and_(
            object_files_table.c.fileable_id == workflow_id,
            object_files_table.c.fileable_type == 'Workflow')))

    op.execute(object_folders_table.update().values(context_id=context_id)
               .where(
        and_(
            object_folders_table.c.folderable_id == workflow_id,
            object_folders_table.c.folderable_type == 'Workflow')))

    # Update rows for cycle-connected tables
    op.execute(
        cycle_task_entries_table.update()
        .values(context_id=context_id)
        .where(cycle_task_entries_table.c.cycle_id.in_(
            select([cycles_table.c.id])
            .where(cycles_table.c.workflow_id == workflow_id))))

    op.execute(
        cycle_task_groups_table.update()
        .values(context_id=context_id)
        .where(cycle_task_groups_table.c.cycle_id.in_(
            select([cycles_table.c.id])
            .where(cycles_table.c.workflow_id == workflow_id))))

    op.execute(
        cycle_task_group_objects_table.update()
        .values(context_id=context_id)
        .where(cycle_task_group_objects_table.c.cycle_id.in_(
            select([cycles_table.c.id])
            .where(cycles_table.c.workflow_id == workflow_id))))

    op.execute(
        cycle_task_group_object_tasks_table.update()
        .values(context_id=context_id)
        .where(cycle_task_group_object_tasks_table.c.cycle_id.in_(
            select([cycles_table.c.id])
            .where(cycles_table.c.workflow_id == workflow_id))))

Example 60

Project: bitex Source File: main.py
Function: on_message
    def on_message(self, raw_message):
        if self.honey_pot_connection:
            self.application.log('INFO', "HONEY_POT", raw_message )

        if self.trade_client is None or not self.trade_client.isConnected():
            return

        self.last_message_datetime.append(datetime.now())
        message_time_last_second = self.last_message_datetime[-1] - timedelta(seconds=1)
        for x in xrange(0, len(self.last_message_datetime)):
            if self.last_message_datetime[x] > message_time_last_second:
                self.last_message_datetime = self.last_message_datetime[x:]
                break
        if len(self.last_message_datetime) > 15:  # higher than 15 messages per second
            self.application.log("ERROR",
                                 "TOO_MANY_MESSAGES",
                                 "Exceed 15 messages per second. [ip=" + self.remote_ip + ",'" + raw_message + "']")
            self.write_message(
                '{"MsgType":"ERROR", "Description":"Too many messages per second", "Detail": "16 messages in the last second"}')
            self.application.unregister_connection(self)
            self.trade_client.close()
            self.close()
            return

        try:
            req_msg = JsonMessage(raw_message)
        except InvalidMessageException as e:
            self.write_message(
                '{"MsgType":"ERROR", "Description":"Invalid message", "Detail": "' +
                str(e) +
                '"}')
            self.application.unregister_connection(self)
            self.trade_client.close()
            self.close()
            return

        req_msg.set('RemoteIP' ,self.remote_ip)

        if req_msg.isUserRequest():
            if req_msg.has('Password'):
                raw_message = raw_message.replace(req_msg.get('Password'), '*')
            if req_msg.has('NewPassword'):
                raw_message = raw_message.replace(req_msg.get('NewPassword'), '*')
            self.application.log('IN', self.trade_client.connection_id ,raw_message )



        if req_msg.isTestRequest() or req_msg.isHeartbeat():
            dt = datetime.now()
            response_msg = {
                'MsgType'           : '0',
                'TestReqID'         : req_msg.get('TestReqID'),
                'ServerTimestamp'   : int(mktime(dt.timetuple()) + dt.microsecond/1000.0 )
            }

            sendTime = req_msg.get('SendTime')
            if sendTime:
                response_msg['SendTime'] = sendTime


            self.write_message(str(json.dumps(response_msg, cls=JsonEncoder)))
            return


        if req_msg.isTradeHistoryRequest():  # Trade History request
            self.on_trade_history_request(req_msg)
            return

        if req_msg.isMarketDataRequest():  # Market Data Request
            self.on_market_data_request(req_msg)

            if not self.trade_client.isConnected():
                self.application.log('DEBUG', self.trade_client.connection_id, 'not self.trade_client.isConnected()' )
                self.application.unregister_connection(self)
                self.trade_client.close()
                self.close()
            return

        if req_msg.isSecurityStatusRequest():
            self.on_security_status_request(req_msg)
            return

        if req_msg.isDepositRequest():
            if not req_msg.get('DepositMethodID') and not req_msg.get('DepositID'):

                currency = req_msg.get('Currency')

                secret = uuid.uuid4().hex
                callback_url = self.application.options.callback_url + secret

                hot_wallet  = self.get_broker_wallet('hot', currency)
                cold_wallet = self.get_broker_wallet('cold', currency)
                if not hot_wallet and not cold_wallet:
                    return

                if not hot_wallet and cold_wallet:
                    dest_wallet = cold_wallet
                elif hot_wallet and not cold_wallet:
                    dest_wallet = hot_wallet
                else:
                    # 62.5% of all deposits go to the cold wallet, and 37.5% go to the hot wallet
                    dest_wallet = hot_wallet
                    if secret[0] in ('0','1','2','3','4','5','6','7','8','9'):
                        dest_wallet = cold_wallet

                if not dest_wallet:
                    return

                parameters = urllib.urlencode({
                    'method': 'create',
                    'address': dest_wallet,
                    'callback': callback_url,
                    'currency': currency
                })

                try:
                    url_payment_processor = self.application.options.url_payment_processor + '?' + parameters
                    self.application.log('DEBUG', self.trade_client.connection_id, "invoking..."  + url_payment_processor )
                    response = urllib2.urlopen(url_payment_processor)
                    data = json.load(response)
                    self.application.log('DEBUG', self.trade_client.connection_id, str(data) )

                    req_msg.set('InputAddress', data['input_address'])
                    req_msg.set('Destination', data['destination'])
                    req_msg.set('Secret', secret)
                except urllib2.HTTPError as e:
                    out_message = json.dumps({
                      'MsgType': 'ERROR',
                      'ReqID': req_msg.get('DepositReqID'),
                      'Description': 'Blockchain.info is not available at this moment, please try again within few minutes',
                      'Detail': str(e)
                    })
                    self.write_message(out_message)
                    return
                except Exception as e:
                    out_message = json.dumps({
                      'MsgType': 'ERROR',
                      'ReqID': req_msg.get('DepositReqID'),
                      'Description': 'Error retrieving a new deposit address from Blockchain.info. Please, try again',
                      'Detail': str(e)
                    })
                    self.write_message(out_message)
                    return

        try:
            resp_message = self.trade_client.sendMessage(req_msg)
            if resp_message:
                self.write_message(resp_message.raw_message)

            if resp_message and resp_message.isUserResponse():
                self.user_response = resp_message
                if self.is_user_logged():
                    self.application.log('LOGIN_OK', self.trade_client.connection_id, raw_message )
                    #TODO: Request open order list 
                    #self.trade_client.

 
                else:
                    self.application.log('LOGIN_FAILED', self.trade_client.connection_id, raw_message )


            if not self.trade_client.isConnected():
                self.application.log('DEBUG', self.trade_client.connection_id, 'not self.trade_client.isConnected()' )
                self.application.unregister_connection(self)
                self.trade_client.close()
                self.close()
        except TradeClientException as e:
            exception_message = {
                'MsgType': 'ERROR',
                'Description': 'Invalid message',
                'Detail': str(e)
            }
            self.write_message(json.dumps(exception_message))
            self.application.unregister_connection(self)
            self.trade_client.close()
            self.close()
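
The handler above throttles clients by appending datetime.now() for every message, discarding entries older than one second, and disconnecting once more than 15 remain. A minimal sketch of that sliding-window check, factored into a hypothetical Throttle class (a deque or a monotonic clock would be more robust; this just mirrors the handler's structure):

import datetime

class Throttle(object):
    def __init__(self, limit=15):
        self.limit = limit
        self.stamps = []

    def allow(self):
        # Keep only timestamps from the last second, then count them.
        now = datetime.datetime.now()
        cutoff = now - datetime.timedelta(seconds=1)
        self.stamps = [t for t in self.stamps if t > cutoff]
        self.stamps.append(now)
        return len(self.stamps) <= self.limit

throttle = Throttle()
print(all(throttle.allow() for _ in range(15)))  # True: within the limit
print(throttle.allow())                          # False: 16th message in one second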

Example 61

Project: django-compositepks Source File: filters.py
Function: get_filter_tests
def get_filter_tests():
    now = datetime.now()
    now_tz = datetime.now(LocalTimezone(now))
    now_tz_i = datetime.now(FixedOffset((3 * 60) + 15)) # imaginary time zone
    return {
        # Default compare with datetime.now()
        'filter-timesince01' : ('{{ a|timesince }}', {'a': datetime.now() + timedelta(minutes=-1, seconds = -10)}, '1 minute'),
        'filter-timesince02' : ('{{ a|timesince }}', {'a': datetime.now() - timedelta(days=1, minutes = 1)}, '1 day'),
        'filter-timesince03' : ('{{ a|timesince }}', {'a': datetime.now() - timedelta(hours=1, minutes=25, seconds = 10)}, '1 hour, 25 minutes'),

        # Compare to a given parameter
        'filter-timesince04' : ('{{ a|timesince:b }}', {'a':now - timedelta(days=2), 'b':now - timedelta(days=1)}, '1 day'),
        'filter-timesince05' : ('{{ a|timesince:b }}', {'a':now - timedelta(days=2, minutes=1), 'b':now - timedelta(days=2)}, '1 minute'),

        # Check that timezone is respected
        'filter-timesince06' : ('{{ a|timesince:b }}', {'a':now_tz - timedelta(hours=8), 'b':now_tz}, '8 hours'),

        # Regression for #7443
        'filter-timesince07': ('{{ earlier|timesince }}', { 'earlier': now - timedelta(days=7) }, '1 week'),
        'filter-timesince08': ('{{ earlier|timesince:now }}', { 'now': now, 'earlier': now - timedelta(days=7) }, '1 week'),
        'filter-timesince09': ('{{ later|timesince }}', { 'later': now + timedelta(days=7) }, '0 minutes'),
        'filter-timesince10': ('{{ later|timesince:now }}', { 'now': now, 'later': now + timedelta(days=7) }, '0 minutes'),

        # Ensures that differing timezones are calculated correctly
        'filter-timesince11' : ('{{ a|timesince }}', {'a': now}, '0 minutes'),
        'filter-timesince12' : ('{{ a|timesince }}', {'a': now_tz}, '0 minutes'),
        'filter-timesince13' : ('{{ a|timesince }}', {'a': now_tz_i}, '0 minutes'),
        'filter-timesince14' : ('{{ a|timesince:b }}', {'a': now_tz, 'b': now_tz_i}, '0 minutes'),
        'filter-timesince15' : ('{{ a|timesince:b }}', {'a': now, 'b': now_tz_i}, ''),
        'filter-timesince16' : ('{{ a|timesince:b }}', {'a': now_tz_i, 'b': now}, ''),

        # Default compare with datetime.now()
        'filter-timeuntil01' : ('{{ a|timeuntil }}', {'a':datetime.now() + timedelta(minutes=2, seconds = 10)}, '2 minutes'),
        'filter-timeuntil02' : ('{{ a|timeuntil }}', {'a':(datetime.now() + timedelta(days=1, seconds = 10))}, '1 day'),
        'filter-timeuntil03' : ('{{ a|timeuntil }}', {'a':(datetime.now() + timedelta(hours=8, minutes=10, seconds = 10))}, '8 hours, 10 minutes'),

        # Compare to a given parameter
        'filter-timeuntil04' : ('{{ a|timeuntil:b }}', {'a':now - timedelta(days=1), 'b':now - timedelta(days=2)}, '1 day'),
        'filter-timeuntil05' : ('{{ a|timeuntil:b }}', {'a':now - timedelta(days=2), 'b':now - timedelta(days=2, minutes=1)}, '1 minute'),

        # Regression for #7443
        'filter-timeuntil06': ('{{ earlier|timeuntil }}', { 'earlier': now - timedelta(days=7) }, '0 minutes'),
        'filter-timeuntil07': ('{{ earlier|timeuntil:now }}', { 'now': now, 'earlier': now - timedelta(days=7) }, '0 minutes'),
        'filter-timeuntil08': ('{{ later|timeuntil }}', { 'later': now + timedelta(days=7, hours=1) }, '1 week'),
        'filter-timeuntil09': ('{{ later|timeuntil:now }}', { 'now': now, 'later': now + timedelta(days=7) }, '1 week'),

        # Ensures that differing timezones are calculated correctly
        'filter-timeuntil10' : ('{{ a|timeuntil }}', {'a': now_tz_i}, '0 minutes'),
        'filter-timeuntil11' : ('{{ a|timeuntil:b }}', {'a': now_tz_i, 'b': now_tz}, '0 minutes'),

        'filter-addslash01': ("{% autoescape off %}{{ a|addslashes }} {{ b|addslashes }}{% endautoescape %}", {"a": "<a>'", "b": mark_safe("<a>'")}, ur"<a>\' <a>\'"),
        'filter-addslash02': ("{{ a|addslashes }} {{ b|addslashes }}", {"a": "<a>'", "b": mark_safe("<a>'")}, ur"<a>\&#39; <a>\'"),

        'filter-capfirst01': ("{% autoescape off %}{{ a|capfirst }} {{ b|capfirst }}{% endautoescape %}", {"a": "fred>", "b": mark_safe("fred>")}, u"Fred> Fred>"),
        'filter-capfirst02': ("{{ a|capfirst }} {{ b|capfirst }}", {"a": "fred>", "b": mark_safe("fred>")}, u"Fred> Fred>"),

        # Note that applying fix_ampersands in autoescape mode leads to
        # double escaping.
        'filter-fix_ampersands01': ("{% autoescape off %}{{ a|fix_ampersands }} {{ b|fix_ampersands }}{% endautoescape %}", {"a": "a&b", "b": mark_safe("a&b")}, u"a&amp;b a&amp;b"),
        'filter-fix_ampersands02': ("{{ a|fix_ampersands }} {{ b|fix_ampersands }}", {"a": "a&b", "b": mark_safe("a&b")}, u"a&amp;amp;b a&amp;b"),

        'filter-floatformat01': ("{% autoescape off %}{{ a|floatformat }} {{ b|floatformat }}{% endautoescape %}", {"a": "1.42", "b": mark_safe("1.42")}, u"1.4 1.4"),
        'filter-floatformat02': ("{{ a|floatformat }} {{ b|floatformat }}", {"a": "1.42", "b": mark_safe("1.42")}, u"1.4 1.4"),

        # The contents of "linenumbers" is escaped according to the current
        # autoescape setting.
        'filter-linenumbers01': ("{{ a|linenumbers }} {{ b|linenumbers }}", {"a": "one\n<two>\nthree", "b": mark_safe("one\n<two>\nthree")}, u"1. one\n2. <two>\n3. three 1. one\n2. <two>\n3. three"),
        'filter-linenumbers02': ("{% autoescape off %}{{ a|linenumbers }} {{ b|linenumbers }}{% endautoescape %}", {"a": "one\n<two>\nthree", "b": mark_safe("one\n<two>\nthree")}, u"1. one\n2. <two>\n3. three 1. one\n2. <two>\n3. three"),

        'filter-lower01': ("{% autoescape off %}{{ a|lower }} {{ b|lower }}{% endautoescape %}", {"a": "Apple & banana", "b": mark_safe("Apple &amp; banana")}, u"apple & banana apple &amp; banana"),
        'filter-lower02': ("{{ a|lower }} {{ b|lower }}", {"a": "Apple & banana", "b": mark_safe("Apple &amp; banana")}, u"apple &amp; banana apple &amp; banana"),

        # The make_list filter can destroy existing escaping, so the results are
        # escaped.
        'filter-make_list01': ("{% autoescape off %}{{ a|make_list }}{% endautoescape %}", {"a": mark_safe("&")}, u"[u'&']"),
        'filter-make_list02': ("{{ a|make_list }}", {"a": mark_safe("&")}, u"[u&#39;&amp;&#39;]"),
        'filter-make_list03': ('{% autoescape off %}{{ a|make_list|stringformat:"s"|safe }}{% endautoescape %}', {"a": mark_safe("&")}, u"[u'&']"),
        'filter-make_list04': ('{{ a|make_list|stringformat:"s"|safe }}', {"a": mark_safe("&")}, u"[u'&']"),

        # Running slugify on a pre-escaped string leads to odd behaviour,
        # but the result is still safe.
        'filter-slugify01': ("{% autoescape off %}{{ a|slugify }} {{ b|slugify }}{% endautoescape %}", {"a": "a & b", "b": mark_safe("a &amp; b")}, u"a-b a-amp-b"),
        'filter-slugify02': ("{{ a|slugify }} {{ b|slugify }}", {"a": "a & b", "b": mark_safe("a &amp; b")}, u"a-b a-amp-b"),

        # Notice that escaping is applied *after* any filters, so the string
        # formatting here only needs to deal with pre-escaped characters.
        'filter-stringformat01': ('{% autoescape off %}.{{ a|stringformat:"5s" }}. .{{ b|stringformat:"5s" }}.{% endautoescape %}', {"a": "a<b", "b": mark_safe("a<b")}, u".  a<b. .  a<b."),
        'filter-stringformat02': ('.{{ a|stringformat:"5s" }}. .{{ b|stringformat:"5s" }}.', {"a": "a<b", "b": mark_safe("a<b")}, u".  a<b. .  a<b."),

        # XXX No test for "title" filter; needs an actual object.

        'filter-truncatewords01': ('{% autoescape off %}{{ a|truncatewords:"2" }} {{ b|truncatewords:"2"}}{% endautoescape %}', {"a": "alpha & bravo", "b": mark_safe("alpha &amp; bravo")}, u"alpha & ... alpha &amp; ..."),
        'filter-truncatewords02': ('{{ a|truncatewords:"2" }} {{ b|truncatewords:"2"}}', {"a": "alpha & bravo", "b": mark_safe("alpha &amp; bravo")}, u"alpha &amp; ... alpha &amp; ..."),

        # The "upper" filter messes up entities (which are case-sensitive),
        # so it's not safe for non-escaping purposes.
        'filter-upper01': ('{% autoescape off %}{{ a|upper }} {{ b|upper }}{% endautoescape %}', {"a": "a & b", "b": mark_safe("a &amp; b")}, u"A & B A &AMP; B"),
        'filter-upper02': ('{{ a|upper }} {{ b|upper }}', {"a": "a & b", "b": mark_safe("a &amp; b")}, u"A &amp; B A &amp;AMP; B"),

        'filter-urlize01': ('{% autoescape off %}{{ a|urlize }} {{ b|urlize }}{% endautoescape %}', {"a": "http://example.com/?x=&y=", "b": mark_safe("http://example.com?x=&amp;y=")}, u'<a href="http://example.com/?x=&y=" rel="nofollow">http://example.com/?x=&y=</a> <a href="http://example.com?x=&amp;y=" rel="nofollow">http://example.com?x=&amp;y=</a>'),
        'filter-urlize02': ('{{ a|urlize }} {{ b|urlize }}', {"a": "http://example.com/?x=&y=", "b": mark_safe("http://example.com?x=&amp;y=")}, u'<a href="http://example.com/?x=&amp;y=" rel="nofollow">http://example.com/?x=&amp;y=</a> <a href="http://example.com?x=&amp;y=" rel="nofollow">http://example.com?x=&amp;y=</a>'),
        'filter-urlize03': ('{% autoescape off %}{{ a|urlize }}{% endautoescape %}', {"a": mark_safe("a &amp; b")}, 'a &amp; b'),
        'filter-urlize04': ('{{ a|urlize }}', {"a": mark_safe("a &amp; b")}, 'a &amp; b'),

        # This will lead to a nonsense result, but at least it won't be
        # exploitable for XSS purposes when auto-escaping is on.
        'filter-urlize05': ('{% autoescape off %}{{ a|urlize }}{% endautoescape %}', {"a": "<script>alert('foo')</script>"}, "<script>alert('foo')</script>"),
        'filter-urlize06': ('{{ a|urlize }}', {"a": "<script>alert('foo')</script>"}, '<script>alert(&#39;foo&#39;)</script>'),

        # mailto: testing for urlize
        'filter-urlize07': ('{{ a|urlize }}', {"a": "Email me at [email protected]"}, 'Email me at <a href="mailto:[email protected]">[email protected]</a>'),
        'filter-urlize08': ('{{ a|urlize }}', {"a": "Email me at <[email protected]>"}, 'Email me at <<a href="mailto:[email protected]">[email protected]</a>>'),

        'filter-urlizetrunc01': ('{% autoescape off %}{{ a|urlizetrunc:"8" }} {{ b|urlizetrunc:"8" }}{% endautoescape %}', {"a": '"Unsafe" http://example.com/x=&y=', "b": mark_safe('&quot;Safe&quot; http://example.com?x=&amp;y=')}, u'"Unsafe" <a href="http://example.com/x=&y=" rel="nofollow">http:...</a> &quot;Safe&quot; <a href="http://example.com?x=&amp;y=" rel="nofollow">http:...</a>'),
        'filter-urlizetrunc02': ('{{ a|urlizetrunc:"8" }} {{ b|urlizetrunc:"8" }}', {"a": '"Unsafe" http://example.com/x=&y=', "b": mark_safe('&quot;Safe&quot; http://example.com?x=&amp;y=')}, u'&quot;Unsafe&quot; <a href="http://example.com/x=&amp;y=" rel="nofollow">http:...</a> &quot;Safe&quot; <a href="http://example.com?x=&amp;y=" rel="nofollow">http:...</a>'),

        'filter-wordcount01': ('{% autoescape off %}{{ a|wordcount }} {{ b|wordcount }}{% endautoescape %}', {"a": "a & b", "b": mark_safe("a &amp; b")}, "3 3"),
        'filter-wordcount02': ('{{ a|wordcount }} {{ b|wordcount }}', {"a": "a & b", "b": mark_safe("a &amp; b")}, "3 3"),

        'filter-wordwrap01': ('{% autoescape off %}{{ a|wordwrap:"3" }} {{ b|wordwrap:"3" }}{% endautoescape %}', {"a": "a & b", "b": mark_safe("a & b")}, u"a &\nb a &\nb"),
        'filter-wordwrap02': ('{{ a|wordwrap:"3" }} {{ b|wordwrap:"3" }}', {"a": "a & b", "b": mark_safe("a & b")}, u"a &amp;\nb a &\nb"),

        'filter-ljust01': ('{% autoescape off %}.{{ a|ljust:"5" }}. .{{ b|ljust:"5" }}.{% endautoescape %}', {"a": "a&b", "b": mark_safe("a&b")}, u".a&b  . .a&b  ."),
        'filter-ljust02': ('.{{ a|ljust:"5" }}. .{{ b|ljust:"5" }}.', {"a": "a&b", "b": mark_safe("a&b")}, u".a&amp;b  . .a&b  ."),

        'filter-rjust01': ('{% autoescape off %}.{{ a|rjust:"5" }}. .{{ b|rjust:"5" }}.{% endautoescape %}', {"a": "a&b", "b": mark_safe("a&b")}, u".  a&b. .  a&b."),
        'filter-rjust02': ('.{{ a|rjust:"5" }}. .{{ b|rjust:"5" }}.', {"a": "a&b", "b": mark_safe("a&b")}, u".  a&amp;b. .  a&b."),

        'filter-center01': ('{% autoescape off %}.{{ a|center:"5" }}. .{{ b|center:"5" }}.{% endautoescape %}', {"a": "a&b", "b": mark_safe("a&b")}, u". a&b . . a&b ."),
        'filter-center02': ('.{{ a|center:"5" }}. .{{ b|center:"5" }}.', {"a": "a&b", "b": mark_safe("a&b")}, u". a&amp;b . . a&b ."),

        'filter-cut01': ('{% autoescape off %}{{ a|cut:"x" }} {{ b|cut:"x" }}{% endautoescape %}', {"a": "x&y", "b": mark_safe("x&amp;y")}, u"&y &amp;y"),
        'filter-cut02': ('{{ a|cut:"x" }} {{ b|cut:"x" }}', {"a": "x&y", "b": mark_safe("x&amp;y")}, u"&amp;y &amp;y"),
        'filter-cut03': ('{% autoescape off %}{{ a|cut:"&" }} {{ b|cut:"&" }}{% endautoescape %}', {"a": "x&y", "b": mark_safe("x&amp;y")}, u"xy xamp;y"),
        'filter-cut04': ('{{ a|cut:"&" }} {{ b|cut:"&" }}', {"a": "x&y", "b": mark_safe("x&amp;y")}, u"xy xamp;y"),
        # Passing ';' to cut can break existing HTML entities, so those strings
        # are auto-escaped.
        'filter-cut05': ('{% autoescape off %}{{ a|cut:";" }} {{ b|cut:";" }}{% endautoescape %}', {"a": "x&y", "b": mark_safe("x&amp;y")}, u"x&y x&ampy"),
        'filter-cut06': ('{{ a|cut:";" }} {{ b|cut:";" }}', {"a": "x&y", "b": mark_safe("x&amp;y")}, u"x&amp;y x&amp;ampy"),

        # The "escape" filter works the same whether autoescape is on or off,
        # but it has no effect on strings already marked as safe.
        'filter-escape01': ('{{ a|escape }} {{ b|escape }}', {"a": "x&y", "b": mark_safe("x&y")}, u"x&amp;y x&y"),
        'filter-escape02': ('{% autoescape off %}{{ a|escape }} {{ b|escape }}{% endautoescape %}', {"a": "x&y", "b": mark_safe("x&y")}, "x&amp;y x&y"),

        # It is only applied once, regardless of the number of times it
        # appears in a chain.
        'filter-escape03': ('{% autoescape off %}{{ a|escape|escape }}{% endautoescape %}', {"a": "x&y"}, u"x&amp;y"),
        'filter-escape04': ('{{ a|escape|escape }}', {"a": "x&y"}, u"x&amp;y"),

        # Force_escape is applied immediately. It can be used to provide
        # double-escaping, for example.
        'filter-force-escape01': ('{% autoescape off %}{{ a|force_escape }}{% endautoescape %}', {"a": "x&y"}, u"x&amp;y"),
        'filter-force-escape02': ('{{ a|force_escape }}', {"a": "x&y"}, u"x&amp;y"),
        'filter-force-escape03': ('{% autoescape off %}{{ a|force_escape|force_escape }}{% endautoescape %}', {"a": "x&y"}, u"x&amp;amp;y"),
        'filter-force-escape04': ('{{ a|force_escape|force_escape }}', {"a": "x&y"}, u"x&amp;amp;y"),

        # Because the result of force_escape is "safe", an additional
        # escape filter has no effect.
        'filter-force-escape05': ('{% autoescape off %}{{ a|force_escape|escape }}{% endautoescape %}', {"a": "x&y"}, u"x&amp;y"),
        'filter-force-escape06': ('{{ a|force_escape|escape }}', {"a": "x&y"}, u"x&amp;y"),
        'filter-force-escape07': ('{% autoescape off %}{{ a|escape|force_escape }}{% endautoescape %}', {"a": "x&y"}, u"x&amp;y"),
        'filter-force-escape08': ('{{ a|escape|force_escape }}', {"a": "x&y"}, u"x&amp;y"),

        # The contents in "linebreaks" and "linebreaksbr" are escaped
        # according to the current autoescape setting.
        'filter-linebreaks01': ('{{ a|linebreaks }} {{ b|linebreaks }}', {"a": "x&\ny", "b": mark_safe("x&\ny")}, u"<p>x&amp;<br />y</p> <p>x&<br />y</p>"),
        'filter-linebreaks02': ('{% autoescape off %}{{ a|linebreaks }} {{ b|linebreaks }}{% endautoescape %}', {"a": "x&\ny", "b": mark_safe("x&\ny")}, u"<p>x&<br />y</p> <p>x&<br />y</p>"),

        'filter-linebreaksbr01': ('{{ a|linebreaksbr }} {{ b|linebreaksbr }}', {"a": "x&\ny", "b": mark_safe("x&\ny")}, u"x&amp;<br />y x&<br />y"),
        'filter-linebreaksbr02': ('{% autoescape off %}{{ a|linebreaksbr }} {{ b|linebreaksbr }}{% endautoescape %}', {"a": "x&\ny", "b": mark_safe("x&\ny")}, u"x&<br />y x&<br />y"),

        'filter-safe01': ("{{ a }} -- {{ a|safe }}", {"a": u"<b>hello</b>"}, "<b>hello</b> -- <b>hello</b>"),
        'filter-safe02': ("{% autoescape off %}{{ a }} -- {{ a|safe }}{% endautoescape %}", {"a": "<b>hello</b>"}, u"<b>hello</b> -- <b>hello</b>"),

        'filter-removetags01': ('{{ a|removetags:"a b" }} {{ b|removetags:"a b" }}', {"a": "<a>x</a> <p><b>y</b></p>", "b": mark_safe("<a>x</a> <p><b>y</b></p>")}, u"x <p>y</p> x <p>y</p>"),
        'filter-removetags02': ('{% autoescape off %}{{ a|removetags:"a b" }} {{ b|removetags:"a b" }}{% endautoescape %}', {"a": "<a>x</a> <p><b>y</b></p>", "b": mark_safe("<a>x</a> <p><b>y</b></p>")}, u"x <p>y</p> x <p>y</p>"),

        'filter-striptags01': ('{{ a|striptags }} {{ b|striptags }}', {"a": "<a>x</a> <p><b>y</b></p>", "b": mark_safe("<a>x</a> <p><b>y</b></p>")}, "x y x y"),
        'filter-striptags02': ('{% autoescape off %}{{ a|striptags }} {{ b|striptags }}{% endautoescape %}', {"a": "<a>x</a> <p><b>y</b></p>", "b": mark_safe("<a>x</a> <p><b>y</b></p>")}, "x y x y"),

        'filter-first01': ('{{ a|first }} {{ b|first }}', {"a": ["a&b", "x"], "b": [mark_safe("a&b"), "x"]}, "a&amp;b a&b"),
        'filter-first02': ('{% autoescape off %}{{ a|first }} {{ b|first }}{% endautoescape %}', {"a": ["a&b", "x"], "b": [mark_safe("a&b"), "x"]}, "a&b a&b"),

        'filter-last01': ('{{ a|last }} {{ b|last }}', {"a": ["x", "a&b"], "b": ["x", mark_safe("a&b")]}, "a&amp;b a&b"),
        'filter-last02': ('{% autoescape off %}{{ a|last }} {{ b|last }}{% endautoescape %}', {"a": ["x", "a&b"], "b": ["x", mark_safe("a&b")]}, "a&b a&b"),

        'filter-random01': ('{{ a|random }} {{ b|random }}', {"a": ["a&b", "a&b"], "b": [mark_safe("a&b"), mark_safe("a&b")]}, "a&amp;b a&b"),
        'filter-random02': ('{% autoescape off %}{{ a|random }} {{ b|random }}{% endautoescape %}', {"a": ["a&b", "a&b"], "b": [mark_safe("a&b"), mark_safe("a&b")]}, "a&b a&b"),

        'filter-slice01': ('{{ a|slice:"1:3" }} {{ b|slice:"1:3" }}', {"a": "a&b", "b": mark_safe("a&b")}, "&amp;b &b"),
        'filter-slice02': ('{% autoescape off %}{{ a|slice:"1:3" }} {{ b|slice:"1:3" }}{% endautoescape %}', {"a": "a&b", "b": mark_safe("a&b")}, "&b &b"),

        'filter-unordered_list01': ('{{ a|unordered_list }}', {"a": ["x>", [["<y", []]]]}, "\t<li>x>\n\t<ul>\n\t\t<li><y</li>\n\t</ul>\n\t</li>"),
        'filter-unordered_list02': ('{% autoescape off %}{{ a|unordered_list }}{% endautoescape %}', {"a": ["x>", [["<y", []]]]}, "\t<li>x>\n\t<ul>\n\t\t<li><y</li>\n\t</ul>\n\t</li>"),
        'filter-unordered_list03': ('{{ a|unordered_list }}', {"a": ["x>", [[mark_safe("<y"), []]]]}, "\t<li>x>\n\t<ul>\n\t\t<li><y</li>\n\t</ul>\n\t</li>"),
        'filter-unordered_list04': ('{% autoescape off %}{{ a|unordered_list }}{% endautoescape %}', {"a": ["x>", [[mark_safe("<y"), []]]]}, "\t<li>x>\n\t<ul>\n\t\t<li><y</li>\n\t</ul>\n\t</li>"),
        'filter-unordered_list05': ('{% autoescape off %}{{ a|unordered_list }}{% endautoescape %}', {"a": ["x>", [["<y", []]]]}, "\t<li>x>\n\t<ul>\n\t\t<li><y</li>\n\t</ul>\n\t</li>"),

        # Literal string arguments to the default filter are always treated as
        # safe strings, regardless of the auto-escaping state.
        #
        # Note: we have to use {"a": ""} here, otherwise the invalid template
        # variable string interferes with the test result.
        'filter-default01': ('{{ a|default:"x<" }}', {"a": ""}, "x<"),
        'filter-default02': ('{% autoescape off %}{{ a|default:"x<" }}{% endautoescape %}', {"a": ""}, "x<"),
        'filter-default03': ('{{ a|default:"x<" }}', {"a": mark_safe("x>")}, "x>"),
        'filter-default04': ('{% autoescape off %}{{ a|default:"x<" }}{% endautoescape %}', {"a": mark_safe("x>")}, "x>"),

        'filter-default_if_none01': ('{{ a|default:"x<" }}', {"a": None}, "x<"),
        'filter-default_if_none02': ('{% autoescape off %}{{ a|default:"x<" }}{% endautoescape %}', {"a": None}, "x<"),

        'filter-phone2numeric01': ('{{ a|phone2numeric }} {{ b|phone2numeric }}', {"a": "<1-800-call-me>", "b": mark_safe("<1-800-call-me>") }, "<1-800-2255-63> <1-800-2255-63>"),
        'filter-phone2numeric02': ('{% autoescape off %}{{ a|phone2numeric }} {{ b|phone2numeric }}{% endautoescape %}', {"a": "<1-800-call-me>", "b": mark_safe("<1-800-call-me>") }, "<1-800-2255-63> <1-800-2255-63>"),

        # Ensure iriencode keeps safe strings:
        'filter-iriencode01': ('{{ url|iriencode }}', {'url': '?test=1&me=2'}, '?test=1&amp;me=2'),
        'filter-iriencode02': ('{% autoescape off %}{{ url|iriencode }}{% endautoescape %}', {'url': '?test=1&me=2'}, '?test=1&me=2'),
        'filter-iriencode03': ('{{ url|iriencode }}', {'url': mark_safe('?test=1&me=2')}, '?test=1&me=2'),
        'filter-iriencode04': ('{% autoescape off %}{{ url|iriencode }}{% endautoescape %}', {'url': mark_safe('?test=1&me=2')}, '?test=1&me=2'),

        # Chaining a bunch of safeness-preserving filters should not alter
        # the safe status either way.
        'chaining01': ('{{ a|capfirst|center:"7" }}.{{ b|capfirst|center:"7" }}', {"a": "a < b", "b": mark_safe("a < b")}, " A < b . A < b "),
        'chaining02': ('{% autoescape off %}{{ a|capfirst|center:"7" }}.{{ b|capfirst|center:"7" }}{% endautoescape %}', {"a": "a < b", "b": mark_safe("a < b")}, " A < b . A < b "),

        # Using a filter that forces a string back to unsafe:
        'chaining03': ('{{ a|cut:"b"|capfirst }}.{{ b|cut:"b"|capfirst }}', {"a": "a < b", "b": mark_safe("a < b")}, "A < .A < "),
        'chaining04': ('{% autoescape off %}{{ a|cut:"b"|capfirst }}.{{ b|cut:"b"|capfirst }}{% endautoescape %}', {"a": "a < b", "b": mark_safe("a < b")}, "A < .A < "),

        # Using a filter that forces safeness does not lead to double-escaping
        'chaining05': ('{{ a|escape|capfirst }}', {"a": "a < b"}, "A < b"),
        'chaining06': ('{% autoescape off %}{{ a|escape|capfirst }}{% endautoescape %}', {"a": "a < b"}, "A < b"),

        # Force to safe, then back (also showing why using force_escape too
        # early in a chain can lead to unexpected results).
        'chaining07': ('{{ a|force_escape|cut:";" }}', {"a": "a < b"}, "a &amp;lt b"),
        'chaining08': ('{% autoescape off %}{{ a|force_escape|cut:";" }}{% endautoescape %}', {"a": "a < b"}, "a &lt b"),
        'chaining09': ('{{ a|cut:";"|force_escape }}', {"a": "a < b"}, "a < b"),
        'chaining10': ('{% autoescape off %}{{ a|cut:";"|force_escape }}{% endautoescape %}', {"a": "a < b"}, "a < b"),
        'chaining11': ('{{ a|cut:"b"|safe }}', {"a": "a < b"}, "a < "),
        'chaining12': ('{% autoescape off %}{{ a|cut:"b"|safe }}{% endautoescape %}', {"a": "a < b"}, "a < "),
        'chaining13': ('{{ a|safe|force_escape }}', {"a": "a < b"}, "a < b"),
        'chaining14': ('{% autoescape off %}{{ a|safe|force_escape }}{% endautoescape %}', {"a": "a < b"}, "a < b"),

        # Filters decorated with stringfilter still respect is_safe.
        'autoescape-stringfilter01': (r'{{ unsafe|capfirst }}', {'unsafe': UnsafeClass()}, 'You &amp; me'),
        'autoescape-stringfilter02': (r'{% autoescape off %}{{ unsafe|capfirst }}{% endautoescape %}', {'unsafe': UnsafeClass()}, 'You & me'),
        'autoescape-stringfilter03': (r'{{ safe|capfirst }}', {'safe': SafeClass()}, 'You > me'),
        'autoescape-stringfilter04': (r'{% autoescape off %}{{ safe|capfirst }}{% endautoescape %}', {'safe': SafeClass()}, 'You > me'),

        'escapejs01': (r'{{ a|escapejs }}', {'a': 'testing\r\njavascript \'string" <b>escaping</b>'}, 'testing\\x0D\\x0Ajavascript \\x27string\\x22 \\x3Cb\\x3Eescaping\\x3C/b\\x3E'),
        'escapejs02': (r'{% autoescape off %}{{ a|escapejs }}{% endautoescape %}', {'a': 'testing\r\njavascript \'string" <b>escaping</b>'}, 'testing\\x0D\\x0Ajavascript \\x27string\\x22 \\x3Cb\\x3Eescaping\\x3C/b\\x3E'),

        # Boolean return value from length_is should not be coerced to a string
        'lengthis01': (r'{% if "X"|length_is:0 %}Length is 0{% else %}Length not 0{% endif %}', {}, 'Length not 0'),
        'lengthis02': (r'{% if "X"|length_is:1 %}Length is 1{% else %}Length not 1{% endif %}', {}, 'Length is 1'),

        'join01': (r'{{ a|join:", " }}', {'a': ['alpha', 'beta & me']}, 'alpha, beta &amp; me'),
        'join02': (r'{% autoescape off %}{{ a|join:", " }}{% endautoescape %}', {'a': ['alpha', 'beta & me']}, 'alpha, beta & me'),
        'join03': (r'{{ a|join:" &amp; " }}', {'a': ['alpha', 'beta & me']}, 'alpha &amp; beta &amp; me'),
        'join04': (r'{% autoescape off %}{{ a|join:" &amp; " }}{% endautoescape %}', {'a': ['alpha', 'beta & me']}, 'alpha &amp; beta & me'),
    }
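
These fixtures mix per-entry datetime.now() calls with a single captured now so that relative expectations such as '1 day' stay stable while the suite runs. A minimal sketch of building test inputs from one captured timestamp plus timedelta offsets:

import datetime

now = datetime.datetime.now()
cases = {
    "one_minute_ago": now - datetime.timedelta(minutes=1, seconds=10),
    "one_day_ago": now - datetime.timedelta(days=1, minutes=1),
    "one_week_ahead": now + datetime.timedelta(days=7),
}
for name, value in cases.items():
    print("%s -> %s" % (name, "past" if value < now else "future"))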

Example 62

Project: cstar_perf Source File: stress_compare.py
def stress_compare(revisions,
                   title,
                   log,
                   operations = [],
                   subtitle = '',
                   capture_fincore=False,
                   initial_destroy=True,
                   leave_data=False,
                   keep_page_cache=False,
                   git_fetch_before_test=True,
                   bootstrap_before_test=True,
                   teardown_after_test=True
               ):
    """
    Run Stress on multiple C* branches and compare them.

    revisions - List of dictionaries that contain cluster configurations
                to trial. This is combined with the default config.
    title - The title of the comparison
    subtitle - A subtitle for more information (displayed smaller underneath)
    log - The json file path to record stats to
    operations - List of dictionaries indicating the operations. Example:
       [# cassandra-stress command, node defaults to cluster defined 'stress_node'
        {'type': 'stress',
         'command': 'write n=19M -rate threads=50',
         'node': 'node1',
         'wait_for_compaction': True},
        # nodetool command to run in parallel on nodes:
        {'type': 'nodetool',
         'command': 'decommission',
         'nodes': ['node1','node2']},
        # cqlsh script, node defaults to cluster defined 'stress_node'
        {'type': 'cqlsh',
         'script': "use my_ks; INSERT INTO blah (col1, col2) VALUES (val1, val2);",
         'node': 'node1'}
       ]
    capture_fincore - Enables capturing of linux-fincore logs of C* data files.
    initial_destroy - Destroy all data before the first revision is run.
    leave_data - Whether to leave the Cassandra data/commitlog/etc directories intact between revisions.
    keep_page_cache - Whether to leave the linux page cache intact between revisions.
    git_fetch_before_test (bool): If True, update cassandra.git with fab_common.git_repos before the test
    bootstrap_before_test (bool): If True, bootstrap DSE / C* before running the operations
    teardown_after_test (bool): If True, shut down DSE / C* after all of the operations
    """
    validate_revisions_list(revisions)
    validate_operations_list(operations)

    pristine_config = copy.copy(fab_config)

    # initial_destroy and git_fetch_before_test can be set in the job configuration,
    # or manually in the call to this function.
    # Either is fine, but they shouldn't conflict. If they do, a ValueError is raised.
    initial_destroy = get_bool_if_method_and_config_values_do_not_conflict('initial_destroy',
                                                                           initial_destroy,
                                                                           pristine_config,
                                                                           method_name='stress_compare')

    if initial_destroy:
        logger.info("Cleaning up from prior runs of stress_compare ...")
        teardown(destroy=True, leave_data=False)

    # https://datastax.jira.com/browse/CSTAR-633
    git_fetch_before_test = get_bool_if_method_and_config_values_do_not_conflict('git_fetch_before_test',
                                                                                 git_fetch_before_test,
                                                                                 pristine_config,
                                                                                 method_name='stress_compare')

    stress_shas = maybe_update_cassandra_git_and_setup_stress(operations, git_fetch=git_fetch_before_test)

    # Flamegraph Setup
    if flamegraph.is_enabled():
        execute(flamegraph.setup)

    with GracefulTerminationHandler() as handler:
        for rev_num, revision_config in enumerate(revisions):
            config = copy.copy(pristine_config)
            config.update(revision_config)
            revision = revision_config['revision']
            config['log'] = log
            config['title'] = title
            config['subtitle'] = subtitle
            product = dse if config.get('product') == 'dse' else cstar

            # leave_data, bootstrap_before_test, and teardown_after_test can be set in the job configuration,
            # or manually in the call to this function.
            # Either is fine, but they shouldn't conflict. If they do, a ValueError is raised.
            leave_data = get_bool_if_method_and_config_values_do_not_conflict('leave_data',
                                                                              leave_data,
                                                                              revision_config,
                                                                              method_name='stress_compare')

            # https://datastax.jira.com/browse/CSTAR-638
            bootstrap_before_test = get_bool_if_method_and_config_values_do_not_conflict('bootstrap_before_test',
                                                                                         bootstrap_before_test,
                                                                                         revision_config,
                                                                                         method_name='stress_compare')

            # https://datastax.jira.com/browse/CSTAR-639
            teardown_after_test = get_bool_if_method_and_config_values_do_not_conflict('teardown_after_test',
                                                                                       teardown_after_test,
                                                                                       revision_config,
                                                                                       method_name='stress_compare')

            logger.info("Bringing up {revision} cluster...".format(revision=revision))

            # Drop the page cache between each revision, especially
            # important when leave_data=True :
            if not keep_page_cache:
                drop_page_cache()

            # Only fetch from git on the first run and if git_fetch_before_test is True
            git_fetch_before_bootstrap = rev_num == 0 and git_fetch_before_test
            if bootstrap_before_test:
                revision_config['git_id'] = git_id = bootstrap(config,
                                                               destroy=initial_destroy,
                                                               leave_data=leave_data,
                                                               git_fetch=git_fetch_before_bootstrap)
            else:
                revision_config['git_id'] = git_id = config['revision']

            if flamegraph.is_enabled(revision_config):
                execute(flamegraph.ensure_stopped_perf_agent)
                execute(flamegraph.start_perf_agent, rev_num)

            if capture_fincore:
                start_fincore_capture(interval=10)

            last_stress_operation_id = 'None'
            for operation_i, operation in enumerate(operations, 1):
                try:
                    start = datetime.datetime.now()
                    stats = {
                        "id": str(uuid.uuid1()),
                        "type": operation['type'],
                        "revision": revision,
                        "git_id": git_id,
                        "start_date": start.isoformat(),
                        "label": revision_config.get('label', revision_config['revision']),
                        "test": '{operation_i}_{operation}'.format(
                            operation_i=operation_i,
                            operation=operation['type'])
                    }

                    if operation['type'] == 'stress':
                        last_stress_operation_id = stats['id']
                        # Default to all the nodes of the cluster if no
                        # nodes were specified in the command:
                        if 'nodes' in operation:
                            cmd = "{command} -node {hosts}".format(
                                command=operation['command'],
                                hosts=",".join(operation['nodes']))
                        elif '-node' in operation['command']:
                            cmd = operation['command']
                        else:
                            cmd = "{command} -node {hosts}".format(
                                command=operation['command'],
                                hosts=",".join([n for n in fab_config['hosts']]))
                        stats['command'] = cmd
                        stats['intervals'] = []
                        stats['test'] = '{operation_i}_{operation}'.format(
                            operation_i=operation_i, operation=cmd.strip().split(' ')[0]).replace(" ", "_")
                        logger.info('Running stress operation : {cmd}  ...'.format(cmd=cmd))
                        # Run stress:
                        # (stress takes the stats as a parameter, and adds
                        #  more as it runs):
                        stress_sha = stress_shas[operation.get('stress_revision', 'default')]
                        stats = stress(cmd, revision, stress_sha, stats=stats)
                        # Wait for all compactions to finish (unless disabled):
                        if operation.get('wait_for_compaction', True):
                            compaction_throughput = revision_config.get("compaction_throughput_mb_per_sec", 16)
                            wait_for_compaction(compaction_throughput=compaction_throughput)

                    elif operation['type'] == 'nodetool':
                        if 'nodes' not in operation:
                            operation['nodes'] = 'all'
                        if operation['nodes'] in ['all','ALL']:
                            nodes = [n for n in fab_config['hosts']]
                        else:
                            nodes = operation['nodes']

                        set_nodetool_path(os.path.join(product.get_bin_path(), 'nodetool'))
                        logger.info("Running nodetool on {nodes} with command: {command}".format(nodes=operation['nodes'], command=operation['command']))
                        stats['command'] = operation['command']
                        output = nodetool_multi(nodes, operation['command'])
                        stats['output'] = output
                        logger.info("Nodetool command finished on all nodes")

                    elif operation['type'] == 'cqlsh':
                        logger.info("Running cqlsh commands on {node}".format(node=operation['node']))
                        set_cqlsh_path(os.path.join(product.get_bin_path(), 'cqlsh'))
                        output = cqlsh(operation['script'], operation['node'])
                        stats['output'] = output.split("\n")
                        stats['command'] = operation['script']
                        logger.info("Cqlsh commands finished")

                    elif operation['type'] == 'bash':
                        nodes = operation.get('nodes', [n for n in fab_config['hosts']])
                        logger.info("Running bash commands on: {nodes}".format(nodes=nodes))
                        stats['output'] = bash(operation['script'], nodes)
                        stats['command'] = operation['script']
                        logger.info("Bash commands finished")

                    elif operation['type'] == 'spark_cassandra_stress':
                        nodes = operation.get('nodes', [n for n in fab_config['hosts']])
                        stress_node = config.get('stress_node', None)
                        # Note: once we have https://datastax.jira.com/browse/CSTAR-617, we should fix this to use
                        # client-tool when DSE_VERSION >= 4.8.0
                        # https://datastax.jira.com/browse/DSP-6025: dse client-tool
                        master_regex = re.compile(r"(.|\n)*(?P<master>spark:\/\/\d+.\d+.\d+.\d+:\d+)(.|\n)*")
                        master_out = dsetool_cmd(nodes[0], options='sparkmaster')[nodes[0]]
                        master_match = master_regex.match(master_out)
                        if not master_match:
                            raise ValueError('Could not find master address from "dsetool sparkmaster" cmd\n'
                                             'Found output: {f}'.format(f=master_out))
                        master_string = master_match.group('master')
                        build_spark_cassandra_stress = bool(distutils.util.strtobool(
                            str(operation.get('build_spark_cassandra_stress', 'True'))))
                        remove_existing_spark_data = bool(distutils.util.strtobool(
                            str(operation.get('remove_existing_spark_data', 'True'))))
                        logger.info("Running spark_cassandra_stress on {stress_node} "
                                    "using spark.cassandra.connection.host={node} and "
                                    "spark-master {master}".format(stress_node=stress_node,
                                                                   node=nodes[0],
                                                                   master=master_string))
                        output = spark_cassandra_stress(operation['script'], nodes, stress_node=stress_node,
                                                        master=master_string,
                                                        build_spark_cassandra_stress=build_spark_cassandra_stress,
                                                        remove_existing_spark_data=remove_existing_spark_data)
                        stats['output'] = output.get('output', 'No output captured')
                        stats['spark_cass_stress_time_in_seconds'] = output.get('stats', {}).get('TimeInSeconds', 'No time captured')
                        stats['spark_cass_stress_ops_per_second'] = output.get('stats', {}).get('OpsPerSecond', 'No ops/s captured')
                        logger.info("spark_cassandra_stress finished")

                    elif operation['type'] == 'ctool':
                        logger.info("Running ctool with parameters: {command}".format(command=operation['command']))
                        ctool = Ctool(operation['command'], common.config)
                        output = execute(ctool.run)
                        stats['output'] = output
                        logger.info("ctool finished")

                    elif operation['type'] == 'dsetool':
                        if 'nodes' not in operation:
                            operation['nodes'] = 'all'
                        if operation['nodes'] in ['all','ALL']:
                            nodes = [n for n in fab_config['hosts']]
                        else:
                            nodes = operation['nodes']

                        dsetool_options = operation['script']
                        logger.info("Running dsetool {command} on {nodes}".format(nodes=operation['nodes'], command=dsetool_options))
                        stats['command'] = dsetool_options
                        output = dsetool_cmd(nodes=nodes, options=dsetool_options)
                        stats['output'] = output
                        logger.info("dsetool command finished on all nodes")

                    elif operation['type'] == 'dse':
                        logger.info("Running dse command on {node}".format(node=operation['node']))
                        output = dse_cmd(node=operation['node'], options=operation['script'])
                        stats['output'] = output.split("\n")
                        stats['command'] = operation['script']
                        logger.info("dse commands finished")

                    end = datetime.datetime.now()
                    stats['end_date'] = end.isoformat()
                    stats['op_duration'] = str(end - start)
                    log_stats(stats, file=log)
                finally:
                    # Copy node logs:
                    retrieve_logs_and_create_tarball(job_id=stats['id'])
                    revision_config['last_log'] = stats['id']

                if capture_fincore:
                    stop_fincore_capture()
                    log_dir = os.path.join(CSTAR_PERF_LOGS_DIR, stats['id'])
                    retrieve_fincore_logs(log_dir)
                    # Restart fincore capture if this is not the last
                    # operation:
                    if operation_i < len(operations):
                        start_fincore_capture(interval=10)

            if flamegraph.is_enabled(revision_config):
                # Generate and Copy node flamegraphs
                execute(flamegraph.stop_perf_agent)
                execute(flamegraph.generate_flamegraph, rev_num)
                flamegraph_dir = os.path.join(os.path.expanduser('~'),'.cstar_perf', 'flamegraph')
                flamegraph_test_dir = os.path.join(flamegraph_dir, last_stress_operation_id)
                retrieve_flamegraph(flamegraph_test_dir, rev_num+1)
                sh.tar('cfvz', "{}.tar.gz".format(stats['id']), last_stress_operation_id, _cwd=flamegraph_dir)
                shutil.rmtree(flamegraph_test_dir)

            log_add_data(log, {'title':title,
                               'subtitle': subtitle,
                               'revisions': revisions})
            if teardown_after_test:
                if revisions[-1].get('leave_data', leave_data):
                    teardown(destroy=False, leave_data=True)
                else:
                    kill_delay = 300 if profiler.yourkit_is_enabled(revision_config) else 0
                    teardown(destroy=True, leave_data=False, kill_delay=kill_delay)

            if profiler.yourkit_is_enabled(revision_config):
                yourkit_config = profiler.yourkit_get_config()
                yourkit_dir = os.path.join(os.path.expanduser('~'),'.cstar_perf', 'yourkit')
                yourkit_test_dir = os.path.join(yourkit_dir, last_stress_operation_id)
                retrieve_yourkit(yourkit_test_dir, rev_num+1)
                sh.tar('cfvz', "{}.tar.gz".format(stats['id']),
                       last_stress_operation_id, _cwd=yourkit_dir)
                shutil.rmtree(yourkit_test_dir)
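
The per-operation bookkeeping in this example follows a simple pattern: take datetime.datetime.now() before the operation, store the ISO-formatted start, then take a second reading afterwards to record the end timestamp and the duration. A minimal sketch of just that pattern (run_operation is a hypothetical callable standing in for one stress/nodetool/cqlsh step):

import datetime

def timed_stats(run_operation):
    # Capture the wall-clock start, run the step, then derive end time and duration
    start = datetime.datetime.now()
    stats = {"start_date": start.isoformat()}
    run_operation()
    end = datetime.datetime.now()
    stats["end_date"] = end.isoformat()
    stats["op_duration"] = str(end - start)  # e.g. '0:01:23.456789'
    return stats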

Example 63

Project: 8-bits Source File: posts.py
def apply_posts(shard=None,
                insertion_post_id=None,
                lease_seconds=10,
                max_tasks=20):
    """Applies a set of pending posts to a shard.

    If shard is None then this function will apply mods for whatever is the
    first shard it can find in the pull task queue.

    insertion_post_id is the post_id that first caused this apply task to be
    enqueued. This task will retry until it applies the insertion_post_id
    itself or it can confirm that the insertion_post_id has already been
    applied. insertion_post_id may be empty if the apply task is not associated
    with a particular post (such as cronjobs/cleanup tasks).
    """
    # Do not use caching for NDB in this task queue worker.
    ctx = ndb.get_context()
    ctx.set_cache_policy(lambda x: False)
    ctx.set_memcache_policy(lambda x: False)

    # Fetch the new Posts to put in sequence.
    queue = taskqueue.Queue(config.pending_queue)

    # When no shard is specified, process the first tag we find.
    task_list = []
    if not shard:
        task_list.extend(queue.lease_tasks(lease_seconds, 1))
        if not task_list:
            logging.debug('apply_posts with no specific shard found no tasks')
            return
        params = task_list[0].extract_params()
        shard = params['shard']
        logging.debug('apply_posts with no specific shard found shard=%r',
                      shard)

    # Clear the dirty bit on this shard to start the time horizon.
    dirty_bit(shard, clear=True)

    # Find tasks pending for the current shard.
    task_list.extend(
        queue.lease_tasks_by_tag(lease_seconds, max_tasks, tag=str(shard)))

    receipt_key_list = []
    new_topic = None
    for task in task_list:
        params = task.extract_params()

        # Extract the new topic shard associated with this task, if any. The
        # last one wins. If all of the found posts have already been applied,
        # then topic assignment will be ignored.
        new_topic = params.get('new_topic') or new_topic

        post_id_list = params.get('post_ids')
        if post_id_list is None:
            # This may happen on replica shards if it turns out there are no
            # unapplied post IDs but an apply task still ran.
            post_id_list = []
        elif not isinstance(post_id_list, list):
            post_id_list = [post_id_list]

        for post_id in post_id_list:
            receipt_key = ndb.Key(
                models.Post._get_kind(), post_id,
                models.Receipt._get_kind(), shard)
            receipt_key_list.append(receipt_key)

    receipt_list = ndb.get_multi(receipt_key_list)

    # Some tasks may be in the pull queue that were already put in sequence.
    # So ignore these and only apply the new ones.
    unapplied_receipts = [
        models.Receipt(key=k)
        for k, r in zip(receipt_key_list, receipt_list)
        if r is None]
    unapplied_post_ids = [r.post_id for r in unapplied_receipts]

    # Double check if we think there should be work to apply but we didn't find
    # any. This will force the apply task to retry immediately if the post task
    # was not found. This can happen when the pull queue's consistency is
    # behind.
    if not unapplied_receipts and insertion_post_id:
        receipt_key = ndb.Key(
            models.Post._get_kind(), insertion_post_id,
            models.Receipt._get_kind(), shard)
        receipt = receipt_key.get()
        if receipt:
            logging.warning(
                'No post application to do for shard=%r, but post_id=%r '
                'already applied; doing nothing in this task',
                shard, insertion_post_id)
            new_topic = None
            # Do not 'return' here. We need to increment the shard sequence or
            # else tasks will not run for this shard in the future because of
            # de-duping.
        else:
            raise base.Error('No post application to do for shard=%r, but '
                             'post_id=%r has not been applied; will retry' %
                             (shard, insertion_post_id))

    now = datetime.datetime.now()

    def txn():
        shard_record = models.Shard.get_by_id(shard)
        # TODO(bslatkin): Just drop this task entirely if the shard cannot
        # be found. Could happen for old shards that were cleaned up.
        assert shard_record

        # One of the tasks in this batch has a topic assignment. Apply it here.
        if new_topic:
            logging.debug('Changing topic from %r to %r',
                          shard_record.current_topic, new_topic)
            shard_record.current_topic = new_topic
            shard_record.topic_change_time = now

        new_sequence_numbers = list(xrange(
            shard_record.sequence_number,
            shard_record.sequence_number + len(unapplied_receipts)))
        shard_record.sequence_number += max(1, len(unapplied_receipts))

        # Write post references that point at the newly sequenced posts.
        to_put = [shard_record]
        for receipt, sequence in zip(unapplied_receipts, new_sequence_numbers):
            to_put.append(models.PostReference(
                id=sequence,
                parent=shard_record.key,
                post_id=receipt.post_id))
            # Update the receipt entity here; it will be written outside this
            # transaction, since these receipts may span multiple entity
            # groups.
            receipt.sequence = sequence

        # Enqueue replica posts transactionally, to make sure everything
        # definitely will get copied over to the replica shard.
        if shard_record.current_topic:
            enqueue_post_task(shard_record.current_topic, unapplied_post_ids)

        ndb.put_multi(to_put)

        return shard_record, new_sequence_numbers

    # Have this only attempt a transaction a single time. If the transaction
    # fails the task queue will retry this task within 4 seconds. Because
    # apply tasks are always named by the current Shard.sequence_number we
    # can be reasonably sure that no other apply task for this shard will be
    # running concurrently when this fails.
    shard_record, new_sequence_numbers = ndb.transaction(txn, retries=1)
    replica_shard = shard_record.current_topic

    logging.debug('Applied %d posts for shard=%r, sequence_numbers=%r',
                  len(unapplied_receipts), shard, new_sequence_numbers)

    futures = []

    # Save receipts for all the posts.
    futures.extend(ndb.put_multi_async(unapplied_receipts))

    # Notify all logged in users of the new posts.
    futures.append(notify_posts(
        shard, unapplied_post_ids, sequence_numbers=new_sequence_numbers))

    # Replicate posts to a topic shard.
    if replica_shard:
        logging.debug('Replicating source shard=%r to replica shard=%r',
                      shard, replica_shard)
        futures.append(enqueue_apply_task(replica_shard))

    # Success! Delete the tasks from this queue.
    queue.delete_tasks(task_list)

    # Always run one more apply task to clean up any posts that came in
    # while this transaction was processing.
    if dirty_bit(shard, check=True):
        futures.append(enqueue_apply_task(shard))

    # Wait on all pending futures in case they raise errors.
    ndb.Future.wait_all(futures)

    # For root shards, add shard cleanup task to check for user presence and
    # cause notification of user logouts if the channel API did not detect the
    # user closing the connection.
    if not shard_record.root_shard:
        presence.enqueue_cleanup_task(shard)
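
Note how apply_posts takes a single datetime.datetime.now() reading before the transaction and reuses it, so every record written in one apply batch carries the same topic_change_time instead of drifting by microseconds. A minimal sketch of that idea (stamp_topic_change and the record dicts are hypothetical):

import datetime

def stamp_topic_change(records, new_topic):
    # One reading shared by the whole batch, mirroring the pattern above
    now = datetime.datetime.now()
    for record in records:
        record["current_topic"] = new_topic
        record["topic_change_time"] = now
    return records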

Example 64

Project: drawquest-web Source File: models.py
def heavy_state_sync(user, app_version=None, app_version_tuple=None, tab_last_seen_timestamps={}):
    from drawquest.apps.brushes.models import Brush
    from drawquest.apps.palettes.models import user_palettes, Color

    twitter_keys = '{}@{}'.format(settings.TWITTER_APP_KEY, settings.TWITTER_APP_SECRET)
    twitter_keys = twitter_keys[-6:] + twitter_keys[:-6]
    twitter_keys = swapcase(twitter_keys)

    ret = {
        'realtime_sync': realtime_sync(user),
        'user_palettes': user_palettes(user),
        'current_quest': current_quest_details(),
        'onboarding_quest_id': knobs.ONBOARDING_QUEST_ID,
        'sync': twitter_keys,
        'tumblr_success_regex': '''<div style="margin-bottom:10px; font-size:40px; color:#777;">Done!</div>''',
        'rewards': {
            'amounts': knobs.REWARDS,
            'copy': {
                'quest_of_the_day': _("You drew the Quest of the Day"),
                'archived_quest':   _("You drew a Quest"),
                'first_quest':      _("Woo! Your first Quest ever!"),
                'streak_3':         _("Quest Streak: 3"),
                'streak_10':        _("Quest Streak: 10"),
                'streak_100':       _("Epic! 100 straight Quests"),
            },
            'iphone_copy': {
                'archived_quest': _("You drew a Quest"),
                'first_quest': _("Your first Quest!"),
                'quest_of_the_day': _("Quest of the Day!"),
                'streak_10': _("Bonus Streak"),
                'streak_100': _("Bonus Streak"),
                'streak_3': _("Bonus Streak"),
                'personal_share': _("Shared with Facebook"),
                'personal_twitter_share': _("Shared with Twitter"),
            },
        },
        'features': {
            'invite_from_facebook': True,
            'invite_from_twitter': True,
            'user_search': True,
            'urban_airship_registration_before_auth': True,
            'urban_airship_registration': True,
        },
        'logging': {
            'on': True,
            'authentication-controller': {
                'request-for-me': False,
            },
            'facebook-controller': {
                'open-active-session-with-read-permissions': False,
                'request-new-publish-permissions': False,
                'request-new-publish-permissions-cancelled': False,
                'request-new-read-permissions': False,
                'request-new-read-permissions-cancelled': False,
            },
            'facebook-friends-coordinator': {
                'present-requests-dialog': False,
                'request-my-friends': False,
            },
            'http-request': {
                'error-auth/login_with_facebook': {
                    'mute-error-codes': {
                        '403': True,
                    }
                },
                'error-auth/login_with_twitter': {
                    'mute-error-codes': {
                        '403': True,
                    }
                },
                'error-quests/gallery_for_comment': {
                    'mute-error-codes': {
                        '404': True,
                    }
                }
            },
            'private-api': {
                'failed-activity/iphone_activities': {
                    'mute-error-codes': {
                        '1005': True,
                    }
                }
            },
            'sharing-controller': {
                'present-feed-dialog': False,
                'present-share-dialog-with-link': False,
            },
            'shop-controller': {
                'add-payment': False,
                'brush-products-request': False,
                'coin-products-request': False,
            },
            'twitter-api-manager': {
                'step-1': False,
                'step-2': False,
            },
            'twitter-controller': {
                'request-data-cursored-user-ids': False,
                'request-data-send-dm': False,
                'request-data-unknown': False,
                'request-data-users-for-ids': False,
            },
        },
        #TODO use settings.LOCALES once that's ready
        'supported_languages': ['de', 'en', 'es', 'fr', 'ja', 'ko', 'nl', 'pt', 'ru', 'th', 'zh-Hant', 'zh-Hans'],
        'l10n_files_url': None,
        'user_colors': list(Color.for_user(user)),
        'user_brushes': list(Brush.for_user(user)),
        'global_brushes': list(Brush.for_global()),
        'comment_view_logging_interval': 10,
        'papertrail': {
            'host': 'logs.papertrailapp.com',
            'port': 27889,
            'disabled_logging_points': [],
        },
        'modals': {},
    }

    if app_version_tuple and app_version_tuple >= (3,):
        ret['appirater_url'] = 'itms-apps://itunes.apple.com/app/idAPP_ID'
    else:
        ret['appirater_url'] = 'itms-apps://ax.itunes.apple.com/WebObjects/MZStore.woa/wa/viewContentsUserReviews?type=Purple+Software&id=APP_ID'

    try:
        ret['color_alert_version'] = int(redis.get('color_alert_version'))
    except TypeError:
        ret['color_alert_version'] = 0

    if user.is_authenticated():
        user_kv_items = user.kv.hgetall()
        user_kv_items = dict((key, val) for key, val in user_kv_items.items()
                             if key in [
                                 'saw_update_modal_for_version',
                                 'saw_share_web_profile_modal',
                                 'publish_to_facebook',
                                 'publish_to_twitter',
                             ])

        ret.update({
            'user_email': user.email,
            'user_profile': user_profile(user.username),
            'balance': economy.balance(user),
            'completed_quest_ids': completed_quest_ids(user),
            'web_profile_privacy': user.kv.web_profile_privacy.get(),
            'twitter_privacy': user.kv.twitter_privacy.get(),
            'facebook_privacy': user.kv.facebook_privacy.get(),
            'user_kv': user_kv_items,
            'reminders': {
                'invite': 1,
            },
        })

        if (app_version and parse_version(knobs.CURRENT_APP_VERSION) > parse_version(app_version)):
            saw_version = user_kv_items.get('saw_update_modal_for_version')
            if (saw_version is None
                    or parse_version(saw_version) < parse_version(knobs.CURRENT_APP_VERSION)):
                ret['modals']['show_update_modal_for_version'] = knobs.CURRENT_APP_VERSION
                ret['modals']['update_modal_type'] = 'alert'

        if not user_kv_items.get('saw_share_web_profile_modal'):
            ret['modals']['show_share_web_profile_modal'] = (user.date_joined <= (datetime.now() - td(days=2))
                                                             or user.comments.count() >= 3)

    ret['tab_badge_type'] = 'flag'
    if tab_last_seen_timestamps:
        ret['tab_badges'] = tab_badges(user, last_seen_timestamps=tab_last_seen_timestamps)

    return ret
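
The share-profile modal decision above builds a cutoff by subtracting a timedelta from the current time and comparing it to the account's join date. A standalone sketch of that check (should_show_share_modal and its arguments are hypothetical; joined_at is assumed to be a naive datetime like user.date_joined):

from datetime import datetime, timedelta

def should_show_share_modal(joined_at, comment_count):
    # Offer the modal once the account is at least two days old, or the user is active enough
    cutoff = datetime.now() - timedelta(days=2)
    return joined_at <= cutoff or comment_count >= 3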

Example 65

Project: SublimeApex Source File: metadata.py
Function: deploy
    def deploy(self, base64_zip):
        """ Deploy zip file

        Arguments:

        * base64_zip -- base64 encoded zip file
        """
        result = self.login()
        if not result or not result["success"]: return

        # Log the StartTime
        start_time = datetime.datetime.now()

        # Populate the soap_body with actual session id
        deploy_options = self.settings["deploy_options"]
        
        # If just checkOnly, output VALIDATE, otherwise, output DEPLOY
        deploy_or_validate = "validate" if deploy_options["checkOnly"] else "deploy"

        # 1. Issue a deploy request to start the asynchronous retrieval
        headers = {
            "Content-Type": "text/xml;charset=UTF-8",
            "SOAPAction": '""'
        }

        # [sf:%s]
        Printer.get('log').write_start().write("[sf:%s] Start request for a deploy..." % deploy_or_validate)
        options = deploy_options
        options["zipfile"] = base64_zip
        soap_body = self.soap.create_request('deploy', options)

        try:
            response = requests.post(self.metadata_url, soap_body, verify=False, headers=headers)
        except Exception as e:
            self.result = {
                "Error Message":  "Network Issue" if "Max retries exceeded" in str(e) else str(e),
                "URL": self.metadata_url,
                "Operation": "DEPLOY",
                "success": False
            }
            return self.result

        # Check whether session_id is expired
        if "INVALID_SESSION_ID" in response.text:
            Printer.get('log').write("[sf:%s] Session expired, need login again" % deploy_or_validate)
            self.login(True)
            return self.deploy(base64_zip)

        # A status code above 399 indicates an error response
        if response.status_code > 399:
            self.result = util.get_response_error(response)
            return self.result

        # [sf:%s]
        Printer.get('log').write("[sf:%s] Request for a deploy submitted successfully." % deploy_or_validate)

        # Get async process id
        async_process_id = util.getUniqueElementValueFromXmlString(response.content, "id")

        # [sf:%s]
        Printer.get('log').write("[sf:%s] Request ID for the current deploy task: %s" % (deploy_or_validate, async_process_id))

        # [sf:%s]
        Printer.get('log').write("[sf:%s] Waiting for server to finish processing the request..." % deploy_or_validate)

        # 2. issue a check status loop request to assure the async
        # process is done
        result = self.check_deploy_status(async_process_id)

        body = result["body"]

        index = 1
        failure_dict = {}
        while body["status"] in ["Pending", "InProgress", "Canceling"]:
            if "stateDetail" in body:
                if int(body["numberComponentsDeployed"]) < int(body["numberComponentsTotal"]):
                    Printer.get('log').write("[sf:%s] Request Status: %s (%s/%s)  -- %s" % (
                        deploy_or_validate,
                        body["status"], 
                        body["numberComponentsDeployed"],
                        body["numberComponentsTotal"],
                        body["stateDetail"]
                    ))
                else:
                    Printer.get('log').write("[sf:%s] TestRun Status: %s (%s/%s)  -- %s" % (
                        deploy_or_validate,
                        body["status"], 
                        body["numberTestsCompleted"],
                        body["numberTestsTotal"],
                        body["stateDetail"]
                    ))
            else:
                Printer.get('log').write("[sf:%s] Request Status: %s" % (
                    deploy_or_validate, body["status"]
                ))

            # Process Test Run Result
            if "runTestResult" in body["details"] and \
                "failures" in body["details"]["runTestResult"]:

                failures = body["details"]["runTestResult"]["failures"]
                if isinstance(failures, dict):
                    if failures["id"] not in failure_dict:
                        failure_dict[failures["id"]] = failures

                        Printer.get('log').write("-" * 84).write("Test Failures: ")
                        Printer.get('log').write("%s.\t%s" % (index, failures["message"]))
                        for msg in failures["stackTrace"].split("\n"):
                            Printer.get('log').write("\t%s" % msg)

                        # [sf:deploy]
                        Printer.get('log').write("-" * 84)

                        index += 1
                        
                elif isinstance(failures, list):
                    for f in failures:
                        if f["id"] not in failure_dict:
                            failure_dict[f["id"]] = f

                            Printer.get('log').write("-" * 84).write("Test Failures: ")
                            Printer.get('log').write("%s.\t%s" % (index, f["message"]))

                            # If compile error, there will no stack trace
                            if isinstance(f["stackTrace"], str):
                                for msg in f["stackTrace"].split("\n"):
                                    Printer.get('log').write("\t%s" % msg)
                                Printer.get('log').write("-" * 84)

                            index += 1

            # Thread Wait
            sleep_seconds = 2 if body["status"] == "Pending" else 1
            time.sleep(sleep_seconds)
            
            result = self.check_deploy_status(async_process_id)
            body = result["body"]

        # Check if job is canceled
        if body["status"] == "Canceled":
            Printer.get('log').write("\nBUILD FAILED", False)
            Printer.get('log').write("*********** DEPLOYMENT FAILED ***********", False)
            Printer.get('log').write("Request ID: %s" % async_process_id, False)
            Printer.get('log').write("\nRequest Canceled", False)
            Printer.get('log').write("*********** DEPLOYMENT FAILED ***********", False)

        # If check status request failed, this will not be done
        elif body["status"] == "Failed":
            # Append failure message
            Printer.get('log').write("[sf:%s] Request Failed\n\nBUILD FAILED" % deploy_or_validate)
            Printer.get('log').write("*********** DEPLOYMENT FAILED ***********", False)
            Printer.get('log').write("Request ID: %s" % async_process_id, False)

            # print (json.dumps(body, indent=4))

            # Output Failure Details
            failures_messages = []
            if "componentFailures" in body["details"]:
                component_failures = body["details"]["componentFailures"]
                if isinstance(component_failures, dict):
                    component_failure = component_failures
                    failures_messages.append("1. %s -- %s: %s (line %s)" % (
                        component_failure["fileName"],
                        component_failure["problemType"],
                        component_failure["problem"].replace("\n", " "),
                        component_failure["lineNumber"] \
                            if "lineNumber" in component_failure else "0"
                    ))
                elif isinstance(component_failures, list):
                    for index in range(len(component_failures)):
                        component_failure = component_failures[index]
                        failures_messages.append("%s. %s -- %s: %s (line %s)" % (
                            index+1, 
                            component_failure["fileName"],
                            component_failure["problemType"],
                            component_failure["problem"],
                            component_failure["lineNumber"] \
                                if "lineNumber" in component_failure else "0"
                        ))
            elif "errorMessage" in body:
                Printer.get('log').write("\n" + body["errorMessage"], False)

            warning_messages = []
            if "runTestResult" in body["details"]:
                runTestResult = body["details"]["runTestResult"]
                if "codeCoverageWarnings" in runTestResult:
                    coverage_warnings = runTestResult["codeCoverageWarnings"]
                    # A single warning comes back as a dict; normalize it to a list
                    if isinstance(coverage_warnings, dict):
                        coverage_warnings = [coverage_warnings]

                    for warn in coverage_warnings:
                        if not isinstance(warn["name"], str): continue
                        warning_messages.append("%s -- %s" % (warn["name"], warn["message"]))

            # Output failure message
            if failures_messages:
                Printer.get('log').write("\n\nAll Component Failures:", False)
                Printer.get('log').write("\n"+"\n\n".join(failures_messages), False)

            # Output warning message
            if warning_messages:
                Printer.get('log').write("\n\nTest Coverage Warnings:", False)
                Printer.get('log').write("\n"+"\n".join(warning_messages), False)
            
            # End for Deploy Result
            Printer.get('log').write("\n*********** %s FAILED ***********" % (
                deploy_or_validate.upper()), False)
        else:
            # Append succeed message
            Printer.get('log').write("\n[sf:%s] Request Succeed" % deploy_or_validate, False)
            Printer.get('log').write("[sf:%s] *********** %s SUCCEEDED ***********" % (
                deploy_or_validate, deploy_or_validate.upper()), False)
            Printer.get('log').write("[sf:%s] Finished request %s successfully." % (
                deploy_or_validate, async_process_id), False)

        # Total time
        total_seconds = (datetime.datetime.now() - start_time).seconds
        Printer.get('log').write("\n\nTotal time: %s seconds" % total_seconds, False)

        # Display debug log message in the new view
        if "header" in result and result["header"] and "debugLog" in result["header"]:
            view = sublime.active_window().new_file()
            view.run_command("new_view", {
                "name": "Debugging Information",
                "input": result["header"]["debugLog"]
            })

        self.result = result
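
One caveat worth flagging in the timing code above: (datetime.datetime.now() - start_time).seconds returns only the seconds field of the timedelta, so a run spanning more than a day would silently drop the days component; total_seconds() gives the full duration. A small sketch of the difference:

import datetime

start_time = datetime.datetime.now() - datetime.timedelta(days=1, seconds=5)
elapsed = datetime.datetime.now() - start_time
print(elapsed.seconds)          # ~5: only the seconds component, the day is ignored
print(elapsed.total_seconds())  # ~86405.0: the full duration in seconds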

Example 66

Project: kaggle-heart Source File: predict.py
def predict_model(expid, mfile=None):
    metadata_path = MODEL_PATH + "%s.pkl" % (expid if not mfile else mfile)
    prediction_path = INTERMEDIATE_PREDICTIONS_PATH + "%s.pkl" % expid
    submission_path = SUBMISSION_PATH + "%s.csv" % expid

    if theano.config.optimizer != "fast_run":
        print "WARNING: not running in fast mode!"

    print "Using"
    print "  %s" % metadata_path
    print "To generate"
    print "  %s" % prediction_path
    print "  %s" % submission_path

    print "Build model"
    interface_layers = config().build_model()

    output_layers = interface_layers["outputs"]
    input_layers = interface_layers["inputs"]
    top_layer = lasagne.layers.MergeLayer(
        incomings=output_layers.values()
    )
    all_layers = lasagne.layers.get_all_layers(top_layer)
    num_params = lasagne.layers.count_params(top_layer)
    print "  number of parameters: %d" % num_params
    print string.ljust("  layer output shapes:",36),
    print string.ljust("#params:",10),
    print "output shape:"
    for layer in all_layers[:-1]:
        name = string.ljust(layer.__class__.__name__, 32)
        num_param = sum([np.prod(p.get_value().shape) for p in layer.get_params()])
        num_param = string.ljust(num_param.__str__(), 10)
        print "    %s %s %s" % (name,  num_param, layer.output_shape)

    xs_shared = {
        key: lasagne.utils.shared_empty(dim=len(l_in.output_shape), dtype='float32') for (key, l_in) in input_layers.iteritems()
    }
    idx = T.lscalar('idx')

    givens = dict()

    for key in input_layers.keys():
        if key=="sunny":
            givens[input_layers[key].input_var] = xs_shared[key][idx*config().sunny_batch_size:(idx+1)*config().sunny_batch_size]
        else:
            givens[input_layers[key].input_var] = xs_shared[key][idx*config().batch_size:(idx+1)*config().batch_size]

    network_outputs = [
        lasagne.layers.helper.get_output(network_output_layer, deterministic=True)
        for network_output_layer in output_layers.values()
    ]

    iter_test = theano.function([idx], network_outputs + theano_printer.get_the_stuff_to_print(),
                                 givens=givens, on_unused_input="ignore",
                                 # mode=NanGuardMode(nan_is_error=True, inf_is_error=True, big_is_error=True)
                                 )

    print "Load model parameters for resuming"
    resume_metadata = np.load(metadata_path)
    lasagne.layers.set_all_param_values(top_layer, resume_metadata['param_values'])
    num_batches_chunk = config().batches_per_chunk
    num_batches = get_number_of_test_batches()
    num_chunks = int(np.ceil(num_batches / float(config().batches_per_chunk)))

    chunks_train_idcs = range(1, num_chunks+1)

    data_loader.filter_patient_folders()

    create_test_gen = partial(config().create_test_gen,
                              required_input_keys = xs_shared.keys(),
                              required_output_keys = ["patients", "classification_correction_function"],
                              )

    print "Generate predictions with this model"
    start_time = time.time()
    prev_time = start_time


    predictions = [{"patient": i+1,
                    "systole": np.zeros((0,600)),
                    "diastole": np.zeros((0,600))
                    } for i in xrange(NUM_PATIENTS)]


    for e, test_data in izip(itertools.count(start=1), buffering.buffered_gen_threaded(create_test_gen())):
        print "  load testing data onto GPU"

        for key in xs_shared:
            xs_shared[key].set_value(test_data["input"][key])


        patient_ids = test_data["output"]["patients"]
        classification_correction = test_data["output"]["classification_correction_function"]
        print "  patients:", " ".join(map(str, patient_ids))
        print "  chunk %d/%d" % (e, num_chunks)

        for b in xrange(num_batches_chunk):
            iter_result = iter_test(b)
            network_outputs = tuple(iter_result[:len(output_layers)])
            network_outputs_dict = {output_layers.keys()[i]: network_outputs[i] for i in xrange(len(output_layers))}
            kaggle_systoles, kaggle_diastoles = config().postprocess(network_outputs_dict)
            kaggle_systoles, kaggle_diastoles = kaggle_systoles.astype('float64'), kaggle_diastoles.astype('float64')
            for idx, patient_id in enumerate(patient_ids[b*config().batch_size:(b+1)*config().batch_size]):
                if patient_id != 0:
                    index = patient_id-1
                    patient_data = predictions[index]
                    assert patient_id==patient_data["patient"]

                    kaggle_systole = kaggle_systoles[idx:idx+1,:]
                    kaggle_diastole = kaggle_diastoles[idx:idx+1,:]
                    assert np.isfinite(kaggle_systole).all() and np.isfinite(kaggle_diastole).all()
                    kaggle_systole = classification_correction[b*config().batch_size + idx](kaggle_systole)
                    kaggle_diastole = classification_correction[b*config().batch_size + idx](kaggle_diastole)
                    assert np.isfinite(kaggle_systole).all() and np.isfinite(kaggle_diastole).all()
                    patient_data["systole"] =  np.concatenate((patient_data["systole"], kaggle_systole ),axis=0)
                    patient_data["diastole"] = np.concatenate((patient_data["diastole"], kaggle_diastole ),axis=0)

        now = time.time()
        time_since_start = now - start_time
        time_since_prev = now - prev_time
        prev_time = now
        est_time_left = time_since_start * (float(num_chunks - (e + 1)) / float(e + 1 - chunks_train_idcs[0]))
        eta = datetime.now() + timedelta(seconds=est_time_left)
        eta_str = eta.strftime("%c")
        print "  %s since start (%.2f s)" % (utils.hms(time_since_start), time_since_prev)
        print "  estimated %s to go (ETA: %s)" % (utils.hms(est_time_left), eta_str)
        print

    already_printed = False
    for prediction in predictions:
        if prediction["systole"].size>0 and prediction["diastole"].size>0:
            average_method =  getattr(config(), 'tta_average_method', partial(np.mean, axis=0))
            prediction["systole_average"] = average_method(prediction["systole"])
            prediction["diastole_average"] = average_method(prediction["diastole"])
            try:
                test_if_valid_distribution(prediction["systole_average"])
                test_if_valid_distribution(prediction["diastole_average"])
            except:
                if not already_printed:
                    print "WARNING: These distributions are not distributions"
                    already_printed = True
                prediction["systole_average"] = make_monotone_distribution(prediction["systole_average"])
                prediction["diastole_average"] = make_monotone_distribution(prediction["diastole_average"])
                test_if_valid_distribution(prediction["systole_average"])
                test_if_valid_distribution(prediction["diastole_average"])


    print "Calculating training and validation set scores for reference"

    validation_dict = {}
    for patient_ids, set_name in [(validation_patients_indices, "validation"),
                                      (train_patients_indices,  "train")]:
        errors = []
        for patient in patient_ids:
            prediction = predictions[patient-1]
            if "systole_average" in prediction:
                assert patient == regular_labels[patient-1, 0]
                error = CRSP(prediction["systole_average"], regular_labels[patient-1, 1])
                errors.append(error)
                error = CRSP(prediction["diastole_average"], regular_labels[patient-1, 2])
                errors.append(error)
        if len(errors)>0:
            errors = np.array(errors)
            estimated_CRSP = np.mean(errors)
            print "  %s kaggle loss: %f" % (string.rjust(set_name, 12), estimated_CRSP)
            validation_dict[set_name] = estimated_CRSP
        else:
            print "  %s kaggle loss: not calculated" % (string.rjust(set_name, 12))


    print "dumping prediction file to %s" % prediction_path
    with open(prediction_path, 'w') as f:
        pickle.dump({
                        'metadata_path': metadata_path,
                        'prediction_path': prediction_path,
                        'submission_path': submission_path,
                        'configuration_file': config().__name__,
                        'git_revision_hash': utils.get_git_revision_hash(),
                        'experiment_id': expid,
                        'time_since_start': time_since_start,
                        'param_values': lasagne.layers.get_all_param_values(top_layer),
                        'predictions': predictions,
                        'validation_errors': validation_dict,
                    }, f, pickle.HIGHEST_PROTOCOL)
    print "prediction file dumped"

    print "dumping submission file to %s" % submission_path
    with open(submission_path, 'w') as csvfile:
        csvwriter = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
        csvwriter.writerow(['Id'] + ['P%d'%i for i in xrange(600)])
        for prediction in predictions:
            # the submission only has patients 501 to 700
            if prediction["patient"] in data_loader.test_patients_indices:
                if "diastole_average" not in prediction or "systole_average" not in prediction:
                    raise Exception("Not all test-set patients were predicted")
                csvwriter.writerow(["%d_Diastole" % prediction["patient"]] + ["%.18f" % p for p in prediction["diastole_average"].flatten()])
                csvwriter.writerow(["%d_Systole" % prediction["patient"]] + ["%.18f" % p for p in prediction["systole_average"].flatten()])
    print "submission file dumped"

    return
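
The ETA calculation in the chunk loop scales the elapsed wall-clock time by the remaining work and adds it to the current time with a timedelta. A minimal standalone version of that estimate (estimate_eta and its arguments are hypothetical):

from datetime import datetime, timedelta

def estimate_eta(start_time, chunks_done, chunks_total):
    # Scale elapsed time by the ratio of remaining to completed chunks, as in the loop above
    elapsed = (datetime.now() - start_time).total_seconds()
    est_time_left = elapsed * (chunks_total - chunks_done) / max(chunks_done, 1)
    eta = datetime.now() + timedelta(seconds=est_time_left)
    return eta.strftime("%c")  # e.g. 'Mon Jun  1 12:34:56 2015'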

Example 67

Project: certitude Source File: iocscan_queue.py
def demarrer_scanner(hWaitStop=None, batch=None):
    loggingiocscan.info('Starting an IOC scanner instance : ' + threadname)

    print ''
    print '\tPlease log in to launch scan'
    print ''
    username = raw_input('Username: ')
    password = getpass.getpass('Password: ')
    print ''

    # Get user
    u = session.query(User).filter_by(username = username).first()

    # No user or bad password
    if not u or hashPassword(password) != u.password:
        loggingiocscan.critical('Username or password incorrect, stopping the initialization...')
        raw_input()
        return

    # Get KEY and decrypt MASTER_KEY
    keyFromPassword = crypto.keyFromText(password, base64.b64decode(u.b64_kdf_salt))
    MASTER_KEY = crypto.decrypt(u.encrypted_master_key, keyFromPassword)

    mk_cksum = session.query(GlobalConfig).filter_by(key = 'master_key_checksum').first()

    # No checksum in config ???
    if not mk_cksum:
        loggingiocscan.critical('Database is broken, please create a new one, stopping the initialization...')
        del MASTER_KEY
        raw_input()
        return

    # Someone has been playing with the database !
    if checksum(MASTER_KEY)!=mk_cksum.value:
        loggingiocscan.critical('MASTER_KEY may have been altered, stopping the initialization...')
        del MASTER_KEY
        raw_input()
        return

    loggingiocscan.info('Login successful !')
    # INITIALIZATION

    # TODO : initialise all IOCs in DB, then link them to CP

    all_xmliocs = session.query(XMLIOC).order_by(XMLIOC.name.asc())
    all_cp = session.query(ConfigurationProfile).order_by(ConfigurationProfile.name.asc())

    ioc_by_cp = {}
    for cp in all_cp:
        if cp.ioc_list == '':
            loggingiocscan.warning('No IOC defined for profile "%s"' % cp.name)
            continue

        ioc_by_cp[cp.id] = []
        for e in cp.ioc_list.split(','):
            ioc_by_cp[cp.id].append(int(e))

    tree_by_ioc = {}


    # Retrieves evaluators for current mode
    FLAT_MODE = (IOC_MODE == 'flat')
    allowedElements = {}
    evaluatorList = ioc_modules.flatEvaluatorList if FLAT_MODE else ioc_modules.logicEvaluatorList

    for name, classname in evaluatorList.items():
        allowedElements[name] = classname.evalList

    # Parse XML Ioc into IOC trees according to what we can do
    for xmlioc in all_xmliocs:

        content = base64.b64decode(xmlioc.xml_content)
        oip = openiocparser.OpenIOCParser(content, allowedElements, FLAT_MODE, fromString=True)
        oip.parse()
        iocTree = oip.getTree()

        # Trees stripped of unsupported elements may come back empty (None)
        if iocTree is not None:
            tree_by_ioc[xmlioc.id] = {'name':xmlioc.name, 'tree':iocTree}

    # Each configuration profile has a set of trees
    tree_by_cp = {cpid: {i:tree_by_ioc[i] for i in ioclist} for (cpid, ioclist) in ioc_by_cp.items()}

    halt = False
    tache = None
    batchquery = None

    # Batch filtering
    if batch is not None:
        loggingiocscan.info('Filtering for batch "%s"' % batch)
        batchquery = session.query(Batch).filter( Batch.name == batch).first()

        if batchquery is None:
            loggingiocscan.error('Unknown batch "%s" ...' % batch)
            halt = True

    # LAUNCH
    # Main loop
    while not halt:
        try:

            # Get targets to be scanned
            # and that are not currently being scanned
            # or that don't have any retry left
            queue = session.query(Task).filter_by(iocscanned=False, reserved_ioc=False, reserved_hash=False).filter(Task.retries_left_ioc > 0)

            # Batch filtering
            if batchquery is not None:
                queue = queue.filter_by(batch_id = batchquery.id)

            taille_queue = queue.count()

            # Compute the time after which targets are still recovering from last scan
            # Gets target which last retry is NULL or before that time
            limite_a_reessayer = datetime.datetime.now() - datetime.timedelta(0, SECONDES_ENTRE_TENTATIVES)
            a_scanner = queue.filter(or_(Task.last_retry_ioc <= limite_a_reessayer, Task.last_retry_ioc == None))
            taille_a_scanner = a_scanner.count()

            # Reads this list
            while taille_a_scanner > 0:

                # Max priority
                priorite_max = a_scanner.order_by(Task.priority_ioc.desc()).first().priority_ioc
                taches_priorite_max = a_scanner.filter(Task.priority_ioc==priorite_max)
                nbre_taches_priorite_max = taches_priorite_max.count()
                if BASE_DE_DONNEES_QUEUE.startswith('sqlite'):
                    tache = taches_priorite_max.order_by(func.random()).first()
                else:
                    tache = taches_priorite_max.order_by(func.newid()).first()

                # Mutex on the task
                tache.reserved_ioc = True
                tache.date_debut = datetime.datetime.now()
                session.commit()

                loggingiocscan.debug('===============================================================================')
                loggingiocscan.debug('Wake up, there is work to do !')
                loggingiocscan.info('Queue size : ' + str(taille_queue) + ', including ' + str(taille_a_scanner) + ' to scan, including ' + str(nbre_taches_priorite_max) + ' at top priority (' + str(priorite_max) + ')')

                loggingiocscan.debug('  --------------------------------')
                loggingiocscan.info('         Starting IOC Scan')
                loggingiocscan.info('        Target : ' + str(tache.ip))
                loggingiocscan.debug('  --------------------------------')

                # Recover Windows Credential and Configuration Profile from Batch
                batch = session.query(Batch).filter_by(id = tache.batch_id).first()
                wc = session.query(WindowsCredential).filter_by(id = batch.windows_credential_id).first()
                cp = session.query(ConfigurationProfile).filter_by(id = batch.configuration_profile_id).first()

                if not wc:
                    raise Exception('WindowsCredential %d does not exist' % tache.windows_credential_id)

                if not cp:
                    raise Exception('ConfigurationProfile %d does not exist' % tache.configuration_profile_id)

                # Decrypt password using MASTER_KEY and create target object
                targetPassword = crypto.decrypt(wc.encrypted_password, MASTER_KEY)
                targetObject = {'ip':       tache.ip,
                                'login':    wc.login,
                                'password': targetPassword,
                                'domain':   wc.domain,
                                }

                # If high confidentiality is enabled, create local directory if needed
                if cp.host_confidential:
                    loggingiocscan.info('"High confidentiality" mode enabled')
                    testdir = os.path.join(IOC_COMPONENT_ROOT, IOC_CONFIDENTIAL_DIRECTORY)
                    if not os.path.isdir(testdir):
                        loggingiocscan.info('Creating confidential directory %s' % testdir)
                        os.makedirs(testdir)

                # Let the scan begin

                if cp.id in tree_by_cp.keys():
                    resultats_scan = scan(targetObject, tree_by_cp[cp.id], cp.host_confidential)
                else:
                    loggingiocscan.warning('No IOC to scan (profile=%s)' % cp.name)
                    resultats_scan = {}

                analyse(resultats_scan, tache)

                # Update queue size
                taille_a_scanner = a_scanner.count()

                try:
                    # If launched as a service (probably removed soon, TODO)
                    halt = (win32event.WaitForSingleObject(hWaitStop, 2000) == win32event.WAIT_OBJECT_0)
                except:
                    pass
                if halt:
                    # Stop signal encountered
                    break

            if halt:
                loggingiocscan.info('Stopping IOC scanner : ' + threadname)
                break
            loggingiocscan.debug('(IOC scanner sleeping for ' + str(SLEEP) + ' seconds...)' \
                + (' (' + str(taille_queue) + ' waiting)' if taille_queue > 0 else ''))
            time.sleep(SLEEP)
        except KeyboardInterrupt:
            halt = True
        except Exception, e:
            loggingiocscan.error('Exception caught : %s, %s, %s' % (repr(e), str(e.message), str(e)))

            # Cancel changes and unreserve task
            session.rollback()
            if tache is not None:
                tache.reserved_ioc = False
                tache.retries_left_ioc = max(0,tache.retries_left_ioc - 1)
            session.commit()
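
The queue filtering above builds a retry cutoff by subtracting a timedelta from datetime.datetime.now() and comparing it against each task's last retry, and it also stamps the task's start time with datetime.datetime.now(). A minimal standalone sketch of the cutoff pattern (the is_due helper and the 60-second interval are illustrative, not taken from the project):

import datetime

SECONDS_BETWEEN_ATTEMPTS = 60  # illustrative value

def is_due(last_retry, interval=SECONDS_BETWEEN_ATTEMPTS):
    # A task is due if it has never been retried, or if its last retry
    # happened before the cutoff computed from the current time.
    cutoff = datetime.datetime.now() - datetime.timedelta(seconds=interval)
    return last_retry is None or last_retry <= cutoff

print(is_due(None))                                                     # True
print(is_due(datetime.datetime.now() - datetime.timedelta(minutes=5)))  # True
print(is_due(datetime.datetime.now()))                                  # False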

Example 68

Project: multi_key_dict Source File: multi_key_dict.py
def test_multi_key_dict():
    contains_all = lambda cont, in_items: not (False in [c in cont for c in in_items])

    m = multi_key_dict()
    assert( len(m) == 0 ), 'expected len(m) == 0'
    all_keys = list()

    m['aa', 12, 32, 'mmm'] = 123  # create a value with multiple keys..
    assert( len(m) == 1 ), 'expected len(m) == 1'
    all_keys.append(('aa', 'mmm', 32, 12)) # store it for later

    # try retrieving other keys mapped to the same value using one of them
    res = m.get_other_keys('aa')
    expected = ['mmm', 32, 12]
    assert(set(res) == set(expected)), 'get_other_keys(\'aa\'): {0} other than expected: {1} '.format(res, expected)

    # try retrieving other keys mapped to the same value using one of them: also include this key
    res = m.get_other_keys(32, True)
    expected = ['aa', 'mmm', 32, 12]
    assert(set(res) == set(expected)), 'get_other_keys(32): {0} other than expected: {1} '.format(res, expected)

    assert( m.has_key('aa') == True ), 'expected m.has_key(\'aa\') == True'
    assert( m.has_key('aab') == False ), 'expected m.has_key(\'aab\') == False'

    assert( m.has_key(12) == True ), 'expected m.has_key(12) == True'
    assert( m.has_key(13) == False ), 'expected m.has_key(13) == False'
    assert( m.has_key(32) == True ), 'expected m.has_key(32) == True'

    m['something else'] = 'abcd'
    assert( len(m) == 2 ), 'expected len(m) == 2'
    all_keys.append(('something else',)) # store for later

    m[23] = 0
    assert( len(m) == 3 ), 'expected len(m) == 3'
    all_keys.append((23,)) # store for later

    # check if it's possible to read this value back using either of keys
    assert( m['aa'] == 123 ), 'expected m[\'aa\'] == 123'
    assert( m[12] == 123 ), 'expected m[12] == 123'
    assert( m[32] == 123 ), 'expected m[32] == 123'
    assert( m['mmm'] == 123 ), 'expected m[\'mmm\'] == 123'

    # now update value and again - confirm it back - using different keys..
    m['aa'] = 45
    assert( m['aa'] == 45 ), 'expected m[\'aa\'] == 45'
    assert( m[12] == 45 ), 'expected m[12] == 45'
    assert( m[32] == 45 ), 'expected m[32] == 45'
    assert( m['mmm'] == 45 ), 'expected m[\'mmm\'] == 45'

    m[12] = '4'
    assert( m['aa'] == '4' ), 'expected m[\'aa\'] == \'4\''
    assert( m[12] == '4' ), 'expected m[12] == \'4\''

    # test __str__
    m_str_exp = '{(23): 0, (\'aa\', \'mmm\', 32, 12): \'4\', (\'something else\'): \'abcd\'}'
    m_str = str(m)
    assert (len(m_str) > 0), 'str(m) should not be empty!'    
    assert (m_str[0] == '{'), 'str(m) should start with \'{\', but starts with \'%c\'' % m_str[0]
    assert (m_str[-1] == '}'), 'str(m) should end with \'}\', but ends with \'%c\'' % m_str[-1]

    # check if all key-values are there as expected. They might be sorted differently
    def get_values_from_str(dict_str):
        sorted_keys_and_values = []
        for k in dict_str.split(', ('):
            keys, val = k.strip('{}() ').replace(')', '').split(':')
            keys = tuple(sorted([k.strip() for k in keys.split(',')]))
            sorted_keys_and_values.append((keys, val))
        return sorted_keys_and_values
    exp = get_values_from_str(m_str_exp)
    act = get_values_from_str(m_str)
    assert (set(act) == set(exp)), 'str(m) values: \'{0}\' are not {1} '.format(act, exp)

    # try accessing / creating a new (keys) -> value mapping whilst one of these
    # keys already maps to a value in this dictionary
    try:
        m['aa', 'bb'] = 'something new'
        assert(False), 'Should not allow adding multiple-keys when one of keys (\'aa\') already exists!'
    except KeyError as err:
        pass

    # now check if we can get all possible keys (formed as a list of tuples,
    # each tuple containing all keys)
    res = sorted([sorted([str(x) for x in k]) for k in m.keys()])
    expected = sorted([sorted([str(x) for x in k]) for k in all_keys])
    assert(res == expected), 'unexpected values from m.keys(), got:\n%s\n expected:\n%s' %(res, expected) 

    # check default items (which will unpack a tuple with key(s) and value)
    num_of_elements = 0
    for keys, value in m.items():
        sorted_keys = sorted([str(k) for k in keys])
        num_of_elements += 1
        assert(sorted_keys in expected), 'm.items(): unexpected keys: %s' % (sorted_keys)
        assert(m[keys[0]] == value), 'm.items(): unexpected value: %s (keys: %s)' % (value, keys)
    assert(num_of_elements > 0), 'm.items() returned generator that did not produce anything'

    # test default iterkeys()
    num_of_elements = 0
    for keys in m.keys():
        num_of_elements += 1
        keys_s = sorted([str(k) for k in keys])
        assert(keys_s in expected), 'm.keys(): unexpected keys: {0}'.format(keys_s)

    assert(num_of_elements > 0), 'm.iterkeys() returned generator that did not produce anything'

    # test iterkeys(int, True): useful to get all info from the dictionary
    # dictionary is iterated over the type specified, but all keys are returned.
    num_of_elements = 0
    for keys in m.iterkeys(int, True):
        keys_s = sorted([str(k) for k in keys])
        num_of_elements += 1
        assert(keys_s in expected), 'm.iterkeys(int, True): unexpected keys: {0}'.format(keys_s)
    assert(num_of_elements > 0), 'm.iterkeys(int, True) returned generator that did not produce anything'


    # test values for different types of keys()
    expected = set([0, '4'])
    res = set(m.values(int))
    assert (res == expected), 'm.values(int) are {0}, but expected: {1}.'.format(res, expected)

    expected = sorted(['4', 'abcd'])
    res = sorted(m.values(str)) 
    assert (res == expected), 'm.values(str) are {0}, but expected: {1}.'.format(res, expected)

    current_values = set([0, '4', 'abcd']) # default (should give all values)
    res = set(m.values()) 
    assert (res == current_values), 'm.values() are {0}, but expected: {1}.'.format(res, current_values)

    #test itervalues() (default) - should return all values. (Itervalues for other types are tested below)
    vals = set()
    for value in m.itervalues():
        vals.add(value)
    assert (current_values == vals), 'itervalues(): expected {0}, but collected {1}'.format(current_values, vals)

    #test items(int)
    items_for_int = sorted([((12, 32), '4'), ((23,), 0)])
    assert (items_for_int == sorted(m.items(int))), 'items(int): expected {0}, but collected {1}'.format(items_for_int, 
                                                                                                     sorted(m.items(int)))

    # test items(str)
    items_for_str = set([(('aa','mmm'), '4'), (('something else',), 'abcd')])
    res = set(m.items(str))
    assert (set(res) == items_for_str), 'items(str): expected {0}, but collected {1}'.format(items_for_str, res)

    # test items() (default - all items)
    # we tested keys(), values(), and __getitem__ above so here we'll re-create all_items using that
    all_items = set()
    keys = m.keys()
    values = m.values()
    for k in keys:
        all_items.add( (tuple(k), m[k[0]]) )

    res = set(m.items())
    assert (all_items == res), 'items() (all items): expected {0},\n\t\t\t\tbut collected {1}'.format(all_items, res)

    # now test deletion..
    curr_len = len(m)
    del m[12]
    assert( len(m) == curr_len - 1 ), 'expected len(m) == %d' % (curr_len - 1)
    assert(not m.has_key(12)), 'expected deleted key to no longer be found!'

    # try again 
    try:
        del m['aa']
        assert(False), 'cant remove again: item m[\'aa\'] should not exist!'
    except KeyError as err:
        pass

    # try to access non-existing 
    try:
        k =  m['aa']
        assert(False), 'removed item m[\'aa\'] should not exist!'
    except KeyError as err:
        pass

    # try to access non-existing with a different key 
    try:
        k =  m[12]
        assert(False), 'removed item m[12] should not exist!'
    except KeyError as err:
        pass

    # prepare for other tests (also testing creation of new items)
    del m
    m = multi_key_dict()
    tst_range = list(range(10, 40)) + list(range(50, 70))
    for i in tst_range:
        m[i] = i # will create a dictionary where keys are the same as values

    # test items()
    for key, value in m.items(int):
        assert(key == (value,)), 'items(int): expected {0}, but received {1}'.format(key, value)

    # test iterkeys()
    num_of_elements = 0
    returned_keys = set()
    for key in m.iterkeys(int):
        returned_keys.add(key)
        num_of_elements += 1
    assert(num_of_elements > 0), 'm.iterkeys(int) returned generator that did not produce anything'
    assert (returned_keys == set(tst_range)), 'iterkeys(int): expected {0}, but received {1}'.format(set(tst_range), returned_keys)


    #test itervalues(int)
    num_of_elements = 0
    returned_values  = set()
    for value in m.itervalues(int):
        returned_values.add(value)
        num_of_elements += 1
    assert (num_of_elements > 0), 'm.itervalues(int) returned generator that did not produce anything'
    assert (returned_values == set(tst_range)), 'itervalues(int): expected {0}, but received {1}'.format(set(tst_range), returned_values)

    # test values(int)
    res = sorted([x for x in m.values(int)])
    assert (res == tst_range), 'm.values(int) is not as expected.'

    # test keys()
    assert (set(m.keys(int)) == set(tst_range)), 'm.keys(int) is not as expected.'

    # test setitem with multiple keys
    m['xy', 999, 'abcd'] = 'teststr'
    try:
        m['xy', 998] = 'otherstr'
        assert(False), 'creating / updating m[\'xy\', 998] should fail!'
    except KeyError as err:
        pass

    # test setitem with multiple keys
    m['cd'] = 'somethingelse'
    try:
        m['cd', 999] = 'otherstr'
        assert(False), 'creating / updating m[\'cd\', 999] should fail!'
    except KeyError as err:
        pass

    m['xy', 999] = 'otherstr'
    assert (m['xy']  == 'otherstr'), 'm[\'xy\'] is not as expected.'
    assert (m[999]   == 'otherstr'), 'm[999] is not as expected.'
    assert (m['abcd'] == 'otherstr'), 'm[\'abcd\'] is not as expected.'

    m['abcd', 'xy']   =  'another'
    assert (m['xy']  == 'another'), 'm[\'xy\'] is not == \'another\'.'
    assert (m[999]   == 'another'), 'm[999] is not == \'another\''
    assert (m['abcd'] == 'another'), 'm[\'abcd\'] is not  == \'another\'.'

    # test get functionality of basic dictionaries
    m['CanIGet'] = 'yes'
    assert (m.get('CanIGet') == 'yes')
    assert (m.get('ICantGet') == None)
    assert (m.get('ICantGet', "Ok") == "Ok")

    k = multi_key_dict()
    k['1:12', 1] = 'key_has_:'
    k.items() # should not cause any problems to have : in key
    assert (k[1] == 'key_has_:'), 'k[1] is not equal to \'key_has_:\''

    import datetime
    n = datetime.datetime.now()
    l = multi_key_dict()
    l[n] = 'now' # use datetime obj as a key

    #test keys..
    res = [x for x in l.keys()][0] # in Python 3, keys() returns a dict_keys view
    expected = n,
    assert(expected == res), 'Expected \"{0}\", but got: \"{1}\"'.format(expected, res)

    res = [x for x in l.keys(datetime.datetime)][0]
    assert(n == res), 'Expected {0} as a key, but got: {1}'.format(n, res)
    
    res = [x for x in l.values()] # in Python 3, values() returns a dict_values view
    expected = ['now']
    assert(res == expected), 'Expected values: {0}, but got: {1}'.format(expected, res)

    # test items..
    exp_items = [((n,), 'now')]
    r = list(l.items())
    assert(r == exp_items), 'Expected for items(): tuple of keys: {0}, but got: {1}'.format(exp_items, r)
    assert(exp_items[0][1] == 'now'), 'Expected for items(): value: {0}, but got: {1}'.format('now', 
                                                                                              exp_items[0][1])

    x = multi_key_dict({('k', 'kilo'):1000, ('M', 'MEGA', 1000000):1000000}, milli=0.01)
    assert (x['k'] == 1000), 'x[\'k\'] is not equal to 1000'
    x['kilo'] = 'kilo'
    assert (x['kilo'] == 'kilo'), 'x[\'kilo\'] is not equal to \'kilo\''

    y = multi_key_dict([(('two', 'duo'), 2), (('one', 'uno'), 1), ('three', 3)])

    assert (y['two'] == 2), 'y[\'two\'] is not equal to 2'
    y['one'] = 'one'
    assert (y['one'] == 'one'), 'y[\'one\'] is not equal to \'one\''

    try:
        y = multi_key_dict([(('two', 'duo'), 2), ('one', 'uno', 1), ('three', 3)])
        assert(False), 'creating dictionary using iterable with tuples of size > 2 should fail!'
    except:
        pass

    print ('All tests passed OK!')
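
The tail of this test uses a datetime.datetime.now() result as a dictionary key and later retrieves it via keys(datetime.datetime). A minimal sketch of the same idea with a plain dict, just to show that datetime objects are hashable keys (the variable names are illustrative):

import datetime

n = datetime.datetime.now()
d = {n: 'now'}            # datetime instances are hashable, so they work as dict keys
assert d[n] == 'now'

# filtering keys by type, roughly what l.keys(datetime.datetime) does above
dt_keys = [k for k in d if isinstance(k, datetime.datetime)]
assert dt_keys == [n]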

Example 69

Project: ocspbuilder Source File: __init__.py
    def build(self, responder_private_key=None, responder_certificate=None):
        """
        Validates the request information, constructs the ASN.1 structure and
        signs it.

        The responder_private_key and responder_certificate parameters are only
        required if the response_status is "successful".

        :param responder_private_key:
            An asn1crypto.keys.PrivateKeyInfo or oscrypto.asymmetric.PrivateKey
            object for the private key to sign the response with

        :param responder_certificate:
            An asn1crypto.x509.Certificate or oscrypto.asymmetric.Certificate
            object of the certificate associated with the private key

        :return:
            An asn1crypto.ocsp.OCSPResponse object of the response
        """

        if self._response_status != 'successful':
            return ocsp.OCSPResponse({
                'response_status': self._response_status
            })

        is_oscrypto = isinstance(responder_private_key, asymmetric.PrivateKey)
        if not isinstance(responder_private_key, keys.PrivateKeyInfo) and not is_oscrypto:
            raise TypeError(_pretty_message(
                '''
                responder_private_key must be an instance of
                asn1crypto.keys.PrivateKeyInfo or
                oscrypto.asymmetric.PrivateKey, not %s
                ''',
                _type_name(responder_private_key)
            ))

        cert_is_oscrypto = isinstance(responder_certificate, asymmetric.Certificate)
        if not isinstance(responder_certificate, x509.Certificate) and not cert_is_oscrypto:
            raise TypeError(_pretty_message(
                '''
                responder_certificate must be an instance of
                asn1crypto.x509.Certificate or
                oscrypto.asymmetric.Certificate, not %s
                ''',
                _type_name(responder_certificate)
            ))

        if cert_is_oscrypto:
            responder_certificate = responder_certificate.asn1

        if self._certificate is None:
            raise ValueError(_pretty_message(
                '''
                certificate must be set if the response_status is
                "successful"
                '''
            ))
        if self._certificate_status is None:
            raise ValueError(_pretty_message(
                '''
                certificate_status must be set if the response_status is
                "successful"
                '''
            ))

        def _make_extension(name, value):
            return {
                'extn_id': name,
                'critical': False,
                'extn_value': value
            }

        response_data_extensions = []
        single_response_extensions = []

        for name, value in self._response_data_extensions.items():
            response_data_extensions.append(_make_extension(name, value))
        if self._nonce:
            response_data_extensions.append(
                _make_extension('nonce', self._nonce)
            )

        if not response_data_extensions:
            response_data_extensions = None

        for name, value in self._single_response_extensions.items():
            single_response_extensions.append(_make_extension(name, value))

        if self._certificate_issuer:
            single_response_extensions.append(
                _make_extension(
                    'certificate_issuer',
                    [
                        x509.GeneralName(
                            name='directory_name',
                            value=self._certificate_issuer.subject
                        )
                    ]
                )
            )

        if not single_response_extensions:
            single_response_extensions = None

        responder_key_hash = getattr(responder_certificate.public_key, self._key_hash_algo)

        if self._certificate_status == 'good':
            cert_status = ocsp.CertStatus(
                name='good',
                value=core.Null()
            )
        elif self._certificate_status == 'unknown':
            cert_status = ocsp.CertStatus(
                name='unknown',
                value=core.Null()
            )
        else:
            status = self._certificate_status
            reason = status if status != 'revoked' else 'unspecified'
            cert_status = ocsp.CertStatus(
                name='revoked',
                value={
                    'revocation_time': self._revocation_date,
                    'revocation_reason': reason,
                }
            )

        issuer = self._certificate_issuer if self._certificate_issuer else responder_certificate
        if issuer.subject != self._certificate.issuer:
            raise ValueError(_pretty_message(
                '''
                responder_certificate does not appear to be the issuer for
                the certificate. Perhaps set the .certificate_issuer attribute?
                '''
            ))

        produced_at = datetime.now(timezone.utc)

        if self._this_update is None:
            self._this_update = produced_at

        if self._next_update is None:
            self._next_update = self._this_update + timedelta(days=7)

        response_data = ocsp.ResponseData({
            'responder_id': ocsp.ResponderId(name='by_key', value=responder_key_hash),
            'produced_at': produced_at,
            'responses': [
                {
                    'cert_id': {
                        'hash_algorithm': {
                            'algorithm': self._key_hash_algo
                        },
                        'issuer_name_hash': getattr(self._certificate.issuer, self._key_hash_algo),
                        'issuer_key_hash': getattr(issuer.public_key, self._key_hash_algo),
                        'serial_number': self._certificate.serial_number,
                    },
                    'cert_status': cert_status,
                    'this_update': self._this_update,
                    'next_update': self._next_update,
                    'single_extensions': single_response_extensions
                }
            ],
            'response_extensions': response_data_extensions
        })

        signature_algo = responder_private_key.algorithm
        if signature_algo == 'ec':
            signature_algo = 'ecdsa'

        signature_algorithm_id = '%s_%s' % (self._hash_algo, signature_algo)

        if responder_private_key.algorithm == 'rsa':
            sign_func = asymmetric.rsa_pkcs1v15_sign
        elif responder_private_key.algorithm == 'dsa':
            sign_func = asymmetric.dsa_sign
        elif responder_private_key.algorithm == 'ec':
            sign_func = asymmetric.ecdsa_sign

        if not is_oscrypto:
            responder_private_key = asymmetric.load_private_key(responder_private_key)
        signature_bytes = sign_func(responder_private_key, response_data.dump(), self._hash_algo)

        certs = None
        if self._certificate_issuer:
            certs = [responder_certificate]

        return ocsp.OCSPResponse({
            'response_status': self._response_status,
            'response_bytes': {
                'response_type': 'basic_ocsp_response',
                'response': {
                    'tbs_response_data': response_data,
                    'signature_algorithm': {'algorithm': signature_algorithm_id},
                    'signature': signature_bytes,
                    'certs': certs
                }
            }
        })
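
Note that this builder calls datetime.now(timezone.utc) (the class imported from the datetime module) rather than the naive datetime.datetime.now(), so produced_at, this_update and the derived next_update are all timezone-aware. A minimal sketch of that pattern, assuming a Python 3 datetime module:

from datetime import datetime, timedelta, timezone

produced_at = datetime.now(timezone.utc)       # timezone-aware current time
this_update = produced_at
next_update = this_update + timedelta(days=7)  # default validity window used above
assert next_update.tzinfo is timezone.utc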

Example 70

Project: ganga Source File: DiracFile.py
    def put(self, lfn='', force=False, uploadSE="", replicate=False):
        """
        Try to upload file sequentially to storage elements defined in configDirac['allDiracSE'].
        File will be uploaded to the first SE that the upload command succeeds for.

        The file is uploaded to the SE described by the DiracFile.defaultSE attribute

        Alternatively, the user can specify an uploadSE which contains an SE
        which the file is to be uploaded to.

        If the user wants to replicate this file(s) across all SE then they should state replicate = True.

        Return value will be either the stdout from the dirac upload command if not
        using the wildcard characters '*?[]' in the namePattern.
        If the wildcard characters are used then the return value will be a list containing
        newly created DiracFile objects which were the result of glob-ing the wildcards.

        The objects in this list will have been uploaded or had their failureReason attribute populated if the
        upload failed.
        """

        if self.lfn != "" and force == False and lfn == '':
            logger.warning("Warning you're about to 'put' this DiracFile: %s on the grid as it already has an lfn: %s" % (self.namePattern, self.lfn))
            decision = raw_input('y / [n]:')
            while not (decision.lower() in ['y', 'n'] or decision.lower() == ''):
                decision = raw_input('y / [n]:')

            if decision.lower() == 'y':
                pass
            else:
                return

        if (lfn != '' and self.lfn != '') and force == False:
            logger.warning("Warning you're attempting to put this DiracFile: %s" % self.namePattern)
            logger.warning("It currently has an LFN associated with it: %s" % self.lfn)
            logger.warning("Do you want to continue and attempt to upload to: %s" % lfn)
            decision = raw_input('y / [n]:')
            while not (decision.lower() in ['y', 'n', '']):
                decision = raw_input('y / [n]:')

            if decision.lower() == 'y':
                pass
            else:
                return

        if lfn and os.path.basename(lfn) != self.namePattern:
            logger.warning("Changing namePattern from: '%s' to '%s' during put operation" % (self.namePattern, os.path.basename(lfn)))

        if lfn:
            self.lfn = lfn

        # It looks like this will only be needed for interactive uploading of jobs.
        # Also, if any backend needs a DIRAC upload on the client, then when downloaded
        # this will upload and then delete the file.

        if self.namePattern == "":
            if self.lfn != '':
                logger.warning("'Put'-ing a file with ONLY an existing LFN makes no sense!")
            raise GangaException('Can\'t upload a file without a local file name.')

        sourceDir = self.localDir
        if self.localDir is None:
            sourceDir = os.getcwd()
            # attached to a job, use the joboutputdir
            if self._parent != None and os.path.isdir(self.getJobObject().outputdir):
                sourceDir = self.getJobObject().outputdir

        if not os.path.isdir(sourceDir):
            raise GangaException('localDir attribute is not a valid dir, don\'t know from which dir to take the file')

        if regex.search(self.namePattern) is not None:
            if self.lfn != "":
                logger.warning("Cannot specify a single lfn for a wildcard namePattern")
                logger.warning("LFN will be generated automatically")
                self.lfn = ""

        if not self.remoteDir:
            try:
                job = self.getJobObject()
                lfn_folder = os.path.join("GangaUploadedFiles", "GangaJob_%s" % job.getFQID('.'))
            except AssertionError:
                t = datetime.datetime.now()
                this_date = t.strftime("%H.%M_%A_%d_%B_%Y")
                lfn_folder = os.path.join("GangaUploadedFiles", 'GangaFiles_%s' % this_date)
            self.lfn = os.path.join(DiracFile.diracLFNBase(), lfn_folder, self.namePattern)

        if self.remoteDir[:4] == 'LFN:':
            lfn_base = self.remoteDir[4:]
        else:
            lfn_base = self.remoteDir

        if uploadSE == "":
            if self.defaultSE != "":
                storage_elements = [self.defaultSE]
            else:
                if configDirac['allDiracSE']:
                    storage_elements = [random.choice(configDirac['allDiracSE'])]
                else:
                    raise GangaException("Can't upload a file without a valid defaultSE or storageSE, please provide one")
        elif isinstance(uploadSE, list):
            storage_elements = uploadSE
        else:
            storage_elements = [uploadSE]

        outputFiles = GangaList()
        for this_file in glob.glob(os.path.join(sourceDir, self.namePattern)):
            name = this_file

            if not os.path.exists(name):
                if not self.compressed:
                    raise GangaException('Cannot upload file. File "%s" must exist!' % name)
                name += '.gz'
                if not os.path.exists(name):
                    raise GangaException('File "%s" must exist!' % name)
            else:
                if self.compressed:
                    os.system('gzip -c %s > %s.gz' % (name, name))
                    name += '.gz'
                    if not os.path.exists(name):
                        raise GangaException('File "%s" must exist!' % name)

            if lfn == "":
                lfn = os.path.join(lfn_base, os.path.basename(name))

            #lfn = os.path.join(os.path.dirname(self.lfn), this_file)

            d = DiracFile()
            d.namePattern = os.path.basename(name)
            d.compressed = self.compressed
            d.localDir = sourceDir
            stderr = ''
            stdout = ''
            logger.debug('Uploading file \'%s\' to \'%s\' as \'%s\'' % (name, storage_elements[0], lfn))
            logger.debug('execute: uploadFile("%s", "%s", %s)' % (lfn, name, str([storage_elements[0]])))
            stdout = execute('uploadFile("%s", "%s", %s)' % (lfn, name, str([storage_elements[0]])))
            if type(stdout) == str:
                logger.warning("Couldn't upload file '%s': \'%s\'" % (os.path.basename(name), stdout))
                continue
            if stdout.get('OK', False) and lfn in stdout.get('Value', {'Successful': {}})['Successful']:
                # when doing the two step upload delete the temp file
                if self.compressed or self._parent != None:
                    os.remove(name)
                # need another eval as datetime needs to be included.
                guid = stdout['Value']['Successful'][lfn].get('GUID', '')
                if regex.search(self.namePattern) is not None:
                    d.lfn = lfn
                    d.remoteDir = os.path.dirname(lfn)
                    d.locations = stdout['Value']['Successful'][lfn].get('allDiracSE', '')
                    d.guid = guid
                    outputFiles.append(GPIProxyObjectFactory(d))
                    continue
                else:
                    self.lfn = lfn
                    self.remoteDir = os.path.dirname(lfn)
                    self.locations = stdout['Value']['Successful'][lfn].get('allDiracSE', '')
                    self.guid = guid
                # return ## WHY?
            else:
                failureReason = "Error in uploading file %s : %s" % (os.path.basename(name), str(stdout))
                logger.error(failureReason)
                if regex.search(self.namePattern) is not None:
                    d.failureReason = failureReason
                    outputFiles.append(GPIProxyObjectFactory(d))
                    continue
                self.failureReason = failureReason
                return str(stdout)

        if replicate == True:

            if len(outputFiles) == 1 or len(outputFiles) == 0:
                storage_elements.pop(0)
                for se in storage_elements:
                    self.replicate(se)
            else:
                storage_elements.pop(0)
                for this_file in outputFiles:
                    for se in storage_elements:
                        this_file.replicate(se)

        if len(outputFiles) > 0:
            return outputFiles
        else:
            outputFiles.append(self)
            return outputFiles
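
When put() is not attached to a job, it falls back to datetime.datetime.now() plus strftime to build a per-run upload folder name. A standalone sketch of that fallback (the directory names mirror the example; the printed value is only illustrative):

import datetime
import os

t = datetime.datetime.now()
this_date = t.strftime("%H.%M_%A_%d_%B_%Y")
lfn_folder = os.path.join("GangaUploadedFiles", "GangaFiles_%s" % this_date)
print(lfn_folder)   # e.g. GangaUploadedFiles/GangaFiles_14.05_Tuesday_17_June_2015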

Example 71

Project: Sick-Beard Source File: tvdb_api.py
    def __init__(self,
                interactive = False,
                select_first = False,
                debug = False,
                cache = True,
                banners = False,
                actors = False,
                custom_ui = None,
                language = None,
                search_all_languages = False,
                apikey = None,
                forceConnect=False,
                useZip=False,
                dvdorder=False):

        """interactive (True/False):
            When True, the built-in console UI is used to select the correct show.
            When False, the first search result is used.

        select_first (True/False):
            Automatically selects the first series search result (rather
            than showing the user a list of more than one series).
            Is overridden by interactive = False, or specifying a custom_ui

        debug (True/False) DEPRECATED:
             Replaced with proper use of logging module. To show debug messages:

                 >>> import logging
                 >>> logging.basicConfig(level = logging.DEBUG)

        cache (True/False/str/unicode/urllib2 opener):
            Retrieved XML is persisted to disc. If True, stores in
            tvdb_api folder under your system's TEMP_DIR, if set to
            str/unicode instance it will use this as the cache
            location. If False, disables caching.  Can also be passed
            an arbitrary Python object, which is used as a urllib2
            opener, which should be created by urllib2.build_opener

        banners (True/False):
            Retrieves the banners for a show. These are accessed
            via the _banners key of a Show(), for example:

            >>> Tvdb(banners=True)['scrubs']['_banners'].keys()
            ['fanart', 'poster', 'series', 'season']

        actors (True/False):
            Retrieves a list of the actors for a show. These are accessed
            via the _actors key of a Show(), for example:

            >>> t = Tvdb(actors=True)
            >>> t['scrubs']['_actors'][0]['name']
            u'Zach Braff'

        custom_ui (tvdb_ui.BaseUI subclass):
            A callable subclass of tvdb_ui.BaseUI (overrides interactive option)

        language (2 character language abbreviation):
            The language of the returned data. Is also the language search
            uses. Default is "en" (English). For full list, run..

            >>> Tvdb().config['valid_languages'] #doctest: +ELLIPSIS
            ['da', 'fi', 'nl', ...]

        search_all_languages (True/False):
            By default, Tvdb will only search in the language specified using
            the language option. When this is True, it will search for the
            show in any language.
        
        apikey (str/unicode):
            Override the default thetvdb.com API key. By default it will use
            tvdb_api's own key (fine for small scripts), but you can use your
            own key if desired - this is recommended if you are embedding
            tvdb_api in a larger application.
            See http://thetvdb.com/?tab=apiregister to get your own key

        forceConnect (bool):
            If true it will always try to connect to theTVDB.com even if we
            recently timed out. By default it will wait one minute before
            trying again, and any requests within that one minute window will
            return an exception immediately.

        useZip (bool):
            Download the zip archive where possible, instead of the xml.
            This is only used when all episodes are pulled.
            Only the main language xml is used; the actor and banner xml are lost.
        """
        
        global lastTimeout
        
        # if the last timeout happened less than 1 minute ago, just give up (unless forceConnect is set)
        if not forceConnect and lastTimeout != None and datetime.datetime.now() - lastTimeout < datetime.timedelta(minutes=1):
            raise tvdb_error("We recently timed out, so giving up early this time")
        
        self.shows = ShowContainer() # Holds all Show classes
        self.corrections = {} # Holds show-name to show_id mapping

        self.config = {}

        if apikey is not None:
            self.config['apikey'] = apikey
        else:
            self.config['apikey'] = "0629B785CE550C8D" # tvdb_api's API key

        self.config['debug_enabled'] = debug # show debugging messages

        self.config['custom_ui'] = custom_ui

        self.config['interactive'] = interactive # prompt for correct series?

        self.config['select_first'] = select_first

        self.config['search_all_languages'] = search_all_languages

        self.config['useZip'] = useZip

        self.config['dvdorder'] = dvdorder

        if cache is True:
            self.config['cache_enabled'] = True
            self.config['cache_location'] = self._getTempDir()
            self.urlopener = urllib2.build_opener(
                CacheHandler(self.config['cache_location'])
            )

        elif cache is False:
            self.config['cache_enabled'] = False
            self.urlopener = urllib2.build_opener() # default opener with no caching

        elif isinstance(cache, basestring):
            self.config['cache_enabled'] = True
            self.config['cache_location'] = cache
            self.urlopener = urllib2.build_opener(
                CacheHandler(self.config['cache_location'])
            )

        elif isinstance(cache, urllib2.OpenerDirector):
            # If passed something from urllib2.build_opener, use that
            log().debug("Using %r as urlopener" % cache)
            self.config['cache_enabled'] = True
            self.urlopener = cache

        else:
            raise ValueError("Invalid value for Cache %r (type was %s)" % (cache, type(cache)))

        self.config['banners_enabled'] = banners
        self.config['actors_enabled'] = actors

        if self.config['debug_enabled']:
            warnings.warn("The debug argument to tvdb_api.__init__ will be removed in the next version. "
            "To enable debug messages, use the following code before importing: "
            "import logging; logging.basicConfig(level=logging.DEBUG)")
            logging.basicConfig(level=logging.DEBUG)


        # List of languages from http://thetvdb.com/api/0629B785CE550C8D/languages.xml
        # Hard-coded here as it is relatively static, and saves another HTTP request, as
        # recommended on http://thetvdb.com/wiki/index.php/API:languages.xml
        self.config['valid_languages'] = [
            "da", "fi", "nl", "de", "it", "es", "fr","pl", "hu","el","tr",
            "ru","he","ja","pt","zh","cs","sl", "hr","ko","en","sv","no"
        ]

        # thetvdb.com should be based around numeric language codes,
        # but to link to a series like http://thetvdb.com/?tab=series&id=79349&lid=16
        # requires the language ID, thus this mapping is required (mainly
        # for usage in tvdb_ui - internally tvdb_api will use the language abbreviations)
        self.config['langabbv_to_id'] = {'el': 20, 'en': 7, 'zh': 27,
        'it': 15, 'cs': 28, 'es': 16, 'ru': 22, 'nl': 13, 'pt': 26, 'no': 9,
        'tr': 21, 'pl': 18, 'fr': 17, 'hr': 31, 'de': 14, 'da': 10, 'fi': 11,
        'hu': 19, 'ja': 25, 'he': 24, 'ko': 32, 'sv': 8, 'sl': 30}

        if language is None:
            self.config['language'] = 'en'
        else:
            if language not in self.config['valid_languages']:
                raise ValueError("Invalid language %s, options are: %s" % (
                    language, self.config['valid_languages']
                ))
            else:
                self.config['language'] = language

        # The following url_ configs are based of the
        # http://thetvdb.com/wiki/index.php/Programmers_API
        self.config['base_url'] = "http://thetvdb.com"

        if self.config['search_all_languages']:
            self.config['url_getSeries'] = u"%(base_url)s/api/GetSeries.php?seriesname=%%s&language=all" % self.config
        else:
            self.config['url_getSeries'] = u"%(base_url)s/api/GetSeries.php?seriesname=%%s&language=%(language)s" % self.config

        self.config['url_epInfo'] = u"%(base_url)s/api/%(apikey)s/series/%%s/all/%%s.xml" % self.config
        self.config['url_epInfo_zip'] = u"%(base_url)s/api/%(apikey)s/series/%%s/all/%%s.zip" % self.config

        self.config['url_seriesInfo'] = u"%(base_url)s/api/%(apikey)s/series/%%s/%%s.xml" % self.config
        self.config['url_actorsInfo'] = u"%(base_url)s/api/%(apikey)s/series/%%s/actors.xml" % self.config

        self.config['url_seriesBanner'] = u"%(base_url)s/api/%(apikey)s/series/%%s/banners.xml" % self.config
        self.config['url_artworkPrefix'] = u"%(base_url)s/banners/%%s" % self.config
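
The constructor above rate-limits reconnection attempts by comparing datetime.datetime.now() - lastTimeout against a one-minute timedelta. A minimal sketch of that guard outside the tvdb_api context (the function name, the RuntimeError and the module-level variable are illustrative):

import datetime

last_timeout = None   # would be set to datetime.datetime.now() when a timeout occurs

def check_recent_timeout(force_connect=False):
    # Refuse to reconnect if a timeout happened less than a minute ago,
    # unless the caller explicitly forces the connection attempt.
    if (not force_connect and last_timeout is not None
            and datetime.datetime.now() - last_timeout < datetime.timedelta(minutes=1)):
        raise RuntimeError("We recently timed out, so giving up early this time")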

Example 72

Project: core Source File: wlanemanetests.py
def main():
    ''' Main routine when running from command-line.
    '''
    usagestr = "usage: %prog [-h] [options] [args]"
    parser = optparse.OptionParser(usage = usagestr)
    parser.set_defaults(numnodes = 10, delay = 3, duration = 10, rate = 512,
                        verbose = False,
                        numping = 50, numiperf = 1, nummgen = 1)

    parser.add_option("-d", "--delay", dest = "delay", type = float,
                      help = "wait time before testing")
    parser.add_option("-l", "--logfile", dest = "logfile", type = str,
                      help = "log detailed output to the specified file")
    parser.add_option("-n", "--numnodes", dest = "numnodes", type = int,
                      help = "number of nodes")
    parser.add_option("-r", "--rate", dest = "rate", type = float,
                      help = "kbps rate to use for MGEN CPU tests")
    parser.add_option("--numping", dest = "numping", type = int,
                      help = "number of ping latency test runs")
    parser.add_option("--numiperf", dest = "numiperf", type = int,
                      help = "number of iperf throughput test runs")
    parser.add_option("--nummgen", dest = "nummgen", type = int,
                      help = "number of MGEN CPU tests runs")
    parser.add_option("-t", "--time", dest = "duration", type = int,
                      help = "duration in seconds of throughput and CPU tests")
    parser.add_option("-v", "--verbose", dest = "verbose",
                      action = "store_true", help = "be more verbose")

    def usage(msg = None, err = 0):
        sys.stdout.write("\n")
        if msg:
            sys.stdout.write(msg + "\n\n")
        parser.print_help()
        sys.exit(err)

    # parse command line opt
    (opt, args) = parser.parse_args()

    if opt.numnodes < 2:
        usage("invalid numnodes: %s" % opt.numnodes)
    if opt.delay < 0.0:
        usage("invalid delay: %s" % opt.delay)
    if opt.rate < 0.0:
        usage("invalid rate: %s" % opt.rate)

    for a in args:
        sys.stderr.write("ignoring command line argument: '%s'\n" % a)

    results = {}
    starttime = datetime.datetime.now()
    exp = Experiment(opt = opt, start=starttime)
    exp.info("Starting wlanemanetests.py tests %s" % starttime.ctime())

    # system sanity checks here
    emanever, emaneverstr = Emane.detectversionfromcmd()
    if opt.verbose:
        exp.info("Detected EMANE version %s" % (emaneverstr,))

    # bridged
    exp.info("setting up bridged tests 1/2 no link effects")
    exp.info("creating topology: numnodes = %s" % \
                (opt.numnodes, ))
    exp.createbridgedsession(numnodes=opt.numnodes, verbose=opt.verbose)
    exp.setnodes()
    exp.info("waiting %s sec (node/route bring-up)" % opt.delay)
    time.sleep(opt.delay)
    results['0 bridged'] = exp.runalltests("bridged")
    exp.info("done; elapsed time: %s" % (datetime.datetime.now() - exp.start))

    # bridged with netem
    exp.info("setting up bridged tests 2/2 with netem")
    exp.setneteffects(bw=54000000, delay=0)
    exp.info("waiting %s sec (queue bring-up)" % opt.delay)
    results['1.0 netem'] = exp.runalltests("netem")
    exp.info("shutting down bridged session")

    # bridged with netem (1 Mbps,200ms)
    exp.info("setting up bridged tests 3/2 with netem")
    exp.setneteffects(bw=1000000, delay=20000)
    exp.info("waiting %s sec (queue bring-up)" % opt.delay)
    results['1.2 netem_1M'] = exp.runalltests("netem_1M")
    exp.info("shutting down bridged session")

    # bridged with netem (54 kbps,500ms)
    exp.info("setting up bridged tests 3/2 with netem")
    exp.setneteffects(bw=54000, delay=100000)
    exp.info("waiting %s sec (queue bring-up)" % opt.delay)
    results['1.4 netem_54K'] = exp.runalltests("netem_54K")
    exp.info("shutting down bridged session")
    exp.reset()

    # EMANE bypass model
    exp.info("setting up EMANE tests 1/2 with bypass model")
    exp.createemanesession(numnodes=opt.numnodes, verbose=opt.verbose,
                           cls=EmaneBypassModel, values=None)
    exp.setnodes()
    exp.info("waiting %s sec (node/route bring-up)" % opt.delay)
    time.sleep(opt.delay)
    results['2.0 bypass'] = exp.runalltests("bypass")
    exp.info("shutting down bypass session")
    exp.reset()
    
    exp.info("waiting %s sec (between EMANE tests)" % opt.delay)
    time.sleep(opt.delay)

    # EMANE RF-PIPE model: no restrictions (max datarate)
    exp.info("setting up EMANE tests 2/4 with RF-PIPE model")
    rfpipevals = list(EmaneRfPipeModel.getdefaultvalues())
    rfpnames = EmaneRfPipeModel.getnames()
    rfpipevals[ rfpnames.index('datarate') ] = '4294967295' # max value
    if emanever < Emane.EMANE091:
        rfpipevals[ rfpnames.index('pathlossmode') ] = '2ray'
        rfpipevals[ rfpnames.index('defaultconnectivitymode') ] = '1'
    else:
        rfpipevals[ rfpnames.index('propagationmodel') ] = '2ray'
    exp.createemanesession(numnodes=opt.numnodes, verbose=opt.verbose,
                          cls=EmaneRfPipeModel, values=rfpipevals)
    exp.setnodes()
    exp.info("waiting %s sec (node/route bring-up)" % opt.delay)
    time.sleep(opt.delay)
    results['3.0 rfpipe'] = exp.runalltests("rfpipe")
    exp.info("shutting down RF-PIPE session")
    exp.reset()

    # EMANE RF-PIPE model: 54M datarate
    exp.info("setting up EMANE tests 3/4 with RF-PIPE model 54M")
    rfpipevals = list(EmaneRfPipeModel.getdefaultvalues())
    rfpnames = EmaneRfPipeModel.getnames()
    rfpipevals[ rfpnames.index('datarate') ] = '54000000'
    # TX delay != propagation delay
    #rfpipevals[ rfpnames.index('delay') ] = '5000'
    if emanever < Emane.EMANE091:
        rfpipevals[ rfpnames.index('pathlossmode') ] = '2ray'
        rfpipevals[ rfpnames.index('defaultconnectivitymode') ] = '1'
    else:
        rfpipevals[ rfpnames.index('propagationmodel') ] = '2ray'
    exp.createemanesession(numnodes=opt.numnodes, verbose=opt.verbose,
                          cls=EmaneRfPipeModel, values=rfpipevals)
    exp.setnodes()
    exp.info("waiting %s sec (node/route bring-up)" % opt.delay)
    time.sleep(opt.delay)
    results['4.0 rfpipe54m'] = exp.runalltests("rfpipe54m")
    exp.info("shutting down RF-PIPE session")
    exp.reset()

    # EMANE RF-PIPE model:  54K datarate
    exp.info("setting up EMANE tests 4/4 with RF-PIPE model pathloss")
    rfpipevals = list(EmaneRfPipeModel.getdefaultvalues())
    rfpnames = EmaneRfPipeModel.getnames()
    rfpipevals[ rfpnames.index('datarate') ] = '54000'
    if emanever < Emane.EMANE091:
        rfpipevals[ rfpnames.index('pathlossmode') ] = 'pathloss'
        rfpipevals[ rfpnames.index('defaultconnectivitymode') ] = '0'
    else:
        rfpipevals[ rfpnames.index('propagationmodel') ] = 'precomputed'
    exp.createemanesession(numnodes=opt.numnodes, verbose=opt.verbose,
                          cls=EmaneRfPipeModel, values=rfpipevals)
    exp.setnodes()
    exp.info("waiting %s sec (node/route bring-up)" % opt.delay)
    time.sleep(opt.delay)
    exp.info("sending pathloss events to govern connectivity")
    exp.setpathloss(opt.numnodes)
    results['5.0 pathloss'] = exp.runalltests("pathloss")
    exp.info("shutting down RF-PIPE session")
    exp.reset()

    # EMANE RF-PIPE model (512K, 200ms)
    exp.info("setting up EMANE tests 4/4 with RF-PIPE model pathloss")
    rfpipevals = list(EmaneRfPipeModel.getdefaultvalues())
    rfpnames = EmaneRfPipeModel.getnames()
    rfpipevals[ rfpnames.index('datarate') ] = '512000'
    rfpipevals[ rfpnames.index('delay') ] = '200'
    rfpipevals[ rfpnames.index('pathlossmode') ] = 'pathloss'
    rfpipevals[ rfpnames.index('defaultconnectivitymode') ] = '0'
    exp.createemanesession(numnodes=opt.numnodes, verbose=opt.verbose,
                          cls=EmaneRfPipeModel, values=rfpipevals)
    exp.setnodes()
    exp.info("waiting %s sec (node/route bring-up)" % opt.delay)
    time.sleep(opt.delay)
    exp.info("sending pathloss events to govern connectivity")
    exp.setpathloss(opt.numnodes)
    results['5.1 pathloss'] = exp.runalltests("pathloss")
    exp.info("shutting down RF-PIPE session")
    exp.reset()
  
    # summary of results in CSV format
    exp.info("----- summary of results (%s nodes, rate=%s, duration=%s) -----" \
             % (opt.numnodes, opt.rate, opt.duration))
    exp.info("netname:latency,mdev,throughput,cpu,loss")

    for test in sorted(results.keys()):
        (latency, mdev, throughput, cpu, loss) = results[test]
        exp.info("%s:%.03f,%.03f,%d,%.02f,%.02f" % \
                 (test, latency, mdev, throughput, cpu,loss))

    exp.logend()
    return exp
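
This driver records starttime = datetime.datetime.now() before the runs and reports elapsed time as datetime.datetime.now() - exp.start; subtracting two datetime objects yields a datetime.timedelta. A minimal sketch of that timing pattern (the sleep stands in for the actual test run):

import datetime
import time

start = datetime.datetime.now()
time.sleep(0.1)                              # stand-in for the actual test run
elapsed = datetime.datetime.now() - start    # subtraction gives a datetime.timedelta
print("done; elapsed time: %s" % elapsed)    # e.g. 0:00:00.100123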

Example 73

Project: oscrypto Source File: tls.py
    def _handshake(self):
        """
        Perform an initial TLS handshake
        """

        session_context = None
        ssl_policy_ref = None
        crl_search_ref = None
        crl_policy_ref = None
        ocsp_search_ref = None
        ocsp_policy_ref = None
        policy_array_ref = None

        try:
            if osx_version_info < (10, 8):
                session_context_pointer = new(Security, 'SSLContextRef *')
                result = Security.SSLNewContext(False, session_context_pointer)
                handle_sec_error(result)
                session_context = unwrap(session_context_pointer)

            else:
                session_context = Security.SSLCreateContext(
                    null(),
                    SecurityConst.kSSLClientSide,
                    SecurityConst.kSSLStreamType
                )

            result = Security.SSLSetIOFuncs(
                session_context,
                _read_callback_pointer,
                _write_callback_pointer
            )
            handle_sec_error(result)

            self._connection_id = id(self) % 2147483647
            _connection_refs[self._connection_id] = self
            _socket_refs[self._connection_id] = self._socket
            result = Security.SSLSetConnection(session_context, self._connection_id)
            handle_sec_error(result)

            utf8_domain = self._hostname.encode('utf-8')
            result = Security.SSLSetPeerDomainName(
                session_context,
                utf8_domain,
                len(utf8_domain)
            )
            handle_sec_error(result)

            if osx_version_info >= (10, 10):
                disable_auto_validation = self._session._manual_validation or self._session._extra_trust_roots
                explicit_validation = (not self._session._manual_validation) and self._session._extra_trust_roots
            else:
                disable_auto_validation = True
                explicit_validation = not self._session._manual_validation

            # Ensure requested protocol support is set for the session
            if osx_version_info < (10, 8):
                for protocol in ['SSLv2', 'SSLv3', 'TLSv1']:
                    protocol_const = _PROTOCOL_STRING_CONST_MAP[protocol]
                    enabled = protocol in self._session._protocols
                    result = Security.SSLSetProtocolVersionEnabled(
                        session_context,
                        protocol_const,
                        enabled
                    )
                    handle_sec_error(result)

                if disable_auto_validation:
                    result = Security.SSLSetEnableCertVerify(session_context, False)
                    handle_sec_error(result)

            else:
                protocol_consts = [_PROTOCOL_STRING_CONST_MAP[protocol] for protocol in self._session._protocols]
                min_protocol = min(protocol_consts)
                max_protocol = max(protocol_consts)
                result = Security.SSLSetProtocolVersionMin(
                    session_context,
                    min_protocol
                )
                handle_sec_error(result)
                result = Security.SSLSetProtocolVersionMax(
                    session_context,
                    max_protocol
                )
                handle_sec_error(result)

                if disable_auto_validation:
                    result = Security.SSLSetSessionOption(
                        session_context,
                        SecurityConst.kSSLSessionOptionBreakOnServerAuth,
                        True
                    )
                    handle_sec_error(result)

            # Disable all sorts of bad cipher suites
            supported_ciphers_pointer = new(Security, 'size_t *')
            result = Security.SSLGetNumberSupportedCiphers(session_context, supported_ciphers_pointer)
            handle_sec_error(result)

            supported_ciphers = deref(supported_ciphers_pointer)

            cipher_buffer = buffer_from_bytes(supported_ciphers * 4)
            supported_cipher_suites_pointer = cast(Security, 'uint32_t *', cipher_buffer)
            result = Security.SSLGetSupportedCiphers(
                session_context,
                supported_cipher_suites_pointer,
                supported_ciphers_pointer
            )
            handle_sec_error(result)

            supported_ciphers = deref(supported_ciphers_pointer)
            supported_cipher_suites = array_from_pointer(
                Security,
                'uint32_t',
                supported_cipher_suites_pointer,
                supported_ciphers
            )
            good_ciphers = []
            for supported_cipher_suite in supported_cipher_suites:
                cipher_suite = int_to_bytes(supported_cipher_suite, width=2)
                cipher_suite_name = CIPHER_SUITE_MAP.get(cipher_suite, cipher_suite)
                good_cipher = _cipher_blacklist_regex.search(cipher_suite_name) is None
                if good_cipher:
                    good_ciphers.append(supported_cipher_suite)

            num_good_ciphers = len(good_ciphers)
            good_ciphers_array = new(Security, 'uint32_t[]', num_good_ciphers)
            array_set(good_ciphers_array, good_ciphers)
            good_ciphers_pointer = cast(Security, 'uint32_t *', good_ciphers_array)
            result = Security.SSLSetEnabledCiphers(
                session_context,
                good_ciphers_pointer,
                num_good_ciphers
            )
            handle_sec_error(result)

            # Set a peer id from the session to allow for session reuse, the hostname
            # is appended to prevent a bug on OS X 10.7 where it tries to reuse a
            # connection even if the hostnames are different.
            peer_id = self._session._peer_id + self._hostname.encode('utf-8')
            result = Security.SSLSetPeerID(session_context, peer_id, len(peer_id))
            handle_sec_error(result)

            handshake_result = Security.SSLHandshake(session_context)
            if self._exception is not None:
                exception = self._exception
                self._exception = None
                raise exception
            while handshake_result == SecurityConst.errSSLWouldBlock:
                handshake_result = Security.SSLHandshake(session_context)
                if self._exception is not None:
                    exception = self._exception
                    self._exception = None
                    raise exception

            if osx_version_info < (10, 8) and osx_version_info >= (10, 7):
                do_validation = explicit_validation and handshake_result == 0
            else:
                do_validation = explicit_validation and handshake_result == SecurityConst.errSSLServerAuthCompleted

            if do_validation:
                trust_ref_pointer = new(Security, 'SecTrustRef *')
                result = Security.SSLCopyPeerTrust(
                    session_context,
                    trust_ref_pointer
                )
                handle_sec_error(result)
                trust_ref = unwrap(trust_ref_pointer)

                cf_string_hostname = CFHelpers.cf_string_from_unicode(self._hostname)
                ssl_policy_ref = Security.SecPolicyCreateSSL(True, cf_string_hostname)
                result = CoreFoundation.CFRelease(cf_string_hostname)
                handle_cf_error(result)

                # Create a new policy for OCSP checking to disable it
                ocsp_oid_pointer = struct(Security, 'CSSM_OID')
                ocsp_oid = unwrap(ocsp_oid_pointer)
                ocsp_oid.Length = len(SecurityConst.APPLE_TP_REVOCATION_OCSP)
                ocsp_oid_buffer = buffer_from_bytes(SecurityConst.APPLE_TP_REVOCATION_OCSP)
                ocsp_oid.Data = cast(Security, 'char *', ocsp_oid_buffer)

                ocsp_search_ref_pointer = new(Security, 'SecPolicySearchRef *')
                result = Security.SecPolicySearchCreate(
                    SecurityConst.CSSM_CERT_X_509v3,
                    ocsp_oid_pointer,
                    null(),
                    ocsp_search_ref_pointer
                )
                handle_sec_error(result)
                ocsp_search_ref = unwrap(ocsp_search_ref_pointer)

                ocsp_policy_ref_pointer = new(Security, 'SecPolicyRef *')
                result = Security.SecPolicySearchCopyNext(ocsp_search_ref, ocsp_policy_ref_pointer)
                handle_sec_error(result)
                ocsp_policy_ref = unwrap(ocsp_policy_ref_pointer)

                ocsp_struct_pointer = struct(Security, 'CSSM_APPLE_TP_OCSP_OPTIONS')
                ocsp_struct = unwrap(ocsp_struct_pointer)
                ocsp_struct.Version = SecurityConst.CSSM_APPLE_TP_OCSP_OPTS_VERSION
                ocsp_struct.Flags = (
                    SecurityConst.CSSM_TP_ACTION_OCSP_DISABLE_NET |
                    SecurityConst.CSSM_TP_ACTION_OCSP_CACHE_READ_DISABLE
                )
                ocsp_struct_bytes = struct_bytes(ocsp_struct_pointer)

                cssm_data_pointer = struct(Security, 'CSSM_DATA')
                cssm_data = unwrap(cssm_data_pointer)
                cssm_data.Length = len(ocsp_struct_bytes)
                ocsp_struct_buffer = buffer_from_bytes(ocsp_struct_bytes)
                cssm_data.Data = cast(Security, 'char *', ocsp_struct_buffer)

                result = Security.SecPolicySetValue(ocsp_policy_ref, cssm_data_pointer)
                handle_sec_error(result)

                # Create a new policy for CRL checking to disable it
                crl_oid_pointer = struct(Security, 'CSSM_OID')
                crl_oid = unwrap(crl_oid_pointer)
                crl_oid.Length = len(SecurityConst.APPLE_TP_REVOCATION_CRL)
                crl_oid_buffer = buffer_from_bytes(SecurityConst.APPLE_TP_REVOCATION_CRL)
                crl_oid.Data = cast(Security, 'char *', crl_oid_buffer)

                crl_search_ref_pointer = new(Security, 'SecPolicySearchRef *')
                result = Security.SecPolicySearchCreate(
                    SecurityConst.CSSM_CERT_X_509v3,
                    crl_oid_pointer,
                    null(),
                    crl_search_ref_pointer
                )
                handle_sec_error(result)
                crl_search_ref = unwrap(crl_search_ref_pointer)

                crl_policy_ref_pointer = new(Security, 'SecPolicyRef *')
                result = Security.SecPolicySearchCopyNext(crl_search_ref, crl_policy_ref_pointer)
                handle_sec_error(result)
                crl_policy_ref = unwrap(crl_policy_ref_pointer)

                crl_struct_pointer = struct(Security, 'CSSM_APPLE_TP_CRL_OPTIONS')
                crl_struct = unwrap(crl_struct_pointer)
                crl_struct.Version = SecurityConst.CSSM_APPLE_TP_CRL_OPTS_VERSION
                crl_struct.CrlFlags = 0
                crl_struct_bytes = struct_bytes(crl_struct_pointer)

                cssm_data_pointer = struct(Security, 'CSSM_DATA')
                cssm_data = unwrap(cssm_data_pointer)
                cssm_data.Length = len(crl_struct_bytes)
                crl_struct_buffer = buffer_from_bytes(crl_struct_bytes)
                cssm_data.Data = cast(Security, 'char *', crl_struct_buffer)

                result = Security.SecPolicySetValue(crl_policy_ref, cssm_data_pointer)
                handle_sec_error(result)

                policy_array_ref = CFHelpers.cf_array_from_list([
                    ssl_policy_ref,
                    crl_policy_ref,
                    ocsp_policy_ref
                ])

                result = Security.SecTrustSetPolicies(trust_ref, policy_array_ref)
                handle_sec_error(result)

                if self._session._extra_trust_roots:
                    ca_cert_refs = []
                    ca_certs = []
                    for cert in self._session._extra_trust_roots:
                        ca_cert = load_certificate(cert)
                        ca_certs.append(ca_cert)
                        ca_cert_refs.append(ca_cert.sec_certificate_ref)

                    result = Security.SecTrustSetAnchorCertificatesOnly(trust_ref, False)
                    handle_sec_error(result)

                    array_ref = CFHelpers.cf_array_from_list(ca_cert_refs)
                    result = Security.SecTrustSetAnchorCertificates(trust_ref, array_ref)
                    handle_sec_error(result)

                result_pointer = new(Security, 'SecTrustResultType *')
                result = Security.SecTrustEvaluate(trust_ref, result_pointer)
                handle_sec_error(result)

                trust_result_code = deref(result_pointer)
                # Despite the name, this set holds the two acceptable trust
                # results; any other result code means the certificate chain
                # could not be validated.
                invalid_chain_error_codes = set([
                    SecurityConst.kSecTrustResultProceed,
                    SecurityConst.kSecTrustResultUnspecified
                ])
                if trust_result_code not in invalid_chain_error_codes:
                    handshake_result = SecurityConst.errSSLXCertChainInvalid
                else:
                    handshake_result = Security.SSLHandshake(session_context)
                    while handshake_result == SecurityConst.errSSLWouldBlock:
                        handshake_result = Security.SSLHandshake(session_context)

            self._done_handshake = True

            handshake_error_codes = set([
                SecurityConst.errSSLXCertChainInvalid,
                SecurityConst.errSSLCertExpired,
                SecurityConst.errSSLCertNotYetValid,
                SecurityConst.errSSLUnknownRootCert,
                SecurityConst.errSSLNoRootCert,
                SecurityConst.errSSLHostNameMismatch,
                SecurityConst.errSSLInternal,
            ])

            # In testing, only errSSLXCertChainInvalid was ever returned for
            # all of these different situations; however, we include the others
            # for completeness. To get the real reason we have to use the
            # certificate from the handshake and the deprecated function
            # SecTrustGetCssmResultCode().
            if handshake_result in handshake_error_codes:
                trust_ref_pointer = new(Security, 'SecTrustRef *')
                result = Security.SSLCopyPeerTrust(
                    session_context,
                    trust_ref_pointer
                )
                handle_sec_error(result)
                trust_ref = unwrap(trust_ref_pointer)

                result_code_pointer = new(Security, 'OSStatus *')
                result = Security.SecTrustGetCssmResultCode(trust_ref, result_code_pointer)
                result_code = deref(result_code_pointer)

                chain = extract_chain(self._server_hello)

                self_signed = False
                revoked = False
                expired = False
                not_yet_valid = False
                no_issuer = False
                cert = None
                bad_hostname = False

                if chain:
                    cert = chain[0]
                    oscrypto_cert = load_certificate(cert)
                    self_signed = oscrypto_cert.self_signed
                    revoked = result_code == SecurityConst.CSSMERR_TP_CERT_REVOKED
                    no_issuer = not self_signed and result_code == SecurityConst.CSSMERR_TP_NOT_TRUSTED
                    expired = result_code == SecurityConst.CSSMERR_TP_CERT_EXPIRED
                    not_yet_valid = result_code == SecurityConst.CSSMERR_TP_CERT_NOT_VALID_YET
                    bad_hostname = result_code == SecurityConst.CSSMERR_APPLETP_HOSTNAME_MISMATCH

                    # On macOS 10.12, some expired certificates return errSSLInternal
                    if osx_version_info >= (10, 12):
                        validity = cert['tbs_certificate']['validity']
                        not_before = validity['not_before'].chosen.native
                        not_after = validity['not_after'].chosen.native
                        utcnow = datetime.datetime.now(timezone.utc)
                        expired = not_after < utcnow
                        not_yet_valid = not_before > utcnow

                if chain and chain[0].hash_algo in set(['md5', 'md2']):
                    raise_weak_signature(chain[0])

                if revoked:
                    raise_revoked(cert)

                if bad_hostname:
                    raise_hostname(cert, self._hostname)

                elif expired or not_yet_valid:
                    raise_expired_not_yet_valid(cert)

                elif no_issuer:
                    raise_no_issuer(cert)

                elif self_signed:
                    raise_self_signed(cert)

                if detect_client_auth_request(self._server_hello):
                    raise_client_auth()

                raise_verification(cert)

            if handshake_result == SecurityConst.errSSLPeerHandshakeFail:
                if detect_client_auth_request(self._server_hello):
                    raise_client_auth()
                raise_handshake()

            if handshake_result == SecurityConst.errSSLWeakPeerEphemeralDHKey:
                raise_dh_params()

            if handshake_result in set([SecurityConst.errSSLRecordOverflow, SecurityConst.errSSLProtocol]):
                self._server_hello += _read_remaining(self._socket)
                raise_protocol_error(self._server_hello)

            if handshake_result in set([SecurityConst.errSSLClosedNoNotify, SecurityConst.errSSLClosedAbort]):
                if not self._done_handshake:
                    self._server_hello += _read_remaining(self._socket)
                if detect_other_protocol(self._server_hello):
                    raise_protocol_error(self._server_hello)
                raise_disconnection()

            if osx_version_info < (10, 10):
                dh_params_length = get_dh_params_length(self._server_hello)
                if dh_params_length is not None and dh_params_length < 1024:
                    raise_dh_params()

            would_block = handshake_result == SecurityConst.errSSLWouldBlock
            server_auth_complete = handshake_result == SecurityConst.errSSLServerAuthCompleted
            manual_validation = self._session._manual_validation and server_auth_complete
            if not would_block and not manual_validation:
                handle_sec_error(handshake_result, TLSError)

            self._session_context = session_context

            protocol_const_pointer = new(Security, 'SSLProtocol *')
            result = Security.SSLGetNegotiatedProtocolVersion(
                session_context,
                protocol_const_pointer
            )
            handle_sec_error(result)
            protocol_const = deref(protocol_const_pointer)

            self._protocol = _PROTOCOL_CONST_STRING_MAP[protocol_const]

            cipher_int_pointer = new(Security, 'SSLCipherSuite *')
            result = Security.SSLGetNegotiatedCipher(
                session_context,
                cipher_int_pointer
            )
            handle_sec_error(result)
            cipher_int = deref(cipher_int_pointer)

            cipher_bytes = int_to_bytes(cipher_int, width=2)
            self._cipher_suite = CIPHER_SUITE_MAP.get(cipher_bytes, cipher_bytes)

            session_info = parse_session_info(
                self._server_hello,
                self._client_hello
            )
            self._compression = session_info['compression']
            self._session_id = session_info['session_id']
            self._session_ticket = session_info['session_ticket']

        except (OSError, socket_.error):
            if session_context:
                if osx_version_info < (10, 8):
                    result = Security.SSLDisposeContext(session_context)
                    handle_sec_error(result)
                else:
                    result = CoreFoundation.CFRelease(session_context)
                    handle_cf_error(result)

            self._session_context = None
            self.close()

            raise

        finally:
            # Trying to release crl_search_ref or ocsp_search_ref results in
            # a segmentation fault, so we do not do that

            if ssl_policy_ref:
                result = CoreFoundation.CFRelease(ssl_policy_ref)
                handle_cf_error(result)
                ssl_policy_ref = None

            if crl_policy_ref:
                result = CoreFoundation.CFRelease(crl_policy_ref)
                handle_cf_error(result)
                crl_policy_ref = None

            if ocsp_policy_ref:
                result = CoreFoundation.CFRelease(ocsp_policy_ref)
                handle_cf_error(result)
                ocsp_policy_ref = None

            if policy_array_ref:
                result = CoreFoundation.CFRelease(policy_array_ref)
                handle_cf_error(result)
                policy_array_ref = None

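The validity re-check near the end of this example compares the certificate's not_before/not_after values against a timezone-aware "now". Below is a minimal standalone sketch of that pattern; the function name and inputs are illustrative, not taken from the project above, and timezone-aware datetimes are assumed.

import datetime
from datetime import timezone

def check_validity_window(not_before, not_after):
    """Return (expired, not_yet_valid) for timezone-aware datetimes."""
    # An aware UTC "now" so the comparison with aware timestamps does not
    # raise TypeError.
    utcnow = datetime.datetime.now(timezone.utc)
    return (not_after < utcnow, not_before > utcnow)

# A certificate valid only during 2015 is expired today, and not "not yet valid".
print(check_validity_window(
    datetime.datetime(2015, 1, 1, tzinfo=timezone.utc),
    datetime.datetime(2016, 1, 1, tzinfo=timezone.utc)
))  # (True, False)
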
Example 74

Project: sylkserver Source File: session.py
    def _OH_ProcessRemoteOperation(self, operation):
        notification = operation.notification
        stanza = notification.data.stanza
        if notification.name == 'XMPPGotJingleSessionTerminate':
            if self.state not in ('incoming', 'connecting', 'connected_pending_accept', 'connected'):
                return
            if self._timer is not None and self._timer.active():
                self._timer.cancel()
            self._timer = None
            # Session ended remotely
            prev_state = self.state
            self.state = 'terminated'
            if prev_state == 'incoming':
                reason = stanza.jingle.reason.value if stanza.jingle.reason else 'cancel'
                notification.center.post_notification('JingleSessionDidFail', self, NotificationData(originator='remote', reason=reason))
            else:
                notification.center.post_notification('JingleSessionWillEnd', self, NotificationData(originator='remote'))
                streams = self.proposed_streams if prev_state == 'connecting' else self.streams
                for stream in streams:
                    notification.center.remove_observer(self, sender=stream)
                    stream.deactivate()
                    stream.end()
                self.end_time = datetime.now()
                notification.center.post_notification('JingleSessionDidEnd', self, NotificationData(originator='remote'))
            self._channel.send_exception(proc.ProcExit)
        elif notification.name == 'XMPPGotJingleSessionInfo':
            info = stanza.jingle.info
            if not info:
                return
            if info == 'ringing':
                if self.state not in ('connecting', 'connected_pending_accept'):
                    return
                notification.center.post_notification('JingleSessionGotRingIndication', self)
            elif info in ('hold', 'unhold'):
                if self.state != 'connected':
                    return
                notification.center.post_notification('JingleSessionDidChangeHoldState', self, NotificationData(originator='remote', on_hold=info=='hold', partial=False))
        elif notification.name == 'XMPPGotJingleDescriptionInfo':
            if self.state != 'connecting':
                return

            # Add candidates acquired on transport-info stanzas
            for s in self._pending_transport_info_stanzas:
                for c in s.jingle.content:
                    content = next(content for content in stanza.jingle.content if content.name == c.name)
                    content.transport.candidates.extend(c.transport.candidates)
                    if isinstance(content.transport, jingle.IceUdpTransport):
                        if not content.transport.ufrag and c.transport.ufrag:
                            content.transport.ufrag = c.transport.ufrag
                        if not content.transport.password and c.transport.password:
                            content.transport.password = c.transport.password

            remote_sdp = jingle_to_sdp(stanza.jingle)
            try:
                self._sdp_negotiator.set_remote_answer(remote_sdp)
                self._sdp_negotiator.negotiate()
            except SIPCoreError:
                # The description-info stanza may have been just a parameter change, not a full 'SDP'
                return

            if self._timer is not None and self._timer.active():
                self._timer.cancel()
            self._timer = None

            del self._pending_transport_info_stanzas[:]

            # Get active SDPs (negotiator may make changes)
            local_sdp = self._sdp_negotiator.active_local
            remote_sdp = self._sdp_negotiator.active_remote

            notification.center.post_notification('JingleSessionWillStart', sender=self)
            stream_map = dict((stream.index, stream) for stream in self.proposed_streams)
            for index, local_media in enumerate(local_sdp.media):
                remote_media = remote_sdp.media[index]
                stream = stream_map[index]
                if remote_media.port:
                    stream.start(local_sdp, remote_sdp, index)
                else:
                    notification.center.remove_observer(self, sender=stream)
                    self.proposed_streams.remove(stream)
                    del stream_map[stream.index]
                    stream.deactivate()
                    stream.end()
            removed_streams = [stream for stream in self.proposed_streams if stream.index >= len(local_sdp.media)]
            for stream in removed_streams:
                notification.center.remove_observer(self, sender=stream)
                self.proposed_streams.remove(stream)
                del stream_map[stream.index]
                stream.deactivate()
                stream.end()

            try:
                with api.timeout(self.media_stream_timeout):
                    wait_count = len(self.proposed_streams)
                    while wait_count > 0:
                        notification = operation.channel.wait()
                        if notification.name == 'MediaStreamDidStart':
                            wait_count -= 1
            except (MediaStreamDidFailError, api.TimeoutError), e:
                for stream in self.proposed_streams:
                    notification.center.remove_observer(self, sender=stream)
                    stream.deactivate()
                    stream.end()
                if isinstance(e, api.TimeoutError):
                    error = 'media stream timed out while starting'
                else:
                    error = 'media stream failed: %s' % e.data.reason
                self._fail(originator='local', reason='failed-application', description=error)
            else:
                self.state = 'connected_pending_accept'
                self.streams = self.proposed_streams
                self.proposed_streams = None
                self.start_time = datetime.now()
                # Hold the streams to prevent real RTP from flowing
                for stream in self.streams:
                    stream.hold()
        elif notification.name == 'XMPPGotJingleSessionAccept':
            if self.state not in ('connecting', 'connected_pending_accept'):
                return
            if self._timer is not None and self._timer.active():
                self._timer.cancel()
            self._timer = None

            if self.state == 'connected_pending_accept':
                # We already negotiated ICE and media is nominally flowing (the
                # streams are actually on hold), so unhold the streams and
                # pretend the session just started.
                for stream in self.streams:
                    stream.unhold()
                self.state = 'connected'
                notification.center.post_notification('JingleSessionDidStart', self, NotificationData(streams=self.streams))
                return

            # Add candidates acquired on transport-info stanzas
            for s in self._pending_transport_info_stanzas:
                for c in s.jingle.content:
                    content = next(content for content in stanza.jingle.content if content.name == c.name)
                    content.transport.candidates.extend(c.transport.candidates)
                    if isinstance(content.transport, jingle.IceUdpTransport):
                        if not content.transport.ufrag and c.transport.ufrag:
                            content.transport.ufrag = c.transport.ufrag
                        if not content.transport.password and c.transport.password:
                            content.transport.password = c.transport.password
            del self._pending_transport_info_stanzas[:]

            remote_sdp = jingle_to_sdp(stanza.jingle)
            try:
                self._sdp_negotiator.set_remote_answer(remote_sdp)
                self._sdp_negotiator.negotiate()
            except SIPCoreError, e:
                for stream in self.proposed_streams:
                    notification.center.remove_observer(self, sender=stream)
                    stream.deactivate()
                    stream.end()
                self._fail(originator='remote', reason='incompatible-parameters', description=str(e))
                return

            # Get active SDPs (negotiator may make changes)
            local_sdp = self._sdp_negotiator.active_local
            remote_sdp = self._sdp_negotiator.active_remote

            notification.center.post_notification('JingleSessionWillStart', sender=self)
            stream_map = dict((stream.index, stream) for stream in self.proposed_streams)
            for index, local_media in enumerate(local_sdp.media):
                remote_media = remote_sdp.media[index]
                stream = stream_map[index]
                if remote_media.port:
                    stream.start(local_sdp, remote_sdp, index)
                else:
                    notification.center.remove_observer(self, sender=stream)
                    self.proposed_streams.remove(stream)
                    del stream_map[stream.index]
                    stream.deactivate()
                    stream.end()
            removed_streams = [stream for stream in self.proposed_streams if stream.index >= len(local_sdp.media)]
            for stream in removed_streams:
                notification.center.remove_observer(self, sender=stream)
                self.proposed_streams.remove(stream)
                del stream_map[stream.index]
                stream.deactivate()
                stream.end()

            try:
                with api.timeout(self.media_stream_timeout):
                    wait_count = len(self.proposed_streams)
                    while wait_count > 0:
                        notification = operation.channel.wait()
                        if notification.name == 'MediaStreamDidStart':
                            wait_count -= 1
            except (MediaStreamDidFailError, api.TimeoutError), e:
                for stream in self.proposed_streams:
                    notification.center.remove_observer(self, sender=stream)
                    stream.deactivate()
                    stream.end()
                if isinstance(e, api.TimeoutError):
                    error = 'media stream timed out while starting'
                else:
                    error = 'media stream failed: %s' % e.data.reason
                self._fail(originator='local', reason='failed-application', description=error)
            else:
                self.state = 'connected'
                self.streams = self.proposed_streams
                self.proposed_streams = None
                self.start_time = datetime.now()
                notification.center.post_notification('JingleSessionDidStart', self, NotificationData(streams=self.streams))
        elif notification.name == 'XMPPGotJingleTransportInfo':
            if self.state != 'connecting':
                # ICE trickling not supported yet, so only accept candidates before accept
                return
            self._pending_transport_info_stanzas.append(stanza)

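This example stamps session lifetimes with naive local datetime.now() calls (start_time when media starts, end_time when the session terminates). A minimal sketch of that bookkeeping follows, using an illustrative Session class rather than the project's own:

from datetime import datetime

class Session(object):
    """Illustrative object tracking wall-clock session lifetime."""

    def __init__(self):
        self.start_time = None
        self.end_time = None

    def start(self):
        # Naive local time, matching the datetime.now() usage above.
        self.start_time = datetime.now()

    def end(self):
        self.end_time = datetime.now()

    @property
    def duration(self):
        if self.start_time is None or self.end_time is None:
            return None
        return self.end_time - self.start_time  # a datetime.timedelta

session = Session()
session.start()
session.end()
print(session.duration.total_seconds())
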
Example 75

Project: bitex Source File: verification_webhook_handler.py
  def post(self, *args, **kwargs):
    formID        = self.get_argument('formID')
    submissionID  = self.get_argument('submissionID')

    dt = datetime.datetime.now()
    createdAt = int(mktime(dt.timetuple()) + dt.microsecond/1000000.0)

    raw_request = json.loads(self.get_argument('rawRequest'))
    print raw_request

    broker_id             = None
    user_id               = None
    first_name            = None
    middle_name           = None
    last_name             = None
    birth_date_day        = None
    birth_date_month      = None
    birth_date_year       = None
    phone_number_country  = None
    phone_number_area     = None
    phone_number_phone    = None
    address_addr_line1    = None
    address_addr_line2    = None
    address_city          = None
    address_state         = None
    address_postal        = None
    address_country       = None
    address_country_code  = None
    finger_print          = None
    stunt_ip              = None

    photo_fields          = []
    id_fields             = []

    for key, value in raw_request.iteritems():
      if 'broker_id' in key:
        broker_id = int(value)
      if 'user_id' in key:
        user_id = int(value)
      if 'photo_fields' in key:
        photo_fields = value.split(',')
      if 'id_fields' in key:
        id_fields = value.split(',')

      # jotform
      if 'name' in key and isinstance(value, dict ) and 'first' in value:
        first_name = value['first']
      if 'name' in key and isinstance(value, dict ) and 'middle' in value:
        middle_name = value['middle']
      if 'name' in key and isinstance(value, dict ) and 'last' in value:
        last_name = value['last']

      if 'birthDate' in key and isinstance(value, dict ) and 'day' in value:
        birth_date_day = value['day']
      if 'birthDate' in key and isinstance(value, dict ) and 'month' in value:
        birth_date_month = value['month']
      if 'birthDate' in key and isinstance(value, dict ) and 'year' in value:
        birth_date_year = value['year']

      if 'phoneNumber' in key and isinstance(value, dict ) and 'country' in value:
        phone_number_country = value['country']
      if 'phoneNumber' in key and isinstance(value, dict ) and 'area' in value:
        phone_number_area = value['area']
      if 'phoneNumber' in key and isinstance(value, dict ) and 'phone' in value:
        phone_number_phone = value['phone']

      if 'address' in key and isinstance(value, dict ) and 'addr_line1' in value:
        address_addr_line1 = value['addr_line1']
      if 'address' in key and isinstance(value, dict ) and 'addr_line2' in value:
        address_addr_line2 = value['addr_line2']
      if 'address' in key and isinstance(value, dict ) and 'city' in value:
        address_city = value['city']
      if 'address' in key and isinstance(value, dict ) and 'state' in value:
        address_state = value['state']
      if 'address' in key and isinstance(value, dict ) and 'postal' in value:
        address_postal = value['postal']
      if 'address' in key and isinstance(value, dict ) and 'country' in value:
        address_country = value['country']
        address_country_code = get_country_code(address_country)

      if 'finger_print' in key:
        finger_print = value

      if 'stunt_ip' in key:
        stunt_ip = value

      #form stack
      if 'name-first' in key:
        first_name = value
      if 'name-middle' in key:
        middle_name = value
      if 'name-last' in key:
        last_name = value

      if 'address-address' in key:
        address_addr_line1 = value
      if 'address-address2' in key:
        address_addr_line2 = value
      if 'address-city' in key:
        address_city = value
      if 'address-state' in key:
        address_state = value
      if 'address-zip' in key:
        address_postal = value
      if 'address-country' in key:
        address_country = value
        address_country_code = get_country_code(address_country)


    uploaded_files = []
    for field in photo_fields:
      for key, value in raw_request.iteritems():
        if field in key:
          if isinstance(value, list ):
            uploaded_files.extend(value)
          else:
            uploaded_files.append(value)


    import random
    req_id = random.randrange(600000,900000)

    if birth_date_month[:3].upper() in ['JAN', 'GEN']:
      birth_date_month = '01'
    elif birth_date_month[:3].upper() in ['FEV', 'FEB']:
      birth_date_month = '02'
    elif birth_date_month[:3].upper() in ['MAR', u'MÄR']:
      birth_date_month = '03'
    elif birth_date_month[:3].upper() in ['ABR', 'APR', 'AVR']:
      birth_date_month = '04'
    elif birth_date_month[:3].upper() in ['MAY', 'MAI', 'MAG']:
      birth_date_month = '05'
    elif birth_date_month[:3].upper() in ['JUN', 'GIU']:
      birth_date_month = '06'
    # Ambiguous French months: lower-case 'jui...' is treated as juin (June),
    # capitalised 'Jui...' as juillet (July).
    elif birth_date_month[:3] in ['jui']:
      birth_date_month = '06'
    elif birth_date_month[:3] in ['Jui']:
      birth_date_month = '07'
    elif birth_date_month[:3].upper() in ['JUL', 'LUG']:
      birth_date_month = '07'
    elif birth_date_month[:3].upper() in ['AGO', 'AUG', 'AOU']:
      birth_date_month = '08'
    elif birth_date_month[:3].upper() in ['SET', 'SEP']:
      birth_date_month = '09'
    elif birth_date_month[:3].upper() in ['OUT', 'OCT', 'OTT', 'OKT']:
      birth_date_month = '10'
    elif birth_date_month[:3].upper() in ['NOV']:
      birth_date_month = '11'
    elif birth_date_month[:3].upper() in ['DEZ', 'DEC', 'DIC']:
      birth_date_month = '12'
    else:
      birth_date_month = birth_date_month[:3].upper()

    verify_request_message = {
      'MsgType': 'B8',
      'VerifyCustomerReqID':req_id,
      'ClientID': user_id,
      'BrokerID': broker_id,
      'VerificationData':  {
        'formID': formID,
        'submissionID': submissionID,
        'created_at': createdAt,
        'name': {
          'first': first_name,
          'middle': middle_name,
          'last': last_name,
        },
        'address': {
          'street1': address_addr_line1,
          'street2': address_addr_line2,
          'city': address_city,
          'state': address_state,
          'postal_code': address_postal,
          'country': address_country,
          'country_code': address_country_code,
        },
        'phone_number': phone_number_country + phone_number_area + phone_number_phone,
        'date_of_birth': birth_date_year + '-' +  birth_date_month + '-' + birth_date_day,
        'uploaded_files': uploaded_files
      },
      'Verify': 1
    }

    if finger_print:
      verify_request_message['VerificationData']['browser_finger_print'] = finger_print

    try:
      if stunt_ip:
        verify_request_message['VerificationData']['stunt_ip'] = json.loads(stunt_ip)
    except:
      pass

    for field in id_fields:
      for key, value in raw_request.iteritems():
        if field in key:
          field_name = camel_to_underscore(field)
          if 'identification' not in verify_request_message['VerificationData']:
            verify_request_message['VerificationData']['identification'] = {}
          verify_request_message['VerificationData']['identification'][ field_name ] = value

    verify_request_message['VerificationData'] = json.dumps(verify_request_message['VerificationData'])

    self.application.application_trade_client.sendJSON(verify_request_message)
    self.write('*ok*')

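The handler above turns datetime.datetime.now() into a Unix timestamp with mktime plus the microsecond fraction. A standalone sketch of that conversion (assuming a local-time float epoch is what is wanted); in Python 3, dt.timestamp() gives essentially the same value directly:

import datetime
from time import mktime

dt = datetime.datetime.now()

# Pattern used in the example: epoch seconds as a float, rebuilt from the
# local-time struct_time (which drops microseconds) plus the microsecond part.
created_at = mktime(dt.timetuple()) + dt.microsecond / 1000000.0

# Python 3 offers the same value directly from the datetime object.
print(created_at, dt.timestamp())
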
Example 76

Project: tvdb_api Source File: tvdb_api.py
    def __init__(self,
                interactive = False,
                select_first = False,
                debug = False,
                cache = True,
                banners = False,
                actors = False,
                custom_ui = None,
                language = None,
                search_all_languages = False,
                apikey = None,
                forceConnect=False,
                useZip=False,
                dvdorder=False):

        """interactive (True/False):
            When True, the built-in console UI is used to select the correct show.
            When False, the first search result is used.

        select_first (True/False):
            Automatically selects the first series search result (rather
            than showing the user a list of more than one series).
            Is overridden by interactive = False, or specifying a custom_ui

        debug (True/False) DEPRECATED:
             Replaced with proper use of logging module. To show debug messages:

                 >>> import logging
                 >>> logging.basicConfig(level = logging.DEBUG)

        cache (True/False/str/unicode/urllib2 opener):
            Retrieved XML is persisted to disc. If True, it is stored in a
            tvdb_api folder under your system's TEMP_DIR; if set to a
            str/unicode instance, that path is used as the cache
            location. If False, caching is disabled. Can also be passed
            an arbitrary Python object, which is used as a urllib2
            opener and should be created by urllib2.build_opener.

            In Python 3, True/False enable or disable the default
            caching. Passing a string specifies the directory where the
            "tvdb.sqlite3" cache file is stored. A custom
            requests.Session instance can also be passed (e.g. a
            customised instance of requests_cache.CachedSession).

        banners (True/False):
            Retrieves the banners for a show. These are accessed
            via the _banners key of a Show(), for example:

            >>> Tvdb(banners=True)['scrubs']['_banners'].keys()
            ['fanart', 'poster', 'series', 'season']

        actors (True/False):
            Retrieves a list of the actors for a show. These are accessed
            via the _actors key of a Show(), for example:

            >>> t = Tvdb(actors=True)
            >>> t['scrubs']['_actors'][0]['name']
            u'Zach Braff'

        custom_ui (tvdb_ui.BaseUI subclass):
            A callable subclass of tvdb_ui.BaseUI (overrides interactive option)

        language (2 character language abbreviation):
            The language of the returned data. Is also the language search
            uses. Default is "en" (English). For full list, run..

            >>> Tvdb().config['valid_languages'] #doctest: +ELLIPSIS
            ['da', 'fi', 'nl', ...]

        search_all_languages (True/False):
            By default, Tvdb will only search in the language specified using
            the language option. When this is True, it will search for the
            show in any language.
        
        apikey (str/unicode):
            Override the default thetvdb.com API key. By default it will use
            tvdb_api's own key (fine for small scripts), but you can use your
            own key if desired - this is recommended if you are embedding
            tvdb_api in a larger application.
            See http://thetvdb.com/?tab=apiregister to get your own key

        forceConnect (bool):
            If true it will always try to connect to theTVDB.com even if we
            recently timed out. By default it will wait one minute before
            trying again, and any requests within that one minute window will
            return an exception immediately.

        useZip (bool):
            Download the zip archive where possible, instead of the xml.
            This is only used when all episodes are pulled, and only the main
            language xml is used; the actor and banner xml are lost.
        """
        

        global lastTimeout
        
        # if we're given a lastTimeout that is less than 1 min just give up
        if not forceConnect and lastTimeout != None and datetime.datetime.now() - lastTimeout < datetime.timedelta(minutes=1):
            raise tvdb_error("We recently timed out, so giving up early this time")
        
        self.shows = ShowContainer() # Holds all Show classes
        self.corrections = {} # Holds show-name to show_id mapping

        self.config = {}

        if apikey is not None:
            self.config['apikey'] = apikey
        else:
            self.config['apikey'] = "0629B785CE550C8D" # tvdb_api's API key

        self.config['debug_enabled'] = debug # show debugging messages

        self.config['custom_ui'] = custom_ui

        self.config['interactive'] = interactive # prompt for correct series?

        self.config['select_first'] = select_first

        self.config['search_all_languages'] = search_all_languages

        self.config['useZip'] = useZip

        self.config['dvdorder'] = dvdorder

        if not IS_PY2: # FIXME: Allow using requests in Python 2?
            import requests_cache
            if cache is True:
                self.session = requests_cache.CachedSession(
                    expire_after=21600, # 6 hours
                    backend='sqlite',
                    cache_name=self._getTempDir(),
                    )
                self.config['cache_enabled'] = True
            elif cache is False:
                self.session = requests.Session()
                self.config['cache_enabled'] = False
            elif isinstance(cache, text_type):
                # Specified cache path
                self.session = requests_cache.CachedSession(
                    expire_after=21600, # 6 hours
                    backend='sqlite',
                    cache_name=os.path.join(cache, "tvdb_api"),
                    )
            else:
                self.session = cache
                try:
                    self.session.get
                except AttributeError:
                    raise ValueError("cache argument must be True/False, string as cache path or requests.Session-type object (e.g from requests_cache.CachedSession)")
        else:
            # For backwards compatibility in Python 2.x
            if cache is True:
                self.config['cache_enabled'] = True
                self.config['cache_location'] = self._getTempDir()
                self.urlopener = urllib2.build_opener(
                    CacheHandler(self.config['cache_location'])
                )

            elif cache is False:
                self.config['cache_enabled'] = False
                self.urlopener = urllib2.build_opener() # default opener with no caching

            elif isinstance(cache, basestring):
                self.config['cache_enabled'] = True
                self.config['cache_location'] = cache
                self.urlopener = urllib2.build_opener(
                    CacheHandler(self.config['cache_location'])
                )

            elif isinstance(cache, urllib2.OpenerDirector):
                # If passed something from urllib2.build_opener, use that
                log().debug("Using %r as urlopener" % cache)
                self.config['cache_enabled'] = True
                self.urlopener = cache

            else:
                raise ValueError("Invalid value for Cache %r (type was %s)" % (cache, type(cache)))

        self.config['banners_enabled'] = banners
        self.config['actors_enabled'] = actors

        if self.config['debug_enabled']:
            warnings.warn("The debug argument to tvdb_api.__init__ will be removed in the next version. "
            "To enable debug messages, use the following code before importing: "
            "import logging; logging.basicConfig(level=logging.DEBUG)")
            logging.basicConfig(level=logging.DEBUG)


        # List of languages from http://thetvdb.com/api/0629B785CE550C8D/languages.xml
        # Hard-coded here as it is relatively static, and saves another HTTP request, as
        # recommended on http://thetvdb.com/wiki/index.php/API:languages.xml
        self.config['valid_languages'] = [
            "da", "fi", "nl", "de", "it", "es", "fr","pl", "hu","el","tr",
            "ru","he","ja","pt","zh","cs","sl", "hr","ko","en","sv","no"
        ]

        # thetvdb.com should be based around numeric language codes,
        # but to link to a series like http://thetvdb.com/?tab=series&id=79349&lid=16
        # requires the language ID, thus this mapping is required (mainly
        # for usage in tvdb_ui - internally tvdb_api will use the language abbreviations)
        self.config['langabbv_to_id'] = {'el': 20, 'en': 7, 'zh': 27,
        'it': 15, 'cs': 28, 'es': 16, 'ru': 22, 'nl': 13, 'pt': 26, 'no': 9,
        'tr': 21, 'pl': 18, 'fr': 17, 'hr': 31, 'de': 14, 'da': 10, 'fi': 11,
        'hu': 19, 'ja': 25, 'he': 24, 'ko': 32, 'sv': 8, 'sl': 30}

        if language is None:
            self.config['language'] = 'en'
        else:
            if language not in self.config['valid_languages']:
                raise ValueError("Invalid language %s, options are: %s" % (
                    language, self.config['valid_languages']
                ))
            else:
                self.config['language'] = language

        # The following url_ configs are based of the
        # http://thetvdb.com/wiki/index.php/Programmers_API
        self.config['base_url'] = "http://thetvdb.com"

        if self.config['search_all_languages']:
            self.config['url_getSeries'] = u"%(base_url)s/api/GetSeries.php?seriesname=%%s&language=all" % self.config
        else:
            self.config['url_getSeries'] = u"%(base_url)s/api/GetSeries.php?seriesname=%%s&language=%(language)s" % self.config

        self.config['url_epInfo'] = u"%(base_url)s/api/%(apikey)s/series/%%s/all/%%s.xml" % self.config
        self.config['url_epInfo_zip'] = u"%(base_url)s/api/%(apikey)s/series/%%s/all/%%s.zip" % self.config

        self.config['url_seriesInfo'] = u"%(base_url)s/api/%(apikey)s/series/%%s/%%s.xml" % self.config
        self.config['url_actorsInfo'] = u"%(base_url)s/api/%(apikey)s/series/%%s/actors.xml" % self.config

        self.config['url_seriesBanner'] = u"%(base_url)s/api/%(apikey)s/series/%%s/banners.xml" % self.config
        self.config['url_artworkPrefix'] = u"%(base_url)s/banners/%%s" % self.config

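The constructor above refuses to connect when the last timeout happened under a minute ago, by subtracting a stored timestamp from datetime.datetime.now(). A minimal sketch of that throttling guard; last_timeout, record_timeout and should_give_up are illustrative names, not tvdb_api's:

import datetime

# Module-level marker of the last time a request timed out (None = never).
last_timeout = None

def record_timeout():
    global last_timeout
    last_timeout = datetime.datetime.now()

def should_give_up(force_connect=False, window=datetime.timedelta(minutes=1)):
    """Return True if a timeout happened within the window and we should not retry yet."""
    if force_connect or last_timeout is None:
        return False
    return datetime.datetime.now() - last_timeout < window

record_timeout()
print(should_give_up())                    # True: we just timed out
print(should_give_up(force_connect=True))  # False: caller overrides the guard
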
Example 77

Project: CuckooSploit Source File: from_1_1_to_1_2-added_states.py
def _perform(upgrade):
    conn = op.get_bind()

    # Work around Alembic limitations.
    # Alembic is ORM-oriented enough that it is hard to write a single migration that works across different DBMSes.
    if conn.engine.driver == "psycopg2":
        # Altering the status ENUM.
        # Raw SQL is used here because Alembic does not handle alter_column on ENUM types well.
        # Commit first because ALTER TYPE ... ADD VALUE cannot run inside a transaction block.
        op.execute('COMMIT')
        if upgrade:
            conn.execute("ALTER TYPE status_type ADD VALUE 'failed_reporting'")
        else:
            conn.execute("ALTER TYPE status_type DROP ATTRIBUTE IF EXISTS failed_reporting")
    else:
        # Read data.
        tasks_data = []
        old_tasks = conn.execute("select id, target, category, timeout, priority, custom, machine, package, options, platform, memory, enforce_timeout, clock, added_on, started_on, completed_on, status, sample_id from tasks").fetchall()
        for item in old_tasks:
            d = {}
            d["id"] = item[0]
            d["target"] = item[1]
            d["category"] = item[2]
            d["timeout"] = item[3]
            d["priority"] = item[4]
            d["custom"] = item[5]
            d["machine"] = item[6]
            d["package"] = item[7]
            d["options"] = item[8]
            d["platform"] = item[9]
            d["memory"] = item[10]
            d["enforce_timeout"] = item[11]

            if isinstance(item[12], datetime):
                d["clock"] = item[12]
            elif item[12]:
                d["clock"] = parse(item[12])
            else:
                d["clock"] = None

            if isinstance(item[13], datetime):
                d["added_on"] = item[13]
            elif item[13]:
                d["added_on"] = parse(item[13])
            else:
                d["added_on"] = None

            if isinstance(item[14], datetime):
                d["started_on"] = item[14]
            elif item[14]:
                d["started_on"] = parse(item[14])
            else:
                d["started_on"] = None

            if isinstance(item[15], datetime):
                d["completed_on"] = item[15]
            elif item[15]:
                d["completed_on"] = parse(item[15])
            else:
                d["completed_on"] = None

            d["status"] = item[16]
            d["sample_id"] = item[17]

            tasks_data.append(d)
        if conn.engine.driver == "mysqldb":
            # Disable foreign key checking to migrate table avoiding checks.
            op.execute('SET foreign_key_checks = 0')

            # Drop old table.
            op.drop_table("tasks")

            # Drop old Enum.
            sa.Enum(name="status_type").drop(op.get_bind(), checkfirst=False)
            # Create table with 1.2 schema.
            if upgrade:
                op.create_table(
                    "tasks",
                    sa.Column("id", sa.Integer(), nullable=False),
                    sa.Column("target", sa.String(length=255), nullable=False),
                    sa.Column("category", sa.String(length=255), nullable=False),
                    sa.Column("timeout", sa.Integer(), server_default="0", nullable=False),
                    sa.Column("priority", sa.Integer(), server_default="1", nullable=False),
                    sa.Column("custom", sa.String(length=255), nullable=True),
                    sa.Column("machine", sa.String(length=255), nullable=True),
                    sa.Column("package", sa.String(length=255), nullable=True),
                    sa.Column("options", sa.String(length=255), nullable=True),
                    sa.Column("platform", sa.String(length=255), nullable=True),
                    sa.Column("memory", sa.Boolean(), nullable=False, default=False),
                    sa.Column("enforce_timeout", sa.Boolean(), nullable=False, default=False),
                    sa.Column("clock", sa.DateTime(timezone=False), default=datetime.now, nullable=False),
                    sa.Column("added_on", sa.DateTime(timezone=False), nullable=False),
                    sa.Column("started_on", sa.DateTime(timezone=False), nullable=True),
                    sa.Column("completed_on", sa.DateTime(timezone=False), nullable=True),
                    sa.Column("status", sa.Enum("pending", "running", "completed", "reported", "recovered", "failed_analysis", "failed_processing", "failed_reporting", name="status_type"), server_default="pending", nullable=False),
                    sa.Column("sample_id", sa.Integer, sa.ForeignKey("samples.id"), nullable=True),
                    sa.PrimaryKeyConstraint("id")
                )
            else:
                op.create_table(
                    "tasks",
                    sa.Column("id", sa.Integer(), nullable=False),
                    sa.Column("target", sa.String(length=255), nullable=False),
                    sa.Column("category", sa.String(length=255), nullable=False),
                    sa.Column("timeout", sa.Integer(), server_default="0", nullable=False),
                    sa.Column("priority", sa.Integer(), server_default="1", nullable=False),
                    sa.Column("custom", sa.String(length=255), nullable=True),
                    sa.Column("machine", sa.String(length=255), nullable=True),
                    sa.Column("package", sa.String(length=255), nullable=True),
                    sa.Column("options", sa.String(length=255), nullable=True),
                    sa.Column("platform", sa.String(length=255), nullable=True),
                    sa.Column("memory", sa.Boolean(), nullable=False, default=False),
                    sa.Column("enforce_timeout", sa.Boolean(), nullable=False, default=False),
                    sa.Column("clock", sa.DateTime(timezone=False), default=datetime.now, nullable=False),
                    sa.Column("added_on", sa.DateTime(timezone=False), nullable=False),
                    sa.Column("started_on", sa.DateTime(timezone=False), nullable=True),
                    sa.Column("completed_on", sa.DateTime(timezone=False), nullable=True),
                    sa.Column("status", sa.Enum("pending", "running", "completed", "reported", "recovered", "failed_analysis", "failed_processing", name="status_type"), server_default="pending", nullable=False),
                    sa.Column("sample_id", sa.Integer, sa.ForeignKey("samples.id"), nullable=True),
                    sa.PrimaryKeyConstraint("id")
                )
            op.execute('COMMIT')

            # Insert data.
            op.bulk_insert(db.Task.__table__, tasks_data)
            # Enable foreign key.
            op.execute('SET foreign_key_checks = 1')

        else:
            op.drop_table("tasks")

            # Create table with 1.2 schema.
            if upgrade:
                op.create_table(
                    "tasks",
                    sa.Column("id", sa.Integer(), nullable=False),
                    sa.Column("target", sa.String(length=255), nullable=False),
                    sa.Column("category", sa.String(length=255), nullable=False),
                    sa.Column("timeout", sa.Integer(), server_default="0", nullable=False),
                    sa.Column("priority", sa.Integer(), server_default="1", nullable=False),
                    sa.Column("custom", sa.String(length=255), nullable=True),
                    sa.Column("machine", sa.String(length=255), nullable=True),
                    sa.Column("package", sa.String(length=255), nullable=True),
                    sa.Column("options", sa.String(length=255), nullable=True),
                    sa.Column("platform", sa.String(length=255), nullable=True),
                    sa.Column("memory", sa.Boolean(), nullable=False, default=False),
                    sa.Column("enforce_timeout", sa.Boolean(), nullable=False, default=False),
                    sa.Column("clock", sa.DateTime(timezone=False), default=datetime.now, nullable=False),
                    sa.Column("added_on", sa.DateTime(timezone=False), nullable=False),
                    sa.Column("started_on", sa.DateTime(timezone=False), nullable=True),
                    sa.Column("completed_on", sa.DateTime(timezone=False), nullable=True),
                    sa.Column("status", sa.Enum("pending", "running", "completed", "reported", "recovered", "failed_analysis", "failed_processing", "failed_reporting", name="status_type"), server_default="pending", nullable=False),
                    sa.Column("sample_id", sa.Integer, sa.ForeignKey("samples.id"), nullable=True),
                    sa.PrimaryKeyConstraint("id")
                )
            else:
                op.create_table(
                    "tasks",
                    sa.Column("id", sa.Integer(), nullable=False),
                    sa.Column("target", sa.String(length=255), nullable=False),
                    sa.Column("category", sa.String(length=255), nullable=False),
                    sa.Column("timeout", sa.Integer(), server_default="0", nullable=False),
                    sa.Column("priority", sa.Integer(), server_default="1", nullable=False),
                    sa.Column("custom", sa.String(length=255), nullable=True),
                    sa.Column("machine", sa.String(length=255), nullable=True),
                    sa.Column("package", sa.String(length=255), nullable=True),
                    sa.Column("options", sa.String(length=255), nullable=True),
                    sa.Column("platform", sa.String(length=255), nullable=True),
                    sa.Column("memory", sa.Boolean(), nullable=False, default=False),
                    sa.Column("enforce_timeout", sa.Boolean(), nullable=False, default=False),
                    sa.Column("clock", sa.DateTime(timezone=False), default=datetime.now, nullable=False),
                    sa.Column("added_on", sa.DateTime(timezone=False), nullable=False),
                    sa.Column("started_on", sa.DateTime(timezone=False), nullable=True),
                    sa.Column("completed_on", sa.DateTime(timezone=False), nullable=True),
                    sa.Column("status", sa.Enum("pending", "running", "completed", "reported", "recovered", "failed_analysis", "failed_processing", name="status_type"), server_default="pending", nullable=False),
                    sa.Column("sample_id", sa.Integer, sa.ForeignKey("samples.id"), nullable=True),
                    sa.PrimaryKeyConstraint("id")
                )

            # Insert data.
            op.bulk_insert(db.Task.__table__, tasks_data)

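The clock columns above are declared with default=datetime.now, i.e. the callable itself rather than a value, so SQLAlchemy evaluates it for each insert. A small sketch of that distinction, assuming SQLAlchemy and an in-memory SQLite database (neither of which the original migration depends on in this form):

import datetime
import sqlalchemy as sa

metadata = sa.MetaData()
tasks = sa.Table(
    "tasks", metadata,
    sa.Column("id", sa.Integer(), primary_key=True),
    # Passing the callable (no parentheses) makes SQLAlchemy call it at insert
    # time, so each row gets a fresh timestamp; datetime.datetime.now() would
    # instead bake in a single value computed when this module is imported.
    sa.Column("clock", sa.DateTime(timezone=False),
              default=datetime.datetime.now, nullable=False),
)

engine = sa.create_engine("sqlite://")
metadata.create_all(engine)

with engine.connect() as conn:
    conn.execute(tasks.insert().values(id=1))   # clock filled in by the default
    print(conn.execute(tasks.select()).first())
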
Example 78

Project: xbmc-addon-tvtumbler Source File: tvdb_api.py
    def __init__(self,
                interactive = False,
                select_first = False,
                debug = False,
                cache = True,
                banners = False,
                actors = False,
                custom_ui = None,
                language = None,
                search_all_languages = False,
                apikey = None,
                forceConnect=False,
                useZip=False):

        """interactive (True/False):
            When True, the built-in console UI is used to select the correct show.
            When False, the first search result is used.

        select_first (True/False):
            Automatically selects the first series search result (rather
            than showing the user a list of more than one series).
            Is overridden by interactive = False, or specifying a custom_ui

        debug (True/False) DEPRECATED:
             Replaced with proper use of logging module. To show debug messages:

                 >>> import logging
                 >>> logging.basicConfig(level = logging.DEBUG)

        cache (True/False/str/unicode/urllib2 opener):
            Retrieved XML is persisted to disc. If True, it is stored in a
            tvdb_api folder under your system's TEMP_DIR; if set to a
            str/unicode instance, that path is used as the cache
            location. If False, caching is disabled. Can also be passed
            an arbitrary Python object, which is used as a urllib2
            opener and should be created by urllib2.build_opener.

        banners (True/False):
            Retrieves the banners for a show. These are accessed
            via the _banners key of a Show(), for example:

            >>> Tvdb(banners=True)['scrubs']['_banners'].keys()
            ['fanart', 'poster', 'series', 'season']

        actors (True/False):
            Retrieves a list of the actors for a show. These are accessed
            via the _actors key of a Show(), for example:

            >>> t = Tvdb(actors=True)
            >>> t['scrubs']['_actors'][0]['name']
            u'Zach Braff'

        custom_ui (tvdb_ui.BaseUI subclass):
            A callable subclass of tvdb_ui.BaseUI (overrides interactive option)

        language (2 character language abbreviation):
            The language of the returned data. Is also the language search
            uses. Default is "en" (English). For full list, run..

            >>> Tvdb().config['valid_languages'] #doctest: +ELLIPSIS
            ['da', 'fi', 'nl', ...]

        search_all_languages (True/False):
            By default, Tvdb will only search in the language specified using
            the language option. When this is True, it will search for the
            show in any language.
        
        apikey (str/unicode):
            Override the default thetvdb.com API key. By default it will use
            tvdb_api's own key (fine for small scripts), but you can use your
            own key if desired - this is recommended if you are embedding
            tvdb_api in a larger application.
            See http://thetvdb.com/?tab=apiregister to get your own key

        forceConnect (bool):
            If True, it will always try to connect to theTVDB.com even if we
            recently timed out. By default it will wait one minute before
            trying again, and any requests within that one-minute window will
            raise an exception immediately.

        useZip (bool):
            Download the zip archive where possible, instead of the xml.
            This is only used when all episodes are pulled.
            Only the main-language xml is used; the actor and banner xml are lost.
        """
        
        global lastTimeout
        
        # if we're given a lastTimeout that is less than 1 min just give up
        if not forceConnect and lastTimeout != None and datetime.datetime.now() - lastTimeout < datetime.timedelta(minutes=1):
            raise tvdb_error("We recently timed out, so giving up early this time")
        
        self.shows = ShowContainer() # Holds all Show classes
        self.corrections = {} # Holds show-name to show_id mapping

        self.config = {}

        if apikey is not None:
            self.config['apikey'] = apikey
        else:
            self.config['apikey'] = "0629B785CE550C8D" # tvdb_api's API key

        self.config['debug_enabled'] = debug # show debugging messages

        self.config['custom_ui'] = custom_ui

        self.config['interactive'] = interactive # prompt for correct series?

        self.config['select_first'] = select_first

        self.config['search_all_languages'] = search_all_languages

        self.config['useZip'] = useZip


        if cache is True:
            self.config['cache_enabled'] = True
            self.config['cache_location'] = self._getTempDir()
            self.urlopener = urllib2.build_opener(
                CacheHandler(self.config['cache_location'])
            )

        elif cache is False:
            self.config['cache_enabled'] = False
            self.urlopener = urllib2.build_opener() # default opener with no caching

        elif isinstance(cache, basestring):
            self.config['cache_enabled'] = True
            self.config['cache_location'] = cache
            self.urlopener = urllib2.build_opener(
                CacheHandler(self.config['cache_location'])
            )

        elif isinstance(cache, urllib2.OpenerDirector):
            # If passed something from urllib2.build_opener, use that
            log().debug("Using %r as urlopener" % cache)
            self.config['cache_enabled'] = True
            self.urlopener = cache

        else:
            raise ValueError("Invalid value for Cache %r (type was %s)" % (cache, type(cache)))

        self.config['banners_enabled'] = banners
        self.config['actors_enabled'] = actors

        if self.config['debug_enabled']:
            warnings.warn("The debug argument to tvdb_api.__init__ will be removed in the next version. "
            "To enable debug messages, use the following code before importing: "
            "import logging; logging.basicConfig(level=logging.DEBUG)")
            logging.basicConfig(level=logging.DEBUG)


        # List of language from http://thetvdb.com/api/0629B785CE550C8D/languages.xml
        # Hard-coded here as it is relatively static, and saves another HTTP request, as
        # recommended on http://thetvdb.com/wiki/index.php/API:languages.xml
        self.config['valid_languages'] = [
            "da", "fi", "nl", "de", "it", "es", "fr","pl", "hu","el","tr",
            "ru","he","ja","pt","zh","cs","sl", "hr","ko","en","sv","no"
        ]

        # thetvdb.com should be based around numeric language codes,
        # but to link to a series like http://thetvdb.com/?tab=series&id=79349&lid=16
        # requires the language ID, thus this mapping is required (mainly
        # for usage in tvdb_ui - internally tvdb_api will use the language abbreviations)
        self.config['langabbv_to_id'] = {'el': 20, 'en': 7, 'zh': 27,
        'it': 15, 'cs': 28, 'es': 16, 'ru': 22, 'nl': 13, 'pt': 26, 'no': 9,
        'tr': 21, 'pl': 18, 'fr': 17, 'hr': 31, 'de': 14, 'da': 10, 'fi': 11,
        'hu': 19, 'ja': 25, 'he': 24, 'ko': 32, 'sv': 8, 'sl': 30}

        if language is None:
            self.config['language'] = 'en'
        else:
            if language not in self.config['valid_languages']:
                raise ValueError("Invalid language %s, options are: %s" % (
                    language, self.config['valid_languages']
                ))
            else:
                self.config['language'] = language

        # The following url_ configs are based of the
        # http://thetvdb.com/wiki/index.php/Programmers_API
        self.config['base_url'] = "http://thetvdb.com"

        if self.config['search_all_languages']:
            self.config['url_getSeries'] = u"%(base_url)s/api/GetSeries.php?seriesname=%%s&language=all" % self.config
        else:
            self.config['url_getSeries'] = u"%(base_url)s/api/GetSeries.php?seriesname=%%s&language=%(language)s" % self.config

        self.config['url_epInfo'] = u"%(base_url)s/api/%(apikey)s/series/%%s/all/%%s.xml" % self.config
        self.config['url_epInfo_zip'] = u"%(base_url)s/api/%(apikey)s/series/%%s/all/%%s.zip" % self.config

        self.config['url_seriesInfo'] = u"%(base_url)s/api/%(apikey)s/series/%%s/%%s.xml" % self.config
        self.config['url_actorsInfo'] = u"%(base_url)s/api/%(apikey)s/series/%%s/actors.xml" % self.config

        self.config['url_seriesBanner'] = u"%(base_url)s/api/%(apikey)s/series/%%s/banners.xml" % self.config
        self.config['url_artworkPrefix'] = u"%(base_url)s/banners/%%s" % self.config
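
The first thing the constructor above does is a cooldown check: if a timeout was recorded less than a minute ago and forceConnect is false, it raises immediately instead of hitting the network again. A minimal sketch of that guard, with hypothetical helper names, assuming the same module-level marker pattern:

import datetime

_last_timeout = None  # module-level marker, playing the role of lastTimeout above

def fetch(force_connect=False):
    """Fail fast if a timeout happened within the last minute."""
    global _last_timeout
    if (not force_connect and _last_timeout is not None
            and datetime.datetime.now() - _last_timeout < datetime.timedelta(minutes=1)):
        raise RuntimeError("We recently timed out, so giving up early this time")
    try:
        return _do_network_request()       # hypothetical call that may time out
    except TimeoutError:
        _last_timeout = datetime.datetime.now()
        raise

def _do_network_request():
    raise TimeoutError("simulated timeout")

try:
    fetch()
except TimeoutError:
    pass                                   # first call times out and records the moment

try:
    fetch()
except RuntimeError as exc:
    print(exc)                             # second call within a minute fails fast

Passing force_connect=True skips the guard, matching the forceConnect flag documented above.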

Example 79

Project: sentry-old Source File: test_events.py
    def test_create(self):
        # redis is so blazing fast that we have to artificially inflate dates
        # or tests won't pass :)
        now = datetime.datetime.now()

        event, group = app.client.store(
            'sentry.events.Message',
            tags=(
                ('server', 'foo.bar'),
                ('culprit', 'foo.bar.zoo.baz'),
            ),
            date=now,
            time_spent=53,
            data={
                'sentry.interfaces.Message': {
                    'message': 'hello world'
                }
            },
            event_id='foobar',
        )
        group_id = group.pk

        self.assertTrue(group.pk)
        self.assertEquals(group.type, 'sentry.events.Message')
        self.assertEquals(group.time_spent, 53)
        self.assertEquals(group.count, 1)
        self.assertEquals(len(group.tags), 2)

        tag = group.tags[0]

        self.assertEquals(tag[0], 'server')
        self.assertEquals(tag[1], 'foo.bar')

        tag = group.tags[1]

        self.assertEquals(tag[0], 'culprit')
        self.assertEquals(tag[1], 'foo.bar.zoo.baz')

        events = group.get_relations(Event)

        self.assertEquals(len(events), 1)

        event = events[0]

        self.assertEquals(event.time_spent, group.time_spent)
        self.assertEquals(event.type, group.type)
        self.assertEquals(event.date, group.last_seen)
        self.assertEquals(len(event.tags), 2)

        tag = event.tags[0]

        self.assertEquals(tag[0], 'server')
        self.assertEquals(tag[1], 'foo.bar')

        tag = event.tags[1]

        self.assertEquals(tag[0], 'culprit')
        self.assertEquals(tag[1], 'foo.bar.zoo.baz')

        event, group = app.client.store(
            'sentry.events.Message',
            tags=(
                ('server', 'foo.bar'),
            ),
            date=now + datetime.timedelta(seconds=1),
            time_spent=100,
            data={
                'sentry.interfaces.Message': {
                    'message': 'hello world',
                },
            },
            event_id='foobar2',
        )

        self.assertEquals(group.pk, group_id)
        self.assertEquals(group.count, 2)
        self.assertEquals(group.time_spent, 153)
        self.assertEquals(len(group.tags), 2)

        tag = group.tags[0]

        self.assertEquals(tag[0], 'server')
        self.assertEquals(tag[1], 'foo.bar')

        tag = group.tags[1]

        self.assertEquals(tag[0], 'culprit')
        self.assertEquals(tag[1], 'foo.bar.zoo.baz')

        events = group.get_relations(Event, desc=False)

        self.assertEquals(len(events), 2)

        event = events[1]

        self.assertEquals(event.time_spent, 100)
        self.assertEquals(event.type, group.type)
        self.assertEquals(group.last_seen, event.date)
        self.assertEquals(len(event.tags), 1)

        tag = event.tags[0]

        self.assertEquals(tag[0], 'server')
        self.assertEquals(tag[1], 'foo.bar')

        tags = Tag.objects.order_by('-count')

        self.assertEquals(len(tags), 2, tags)

        groups = Group.objects.all()

        self.assertEquals(len(groups), 1)

        event, group = app.client.store(
            'sentry.events.Message',
            tags=(
                ('server', 'foo.bar'),
            ),
            date=now + datetime.timedelta(seconds=1),
            time_spent=100,
            data={
                'sentry.interfaces.Message': {
                    'message': 'hello world 2',
                },
            },
            event_id='foobar2',
        )

        self.assertNotEquals(group.pk, group_id)
        self.assertEquals(group.count, 1)
        self.assertEquals(group.time_spent, 100)
        self.assertEquals(len(group.tags), 1)

        tag = group.tags[0]

        self.assertEquals(tag[0], 'server')
        self.assertEquals(tag[1], 'foo.bar')

        events = group.get_relations(Event, desc=False)

        self.assertEquals(len(events), 1)

        event = events[0]

        self.assertEquals(event.time_spent, 100)
        self.assertEquals(event.type, group.type)
        self.assertEquals(group.last_seen, event.date)
        self.assertEquals(len(event.tags), 1)

        tag = event.tags[0]

        self.assertEquals(tag[0], 'server')
        self.assertEquals(tag[1], 'foo.bar')

        tags = Tag.objects.order_by('-count')

        self.assertEquals(len(tags), 2, tags)

        tag = tags[0]

        self.assertEquals(tag.key, 'server')
        self.assertEquals(tag.value, 'foo.bar')
        self.assertEquals(tag.count, 2)

        tag = tags[1]

        self.assertEquals(tag.key, 'culprit')
        self.assertEquals(tag.value, 'foo.bar.zoo.baz')
        self.assertEquals(tag.count, 1)

        groups = Group.objects.all()

        self.assertEquals(len(groups), 2)
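
As the comment at the top of the test notes, the backend is fast enough that two events stored back to back could share a timestamp, so the test captures now() once and shifts later events with a timedelta. A minimal sketch of that pattern:

import datetime

now = datetime.datetime.now()

first_event_date = now
second_event_date = now + datetime.timedelta(seconds=1)   # guaranteed strictly later

assert second_event_date > first_event_date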

Example 80

Project: haoide Source File: metadata.py
Function: deploy
    def deploy(self, base64_zip, test_classes=[]):
        """ Deploy zip file

        Arguments:

        * base64_zip -- base64-encoded zip file
        """
        result = self.login()
        if not result or not result["success"]: return

        # Log the StartTime
        start_time = datetime.datetime.now()

        # Populate the soap_body with actual options
        deploy_options = self.settings["deploy_options"]
        
        # If just checkOnly, output VALIDATE, otherwise, output DEPLOY
        deploy_or_validate = "validate" if deploy_options["checkOnly"] else "deploy"

        # [sf:deploy]
        Printer.get('log').write_start().write("[sf:%s] Start request for a deploy..." % deploy_or_validate)
        options = deploy_options
        options["zipfile"] = base64_zip

        # If testLevel is Run Specified Test, 
        # we need to specify the runTests
        testLevel = options.get("testLevel", "NoTestRun") 
        if testLevel == "RunSpecifiedTests":
            options["runTests"] = "\n".join([
                "<met:runTests>%s</met:runTests>" % c for c in test_classes
            ])
        soap_body = self.soap.create_request('deploy', options)

        try:
            response = requests.post(self.metadata_url, soap_body, 
                verify=False, headers=self.headers)
        except requests.exceptions.RequestException as e:
            self.result = {
                "Error Message":  "Network connection timeout when issuing deploy request",
                "success": False
            }
            return self.result

        # Check whether session_id is expired
        if "INVALID_SESSION_ID" in response.text:
            Printer.get('log').write("[sf:%s] Session expired, need login again" % deploy_or_validate)
            result = self.login(True)
            if not result["success"]:
                self.result = result
                return self.result
            return self.deploy(base64_zip)

        # If status_code is > 399, the response indicates an error
        if response.status_code > 399:
            self.result = util.get_response_error(response)
            return self.result

        # [sf:deploy]
        Printer.get('log').write("[sf:%s] Request for a deploy submitted successfully." % deploy_or_validate)

        # Get async process id
        async_process_id = util.getUniqueElementValueFromXmlString(response.content, "id")

        # [sf:deploy]
        Printer.get('log').write("[sf:%s] Request ID for the current deploy task: %s" % (deploy_or_validate, async_process_id))
        Printer.get('log').write("[sf:%s] Waiting for server to finish processing the request..." % deploy_or_validate)

        # 2. issue a check status loop request to assure the async
        # process is done
        result = self.check_deploy_status(async_process_id)

        body = result["body"]

        index = 1
        failure_dict = {}
        while body["status"] in ["Pending", "InProgress", "Canceling"]:
            if "stateDetail" in body:
                if int(body["numberComponentsDeployed"]) < int(body["numberComponentsTotal"]):
                    Printer.get('log').write("[sf:%s] Request Status: %s (%s/%s)  -- %s" % (
                        deploy_or_validate,
                        body["status"], 
                        body["numberComponentsDeployed"],
                        body["numberComponentsTotal"],
                        body["stateDetail"]
                    ))
                else:
                    Printer.get('log').write("[sf:%s] TestRun Status: %s (%s/%s)  -- %s" % (
                        deploy_or_validate,
                        body["status"], 
                        body["numberTestsCompleted"],
                        body["numberTestsTotal"],
                        body["stateDetail"]
                    ))
            else:
                Printer.get('log').write("[sf:%s] Request Status: %s" % (
                    deploy_or_validate, body["status"]
                ))

            # Process Test Run Result
            if "runTestResult" in body["details"] and \
                "failures" in body["details"]["runTestResult"]:

                failures = body["details"]["runTestResult"]["failures"]
                if isinstance(failures, dict):
                    if failures["id"] not in failure_dict:
                        failure_dict[failures["id"]] = failures

                        Printer.get('log').write("-" * 84).write("Test Failures: ")
                        Printer.get('log').write("%s.\t%s" % (index, failures["message"]))
                        for msg in failures["stackTrace"].split("\n"):
                            Printer.get('log').write("\t%s" % msg)

                        # [sf:deploy]
                        Printer.get('log').write("-" * 84)

                        index += 1
                        
                elif isinstance(failures, list):
                    for f in failures:
                        if f["id"] not in failure_dict:
                            failure_dict[f["id"]] = f

                            Printer.get('log').write("-" * 84).write("Test Failures: ")
                            Printer.get('log').write("%s.\t%s" % (index, f["message"]))

                            # If compile error, there will no stack trace
                            if isinstance(f["stackTrace"], str):
                                for msg in f["stackTrace"].split("\n"):
                                    Printer.get('log').write("\t%s" % msg)
                                Printer.get('log').write("-" * 84)

                            index += 1

            # Thread Wait
            sleep_seconds = 2 if body["status"] == "Pending" else self.settings["metadata_polling_frequency"]
            time.sleep(sleep_seconds)
            
            result = self.check_deploy_status(async_process_id)
            body = result["body"]

        # Check if job is canceled
        if body["status"] == "Canceled":
            Printer.get('log').write("\nBUILD FAILED", False)
            Printer.get('log').write("cuem******* DEPLOYMENT FAILED ***********", False)
            Printer.get('log').write("Request ID: %s" % async_process_id, False)
            Printer.get('log').write("\nRequest Canceled", False)
            Printer.get('log').write("*********** DEPLOYMENT FAILED ***********", False)

        # If check status request failed, this will not be done
        elif body["status"] == "Failed":
            # Append failure message
            Printer.get('log').write("[sf:%s] Request Failed\n\nBUILD FAILED" % deploy_or_validate)
            Printer.get('log').write("*********** DEPLOYMENT FAILED ***********", False)
            Printer.get('log').write("Request ID: %s" % async_process_id, False)
            
            # Output Failure Details
            failures_messages = []
            if "componentFailures" in body["details"]:
                component_failures = body["details"]["componentFailures"]
                if isinstance(component_failures, dict):
                    component_failures = [component_failures]

                for index in range(len(component_failures)):
                    component_failure = component_failures[index]
                    failures_messages.append("%s. %s -- %s: %s (line %s column %s)" % (
                        index + 1, 
                        component_failure["fileName"],
                        component_failure["problemType"],
                        component_failure["problem"],
                        component_failure["lineNumber"] \
                            if "lineNumber" in component_failure else "N/A",
                        component_failure["columnNumber"] \
                            if "columnNumber" in component_failure else "N/A"
                    ))
            elif "runTestResult" in body["details"]:
                failures = body["details"]["runTestResult"].get("failures", [])
                if isinstance(failures, dict):
                    failures = [failures]

                for index in range(len(failures)):
                    failure = failures[index]
                    failures_messages.append("%s. %s -- %s: %s" % (
                        index + 1, 
                        failure.get("type"),
                        failure.get("name"),
                        failure.get("message")
                    ))

            elif "errorMessage" in body:
                Printer.get('log').write("\n" + body["errorMessage"], False)

            warning_messages = []
            if "runTestResult" in body["details"]:
                runTestResult = body["details"]["runTestResult"]
                if "codeCoverageWarnings" in runTestResult:
                    coverage_warnings = runTestResult["codeCoverageWarnings"]
                    if isinstance(runTestResult["codeCoverageWarnings"], dict):
                        coverage_warnings = [coverage_warnings]
                    elif isinstance(runTestResult["codeCoverageWarnings"], list):
                        coverage_warnings = coverage_warnings

                    for warn in coverage_warnings:
                        if not isinstance(warn["name"], str): continue
                        warning_messages.append("%s -- %s" % (warn["name"], warn["message"]))

            # Output failure message
            if failures_messages:
                Printer.get('log').write("\n\nAll Component Failures:", False)
                Printer.get('log').write("\n"+"\n\n".join(failures_messages), False)

            # Output warning message
            if warning_messages:
                Printer.get('log').write("\n\nTest Coverage Warnings:", False)
                Printer.get('log').write("\n"+"\n".join(warning_messages), False)
            
            # End for Deploy Result
            Printer.get('log').write("\n*********** %s FAILED ***********" % (
                deploy_or_validate.upper()), False)
        else:
            # Append succeed message
            Printer.get('log').write("\n[sf:%s] Request Succeed" % deploy_or_validate, False)
            Printer.get('log').write("[sf:%s] *********** %s SUCCEEDED ***********" % (
                deploy_or_validate, deploy_or_validate.upper()), False)
            Printer.get('log').write("[sf:%s] Finished request %s successfully." % (
                deploy_or_validate, async_process_id), False)

        # Total time
        total_seconds = (datetime.datetime.now() - start_time).seconds
        Printer.get('log').write("\n\nTotal time: %s seconds" % total_seconds, False)

        # # Display debug log message in the new view
        # view = sublime.active_window().new_file()
        # view.run_command("new_view", {
        #     "name": "Debugging Information",
        #     "input": result.get("header", {}).get("debugLog", "")
        # })

        self.result = result
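
The deploy method brackets the whole request with datetime.datetime.now() and reports (end - start).seconds at the end. A minimal standalone sketch of that timing is below; note that timedelta.seconds only counts the seconds within the last day, so total_seconds() is the safer choice for jobs that could run longer than 24 hours.

import datetime
import time

start_time = datetime.datetime.now()
time.sleep(0.1)                                   # stand-in for the long-running deploy
elapsed = datetime.datetime.now() - start_time

print("Total time: %s seconds" % elapsed.seconds)            # what the code above reports
print("Total time: %.1f seconds" % elapsed.total_seconds())  # day-safe alternative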

Example 81

Project: DIRAC Source File: Test_InstalledComponentsDB.py
  def testInstallations( self ):
    """
    Test the InstalledComponents database operations
    """

    # Create a sample installation
    result = self.client.addInstallation \
                              ( { 'InstallationTime': datetime.datetime.now(),
                                  'UnInstallationTime': datetime.datetime.now(),
                                  'Instance': 'TestInstallA111' },
                                  { 'System': 'UnexistentSystem',
                                    'Module': 'UnexistentModule',
                                    'Type': 'UnexistentType' },
                                  { 'HostName': 'fictional',
                                    'CPU': 'TestCPU' },
                                  True )

    self.assert_( result[ 'OK' ] )

    # Check if the installation exists
    result = self.client.getInstallations( { 'Instance': 'TestInstallA111' },
                                           { 'System': 'UnexistentSystem',
                                              'Module': 'UnexistentModule',
                                              'Type': 'UnexistentType' },
                                            { 'HostName': 'fictional',
                                              'CPU': 'TestCPU' },
                                            False )

    self.assert_( result[ 'OK' ] and len( result[ 'Value' ] ) > 0 )

    # Update the fields of the created installation
    result = self.client.updateInstallations( { 'Instance': 'TestInstallA111' },
                                              { 'System': 'UnexistentSystem',
                                                'Module': 'UnexistentModule',
                                                'Type': 'UnexistentType' },
                                              { 'HostName': 'fictional',
                                                'CPU': 'TestCPU' },
                                              { 'Instance': 'TestInstallA222' }
                                            )

    self.assert_( result[ 'OK' ] )

    # Check if the installation with the modified fields exists
    result = self.client.getInstallations( { 'Instance': 'TestInstallA222' },
                                           { 'System': 'UnexistentSystem',
                                              'Module': 'UnexistentModule',
                                              'Type': 'UnexistentType' },
                                            { 'HostName': 'fictional',
                                              'CPU': 'TestCPU' },
                                            False )

    self.assert_( result[ 'OK' ] and len( result[ 'Value' ] ) > 0 )

    # Remove the Installation
    result = self.client.removeInstallations( { 'Instance': 'TestInstallA222' },
                                              { 'System': 'UnexistentSystem',
                                                'Module': 'UnexistentModule',
                                                'Type': 'UnexistentType' },
                                              { 'HostName': 'fictional',
                                                'CPU': 'TestCPU' } )

    self.assert_( result[ 'OK' ] )

    # Check if the installation was actually removed
    result = self.client.getInstallations( { 'Instance': 'TestInstallA222' },
                                            { 'System': 'UnexistentSystem',
                                              'Module': 'UnexistentModule',
                                              'Type': 'UnexistentType' },
                                            { 'HostName': 'fictional',
                                              'CPU': 'TestCPU' },
                                            False )

    self.assert_( result[ 'OK' ] and len( result[ 'Value' ] ) <= 0 )

    # Create an installation associated with nonexistent Component
    result = self.client.addInstallation( 
                                { 'InstallationTime': datetime.datetime.now(),
                                  'UnInstallationTime': datetime.datetime.now(),
                                  'Instance': 'TestInstallA333' },
                                { 'System': 'UnexistentSystem',
                                  'Module': 'UnexistentModule22A',
                                  'Type': 'UnexistentType' },
                                { 'HostName': 'fictional',
                                  'CPU': 'TestCPU' } ,
                                False )

    self.assertFalse( result[ 'OK' ] )

    # Multiple removal
    self.client.addInstallation( 
                                { 'InstallationTime': datetime.datetime.now(),
                                  'UnInstallationTime': datetime.datetime.now(),
                                  'Instance': 'MultipleRemovalInstall1' },
                                { 'System': 'UnexistentSystem',
                                  'Module': 'UnexistentModule',
                                  'Type': 'UnexistentType' },
                                { 'HostName': 'fictional',
                                  'CPU': 'TestCPU' },
                                False )
    self.client.addInstallation( 
                                { 'InstallationTime': datetime.datetime.now(),
                                  'UnInstallationTime': datetime.datetime.now(),
                                  'Instance': 'MultipleRemovalInstall2' },
                                { 'System': 'UnexistentSystem',
                                  'Module': 'UnexistentModule',
                                  'Type': 'UnexistentType' },
                                { 'HostName': 'fictional',
                                  'CPU': 'TestCPU' } ,
                                False )
    self.client.addInstallation( 
                                { 'InstallationTime': datetime.datetime.now(),
                                  'UnInstallationTime': datetime.datetime.now(),
                                  'Instance': 'MultipleRemovalInstall3' },
                                { 'System': 'UnexistentSystem',
                                  'Module': 'UnexistentModule2',
                                  'Type': 'UnexistentType' },
                                { 'HostName': 'fictional',
                                  'CPU': 'TestCPU' },
                                True )

    result = self.client.getInstallations( 
                  { 'Instance':
                    [ 'MultipleRemovalInstall1', 'MultipleRemovalInstall3' ] },
                  {},
                  {},
                  False )

    self.assert_( result[ 'OK' ] and len( result[ 'Value' ] ) == 2 )

    self.client.removeInstallations( {},
                                     { 'Module': 'UnexistentModule' },
                                     {} )

    result = self.client.getInstallations( {},
                                      { 'Module': 'UnexistentModule2' },
                    {}, False )

    self.assert_( result[ 'OK' ] and len( result[ 'Value' ] ) >= 1 )

    result = self.client.getInstallations( {},
                                           { 'Module': 'UnexistentModule' },
                                           {},
                                           False )

    self.assert_( result[ 'OK' ] and len( result[ 'Value' ] ) <= 0 )

    self.client.removeInstallations( {},
                                     { 'Module': 'UnexistentModule2' },
                                     {} )

    self.assert_( result[ 'OK' ] )

    # Clean up what we created
    self.client.removeHosts( { 'HostName': 'fictional', 'CPU': 'TestCPU' } )
    self.client.removeComponents( { 'System': 'UnexistentSystem',
                                    'Module': 'UnexistentModule',
                                    'Type': 'UnexistentType' } )
    self.client.removeComponents( { 'System': 'UnexistentSystem',
                                    'Module': 'UnexistentModule2',
                                    'Type': 'UnexistentType' } )
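
Each datetime.datetime.now() call in the test above yields a slightly different value, which is harmless here but worth keeping in mind: when two fields are meant to carry the same timestamp, capture now() once and reuse it. A minimal sketch with a hypothetical record:

import datetime

stamp = datetime.datetime.now()
installation = {
    'InstallationTime': stamp,
    'UnInstallationTime': stamp,   # identical to InstallationTime by construction
    'Instance': 'TestInstallA111',
}
assert installation['InstallationTime'] == installation['UnInstallationTime']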

Example 82

Project: osf.io Source File: test_user.py
    @mock.patch('website.mailchimp_utils.get_mailchimp_api')
    def test_merge(self, mock_get_mailchimp_api):
        other_user = factories.UserFactory()
        other_user.save()

        # define values for users' fields
        today = datetime.datetime.now()
        yesterday = today - datetime.timedelta(days=1)

        self.user.comments_viewed_timestamp['shared_gt'] = today
        other_user.comments_viewed_timestamp['shared_gt'] = yesterday
        self.user.comments_viewed_timestamp['shared_lt'] = yesterday
        other_user.comments_viewed_timestamp['shared_lt'] = today
        self.user.comments_viewed_timestamp['user'] = yesterday
        other_user.comments_viewed_timestamp['other'] = yesterday

        self.user.email_verifications = {'user': {'email': 'a'}}
        other_user.email_verifications = {'other': {'email': 'b'}}

        self.user.notifications_configured = {'abc12': True}
        other_user.notifications_configured = {'123ab': True}

        self.user.external_accounts = [factories.ExternalAccountFactory()]
        other_user.external_accounts = [factories.ExternalAccountFactory()]

        self.user.mailchimp_mailing_lists = {
            'user': True,
            'shared_gt': True,
            'shared_lt': False,
        }
        other_user.mailchimp_mailing_lists = {
            'other': True,
            'shared_gt': False,
            'shared_lt': True,
        }

        self.user.security_messages = {
            'user': today,
            'shared': today,
        }
        other_user.security_messages = {
            'other': today,
            'shared': today,
        }

        self.user.system_tags = ['user', 'shared']
        other_user.system_tags = ['other', 'shared']

        self.user.watched = [factories.WatchConfigFactory()]
        other_user.watched = [factories.WatchConfigFactory()]

        self.user.save()
        other_user.save()

        # define expected behavior for ALL FIELDS of the User object
        default_to_master_user_fields = [
            '_id',
            'date_confirmed',
            'date_disabled',
            'date_last_login',
            'date_registered',
            'email_last_sent',
            'external_identity',
            'family_name',
            'fullname',
            'given_name',
            'is_claimed',
            'is_invited',
            'is_registered',
            'jobs',
            'locale',
            'merged_by',
            'middle_names',
            'password',
            'recently_added',
            'schools',
            'social',
            'suffix',
            'timezone',
            'username',
            'mailing_lists',
            'verification_key',
            'verification_key_v2',
            '_affiliated_institutions',
            'contributor_added_email_records',
            'requested_deactivation',
            'registered_by'
        ]

        calculated_fields = {
            'comments_viewed_timestamp': {
                'user': yesterday,
                'other': yesterday,
                'shared_gt': today,
                'shared_lt': today,
            },
            'email_verifications': {
                'user': {'email': 'a'},
                'other': {'email': 'b'},
            },
            'notifications_configured': {
                '123ab': True, 'abc12': True,
            },
            'emails': [
                self.user.username,
                other_user.username,
            ],
            'external_accounts': [
                self.user.external_accounts[0]._id,
                other_user.external_accounts[0]._id,
            ],
            'mailchimp_mailing_lists': {
                'user': True,
                'other': True,
                'shared_gt': True,
                'shared_lt': True,
            },
            'osf_mailing_lists': {
                'Open Science Framework Help': True
            },
            'security_messages': {
                'user': today,
                'other': today,
                'shared': today,
            },
            'system_tags': ['user', 'shared', 'other'],
            'unclaimed_records': {},
            'watched': [
                self.user.watched[0]._id,
                other_user.watched[0]._id,
            ],
        }

        # from the explicit rules above, compile expected field/value pairs
        expected = {}
        expected.update(calculated_fields)
        for key in default_to_master_user_fields:
            expected[key] = getattr(self.user, key)

        # ensure all fields of the user object have an explicit expectation
        assert_equal(
            set(expected.keys()),
            set(self.user._fields),
        )

        # mock mailchimp
        mock_client = mock.MagicMock()
        mock_get_mailchimp_api.return_value = mock_client
        mock_client.lists.list.return_value = {'data': [{'id': x, 'list_name': list_name} for x, list_name in enumerate(self.user.mailchimp_mailing_lists)]}

        # perform the merge
        self.user.merge_user(other_user)
        self.user.save()
        handlers.celery_teardown_request()

        # check each field/value pair
        for k, v in expected.iteritems():
            assert_equal(
                getattr(self.user, k),
                v,
                # "{} doesn't match expectation".format(k)
            )

        # check fields set on merged user
        assert_equal(other_user.merged_by, self.user)

        assert_equal(
            0,
            models.Session.find(
                Q('data.auth_user_id', 'eq', other_user._id)
            ).count()
        )
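
The calculated_fields block above expects the merged user to keep, for each shared comments_viewed_timestamp key, the more recent of the two values. One way to express that rule is sketched below with hypothetical dicts; the actual merge logic lives in merge_user inside the OSF codebase.

import datetime

today = datetime.datetime.now()
yesterday = today - datetime.timedelta(days=1)

user = {'shared_gt': today, 'shared_lt': yesterday, 'user': yesterday}
other = {'shared_gt': yesterday, 'shared_lt': today, 'other': yesterday}

# Keep the later timestamp for keys present in both dicts.
merged = dict(other)
for key, value in user.items():
    merged[key] = max(value, merged.get(key, value))

assert merged == {'shared_gt': today, 'shared_lt': today,
                  'user': yesterday, 'other': yesterday}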

Example 83

Project: skll Source File: experiments.py
def _classify_featureset(args):
    """ Classification job to be submitted to grid """
    # Extract all the arguments.
    # (There doesn't seem to be a better way to do this since one can't specify
    # required keyword arguments.)
    experiment_name = args.pop("experiment_name")
    task = args.pop("task")
    sampler = args.pop("sampler")
    feature_hasher = args.pop("feature_hasher")
    hasher_features = args.pop("hasher_features")
    job_name = args.pop("job_name")
    featureset = args.pop("featureset")
    featureset_name = args.pop("featureset_name")
    learner_name = args.pop("learner_name")
    train_path = args.pop("train_path")
    test_path = args.pop("test_path")
    train_set_name = args.pop("train_set_name")
    test_set_name = args.pop("test_set_name")
    shuffle = args.pop('shuffle')
    model_path = args.pop("model_path")
    prediction_prefix = args.pop("prediction_prefix")
    grid_search = args.pop("grid_search")
    grid_objective = args.pop("grid_objective")
    suffix = args.pop("suffix")
    log_path = args.pop("log_path")
    probability = args.pop("probability")
    results_path = args.pop("results_path")
    fixed_parameters = args.pop("fixed_parameters")
    sampler_parameters = args.pop("sampler_parameters")
    param_grid = args.pop("param_grid")
    pos_label_str = args.pop("pos_label_str")
    overwrite = args.pop("overwrite")
    feature_scaling = args.pop("feature_scaling")
    min_feature_count = args.pop("min_feature_count")
    grid_search_jobs = args.pop("grid_search_jobs")
    grid_search_folds = args.pop("grid_search_folds")
    cv_folds = args.pop("cv_folds")
    save_cv_folds = args.pop("save_cv_folds")
    stratified_folds = args.pop("do_stratified_folds")
    label_col = args.pop("label_col")
    id_col = args.pop("id_col")
    ids_to_floats = args.pop("ids_to_floats")
    class_map = args.pop("class_map")
    custom_learner_path = args.pop("custom_learner_path")
    quiet = args.pop('quiet', False)

    if args:
        raise ValueError(("Extra arguments passed to _classify_featureset: "
                          "{}").format(args.keys()))
    start_timestamp = datetime.datetime.now()

    with open(log_path, 'w') as log_file:
        # logging
        print("Task:", task, file=log_file)
        if task == 'cross_validate':
            print(("Cross-validating ({} folds) on {}, feature " +
                   "set {} ...").format(cv_folds, train_set_name, featureset),
                  file=log_file)
        elif task == 'evaluate':
            print(("Training on {}, Test on {}, " +
                   "feature set {} ...").format(train_set_name, test_set_name,
                                                featureset),
                  file=log_file)
        elif task == 'train':
            print("Training on {}, feature set {} ...".format(train_set_name,
                                                              featureset),
                  file=log_file)
        else:  # predict
            print(("Training on {}, Making predictions about {}, " +
                   "feature set {} ...").format(train_set_name, test_set_name,
                                                featureset),
                  file=log_file)

        # check whether a trained model on the same data with the same
        # featureset already exists; if so, load it and then use it on test data
        modelfile = join(model_path, '{}.model'.format(job_name))
        if task == 'cross_validate' or (not exists(modelfile) or
                                        overwrite):
            train_examples = _load_featureset(train_path, featureset, suffix,
                                              label_col=label_col,
                                              id_col=id_col,
                                              ids_to_floats=ids_to_floats,
                                              quiet=quiet, class_map=class_map,
                                              feature_hasher=feature_hasher,
                                              num_features=hasher_features)

            train_set_size = len(train_examples.ids)
            if not train_examples.has_labels:
                raise ValueError('Training examples do not have labels')
            # initialize a classifier object
            learner = Learner(learner_name,
                              probability=probability,
                              feature_scaling=feature_scaling,
                              model_kwargs=fixed_parameters,
                              pos_label_str=pos_label_str,
                              min_feature_count=min_feature_count,
                              sampler=sampler,
                              sampler_kwargs=sampler_parameters,
                              custom_learner_path=custom_learner_path)
        # load the model if it already exists
        else:
            # import the custom learner path here in case we are reusing a
            # saved model
            if custom_learner_path:
                _import_custom_learner(custom_learner_path, learner_name)
            train_set_size = 'unknown'
            if exists(modelfile) and not overwrite:
                print(('\tloading pre-existing %s model: %s') % (learner_name,
                                                                 modelfile))
            learner = Learner.from_file(modelfile)

        # Load test set if there is one
        if task == 'evaluate' or task == 'predict':
            test_examples = _load_featureset(test_path, featureset, suffix,
                                             label_col=label_col,
                                             id_col=id_col,
                                             ids_to_floats=ids_to_floats,
                                             quiet=quiet, class_map=class_map,
                                             feature_hasher=feature_hasher,
                                             num_features=hasher_features)
            test_set_size = len(test_examples.ids)
        else:
            test_set_size = 'n/a'

        # create a list of dictionaries of the results information
        learner_result_dict_base = {'experiment_name': experiment_name,
                                    'train_set_name': train_set_name,
                                    'train_set_size': train_set_size,
                                    'test_set_name': test_set_name,
                                    'test_set_size': test_set_size,
                                    'featureset': json.dumps(featureset),
                                    'featureset_name': featureset_name,
                                    'shuffle': shuffle,
                                    'learner_name': learner_name,
                                    'task': task,
                                    'start_timestamp':
                                    start_timestamp.strftime('%d %b %Y %H:%M:'
                                                             '%S.%f'),
                                    'version': __version__,
                                    'feature_scaling': feature_scaling,
                                    'grid_search': grid_search,
                                    'grid_objective': grid_objective,
                                    'grid_search_folds': grid_search_folds,
                                    'min_feature_count': min_feature_count,
                                    'cv_folds': cv_folds,
                                    'save_cv_folds': save_cv_folds,
                                    'stratified_folds': stratified_folds,
                                    'scikit_learn_version': SCIKIT_VERSION}

        # check if we're doing cross-validation, because we only load/save
        # models when we're not.
        task_results = None
        if task == 'cross_validate':
            print('\tcross-validating', file=log_file)
            task_results, grid_scores, skll_fold_ids = learner.cross_validate(
                train_examples, shuffle=shuffle, stratified=stratified_folds,
                prediction_prefix=prediction_prefix, grid_search=grid_search,
                grid_search_folds=grid_search_folds, cv_folds=cv_folds,
                grid_objective=grid_objective, param_grid=param_grid,
                grid_jobs=grid_search_jobs, save_cv_folds=save_cv_folds)
        else:
            # if we do not have a saved model, we need to train one.
            if not exists(modelfile) or overwrite:
                print(('\tfeaturizing and training new ' +
                       '{} model').format(learner_name),
                      file=log_file)

                if not isinstance(cv_folds, int):
                    grid_search_folds = cv_folds

                best_score = learner.train(train_examples,
                                           shuffle=shuffle,
                                           grid_search=grid_search,
                                           grid_search_folds=grid_search_folds,
                                           grid_objective=grid_objective,
                                           param_grid=param_grid,
                                           grid_jobs=grid_search_jobs)
                grid_scores = [best_score]

                # save model
                if model_path:
                    learner.save(modelfile)

                if grid_search:
                    # note: bankers' rounding is used in python 3,
                    # so these scores may be different between runs in
                    # python 2 and 3 at the final decimal place.
                    print('\tbest {} grid search score: {}'
                          .format(grid_objective, round(best_score, 3)),
                          file=log_file)
            else:
                grid_scores = [None]

            # print out the tuned parameters and best CV score
            param_out = ('{}: {}'.format(param_name, param_value)
                         for param_name, param_value in
                         iteritems(learner.model.get_params()))
            print('\thyperparameters: {}'.format(', '.join(param_out)),
                  file=log_file)

            # run on test set or cross-validate on training data,
            # depending on what was asked for

            if task == 'evaluate':
                print('\tevaluating predictions', file=log_file)
                task_results = [learner.evaluate(
                    test_examples, prediction_prefix=prediction_prefix,
                    grid_objective=grid_objective)]
            elif task == 'predict':
                print('\twriting predictions', file=log_file)
                learner.predict(test_examples,
                                prediction_prefix=prediction_prefix)
            # do nothing here for train

        end_timestamp = datetime.datetime.now()
        learner_result_dict_base['end_timestamp'] = end_timestamp.strftime(
            '%d %b %Y %H:%M:%S.%f')
        total_time = end_timestamp - start_timestamp
        learner_result_dict_base['total_time'] = str(total_time)

        if task == 'cross_validate' or task == 'evaluate':
            results_json_path = join(results_path,
                                     '{}.results.json'.format(job_name))

            res = _create_learner_result_dicts(task_results, grid_scores,
                                               learner_result_dict_base)

            # write out the result dictionary to a json file
            file_mode = 'w' if sys.version_info >= (3, 0) else 'wb'
            with open(results_json_path, file_mode) as json_file:
                json.dump(res, json_file, cls=NumpyTypeEncoder)

            with open(join(results_path,
                           '{}.results'.format(job_name)),
                      'w') as output_file:
                _print_fancy_output(res, output_file)
        else:
            res = [learner_result_dict_base]

        # write out the cv folds if required
        if task == 'cross_validate' and save_cv_folds:
            skll_fold_ids_file = experiment_name + '_skll_fold_ids.csv'
            file_mode = 'w' if sys.version_info >= (3, 0) else 'wb'
            with open(join(results_path, skll_fold_ids_file),
                      file_mode) as output_file:
                _write_skll_folds(skll_fold_ids, output_file)

    return res
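
The job above records start and end timestamps, formats them with strftime, and stores the elapsed time as the string form of a timedelta. A minimal sketch of that bookkeeping with a hypothetical result dict:

import datetime
import time

start_timestamp = datetime.datetime.now()
time.sleep(0.05)                                  # stand-in for the actual experiment
end_timestamp = datetime.datetime.now()

learner_result = {
    'start_timestamp': start_timestamp.strftime('%d %b %Y %H:%M:%S.%f'),
    'end_timestamp': end_timestamp.strftime('%d %b %Y %H:%M:%S.%f'),
    'total_time': str(end_timestamp - start_timestamp),   # e.g. "0:00:00.050123"
}
print(learner_result)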

Example 84

Project: RentCrawer Source File: RentCrawler.py
Function: run
    def run(self):
        try:
            print "Crawler is running now."
            # create database
            conn = sqlite3.connect(self.config.db_file)
            conn.text_factory = str
            cursor = conn.cursor()
            cursor.execute(
                'CREATE TABLE IF NOT EXISTS rent(id INTEGER PRIMARY KEY, title TEXT, url TEXT UNIQUE,itemtime timestamp, crawtime timestamp ,author TEXT, source TEXT,keyword TEXT,note TEXT)')
            cursor.close()
            start_time = RentCrawlerUtils.getTimeFromStr(self.config.start_time)
            print "searching data after date ", start_time

            cursor = conn.cursor()

            search_list = list(self.config.key_search_word_list)
            custom_black_list=list(self.config.custom_black_list)

            # New SMTH
            if self.config.newsmth_enable:
                newsmth_main_url = 'http://www.newsmth.net'
                newsmth_regex = r'<table class="board-list tiz"(?:\s|\S)*</td></tr></table>'
                #must do like this
                for keyword in search_list:
                    print '>>>>>>>>>>Search newsmth %s ...' % keyword
                    url = 'http://www.newsmth.net/nForum/s/article?ajax&au&b=HouseRent&t1=' + keyword
                    r = requests.get(url, headers=self.newsmth_headers)
                    if r.status_code == 200:
                        # print r.text
                        match = re.search(newsmth_regex, r.text)
                        if match:
                            try:
                                text = match.group(0)
                                soup = BeautifulSoup(text)
                                for tr in soup.find_all('tr')[1:]:
                                    title_element = tr.find_all(attrs={'class': 'title_9'})[0]
                                    title_text = title_element.text

                                    # exclude titles that match the blacklist
                                    if RentCrawlerUtils.isInBalckList(custom_black_list, title_text):
                                        continue
                                    if RentCrawlerUtils.isInBalckList(self.smth_black_list, title_text):
                                        continue
                                    time_text = tr.find_all(attrs={'class': 'title_10'})[0].text  #13:47:32或者2015-05-12

                                    # skip items earlier than the configured start date
                                    if RentCrawlerUtils.getTimeFromStr(time_text) < start_time:
                                        continue
                                    link_text = newsmth_main_url + title_element.find_all('a')[0].get('href').replace(
                                        '/nForum/article/', '/nForum/#!article/')
                                    author_text = tr.find_all(attrs={'class': 'title_12'})[0].find_all('a')[0].text
                                    try:
                                        cursor.execute(
                                            'INSERT INTO rent(id,title,url,itemtime,crawtime,author,source,keyword,note) VALUES(NULL,?,?,?,?,?,?,?,?)',
                                            [title_text, link_text, RentCrawlerUtils.getTimeFromStr(time_text),
                                             datetime.datetime.now(), author_text, keyword,
                                             'newsmth', ''])
                                        print 'add new data:', title_text, time_text, author_text, link_text, keyword
                                        #/nForum/article/HouseRent/225839 /nForum/#!article/HouseRent/225839
                                    except sqlite3.Error, e:
                                        print 'data exists:', title_text, link_text, e
                            except Exception, e:
                                print "error match table", e
                        else:
                            print "no data"
                    else:
                        print 'request url error %s -status code: %s:' % (url, r.status_code)
            else:
                print 'newsmth not enabled'
            # end newsmth

            #Douban: Beijing Rent,Beijing Rent Douban
            if self.config.douban_enable:
                print 'douban'
                douban_url = ['http://www.douban.com/group/search?group=35417&cat=1013&sort=time&q=',
                              'http://www.douban.com/group/search?group=26926&cat=1013&sort=time&q=',
                              'http://www.douban.com/group/search?group=262626&cat=1013&sort=time&q=',
                              'http://www.douban.com/group/search?group=252218&cat=1013&sort=time&q=',
                              'http://www.douban.com/group/search?group=279962&cat=1013&sort=time&q=',
                              'http://www.douban.com/group/search?group=257523&cat=1013&sort=time&q=',
                              'http://www.douban.com/group/search?group=232413&cat=1013&sort=time&q=',
                              'http://www.douban.com/group/search?group=135042&cat=1013&sort=time&q=',
                              'http://www.douban.com/group/search?group=252091&cat=1013&sort=time&q=',
                              'http://www.douban.com/group/search?group=10479&cat=1013&sort=time&q=',
                              'http://www.douban.com/group/search?group=221207&cat=1013&sort=time&q=']
                douban_url_name = (u'Douban-北京租房', u'Douban-北京租房豆瓣', u'Douban-北京无中介租房',
                                   u'Douban-北京租房专家', u'Douban-北京租房(非中介)', u'Douban-北京租房房东联盟(中介勿扰) ',
                                   u'Douban-北京租房(密探)', u'Douban-北漂爱合租(租房)', u'Douban-豆瓣♥北京♥租房',
                                   u'Douban-吃喝玩乐在北京', u'Douban-北京CBD租房')

                for i in range(len(list(douban_url))):
                    print 'start i->',i
                    for j in range(len(search_list)):
                        keyword = search_list[j]
                        print 'start i->j %s->%s %s' %(i,j,keyword)
                        print '>>>>>>>>>>Search %s  %s ...' % (douban_url_name[i].encode('utf-8'), keyword)
                        url_link = douban_url[i] + keyword
                        r = requests.get(url_link, headers=self.douban_headers)
                        if r.status_code == 200:
                            try:
                                if i==0:
                                    self.douban_headers['Cookie']=r.cookies
                                soup = BeautifulSoup(r.text)
                                table = soup.find_all(attrs={'class': 'olt'})[0]
                                for tr in table.find_all('tr'):
                                    td = tr.find_all('td')

                                    title_element = td[0].find_all('a')[0]
                                    title_text = title_element.get('title')
                                    # exclude titles that match the blacklist
                                    if RentCrawlerUtils.isInBalckList(custom_black_list, title_text):
                                        continue
                                    if RentCrawlerUtils.isInBalckList(self.douban_black_list, title_text):
                                        continue
                                    time_text = td[1].get('title')

                                    # skip items earlier than the configured start date
                                    if RentCrawlerUtils.getTimeFromStr(time_text) < start_time:
                                        continue
                                    link_text = title_element.get('href');

                                    reply_count = td[2].find_all('span')[0].text
                                    try:
                                        cursor.execute(
                                            'INSERT INTO rent(id,title,url,itemtime,crawtime,author,source,keyword,note) VALUES(NULL,?,?,?,?,?,?,?,?)',
                                            [title_text, link_text, RentCrawlerUtils.getTimeFromStr(time_text),
                                             datetime.datetime.now(), '', keyword,
                                             douban_url_name[i], reply_count])
                                        print 'add new data:', title_text, time_text, reply_count, link_text, keyword
                                    except sqlite3.Error, e:
                                        print 'data exists:', title_text, link_text, e
                            except Exception, e:
                                print "error match table", e
                        else:
                            print 'request url error %s -status code: %s:' % (url_link, r.status_code)
                        time.sleep(self.config.douban_sleep_time)
                        #print 'end i->',i
            else:
                print 'douban not enabled'
            #end douban

            cursor.close()

            cursor = conn.cursor()
            cursor.execute('SELECT * FROM rent ORDER BY itemtime DESC ,crawtime DESC')
            values = cursor.fetchall()

            #export to html file
            file = open(self.config.result_file, 'w')
            with file:
                file.writelines('<html><head>')
                file.writelines('<meta http-equiv="Content-Type" content="text/html; charset=utf-8"/>')
                file.writelines('<title>Rent Crawler Result</title></head><body>')
                file.writelines('<table rules=all>')
                file.writelines('<h1>' + prog_info + '</h1>')
                file.writelines(
                    '<tr><td>索引Index</td><td>标题Title</td><td>链接Link</td><td>发帖时间Page Time</td><td>抓取时间Crawl Time</td><td>作者Author</td><td>关键字Keyword</td><td>来源Source</td></tr>')
                for row in values:
                    file.write('<tr>')
                    for member in row:
                        file.write('<td>')
                        member = str(member)
                        if 'http' in member:
                            file.write('<a href="' + member + '" target="_black">' + member + '</a>')
                        else:
                            file.write(member)
                        file.write('</td>')
                    file.writelines('</tr>')
                file.writelines('</table>')
                file.writelines('</body></html>')
            cursor.close()
        except Exception, e:
            print "Error:", e.message
        finally:
            conn.commit()
            conn.close()
            print "Search Finish,Please open result.html to view result"

Example 85

Project: executive-dashboard Source File: portalautomation.py
    def publishfromconfig(self, configFiles, combinedApp=None, dateTimeFormat=None):
        """Parses a JSON configuration file to publish data.

        Args:
            configFiles (list): A list of JSON files on disk containing
                configuration data for publishing.
            combinedApp (str): A JSON file on disk containing configuration data
                for app publishing. Defaults to ``None``.
            dateTimeFormat (str): A valid date formatting directive, as understood
                by :py:meth:`datetime.datetime.strftime`. Defaults to ``None``, i.e.,
                ``'%Y-%m-%d %H:%M'``.

        """
        publishTools = None
        webmaps = None
        config = None
        resultsItems = None
        resultFS = None
        resultMaps = None
        resultApps = None
        combinedResults = None

        if dateTimeFormat is None:
            dateTimeFormat = '%Y-%m-%d %H:%M'

        scriptStartTime = datetime.datetime.now()
        try:

            webmaps = []
            print ("cuem****************Script Started********************")

            print ("Script started at %s" % scriptStartTime.strftime(dateTimeFormat))

            # start report processing (moved out from under ArcREST logic. no AGO credentials needed to run reports)
            for configFile in configFiles:
                config = common.init_config_json(config_file=configFile)
                if config is not None:
                    if 'ReportDetails' in config:
                        if reportToolsInstalled == False:
                            print ("Report section is included in the config file but the solutionreporttools cannot be located")
                        else:
                            reportConfig = config['ReportDetails']
                            # This code checks whether you want to export the data from SDE to a local GDB; the parameter is set in the config file.
                            # Running locally can be a performance gain. If you choose this option, both the report config and the in-memory data prep
                            # config are modified so they point to the local temp location.

                            if 'RunReport' in reportConfig and (str(reportConfig['RunReport']).upper() =="TRUE" or str(reportConfig['RunReport']).upper() =="YES"):
                                reportConfig = ReportTools.reportDataPrep(reportConfig)

                                print ("-----Report Section Starting-----")
                                startTime = datetime.datetime.now()
                                print ("Processing reports in config %s, starting at: %s" % (configFile,startTime.strftime(dateTimeFormat)))
                                ReportTools.create_report_layers_using_config(config=reportConfig)
                                print ("Reports in config %s completed, time to complete: %s" % (configFile, str(datetime.datetime.now() - startTime)))

                                print ("-----Report Section Complete-----")
                    if 'PublishingDetails' in config:
                        publishingConfig = config['PublishingDetails']

                        if 'PublishData' in publishingConfig:
                            publishData = publishingConfig['PublishData']
                        else:
                            print ("PublishingDetails is missing the PublishData parameter:  type string, values, True or False")
                            publishData = 'TRUE'
                        if (str(publishData).upper() =="TRUE" or str(publishData).upper() =="YES"):

                            print (" ")
                            print ("-----Publishing Section Starting-----")
                            startTime = datetime.datetime.now()
                            print ("Processing publishing in config %s, starting at: %s" % (configFile,startTime.strftime(dateTimeFormat)))


                            publishTools = publishingtools.publishingtools(securityinfo=self)
                            if publishTools.valid == False :
                                print ("Error creating publishing tools: %s" % publishTools.message)
                            else:
                                print ("Publishing tools created: %s" % publishTools.message)
                                resultFS = []
                                if 'Items' in publishingConfig:
                                    startSectTime = datetime.datetime.now()
                                    print (" ")
                                    print ("Creating Items: %s" % str(startSectTime.strftime(dateTimeFormat)))
                                    resultsItems = publishTools.publishItems(items_info=publishingConfig['Items'])
                                    print ("Items created, time to complete: %s" % str(datetime.datetime.now() - startSectTime))

                                if 'FeatureCollections' in publishingConfig:
                                    startSectTime = datetime.datetime.now()
                                    print (" ")
                                    print ("Creating Feature Collection: %s" % str(startSectTime.strftime(dateTimeFormat)))
                                    resultFS = publishTools.publishFeatureCollections(configs=publishingConfig['FeatureCollections'])
                                    print ("Feature Collection published, time to complete: %s" % str(datetime.datetime.now() - startSectTime))
                                if 'FeatureServices' in publishingConfig:
                                    startSectTime = datetime.datetime.now()
                                    print (" ")
                                    print ("Creating Feature Services: %s" % str(startSectTime.strftime(dateTimeFormat)))
                                    res = publishTools.publishFsFromMXD(fs_config=publishingConfig['FeatureServices'])
                                    if res is None:
                                        return
                                    resultFS = resultFS + res
                                    if len(resultFS) == 0:
                                        print ("Exiting, error creating feature services")
                                        return
                                    print ("Feature Services published, time to complete: %s" % str(datetime.datetime.now() - startSectTime))
                                if 'ExistingServices' in publishingConfig:

                                    startSectTime = datetime.datetime.now()
                                    print (" ")
                                    print ("Updating Existing Feature Services: %s" % str(startSectTime.strftime(dateTimeFormat)))
                                    resultES = publishTools.updateFeatureService(efs_config=publishingConfig['ExistingServices'])
                                    print ("Updating Existing Feature Services completed, time to complete: %s" % str(datetime.datetime.now() - startSectTime))
                                if 'MapDetails' in publishingConfig:
                                    startSectTime = datetime.datetime.now()
                                    print (" ")
                                    print ("Creating maps: %s" % str(startSectTime.strftime(dateTimeFormat)))
                                    resultMaps = publishTools.publishMap(maps_info=publishingConfig['MapDetails'],fsInfo=resultFS,itInfo=resultsItems)
                                    for maps in resultMaps:
                                        if 'MapInfo' in maps:
                                            if 'Results' in maps['MapInfo']:
                                                if 'itemId' in maps['MapInfo']['Results']:
                                                    webmaps.append(maps['MapInfo']['Results']['itemId'])
                                    print ("Creating maps completed, time to complete: %s" % str(datetime.datetime.now() - startSectTime))
                                if 'AppDetails' in publishingConfig:
                                    startSectTime = datetime.datetime.now()
                                    print (" ")
                                    print ("Creating apps: %s" % str(startSectTime.strftime(dateTimeFormat)))
                                    resultApps = publishTools.publishApp(app_info=publishingConfig['AppDetails'],map_info=resultMaps,fsInfo=resultFS)
                                    print ("Creating apps completed, time to complete: %s" % str(datetime.datetime.now() - startSectTime))


                            print ("Publishing complete in config %s completed, time to complete: %s" % (configFile, str(datetime.datetime.now() - startTime)))

                            print ("-----Publishing Section Complete-----")

                else:
                    print ("Config %s not found" % configFile)

            if combinedApp:
                if os.path.exists(combinedApp):
                    print (" ")
                    startSectTime = datetime.datetime.now()
                    print ("Creating combined result: %s" % str(startSectTime.strftime(dateTimeFormat)))

                    config = common.init_config_json(config_file=combinedApp)
                    combinedResults = publishTools.publishCombinedWebMap(maps_info=config['PublishingDetails']['MapDetails'],webmaps=webmaps)
                    if 'PublishingDetails' in config:
                        publishingConfig = config['PublishingDetails']

                        if 'PublishData' in publishingConfig:
                            publishData = publishingConfig['PublishData']
                        else:
                            print ("PublishingDetails is missing the PublishData parameter:  type string, values, True or False")
                            publishData = 'TRUE'

                        if (str(publishData).upper() =="TRUE" or str(publishData).upper() =="YES"):

                            if 'AppDetails' in publishingConfig:
                                resultApps = publishTools.publishApp(app_info=publishingConfig['AppDetails'],map_info=combinedResults)
                            print ("Creating combind result completed, time to complete: %s" % str(datetime.datetime.now() - startSectTime))
        except(TypeError,ValueError,AttributeError) as e:
            print (e)
        except (common.ArcRestHelperError) as e:
            print ("error in function: %s" % e[0]['function'])
            print ("error on line: %s" % e[0]['line'])
            print ("error in file name: %s" % e[0]['filename'])
            print ("with error message: %s" % e[0]['synerror'])
            if 'arcpyError' in e[0]:
                print ("with arcpy message: %s" % e[0]['arcpyError'])

        except Exception as e:
            if (reportToolsInstalled):
                if isinstance(e,(ReportTools.ReportToolsError,DataPrep.DataPrepError)):
                    print ("error in function: %s" % e[0]['function'])
                    print ("error on line: %s" % e[0]['line'])
                    print ("error in file name: %s" % e[0]['filename'])
                    print ("with error message: %s" % e[0]['synerror'])
                    if 'arcpyError' in e[0]:
                        print ("with arcpy message: %s" % e[0]['arcpyError'])
                else:
                    line, filename, synerror = trace()
                    print ("error on line: %s" % line)
                    print ("error in file name: %s" % filename)
                    print ("with error message: %s" % synerror)
            else:
                line, filename, synerror = trace()
                print ("error on line: %s" % line)
                print ("error in file name: %s" % filename)
                print ("with error message: %s" % synerror)

        finally:
            print ("Script complete, time to complete: %s" % str(datetime.datetime.now() - scriptStartTime))
            print ("###############Script Completed#################")
            print ("")
            if publishTools is not None:
                publishTools.dispose()

            publishTools = None
            webmaps = None
            config = None
            resultFS = None
            resultsItems = None
            resultMaps = None
            resultApps = None
            combinedResults = None

            del publishTools
            del webmaps
            del config
            del resultFS
            del resultMaps
            del resultApps
            del combinedResults
            del resultsItems
            gc.collect()
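
Example 85 uses datetime.datetime.now() in two ways: a formatted wall-clock stamp via strftime(dateTimeFormat), and elapsed-time reporting by subtracting a saved start time from a fresh now(). A minimal sketch of both, assuming the same default format string:

import datetime
import time

DATE_TIME_FORMAT = '%Y-%m-%d %H:%M'   # same default the example falls back to

script_start = datetime.datetime.now()
print("Script started at %s" % script_start.strftime(DATE_TIME_FORMAT))

section_start = datetime.datetime.now()
time.sleep(0.5)                        # stand-in for real work
# Subtracting two datetimes yields a timedelta; str() renders it as H:MM:SS.ffffff.
print("Section completed, time to complete: %s" % str(datetime.datetime.now() - section_start))

print("Script complete, time to complete: %s" % str(datetime.datetime.now() - script_start))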

Example 86

Project: certitude Source File: hashscan_queue.py
def demarrer_scanner(hWaitStop=None, batch=None):
    logginghashscan.info('Starting an Hash scanner instance : ' + threadname)

    print ''
    print '\tPlease log in to launch scan'
    print ''
    username = raw_input('Username: ')
    password = getpass.getpass('Password: ')
    print ''

    # Get user
    u = session.query(User).filter_by(username = username).first()

    # No user or bad password
    if not u or hashPassword(password) != u.password:
        logginghashscan.critical('Username or password incorrect, stopping the initialization...')
        raw_input()
        return

    # Get KEY and decrypt MASTER_KEY
    keyFromPassword = crypto.keyFromText(password, base64.b64decode(u.b64_kdf_salt))
    MASTER_KEY = crypto.decrypt(u.encrypted_master_key, keyFromPassword)

    mk_cksum = session.query(GlobalConfig).filter_by(key = 'master_key_checksum').first()

    # No checksum in config ???
    if not mk_cksum:
        logginghashscan.critical('Database is broken, please create a new one, stopping the initialization...')
        del MASTER_KEY
        raw_input()
        return

    # Someone has been playing with the database !
    if checksum(MASTER_KEY)!=mk_cksum.value:
        logginghashscan.critical('MASTER_KEY may have been altered, stopping the initialization...')
        del MASTER_KEY
        raw_input()
        return

    logginghashscan.info('Login successful !')
    # INITIALIZATION

    # TODO : initialise all IOCs in DB, then link them to CP

    all_xmliocs = session.query(XMLIOC).order_by(XMLIOC.name.asc())
    all_cp = session.query(ConfigurationProfile).order_by(ConfigurationProfile.name.asc())

    ioc_by_cp = {}
    for cp in all_cp:
        if cp.ioc_list == '':
            logginghashscan.warning('No IOC defined for profile "%s"' % cp.name)
            continue
            
        ioc_by_cp[cp.id] = []
        for e in cp.ioc_list.split(','):
            ioc_by_cp[cp.id].append(int(e))
            
    tree_by_ioc = {}


    # Retrieves evaluators for current mode
    FLAT_MODE = (IOC_MODE == 'flat')
    allowedElements = {}
    evaluatorList = hash_modules.flatEvaluatorList if FLAT_MODE else hash_modules.logicEvaluatorList
    
    for name, classname in evaluatorList.items():
        allowedElements[name] = classname.evalList

    # Parse XML Ioc into IOC trees according to what we can do
    for xmlioc in all_xmliocs:

        content = base64.b64decode(xmlioc.xml_content)
        oip = openiocparser.OpenIOCParser(content, allowedElements, FLAT_MODE, fromString=True)
        oip.parse()
        iocTree = oip.getTree()

        # Trees may be stripped of invalid elements
        if iocTree is not None:
            tree_by_ioc[xmlioc.id] = {'name':xmlioc.name, 'tree':iocTree}

    # Each configuration profile has a set of trees
    tree_by_cp = {cpid: {i:tree_by_ioc[i] for i in ioclist} for (cpid, ioclist) in ioc_by_cp.items()}

    halt = False
    tache = None
    batchquery = None

    # Batch filtering
    if batch is not None:
        logginghashscan.info('Filtering for batch "%s"' % batch)
        batchquery = session.query(Batch).filter( Batch.name == batch).first()

        if batchquery is None:
            logginghashscan.error('Unknown batch "%s" ...' % batch)
            halt = True

    # LAUNCH
    # Main loop
    while not halt:
        try:

            # Get targets to be scanned
            # and that are not currently being scanned
            # and that still have retries left
            queue = session.query(Task).filter_by(hashscanned=False, reserved_ioc=False, reserved_hash=False).filter(Task.retries_left_hash > 0)

            # Batch filtering
            if batchquery is not None:
                queue = queue.filter_by(batch_id = batchquery.id)

            taille_queue = queue.count()

            # Compute the cutoff: targets retried more recently than this are still recovering from their last scan
            # Get targets whose last retry is NULL or earlier than that cutoff
            limite_a_reessayer = datetime.datetime.now() - datetime.timedelta(0, SECONDES_ENTRE_TENTATIVES)
            a_scanner = queue.filter(or_(Task.last_retry_hash <= limite_a_reessayer, Task.last_retry_hash == None))
            taille_a_scanner = a_scanner.count()

            # Reads this list
            while taille_a_scanner > 0:

                # Max priority
                priorite_max = a_scanner.order_by(Task.priority_hash.desc()).first().priority_hash
                taches_priorite_max = a_scanner.filter(Task.priority_hash==priorite_max)
                nbre_taches_priorite_max = taches_priorite_max.count()
                if BASE_DE_DONNEES_QUEUE.startswith('sqlite'):
                    tache = taches_priorite_max.order_by(func.random()).first()
                else:
                    tache = taches_priorite_max.order_by(func.newid()).first()

                # Mutex on the task
                tache.reserved_hash = True
                tache.date_debut = datetime.datetime.now()
                session.commit()

                logginghashscan.debug('===============================================================================')
                logginghashscan.debug('Wake up, there is work to do !')
                logginghashscan.info('Queue size : ' + str(taille_queue) + ', including ' + str(taille_a_scanner) + ' to scan, including ' + str(nbre_taches_priorite_max) + ' at top priority (' + str(priorite_max) + ')')

                logginghashscan.debug('  --------------------------------')
                logginghashscan.info('         Starting Hash Scan')
                logginghashscan.info('        Target : ' + str(tache.ip))
                logginghashscan.debug('  --------------------------------')

                # Recover Windows Credential and Configuration Profile from Batch
                batch = session.query(Batch).filter_by(id = tache.batch_id).first()
                wc = session.query(WindowsCredential).filter_by(id = batch.windows_credential_id).first()
                cp = session.query(ConfigurationProfile).filter_by(id = batch.configuration_profile_id).first()

                if not wc:
                    raise Exception('WindowsCredential %d does not exist' % tache.windows_credential_id)

                if not cp:
                    raise Exception('ConfigurationProfile %d does not exist' % tache.configuration_profile_id)

                # Decrypt password using MASTER_KEY and create target object
                targetPassword = crypto.decrypt(wc.encrypted_password, MASTER_KEY)
                targetObject = {'ip':       tache.ip,
                                'login':    wc.login,
                                'password': targetPassword,
                                'domain':   wc.domain,
                                }

                # If high confidentiality is enabled, create local directory if needed
                if cp.host_confidential:
                    logginghashscan.info('"High confidentiality" mode enabled')
                    testdir = os.path.join(IOC_COMPONENT_ROOT, IOC_CONFIDENTIAL_DIRECTORY)
                    if not os.path.isdir(testdir):
                        logginghashscan.info('Creating confidential directory %s' % testdir)
                        os.makedirs(testdir)

                # Let the scan begin
                if cp.id in tree_by_cp.keys():
                    resultats_scan = scan(targetObject, tree_by_cp[cp.id], cp.host_confidential)
                else:
                    logginghashscan.warning('No IOC to scan (profile=%s)' % cp.name)
                    resultats_scan = {}

                # Analyze the results
                analyse(resultats_scan, tache)

                # Update queue size
                taille_a_scanner = a_scanner.count()

                try:
                    # If launched as a service (probably removed soon, TODO)
                    halt = (win32event.WaitForSingleObject(hWaitStop, 2000) == win32event.WAIT_OBJECT_0)
                except:
                    pass
                if halt:
                    # Stop signal encountered
                    break

            if halt:
                logginghashscan.info('Stopping Hash scanner : ' + threadname)
                break
            logginghashscan.debug('(Hash scanner sleeping for ' + str(SLEEP) + ' seconds...)' \
                + (' (' + str(taille_queue) + ' waiting)' if taille_queue > 0 else ''))
            time.sleep(SLEEP)
        except KeyboardInterrupt:
            halt = True
        except Exception, e:
            logginghashscan.error('Exception caught : %s, %s, %s' % (repr(e), str(e.message), str(e)))

            # Cancel changes and unreserve task
            session.rollback()
            if tache is not None:
                tache.reserved_hash = False
                tache.retries_left_hash = max(0,tache.retries_left_hash - 1)
            session.commit()
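
The notable datetime.datetime.now() usage above is the retry cutoff: now() minus a timedelta gives a point in time, and only tasks whose last retry is NULL or older than that point are picked up; each selected task is then stamped with a fresh now(). A minimal sketch of the same idea using plain dictionaries instead of the SQLAlchemy query (the interval and task data are made up for illustration):

import datetime

RETRY_INTERVAL_SECONDS = 90 * 60   # illustrative value; the example reads it from configuration

# Pretend task queue: each task records when it was last retried (or None).
tasks = [
    {'ip': '10.0.0.1', 'last_retry': None},
    {'ip': '10.0.0.2', 'last_retry': datetime.datetime.now() - datetime.timedelta(hours=3)},
    {'ip': '10.0.0.3', 'last_retry': datetime.datetime.now() - datetime.timedelta(minutes=5)},
]

# A target becomes eligible again once its last retry is older than the cutoff,
# mirroring the limite_a_reessayer filter above.
cutoff = datetime.datetime.now() - datetime.timedelta(seconds=RETRY_INTERVAL_SECONDS)
to_scan = [t for t in tasks if t['last_retry'] is None or t['last_retry'] <= cutoff]

for task in to_scan:
    task['started_at'] = datetime.datetime.now()   # timestamp the task, like tache.date_debut
    print('scanning %s' % task['ip'])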

Example 87

Project: kaggle-heart Source File: train.py
def train_model(expid):
    metadata_path = MODEL_PATH + "%s.pkl" % expid

    if theano.config.optimizer != "fast_run":
        print "WARNING: not running in fast mode!"

    data_loader.filter_patient_folders()

    print "Build model"
    interface_layers = config().build_model()

    output_layers = interface_layers["outputs"]
    input_layers = interface_layers["inputs"]
    top_layer = lasagne.layers.MergeLayer(
        incomings=output_layers.values()
    )
    all_layers = lasagne.layers.get_all_layers(top_layer)

    all_params = lasagne.layers.get_all_params(top_layer, trainable=True)
    if "cutoff_gradients" in interface_layers:
        submodel_params = [param for value in interface_layers["cutoff_gradients"] for param in lasagne.layers.get_all_params(value)]
        all_params = [p for p in all_params if p not in submodel_params]

    if "pretrained" in interface_layers:
        for config_name, layers_dict in interface_layers["pretrained"].iteritems():
            pretrained_metadata_path = MODEL_PATH + "%s.pkl" % config_name.split('.')[1]
            pretrained_resume_metadata = np.load(pretrained_metadata_path)
            pretrained_top_layer = lasagne.layers.MergeLayer(
                incomings = layers_dict.values()
            )
            lasagne.layers.set_all_param_values(pretrained_top_layer, pretrained_resume_metadata['param_values'])

    num_params = sum([np.prod(p.get_value().shape) for p in all_params])

    print string.ljust("  layer output shapes:",36),
    print string.ljust("#params:",10),
    print string.ljust("#data:",10),
    print "output shape:"
    for layer in all_layers[:-1]:
        name = string.ljust(layer.__class__.__name__, 32)
        num_param = sum([np.prod(p.get_value().shape) for p in layer.get_params()])
        num_param = string.ljust(int(num_param).__str__(), 10)
        num_size = string.ljust(np.prod(layer.output_shape[1:]).__str__(), 10)
        print "    %s %s %s %s" % (name,  num_param, num_size, layer.output_shape)
    print "  number of parameters: %d" % num_params

    obj = config().build_objective(interface_layers)

    train_loss_theano = obj.get_loss()
    kaggle_loss_theano = obj.get_kaggle_loss()
    segmentation_loss_theano = obj.get_segmentation_loss()

    validation_other_losses = collections.OrderedDict()
    validation_train_loss = obj.get_loss(average=False, deterministic=True, validation=True, other_losses=validation_other_losses)
    validation_kaggle_loss = obj.get_kaggle_loss(average=False, deterministic=True, validation=True)
    validation_segmentation_loss = obj.get_segmentation_loss(average=False, deterministic=True, validation=True)


    xs_shared = {
        key: lasagne.utils.shared_empty(dim=len(l_in.output_shape), dtype='float32') for (key, l_in) in input_layers.iteritems()
    }

    # contains target_vars of the objective! Not the output layers desired values!
    # There can be more output layers than are strictly required for the objective
    # e.g. for debugging

    ys_shared = {
        key: lasagne.utils.shared_empty(dim=target_var.ndim, dtype='float32') for (key, target_var) in obj.target_vars.iteritems()
    }

    learning_rate_schedule = config().learning_rate_schedule

    learning_rate = theano.shared(np.float32(learning_rate_schedule[0]))
    idx = T.lscalar('idx')

    givens = dict()
    for key in obj.target_vars.keys():
        if key=="segmentation":
            givens[obj.target_vars[key]] = ys_shared[key][idx*config().sunny_batch_size : (idx+1)*config().sunny_batch_size]
        else:
            givens[obj.target_vars[key]] = ys_shared[key][idx*config().batch_size : (idx+1)*config().batch_size]

    for key in input_layers.keys():
        if key=="sunny":
            givens[input_layers[key].input_var] = xs_shared[key][idx*config().sunny_batch_size:(idx+1)*config().sunny_batch_size]
        else:
            givens[input_layers[key].input_var] = xs_shared[key][idx*config().batch_size:(idx+1)*config().batch_size]

    updates = config().build_updates(train_loss_theano, all_params, learning_rate)

    #grad_norm = T.sqrt(T.sum([(g**2).sum() for g in theano.grad(train_loss_theano, all_params)]))
    #theano_printer.print_me_this("Grad norm", grad_norm)

    iter_train = theano.function([idx], [train_loss_theano, kaggle_loss_theano, segmentation_loss_theano] + theano_printer.get_the_stuff_to_print(),
                                 givens=givens, on_unused_input="ignore", updates=updates,
                                 # mode=NanGuardMode(nan_is_error=True, inf_is_error=True, big_is_error=True)
                                 )
    iter_validate = theano.function([idx], [validation_train_loss, validation_kaggle_loss, validation_segmentation_loss] + [v for _, v in validation_other_losses.items()] + theano_printer.get_the_stuff_to_print(),
                                    givens=givens, on_unused_input="ignore")

    num_chunks_train = int(config().num_epochs_train * NUM_TRAIN_PATIENTS / (config().batch_size * config().batches_per_chunk))
    print "Will train for %d chunks" % num_chunks_train
    if config().restart_from_save and os.path.isfile(metadata_path):
        print "Load model parameters for resuming"
        resume_metadata = np.load(metadata_path)
        lasagne.layers.set_all_param_values(top_layer, resume_metadata['param_values'])
        start_chunk_idx = resume_metadata['chunks_since_start'] + 1
        chunks_train_idcs = range(start_chunk_idx, num_chunks_train)

        # set lr to the correct value
        current_lr = np.float32(utils.current_learning_rate(learning_rate_schedule, start_chunk_idx))
        print "  setting learning rate to %.7f" % current_lr
        learning_rate.set_value(current_lr)
        losses_train = resume_metadata['losses_train']
        losses_eval_valid = resume_metadata['losses_eval_valid']
        losses_eval_train = resume_metadata['losses_eval_train']
        losses_eval_valid_kaggle = [] #resume_metadata['losses_eval_valid_kaggle']
        losses_eval_train_kaggle = [] #resume_metadata['losses_eval_train_kaggle']
    else:
        chunks_train_idcs = range(num_chunks_train)
        losses_train = []
        losses_eval_valid = []
        losses_eval_train = []
        losses_eval_valid_kaggle = []
        losses_eval_train_kaggle = []


    create_train_gen = partial(config().create_train_gen,
                               required_input_keys = xs_shared.keys(),
                               required_output_keys = ys_shared.keys()# + ["patients"],
                               )


    create_eval_valid_gen = partial(config().create_eval_valid_gen,
                                   required_input_keys = xs_shared.keys(),
                                   required_output_keys = ys_shared.keys()# + ["patients"]
                                   )

    create_eval_train_gen = partial(config().create_eval_train_gen,
                                   required_input_keys = xs_shared.keys(),
                                   required_output_keys = ys_shared.keys()
                                   )

    print "Train model"
    start_time = time.time()
    prev_time = start_time

    num_batches_chunk = config().batches_per_chunk


    for e, train_data in izip(chunks_train_idcs, buffering.buffered_gen_threaded(create_train_gen())):
        print "Chunk %d/%d" % (e + 1, num_chunks_train)
        epoch = (1.0 * config().batch_size * config().batches_per_chunk * (e+1) / NUM_TRAIN_PATIENTS)
        print "  Epoch %.1f" % epoch

        for key, rate in learning_rate_schedule.iteritems():
            if epoch >= key:
                lr = np.float32(rate)
                learning_rate.set_value(lr)
        print "  learning rate %.7f" % lr

        if config().dump_network_loaded_data:
            pickle.dump(train_data, open("data_loader_dump_train_%d.pkl"%e, "wb"))

        for key in xs_shared:
            xs_shared[key].set_value(train_data["input"][key])

        for key in ys_shared:
            ys_shared[key].set_value(train_data["output"][key])

        #print "train:", sorted(train_data["output"]["patients"])
        losses = []
        kaggle_losses = []
        segmentation_losses = []
        for b in xrange(num_batches_chunk):
            iter_result = iter_train(b)

            loss, kaggle_loss, segmentation_loss = tuple(iter_result[:3])
            utils.detect_nans(loss, xs_shared, ys_shared, all_params)
 
            losses.append(loss)
            kaggle_losses.append(kaggle_loss)
            segmentation_losses.append(segmentation_loss)

        mean_train_loss = np.mean(losses)
        print "  mean training loss:\t\t%.6f" % mean_train_loss
        losses_train.append(mean_train_loss)

        print "  mean kaggle loss:\t\t%.6f" % np.mean(kaggle_losses)
        print "  mean segment loss:\t\t%.6f" % np.mean(segmentation_losses)

        if ((e + 1) % config().validate_every) == 0:
            print
            print "Validating"
            if config().validate_train_set:
                subsets = ["validation", "train"]
                gens = [create_eval_valid_gen, create_eval_train_gen]
                losses_eval = [losses_eval_valid, losses_eval_train]
                losses_kaggle = [losses_eval_valid_kaggle, losses_eval_train_kaggle]
            else:
                subsets = ["validation"]
                gens = [create_eval_valid_gen]
                losses_eval = [losses_eval_valid]
                losses_kaggle = [losses_eval_valid_kaggle]

            for subset, create_gen, losses_validation, losses_kgl in zip(subsets, gens, losses_eval, losses_kaggle):

                vld_losses = []
                vld_kaggle_losses = []
                vld_segmentation_losses = []
                vld_other_losses = {k:[] for k,_ in validation_other_losses.items()}
                print "  %s set (%d samples)" % (subset, get_number_of_validation_samples(set=subset))

                for validation_data in buffering.buffered_gen_threaded(create_gen()):
                    num_batches_chunk_eval = config().batches_per_chunk

                    if config().dump_network_loaded_data:
                        pickle.dump(validation_data, open("data_loader_dump_valid_%d.pkl"%e, "wb"))

                    for key in xs_shared:
                        xs_shared[key].set_value(validation_data["input"][key])

                    for key in ys_shared:
                        ys_shared[key].set_value(validation_data["output"][key])

                    #print "validate:", validation_data["output"]["patients"]

                    for b in xrange(num_batches_chunk_eval):
                        losses = tuple(iter_validate(b)[:3+len(validation_other_losses)])
                        loss, kaggle_loss, segmentation_loss = losses[:3]
                        other_losses = losses[3:]
                        vld_losses.extend(loss)
                        vld_kaggle_losses.extend(kaggle_loss)
                        vld_segmentation_losses.extend(segmentation_loss)
                        for k, other_loss in zip(validation_other_losses, other_losses):
                            vld_other_losses[k].extend(other_loss)

                vld_losses = np.array(vld_losses)
                vld_kaggle_losses = np.array(vld_kaggle_losses)
                vld_segmentation_losses = np.array(vld_segmentation_losses)
                for k in validation_other_losses:
                    vld_other_losses[k] = np.array(vld_other_losses[k])

                # now select only the relevant section to average
                sunny_len = get_lenght_of_set(name="sunny", set=subset)
                regular_len = get_lenght_of_set(name="regular", set=subset)
                num_valid_samples = get_number_of_validation_samples(set=subset)

                #print losses[:num_valid_samples]
                #print kaggle_losses[:regular_len]
                #print segmentation_losses[:sunny_len]
                loss_to_save = obj.compute_average(vld_losses[:num_valid_samples])
                print "  mean training loss:\t\t%.6f" % loss_to_save
                print "  mean kaggle loss:\t\t%.6f"   % np.mean(vld_kaggle_losses[:regular_len])
                print "  mean segment loss:\t\t%.6f"  % np.mean(vld_segmentation_losses[:sunny_len])
                # print "    acc:\t%.2f%%" % (acc * 100)
                for k, v in vld_other_losses.items():
                    print "  mean %s loss:\t\t%.6f"  % (k, obj.compute_average(v[:num_valid_samples], loss_name=k))
                print

                losses_validation.append(loss_to_save)

                kaggle_to_save = np.mean(vld_kaggle_losses[:regular_len])
                losses_kgl.append(kaggle_to_save)

        now = time.time()
        time_since_start = now - start_time
        time_since_prev = now - prev_time
        prev_time = now
        est_time_left = time_since_start * (float(num_chunks_train - (e + 1)) / float(e + 1 - chunks_train_idcs[0]))
        eta = datetime.now() + timedelta(seconds=est_time_left)
        eta_str = eta.strftime("%c")
        print "  %s since start (%.2f s)" % (utils.hms(time_since_start), time_since_prev)
        print "  estimated %s to go (ETA: %s)" % (utils.hms(est_time_left), eta_str)
        print

        if ((e + 1) % config().save_every) == 0:
            print
            print "Saving metadata, parameters"

            with open(metadata_path, 'w') as f:
                pickle.dump({
                    'metadata_path': metadata_path,
                    'configuration_file': config().__name__,
                    'git_revision_hash': utils.get_git_revision_hash(),
                    'experiment_id': expid,
                    'chunks_since_start': e,
                    'losses_train': losses_train,
                    'losses_eval_train': losses_eval_train,
                    'losses_eval_train_kaggle': losses_eval_train_kaggle,
                    'losses_eval_valid': losses_eval_valid,
                    'losses_eval_valid_kaggle': losses_eval_valid_kaggle,
                    'time_since_start': time_since_start,
                    'param_values': lasagne.layers.get_all_param_values(top_layer)
                }, f, pickle.HIGHEST_PROTOCOL)

            print "  saved to %s" % metadata_path
            print

    # store all known outputs from last batch:
    if config().take_a_dump:
        all_theano_variables = [train_loss_theano, kaggle_loss_theano, segmentation_loss_theano] + theano_printer.get_the_stuff_to_print()
        for layer in all_layers[:-1]:
            all_theano_variables.append(lasagne.layers.helper.get_output(layer))

        iter_train = theano.function([idx], all_theano_variables,
                                     givens=givens, on_unused_input="ignore", updates=updates,
                                     # mode=NanGuardMode(nan_is_error=True, inf_is_error=True, big_is_error=True)
                                     )
        train_data["intermediates"] = iter_train(0)
        pickle.dump(train_data, open(metadata_path + "-dump", "wb"))

    return
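
This example imports the class directly (presumably via "from datetime import datetime, timedelta", which is not shown here), so the same API appears as datetime.now(). It estimates the time remaining from the chunks processed so far and adds that estimate as a timedelta to now() to get a wall-clock ETA. A minimal sketch of the ETA calculation with made-up chunk counts:

import time
from datetime import datetime, timedelta

start_time = time.time()
time.sleep(1.0)                             # stand-in for processing one training chunk

chunks_done, chunks_total = 1, 10
elapsed = time.time() - start_time
est_seconds_left = elapsed * (chunks_total - chunks_done) / float(chunks_done)

# Adding a timedelta to "now" gives the wall-clock ETA, formatted with strftime.
eta = datetime.now() + timedelta(seconds=est_seconds_left)
print("estimated %.0f s to go (ETA: %s)" % (est_seconds_left, eta.strftime("%c")))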

Example 88

Project: btb Source File: org_status_email.py
def send_org_mail(org):
    ctx = {'site': Site.objects.get_current(), 'org': org}
    # Get a random org moderator for permissioning.
    now = datetime.datetime.now()

    try:
        org_user = org.moderators.all()[0]
    except IndexError:
        print "No moderators found for {0}; skipping.".format(org.name)
        return

    # Pending scans.
    ps =  PendingScan.objects.org_filter(org_user).filter(
                completed__isnull=True
            ).order_by('created')
    finished_ps = PendingScan.objects.org_filter(org_user).filter(
                completed__isnull=False
            ).order_by('-completed')
    ctx['pendingscans'] = { 'count': ps.count(), }
    if ctx['pendingscans']['count'] > 0:
        ctx['pendingscans']['oldest'] = ps[0]
        overdue = ctx['pendingscans']['oldest'].created < \
            (now - datetime.timedelta(days=7))
        ctx['pendingscans']['overdue'] = overdue
    try:
        ctx['pendingscans']['last_completed'] = finished_ps[0]
    except IndexError:
        pass

    # Scans.
    scans = Scan.objects.org_filter(org_user).filter(
            processing_complete=False,
        ).exclude(
            author__profile__managed=False
        ).order_by('created')
    finished_scans =  Scan.objects.org_filter(org_user).filter(
            processing_complete=True
        ).order_by('-modified')
    ctx['scans'] = {'count': scans.count()}
    if ctx['scans']['count'] > 0:
        ctx['scans']['oldest'] = scans.order_by('created')[0]
        ctx['scans']['overdue'] = ctx['scans']['oldest'].created < \
                (now - datetime.timedelta(days=7))
    try:
        ctx['scans']['last_completed'] = finished_scans[0]
    except IndexError:
        pass

    # Documents.
    all_docs = Document.objects.org_filter(org_user).exclude(
            author__profile__managed=False,
        ).exclude(
            scan__isnull=True
        )
    docs = all_docs.filter(status="unknown").order_by('scan__created')
    finished_docs =  all_docs.filter(status="unknown").order_by('-modified')
    
    ctx['documents'] = { 'count': docs.count() }
    if ctx['documents']['count'] > 0:
        ctx['documents']['oldest'] = docs.order_by('scan__created')[0]
        overdue = ctx['documents']['oldest'].scan.created < \
                (now - datetime.timedelta(days=14))
        ctx['documents']['overdue'] = overdue
    try:
        ctx['documents']['last_completed'] = finished_docs[0]
    except IndexError:
        pass

    # Outgoing mail
    needed = needed_letters(org_user).items()
    ctx['outgoing_mail'] = {}
    for letter_type, recipients in needed:
        all_letters = Letter.objects.mail_filter(org_user).filter(
                type=letter_type
        )
        try:
            latest = Letter.objects.mail_filter(org_user).filter(
                    sent__isnull=False
            ).order_by('-sent')[0]
        except IndexError:
            latest = None
        ctx['outgoing_mail'][letter_type] = {
            'count': recipients.count(),
            'last_completed': latest,
        }
        if ctx['outgoing_mail'][letter_type]['count'] > 0:
            if letter_type in ('waitlist', 'consent_form'):
                due_since = recipients.order_by('user__date_joined')[0].user.date_joined
            elif letter_type == 'enqueued':
              due_since = recipients.order_by('created')[0].created
            elif letter_type == 'comments':
              due_since = Comment.objects.unmailed().order_by('created')[0].created
            elif letter_type == 'signup_complete':
                try:
                    due_since = Document.objects.filter(
                            type='license',
                    ).exclude(
                        author__received_letters__type="signup_complete"
                    ).order_by('created')[0].created
                except IndexError:
                    due_since = None
            elif letter_type == 'first_post':
                try:
                    due_since = Document.objects.public().filter(
                        Q(type='post') | Q(type='profile')
                    ).exclude(
                        author__received_letters__type="first_post"
                    ).order_by('created')[0].created
                except IndexError:
                    due_since = None
            else:
                due_since = None
            if due_since:
                ctx['outgoing_mail'][letter_type]['due_since'] = due_since
                if letter_type != 'consent_form':
                    ctx['outgoing_mail'][letter_type]['overdue'] = due_since < (
                        now - datetime.timedelta(days=7)
                    )

    # Tickets
    tickets = Note.objects.org_filter(org_user).filter(
            resolved__isnull=True
    )
    finished_tickets =  Note.objects.org_filter(org_user).filter(
            resolved__isnull=False
        ).order_by('-resolved')
    ctx['tickets'] = { 'count': tickets.count() }
    if ctx['tickets']['count'] > 0:
        ctx['tickets']['important'] = tickets.filter(important=True).count()
        ctx['tickets']['oldest'] = tickets.order_by('created')[0]
        overdue = ctx['tickets']['oldest'].created < \
                (now - datetime.timedelta(days=14))
        ctx['tickets']['overdue'] = overdue
    try:
        ctx['tickets']['last_completed'] = finished_tickets[0]
    except IndexError:
        pass
    

    ctx['inbox_zero_distance'] = 0
    for kind in ('scans', 'documents', 'tickets', 'pendingscans'):
        ctx['inbox_zero_distance'] += ctx[kind]['count']
    for letter_type, details in ctx['outgoing_mail'].iteritems():
        if letter_type != 'consent_form':
            ctx['inbox_zero_distance'] += details['count']

    # Flavor
    with open(os.path.join(
                os.path.dirname(__file__),
                "obscure.txt"
            )) as fh:
        lines = fh.read().split("\n")
        word, definition = random.choice(lines).split(" ", 1)
        ctx['adjective'] = {
            'word': word,
            'definition': definition,
        }
    with open(os.path.join(
                os.path.dirname(__file__),
                "collective_nouns.json"
            )) as fh:
        collective_nouns = json.load(fh).items()
        noun = random.choice(collective_nouns)
        ctx['collective_noun'] = {
            'thing': noun[0],
            'names_and_conditions': noun[1],
        }
    
    t = loader.get_template("btb/org_status_email.html")
    html = t.render(Context(ctx))

    mail_managers(
        subject="{0} clicks away from Inbox Zero".format(
            ctx['inbox_zero_distance']
        ),
        message="",
        html_message=html,
    )
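
Example 88 takes a single datetime.datetime.now() snapshot at the top and reuses it for every "overdue" check: an item is overdue when its creation time is earlier than now minus a timedelta of N days. A minimal sketch of that comparison with an illustrative timestamp:

import datetime

OVERDUE_AFTER = datetime.timedelta(days=7)

now = datetime.datetime.now()
oldest_created = now - datetime.timedelta(days=9)   # pretend the oldest item is nine days old

# Overdue when created before (now - 7 days), the same comparison the
# status email applies to scans, documents and tickets above.
overdue = oldest_created < (now - OVERDUE_AFTER)
print("overdue" if overdue else "on schedule")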

Example 89

Project: aemanager Source File: reset_demo_account.py
    def handle(self, *args, **options):
        if not settings.DEMO:
            self.stderr.write("Demo is set to False\n")

        for profile in UserProfile.objects.all():
            shutil.rmtree('%s%s' % (settings.FILE_UPLOAD_DIR,
                                    profile.uuid),
                                    True)
            profile.user.delete()

        # reset sql sequence
        cursor = connection.cursor()
        model_list = [models.get_model('auth', 'User'), models.get_model('core', 'OwnedObject'), ]
        style = no_style()

        queries = connection.ops.sequence_reset_sql(style, model_list)
        for query in queries:
            cursor.execute(query.encode('utf-8'))
        transaction.commit_unless_managed()

        now = datetime.datetime.now()

        user = User.objects.create_user('demo', '[email protected]', 'demo')
        user.first_name = 'Jean'
        user.last_name = 'Dupont'
        user.save()

        profile = user.get_profile()
        profile.phonenumber = '0102030405'
        profile.professional_email = '[email protected]'
        profile.company_name = 'Ma Petite Auto-Entreprise'
        profile.company_id = '12345678912345'
        profile.activity = AUTOENTREPRENEUR_ACTIVITY_LIBERAL_BNC
        profile.professional_category = AUTOENTREPRENEUR_PROFESSIONAL_CATEGORY_LIBERAL
        profile.creation_date = datetime.date(now.year - 1, 3, 1)
        profile.payment_option = AUTOENTREPRENEUR_PAYMENT_OPTION_QUATERLY
        profile.save()

        address = profile.address
        address.street = '1 rue de la Paix'
        address.zipcode = '75001'
        address.city = 'Paris'
        address.save()

        customer1_address = Address.objects.create(owner=user,
                                                   street='714 rue de Sydney',
                                                   zipcode='92800',
                                                   city='Puteaux')
        customer1 = Contact.objects.create(owner=user,
                                           contact_type=CONTACT_TYPE_COMPANY,
                                           name='Bross & Clackwell',
                                           company_id='98765432198765',
                                           legal_form='SA',
                                           email='[email protected]',

                                           representative='Laszlo Carreidas',
                                           representative_function='Gérant',
                                           address=customer1_address)
        customer1_phonenumber = PhoneNumber.objects.create(owner=user,
                                                           type=PHONENUMBER_TYPE_WORK,
                                                           number='0203040506',
                                                           default=True,
                                                           contact=customer1)

        customer2_address = Address.objects.create(owner=user,
                                                   street='32 rue du Pharaon',
                                                   zipcode='44000',
                                                   city='Nantes')
        customer2 = Contact.objects.create(owner=user,
                                           contact_type=CONTACT_TYPE_COMPANY,
                                           name='Flor Fina',
                                           company_id='88765432198765',
                                           legal_form='SARL',
                                           email='[email protected]',

                                           representative='Philémon Siclone',
                                           representative_function='Gérant',
                                           address=customer2_address)
        customer2_phonenumber = PhoneNumber.objects.create(owner=user,
                                                           type=PHONENUMBER_TYPE_WORK,
                                                           number='0204040506',
                                                           default=True,
                                                           contact=customer2)

        customer3_address = Address.objects.create(owner=user,
                                                   street='6 rue des Carmes',
                                                   zipcode='75012',
                                                   city='Paris')
        customer3 = Contact.objects.create(owner=user,
                                           contact_type=CONTACT_TYPE_COMPANY,
                                           name='Paris Flash',
                                           company_id='78765432198765',
                                           legal_form='SARL',
                                           email='[email protected]',

                                           representative='Walter Rizotto',
                                           representative_function='Gérant',
                                           address=customer3_address)
        customer3_phonenumber = PhoneNumber.objects.create(owner=user,
                                                           type=PHONENUMBER_TYPE_WORK,
                                                           number='0105040506',
                                                           default=True,
                                                           contact=customer3)

        project1 = Project.objects.create(owner=user,
                                          name='Refonte site vitrine',
                                          customer=customer1,
                                          state=PROJECT_STATE_FINISHED)

        project2 = Project.objects.create(owner=user,
                                          name='Création site e-commerce',
                                          customer=customer1,
                                          state=PROJECT_STATE_STARTED)

        project3 = Project.objects.create(owner=user,
                                          name='Référencement',
                                          customer=customer2,
                                          state=PROJECT_STATE_PROPOSAL_ACCEPTED)

        project4 = Project.objects.create(owner=user,
                                          name='Création application métier',
                                          customer=customer3,
                                          state=PROJECT_STATE_PROSPECT)

        lorem_ipsum_proposal = """<h1>Conditions générales de ventes</h1><div>client : {{ client }}</div><h2>Lorem ipsum</h2><p>dolor sit amet, consectetur adipiscing elit. Praesent iaculis, elit at hendrerit ultricies, felis nunc commodo sem, sed dapibus diam mauris non enim. Proin sed erat massa, non laoreet nibh. Morbi id arcu sit amet metus accuemsan ullamcorper ac sed lorem. Integer velit velit, pellentesque eu cursus vel, cursus nec lectus. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Donec eget turpis a sapien imperdiet adipiscing vel vitae sem. Sed egestas neque nec purus pulvinar non eleifend eros volutpat. Duis bibendum, lorem vitae tristique tincidunt, purus sem auctor nunc, porta malesuada erat eros et felis. Sed sem tellus, eleifend ut fermentum in, pulvinar elementum nunc. Phasellus mauris leo, rutrum ut vestibulum ac, porttitor a neque. Pellentesque ornare ultricies purus nec eleifend. Fusce mattis arcu ut eros dignissim tincidunt. Etiam tempus, nisl sit amet semper malesuada, augue ante semper turpis, eu ultricies magna leo non eros. In pellentesque, dui in congue fermentum, eros metus scelerisque quam, eu suscipit lacus urna in leo. Aliquam erat volutpat. Morbi in sollicitudin massa. Donec dictum, tellus ut tincidunt viverra, neque leo suscipit est, at luctus enim ante in metus. Nulla et justo nibh. Nunc non neque arcu, eget consectetur turpis. Quisque mattis aliquam lacus, sit amet consectetur enim feugiat sed.</p><h2>Donec turpis lectus</h2><p>auctor ac sagittis ac, rutrum id enim. Integer lobortis justo eu sapien viverra vel dapibus libero fermentum. Quisque sed nunc ipsum. Quisque vestibulum hendrerit sem vitae rhoncus. Nullam pharetra tortor et quam dignissim laoreet. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Sed tellus lectus, adipiscing at pellentesque sed, consectetur a ante. Curabitur aliquet nulla a ipsum convallis ac porta dui interdum. Etiam tristique metus in velit blandit id hendrerit magna rhoncus. Donec posuere orci ac diam faucibus rhoncus. Nulla facilisi. Proin imperdiet nisl quis ante laoreet sodales. Suspendisse potenti. Fusce rhoncus fermentum malesuada.</p><h2>Nullam tincidunt sapien</h2><p>et ligula gravida dignissim. Fusce condimentum aliquet mi in pellentesque. Vivamus elementum consequat mauris. Pellentesque a magna sed metus fringilla pharetra eu nec ante. Aliquam erat volutpat. Nam ac erat nec diam malesuada aliquet. Fusce ullamcorper nisl sed purus consectetur imperdiet. Vestibulum risus erat, pretium ut consequat et, iaculis vel orci. Integer vel diam velit, id mattis nunc. Pellentesque congue, turpis sed tempus consectetur, nulla lectus posuere velit, ac rhoncus metus nibh ut dolor. Donec in ante felis. Aliquam lacinia pellentesque dapibus. Praesent nulla metus, congue ac suscipit a, luctus pharetra orci. Mauris tincidunt egestas lectus, vitae suscipit dolor semper malesuada. Fusce imperdiet tincidunt convallis. Morbi sapien justo, pharetra a pharetra rhoncus, semper eu augue. Nulla eget nisl arcu, at cursus magna.</p><h2>Nullam auctor tempor</h2><p> magna nec dignissim. Pellentesque faucibus tellus a nisl faucibus ut consectetur felis bibendum. Morbi luctus pharetra pretium. Curabitur porttitor ipsum ut lectus pharetra eget consequat leo scelerisque. Mauris condimentum, urna in ultrices eleifend, lorem ligula pretium orci, ut condimentum urna orci ac nunc. Aliquam sit amet est et risus varius suscipit vitae ut ligula. Etiam orci tellus, laoreet non volutpat non, mollis nec nulla. 
Aenean pulvinar vestibulum aliquam. Proin eleifend dui urna, faucibus convallis quam. Sed aliquam leo et velit convallis rutrum. Ut in erat dolor. Nulla interdum tellus nec lacus porttitor malesuada. Nulla a lacus lectus, in congue nunc. Sed ac ipsum id mauris scelerisque hendrerit id aliquam odio. Duis faucibus orci sed arcu iaculis hendrerit. Aenean enim nunc, mollis ac sollicitudin eu, fringilla quis arcu. Sed facilisis, augue eu scelerisque hendrerit, libero justo suscipit orci, nec dapibus ligula eros sed orci.</p><h2>Sed vel ligula eget&nbsp;</h2><p>lacus imperdiet tempor. Quisque at massa a metus feugiat rhoncus eu eget erat. Sed posuere tempus augue laoreet tincidunt. Maecenas tempor, orci sed commodo volutpat, turpis magna euismod purus, id pulvinar ligula elit id eros. Ut laoreet magna eu leo interdum vel accuemsan nisl congue. Vestibulum mollis risus quis sem iaculis sit amet dignissim elit commodo. Quisque a condimentum orci. Cras non interdum velit. Morbi feugiat sapien at augue vulputate tempus. Aliquam imperdiet sodales cursus. Donec nisl tellus, rhoncus nec sollicitudin sed, tristique a lacus. Fusce quis sollicitudin nibh. Pellentesque elementum metus et lacus bibendum tristique. Nulla auctor gravida nunc, sed sodales ante laoreet at. Morbi ut tellus in tellus malesuada porta. Nunc ligula erat, fermentum eu tempus sit amet, imperdiet sit amet lacus. Vestibulum quis mauris nec velit fermentum egestas a eu urna. Curabitur eros ipsum, lobortis nec faucibus sagittis, cursus id felis. Praesent vestibulum, diam vitae commodo lacinia, justo neque lobortis nulla, nec congue diam eros vel massa.</p>"""

        proposal1 = Proposal.objects.create(owner=user,
                                            reference='CRT_%i_001' % (now.year - 1),
                                            project=project1,
                                            update_date=datetime.date.today(),
                                            state=PROPOSAL_STATE_BALANCED,
                                            begin_date=datetime.date(now.year - 1, 6, 1),
                                            end_date=datetime.date(now.year - 1, 6, 28),
                                            expiration_date=datetime.date(now.year - 1, 6, 1),
                                            contract_content=lorem_ipsum_proposal,
                                            amount=3000)

        proposal1_row1 = ProposalRow.objects.create(owner=user,
                                                    proposal=proposal1,
                                                    label='Charte graphique',
                                                    category=ROW_CATEGORY_SERVICE,
                                                    quantity=5,
                                                    unit_price='200')

        proposal1_row2 = ProposalRow.objects.create(owner=user,
                                                    proposal=proposal1,
                                                    label='Intégration et remplissage',
                                                    category=ROW_CATEGORY_SERVICE,
                                                    quantity=8,
                                                    unit_price='250')

        invoice1 = Invoice.objects.create(owner=user,
                                          customer=proposal1.project.customer,
                                          invoice_id=1,
                                          state=INVOICE_STATE_PAID,
                                          amount=4000,
                                          edition_date=datetime.date(now.year - 1, 6, 28),
                                          payment_date=datetime.date(now.year - 1, 7, 28),
                                          paid_date=datetime.date(now.year - 1, 7, 28),
                                          payment_type=PAYMENT_TYPE_CHECK,
                                          execution_begin_date=datetime.date(now.year - 1, 6, 1),
                                          execution_end_date=datetime.date(now.year - 1, 6, 28),
                                          penalty_date=datetime.date(now.year - 1, 7, 29),
                                          penalty_rate='1.95',
                                          discount_conditions='Néant')

        invoice1_row1 = InvoiceRow.objects.create(owner=user,
                                                  proposal=proposal1,
                                                  invoice=invoice1,
                                                  label='Charte graphique',
                                                  category=ROW_CATEGORY_SERVICE,
                                                  quantity=5,
                                                  unit_price='200',
                                                  balance_payments=True)

        invoice1_row2 = InvoiceRow.objects.create(owner=user,
                                                  proposal=proposal1,
                                                  invoice=invoice1,
                                                  label='Intégration et remplissage',
                                                  category=ROW_CATEGORY_SERVICE,
                                                  quantity=8,
                                                  unit_price='250',
                                                  balance_payments=True)

        proposal2 = Proposal.objects.create(owner=user,
                                            reference='CRT_%i_002' % (now.year - 1),
                                            project=project2,
                                            update_date=datetime.date.today(),
                                            state=PROPOSAL_STATE_ACCEPTED,
                                            begin_date=now - datetime.timedelta(90),
                                            end_date=now - datetime.timedelta(60),
                                            expiration_date=now - datetime.timedelta(90),
                                            contract_content=lorem_ipsum_proposal,
                                            amount=7000)

        proposal2_row1 = ProposalRow.objects.create(owner=user,
                                                    proposal=proposal2,
                                                    label='Charte graphique',
                                                    category=ROW_CATEGORY_SERVICE,
                                                    quantity=5,
                                                    unit_price='200')

        proposal2_row2 = ProposalRow.objects.create(owner=user,
                                                    proposal=proposal2,
                                                    label='Configuration modules',
                                                    category=ROW_CATEGORY_SERVICE,
                                                    quantity=15,
                                                    unit_price='300')

        proposal2_row3 = ProposalRow.objects.create(owner=user,
                                                    proposal=proposal2,
                                                    label='Installation serveur',
                                                    category=ROW_CATEGORY_SERVICE,
                                                    quantity=5,
                                                    unit_price='300')

        invoice2 = Invoice.objects.create(owner=user,
                                          customer=proposal2.project.customer,
                                          invoice_id=2,
                                          state=INVOICE_STATE_PAID,
                                          amount=2100,
                                          edition_date=now - datetime.timedelta(90),
                                          payment_date=now - datetime.timedelta(90),
                                          paid_date=now - datetime.timedelta(90),
                                          payment_type=PAYMENT_TYPE_CHECK,
                                          execution_begin_date=now - datetime.timedelta(90),
                                          execution_end_date=now - datetime.timedelta(60),
                                          penalty_date=now - datetime.timedelta(90),
                                          penalty_rate='1.95',
                                          discount_conditions='Néant')

        invoice2_row1 = InvoiceRow.objects.create(owner=user,
                                                  proposal=proposal2,
                                                  invoice=invoice2,
                                                  label='Accompte commande 30%',
                                                  category=ROW_CATEGORY_SERVICE,
                                                  quantity=1,
                                                  unit_price='2100',
                                                  balance_payments=False)

        invoice3 = Invoice.objects.create(owner=user,
                                          customer=proposal2.project.customer,
                                          invoice_id=3,
                                          state=INVOICE_STATE_PAID,
                                          amount=1000,
                                          edition_date=now - datetime.timedelta(40),
                                          payment_date=now - datetime.timedelta(10),
                                          paid_date=now - datetime.timedelta(5),
                                          payment_type=PAYMENT_TYPE_CHECK,
                                          execution_begin_date=now - datetime.timedelta(90),
                                          execution_end_date=now - datetime.timedelta(60),
                                          penalty_date=now,
                                          penalty_rate='1.95',
                                          discount_conditions='Néant')


        invoice3_row1 = InvoiceRow.objects.create(owner=user,
                                                  proposal=proposal2,
                                                  invoice=invoice3,
                                                  label='Charte graphique',
                                                  category=ROW_CATEGORY_SERVICE,
                                                  quantity=5,
                                                  unit_price='200',
                                                  balance_payments=False)

        invoice4 = Invoice.objects.create(owner=user,
                                          customer=proposal2.project.customer,
                                          invoice_id=4,
                                          state=INVOICE_STATE_SENT,
                                          amount=3900,
                                          edition_date=now - datetime.timedelta(30),
                                          payment_date=now - datetime.timedelta(1),
                                          payment_type=None,
                                          execution_begin_date=now - datetime.timedelta(90),
                                          execution_end_date=now - datetime.timedelta(60),
                                          penalty_date=now - datetime.timedelta(1),
                                          penalty_rate='1.95',
                                          discount_conditions='Néant')

        invoice4_row1 = InvoiceRow.objects.create(owner=user,
                                                  proposal=proposal2,
                                                  invoice=invoice4,
                                                  label='Configuration modules',
                                                  category=ROW_CATEGORY_SERVICE,
                                                  quantity=15,
                                                  unit_price='300',
                                                  balance_payments=True)

        invoice4_row2 = InvoiceRow.objects.create(owner=user,
                                                  proposal=proposal2,
                                                  invoice=invoice4,
                                                  label='Installation serveur',
                                                  category=ROW_CATEGORY_SERVICE,
                                                  quantity=5,
                                                  unit_price='300',
                                                  balance_payments=True)

        invoice4_row3 = InvoiceRow.objects.create(owner=user,
                                                  proposal=proposal2,
                                                  invoice=invoice4,
                                                  label='Accompte payé',
                                                  category=ROW_CATEGORY_SERVICE,
                                                  quantity=1,
                                                  unit_price='-2100',
                                                  balance_payments=True)

        proposal3 = Proposal.objects.create(owner=user,
                                            reference='CRT_%i_001' % (now.year),
                                            project=project3,
                                            update_date=datetime.date.today(),
                                            state=PROPOSAL_STATE_ACCEPTED,
                                            begin_date=now + datetime.timedelta(10),
                                            end_date=now + datetime.timedelta(60),
                                            expiration_date=now + datetime.timedelta(10),
                                            contract_content=lorem_ipsum_proposal,
                                            amount=16000)

        proposal3_row1 = ProposalRow.objects.create(owner=user,
                                                    proposal=proposal3,
                                                    label="Préstation de conseil",
                                                    category=ROW_CATEGORY_SERVICE,
                                                    quantity=40,
                                                    unit_price='400')

        invoice5 = Invoice.objects.create(owner=user,
                                          customer=proposal3.project.customer,
                                          invoice_id=5,
                                          state=INVOICE_STATE_EDITED,
                                          amount=4500,
                                          edition_date=now - datetime.timedelta(1),
                                          payment_date=now + datetime.timedelta(30),
                                          execution_begin_date=now + datetime.timedelta(10),
                                          execution_end_date=now + datetime.timedelta(60),
                                          penalty_date=now + datetime.timedelta(30),
                                          penalty_rate='1.95',
                                          discount_conditions='Néant')

        invoice5_row1 = InvoiceRow.objects.create(owner=user,
                                                  proposal=proposal3,
                                                  invoice=invoice5,
                                                  label='Accompte commande 30%',
                                                  category=ROW_CATEGORY_SERVICE,
                                                  quantity=1,
                                                  unit_price='4500',
                                                  balance_payments=False)

        proposal4 = Proposal.objects.create(owner=user,
                                            reference='CRT_%i_002' % (now.year),
                                            project=project4,
                                            update_date=datetime.date.today(),
                                            state=PROPOSAL_STATE_DRAFT,
                                            begin_date=now + datetime.timedelta(40),
                                            end_date=now + datetime.timedelta(90),
                                            expiration_date=now + datetime.timedelta(40),
                                            contract_content=lorem_ipsum_proposal,
                                            amount=5000)

        proposal4_row1 = ProposalRow.objects.create(owner=user,
                                                    proposal=proposal4,
                                                    label="Préstation d'audit",
                                                    category=ROW_CATEGORY_SERVICE,
                                                    quantity=10,
                                                    unit_price='500')

        expense = Expense.objects.create(owner=user,
                                         date=now - datetime.timedelta(45),
                                         reference='XYZ',
                                         supplier='GMG',
                                         amount=500,
                                         payment_type=PAYMENT_TYPE_BANK_CARD,
                                         description='Assurance pro')

        expense = Expense.objects.create(owner=user,
                                         date=now - datetime.timedelta(100),
                                         reference='ZYX',
                                         supplier='Matos.net',
                                         amount=700,
                                         payment_type=PAYMENT_TYPE_BANK_CARD,
                                         description='Achat pc')
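
The fixture above never shows where `now` comes from; the pattern only makes sense if `now` was captured once, earlier in the setup, via datetime.datetime.now(). A minimal, self-contained sketch of the same relative-date idiom (names here are illustrative, not part of the project):

import datetime

now = datetime.datetime.now()

# Absolute dates anchored to "last year", as in proposal1 and invoice1.
last_june_start = datetime.date(now.year - 1, 6, 1)
last_june_end = datetime.date(now.year - 1, 6, 28)

# Rolling dates relative to the current run, as in proposal2 and invoices 2-4.
ninety_days_ago = now - datetime.timedelta(days=90)
in_ten_days = now + datetime.timedelta(days=10)

print(last_june_start, last_june_end, ninety_days_ago.date(), in_ten_days.date())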

Example 90

Project: SmokeDetector Source File: bodyfetcher.py
    def make_api_call_for_site(self, site):
        if site not in self.queue:
            GlobalVars.charcoal_hq.send_message("Attempted API call to {} but there are no posts to fetch.".format(site))
            return

        self.queue_modify_lock.acquire()
        posts = self.queue.pop(site)
        store_bodyfetcher_queue()
        self.queue_modify_lock.release()

        question_modifier = ""
        pagesize_modifier = ""

        if site == "stackoverflow.com":
            # Not all SO questions are shown in the realtime feed. We now
            # fetch all recently modified SO questions to work around that.
            if self.last_activity_date != 0:
                pagesize = "50"
            else:
                pagesize = "25"

            pagesize_modifier = "&pagesize={pagesize}&min={time_length}".format(pagesize=pagesize, time_length=str(self.last_activity_date))
        else:
            question_modifier = "/{0}".format(";".join(str(post) for post in posts))

        url = "http://api.stackexchange.com/2.2/questions{q_modifier}?site={site}&filter=!)E0g*ODaEZ(SgULQhYvCYbu09*ss(bKFdnTrGmGUxnqPptuHP&key=IAkbitmze4B8KpacUfLqkw(({optional_min_query_param}".format(q_modifier=question_modifier, site=site, optional_min_query_param=pagesize_modifier)

        # wait to make sure API has/updates post data
        time.sleep(3)

        GlobalVars.api_request_lock.acquire()
        # Respect backoff, if we were given one
        if GlobalVars.api_backoff_time > time.time():
            time.sleep(GlobalVars.api_backoff_time - time.time() + 2)
        try:
            time_request_made = datetime.strftime(datetime.now(), '%H:%M:%S')
            response = requests.get(url, timeout=20).json()
        except requests.exceptions.Timeout:
            return  # could add some retrying logic here, but eh.

        self.api_data_lock.acquire()
        add_or_update_api_data(site)
        self.api_data_lock.release()

        message_hq = ""
        if "quota_remaining" in response:
            if response["quota_remaining"] - GlobalVars.apiquota >= 5000 and GlobalVars.apiquota >= 0:
                GlobalVars.charcoal_hq.send_message("API quota rolled over with {0} requests remaining. Current quota: {1}.".format(GlobalVars.apiquota, response["quota_remaining"]))
                sorted_calls_per_site = sorted(GlobalVars.api_calls_per_site.items(), key=itemgetter(1), reverse=True)
                api_quota_used_per_site = ""
                for site_name, quota_used in sorted_calls_per_site:
                    api_quota_used_per_site += site_name.replace('.com', '').replace('.stackexchange', '') + ": {0}\n".format(str(quota_used))
                api_quota_used_per_site = api_quota_used_per_site.strip()
                GlobalVars.charcoal_hq.send_message(api_quota_used_per_site, False)
                clear_api_data()
            if response["quota_remaining"] == 0:
                GlobalVars.charcoal_hq.send_message("API reports no quota left!  May be a glitch.")
                GlobalVars.charcoal_hq.send_message(str(response))  # No code format for now?
            if GlobalVars.apiquota == -1:
                GlobalVars.charcoal_hq.send_message("Restart: API quota is {quota}.".format(quota=response["quota_remaining"]))
            GlobalVars.apiquota = response["quota_remaining"]
        else:
            message_hq = "The quota_remaining property was not in the API response."

        if "error_message" in response:
            message_hq += " Error: {} at {} UTC.".format(response["error_message"], time_request_made)
            if "error_id" in response and response["error_id"] == 502:
                if GlobalVars.api_backoff_time < time.time() + 12:  # Add a backoff of 10 + 2 seconds as a default
                    GlobalVars.api_backoff_time = time.time() + 12
            message_hq += " Backing off on requests for the next 12 seconds."

        if "backoff" in response:
            if GlobalVars.api_backoff_time < time.time() + response["backoff"]:
                GlobalVars.api_backoff_time = time.time() + response["backoff"]
            match = regex.compile('/2.2/([^.]*)').search(url)
            url_part = match.group(1) if match else url
            message_hq += "\nBackoff received of {} seconds on request to `{}` at {} UTC".format(str(response["backoff"]), url_part, time_request_made)

        GlobalVars.api_request_lock.release()

        if len(message_hq) > 0:
            GlobalVars.charcoal_hq.send_message(message_hq.strip())

        if "items" not in response:
            return

        if site == "stackoverflow.com":
            items = response["items"]
            if len(items) > 0 and "last_activity_date" in items[0]:
                self.last_activity_date = items[0]["last_activity_date"]

        for post in response["items"]:
            if "title" not in post or "body" not in post:
                continue
            title = GlobalVars.parser.unescape(post["title"])
            body = GlobalVars.parser.unescape(post["body"])
            link = post["link"]
            post_score = post["score"]
            up_vote_count = post["up_vote_count"]
            down_vote_count = post["down_vote_count"]
            try:
                owner_name = GlobalVars.parser.unescape(post["owner"]["display_name"])
                owner_link = post["owner"]["link"]
                owner_rep = post["owner"]["reputation"]
            except:
                owner_name = ""
                owner_link = ""
                owner_rep = 0
            q_id = str(post["question_id"])

            is_spam, reason, why = check_if_spam(title=title,
                                                 body=body,
                                                 user_name=owner_name,
                                                 user_url=owner_link,
                                                 post_site=site,
                                                 post_id=q_id,
                                                 is_answer=False,
                                                 body_is_summary=False,
                                                 owner_rep=owner_rep,
                                                 post_score=post_score)
            if is_spam:
                try:
                    handle_spam(title=title,
                                body=body,
                                poster=owner_name,
                                site=site,
                                post_url=link,
                                poster_url=owner_link,
                                post_id=q_id,
                                reasons=reason,
                                is_answer=False,
                                why=why,
                                owner_rep=owner_rep,
                                post_score=post_score,
                                up_vote_count=up_vote_count,
                                down_vote_count=down_vote_count,
                                question_id=None)
                except:
                    print "NOP"
            try:
                for answer in post["answers"]:
                    answer_title = ""
                    body = answer["body"]
                    print "got answer from owner with name " + owner_name
                    link = answer["link"]
                    a_id = str(answer["answer_id"])
                    post_score = answer["score"]
                    up_vote_count = answer["up_vote_count"]
                    down_vote_count = answer["down_vote_count"]
                    try:
                        owner_name = GlobalVars.parser.unescape(answer["owner"]["display_name"])
                        owner_link = answer["owner"]["link"]
                        owner_rep = answer["owner"]["reputation"]
                    except:
                        owner_name = ""
                        owner_link = ""
                        owner_rep = 0

                    is_spam, reason, why = check_if_spam(title=answer_title,
                                                         body=body,
                                                         user_name=owner_name,
                                                         user_url=owner_link,
                                                         post_site=site,
                                                         post_id=a_id,
                                                         is_answer=True,
                                                         body_is_summary=False,
                                                         owner_rep=owner_rep,
                                                         post_score=post_score)
                    if is_spam:
                        try:
                            handle_spam(title=title,
                                        body=body,
                                        poster=owner_name,
                                        site=site,
                                        post_url=link,
                                        poster_url=owner_link,
                                        post_id=a_id,
                                        reasons=reason,
                                        is_answer=True,
                                        why=why,
                                        owner_rep=owner_rep,
                                        post_score=post_score,
                                        up_vote_count=up_vote_count,
                                        down_vote_count=down_vote_count,
                                        question_id=q_id)
                        except:
                            print "NOP"
            except:
                print "no answers"
        return
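
The single datetime.datetime.now() call in this example stamps the moment the API request is made, so later error and backoff messages can report when the failing call happened. A stripped-down sketch of that pattern, with a hypothetical make_request callable standing in for the Stack Exchange call:

from datetime import datetime

def call_api(make_request):
    # Record the wall-clock time just before the request, formatted as in the
    # original ('%H:%M:%S'); note datetime.now() is local time, not UTC.
    time_request_made = datetime.strftime(datetime.now(), '%H:%M:%S')
    try:
        return make_request(), time_request_made
    except Exception:
        print("request made at {} failed".format(time_request_made))
        raise

response, stamp = call_api(lambda: {"quota_remaining": 9999})
print(stamp, response)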

Example 91

Project: discord_feedbot Source File: feed2discord.py
@asyncio.coroutine
def background_check_feed(feed,asyncioloop):
    global timezone
    logger.info(feed+': Starting up background_check_feed')

    # Try to wait until Discord client has connected, etc:
    yield from client.wait_until_ready()
    # make sure debug output has this check run in the right order...
    yield from asyncio.sleep(1)

    # just a bit easier to use...
    FEED=config[feed]

    # pull config for this feed out:
    feed_url = FEED.get('feed_url')
    rss_refresh_time = FEED.getint('rss_refresh_time',3600)
    max_age = FEED.getint('max_age',86400)

    # loop through all the channels this feed is configured to send to
    channels = []
    for key in FEED.get('channels').split(','):
        logger.debug(feed+': adding channel '+key)
        # stick a dict in the channels array so we have more to work with
        channels.append(
            {
              'object': discord.Object(id=config['CHANNELS'][key]),
              'name': key,
              'id': config['CHANNELS'][key],
            }
        )

    # Basically run forever
    while not client.is_closed:
        # And tries to catch all the exceptions and just keep going
        # (but see list of except/finally stuff below)
        try:
            logger.info(feed+': processing feed')

            # If send_typing is on for the feed, send a little "typing ..."
            # whenever a feed is being worked on.  configurable per-room
            if FEED.getint(
                           feed+'.send_typing',
                           FEED.getint('send_typing',0)) >= 1:
                for channel in channels:
                    # Since this is first attempt to talk to this channel,
                    # be very verbose about failures to talk to channel
                    try:
                        yield from client.send_typing(channel['object'])
                    except discord.errors.Forbidden:
                        logger.error(feed+':discord.errors.Forbidden')
                        logger.error(sys.exc_info())
                        logger.error(
                            feed+
                            ":Perhaps bot isn't allowed in this channel?")
                        logger.error(channel)

            http_headers = {}
            http_headers['User-Agent'] = MAIN.get('UserAgent',
                                                  'feed2discord/1.0')

            ### Download the actual feed, if changed since last fetch

            # pull data about history of this *feed* from DB:
            cursor = conn.cursor()
            cursor.execute(
                "select lastmodified,etag from feed_info where feed=? OR url=?",
                [feed,feed_url])
            data=cursor.fetchone()

            # If we've handled this feed before,
            # and we have etag from last run, add etag to headers.
            # and if we have a last modified time from last run,
            # add "If-Modified-Since" to headers.
            if data is None: # never handled this feed before...
                logger.info(feed+':new feed, saving info')
                cursor.execute(
                    "REPLACE INTO feed_info (feed,url) VALUES (?,?)",
                    [feed,feed_url])
                conn.commit()
                logger.debug(feed+':feed info saved')
            else:
                logger.debug(feed+
                             ':setting up extra headers for HTTP request.')
                logger.debug(data)
                lastmodified = data[0]
                etag = data[1]
                if lastmodified is not None and len(lastmodified):
                    logger.debug(feed+
                                 ':adding header If-Modified-Since: '+
                                 lastmodified)
                    http_headers['If-Modified-Since'] = lastmodified
                else:
                    logger.debug(feed+':no stored lastmodified')
                if etag is not None and len(etag):
                    logger.debug(feed+':adding header ETag: '+etag)
                    http_headers['ETag'] = etag
                else:
                    logger.debug(feed+':no stored ETag')

            logger.debug(feed+':sending http request for '+feed_url)
            # Send actual request.  yield from can yield control to another
            # instance.
            http_response = yield from httpclient.request('GET',
                                                          feed_url,
                                                          headers=http_headers)
            logger.debug(http_response)

            # Some feeds are smart enough to use that if-modified-since or
            # etag info, which gives us a 304 status.  If that happens,
            # assume no new items, fall through rest of this and try again
            # later.
            if http_response.status == 304:
                logger.debug(feed+':data is old; moving on')
                http_response.close()
                raise HTTPNotModified()
            elif http_response.status is None:
                logger.error(feed+':HTTP response code is NONE')
                raise HTTPError()
            # If we get anything but a 200, that's a problem and we don't
            # have good data, so give up and try later.
            # Mostly handled different than 304/not-modified to make logging
            # clearer.
            elif http_response.status != 200:
                logger.debug(feed+':HTTP error: '+str(http_response.status))
                raise HTTPError()
            else:
                logger.debug(feed+':HTTP success')


            # pull data out of the http response
            logger.debug(feed+':reading http response')
            http_data = yield from http_response.read()

            # parse the data from the http response with feedparser
            logger.debug(feed+':parsing http data')
            feed_data = feedparser.parse(http_data)
            logger.debug(feed+':done fetching')


            # If we got an ETAG back in headers, store that, so we can
            # include on next fetch
            if 'ETAG' in http_response.headers:
                etag = http_response.headers['ETAG']
                logger.debug(feed+':saving etag: '+etag)
                cursor.execute(
                    "UPDATE feed_info SET etag=? where feed=? or url=?",
                    [etag,feed,feed_url])
                conn.commit()
                logger.debug(feed+':etag saved')
            else:
                logger.debug(feed+':no etag')

            # If we got a Last-Modified header back, store that, so we can
            # include on next fetch
            if 'LAST-MODIFIED' in http_response.headers:
                modified = http_response.headers['LAST-MODIFIED']
                logger.debug(feed+':saving lastmodified: '+modified)
                cursor.execute(
                    "UPDATE feed_info SET lastmodified=? where feed=? or url=?",
                    [modified,feed,feed_url])
                conn.commit()
                logger.debug(feed+':saved lastmodified')
            else:
                logger.debug(feed+':no last modified date')

            http_response.close()

            # Process all of the entries in the feed
            # Use reversed to start with end, which is usually oldest
            logger.debug(feed+':processing entries')
            for item in reversed(feed_data.entries):
                logger.debug(feed+':item:processing this entry')
                if debug > 1:
                    logger.debug(item) # can be very noisy

                # Pull out the unique id, or just give up on this item.
                id = ''
                if 'id' in item:
                    id=item.id
                elif 'guid' in item:
                    id=item.guid
                elif 'link' in item:
                    id=item.link
                else:
                    logger.error(feed+':item:no id, skipping')
                    continue

                # Get our best date out, in both raw and parsed form
                pubDateDict = extract_best_item_date(item)
                pubDate = pubDateDict['date']
                pubDate_parsed = pubDateDict['date_parsed']

                logger.debug(feed+':item:id:'+id)
                logger.debug(feed+
                             ':item:checking database history for this item')
                # Check DB for this item
                cursor.execute(
                    "SELECT published,title,url,reposted FROM feed_items WHERE id=?",
                    [id])
                data=cursor.fetchone()

                # If we've never seen it before, then actually processing
                # this:
                if data is None:
                    logger.info(feed+':item '+id+' unseen, processing:')

                    # Store info about this item, so next time we skip it:
                    cursor.execute(
                        "INSERT INTO feed_items (id,published) VALUES (?,?)",
                        [id,pubDate])
                    conn.commit()

                    # Doing some crazy date math stuff...
                    # max_age is mostly so that first run doesn't spew too
                    # much stuff into a room, but is also a useful safety
                    # measure in case a feed suddenly reverts to something
                    # ancient or other weird problems...
                    time_since_published = timezone.localize(datetime.now()) - pubDate_parsed.astimezone(timezone)

                    if time_since_published.total_seconds() < max_age:
                        logger.info(feed+':item:fresh and ready for parsing')

                        # Loop over all channels for this particular feed
                        # and process appropriately:
                        for channel in channels:
                            include = True
                            filter_field = FEED.get(
                                                    channel['name']+'.filter_field',
                                                    FEED.get('filter_field',
                                                        'title'))
                            # Run the include filter if one is configured for this channel or feed-wide
                            if (channel['name']+'.filter') in FEED or 'filter' in FEED:
                                logger.debug(feed+':item:running filter for'+channel['name'])
                                regexpat = FEED.get(
                                                    channel['name']+'.filter',
                                                    FEED.get('filter','^.*$'))
                                logger.debug(feed+':item:using filter:'+regexpat+' on '+item['title']+' field '+filter_field)
                                regexmatch = re.search(regexpat,item[filter_field])
                                if regexmatch is None:
                                    include = False
                                    logger.info(feed+':item:failed filter for '+channel['name'])
                            elif (channel['name']+'.filter_exclude') in FEED or 'filter_exclude' in FEED:
                                logger.debug(feed+':item:running exclude filter for'+channel['name'])
                                regexpat = FEED.get(
                                                    channel['name']+'.filter_exclude',
                                                    FEED.get('filter_exclude',
                                                    '^.*$'))
                                logger.debug(feed+':item:using filter_exclude:'+regexpat+' on '+item['title']+' field '+filter_field)
                                regexmatch = re.search(regexpat,item[filter_field])
                                if regexmatch is None:
                                    include = True
                                    logger.info(feed+':item:passed exclude filter for '+channel['name'])
                                else:
                                    include = False
                                    logger.info(feed+':item:failed exclude filter for '+channel['name'])
                            else:
                                include = True # redundant safety net
                                logger.debug(feed+':item:no filter configured for'+channel['name'])

                            if include is True:
                                logger.debug(feed+':item:building message for '+channel['name'])
                                message = build_message(FEED,item,channel)
                                logger.debug(feed+':item:sending message (eventually) to '+channel['name'])
                                yield from send_message_wrapper(asyncioloop,
                                                                FEED,
                                                                feed,
                                                                channel,
                                                                client,
                                                                message)
                            else:
                                logger.info(feed+':item:skipping item due to not passing filter for '+channel['name'])

                    else:
                        # Lots of debugging info for date handling stuff...
                        logger.info(feed+':too old; skipping')
                        logger.debug(feed+':now:'+str(time.time()))
                        logger.debug(feed+':now:gmtime:'+str(time.gmtime()))

                        logger.debug(feed+':now:localtime:'+str(time.localtime()))
                        logger.debug(feed+':timezone.localize(datetime.now()):'+str(timezone.localize(datetime.now())))
                        logger.debug(feed+':pubDate:'+str(pubDate))
                        logger.debug(feed+':pubDate_parsed:'+str(pubDate_parsed))
                        logger.debug(feed+':pubDate_parsed.astimezone(timezone):'+str(pubDate_parsed.astimezone(timezone)))
                        if debug >= 4:
                            logger.debug(item)
                # seen before, move on:
                else:
                    logger.debug(feed+':item:'+id+' seen before, skipping')
        # This is completely expected behavior for a well-behaved feed:
        except HTTPNotModified:
            logger.debug(feed+':Headers indicate feed unchanged since last time fetched:')
            logger.debug(sys.exc_info())
        # Many feeds have random periodic problems that shouldn't cause
        # permanent death:
        except HTTPError:
            logger.warn(feed+':Unexpected HTTP error:')
            logger.warn(sys.exc_info())
            logger.warn(feed+':Assuming error is transient and trying again later')
        # sqlite3 errors are probably really bad and we should just totally
        # give up on life
        except sqlite3.Error as sqlerr:
            logger.error(feed+':sqlite3 error: ')
            logger.error(sys.exc_info())
            logger.error(sqlerr)
            raise
        # Ideally we'd remove the specific channel or something...
        # But I guess just throw an error into the log and try again later...
        except discord.errors.Forbidden:
            logger.error(feed+':discord.errors.Forbidden')
            logger.error(sys.exc_info())
            logger.error(feed+":Perhaps bot isn't allowed in one of the channels for this feed?")
            # raise # or not? hmm...
        # unknown error: definitely give up and die and move on
        except:
            logger.error(feed+':Unexpected error:')
            # logger.error(sys.exc_info())
            logger.error(traceback.format_exc())
            logger.error(feed+':giving up')
            raise
        # No matter what goes wrong, wait same time and try again
        finally:
            logger.debug(feed+':sleeping for '+str(rss_refresh_time)+' seconds')
            yield from asyncio.sleep(rss_refresh_time)
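
The datetime.datetime.now() usage here is the max_age guard: the current time is localized with the configured timezone and compared against the entry's timezone-aware publication date. A minimal sketch of just that arithmetic, assuming pytz supplies the `timezone` object as the original appears to; the publication date is faked for the demo:

from datetime import datetime, timedelta
import pytz

timezone = pytz.timezone('Europe/Paris')   # hypothetical configured timezone
max_age = 86400                            # seconds, the default used above

# Stand-in for pubDate_parsed, an aware datetime parsed from the feed entry.
pub_date_parsed = timezone.localize(datetime.now()) - timedelta(hours=2)

time_since_published = timezone.localize(datetime.now()) - pub_date_parsed.astimezone(timezone)
if time_since_published.total_seconds() < max_age:
    print("fresh enough to post")
else:
    print("too old; skipping")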

Example 92

Project: Wallace Source File: clock.py
@scheduler.scheduled_job('interval', minutes=0.5)
def check_db_for_missing_notifications():
    """Check the database for missing notifications."""
    aws_access_key_id = os.environ['aws_access_key_id']
    aws_secret_access_key = os.environ['aws_secret_access_key']
    if config.getboolean('Shell Parameters', 'launch_in_sandbox_mode'):
        conn = MTurkConnection(
            aws_access_key_id=aws_access_key_id,
            aws_secret_access_key=aws_secret_access_key,
            host='mechanicalturk.sandbox.amazonaws.com')
    else:
        conn = MTurkConnection(
            aws_access_key_id=aws_access_key_id,
            aws_secret_access_key=aws_secret_access_key)

    # get all participants still marked as working
    participants = Participant.query.filter_by(status="working").all()

    # get current time
    current_time = datetime.now()

    # get experiment duration in seconds
    duration = float(config.get('HIT Configuration', 'duration')) * 60 * 60

    # for each participant, check whether current_time - start_time exceeds duration plus a grace period
    for p in participants:
        p_time = (current_time - p.creation_time).total_seconds()

        if p_time > (duration + 120):
            print ("Error: participant {} with status {} has been playing for too "
                   "long and no notification has arrived - "
                   "running emergency code".format(p.id, p.status))

            # get their assignment
            assignment_id = p.assignment_id

            # ask amazon for the status of the assignment
            try:
                assignment = conn.get_assignment(assignment_id)[0]
                status = assignment.AssignmentStatus
            except:
                status = None
            print "assignment status from AWS is {}".format(status)
            hit_id = p.hit_id

            # general email settings:
            username = os.getenv('wallace_email_username')
            fromaddr = username + "@gmail.com"
            email_password = os.getenv("wallace_email_key")
            toaddr = config.get('HIT Configuration', 'contact_email_on_error')
            whimsical = os.getenv("whimsical")

            if status == "Approved":
                # if its been approved, set the status accordingly
                print "status set to approved"
                p.status = "approved"
                session.commit()
            elif status == "Rejected":
                print "status set to rejected"
                # if its been rejected, set the status accordingly
                p.status = "rejected"
                session.commit()
            elif status == "Submitted":
                # if it has been submitted then resend a submitted notification
                args = {
                    'Event.1.EventType': 'AssignmentSubmitted',
                    'Event.1.AssignmentId': assignment_id
                }
                requests.post(
                    "http://" + os.environ['HOST'] + '/notifications',
                    data=args)

                # send the researcher an email to let them know
                if whimsical:
                    msg = MIMEText(
                        """Dearest Friend,\n\nI am writing to let you know that at
 {}, during my regular (and thoroughly enjoyable) perousal of the most charming
  participant data table, I happened to notice that assignment {} has been
 taking longer than we were expecting. I recall you had suggested {} minutes as
 an upper limit for what was an acceptable length of time for each assignement
 , however this assignment had been underway for a shocking {} minutes, a full
 {} minutes over your allowance. I immediately dispatched a telegram to our
 mutual friends at AWS and they were able to assure me that although the
 notification had failed to be correctly processed, the assignment had in fact
 been completed. Rather than trouble you, I dealt with this myself and I can
 assure you there is no immediate cause for concern. Nonetheless, for my own
 peace of mind, I would appreciate you taking the time to look into this matter
 at your earliest convenience.\n\nI remain your faithful and obedient servant,
\nAlfred R. Wallace\n\n P.S. Please do not respond to this message, I am busy
 with other matters.""".format(
                        datetime.now(),
                        assignment_id,
                        round(duration/60),
                        round(p_time/60),
                        round((p_time-duration)/60)))
                    msg['Subject'] = "A matter of minor concern."
                else:
                    msg = MIMEText(
                        """Dear experimenter,\n\nThis is an automated email from
 Wallace. You are receiving this email because the Wallace platform has
 discovered evidence that a notification from Amazon Web Services failed to
 arrive at the server. Wallace has automatically contacted AWS and has
 determined the dropped notification was a submitted notification (i.e. the
 participant has finished the experiment). This is a non-fatal error and so
 Wallace has auto-corrected the problem. Nonetheless you may wish to check the
 database.\n\nBest,\nThe Wallace dev. team.\n\n Error details:\nAssignment: {}
\nAllowed time: {}\nTime since participant started: {}""".format(
                        assignment_id,
                        round(duration/60),
                        round(p_time/60)))
                    msg['Subject'] = "Wallace automated email - minor error."

                # This method commented out as gmail now blocks emails from
                # new locations
                # server = smtplib.SMTP('smtp.gmail.com:587')
                # server.starttls()
                # server.login(username, email_password)
                # server.sendmail(fromaddr, toaddr, msg.as_string())
                # server.quit()
                print ("Error - submitted notification for participant {} missed. "
                       "Database automatically corrected, but proceed with caution."
                       .format(p.id))
            else:
                # if it has not been submitted shut everything down
                # first turn off autorecruit
                host = os.environ['HOST']
                host = host[:-len(".herokuapp.com")]
                args = json.dumps({"auto_recruit": "false"})
                headers = {
                    "Accept": "application/vnd.heroku+json; version=3",
                    "Content-Type": "application/json"
                }
                heroku_email_address = os.getenv('heroku_email_address')
                heroku_password = os.getenv('heroku_password')
                requests.patch(
                    "https://api.heroku.com/apps/{}/config-vars".format(host),
                    data=args,
                    auth=(heroku_email_address, heroku_password),
                    headers=headers)

                # then force expire the hit via boto
                conn.expire_hit(hit_id)

                # send the researcher an email to let them know
                if whimsical:
                    msg = MIMEText(
                        """Dearest Friend,\n\nI am afraid I write to you with most
 grave tidings. At {}, during a routine check of the usually most delightful
 participant data table, I happened to notice that assignment {} has been
 taking longer than we were expecting. I recall you had suggested {} minutes as
 an upper limit for what was an acceptable length of time for each assignment,
 however this assignment had been underway for a shocking {} minutes, a full {}
 minutes over your allowance. I immediately dispatched a telegram to our mutual
 friends at AWS and they infact informed me that they had already sent us a
 notification which we must have failed to process, implying that the
 assignment had not been successfully completed. Of course when the seriousness
 of this scenario dawned on me I had to depend on my trusting walking stick for
 support: without the notification I didn't know to remove the old assignment's
 data from the tables and AWS will have already sent their replacement, meaning
 that the tables may already be in a most unsound state!\n\nI am sorry to
 trouble you with this, however, I do not know how to proceed so rather than
 trying to remedy the scenario myself, I have instead temporarily ceased
 operations by expiring the HIT with the fellows at AWS and have refrained from
 posting any further invitations myself. Once you see fit I would be most
 appreciative if you could attend to this issue with the caution, sensitivity
 and intelligence for which I know you so well.\n\nI remain your faithful and
 obedient servant,\nAlfred R. Wallace\n\nP.S. Please do not respond to this
 message, I am busy with other matters.""".format(
                        datetime.now(),
                        assignment_id,
                        round(duration/60),
                        round(p_time/60),
                        round((p_time-duration)/60)))
                    msg['Subject'] = "Most troubling news."
                else:
                    msg = MIMEText(
                        """Dear experimenter,\n\nThis is an automated email from
 Wallace. You are receiving this email because the Wallace platform has
 discovered evidence that a notification from Amazon Web Services failed to
 arrive at the server. Wallace has automatically contacted AWS and has
 determined the dropped notification was an abandoned/returned notification
 (i.e. the participant had returned the experiment or had run out of time).
 This is a serious error and so Wallace has paused the experiment - expiring
 the HIT on MTurk and setting auto_recruit to false. Participants currently
 playing will be able to finish, however no further participants will be
 recruited until you do so manually. We strongly suggest you use the details
 below to check the database to make sure the missing notification has not caused
 additional problems before resuming.\nIf you are receiving a lot of these
 emails this suggests something is wrong with your experiment code.\n\nBest,
\nThe Wallace dev. team.\n\n Error details:\nAssignment: {}
\nAllowed time: {}\nTime since participant started: {}""".format(
                        assignment_id,
                        round(duration/60),
                        round(p_time/60)))
                    msg['Subject'] = "Wallace automated email - major error."

                # This method commented out as gmail now blocks emails from
                # new locations
                # server = smtplib.SMTP('smtp.gmail.com:587')
                # server.starttls()
                # server.login(username, email_password)
                # server.sendmail(fromaddr, toaddr, msg.as_string())
                # server.quit()

                # send a NotificationMissing notification
                args = {
                    'Event.1.EventType': 'NotificationMissing',
                    'Event.1.AssignmentId': assignment_id
                }
                requests.post(
                    "http://" + os.environ['HOST'] + '/notifications',
                    data=args)

                print ("Error - abandoned/returned notification for participant {} missed. "
                       "Experiment shut down. Please check database and then manually "
                       "resume experiment."
                       .format(p.id))
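
The scheduled job above relies on datetime.datetime.now() to measure how long each participant has been working: the difference against the stored creation_time is taken in seconds and compared to the allowed duration plus a small grace period. A self-contained sketch of that check, with illustrative values rather than Wallace's models:

from datetime import datetime, timedelta

duration = 0.5 * 60 * 60                                 # allowed time in seconds
grace = 120                                              # slack, as in the code above
creation_time = datetime.now() - timedelta(minutes=45)   # stand-in for p.creation_time

p_time = (datetime.now() - creation_time).total_seconds()
if p_time > duration + grace:
    print("participant overdue by {:.0f} minutes".format((p_time - duration) / 60))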

Example 93

Project: ganga Source File: feedback_report.py
def report(job=None):
    """ Upload error reports (snapshot of configuration,job parameters, input/output files, command history etc.). Job argument is optional. """
    import mimetypes
    import urllib
    import urllib2
    import httplib
    import string
    import random
    import sys
    import os
    import platform

    import Ganga.GPIDev.Lib.Config.config as config
    from Ganga.GPIDev.Base.VPrinter import full_print

    import Ganga

    # global variables that will print a summary report to the user along with
    # the download link
    global JOB_REPORT, GANGA_VERSION, BACKEND_NAME, APPLICATION_NAME, PYTHON_PATH
    JOB_REPORT = False
    GANGA_VERSION = ''
    BACKEND_NAME = ''
    APPLICATION_NAME = ''
    PYTHON_PATH = ''

    def random_string(length):
        return ''.join([random.choice(string.letters) for ii in range(length + 1)])

    def encode_multipart_formdata(files):
        boundary = random_string(30)
        retnl = '\r\n'
        lines = []

        def get_content_type(filename):
            return mimetypes.guess_type(filename)[0] or 'application/octet-stream'

        fields = {'title': 'Ganga Error Report'}

        for (key, value) in fields.iteritems():
            lines.append('--' + boundary)
            lines.append('Content-Disposition: form-data; name="%s"' % key)
            lines.append('')
            lines.append(value)
        for field_name, file in files.iteritems():
            lines.append('--' + boundary)
            lines.append(
                'Content-Disposition: form-data; name="file"; filename="%s"' % (file))
            lines.append('Content-Type: %s' % get_content_type(file))
            lines.append('')
            lines.append(open(file, 'rb').read())
        lines.append('--' + boundary + '--')
        lines.append('')
        body = retnl.join(lines)

        headers = {'content-type': 'multipart/form-data; boundary=%s' %
                   boundary, 'content-length': str(len(body))}

        return body, headers

    def make_upload_file(server):

        def upload_file(path):

            # print 'Uploading %r to %r' % (path, server)

            data = {'MAX_FILE_SIZE': '3145728',
                    'sub': '',
                    'mode': 'regist'}
            files = {'file': path}

            send_post(server, files)

        return upload_file

    def send_post(url, files):
        logger.debug("Sending Post to %s ,  containing %s" % (url, files))

        encoded_data = encode_multipart_formdata(files)

        data = urllib.urlencode(encoded_data[1])
        req = urllib2.Request(url, data=data)
        if req.has_data():
            logger.debug("urllib2: Success!")
        else:
            logger.debug("urllib2: Fail!!!")

        connection = httplib.HTTPConnection(req.get_host())
        # connection.set_debuglevel(1)
        logger.debug("Requesting: 'POST', %s, %s " % (url, encoded_data[1]))
#                connection.request( method='POST', url=req.get_selector(), body=encoded_data[0], headers=encoded_data[1] )
        connection.request(
            method='POST', url=url, body=encoded_data[0], headers=encoded_data[1])
        response = connection.getresponse()

        logger.debug("httplib POST request response was: %s , because: %s" % (
            response.status, response.reason))

        responseResult = response.read()

        #logger.debug("Responce.read(): --%s--" % responseResult )

        responseResult = responseResult[
            responseResult.find("<span id=\"download_path\""):]
        startIndex = responseResult.find("path:") + 5
        endIndex = responseResult.find("</span>")

        logger.debug("Responce.read(): --%s--" %
                     responseResult[startIndex:endIndex])

        logger.info(
            'Your error report was uploaded to the Ganga developers at the following URL.')
        logger.info(
            'You may include this URL and the summary information below in your bug report or in the support email to the developers.')
        logger.info('')
        logger.info('***' + str(responseResult[startIndex:endIndex]) + '***')
        logger.info('')
        global GANGA_VERSION, JOB_REPORT, APPLICATION_NAME, BACKEND_NAME, PYTHON_PATH
        logger.info('Ganga Version : ' + GANGA_VERSION)
        logger.info('Python Version : ' + "%s.%s.%s" %
                    (sys.version_info[0], sys.version_info[1], sys.version_info[2]))
        logger.info('Operating System Version : ' + platform.platform())

        if JOB_REPORT:
            logger.info('Application Name : ' + APPLICATION_NAME)
            logger.info('Backend Name : ' + BACKEND_NAME)

        logger.info('Python Path : ' + PYTHON_PATH)
        logger.info('')

        JOB_REPORT = False
        GANGA_VERSION = ''
        BACKEND_NAME = ''
        APPLICATION_NAME = ''
        PYTHON_PATH = ''

    def run_upload(server, path):

        upload_file = make_upload_file(server)
        upload_file(path)

    def report_inner(job=None, isJob=False, isTask=False):

        userInfoDirName = "userreport"
        tempDirName = "reportsRepository"
        # job relevant info
        jobSummaryFileName = "jobsummary.txt"
        jobFullPrintFileName = "jobfullprint.txt"
        repositoryPath = "repository/$usr/LocalXML/6.0/jobs/$thousandsNumxxx"
        # task relevant info
        taskSummaryFileName = "tasksummary.txt"
        taskFullPrintFileName = "taskfullprint.txt"
        tasksRepositoryPath = "repository/$usr/LocalXML/6.0/tasks/$thousandsNumxxx"
        # user's info
        environFileName = "environ.txt"
        userConfigFileName = "userconfig.txt"
        defaultConfigFileName = "gangarc.txt"
        ipythonHistoryFileName = "ipythonhistory.txt"
        gangaLogFileName = "gangalog.txt"
        jobsListFileName = "jobslist.txt"
        tasksListFileName = "taskslist.txt"
        thread_trace_file_name = 'thread_trace.html'
        from Ganga.Utility import Config
        uploadFileServer = Config.getConfig('Feedback')['uploadServer']
        #uploadFileServer= "http://gangamon.cern.ch/django/errorreports/"
        #uploadFileServer= "http://ganga-ai-02.cern.ch/django/errorreports/"
        #uploadFileServer= "http://127.0.0.1:8000/errorreports"

        def printDictionary(dictionary, file=sys.stdout):
            for k, v in dictionary.iteritems():
                print('%s: %s' % (k, v), file=file)

                if k == 'PYTHONPATH':
                    global PYTHON_PATH
                    PYTHON_PATH = v

        def extractFileObjects(fileName, targetDirectoryName):
            try:
                fileToRead = open(fileName, 'r')
                try:
                    fileText = fileToRead.read()
                    import re
                    pattern = "File\(name=\'(.+?)\'"
                    matches = re.findall(pattern, fileText)

                    for fileName in matches:
                        fileName = os.path.expanduser(fileName)
                        targetFileName = os.path.join(
                            targetDirectoryName, os.path.basename(fileName))
                        shutil.copyfile(fileName, targetFileName)

                finally:
                    fileToRead.close()
            # except IOError, OSError:
            except Exception as err:
                logger.debug("Err: %s" % err)
                writeErrorLog(str(sys.exc_info()[1]))

        def writeErrorLog(errorMessage):
            try:
                fileToWrite = open(errorLogPath, 'a')
                try:
                    fileToWrite.write(errorMessage)
                    fileToWrite.write("\n")
                except Exception as err:
                    logger.debug("Err: %s" % err)
                    raise
                finally:
                    fileToWrite.close()
            except Exception as err2:
                logger.debug("Err: %s" % err2)
                pass

        def writeStringToFile(fileName, stringToWrite):

            try:
                # uncomment this to try the error logger
                #fileName = '~/' + fileName
                fileToWrite = open(fileName, 'w')
                try:
                    fileToWrite.write(stringToWrite)
                except Exception as err:
                    logger.debug("Err: %s" % err)
                    raise err
                finally:
                    fileToWrite.close()
            # except IOError:
            except Exception as err:
                logger.debug("Err2: %s" % err)
                writeErrorLog(str(sys.exc_info()[1]))

        def renameDataFiles(directory):

            for fileName in os.listdir(directory):
                fullFileName = os.path.join(directory, fileName)
                if os.path.isfile(fullFileName):
                    if fileName == 'data':
                        os.rename(fullFileName, fullFileName + '.txt')
                else:
                    renameDataFiles(fullFileName)

        import shutil
        import tarfile
        import tempfile
        import os

        userHomeDir = os.getenv("HOME")
        tempDir = tempfile.mkdtemp()

        errorLogPath = os.path.join(tempDir, 'reportErrorLog.txt')

        fullPathTempDir = os.path.join(tempDir, tempDirName)
        fullLogDirName = ''
        # create temp dir and specific dir for the job/user

        try:
            if not os.path.exists(fullPathTempDir):
                os.mkdir(fullPathTempDir)

            import datetime
            now = datetime.datetime.now()
            userInfoDirName = userInfoDirName + \
                now.strftime("%Y-%m-%d-%H:%M:%S")
            fullLogDirName = os.path.join(fullPathTempDir, userInfoDirName)

            # if report directory exists -> delete it's content(we would like
            # last version of the report)
            if os.path.exists(fullLogDirName):
                shutil.rmtree(fullLogDirName)

            os.mkdir(fullLogDirName)
        # except OSError:
        except Exception as err:
            logger.debug("Err: %s" % err)
            writeErrorLog(str(sys.exc_info()[1]))

        # import os.environ in a file
        fullEnvironFileName = os.path.join(fullLogDirName, environFileName)

        try:
            inputFile = open(fullEnvironFileName, 'w')
            try:
                printDictionary(os.environ, file=inputFile)

                print('OS VERSION : ' + platform.platform(), file=inputFile)

            finally:
                inputFile.close()
        # except IOError
        except Exception as err:
            logger.debug("Err: %s" % err)
            writeErrorLog(str(sys.exc_info()[1]))

        # import user config in a file
        userConfigFullFileName = os.path.join(
            fullLogDirName, userConfigFileName)

        try:
            inputFile = open(userConfigFullFileName, 'w')
            try:

                print("#GANGA_VERSION = %s" %
                      config.System.GANGA_VERSION, file=inputFile)

                global GANGA_VERSION
                GANGA_VERSION = config.System.GANGA_VERSION

                # this gets the default values
                # Ganga.GPIDev.Lib.Config.Config.print_config_file()

                # this should get the changed values
                for c in config:
                    print(config[c], file=inputFile)

            finally:
                inputFile.close()
        # except IOError does not catch the exception ???
        except Exception as err:
            logger.debug("Err: %s" % err)
            writeErrorLog(str(sys.exc_info()[1]))

        # write gangarc - default configuration
        defaultConfigFullFileName = os.path.join(
            fullLogDirName, defaultConfigFileName)

        try:
            outputFile = open(os.path.join(userHomeDir, '.gangarc'), 'r')

            try:
                writeStringToFile(defaultConfigFullFileName, outputFile.read())
            finally:
                outputFile.close()

        # except IOError does not catch the exception ???
        except Exception as err:
            logger.debug("Err: %s" % err)
            writeErrorLog(str(sys.exc_info()[1]))

        # import ipython history in a file
        try:
            ipythonFile = open(
                os.path.join(os.environ['IPYTHONDIR'], 'history'), 'r')

            try:
                lastIPythonCommands = ipythonFile.readlines()[-20:]
                writeStringToFile(os.path.join(
                    fullLogDirName, ipythonHistoryFileName), '\n'.join(lastIPythonCommands))
                #writeStringToFile(os.path.join(fullLogDirName, ipythonHistoryFileName), ipythonFile.read())
            finally:
                ipythonFile.close()
        # except IOError does not catch the exception ???
        except Exception as err:
            logger.debug("Err: %s" % err)
            writeErrorLog(str(sys.exc_info()[1]))

        # import gangalog in a file
        userLogFileLocation = config["Logging"]._logfile
        userLogFileLocation = os.path.expanduser(userLogFileLocation)

        try:
            gangaLogFile = open(userLogFileLocation, 'r')
            try:
                writeStringToFile(
                    os.path.join(fullLogDirName, gangaLogFileName), gangaLogFile.read())
            finally:
                gangaLogFile.close()
        # except IOError:
        except Exception as err:
            logger.debug("Err: %s" % err)
            writeErrorLog(str(sys.exc_info()[1]))

        # import the result of jobs command in the report
        jobsListFullFileName = os.path.join(fullLogDirName, jobsListFileName)

        try:
            outputFile = open(jobsListFullFileName, 'w')
            try:

                from Ganga.Core.GangaRegistry import getRegistryProxy
                print(getRegistryProxy('jobs'), file=outputFile)

            finally:
                outputFile.close()

        # except IOError does not catch the exception ???
        except Exception as err:
            logger.debug("Err: %s" % err)
            writeErrorLog(str(sys.exc_info()[1]))

        # import the result of tasks command in the report
        tasksListFullFileName = os.path.join(fullLogDirName, tasksListFileName)

        try:
            outputFile = open(tasksListFullFileName, 'w')
            try:

                from Ganga.Core.GangaRegistry import getRegistryProxy
                print(getRegistryProxy('tasks'), file=outputFile)

            finally:
                outputFile.close()

        # except IOError does not catch the exception ???
        except Exception as err:
            logger.debug("Err: %s" % err)
            writeErrorLog(str(sys.exc_info()[1]))

        # save it here because fullLogDirName changes below, but this is the
        # directory we want to archive and then delete
        folderToArchive = fullLogDirName

        # import job relevant info
        if (job is not None and isJob):

            global JOB_REPORT, APPLICATION_NAME, BACKEND_NAME

            JOB_REPORT = True
            APPLICATION_NAME = getName(job.application)
            BACKEND_NAME = getName(job.backend)

            # create job folder
            jobFolder = 'job_%s' % job.fqid
            fullLogDirName = os.path.join(fullLogDirName, jobFolder)
            os.mkdir(fullLogDirName)

            # import job summary in a file
            fullJobSummaryFileName = os.path.join(
                fullLogDirName, jobSummaryFileName)
            writeStringToFile(fullJobSummaryFileName, job)

            # import job full print in a file
            fullJobPrintFileName = os.path.join(
                fullLogDirName, jobFullPrintFileName)

            try:
                inputFile = open(fullJobPrintFileName, 'w')
                try:
                    full_print(job, inputFile)
                finally:
                    inputFile.close()
            # except IOError, OSError:
            except Exception as err:
                logger.debug("Err: %s" % err)
                writeErrorLog(str(sys.exc_info()[1]))

            # extract file objects
            try:
                fileObjectsPath = os.path.join(fullLogDirName, 'fileobjects')
                os.mkdir(fileObjectsPath)
                extractFileObjects(fullJobSummaryFileName, fileObjectsPath)
            # except OSError:
            except Exception as err:
                logger.debug("Err: %s" % err)
                writeErrorLog(str(sys.exc_info()[1]))

            # copy dir of the job ->input/output and subjobs
            try:
                parentDir, currentDir = os.path.split(job.inputdir[:-1])
                workspaceDir = os.path.join(fullLogDirName, 'workspace')
                shutil.copytree(parentDir, workspaceDir)
            # except IOError, OSError
            except Exception as err:
                logger.debug("Err: %s" % err)
                writeErrorLog(str(sys.exc_info()[1]))

            # copy shared area of the job
            try:

                if hasattr(job.application, 'is_prepared'):
                    if job.application.is_prepared is not None and job.application.is_prepared is not True:
                        import os
                        from Ganga.Utility.Config import getConfig
                        from Ganga.Utility.files import expandfilename
                        shared_path = os.path.join(expandfilename(getConfig(
                            'Configuration')['gangadir']), 'shared', getConfig('Configuration')['user'])
                        shareddir = os.path.join(
                            shared_path, job.application.is_prepared.name)
                        if os.path.isdir(shareddir):

                            sharedAreaDir = os.path.join(
                                fullLogDirName, 'sharedarea')
                            shutil.copytree(shareddir, sharedAreaDir)
            # except IOError, OSError
            except Exception as err:
                logger.debug("Err: %s" % err)
                writeErrorLog(str(sys.exc_info()[1]))

            # copy repository job file
            try:
                indexFileName = str(job.id) + '.index'

                repositoryPath = repositoryPath.replace(
                    '$usr', os.getenv("USER"))

                # check if the job is subjob -> different way of forming the
                # path to the repository
                is_subjob = job.fqid.find('.') > -1

                if is_subjob:

                    jobid, subjobid = job.fqid.split(
                        '.')[0], job.fqid.split('.')[1]
                    repositoryPath = repositoryPath.replace(
                        '$thousandsNum', str(int(jobid) / 1000))
                    repositoryPath = os.path.join(repositoryPath, jobid)

                else:
                    repositoryPath = repositoryPath.replace(
                        '$thousandsNum', str(job.id / 1000))

                repositoryFullPath = os.path.join(
                    config.Configuration.gangadir, repositoryPath)
                indexFileSourcePath = os.path.join(
                    repositoryFullPath, indexFileName)
                repositoryFullPath = os.path.join(
                    repositoryFullPath, str(job.id))

                repositoryTargetPath = os.path.join(
                    fullLogDirName, 'repository', str(job.id))

                os.mkdir(os.path.join(fullLogDirName, 'repository'))

                shutil.copytree(repositoryFullPath, repositoryTargetPath)
                # data files are copied but can not be opened -> add .txt to
                # their file names
                renameDataFiles(repositoryTargetPath)

                if not is_subjob:
                    # copy .index file
                    indexFileTargetPath = os.path.join(
                        fullLogDirName, 'repository', indexFileName)
                    shutil.copyfile(indexFileSourcePath, indexFileTargetPath)

            # except OSError, IOError:
            except Exception as err:
                logger.debug("Err: %s" % err)
                writeErrorLog(str(sys.exc_info()[1]))

        # import task relevant info
        if (job is not None and isTask):
            # job is actually a task object
            task = job
            # create task folder
            taskFolder = 'task_%s' % task.id
            fullLogDirName = os.path.join(fullLogDirName, taskFolder)
            os.mkdir(fullLogDirName)

            # import task summary in a file
            fullTaskSummaryFileName = os.path.join(
                fullLogDirName, taskSummaryFileName)
            writeStringToFile(fullTaskSummaryFileName, str(task))

            # import task full print in a file
            fullTaskPrintFileName = os.path.join(
                fullLogDirName, taskFullPrintFileName)

            try:
                inputFile = open(fullTaskPrintFileName, 'w')
                try:
                    full_print(task, inputFile)
                except Exception as err:
                    logger.debug("Err: %s" % err)
                    raise err
                finally:
                    inputFile.close()
            # except IOError, OSError:
            except Exception as err:
                logger.debug("Err2: %s" % err)
                writeErrorLog(str(sys.exc_info()[1]))

            # copy shared area of the task
            try:
                if len(task.transforms) > 0:
                    if hasattr(task.transforms[0], 'application') and hasattr(task.transforms[0].application, 'is_prepared'):
                        if task.transforms[0].application.is_prepared is not None and task.transforms[0].application.is_prepared is not True:
                            import os
                            from Ganga.Utility.Config import getConfig
                            from Ganga.Utility.files import expandfilename
                            shared_path = os.path.join(expandfilename(getConfig(
                                'Configuration')['gangadir']), 'shared', getConfig('Configuration')['user'])
                            shareddir = os.path.join(
                                shared_path, task.transforms[0].application.is_prepared.name)
                            if os.path.isdir(shareddir):

                                sharedAreaDir = os.path.join(
                                    fullLogDirName, 'sharedarea')
                                shutil.copytree(shareddir, sharedAreaDir)
            # except IOError, OSError
            except Exception as err:
                logger.debug("Err: %s" % err)
                writeErrorLog(str(sys.exc_info()[1]))

            # copy repository task file
            try:
                indexFileName = str(task.id) + '.index'

                tasksRepositoryPath = tasksRepositoryPath.replace(
                    '$usr', os.getenv("USER"))
                tasksRepositoryPath = tasksRepositoryPath.replace(
                    '$thousandsNum', str(task.id / 1000))

                repositoryFullPath = os.path.join(
                    config.Configuration.gangadir, tasksRepositoryPath)
                indexFileSourcePath = os.path.join(
                    repositoryFullPath, indexFileName)
                repositoryFullPath = os.path.join(
                    repositoryFullPath, str(task.id))

                repositoryTargetPath = os.path.join(
                    fullLogDirName, 'repository', str(task.id))

                os.mkdir(os.path.join(fullLogDirName, 'repository'))

                shutil.copytree(repositoryFullPath, repositoryTargetPath)
                # data files are copied but can not be opened -> add .txt to
                # their file names
                renameDataFiles(repositoryTargetPath)

                # copy .index file
                indexFileTargetPath = os.path.join(
                    fullLogDirName, 'repository', indexFileName)
                shutil.copyfile(indexFileSourcePath, indexFileTargetPath)

            # except OSError, IOError:
            except Exception as err:
                logger.debug("Err %s" % err)
                writeErrorLog(str(sys.exc_info()[1]))

        # Copy thread stack trace file
        try:
            thread_trace_source_path = os.path.join(getConfig('Configuration')['gangadir'], thread_trace_file_name)
            thread_trace_target_path = os.path.join(fullLogDirName, thread_trace_file_name)
            shutil.copyfile(thread_trace_source_path, thread_trace_target_path)
        except (OSError, IOError) as err:
            logger.debug('Err %s', err)
            writeErrorLog(str(sys.exc_info()[1]))

        resultArchive = '%s.tar.gz' % folderToArchive

        try:
            resultFile = tarfile.TarFile.open(resultArchive, 'w:gz')
            try:
                resultFile.add(
                    folderToArchive, arcname=os.path.basename(folderToArchive))
                # put the error log in the archive
                if(os.path.exists(errorLogPath)):
                    resultFile.add(
                        errorLogPath, arcname=os.path.basename(errorLogPath))
            except Exception as err:
                logger.debug("Err: %s" % err)
                raise
            finally:
                resultFile.close()
        except Exception as err:
            logger.debug("Err2: %s" % err)
            raise  # pass

        # remove temp dir
        if(os.path.exists(folderToArchive)):
            shutil.rmtree(folderToArchive)

        # print the error if there is something
        if os.path.exists(errorLogPath):
            logger.error('')
            logger.error('An error occurred while collecting report information : ' + open(errorLogPath, 'r').read())
            logger.error('')

        # delete the errorfile from user's pc
        if(os.path.exists(errorLogPath)):
            os.remove(errorLogPath)

        # return the path to the archive and the path to the upload server
        return (resultArchive, uploadFileServer, tempDir)

    def removeTempFiles(tempDir):
        import shutil

        # remove temp dir
        if os.path.exists(tempDir):
            shutil.rmtree(tempDir)

        # remove temp files from the Django upload -> if a file is bigger than
        # 2.5 MB, Django internally stores it in a temp file during the upload
        userTempDir = '/tmp/'

        for fileName in os.listdir(userTempDir):
            if fileName.find('.upload') > -1:
                os.remove(os.path.join(userTempDir, fileName))

    tempDir = ''

    # call the report function
    try:
        isJob = isTask = False

        # make typecheck of the param passed
        if job is not None:
            from Ganga.GPIDev.Lib.Job.Job import Job
            from Ganga.GPIDev.Base.Proxy import stripProxy
            isJob = isinstance(stripProxy(job), Job)
            if hasattr(stripProxy(job), '_category') and (stripProxy(job)._category == 'tasks'):
                isTask = True

            if not (isJob or isTask):
                logger.error("report() function argument should be reference to a job or task object")
                return

        resultArchive, uploadFileServer, tempDir = report_inner(
            job, isJob, isTask)

        report_bytes = os.path.getsize(resultArchive)

        if report_bytes > 1024 * 1024 * 100:  # if bigger than 100MB
            logger.error(
                'The report is bigger than 100MB and can not be uploaded')
        else:
            run_upload(server=uploadFileServer, path=resultArchive)

    except Exception as err:
        logger.debug("Err: %s" % err)
        removeTempFiles(tempDir)
        raise  # pass
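
The datetime.datetime.now usage at the heart of this report is the timestamped directory name built in report_inner. A minimal standalone sketch of that pattern, assuming an illustrative base name and a fresh temporary directory rather than the real Ganga configuration:

import datetime
import os
import tempfile

# Build a uniquely named report directory by appending the current timestamp
# to a base name, as report_inner does above.
base_name = "userreport"      # illustrative base name
temp_dir = tempfile.mkdtemp()

now = datetime.datetime.now()
report_dir = os.path.join(temp_dir, base_name + now.strftime("%Y-%m-%d-%H:%M:%S"))
os.mkdir(report_dir)
print(report_dir)

Note that the colons in the %H:%M:%S part make such names awkward on Windows filesystems; a separator like %H-%M-%S is safer if portability matters.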

Example 94

Project: cmdbac Source File: basedeployer.py
    def save_attempt(self, attempt_result, driver_result = {}):
        LOG.info("Saving attempt ...")

        # flush log
        self.flush_log()

        # get info
        register_result = driver_result.get('register', USER_STATUS_UNKNOWN)
        login_result = driver_result.get('login', USER_STATUS_UNKNOWN)
        forms = driver_result.get('forms', None)
        urls = driver_result.get('urls', None)
        screenshot_path = driver_result.get('screenshot', None)
        statistics = driver_result.get('statistics', None)
        informations = driver_result.get('informations', None)

        # get runtime
        if self.runtime == None:
            self.runtime = self.get_runtime()
        Runtime.objects.get_or_create(executable = self.runtime['executable'], version = self.runtime['version'])
        runtime = Runtime.objects.get(executable = self.runtime['executable'], version = self.runtime['version'])

        # save attempt
        self.attempt.result = attempt_result
        self.attempt.login = login_result
        self.attempt.register = register_result
        self.attempt.stop_time = datetime.now()
        self.attempt.size = utils.get_size(self.base_path)
        self.attempt.runtime = runtime
        self.attempt.actions_count = 0
        self.attempt.queries_count = 0
        if forms == None and urls == None and self.attempt.result == ATTEMPT_STATUS_SUCCESS:
            self.attempt.result = ATTEMPT_STATUS_NO_QUERIES

        self.attempt.save()

        # save forms
        if forms != None:
            url_patterns = set()
            for f in forms:
                try:
                    if '/admin' in f['url']:
                        continue
                    url_pattern = re.sub('\d', '', f['url'])
                    if url_pattern in url_patterns:
                        continue
                    url_patterns.add(url_pattern)
                    action = Action()
                    action.url = f['url']
                    if f['method'] == '':
                        f['method'] = 'get'
                    action.method = f['method'].upper()
                    action.attempt = self.attempt
                    action.save()
                    self.attempt.actions_count += 1
                    for q in f['queries']:
                        try:
                            query = Query()
                            query.content = q['content']
                            query.matched = q['matched']
                            query.action = action
                            query.save()
                            self.attempt.queries_count += 1

                            if 'explain' in q:
                                explain = Explain()
                                explain.output = q['explain']
                                explain.query = query
                                explain.save()

                            if 'stats' in q:
                                metric = QueryMetric()
                                metric.name = 'stats'
                                metric.value = str(q['stats'])
                                metric.query = query
                                metric.save()
                        except:
                            pass
                    for input in f['inputs']:
                        field = Field()
                        field.name = input['name']
                        field.type = input['type']
                        field.action = action
                        field.save()
                    for description, count in f['counter'].iteritems():
                        counter = Counter()
                        counter.description = description
                        counter.count = count
                        counter.action = action
                        counter.save()
                except Exception, e:
                    LOG.exception(e)  

        # save urls
        if urls != None:
            url_patterns = set()
            for u in urls:
                try:
                    if '/admin' in u['url']:
                        continue
                    url_pattern = re.sub('\d', '', u['url'])
                    if url_pattern in url_patterns:
                        continue
                    url_patterns.add(url_pattern)
                    action = Action()
                    action.url = u['url']
                    action.method = 'GET'
                    action.attempt = self.attempt
                    action.save()
                    self.attempt.actions_count += 1
                    for q in u['queries']:
                        try:
                            query = Query()
                            query.content = q['content']
                            query.action = action
                            query.save()
                            self.attempt.queries_count += 1

                            if 'explain' in q:
                                explain = Explain()
                                explain.output = q['explain']
                                explain.query = query
                                explain.save()

                            if 'stats' in q:
                                metric = QueryMetric()
                                metric.name = 'stats'
                                metric.value = str(q['stats'])
                                metric.query = query
                                metric.save()
                        except:
                            pass
                    for description, count in u['counter'].iteritems():
                        counter = Counter()
                        counter.description = description
                        counter.count = count
                        counter.action = action
                        counter.save()
                except Exception, e:
                    LOG.exception(e)  

        # save screenshot
        if screenshot_path != None:
            screenshot = open(screenshot_path, 'rb')
            image = Image()
            image.data = screenshot.read()
            image.attempt = self.attempt
            image.save()

        # save statistics
        if statistics != None:
            for description, count in statistics.iteritems():
                statistic = Statistic()
                statistic.description = description
                statistic.count = count
                statistic.attempt = self.attempt
                statistic.save()

        # save informations
        if informations != None:
            for name, description in informations.iteritems():
                information = Information()
                information.name = name
                information.description = description
                information.attempt = self.attempt
                information.save()

        LOG.info("Saved Attempt #%s for %s" % (self.attempt, self.attempt.repo))
        
        # populate packages
        for pkg in self.packages_from_file:
            try:
                Dependency.objects.get_or_create(attempt=self.attempt, package=pkg, source=PACKAGE_SOURCE_FILE)
                pkg.count = pkg.count + 1
                pkg.save()
            except Exception, e:
                LOG.exception(e)  
        ## FOR
        for pkg in self.packages_from_database:
            try:
                Dependency.objects.get_or_create(attempt=self.attempt, package=pkg, source=PACKAGE_SOURCE_DATABASE)
                if pkg.version != '':
                    pkg.count = pkg.count + 1
                    pkg.save()
            except Exception, e:
                LOG.exception(e)
        ## FOR

        # make sure we update the repo to point to this latest attempt
        if attempt_result in [ATTEMPT_STATUS_MISSING_REQUIRED_FILES, ATTEMPT_STATUS_RUNNING_ERROR, ATTEMPT_STATUS_DOWNLOAD_ERROR]:
            self.repo.valid_project = False
        else:
            self.repo.valid_project = True
        self.repo.latest_attempt = self.attempt
        if self.attempt.result == ATTEMPT_STATUS_SUCCESS and self.attempt.queries_count == 0:
            self.attempt.result = ATTEMPT_STATUS_NO_QUERIES
        if self.attempt.result == ATTEMPT_STATUS_SUCCESS:
            self.repo.latest_successful_attempt = self.attempt
        self.repo.attempts_count = self.repo.attempts_count + 1
        self.repo.save()
        self.attempt.save()
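
In this example datetime.now() only stamps attempt.stop_time when the attempt finishes. A minimal sketch of the usual companion pattern, bracketing the work with two now() calls so the elapsed time is also available; the sleep below is a placeholder for the real deployment work:

import time
from datetime import datetime

# Record both when the work started and when it stopped, mirroring the
# stop_time assignment above and adding an elapsed-time measurement.
start_time = datetime.now()
time.sleep(0.1)                    # placeholder for the real deployment work
stop_time = datetime.now()

duration = stop_time - start_time  # a datetime.timedelta
print("attempt took %.3f seconds" % duration.total_seconds())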

Example 95

Project: faf Source File: mark_probably_fixed.py
    def run(self, cmdline, db):
        """
        Mark a problem probably fixed if there is a new build of the problem's
        affected package, for which no crash reports have come in.
        """

        try:
            tasks = self._get_tasks(cmdline, db)
        except FafError as ex:
            self.log_error("Unable to process command line arguments: {0}"
                           .format(str(ex)))
            return 1

        problems = get_problems(db)

        task_i = 0
        for osplugin, db_release in tasks:
            task_i += 1

            self.log_info("[{0} / {1}] Processing '{2} {3}'"
                          .format(task_i, len(tasks), osplugin.nice_name,
                                  db_release.version))

            self.log_debug("Getting builds...")
            opsys_builds = osplugin.get_released_builds(db_release.version)

            newest_builds = {}
            all_builds = {}
            now = datetime.now()
            for build in opsys_builds:
                age = now - build["completion_time"]
                # If a hot new build comes out, we need to wait a certain
                # period of time for people to use it before we can make
                # conclusions about it being a probable fix.
                if age.days >= osplugin.build_aging_days:
                    if build["name"] not in newest_builds:
                        newest_builds[build["name"]] = build

                    if build["name"] not in all_builds:
                        all_builds[build["name"]] = [build, ]
                    else:
                        all_builds[build["name"]].append(build)

            probably_fixed_total = 0
            problems_in_release = 0
            problem_counter = 0
            for problem in problems:
                problem_counter += 1
                self.log_debug("Processing problem ID:{0} {1}/{2}:"
                               .format(problem.id, problem_counter, len(problems)))
                affected_newest = {}
                affected_not_found = False

                reports_for_release =  \
                    get_reports_for_opsysrelease(db, problem.id, db_release.id)

                # For all the reports, we need the affected packages and their
                # newest versions.
                if len(reports_for_release) > 0:
                    problems_in_release += 1
                else:
                    self.log_debug(" This problem doesn't appear in this release.")
                    self._save_probable_fix(db, problem, db_release, None)
                    # Next problem
                    continue

                for report in reports_for_release:
                    # First we try to find the affected package among the known
                    # packages.
                    affected_known = [
                        (affected.build.base_package_name,
                         affected.build.epoch,
                         affected.build.version,
                         affected.build.release) for affected in
                        get_crashed_package_for_report(db, report.id)]

                    # Then among the unknown packages.
                    affected_unknown = \
                        get_crashed_unknown_package_nevr_for_report(db, report.id)
                    # We get the base package name directly from the report
                    affected_unknown = [(report.component.name,
                                         affected[1],
                                         affected[2],
                                         affected[3]) for affected in affected_unknown]

                    affected_all = affected_known + affected_unknown
                    if len(affected_all) == 0:
                        affected_not_found = True
                        break

                    for affected in affected_all:
                        if affected[0] in affected_newest:
                            # If a problem contains multiple reports with the same
                            # affected package, we only want the newest version of
                            # it.
                            affected_newest[affected[0]]['reports'].append(report)
                            if cmp_evr(affected[1:],
                                       affected_newest[affected[0]]['nevr'][1:]) > 0:
                                affected_newest[affected[0]]['nevr'] = affected
                        else:
                            affected_newest[affected[0]] = {
                                'reports': [report, ],
                                'nevr': affected
                            }

                if affected_not_found or len(affected_newest) == 0:
                    # Affected package of one of the reports was not found.
                    # We can't make any conclusions.
                    self.log_debug(" Affected package not found.")
                    self._save_probable_fix(db, problem, db_release, None)
                    # Next problem
                    continue

                if len(affected_newest) > 1:
                    # Multiple different affected packages => cannot be fixed
                    # by a single package update
                    self.log_debug(" Multiple affected packages. No simple fix.")
                    self._save_probable_fix(db, problem, db_release, None)
                    # Next problem
                    continue

                probably_fixed_since = datetime.fromtimestamp(0)

                pkg = affected_newest.values()[0]

                name = pkg['nevr'][0]
                newest_build = newest_builds.get(name, False)
                if newest_build:
                    newest_evr = (newest_build["epoch"] or 0,
                                  newest_build["version"],
                                  newest_build["release"])
                if newest_build and cmp_evr(newest_evr, pkg['nevr'][1:]) > 0:
                    # Newest available build is newer than the newest version
                    # of the affected package. Now find the oldest such
                    # probable fix.
                    i = 0
                    while i < len(all_builds[name]) and cmp_evr(
                        (all_builds[name][i]["epoch"] or 0,
                            all_builds[name][i]["version"],
                            all_builds[name][i]["release"]), pkg['nevr'][1:]) > 0:
                        i += 1
                    completion_time = all_builds[name][i-1]["completion_time"]
                    probably_fixed_since = max(completion_time,
                                               probably_fixed_since)
                    pkg["probable_fix"] = (name,
                                           all_builds[name][i-1]["epoch"] or 0,
                                           all_builds[name][i-1]["version"],
                                           all_builds[name][i-1]["release"])

                    self._save_probable_fix(db, problem, db_release,
                                            pkg["probable_fix"],
                                            probably_fixed_since)
                    self.log_debug("  Probably fixed for {0} days.".format(
                        (datetime.now() - probably_fixed_since).days))
                    probably_fixed_total += 1
                else:
                    self._save_probable_fix(db, problem, db_release, None)
                    self.log_debug("  Not fixed.")

            db.session.flush()
            if problems_in_release > 0:
                self.log_info("{0}% of problems in this release probably fixed.".format(
                    (probably_fixed_total * 100) / problems_in_release))
            else:
                self.log_info("No problems found in this release.")

Example 96

Project: hamster_experiments Source File: ui_demo.py
Function: init
    def __init__(self):
        now = dt.datetime.now()

        graphics.Scene.__init__(self)

        self.notebook = ui.Notebook(tab_position = "top-left", scroll_position="end", show_scroll = "auto_invisible", scroll_selects_tab = False)

        # boxes packed and nested horizontally and vertically, with a draggable corner
        self.box = ui.HBox(spacing = 3, x=10, y=10)
        self.button = ui.Button("My image changes position", image = graphics.Image("assets/hamster.png"), fill = False)
        self.button.connect("on-click", self.on_button_click)

        self.box.add_child(*[ui.VBox([self.button,
                                      ui.ToggleButton("I'm a toggle button! Have a tooltip too!", image = graphics.Image("assets/day.png"), fill = True, tooltip="Oh hey there, i'm a tooltip!"),
                                      ui.Label("I'm a label \nand we all can wrap", image = graphics.Image("assets/week.png"), spacing = 5, padding = 5, x_align = 0),
                                      ui.Entry("Feel free to edit me! I'm a rather long text that will scroll nicely perhaps. No guarantees though!", expand = False),
                                      ui.Entry("And me too perhaps", expand = False)],
                                     spacing = 5, padding = 10),
                             Rectangle(20, expand = False),
                             graphics.Label("rrrr", color="#666"),
                             Rectangle(20, expand = False),
                             ui.VBox([Rectangle(fill = False), Rectangle(), Rectangle()], spacing = 3)
                             ])


        box_w, box_h = self.box.get_min_size()
        self.corner = graphics.Rectangle(10, 10, fill="#666",
                                         x = self.box.x + box_w,
                                         y = self.box.y + box_h,
                                         draggable=True,
                                         interactive=True,
                                         z_order = 100)
        self.corner.connect("on-drag", self.on_corner_drag)


        # a table
        self.table = ui.Table(3, 3, snap_to_pixel = False, padding=10)
        self.table.attach(Rectangle(fill_color = "#f00", expand_vert = False), 0, 3, 0, 1) # top
        self.table.attach(Rectangle(fill_color = "#0f0", expand = False), 2, 3, 1, 2)      # right
        self.table.attach(Rectangle(fill_color = "#f0f", expand_vert = False), 0, 3, 2, 3) # bottom
        self.table.attach(Rectangle(fill_color = "#0ff", expand = False), 0, 1, 1, 2)      # left
        center = Rectangle()
        center.connect("on-mouse-over", self.on_table_mouse_over)
        center.connect("on-mouse-out", self.on_table_mouse_out)
        self.table.attach(center, 1, 2, 1, 2)


        # a scroll area with something to scroll in it
        self.scroll = ui.ScrollArea(border = 0)
        self.scroll.add_child(ui.Container(ui.Button("Scroll me if you can!", width = 1000, height = 300, fill=False), fill = False, padding=15))


        # bunch of different input elements
        inputs = ui.Panes(padding=10)
        listitem = ui.ListItem(["Sugar", "Spice", "Everything Nice", "--", "Feel",
                                "Free", "To", "Click", "On", "Me", {'markup': "<span color='red'>And</span>"},
                                "Use", "The", "Arrows!", "Ah", "And", "It", "Seems",
                                "That", "There", "Are", "So", "Many", "Elements"])

        def print_selection(listitem, item):
            print "selection", item

        def print_change(listitem, item):
            print "change", item

        listitem.connect("on-change", print_change)
        listitem.connect("on-select", print_selection)
        inputs.add_child(listitem)

        one = ui.ToggleButton("One", margin=[15, 10, 20, 30], id="one")

        group1 = ui.Group([one,
                           ui.ToggleButton("Two", scale_x = 0.5, scale_y = 0.5, expand=False, id="two"),
                           ui.ToggleButton("Three", id="three"),
                           ui.ToggleButton("Four", id="four")],
                          expand = False, allow_no_selection=True)
        label1 = ui.Label("Current value: none selected", x_align=0, expand = False)
        def on_toggle1(group, current_item):
            if current_item:
                label1.text = "Current value: %s" % current_item.label
            else:
                label1.text = "No item selected"
        group1.connect("on-change", on_toggle1)

        group2 = ui.Group([ui.RadioButton("One"),
                           ui.RadioButton("Two"),
                           ui.RadioButton("Three"),
                           ui.RadioButton("Four")],
                          horizontal = False)
        label2 = ui.Label("Current value: none selected", x_align = 0, expand=False)
        def on_toggle2(group, current_item):
            label2.text = "Current value: %s" % current_item.label
        group2.connect("on-change", on_toggle2)

        slider = ui.Slider(range(100),
                           expand = False,
                           snap_to_ticks = False,
                           range=True,
                           selection=(23, 80),
                           grips_can_cross = False,
                           snap_points = [5, 20, 50, 75],
                           snap_on_release = True)
        slider_value = ui.Label(" ")
        def on_slider_change(slider, value):
            slider_value.text = str(value)
        slider.connect("on_change", on_slider_change)

        spinner = ui.Spinner(active = False, expand=False, width = 40)
        spinner_button = ui.Button("Toggle spin", expand=False)
        spinner_button.spinner = spinner

        def on_spinner_button_click(button, event):
            button.spinner.active = not button.spinner.active
        spinner_button.connect("on-click", on_spinner_button_click)

        combo = ui.ComboBox(["Sugar", "Spice", "Everything Nice", "And", "Other", "Nice", "Things"],
                             open_below=True,
                             expand = False)
        inputs.add_child(ui.VBox([combo,
                                  group1, label1,
                                  ui.HBox([group2,
                                           ui.VBox([ui.CheckButton("And a few of those", expand = False),
                                                    ui.CheckButton("Check boxes", expand = False),
                                                    ui.CheckButton("Which don't work for groups", expand = False)])
                                          ]),
                                  label2,
                                  slider,
                                  slider_value,
                                  ui.HBox([spinner, spinner_button], expand=False, spacing = 10),
                                  ui.HBox([ui.ScrollArea(ui.Label(sample_text * 3, overflow = pango.WrapMode.WORD, fill=True, padding=[2, 5]), height=45, scroll_horizontal=False),
                                           ui.SpinButton(expand = False, fill=False)], expand = False),
                                  ],
                                 expand = False, spacing = 10))

        combo.rows = ["some", "things", "are", "made", "of", "bananas", "and", "icecream"]


        menu = ui.Menu([ui.MenuItem(label="One", menu=ui.Menu([ui.MenuItem(label="One one", menu=ui.Menu([ui.MenuItem(label="One one one"),
                                                                                                          ui.MenuItem(label="One one two"),
                                                                                                          ui.MenuSeparator(),
                                                                                                          ui.MenuItem(label="One one three")])),
                                                               ui.MenuSeparator(),
                                                               ui.MenuItem(label="One two", mnemonic="Ctrl+1"),
                                                               ui.MenuItem(label="One three", mnemonic="Alt+1")])),

                        ui.MenuItem(label="Two", menu=ui.Menu([ui.MenuItem(label="Two one", mnemonic="Ctrl+Alt+2"),
                                                               ui.MenuItem(label="Two two", mnemonic="Ctrl+2"),
                                                               ui.MenuSeparator(),
                                                               ui.MenuItem(label="Two three", mnemonic="Alt+2")])),

                        ui.MenuItem(label="Three", menu=ui.Menu([ui.MenuItem(label="Three one", mnemonic="Ctrl+Alt+3"),
                                                                 ui.MenuItem(label="Three two", mnemonic="Ctrl+3"),
                                                                 ui.MenuSeparator(),
                                                                 ui.MenuItem(label="Three three", mnemonic="Alt+3")])),
                        ui.MenuItem(label="Four", menu=ui.Menu([ui.MenuItem(label="Four one", mnemonic="Ctrl+Alt+4"),
                                                                ui.MenuItem(label="Four two", mnemonic="Ctrl+4"),
                                                                ui.MenuSeparator(),
                                                                ui.MenuItem(label="Four three", mnemonic="Alt+4")])),
                       ], horizontal=True)

        self.menu_selection_label = ui.Label("Pick a menu item!", expand = False, x_align = 1)
        def on_menuitem_selected(menu, item, event):
            self.menu_selection_label.text = item.label
        menu.connect("selected", on_menuitem_selected)

        # adding notebook and attaching pages
        self.notebook.add_page(ui.NotebookTab(image=graphics.Image("assets/day.png"), label="boxes", padding=[1,5]),
                               ui.Fixed([self.box, self.corner], x = 10, y = 10))
        self.notebook.add_page(ui.NotebookTab("Table", tooltip="Oh hey, i'm a table!"), self.table)
        self.notebook.add_page("Scroll Area", self.scroll)
        self.notebook.add_page("Input Elements", inputs)

        self.notebook.add_page("Menu", ui.VBox([menu, self.menu_selection_label,
                                                ui.HBox(ui.Menu([ui.MenuItem(label="", image = graphics.Image("assets/day.png"), submenu_offset_x = 0, submenu_offset_y = 0,
                                                                       menu=ui.Menu([ui.MenuItem(label="", image = graphics.Image("assets/month.png")),
                                                                                     ui.MenuItem(label="", image = graphics.Image("assets/hamster.png")),
                                                                                     ui.MenuSeparator(),
                                                                                     ui.MenuItem(label="", image = graphics.Image("assets/hamster.png")),
                                                                                     ui.MenuItem(label="", image = graphics.Image("assets/month.png"))], horizontal=True)),
                                                                 ui.MenuItem(label="", image = graphics.Image("assets/hamster.png"),submenu_offset_x = 0, submenu_offset_y = 0,
                                                                       menu=ui.Menu([ui.MenuItem(label="", image = graphics.Image("assets/month.png")),
                                                                                     ui.MenuItem(label="", image = graphics.Image("assets/month.png")),
                                                                                     ui.MenuItem(label="", image = graphics.Image("assets/week.png")),
                                                                                     ui.MenuSeparator(),
                                                                                     ui.MenuItem(label="", image = graphics.Image("assets/month.png"))], horizontal=True)),
                                                                 ui.MenuItem(label="", image = graphics.Image("assets/month.png"), submenu_offset_x = 0, submenu_offset_y = 0,
                                                                       menu=ui.Menu([ui.MenuItem(label="", image = graphics.Image("assets/week.png")),
                                                                                     ui.MenuItem(label="", image = graphics.Image("assets/week.png")),
                                                                                     ui.MenuSeparator(),
                                                                                     ui.MenuItem(label="", image = graphics.Image("assets/week.png")),
                                                                                     ui.MenuItem(label="", image = graphics.Image("assets/month.png"))], horizontal=True)),
                                                                ], horizontal=False, spacing=50, hide_on_leave = True, open_on_hover = 0.01), expand=False),
                                                ui.Box()], padding=10))



        self.slice_image = ui.Image('assets/slice9.png', fill=True, slice_left = 35, slice_right = 230, slice_top = 35, slice_bottom = 220)

        data = []
        image = graphics.Image("assets/day.png")
        for i in range(10):
            data.append(["aasdf asdfasdf asdfasdf", "basdfasdf asdfasdf asdfasdf", image, "rrr"])
            data.append(["1", "2", None, "rrr"])
            data.append(["4", "5", None, "rrr"])

        tree = ui.ListItem(data,
                           [ui.LabelRenderer(editable=True),
                            ui.LabelRenderer(editable=True),
                            ui.ImageRenderer(expand=False, width=90)],
                           headers=["Text", "More text", "An icon!"],
                           fixed_headers = False,
                           scroll_border = 0
                           )
        self.notebook.add_page("Tree View", tree)

        #tree.data[0][1] = "I was actually modified afterwards!"


        self.notebook.add_page("Accordion", ui.Accordion([
            ui.AccordionPage("I'm am the first in the row", [ui.Label(accordion_text, overflow = pango.WrapMode.WORD, padding=5)]),
            ui.AccordionPage("I'm am the first in the row", [ui.Label(accordion_text, overflow = pango.WrapMode.WORD, padding=5)]),
            ui.AccordionPage("I'm am the first in the row", [ui.Label(accordion_text, overflow = pango.WrapMode.WORD, padding=5)]),
            ui.AccordionPage("I'm am the first in the row", [ui.Label(accordion_text, overflow = pango.WrapMode.WORD, padding=5)]),
            ui.AccordionPage("I'm am the first in the row", [ui.Label(accordion_text, overflow = pango.WrapMode.WORD, padding=5)]),
            ui.AccordionPage("I'm am the first in the row", [ui.Label(accordion_text, overflow = pango.WrapMode.WORD, padding=5)]),
            ui.AccordionPage("I'm am the first in the row", [ui.Label(accordion_text, overflow = pango.WrapMode.WORD, padding=5)]),
            ui.AccordionPage("I'm am the first in the row", [ui.Label(accordion_text, overflow = pango.WrapMode.WORD, padding=5)]),
            ui.AccordionPage("I'm am the first in the row", [ui.Label(accordion_text, overflow = pango.WrapMode.WORD, padding=5)]),
            ui.AccordionPage("I'm am the first in the row", [ui.Label(accordion_text, overflow = pango.WrapMode.WORD, padding=5)]),
            ui.AccordionPage("I'm am the first in the row", [ui.Label(accordion_text, overflow = pango.WrapMode.WORD, padding=5)]),
            ui.AccordionPage("I'm different!", [
                ui.VBox([
                    ui.Button("I'm a button", fill=False, expand=False),
                    ui.Button("I'm another one", fill=False, expand=False),
                    ui.Group([
                        ui.ToggleButton("We"),
                        ui.ToggleButton("Are"),
                        ui.ToggleButton("Brothers"),
                        ui.ToggleButton("Radio Brothers"),
                    ], expand=False)
                ], expand=False)
            ]),
        ], padding_top = 1, padding_left = 1))

        from pie_menu import Menu
        pie_menu = Menu(0, 0)
        pie_menu.y_align = 0.45

        self.magic_box = ui.VBox([ui.HBox([ui.Button("Hello", expand=False),
                                           ui.Button("Thar", expand=False),
                                           ui.Label("Drag the white area around", x_align=1)], expand=False, padding=5),
                                  pie_menu], x=50, y=50, spacing=50, draggable=True)
        self.magic_box.width = 500
        self.magic_box.height = 400
        def just_fill():
            box = self.magic_box
            box.graphics.fill_area(0, 0, box.width, box.height, "#fefefe")
        self.magic_box.do_render = just_fill
        self.notebook.add_page("Ordinary Sprite", ui.Fixed(self.magic_box))

        for i in range(5):
            self.notebook.add_page("Tab %d" % i)


        self.notebook.current_page = 3


        # a little button to change tab orientation
        self.tab_orient_switch = ui.Button("Change tab attachment", expand=False, tooltip="change")
        self.tab_orient_switch.connect("on-click", self.on_tab_orient_click)

        self.page_disablist = ui.Button("Enable/Disable current tab", expand=False, tooltip="disable")
        self.page_disablist.connect("on-click", self.on_page_disablist_click)

        self.dialog_button = ui.Button("Show a dialog", expand=False, tooltip="show")
        self.dialog_button.connect("on-click", self.on_dialog_button_click)


        top_menu = ui.Menu([ui.MenuItem(label="One", menu=ui.Menu([ui.MenuItem(label="One one oh one oh one etc etc",
                                                                               menu=ui.Menu([ui.MenuItem(label="One one one"),
                                                                                    ui.MenuItem(label="One one two"),
                                                                                    ui.MenuItem(label="One one three")])),
                                                                   ui.MenuItem(label="One two"),
                                                                   ui.MenuItem(label="One three")])),
                            ui.MenuItem(label="Two", menu=ui.Menu([ui.MenuItem(label="Two one"),
                                                        ui.MenuItem(label="Two two"),
                                                        ui.MenuItem(label="Two three")])),
                            ui.MenuItem(label="Three", menu=ui.Menu([ui.MenuItem(label="Three one"),
                                                          ui.MenuItem(label="Three two"),
                                                          ui.MenuItem(label="Three three")])),
                            ui.MenuItem(label="Four", menu=ui.Menu([ui.MenuItem(label="Four one"),
                                                         ui.MenuItem(label="Four two"),
                                                         ui.MenuItem(label="Four three")])),
                            ui.MenuItem(label="Five")
                            ], horizontal=True, disable_toggling=True)


        # not sure how elegant but let's override the flow for now for demo purposes!
        dummy_flow = ui.Flow()
        def flow_resize():
            dummy_flow.alloc_w, dummy_flow.alloc_h = top_menu.alloc_w, top_menu.alloc_h
            dummy_flow.sprites = top_menu.sprites
            dummy_flow.resize_children()
            top_menu.height = top_menu.sprites[-1].y + top_menu.sprites[-1].height

        def flow_height_for_width_size():
            dummy_flow.alloc_w, dummy_flow.alloc_h = top_menu.alloc_w, top_menu.alloc_h
            dummy_flow.sprites = top_menu.sprites
            w, h = dummy_flow.get_height_for_width_size()
            return w, h

        def flow_min_size():
            dummy_flow.sprites = top_menu.sprites
            w, h = dummy_flow.get_min_size()
            return w+ top_menu.horizontal_padding, h  + top_menu.vertical_padding

        # flow is b0rken ATM
        for i in range(20):
            top_menu.add_child(ui.MenuItem(label="flow %d" % i))
        top_menu.resize_children = flow_resize
        #top_menu.get_height_for_width_size = flow_height_for_width_size
        top_menu.get_min_size = flow_min_size





        self.add_child(ui.VBox([top_menu, ui.VBox([self.notebook,
                                                   ui.HBox([self.tab_orient_switch,
                                                            self.page_disablist,
                                                            self.dialog_button], expand = False, fill=False, x_align=1),
                               ], padding=20, spacing=10)], spacing = 10))






        self.connect("on-click", self.on_click)

        self.notebook.after_tabs.add_child(ui.Button("Yohoho"))
        print dt.datetime.now() - now
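
The example above takes a `now = dt.datetime.now()` timestamp before building the notebook pages and prints the elapsed `timedelta` once construction finishes. A minimal, self-contained sketch of that timing pattern (the workload is only a placeholder for the UI setup, not part of the project):

import datetime as dt

now = dt.datetime.now()
sum(i * i for i in range(200000))     # placeholder for the widget construction being timed
print(dt.datetime.now() - now)        # prints a timedelta, e.g. 0:00:00.024813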

Example 97

Project: packet-manipulator Source File: sniff.py
Function: register_sniff_context
def register_sniff_context(BaseSniffContext):
    class SniffContext(BaseSniffContext):
        """
        A sniff context for controlling various options.
        """
        has_stop = True
        has_pause = False
        has_restart = True

        def __init__(self, *args, **kwargs):
            BaseSniffContext.__init__(self, *args, **kwargs)

            self.lock = Lock()
            self.prevtime = None
            self.socket = None
            self.internal = True
            self.process = None

            self.title = _('%s capture') % self.iface
            self.summary = _('Sniffing on %s') % self.iface
            self.thread = None
            self.priv = []

            self.audit_dispatcher = None

        @with_decorator
        def get_all_data(self):
            return BaseSniffContext.get_all_data(self)

        @with_decorator
        def get_data(self):
            return BaseSniffContext.get_data(self)

        @with_decorator
        def set_data(self, val):
            self.data = val

        def get_percentage(self):
            if self.state != self.RUNNING:
                return 100.0
            else:
                if self.stop_count or \
                   self.stop_time or \
                   self.stop_size or \
                   self.capmethod == 1:
                    return self.percentage
                else:
                    return None

        def _start(self):
            self.prevtime = datetime.now()

            if self.iface and self.capmethod == 0:
                try:
                    self.socket = conf.L2listen(type=ETH_P_ALL,
                                                iface=self.iface,
                                                filter=self.filter)

                    if self.audits:
                        try:
                            if self.socket.LL in conf.l2types.layer2num:
                                linktype = \
                                         conf.l2types.layer2num[self.socket.LL]
                            elif self.socket.LL in conf.l3types.layer2num:
                                linktype = \
                                         conf.l3types.layer2num[self.socket.LL]
                            else:
                                log.debug('Falling back to IL_TYPE_ETH as DL')
                                linktype = IL_TYPE_ETH
                        except:
                            try:
                                linktype = self.socket.ins.datalink()
                            except:
                                log.debug('It seems that we\'re using PF_PACKET'
                                          ' socket. Using IL_TYPE_ETH as DL')
                                linktype = IL_TYPE_ETH

                        self.audit_dispatcher = AuditDispatcher(linktype)

                except socket.error, (errno, err):
                    self.summary = str(err)
                    return False
                except Exception, err:
                    self.summary = str(err)
                    return False

            self.state = self.RUNNING
            self.internal = True
            self.data = []

            if self.capmethod == 0:
                self.thread = Thread(target=self.run)

            elif self.capmethod == 1 or \
                 self.capmethod == 2 or \
                 self.capmethod == 3:

                self.thread = Thread(target=self.run_helper)

            self.thread.setDaemon(True)
            self.thread.start()

            return True

        def _stop(self):
            if self.internal:
                self.internal = False

                if self.socket:
                    self.socket.close()

                # We have to kill the process directly from here because the
                # select call blocks (to avoid burning CPU).

                if self.process:
                    kill_helper(self.process)
                    self.process = None

                return True
            else:
                return False

        def _restart(self):
            if self.thread and self.thread.isAlive():
                return False

            # Ok reset the counters and begin a new sniff session
            self.tot_size = 0
            self.tot_time = 0
            self.tot_count = 0

            return self._start()

        def run_helper(self):
            """
            This is the main procedure for the thread that spawns an external
            process such as tcpdump (to avoid packet loss) and reads MetaPacket
            objects from the pcap file managed by that private process.
            """

            errstr = reader = None

            try:
                if self.capmethod == 1:
                    log.debug("I'm using virtual interface method")
                    outfile = self.cap_file
                else:
                    # Run tcpdump or dumpcap
                    self.process, outfile = run_helper(self.capmethod - 2,
                                                       self.iface,
                                                       self.filter,
                                                       self.stop_count,
                                                       self.stop_time,
                                                       self.stop_size)

                for reader in bind_reader(self.process, outfile):
                    if not self.internal:
                        break

                    if reader:
                        reader, outfile_size, position = reader

            except OSError, err:
                errstr = err.strerror
                self.internal = False
            except Exception, err:
                errstr = str(err)
                self.internal = False

            reported_packets = 0

            log.debug("Entering in the main loop")

            if self.audits:
                self.audit_dispatcher = AuditDispatcher(reader.linktype)

            while self.internal:

                if self.capmethod != 1:
                    report_idx = get_n_packets(self.process)

                    if report_idx < reported_packets:
                        continue

                while self.capmethod == 1 or reported_packets < report_idx:

                    pkt = reader.read_packet()

                    if not pkt:
                        break

                    pkt = MetaPacket(pkt)
                    packet_size = pkt.get_size()

                    if not pkt:
                        continue

                    if self.max_packet_size and \
                       packet_size - self.max_packet_size > 0:

                        log.debug("Skipping current packet (max_packet_size)")
                        continue

                    if self.min_packet_size and \
                       packet_size - self.min_packet_size < 0:

                        log.debug("Skipping current packet (min_packet_size)")
                        continue

                    self.tot_count += 1
                    self.tot_size += packet_size

                    now = datetime.now()
                    delta = now - self.prevtime
                    self.prevtime = now

                    if delta == abs(delta):
                        self.tot_time += delta.seconds

                    self.data.append(pkt)
                    reported_packets += 1

                    if self.audit_dispatcher:
                        self.audit_dispatcher.feed(pkt)

                    if self.callback:
                        self.callback(pkt, self.udata)

                    lst = []

                    # tcpdump and dumpcap offers this
                    if self.capmethod < 2 and self.stop_count:
                        lst.append(float(float(self.tot_count) /
                                         float(self.stop_count)))
                    # Only dumpcap here
                    if self.capmethod != 3 and self.stop_time:
                        lst.append(float(float(self.tot_time) /
                                         float(self.stop_time)))
                    if self.capmethod != 3 and self.stop_size:
                        lst.append(float(float(self.tot_size) /
                                         float(self.stop_size)))

                    if self.capmethod == 1:
                        lst.append(position() / outfile_size)

                    if lst:
                        self.percentage = float(float(sum(lst)) /
                                                float(len(lst))) * 100.0

                        if self.percentage >= 100:
                            self.internal = False
                    else:
                        # ((gobject.G_MAXINT / 4) % gobject.G_MAXINT)
                        self.percentage = (self.percentage + 536870911) % \
                                          gobject.G_MAXINT

                report_idx = reported_packets

            log.debug("Exiting from thread")

            if self.process:
                kill_helper(self.process)

            self.exit_from_thread(errstr)

        def run(self):
            errstr = None

            while self.internal and self.socket is not None:
                r = None
                inmask = [self.socket]

                try:
                    if WINDOWS:
                        try:
                            r = self.socket.recv(MTU)
                        except PcapTimeoutElapsed:
                            continue
                    else:
                        inp, out, err = select.select(inmask, inmask, inmask, None)
                        if self.socket in inp:
                            r = self.socket.recv(MTU)
                    if r is None:
                        continue

                    self.priv.append(r)
                except Exception, err:
                    # This is probably an exception raised when select() is run
                    # on an already closed socket (see also _stop), so avoid
                    # rethrowing it.

                    if self.internal:
                        errstr = str(err)

                    self.internal = False
                    self.socket = None
                    break

            self.exit_from_thread(errstr)

        def exit_from_thread(self, errstr=None):
            log.debug("Exiting from thread")

            self.priv = []

            self.state = self.NOT_RUNNING
            self.percentage = 100.0
            status = ""

            if self.tot_size >= 1024 ** 3:
                status = "%.1f GB/" % (self.tot_size / (1024.0 ** 3))
            elif self.tot_size >= 1024 ** 2:
                status = "%.1f MB/" % (self.tot_size / (1024.0 ** 2))
            else:
                status = "%.1f KB/" % (self.tot_size / (1024.0))

            if self.tot_time >= 60 ** 2:
                status += "%d h/" % (self.tot_time / (60 ** 2))
            elif self.tot_time >= 60:
                status += "%d m/" % (self.tot_time / 60)
            else:
                status += "%d s/" % (self.tot_time)

            status += "%d pks" % (self.tot_count)

            if errstr:
                self.summary = _('Error: %s (%s)') % (errstr, status)
            else:
                self.summary = _('Finished sniffing on %s (%s)') % (self.iface,
                                                                    status)

            if self.callback:
                self.callback(None, self.udata)

        def check_finished(self):
            if self.capmethod != 0:
                return

            priv = self.priv
            self.priv = []

            for r in priv:
                # This code should not run in the capture thread; it is called
                # from the GUI main thread so we can avoid packet loss. It's
                # better to keep a temporary list of raw packets captured by
                # socket.recv(MTU) and then join everything into self.data.

                packet = MetaPacket(r)
                packet_size = packet.get_size()

                if self.max_packet_size and \
                   packet_size - self.max_packet_size > 0:

                    log.debug("Skipping current packet (max_packet_size)")
                    continue

                if self.min_packet_size and \
                   packet_size - self.min_packet_size < 0:

                    log.debug("Skipping current packet (min_packet_size)")
                    continue

                self.tot_count += 1
                self.tot_size += packet.get_size()

                now = datetime.now()
                delta = now - self.prevtime
                self.prevtime = now

                if delta == abs(delta):
                    self.tot_time += delta.seconds

                self.data.append(packet)

                if self.audit_dispatcher:
                    self.audit_dispatcher.feed(packet)

                # FIXME: This probably should be moved inside the run() function
                if self.callback:
                    self.callback(packet, self.udata)

                lst = []

                if self.stop_count:
                    lst.append(float(float(self.tot_count) /
                                     float(self.stop_count)))
                if self.stop_time:
                    lst.append(float(float(self.tot_time) /
                                     float(self.stop_time)))
                if self.stop_size:
                    lst.append(float(float(self.tot_size) /
                                     float(self.stop_size)))

                if lst:
                    self.percentage = float(float(sum(lst)) /
                                            float(len(lst))) * 100.0

                    if self.percentage >= 100:
                        self.internal = False
                else:
                    # ((gobject.G_MAXINT / 4) % gobject.G_MAXINT)
                    self.percentage = (self.percentage + 536870911) % 2147483647

    return SniffContext
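
Both run_helper() and check_finished() above time the capture the same way: they keep the previous datetime.now() in prevtime, diff it against a fresh timestamp for each packet, and accumulate the whole seconds. A stand-alone sketch of that accumulation loop (the sleeps merely stand in for waiting on packets; names are illustrative):

from datetime import datetime
import time

prevtime = datetime.now()
tot_time = 0

for _ in range(2):
    time.sleep(1)                    # stand-in for blocking on the next captured packet
    now = datetime.now()
    delta = now - prevtime
    prevtime = now
    if delta == abs(delta):          # ignore a clock that stepped backwards
        tot_time += delta.seconds    # whole seconds only, mirroring the example

print("capture ran for roughly %d s" % tot_time)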

Example 98

Project: neckbeard Source File: up.py
@task
@notifies_hipchat(start_msg=UP_START_MSG, end_msg=UP_END_MSG)
@logs_duration(timer, output_result=True)
def up(
    environment_name,
    configuration_manager,
    resource_tracker,
    generation=ACTIVE,
):
    """
    Make sure that the instances for the specified generation are running and
    have current code. Will update code and deploy new EC2 and RDS instances as
    needed.
    """
    env._active_gen = True

    if generation == ACTIVE:
        # Always force the active generation in operation if possible
        make_operational = True

    with logs_duration(timer, timer_name='pre_deploy_validation'):
        # TODO: Make this an optional hook that can be registered
        git_conf = {}
        if git_conf.get('enable'):
            repo = _get_git_repo()

            # Force submodules to be updated
            # TODO: Make this an optional hook that can be registered
            with prompt_on_exception("Git submodule update failed"):
                repo.submodule_update(init=True, recursive=True)

            # Optionally require that we deploy from a tagged commit.
            if git_conf.get('require_tag', False):
                logger.info("Enforcing git tag requirement")
                if not _is_unchanged_from_head(repo):
                    logger.critical(
                        "Refusing to deploy, uncommitted changes exist.")
                    exit(1)
                if not _is_tagged_version(repo):
                    logger.critical(
                        "Refusing to deploy from an untagged commit.",
                    )
                    exit(1)
                _push_tags(repo)

        # TODO: Make this an optional hook that can be registered
        pagerduty_conf = {}
        if pagerduty_conf.get('temporarily_become_oncall', False):
            logger.info("Taking Pagerduty, temporarily")
            _take_temporary_pagerduty(
                duration=pagerduty_conf.get('temporary_oncall_duration'),
                api_key=pagerduty_conf.get('api_key'),
                user_id=pagerduty_conf.get('user_id'),
                project_subdomain=pagerduty_conf.get('project_subdomain'),
                schedule_key=pagerduty_conf.get('schedule_key'),
            )

    logger.info("Gathering deployment state")
    with logs_duration(timer, timer_name='gather deployment state'):
        environment_config = configuration_manager.get_environment_config(
            environment_name,
        )
        deployment = Deployment(
            environment_name,
            environment_config.get('ec2', {}),
            environment_config.get('rds', {}),
            environment_config.get('elb', {}),
        )
        # up never deals with old nodes, so just verify pending and active to
        # save HTTP round trips
        deployment.verify_deployment_state(verify_old=False)

    # Gather all of the configurations for each node, including their
    # seed deployment information
    logger.info("Gathering seed deployment state")
    with logs_duration(timer, timer_name='seed_deployment_state'):
        # If this environment has a seed environment, build that environment
        # manager
        seed_deployment = None
        seed_deployment_name = configuration_manager.get_seed_environment_name(
            environment_name,
        )
        if seed_deployment_name:
            seed_config = configuration_manager.get_environment_config(
                seed_deployment_name,
            )
            seed_deployment = Deployment(
                seed_deployment_name,
                seed_config.get('ec2', {}),
                seed_config.get('rds', {}),
                seed_config.get('elb', {}),
            )
            logger.info("Verifying seed deployment state")
            seed_deployment.verify_deployment_state(verify_old=False)

    # Build all of the deployment objects
    logger.info("Building Node deployers")
    with logs_duration(timer, timer_name='build deployers'):
        ec2_deployers = []
        rds_deployers = []

        # All rds and ec2 nodes, rds nodes first
        dep_confs = [
            (
                'rds',
                environment_config.get('ec2', {}),
            ),
            (
                'ec2',
                environment_config.get('rds', {}),
            ),
        ]

        for aws_type, node_confs in dep_confs:
            for node_name, conf in node_confs.items():
                # Get the seed deployment new instances will be copied from
                seed_node_name = None
                if seed_deployment and 'seed' in conf:
                    seed_node_name = conf['seed']['unique_id']
                    verify_seed_data = conf['seed_node'].get('verify', False)
                else:
                    logger.info("No seed node configured")
                    seed_node_name = None
                    verify_seed_data = False

                if aws_type == 'ec2':
                    klass = Ec2NodeDeployment
                elif aws_type == 'rds':
                    klass = RdsNodeDeployment

                deployer = klass(
                    deployment=deployment,
                    seed_deployment=seed_deployment,
                    is_active=env._active_gen,
                    aws_type=aws_type,
                    node_name=node_name,
                    seed_node_name=seed_node_name,
                    seed_verification=verify_seed_data,
                    brain_wrinkles=conf.get('brain_wrinkles', {}),
                    conf=conf,
                )

                if aws_type == 'ec2':
                    ec2_deployers.append(deployer)
                elif aws_type == 'rds':
                    rds_deployers.append(deployer)

    # We don't actually want to do deployments until we have tests
    assert False

    # Provision the RDS nodes
    with logs_duration(timer, timer_name='initial provision'):
        logger.info("Provisioning RDS nodes")
        for deployer in rds_deployers:
            if deployer.seed_verification and deployer.get_node() is None:
                _prompt_for_seed_verification(deployer)

            deployer.ensure_node_created()

        # Provision the EC2 nodes
        logger.info("Provisioning EC2 nodes")
        for deployer in ec2_deployers:
            if deployer.seed_verification and deployer.get_node() is None:
                _prompt_for_seed_verification(deployer)

            deployer.ensure_node_created()

    # Configure the RDS nodes
    logger.info("Configuring RDS nodes")
    with logs_duration(timer, timer_name='deploy rds'):
        for deployer in rds_deployers:
            deployer.run()

    logger.info("Determining EC2 node deploy priority")
    ec2_deployers = _order_ec2_deployers_by_priority(ec2_deployers)

    # Configure the EC2 nodes
    logger.info("Deploying to EC2 nodes")
    for deployer in ec2_deployers:
        timer_name = '%s deploy' % deployer.node_name
        with logs_duration(timer, timer_name='full %s' % timer_name):
            node = deployer.get_node()

            with seamless_modification(
                node,
                deployer.deployment,
                force_seamless=env._active_gen,
                make_operational_if_not_already=make_operational,
            ):
                pre_deploy_time = datetime.now()
                with logs_duration(
                    timer,
                    timer_name=timer_name,
                    output_result=True,
                ):
                    deployer.run()
            if DT_NOTIFY:
                _send_deployment_done_desktop_notification(
                    pre_deploy_time,
                    deployer,
                )

    _announce_deployment()

    time_logger.info("Timing Breakdown:")
    sorted_timers = sorted(
        timer.items(),
        key=lambda x: x[1],
        reverse=True,
    )
    for timer_name, duration in sorted_timers:
        time_logger.info("%02ds- %s", duration, timer_name)
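
In the deploy loop above, pre_deploy_time = datetime.now() is captured just before deployer.run() so the optional desktop notification can report when the deploy started and how long it took. A small illustrative sketch of that pattern (notify_done and the workload are placeholders, not Neckbeard APIs):

from datetime import datetime

def notify_done(started_at, label):
    # placeholder for the deployment-done desktop notification in the example
    print("%s done: started %s, took %s" % (label, started_at, datetime.now() - started_at))

pre_deploy_time = datetime.now()
sum(range(10 ** 6))                  # placeholder for deployer.run()
notify_done(pre_deploy_time, "web01 deploy")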

Example 99

Project: dns-lg Source File: __init__.py
    def query(self, start_response, req, path, client, format="", alt_resolver=None,
              do_dnssec=False, tcp=False, cd=False, edns_size=default_edns_size,
              reverse=False):
        """ path must starts with a /, then the domain name then an
        (optional) / followed by the QTYPE """
        if not path.startswith('/'):
            raise Exception("Internal error: no / at the beginning of %s" % path)
        plaintype = 'text/plain; charset=%s' % self.encoding
        if not format:
            mformat = req.accept.best_match(['text/html', 'application/xml',
                                            'application/json', 'text/dns',
                                            'text/plain'])
            if mformat == "text/html":
                format = "HTML"
            elif mformat == "application/xml":
                format = "XML"
            elif mformat == "application/json":
                format = "JSON"
            elif mformat == "text/dns":
                format = "ZONE"
            elif mformat == "text/plain":
                format = "TEXT"    
            if not mformat:
                output = "No suitable output format found\n" 
                send_response(start_response, '400 Bad request', output, plaintype)
                return [output]
            mtype = '%s; charset=%s' % (mformat, self.encoding)
        else:
            if format == "TEXT" or format == "TXT":
                format = "TEXT"
                mtype = 'text/plain; charset=%s' % self.encoding
            elif format == "HTML":
                mtype = 'text/html; charset=%s' % self.encoding
            elif format == "JSON":
                mtype = 'application/json'
            elif format == "ZONE":
                mtype = 'text/dns' # RFC 4027
            # TODO: application/dns, "detached" DNS (binary), see issue #20
            elif format == "XML":
                mtype = 'application/xml'
            else:
                output = "Unsupported format \"%s\"\n" % format
                send_response(start_response, '400 Bad request', output, plaintype)
                return [output]
        ip_client = netaddr.IPAddress(client)
        if ip_client.version == 4:
            ip_prefix = netaddr.IPNetwork(client + "/28")
        elif ip_client.version == 6:
            ip_prefix = netaddr.IPNetwork(client + "/64")
        else:
            output = "Unsupported address family \"%s\"\n" % ip_client.version
            send_response(start_response, '400 Unknown IP version', output, plaintype)
            return [output]
        if ip_client not in self.whitelist:
            if self.buckets.has_key(ip_prefix.cidr):
                if self.buckets[ip_prefix.cidr].full():
                    status = '429 Too many requests'
                    # 429 registered by RFC 6585 in april 2012
                    # http://www.iana.org/assignments/http-status-codes
                    # Already common
                    # http://www.flickr.com/photos/girliemac/6509400997/in/set-72157628409467125
                    output = "%s sent too many requests" % client # TODO: better message
                    send_response(start_response, status, output, plaintype)
                    return [output]
                else:
                    self.buckets[ip_prefix.cidr].add(1)
            else:
                self.buckets[ip_prefix.cidr] = LeakyBucket(size=self.bucket_size)
        args = path[1:]
        slashpos = args.find('/')
        if slashpos == -1:
            if reverse:
                domain = str(dns.reversename.from_address(args))
                qtype = 'PTR'
            else:
                domain = args
                qtype = 'ADDR'
            qclass = 'IN'
        else:
            if reverse:
                domain = str(dns.reversename.from_address(args[:slashpos]))
            else:
                domain = args[:slashpos]
            nextslashpos = args.find('/', slashpos+1)
            if nextslashpos == -1:
                requested_qtype = args[slashpos+1:].upper()
                qclass = 'IN'
            else:
                requested_qtype = args[slashpos+1:nextslashpos].upper()
                qclass = args[nextslashpos+1:].upper()
            # We do not test if the QTYPE exists. If it doesn't
            # dnspython will raise an exception. The formatter will
            # have to deal with the various records.
            if requested_qtype == "":
                if reverse:
                    qtype = 'PTR'
                else:
                    qtype = 'ADDR'
            else:
                qtype = requested_qtype
            if reverse and qtype != 'PTR':
                output = "You cannot ask for a query type other than PTR with reverse queries\n" 
                send_response(start_response, '400 Bad qtype with reverse',
                              output, plaintype)
                return [output]
            # Pseudo-qtype ADDR is handled specially later
        if not domain.endswith('.'):
            domain += '.'
        if domain == 'root.':
            domain = '.'
        domain = unicode(domain, self.encoding)
        for forbidden in self.forbidden_suffixes:
            if domain.endswith(forbidden):
                output = "You cannot query local domain %s" % forbidden
                send_response(start_response, '403 Local domain is private',
                              output, plaintype)
                return [output]
        punycode_domain = punycode_of(domain)
        if punycode_domain != domain:
            qdomain = punycode_domain.encode("US-ASCII")
        else:
            qdomain = domain.encode("US-ASCII")
        try:
            if format == "HTML":
                formatter = Formatter.HtmlFormatter(domain)
            elif format == "TEXT":
                formatter = Formatter.TextFormatter(domain)
            elif format == "JSON":
                formatter = Formatter.JsonFormatter(domain)
            elif format == "ZONE":
                formatter = Formatter.ZoneFormatter(domain)
            elif format == "XML":
                formatter = Formatter.XmlFormatter(domain)
            self.resolver.reset()
            if edns_size is None:
                self.resolver.set_edns(version=-1)
            else:
                if do_dnssec:
                    self.resolver.set_edns(payload=edns_size, dnssec=True)
                else:
                    self.resolver.set_edns(payload=edns_size)
            if alt_resolver:
                self.resolver.set_nameservers([alt_resolver,])
            query_start = datetime.now()
            if qtype != "ADDR":
                answer = self.resolver.query(qdomain, qtype, qclass, tcp=tcp, cd=cd)
            else:
                try:
                    answer = self.resolver.query(qdomain, "A", tcp=tcp, cd=cd)
                except dns.resolver.NoAnswer: 
                    answer = None
                try:
                    answer_bis = self.resolver.query(qdomain, "AAAA", tcp=tcp, cd=cd)
                    if answer_bis is not None:
                        for rrset in answer_bis.answer:
                            answer.answer.append(rrset)
                except dns.resolver.NoAnswer: 
                    pass  
                # TODO: what if flags are different with A and AAAA? (Should not happen)
                if answer is None:
                    query_end = datetime.now()
                    self.delay = query_end - query_start
                    formatter.format(None, qtype, qclass, 0, self)
                    output = formatter.result(self)
                    send_response(start_response, '200 OK', output, mtype)
                    return [output]
            query_end = datetime.now()
            self.delay = query_end - query_start
            formatter.format(answer, qtype, qclass, answer.flags, self)
            output = formatter.result(self)
            send_response(start_response, '200 OK', output, mtype)
        except Resolver.UnknownRRtype:
            output = "Record type %s does not exist\n" % qtype
            output = output.encode(self.encoding)
            send_response(start_response, '400 Unknown record type', output, 
                          plaintype)
        except Resolver.UnknownClass:
            output = "Class %s does not exist\n" % qclass
            output = output.encode(self.encoding)
            send_response(start_response, '400 Unknown class', output, 
                          plaintype)
        except Resolver.NoSuchDomainName:
            output = "Domain %s does not exist\n" % domain
            output = output.encode(self.encoding)
            # TODO send back in the requested format (see issue #11)
            send_response(start_response, '404 No such domain', output, plaintype)
        except Resolver.Refused:
            output = "Refusal to answer for all name servers for %s\n" % domain
            output = output.encode(self.encoding)
            send_response(start_response, '403 Refused', output, plaintype)
        except Resolver.Servfail:
            output = "Server failure for all name servers for %s (may be a DNSSEC validation error)\n" % domain
            output = output.encode(self.encoding)
            send_response(start_response, '504 Servfail', output, plaintype)
        except Resolver.Timeout: 
            output = "No server replies for domain %s\n" % domain
            output = output.encode(self.encoding)
            # TODO issue #11. In that case, do not serialize output.
            send_response(start_response, '504 Timeout', output,
                          "text/plain")
        except Resolver.NoPositiveAnswer: 
            output = "No server replies for domain %s\n" % domain
            output = output.encode(self.encoding)
            # TODO issue #11
            send_response(start_response, '504 No positive answer', output,
                          "text/plain")
        except Resolver.UnknownError as code:
            output = "Unknown error %s resolving %s\n" % (dns.rcode.to_text(int(str(code))), domain)
            output = output.encode(self.encoding)
            # TODO issue #11
            send_response(start_response, '500 Unknown server error', output, plaintype)
        return [output]
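
The handler above brackets the actual DNS lookup with two timestamps and stores the difference in self.delay so the formatter can report the query time. A minimal sketch of that bracketing (the resolve function is a placeholder for self.resolver.query, and the address is documentation-range only):

from datetime import datetime

def resolve(name):
    # placeholder for self.resolver.query(qdomain, qtype, qclass, ...)
    return ["192.0.2.1"]

query_start = datetime.now()
answer = resolve("example.org")
query_end = datetime.now()
delay = query_end - query_start      # a timedelta reported alongside the answer
print(answer, delay)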

Example 100

Project: mycli Source File: main.py
    def run_cli(self):
        sqlexecute = self.sqlexecute
        logger = self.logger
        self.configure_pager()

        self.refresh_completions()

        project_root = os.path.dirname(PACKAGE_ROOT)
        author_file = os.path.join(project_root, 'AUTHORS')
        sponsor_file = os.path.join(project_root, 'SPONSORS')

        key_binding_manager = mycli_bindings()

        if not self.less_chatty:
            print('Version:', __version__)
            print('Chat: https://gitter.im/dbcli/mycli')
            print('Mail: https://groups.google.com/forum/#!forum/mycli-users')
            print('Home: http://mycli.net')
            print('Thanks to the contributor -', thanks_picker([author_file, sponsor_file]))

        def prompt_tokens(cli):
            return [(Token.Prompt, self.get_prompt(self.prompt_format))]

        def get_continuation_tokens(cli, width):
            continuation_prompt = self.get_prompt(self.prompt_continuation_format)
            return [(Token.Continuation, ' ' * (width - len(continuation_prompt)) + continuation_prompt)]

        get_toolbar_tokens = create_toolbar_tokens_func(self.completion_refresher.is_refreshing)

        layout = create_prompt_layout(lexer=MyCliLexer,
                                      multiline=True,
                                      get_prompt_tokens=prompt_tokens,
                                      get_continuation_tokens=get_continuation_tokens,
                                      get_bottom_toolbar_tokens=get_toolbar_tokens,
                                      display_completions_in_columns=self.wider_completion_menu,
                                      extra_input_processors=[
                                          ConditionalProcessor(
                                              processor=HighlightMatchingBracketProcessor(chars='[](){}'),
                                              filter=HasFocus(DEFAULT_BUFFER) & ~IsDone()),
                                      ])
        with self._completer_lock:
            buf = CLIBuffer(always_multiline=self.multi_line, completer=self.completer,
                    history=FileHistory(os.path.expanduser(os.environ.get('MYCLI_HISTFILE', '~/.mycli-history'))),
                    complete_while_typing=Always(), accept_action=AcceptAction.RETURN_DOCUMENT)

            if self.key_bindings == 'vi':
                editing_mode = EditingMode.VI
            else:
                editing_mode = EditingMode.EMACS

            application = Application(style=style_factory(self.syntax_style, self.cli_style),
                                      layout=layout, buffer=buf,
                                      key_bindings_registry=key_binding_manager.registry,
                                      on_exit=AbortAction.RAISE_EXCEPTION,
                                      on_abort=AbortAction.RETRY,
                                      editing_mode=editing_mode,
                                      ignore_case=True)
            self.cli = CommandLineInterface(application=application,
                                       eventloop=create_eventloop())

        try:
            while True:
                document = self.cli.run(reset_current_buffer=True)

                special.set_expanded_output(False)

                # The reason we check here instead of inside the sqlexecute is
                # because we want to raise the Exit exception which will be
                # caught by the try/except block that wraps the
                # sqlexecute.run() statement.
                if quit_command(document.text):
                    raise EOFError

                try:
                    document = self.handle_editor_command(self.cli, document)
                except RuntimeError as e:
                    logger.error("sql: %r, error: %r", docuement.text, e)
                    logger.error("traceback: %r", traceback.format_exc())
                    self.output(str(e), err=True, fg='red')
                    continue
                if self.destructive_warning:
                    destroy = confirm_destructive_query(document.text)
                    if destroy is None:
                        pass  # Query was not destructive. Nothing to do here.
                    elif destroy is True:
                        self.output('Your call!')
                    else:
                        self.output('Wise choice!')
                        continue

                # Keep track of whether or not the query is mutating. In case
                # of a multi-statement query, the overall query is considered
                # mutating if any one of the component statements is mutating
                mutating = False

                try:
                    logger.debug('sql: %r', document.text)

                    if self.logfile:
                        self.logfile.write('\n# %s\n' % datetime.now())
                        self.logfile.write(document.text)
                        self.logfile.write('\n')

                    successful = False
                    start = time()
                    res = sqlexecute.run(document.text)
                    successful = True
                    output = []
                    total = 0
                    for title, cur, headers, status in res:
                        logger.debug("headers: %r", headers)
                        logger.debug("rows: %r", cur)
                        logger.debug("status: %r", status)
                        threshold = 1000
                        if (is_select(status) and
                                cur and cur.rowcount > threshold):
                            self.output('The result set has more than %s rows.'
                                    % threshold, fg='red')
                            if not click.confirm('Do you want to continue?'):
                                self.output("Aborted!", err=True, fg='red')
                                break

                        if self.auto_vertical_output:
                            max_width = self.cli.output.get_size().columns
                        else:
                            max_width = None

                        formatted = format_output(title, cur, headers,
                            status, self.table_format,
                            special.is_expanded_output(), max_width)

                        output.extend(formatted)
                        end = time()
                        total += end - start
                        mutating = mutating or is_mutating(status)
                except UnicodeDecodeError as e:
                    import pymysql
                    if pymysql.VERSION < (0, 6, 7):
                        message = ('You are running an older version of pymysql.\n'
                                'Please upgrade to 0.6.7 or above to view binary data.\n'
                                'Try \'pip install -U pymysql\'.')
                        self.output(message)
                    else:
                        raise e
                except KeyboardInterrupt:
                    # Restart connection to the database
                    sqlexecute.connect()
                    logger.debug("cancelled query, sql: %r", docuement.text)
                    self.output("cancelled query", err=True, fg='red')
                except NotImplementedError:
                    self.output('Not Yet Implemented.', fg="yellow")
                except OperationalError as e:
                    logger.debug("Exception: %r", e)
                    reconnect = True
                    if (e.args[0] in (2003, 2006, 2013)):
                        reconnect = click.prompt('Connection reset. Reconnect (Y/n)',
                                show_default=False, type=bool, default=True)
                        if reconnect:
                            logger.debug('Attempting to reconnect.')
                            try:
                                sqlexecute.connect()
                                logger.debug('Reconnected successfully.')
                                self.output('Reconnected!\nTry the command again.', fg='green')
                            except OperationalError as e:
                                logger.debug('Reconnect failed. e: %r', e)
                                self.output(str(e), err=True, fg='red')
                                continue  # If reconnection failed, don't proceed further.
                        else:  # If user chooses not to reconnect, don't proceed further.
                            continue
                    else:
                        logger.error("sql: %r, error: %r", docuement.text, e)
                        logger.error("traceback: %r", traceback.format_exc())
                        self.output(str(e), err=True, fg='red')
                except Exception as e:
                    logger.error("sql: %r, error: %r", docuement.text, e)
                    logger.error("traceback: %r", traceback.format_exc())
                    self.output(str(e), err=True, fg='red')
                else:
                    try:
                        if special.is_pager_enabled():
                            self.output_via_pager('\n'.join(output))
                        else:
                            self.output('\n'.join(output))
                    except KeyboardInterrupt:
                        pass
                    if special.is_timing_enabled():
                        self.output('Time: %0.03fs' % total)

                    # Refresh the table names and column names if necessary.
                    if need_completion_refresh(document.text):
                        self.refresh_completions(
                                reset=need_completion_reset(document.text))
                finally:
                    if self.logfile is False:
                        self.output("Warning: This query was not logged.", err=True, fg='red')
                query = Query(document.text, successful, mutating)
                self.query_history.append(query)

        except EOFError:
            if not self.less_chatty:
                self.output('Goodbye!')
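
Before executing each statement, the loop above stamps the log file with datetime.now() as a comment line and then appends the SQL text. A stand-alone sketch of that logging step (the file name and statement are illustrative):

from datetime import datetime

statement = "SELECT 1;"
with open("mycli_session.log", "a") as logfile:
    logfile.write("\n# %s\n" % datetime.now())   # timestamp comment, as in the example
    logfile.write(statement)
    logfile.write("\n")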