datetime.datetime.today

Here are examples of the Python API datetime.datetime.today taken from open source projects.

170 Examples
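
Before the project examples, here is a minimal sketch of the call itself (variable names are illustrative): datetime.datetime.today() returns the current local date and time as a naive datetime object, and the examples below mostly either format that value with strftime or shift it with datetime.timedelta.

import datetime

today = datetime.datetime.today()        # current local date and time, no tzinfo
print(today.strftime("%Y-%m-%d"))        # format as a date string, e.g. '2016-05-01'

yesterday = today - datetime.timedelta(days=1)   # date arithmetic with timedelta
tomorrow = today + datetime.timedelta(days=1)
print(yesterday < today < tomorrow)       # True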

Example 1

Project: karaage Source File: test_xmlrpc.py
    def test_parse_usage_alogger(self):
        server = self.server
        today = datetime.datetime.today()
        today = today.strftime("%Y-%m-%d")

        module_file = alogger.tests.examples.__file__
        directory = os.path.abspath(os.path.split(module_file)[0])
        path = os.path.join(directory, "torque.log")
        fd = open(path, "r")
        lines = fd.readlines()
        fd.close()

        mc = MachineCategory.objects.get(name="Default")
        proj = Project.objects.get(pid="TestProject1")
        i = Institute.objects.get(name="Example")

        proj2 = Project.objects.create(pid="pMona0041", institute=i)

        p = Person.objects.create(
            username="blair", short_name="B", full_name="Blair",
            institute=i)
        proj.group.members.add(p)
        Account.objects.create(
            username="blair", person=p, machine_category=mc,
            default_project=proj,
            date_created=datetime.datetime.today())

        p = Person.objects.create(
            username="philipn", short_name="Phil", full_name="Phillip",
            institute=i)
        proj.group.members.add(p)
        proj2.group.members.add(p)
        Account.objects.create(
            username="philipn", person=p, machine_category=mc,
            default_project=proj,
            date_created=datetime.datetime.today())

        json_array = []

        parser = get_parser("TORQUE")
        for line in lines:
            d = parser.line_to_dict(line)
            if d is None:
                continue

            json_array.append(json.dumps(d))

        result = server.parse_usage(
            "tango", "aq12ws",
            json_array, today, 'tango', 'alogger')
        self.assertEqual(
            result[0],
            'Inserted : 16\nUpdated  : 0\nFailed   : 6\nSkiped   : 0')

Example 2

Project: pittsburgh-purchasing-suite Source File: test_scout_search.py
Function: set_up
    def setUp(self):
        from flask_migrate import upgrade
        upgrade()

        # insert the users/roles
        self.admin_role = get_a_role('admin')
        self.superadmin_role = get_a_role('superadmin')
        self.admin_user = insert_a_user(email='[email protected]', role=self.admin_role)
        self.superadmin_user = insert_a_user(email='[email protected]', role=self.superadmin_role)

        # insert the companies/contracts
        self.company_1 = insert_a_company(name='ship', insert_contract=False)
        company_2 = insert_a_company(name='boat', insert_contract=False)

        contract_type = ContractTypeFactory.create(name='test')
        self.contract_type2 = ContractTypeFactory.create(name='test2')

        self.contract1 = ContractBaseFactory.create(
            description='vessel', companies=[company_2], line_items=[LineItem(description='NAVY')],
            expiration_date=datetime.datetime.today() + datetime.timedelta(1), is_archived=False,
            financial_id='123', contract_type=contract_type
        )
        ContractBaseFactory.create(
            description='sail', financial_id='456', companies=[self.company_1],
            line_items=[LineItem(description='sunfish')], is_archived=False,
            expiration_date=datetime.datetime.today() + datetime.timedelta(1),
            contract_type=contract_type
        )
        ContractBaseFactory.create(
            description='sunfish', financial_id='789',
            properties=[ContractPropertyFactory.create(key='foo', value='engine')],
            expiration_date=datetime.datetime.today() + datetime.timedelta(1), is_archived=False,
            contract_type=contract_type
        )
        ContractBaseFactory.create(
            description='sunfish', financial_id='012',
            properties=[ContractPropertyFactory.create(key='foo', value='engine')],
            expiration_date=datetime.datetime.today() - datetime.timedelta(1), is_archived=False,
            contract_type=self.contract_type2
        )

        # db.session.execute('''
        #     REFRESH MATERIALIZED VIEW CONCURRENTLY search_view
        # ''')
        db.session.commit()

Example 3

Project: django-classifieds Source File: cron.py
def run():
    yesterday = datetime.datetime.today() - datetime.timedelta(days=NOTICE_POSTING_NEW)
    postings = Ad.objects.filter(created_on__gt=yesterday)

    # get subscriber list
    subscribers = User.objects.filter(userprofile__receives_new_posting_notices=True)

    emails = []

    for subscriber in subscribers:
        # 1. render context to email template
        email_template = loader.get_template('classifieds/email/newpostings.txt')
        context = Context({'postings': postings, 'user': subscriber,
                           'site': Site.objects.get_current()})
        email_contents = email_template.render(context)
        emails.append((_('New ads posted on ') + Site.objects.get_current().name,
                       email_contents,
                       FROM_EMAIL,
                       [subscriber.email],))

    # 2. send emails
    send_mass_mail(emails)

    tomorrow = datetime.datetime.today() + datetime.timedelta(days=NOTICE_POSTING_EXPIRES)
    expiring_postings = Ad.objects.filter(expires_on__lt=tomorrow)
    emails = []

    for posting in expiring_postings:
        # 1. render context to email template
        email_template = loader.get_template('classifieds/email/expiring.txt')
        context = Context({'posting': posting, 'user': posting.user,
                           'site': Site.objects.get_current()})
        email_contents = email_template.render(context)
        emails.append((_('Your ad on ') + Site.objects.get_current().name + _(' is about to expire.'),
                       email_contents,
                       FROM_EMAIL,
                       [posting.user.email],))

    # 2. send emails
    send_mass_mail(emails)

    # delete old ads
    yesterday = datetime.datetime.today() - datetime.timedelta(days=NOTICE_POSTING_EXPIRES)
    Ad.objects.filter(expires_on__lt=yesterday).delete()

Example 4

Project: satchmo Source File: listeners.py
def add_toolbar_context(sender, context={}, **kwargs):
    user = threadlocals.get_current_user()
    if user and user.is_staff:
        request_path = context['request'].META['PATH_INFO']
        slug = request_path.split('/')[-2]
        total_sales = 0
        show_sales = False
        variation_items = []
        try:
            product = Product.objects.get(slug=slug)
            show_sales = True
            subtypes = product.get_subtypes()
            if 'ConfigurableProduct' in subtypes:
                variation_items, total_sales = _get_all_variations(product)
            else:
                total_sales = product.total_sold
                
        except:
            pass
            
        st = {}
        st['st_satchmo_version'] = get_version()
        newq = Order.objects.filter(status__exact = 'New')
        st['st_new_order_ct'] = newq.count()
        amounts = newq.values_list('total', flat=True)
        if amounts:
            newtotal = reduce(operator.add, amounts)
        else:
            newtotal = 0
        st['st_new_order_total'] = newtotal
        st['st_total_sold'] = total_sales
        st['st_show_sales'] = show_sales
        st['st_variations'] = variation_items
        week = datetime.datetime.today()-datetime.timedelta(days=7)
        day = datetime.datetime.today()-datetime.timedelta(days=1)
        hours = datetime.datetime.today()-datetime.timedelta(hours=1)
        cartweekq = Cart.objects.filter(date_time_created__gte=week)
        cartdayq = Cart.objects.filter(date_time_created__gte=day)
        carthourq = Cart.objects.filter(date_time_created__gte=hours)
        st['st_cart_7d_ct'] = cartweekq.count()
        st['st_cart_1d_ct'] = cartdayq.count()
        st['st_cart_1h_ct'] = carthourq.count()
        
        st['st_contacts_ct'] = Contact.objects.all().count()
        st['st_contacts_7d_ct'] = Contact.objects.filter(create_date__gte=week).count()
        # edits = []
        # st['st_edits'] = edits        
        
        context.update(st)

Example 5

Project: django-locksmith Source File: views.py
@staff_required
def analytics_index(request,
                    keys_issued_display='chart', keys_issued_interval='yearly',
                    api_calls_display='chart'):
    ignore_internal_keys = request.GET.get('ignore_internal_keys', True)
    ignore_deprecated_apis = request.GET.get('ignore_deprecated_apis', True)
    ignore_inactive_keys = request.GET.get('ignore_inactive_keys', True)

    new_users = Key.objects.filter(issued_on__gte=(datetime.datetime.today()+datetime.timedelta(days=-14))).order_by('-issued_on')

    six_month = Key.objects.filter(issued_on__gte=(datetime.datetime.today()+datetime.timedelta(days=-4, weeks=-24)), issued_on__lte=(datetime.datetime.today()+datetime.timedelta(days=3, weeks=-24))).order_by('-issued_on')
    six_month_stats = []

    for sm in six_month:
        six_month_stats.append((sm, Report.objects.filter(key_id=sm.id).aggregate(Sum('calls'))['calls__sum'] ))

    six_month = sorted(six_month_stats, key=lambda tup: tup[1], reverse=True)

    apis = Api.objects.order_by('display_name')
    active_key_footnote="A key is considered active if it has at least {} calls in the month.".format(settings.LOCKSMITH_KEY_ACTIVITY_THRESHOLD)

    options = {
        'ignore_internal_keys': ignore_internal_keys,
        'ignore_deprecated_apis': ignore_deprecated_apis,
        'ignore_inactive_keys': ignore_inactive_keys,
        'api_calls_display': api_calls_display,
        'keys_issued_display': keys_issued_display,
        'keys_issued_interval': keys_issued_interval
    }
    ctx = {
        'options': options,
        'json_options': json.dumps(options),
        'new_users': new_users,
        'six_month': six_month,
        'active_key_footnote': active_key_footnote,
        'apis': apis,
        'LOCKSMITH_BASE_TEMPLATE': settings.LOCKSMITH_BASE_TEMPLATE
    }
    template = getattr(settings,
                       'LOCKSMITH_ANALYTICS_INDEX_TEMPLATE',
                       'locksmith/analytics_index.html')
    return render(request, template, ctx)

Example 6

Project: DataPillager Source File: DataServicePillager.py
def main():
    global count_tries
    global max_tries
    global sleep_time

    start_time = datetime.datetime.today()

    try:
        # arcgis toolbox parameters
        service_endpoint = arcpy.GetParameterAsText(0) # Service endpoint required
        output_workspace = arcpy.GetParameterAsText(1) # gdb/folder to put the results required
        max_tries = arcpy.GetParameter(2) # max number of retries allowed required
        sleep_time = arcpy.GetParameter(3) # sleep time between retries required
        strict_mode = arcpy.GetParameter(4) # JSON check True/False required
        username = arcpy.GetParameterAsText(5)
        password = arcpy.GetParameterAsText(6)
        referring_domain = arcpy.GetParameterAsText(7) # auth domain
        existing_token = arcpy.GetParameterAsText(8) # valid token value

        # to query by geometry need [xmin,ymin,xmax,ymax], spatial reference, and geometryType (eg esriGeometryEnvelope)

        if service_endpoint == '':
            output_msg("Avast! Can't plunder nothing from an empty url! Time to quit.")
            sys.exit()

        if not type(strict_mode) is bool:
            strict_mode = True

        if not type(max_tries) is int:
            max_tries = int(max_tries)

        if not type(sleep_time) is int:
            sleep_time = int(sleep_time)

        if not existing_token:
            token = ''
        else:
            token = existing_token

        if output_workspace == '':
            output_workspace = os.getcwd()

        output_desc = arcpy.Describe(output_workspace)
        output_type = output_desc.dataType

        if output_type == "Folder": # To Folder
            output_folder = output_workspace
        else:
            output_folder = output_desc.path

        if username:
            # set referring domain if supplied
            # or try to infer it from url
            if referring_domain != '':
                if referring_domain[:5] == 'http:':
                    refer = 'https' + referring_domain[4:]
                else:
                    refer = referring_domain
            else:
                u = urlparse(service_endpoint)
                if u.netloc.find('arcgis.com') > -1:
                    # is an esri domain
                    refer = r"https://www.arcgis.com"
                else:
                    # generate from service url and hope it works
                    if u.scheme == 'http':
                        # must be https for token
                        refer = urlunsplit(['https', u.netloc, '', '', ''])
                    else:
                        refer = urlunsplit([u.scheme, u.netloc, '', '', ''])

            # set up authentication
            # http://stackoverflow.com/questions/1045886/https-log-in-with-urllib2
            passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
            # this creates a password manager
            passman.add_password(None, service_endpoint, username, password)
            # because we have put None at the start it will always
            # use this username/password combination for  urls
            # for which `theurl` is a super-url

            authhandler = urllib2.HTTPBasicAuthHandler(passman)
            # create the AuthHandler
            opener = urllib2.build_opener(authhandler)
            # user agent spoofing
            opener.addheaders = [('User-agent', 'Mozilla/5.0')]

            urllib2.install_opener(opener)
            # All calls to urllib2.urlopen will now use our handler
            # Make sure not to include the protocol in with the URL, or
            # HTTPPasswordMgrWithDefaultRealm will be very confused.
            # You must (of course) use it when fetching the page though.
            # authentication is now handled automatically in urllib2.urlopen

            # add proxy handling?
            # issue where a proxy may not be picked up

            # need to generate a new token
            token = gentoken(username, password, refer)
        else:
            #build a generic opener with the use agent spoofed
            opener = urllib2.build_opener()
            opener.addheaders = [('User-agent', 'Mozilla/5.0')]
            urllib2.install_opener(opener)

        if username and (token == ""):
            output_msg("Avast! The scurvy gatekeeper says 'Could not generate a token with the username and password provided'.", severity=2)

        else:
            output_msg("Start the plunder! {0}".format(service_endpoint))
            output_msg("We be stashing the booty in {0}".format(output_workspace))

            service_layers_to_get = []
            # other variables, calculated from the service
            tokenstring = ''
            if len(token) > 0:
                tokenstring = '&token=' + token
            service_call = urllib2.urlopen(service_endpoint + '?f=json' + tokenstring).read()
            if service_call and (service_call.find('error') == -1):
                service_layer_info = json.loads(service_call, strict=False)
            else:
                raise Exception("'service_call' failed to access {0}".format(service_endpoint))

            # catch root url entered
            service_list = service_layer_info.get('services')
            if service_list:
                raise ValueError("Unable to pillage a service root url at this time. Enter a FeatureServer layer url!")

            # for getting all the layers
            service_layers = service_layer_info.get('layers')
            if service_layers is not None:
                # has sub layers, get em all
                for lyr in service_layers:
                    if not lyr.get('subLayerIds'):
                        lyr_id = lyr.get('id')
                        service_layers_to_get.append(service_endpoint + '/' + str(lyr_id))
            else:
                # no sub layers
                service_layers_to_get.append(service_endpoint)
            for lyr in service_layers_to_get:
                output_msg('Found {0}'.format(lyr))

            for slyr in service_layers_to_get:
                count_tries = 0
                out_shapefile_list = [] # for file merging.
                response = None
                current_iter = 0
                max_record_count = 0
                feature_count = 0
                final_geofile = ''

                output_msg("Now pillagin' yer data from {0}".format(slyr))
                if slyr == service_endpoint: # no need to get it again
                    service_info = service_layer_info
                else:
                    service_info_call = urllib2.urlopen(slyr + '?f=json' + tokenstring).read()
                    if service_info_call:
                        service_info = json.loads(service_info_call, strict=False)
                    else:
                        raise Exception("'service_info_call' failed to access {0}".format(slyr))

                if not service_info.get('error'):
                    service_name = service_info.get('name')

                    # clean up the service name (remove invalid characters)
                    service_name_cl = service_name.encode('ascii', 'ignore') # strip any non-ascii characters that may cause an issue
                    service_name_cl = arcpy.ValidateTableName(service_name_cl, output_workspace) # remove any other problematic characters
                    ##output_msg("'{0}' will be stashed as '{1}'".format(service_name, service_name_cl))

                    # add url & write out the service info for reference
                    service_info[u'serviceURL'] = slyr
                    info_filename = service_name_cl + "_info.txt"
                    info_file = os.path.join(output_folder, info_filename)
                    with open(info_file, 'w') as i_file:
                        json.dump(service_info, i_file, sort_keys=True, indent=4, separators=(',', ': '))
                        output_msg("Yar! {0} Service info stashed in '{1}'".format(service_name, info_file))

                    if strict_mode:
                        # check JSON supported
                        supports_json = False
                        if 'supportedQueryFormats' in service_info:
                            supported_formats = service_info.get('supportedQueryFormats').split(",")
                            for data_format in supported_formats:
                                if data_format == "JSON":
                                    supports_json = True
                                    break
                        else:
                            output_msg('Unable to check supported formats. Check {0} for details'.format(info_file))
                    else:
                        # assume JSON supported
                        supports_json = True

                    if supports_json:
                        try:
                            # loop through fields in service_info, get objectID field
                            objectid_field = "OBJECTID"
                            if 'fields' in service_info:
                                field_list = service_info.get('fields')
                                for field in field_list:
                                    if field.get('type') == 'esriFieldTypeOID':
                                        objectid_field = field.get('name')
                                        break
                            else:
                                output_msg("No field list returned - forging ahead with {0}".format(objectid_field))

                            feat_OIDLIST_query = r"/query?where=" + objectid_field + r"+%3E+0&objectIds=&time=&geometry=&geometryType=esriGeometryEnvelope&inSR=&spatialRel=esriSpatialRelIntersects&distance=&units=esriSRUnit_Meter&outFields=&returnGeometry=false&maxAllowableOffset=&geometryPrecision=&outSR=&returnIdsOnly=true&returnCountOnly=false&returnExtentOnly=false&orderByFields=&groupByFieldsForStatistics=&outStatistics=&resultOffset=&resultRecordCount=&returnZ=false&returnM=false&f=json" + tokenstring
                            

                            # to query using geometry,&geometry=   &geometryType= esriGeometryEnvelope &inSR= and probably spatial relationship and buffering
                            feat_query = r"/query?objectIds=&time=&geometry=&geometryType=esriGeometryEnvelope&inSR=&spatialRel=esriSpatialRelIntersects&distance=&units=esriSRUnit_Meter&outFields=*&returnGeometry=true&maxAllowableOffset=&geometryPrecision=&outSR=&returnIdsOnly=false&returnCountOnly=false&returnExtentOnly=false&orderByFields=&groupByFieldsForStatistics=&outStatistics=&resultOffset=&resultRecordCount=&returnZ=false&returnM=false&f=json" + tokenstring
                            

                            max_record_count = service_info.get('maxRecordCount') # maximum number of records returned by service at once
                            

                            # extract using actual OID values is the safest way
                            feature_OIDs = None
                            feature_query = json.loads(urllib2.urlopen(slyr + feat_OIDLIST_query).read())
                            if feature_query and 'objectIds' in feature_query:
                                feature_OIDs = feature_query["objectIds"]
                            else:
                                raise ValueError('Unable to get OID values: {}'.format(feature_query))

                            if feature_OIDs:
                                feature_count = len(feature_OIDs)
                                sortie_count = feature_count//max_record_count + (feature_count % max_record_count > 0)
                                output_msg("{0} records, in chunks of {1}, err, that be {2} sorties. Ready lads!".format(feature_count, max_record_count, sortie_count))

                                feature_OIDs.sort()
                                # chunk them
                                for group in grouper(feature_OIDs, max_record_count):
                                    # reset count_tries
                                    count_tries = 0
                                    start_oid = group[0]
                                    end_oid = group[max_record_count-1]
                                    if end_oid is None: # reached the end of the iterables
                                        # loop through and find last oid
                                        # need this due to fillvalue of None in grouper
                                        for i in reversed(group):
                                            if i is not None:
                                                end_oid = i
                                                break

                                    # >= %3E%3D, <= %3C%3D
                                    where_clause = "&where={0}+%3E%3D+{1}+AND+{2}+%3C%3D+{3}".format(objectid_field, str(start_oid), objectid_field, str(end_oid))
                                    # response is a string of json with the attr and geom
                                    query = slyr + feat_query + where_clause
                                    response = get_data(query) # expects json object. An error will return none
                                    if not response or not response.get('features'):
                                        # break out
                                        raise ValueError("Abandon ship! Data access failed! Check what ye manag'd to plunder before failure.")
                                    else:
                                        feature_dict = response["features"] # load the features so we can check they are not empty

                                        if len(feature_dict) != 0:
                                            # convert response to json file on disk then to shapefile (is fast)
                                            out_JSON_name = service_name_cl + "_" + str(current_iter) + ".json"
                                            out_JSON_file = os.path.join(output_folder, out_JSON_name)

                                            #with open(out_JSON_file, 'w') as out_file:
                                            #    out_file.write(response.encode('utf-8')) #back from unicode

                                            with codecs.open(out_JSON_file, 'w', 'utf-8') as out_file:
                                                data = json.dumps(response, ensure_ascii=False)
                                                out_file.write(data)

                                            output_msg("Nabbed some json data fer ye: '{0}', oids {1} to {2}".format(out_JSON_name, start_oid, end_oid))

                                            if output_type == "Folder":
                                                out_file_name = service_name_cl + "_" + str(current_iter) + ".shp"
                                            else:
                                                out_file_name = service_name_cl + "_" + str(current_iter)
                                            # in-memory version
                                            ##temp_output = "in_memory\\"
                                            ##out_file_name = service_name_cl + "_" + str(current_iter)
                                            ##out_geofile = os.path.join(temp_output, out_file_name)

                                            out_geofile = os.path.join(output_workspace, out_file_name)

                                            output_msg("Converting json to {0}".format(out_geofile))
                                            arcpy.JSONToFeatures_conversion(out_JSON_file, out_geofile)
                                            out_shapefile_list.append(out_geofile)
                                            os.remove(out_JSON_file) # clean up the JSON file

                                        current_iter += max_record_count

                            else:
                                # no objectids
                                output_msg("No feature IDs found!")
                                raise ValueError("Aaar, plunderin' failed")

                            # download complete, create a final output
                            if output_type == "Folder":
                                final_geofile = os.path.join(output_workspace, service_name_cl + ".shp")
                            else:
                                final_geofile = os.path.join(output_workspace, service_name_cl)

                            output_msg("Stashin' all the booty in '{0}'".format(final_geofile))

                            #combine all the data
                            combine_data(out_shapefile_list, final_geofile)

                            end_time = datetime.datetime.today()
                            elapsed_time = end_time - start_time
                            output_msg("{0} plundered in {1}".format(final_geofile, str(elapsed_time)))

                        except ValueError, e:
                            output_msg("ERROR: " + str(e), severity=2)

                        except Exception, e:
                            line, err = trace()
                            output_msg("Script Error\n{0}\n on {1}".format(err, line), severity=2)
                            output_msg(arcpy.GetMessages())

                        finally:
                            if arcpy.Exists(final_geofile):
                                data_count = int(arcpy.GetCount_management(final_geofile)[0])
                                if data_count == feature_count: #we got it all
                                    output_msg("Scrubbing the decks...")
                                    for fc in out_shapefile_list:
                                        arcpy.Delete_management(fc)
                                else:
                                    output_msg("Splicin' the data failed - found {0} but expected {1}. Check {2} to see what went wrong.".format(data_count, feature_count, final_geofile))

                    else:
                        # no JSON output
                        output_msg("Aaaar, ye service does not support JSON output. Can't do it.")
                else:
                    # service info error
                    output_msg("Error: {0}".format(service_info.get('error')))

    except ValueError, e:
        output_msg("ERROR: " + str(e), severity=2)

    except Exception, e:
        if hasattr(e, 'errno') and e.errno == 10054:
            output_msg("ERROR: " + str(e), severity=2)
        else:
            line, err = trace()
            output_msg("Error\n{0}\n on {1}".format(err, line), severity=2)
        output_msg(arcpy.GetMessages())

    finally:
        end_time = datetime.datetime.today()
        elapsed_time = end_time - start_time
        output_msg("Plunderin' done, in " + str(elapsed_time))

Example 7

Project: pittsburgh-purchasing-suite Source File: test_beacon_jobs.py
    def setUp(self):
        super(TestBeaconJobs, self).setUp()

        self.yesterday = datetime.datetime.today() - datetime.timedelta(days=1)
        today = datetime.datetime.today()
        tomorrow = datetime.datetime.today() + datetime.timedelta(days=1)

        self.category = CategoryFactory.create()
        self.admin = UserFactory.create()

        self.opportunity = OpportunityFactory.create(
            is_public=True, planned_publish=today, planned_submission_start=today,
            planned_submission_end=tomorrow, categories=set([self.category]),
            created_by=self.admin, published_at=today
        )
        self.opportunity2 = OpportunityFactory.create(
            is_public=True, planned_publish=self.yesterday, planned_submission_start=today,
            planned_submission_end=tomorrow, publish_notification_sent=True,
            categories=set([self.category]), created_by=self.admin, published_at=self.yesterday
        )
        self.opportunity3 = OpportunityFactory.create(
            is_public=False, planned_publish=today, planned_submission_start=today,
            planned_submission_end=tomorrow, publish_notification_sent=False,
            categories=set([self.category]), created_by=self.admin, published_at=today
        )
        self.opportunity4 = OpportunityFactory.create(
            is_public=True, planned_publish=self.yesterday, planned_submission_start=self.yesterday,
            planned_submission_end=today, publish_notification_sent=True,
            categories=set([self.category]), created_by=self.admin, published_at=self.yesterday
        )

        VendorFactory.create(opportunities=set([self.opportunity]))
        VendorFactory.create(categories=set([self.category]))

Example 8

Project: python-bugzilla Source File: rw_functional.py
    def test05ModifyStatus(self):
        """
        Modify status and comment fields for an existing bug
        """
        bz = self.bzclass(url=self.url)
        bugid = "663674"
        cmd = "bugzilla modify %s " % bugid

        bug = bz.getbug(bugid)

        # We want to start with an open bug, so fix things
        if bug.status == "CLOSED":
            tests.clicomm(cmd + "--status ASSIGNED", bz)
            bug.refresh()
            self.assertEquals(bug.status, "ASSIGNED")

        origstatus = bug.status

        # Set to ON_QA with a private comment
        status = "ON_QA"
        comment = ("changing status to %s at %s" %
                   (status, datetime.datetime.today()))
        tests.clicomm(cmd +
            "--status %s --comment \"%s\" --private" % (status, comment), bz)

        bug.refresh()
        self.assertEquals(bug.status, status)
        self.assertEquals(bug.longdescs[-1]["is_private"], 1)
        self.assertEquals(bug.longdescs[-1]["text"], comment)

        # Close bug as DEFERRED with a private comment
        resolution = "DEFERRED"
        comment = ("changing status to CLOSED=%s at %s" %
                   (resolution, datetime.datetime.today()))
        tests.clicomm(cmd +
            "--close %s --comment \"%s\" --private" %
            (resolution, comment), bz)

        bug.refresh()
        self.assertEquals(bug.status, "CLOSED")
        self.assertEquals(bug.resolution, resolution)
        self.assertEquals(bug.comments[-1]["is_private"], 1)
        self.assertEquals(bug.comments[-1]["text"], comment)

        # Close bug as dup with no comment
        dupeid = "461686"
        desclen = len(bug.longdescs)
        tests.clicomm(cmd +
            "--close DUPLICATE --dupeid %s" % dupeid, bz)

        bug.refresh()
        self.assertEquals(bug.dupe_of, int(dupeid))
        self.assertEquals(len(bug.longdescs), desclen + 1)
        self.assertTrue("marked as a duplicate" in bug.longdescs[-1]["text"])

        # bz.setstatus test
        comment = ("adding lone comment at %s" % datetime.datetime.today())
        bug.setstatus("POST", comment=comment, private=True)
        bug.refresh()
        self.assertEquals(bug.longdescs[-1]["is_private"], 1)
        self.assertEquals(bug.longdescs[-1]["text"], comment)
        self.assertEquals(bug.status, "POST")

        # bz.close test
        fixed_in = str(datetime.datetime.today())
        bug.close("ERRATA", fixedin=fixed_in)
        bug.refresh()
        self.assertEquals(bug.status, "CLOSED")
        self.assertEquals(bug.resolution, "ERRATA")
        self.assertEquals(bug.fixed_in, fixed_in)

        # bz.addcomment test
        comment = ("yet another test comment %s" % datetime.datetime.today())
        bug.addcomment(comment, private=False)
        bug.refresh()
        self.assertEquals(bug.longdescs[-1]["text"], comment)
        self.assertEquals(bug.longdescs[-1]["is_private"], 0)

        # Confirm comments is same as getcomments
        self.assertEquals(bug.comments, bug.getcomments())

        # Reset state
        tests.clicomm(cmd + "--status %s" % origstatus, bz)
        bug.refresh()
        self.assertEquals(bug.status, origstatus)

Example 9

Project: pystardict Source File: demo.py
def demo():
    
    milestone1 = datetime.datetime.today()
    
    dicts_dir = os.path.join(os.path.dirname(__file__))
    dict1 = Dictionary(os.path.join(dicts_dir, 'stardict-quick_eng-rus-2.4.2',
        'quick_english-russian'))
    dict2 = Dictionary(os.path.join(dicts_dir, 'stardict-quick_rus-eng-2.4.2',
        'quick_russian-english'))
    
    milestone2 = datetime.datetime.today()
    print '2 dicts load:', milestone2-milestone1
    
    print dict1.idx['test']
    print dict2.idx['проверка']
    
    milestone3 = datetime.datetime.today()
    print '2 cords getters:', milestone3-milestone2
    
    print dict1.dict['test']
    print dict2.dict['проверка']
    
    milestone4 = datetime.datetime.today()
    print '2 direct data getters (w\'out cache):', milestone4-milestone3
    
    print dict1['test']
    print dict2['проверка']

    milestone5 = datetime.datetime.today()
    print '2 high level data getters (not cached):', milestone5-milestone4
    
    print dict1['test']
    print dict2['проверка']
    
    milestone6 = datetime.datetime.today()
    print '2 high level data getters (cached):', milestone6-milestone5

Example 10

Project: karaage Source File: test_xmlrpc.py
    def test_parse_usage(self):
        server = self.server
        today = datetime.datetime.today()
        today = today.strftime("%Y-%m-%d")

        module_file = alogger.tests.examples.__file__
        directory = os.path.abspath(os.path.split(module_file)[0])
        path = os.path.join(directory, "torque.log")
        fd = open(path, "r")
        lines = fd.readlines()
        fd.close()

        with self.assertRaises(xmlrpclib.Fault) as cm:
            server.parse_usage(
                "tango", "aqws12",
                lines, today, 'tango', 'TORQUE')

        self.assertEqual(cm.exception.faultCode, 81)
        self.assertEqual(
            cm.exception.faultString, 'Username and/or password is incorrect')

        mc = MachineCategory.objects.get(name="Default")
        proj = Project.objects.get(pid="TestProject1")
        i = Institute.objects.get(name="Example")

        proj2 = Project.objects.create(pid="pMona0041", institute=i)

        p = Person.objects.create(
            username="blair", short_name="B", full_name="Blair",
            institute=i)
        proj.group.members.add(p)
        Account.objects.create(
            username="blair", person=p, machine_category=mc,
            default_project=proj,
            date_created=datetime.datetime.today())

        p = Person.objects.create(
            username="philipn", short_name="Phil", full_name="Phillip",
            institute=i)
        proj.group.members.add(p)
        proj2.group.members.add(p)
        Account.objects.create(
            username="philipn", person=p, machine_category=mc,
            default_project=proj,
            date_created=datetime.datetime.today())

        result = server.parse_usage(
            "tango", "aq12ws",
            lines, today, 'tango', 'TORQUE')
        self.assertEqual(
            result[0],
            'Inserted : 16\nUpdated  : 0\nFailed   : 6\nSkiped   : 35')

Example 11

Project: SickGear Source File: properFinder.py
def search_propers():

    if not sickbeard.DOWNLOAD_PROPERS:
        return

    logger.log(u'Beginning search for new propers')

    age_shows, age_anime = 2, 14
    aired_since_shows = datetime.datetime.today() - datetime.timedelta(days=age_shows)
    aired_since_anime = datetime.datetime.today() - datetime.timedelta(days=age_anime)
    recent_shows, recent_anime = _recent_history(aired_since_shows, aired_since_anime)
    if recent_shows or recent_anime:
        propers = _get_proper_list(aired_since_shows, recent_shows, recent_anime)

        if propers:
            _download_propers(propers)
    else:
        logger.log(u'No downloads or snatches found for the last %s%s days to use for a propers search' %
                   (age_shows, ('', ' (%s for anime)' % age_anime)[helpers.has_anime()]))

    _set_last_proper_search(datetime.datetime.today().toordinal())

    run_at = ''
    if None is sickbeard.properFinderScheduler.start_time:
        run_in = sickbeard.properFinderScheduler.lastRun + sickbeard.properFinderScheduler.cycleTime - datetime.datetime.now()
        hours, remainder = divmod(run_in.seconds, 3600)
        minutes, seconds = divmod(remainder, 60)
        run_at = u', next check in approx. ' + (
            '%dh, %dm' % (hours, minutes) if 0 < hours else '%dm, %ds' % (minutes, seconds))

    logger.log(u'Completed the search for new propers%s' % run_at)

Example 12

Project: gazouilleur Source File: stats.py
    @inlineCallbacks
    def digest(self, hours, channel):
        now = datetime.today()
        since = now - timedelta(hours=hours)
        re_chan = re.compile(r'^#*%s$' % channel.lower(), re.I)
        query = {'channel': re_chan, 'timestamp': {'$gte': since}}
        data = {
            "channel": channel,
            "t0": clean_date(since),
            "t1": clean_date(now),
            "news": [],
            "imgs": [],
            "tweets": []
        }

        news = yield SingleMongo('news', 'find', query, fields=['sourcename', 'source', 'link', 'message'], filter=sortasc('sourcename')+sortasc('timestamp'))
        lastsource = ""
        for n in news:
            source = n["sourcename"]
            if source != lastsource:
                lastsource = source
                data["news"].append({
                    "name": source,
                    "link": n["link"],
                    "elements": []
                })
            data["news"][-1]["elements"].append({
                "text": n["message"],
                "link": n["link"]
            })
        del(news)

        tweets = yield SingleMongo('tweets', 'find', query, fields=['screenname', 'message', 'link'], filter=sortasc('id'))
        links = {}
        imgs = {}
        filters = yield SingleMongo('filters', 'find', {'channel': re_chan}, fields=['keyword'])
        filters = [keyword['keyword'].lower() for keyword in filters]
        for t in tweets:
            skip = False
            tuser_low = t['screenname'].lower()
            if "@%s" % tuser_low in filters:
                continue
            msg_low = t["message"].lower()
            if not ((self.user and self.user in msg_low) or self.user == tuser_low):
                for k in filters:
                    if k in msg_low:
                        skip = True
                        break
            if skip: continue
            for link in URL_REGEX.findall(t["message"]):
                link, _ = clean_url(link[2])
                if not link.startswith("http"):
                    continue
                tid = re_twitmedia.search(link)
                if tid:
                    tid = tid.group(1)
                    if tid not in imgs:
                        imgs[tid] = 1
                        data["imgs"].append({"id": tid})
                    continue
                if re_tweet.match(link):
                    continue
                if link not in links:
                    links[link] = {
                        "link": link,
                        "first": ("%s: %s" % (t["screenname"], t["message"].replace(link, ""))),
                        "firstlink": t["link"],
                        "count": 0
                    }
                links[link]["count"] += 1

        del(tweets)
        data["tweets"] = sorted(links.values(), key=lambda x: "%06d-%s" % (10**6-x['count'], x['link']))
        del(links)

        filename = "%s_%s_%s" % (channel.lstrip("#"), data["t0"].replace(" ", "+"), data["t1"].replace(" ", "+"))
        if not self.render_template("digest.html", filename, data):
            returnValue("Wooops could not generate html for %s..." % filename)
        returnValue("Digest for the last %s hours available at %sdigest_%s.html" % (hours, self.url, filename))

Example 13

Project: zhuaxia Source File: downloader.py
def finish_summary(skipped_hist):
    """
    build the summary after finishing all dl

    skipped_hist: a History list, contains skipped songs, it is not empty only
                  if incremental_dl is true
    """
    border= "\n"+u">>"*40 + u"\n"
    #build summary text:
    text = []
    if skipped_hist:
        text.append( border+msg.fmt_summary_skip_title +border)
        text.append( msg.fmt_summary_skip_header)
        for hist in skipped_hist:
            text.append( "%s\t%s\t%s\t%s" % (msg.head_xm if hist.source ==1 else msg.head_163, hist.last_dl_time_str(), hist.song_name, hist.location))

    if success_list:
        text.append( border+msg.fmt_summary_success_title +border)
        text.append( msg.fmt_summary_success_header)
        for song in success_list:
            text.append('%s\t%s'%(song.song_name, song.abs_path))

    if failed_list:
        text.append( border+msg.fmt_summary_failed_title +border)
        text.append( msg.fmt_summary_failed_header)
        for song in failed_list:
            text.append('%s\t%s'%(song.song_name, song.abs_path))

    while True:
        sys.stdout.write(msg.summary_prompt)
        choice = raw_input().lower()
        if choice == 'q' or choice == '':
            break
        elif choice == 'v':
            pydoc.pager(u"\n".join(text))
            break
        elif choice == 's':
            summary = path.join(config.DOWNLOAD_DIR,'summary_'+str(datetime.datetime.today())+".txt")
            with codecs.open(summary, 'w', 'utf-8') as f:
                f.write("\n".join(text))
            print log.hl(msg.summary_saved % summary ,'cyan')
            break
        else:
            sys.stdout.write(msg.summary_prompt_err)

Example 14

Project: kamaelia_ Source File: TwitterSearch.py
    def main(self):
        twitterurl = "http://api.twitter.com/1/users/search.json"

        if self.proxy:
            proxyhandler = urllib2.ProxyHandler({"http" : self.proxy})
            twitopener = urllib2.build_opener(proxyhandler)
            urllib2.install_opener(twitopener)

        headers = {'User-Agent' : "BBC R&D Grabber"}
        postdata = None

        if self.keypair == False:
            # Perform OAuth authentication - as we don't have the secret key pair we need to request it
            # This will require some user input
            request_token_url = 'http://api.twitter.com/oauth/request_token'
            access_token_url = 'http://api.twitter.com/oauth/access_token'
            authorize_url = 'http://api.twitter.com/oauth/authorize'

            token = None
            consumer = oauth.Consumer(key=self.consumerkeypair[0],secret=self.consumerkeypair[1])

            params = {
                        'oauth_version': "1.0",
                        'oauth_nonce': oauth.generate_nonce(),
                        'oauth_timestamp': int(time.time()),
                    }

            params['oauth_consumer_key'] = consumer.key

            req = oauth.Request(method="GET",url=request_token_url,parameters=params)

            signature_method = oauth.SignatureMethod_HMAC_SHA1()
            req.sign_request(signature_method, consumer, token)

            requestheaders = req.to_header()
            requestheaders['User-Agent'] = "BBC R&D Grabber"

            # Connect to Twitter
            try:
                req = urllib2.Request(request_token_url,None,requestheaders) # Why won't this work?!? Is it trying to POST?
                conn1 = urllib2.urlopen(req)
            except httplib.BadStatusLine:
                e = sys.exc_info()[1]
                Print("PeopleSearch BadStatusLine error:", e )
                conn1 = False
            except urllib2.HTTPError:
                e = sys.exc_info()[1]
                Print("PeopleSearch HTTP error:", e.code)
#                sys.stderr.write('PeopleSearch HTTP error: ' + str(e.code) + '\n')
                conn1 = False
            except urllib2.URLError:
                e = sys.exc_info()[1]
                Print("PeopleSearch URL error: ", e.reason)
#                sys.stderr.write('PeopleSearch URL error: ' + str(e.reason) + '\n')
                conn1 = False

            if conn1:
                content = conn1.read()
                conn1.close()

                request_token = dict(urlparse.parse_qsl(content))

                Print( "Request Token:")
                Print("     - oauth_token        = " , request_token['oauth_token'])
                Print("     - oauth_token_secret = " , request_token['oauth_token_secret'])
                Print("")

                # The user must confirm authorisation so a URL is Printed here
                Print("Go to the following link in your browser:")
                Print("%s?oauth_token=%s" % (authorize_url, request_token['oauth_token']) )
                Print("")

                accepted = 'n'
                # Wait until the user has confirmed authorisation
                while accepted.lower() == 'n':
                    accepted = raw_input('Have you authorized me? (y/n) ')
                oauth_verifier = raw_input('What is the PIN? ')

                token = oauth.Token(request_token['oauth_token'],
                    request_token['oauth_token_secret'])
                token.set_verifier(oauth_verifier)

                params = {
                        'oauth_version': "1.0",
                        'oauth_nonce': oauth.generate_nonce(),
                        'oauth_timestamp': int(time.time()),
                    }

                params['oauth_token'] = token.key
                params['oauth_consumer_key'] = consumer.key

                req = oauth.Request(method="GET",url=access_token_url,parameters=params)

                signature_method = oauth.SignatureMethod_HMAC_SHA1()
                req.sign_request(signature_method, consumer, token)

                requestheaders = req.to_header()
                requestheaders['User-Agent'] = "BBC R&D Grabber"
                # Connect to Twitter
                try:
                    req = urllib2.Request(access_token_url,"oauth_verifier=%s" % oauth_verifier,requestheaders) # Why won't this work?!? Is it trying to POST?
                    conn1 = urllib2.urlopen(req)
                except httplib.BadStatusLine:
                    e = sys.exc_info()[1]
#                    sys.stderr.write('PeopleSearch BadStatusLine error: ' + str(e) + '\n')
                    Print('PeopleSearch BadStatusLine error: ', e)
                    conn1 = False
                except urllib2.HTTPError:
                    e = sys.exc_info()[1]
                    Print('PeopleSearch HTTP error: ', e.code)
                    conn1 = False
                except urllib2.URLError:
                    e = sys.exc_info()[1]
#                    sys.stderr.write('PeopleSearch URL error: ' + str(e.reason) + '\n')
                    Print('PeopleSearch URL error: ', e.reason)
                    conn1 = False

                if conn1:
                    content = conn1.read()
                    conn1.close()
                    access_token = dict(urlparse.parse_qsl(content))

                    # Access tokens retrieved from Twitter
                    Print("Access Token:")
                    Print("     - oauth_token        = " , access_token['oauth_token'])
                    Print("     - oauth_token_secret = " , access_token['oauth_token_secret'])
                    Print("")
                    Print("You may now access protected resources using the access tokens above.")
                    Print("")

                    save = False
                    # Load config to save OAuth keys
                    try:
                        homedir = os.path.expanduser("~")
                        file = open(homedir + "/twitter-login.conf",'r')
                        save = True
                    except IOError:
                        e = sys.exc_info()[1]
                        Print ("Failed to load config file - not saving oauth keys: " , e)

                    if save:
                        raw_config = file.read()

                        file.close()

                        # Read config and add new values
                        config = cjson.decode(raw_config)
                        config['key'] = access_token['oauth_token']

                        config['secret'] = access_token['oauth_token_secret']

                        raw_config = cjson.encode(config)

                        # Write out the new config file
                        try:
                            file = open(homedir + "/twitter-login.conf",'w')
                            file.write(raw_config)
                            file.close()
                        except IOError:
                            e = sys.exc_info()[1]
                            Print ("Failed to save oauth keys: " , e)

                    self.keypair = [access_token['oauth_token'], access_token['oauth_token_secret']]
        

        while not self.finished():
            # TODO: Implement backoff algorithm in case of connection failures - watch out for the fact this could delay the requester component
            if self.dataReady("inbox"):
                # Retrieve keywords to look up
                person = self.recv("inbox")

                # Ensure we're not rate limited during the first request - if so we'll wait for 15 mins before our next request
                if (datetime.today() - timedelta(minutes=15)) > self.ratelimited:
                    requesturl = twitterurl + "?q=" + urllib.quote(person) + "&per_page=5"

                    params = {
                        'oauth_version': "1.0",
                        'oauth_nonce': oauth.generate_nonce(),
                        'oauth_timestamp': int(time.time()),
                    }

                    token = oauth.Token(key=self.keypair[0],secret=self.keypair[1])
                    consumer = oauth.Consumer(key=self.consumerkeypair[0],secret=self.consumerkeypair[1])

                    params['oauth_token'] = token.key
                    params['oauth_consumer_key'] = consumer.key

                    req = oauth.Request(method="GET",url=requesturl,parameters=params)

                    signature_method = oauth.SignatureMethod_HMAC_SHA1()
                    req.sign_request(signature_method, consumer, token)

                    requestheaders = req.to_header()
                    requestheaders['User-Agent'] = "BBC R&D Grabber"

                    # Connect to Twitter
                    try:
                        req = urllib2.Request(requesturl,None,requestheaders) # Why won't this work?!? Is it trying to POST?
                        conn1 = urllib2.urlopen(req)
                    except httplib.BadStatusLine:
                        e = sys.exc_info()[1]
#                        sys.stderr.write('PeopleSearch BadStatusLine error: ' + str(e) + '\n')
                        Print('PeopleSearch BadStatusLine error: ', e)
                        conn1 = False
                    except urllib2.HTTPError:
                        e = sys.exc_info()[1]
#                        sys.stderr.write('PeopleSearch HTTP error: ' + str(e.code) + '\n')
                        Print('PeopleSearch HTTP error: ', e.code)
                        conn1 = False
                    except urllib2.URLError:
                        e = sys.exc_info()[1]
#                        sys.stderr.write('PeopleSearch URL error: ' + str(e.reason) + '\n')
                        Print('PeopleSearch URL error: ', e.reason)
                        conn1 = False

                    if conn1:
                        # Check rate limiting here and Print current limit
                        headers = conn1.info()
                        try:
                            headerlist = string.split(str(headers),"\n")
                        except UnicodeEncodeError: # str may fail...
                            headerlist = []
                        for line in headerlist:
                            if line != "":
                                splitheader = line.split()
                                if splitheader[0] == "X-FeatureRateLimit-Remaining:" or splitheader[0] == "X-RateLimit-Remaining:":
                                    Print(splitheader[0] , " " , splitheader[1] )
                                    if int(splitheader[1]) < 5:
                                        self.ratelimited = datetime.today()
                        # Grab json format result of people search here
                        try:
                            data = conn1.read()
                            try:
                                content = cjson.decode(data)
                                self.send(content,"outbox")
                            except cjson.DecodeError:
                                self.send(dict(),"outbox")
                        except IOError:
                            e = sys.exc_info()[1]
#                            sys.stderr.write('PeopleSearch IO error: ' + str(e) + '\n')
                            Print('PeopleSearch IO error: ', e)
                            self.send(dict(),"outbox")
                        conn1.close()
                    else:
                        self.send(dict(),"outbox")
                else:
                   Print("Twitter search paused - rate limited")
                   self.send(dict(),"outbox")
            self.pause()
            yield 1

Example 15

Project: kamaelia_ Source File: TwitterSearch.py
    def main(self):
        twitterurl = "http://api.twitter.com/1/users/search.json"

        if self.proxy:
            proxyhandler = urllib2.ProxyHandler({"http" : self.proxy})
            twitopener = urllib2.build_opener(proxyhandler)
            urllib2.install_opener(twitopener)

        headers = {'User-Agent' : "BBC R&D Grabber"}
        postdata = None

        if self.keypair == False:
            # Perform OAuth authentication - as we don't have the secret key pair we need to request it
            # This will require some user input
            request_token_url = 'http://api.twitter.com/oauth/request_token'
            access_token_url = 'http://api.twitter.com/oauth/access_token'
            authorize_url = 'http://api.twitter.com/oauth/authorize'

            token = None
            consumer = oauth.Consumer(key=self.consumerkeypair[0],secret=self.consumerkeypair[1])

            params = {
                        'oauth_version': "1.0",
                        'oauth_nonce': oauth.generate_nonce(),
                        'oauth_timestamp': int(time.time()),
                    }

            params['oauth_consumer_key'] = consumer.key

            req = oauth.Request(method="GET",url=request_token_url,parameters=params)

            signature_method = oauth.SignatureMethod_HMAC_SHA1()
            req.sign_request(signature_method, consumer, token)

            requestheaders = req.to_header()
            requestheaders['User-Agent'] = "BBC R&D Grabber"

            # Connect to Twitter
            try:
                req = urllib2.Request(request_token_url,None,requestheaders) # Why won't this work?!? Is it trying to POST?
                conn1 = urllib2.urlopen(req)
            except httplib.BadStatusLine, e:
                Print("PeopleSearch BadStatusLine error:", e )
                conn1 = False
            except urllib2.HTTPError, e:
                Print("PeopleSearch HTTP error:", e.code)
#                sys.stderr.write('PeopleSearch HTTP error: ' + str(e.code) + '\n')
                conn1 = False
            except urllib2.URLError, e:
                Print("PeopleSearch URL error: ", e.reason)
#                sys.stderr.write('PeopleSearch URL error: ' + str(e.reason) + '\n')
                conn1 = False

            if conn1:
                content = conn1.read()
                conn1.close()

                request_token = dict(urlparse.parse_qsl(content))

                Print( "Request Token:")
                Print("     - oauth_token        = " , request_token['oauth_token'])
                Print("     - oauth_token_secret = " , request_token['oauth_token_secret'])
                Print("")

                # The user must confirm authorisation so a URL is Printed here
                Print("Go to the following link in your browser:")
                Print("%s?oauth_token=%s" % (authorize_url, request_token['oauth_token']) )
                Print("")

                accepted = 'n'
                # Wait until the user has confirmed authorisation
                while accepted.lower() == 'n':
                    accepted = raw_input('Have you authorized me? (y/n) ')
                oauth_verifier = raw_input('What is the PIN? ')

                token = oauth.Token(request_token['oauth_token'],
                    request_token['oauth_token_secret'])
                token.set_verifier(oauth_verifier)

                params = {
                        'oauth_version': "1.0",
                        'oauth_nonce': oauth.generate_nonce(),
                        'oauth_timestamp': int(time.time()),
                    }

                params['oauth_token'] = token.key
                params['oauth_consumer_key'] = consumer.key

                req = oauth.Request(method="GET",url=access_token_url,parameters=params)

                signature_method = oauth.SignatureMethod_HMAC_SHA1()
                req.sign_request(signature_method, consumer, token)

                requestheaders = req.to_header()
                requestheaders['User-Agent'] = "BBC R&D Grabber"
                # Connect to Twitter
                try:
                    req = urllib2.Request(access_token_url,"oauth_verifier=%s" % oauth_verifier,requestheaders) # Why won't this work?!? Is it trying to POST?
                    conn1 = urllib2.urlopen(req)
                except httplib.BadStatusLine, e:
#                    sys.stderr.write('PeopleSearch BadStatusLine error: ' + str(e) + '\n')
                    Print('PeopleSearch BadStatusLine error: ', e)
                    conn1 = False
                except urllib2.HTTPError, e:
                    Print('PeopleSearch HTTP error: ', e.code)
                    conn1 = False
                except urllib2.URLError, e:
#                    sys.stderr.write('PeopleSearch URL error: ' + str(e.reason) + '\n')
                    Print('PeopleSearch URL error: ', e.reason)
                    conn1 = False

                if conn1:
                    content = conn1.read()
                    conn1.close()
                    access_token = dict(urlparse.parse_qsl(content))

                    # Access tokens retrieved from Twitter
                    Print("Access Token:")
                    Print("     - oauth_token        = " , access_token['oauth_token'])
                    Print("     - oauth_token_secret = " , access_token['oauth_token_secret'])
                    Print("")
                    Print("You may now access protected resources using the access tokens above.")
                    Print("")

                    save = False
                    # Load config to save OAuth keys
                    try:
                        homedir = os.path.expanduser("~")
                        file = open(homedir + "/twitter-login.conf",'r')
                        save = True
                    except IOError, e:
                        Print ("Failed to load config file - not saving oauth keys: " , e)

                    if save:
                        raw_config = file.read()

                        file.close()

                        # Read config and add new values
                        config = cjson.decode(raw_config)
                        config['key'] = access_token['oauth_token']

                        config['secret'] = access_token['oauth_token_secret']

                        raw_config = cjson.encode(config)

                        # Write out the new config file
                        try:
                            file = open(homedir + "/twitter-login.conf",'w')
                            file.write(raw_config)
                            file.close()
                        except IOError, e:
                            Print ("Failed to save oauth keys: " , e)

                    self.keypair = [access_token['oauth_token'], access_token['oauth_token_secret']]
        

        while not self.finished():
            # TODO: Implement backoff algorithm in case of connection failures - watch out for the fact this could delay the requester component
            if self.dataReady("inbox"):
                # Retrieve keywords to look up
                person = self.recv("inbox")

                # Ensure we're not rate limited during the first request - if so we'll wait for 15 mins before our next request
                if (datetime.today() - timedelta(minutes=15)) > self.ratelimited:
                    requesturl = twitterurl + "?q=" + urllib.quote(person) + "&per_page=5"

                    params = {
                        'oauth_version': "1.0",
                        'oauth_nonce': oauth.generate_nonce(),
                        'oauth_timestamp': int(time.time()),
                    }

                    token = oauth.Token(key=self.keypair[0],secret=self.keypair[1])
                    consumer = oauth.Consumer(key=self.consumerkeypair[0],secret=self.consumerkeypair[1])

                    params['oauth_token'] = token.key
                    params['oauth_consumer_key'] = consumer.key

                    req = oauth.Request(method="GET",url=requesturl,parameters=params)

                    signature_method = oauth.SignatureMethod_HMAC_SHA1()
                    req.sign_request(signature_method, consumer, token)

                    requestheaders = req.to_header()
                    requestheaders['User-Agent'] = "BBC R&D Grabber"

                    # Connect to Twitter
                    try:
                        req = urllib2.Request(requesturl,None,requestheaders) # Why won't this work?!? Is it trying to POST?
                        conn1 = urllib2.urlopen(req)
                    except httplib.BadStatusLine, e:
#                        sys.stderr.write('PeopleSearch BadStatusLine error: ' + str(e) + '\n')
                        Print('PeopleSearch BadStatusLine error: ', e)
                        conn1 = False
                    except urllib2.HTTPError, e:
#                        sys.stderr.write('PeopleSearch HTTP error: ' + str(e.code) + '\n')
                        Print('PeopleSearch HTTP error: ', e.code)
                        conn1 = False
                    except urllib2.URLError, e:
#                        sys.stderr.write('PeopleSearch URL error: ' + str(e.reason) + '\n')
                        Print('PeopleSearch URL error: ', e.reason)
                        conn1 = False

                    if conn1:
                        # Check rate limiting here and Print current limit
                        headers = conn1.info()
                        try:
                            headerlist = string.split(str(headers),"\n")
                        except UnicodeEncodeError: # str may fail...
                            headerlist = []
                        for line in headerlist:
                            if line != "":
                                splitheader = line.split()
                                if splitheader[0] == "X-FeatureRateLimit-Remaining:" or splitheader[0] == "X-RateLimit-Remaining:":
                                    Print(splitheader[0] , " " , splitheader[1] )
                                    if int(splitheader[1]) < 5:
                                        self.ratelimited = datetime.today()
                        # Grab json format result of people search here
                        try:
                            data = conn1.read()
                            try:
                                content = cjson.decode(data)
                                self.send(content,"outbox")
                            except cjson.DecodeError, e:
                                self.send(dict(),"outbox")
                        except IOError, e:
#                            sys.stderr.write('PeopleSearch IO error: ' + str(e) + '\n')
                            Print('PeopleSearch IO error: ', e)
                            self.send(dict(),"outbox")
                        conn1.close()
                    else:
                        self.send(dict(),"outbox")
                else:
                   Print("Twitter search paused - rate limited")
                   self.send(dict(),"outbox")
            self.pause()
            yield 1
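
Before issuing a request, the same component checks that at least 15 minutes have passed since the stored rate-limit marker. A hedged sketch of that gate, mirroring the 15-minute window used above (`ratelimited` stands for the value recorded when the quota ran low):

    from datetime import datetime, timedelta

    COOLDOWN = timedelta(minutes=15)

    def may_request(ratelimited):
        """True once the 15-minute cooldown started with datetime.today() has elapsed."""
        return (datetime.today() - COOLDOWN) > ratelimited

    may_request(datetime.today())                # False: we were just rate limited
    may_request(datetime.today() - COOLDOWN * 2) # True: the window has passed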

Example 16

Project: python-bugzilla Source File: rw_functional.py
    def _test8Attachments(self):
        """
        Get and set attachments for a bug
        """
        bz = self.bzclass(url=self.url)
        getallbugid = "663674"
        setbugid = "461686"
        cmd = "bugzilla attach "
        testfile = "../tests/data/bz-attach-get1.txt"

        # Add attachment as CLI option
        setbug = bz.getbug(setbugid, extra_fields=["attachments"])
        orignumattach = len(setbug.attachments)

        # Add attachment from CLI with mime guessing
        desc1 = "python-bugzilla cli upload %s" % datetime.datetime.today()
        out1 = tests.clicomm(cmd + "%s --description \"%s\" --file %s" %
                             (setbugid, desc1, testfile), bz)

        desc2 = "python-bugzilla cli upload %s" % datetime.datetime.today()
        out2 = tests.clicomm(cmd + "%s --file test --description \"%s\"" %
                             (setbugid, desc2), bz, stdin=open(testfile))

        # Expected output format:
        #   Created attachment <attachid> on bug <bugid>

        setbug.refresh()
        self.assertEquals(len(setbug.attachments), orignumattach + 2)
        self.assertEquals(setbug.attachments[-2]["description"], desc1)
        self.assertEquals(setbug.attachments[-2]["id"],
                          int(out1.splitlines()[2].split()[2]))
        self.assertEquals(setbug.attachments[-1]["description"], desc2)
        self.assertEquals(setbug.attachments[-1]["id"],
                          int(out2.splitlines()[2].split()[2]))
        attachid = setbug.attachments[-2]["id"]

        # Set attachment flags
        self.assertEquals(setbug.attachments[-1]["flags"], [])
        bz.updateattachmentflags(setbug.id, setbug.attachments[-1]["id"],
                                 "review", status="+")
        setbug.refresh()

        self.assertEquals(len(setbug.attachments[-1]["flags"]), 1)
        self.assertEquals(setbug.attachments[-1]["flags"][0]["name"], "review")
        self.assertEquals(setbug.attachments[-1]["flags"][0]["status"], "+")

        bz.updateattachmentflags(setbug.id, setbug.attachments[-1]["id"],
                                 "review", status="X")
        setbug.refresh()
        self.assertEquals(setbug.attachments[-1]["flags"], [])


        # Get attachment, verify content
        out = tests.clicomm(cmd + "--get %s" % attachid, bz).splitlines()

        # Expect format:
        #   Wrote <filename>
        fname = out[2].split()[1].strip()

        self.assertEquals(len(out), 3)
        self.assertEquals(fname, "bz-attach-get1.txt")
        self.assertEquals(open(fname).read(),
                          open(testfile).read())
        os.unlink(fname)

        # Get all attachments
        getbug = bz.getbug(getallbugid)
        getbug.autorefresh = True
        numattach = len(getbug.attachments)
        out = tests.clicomm(cmd + "--getall %s" % getallbugid, bz).splitlines()

        self.assertEquals(len(out), numattach + 2)
        fnames = [l.split(" ", 1)[1].strip() for l in out[2:]]
        self.assertEquals(len(fnames), numattach)
        for f in fnames:
            if not os.path.exists(f):
                raise AssertionError("filename '%s' not found" % f)
            os.unlink(f)

Example 17

Project: satnogs-network Source File: views.py
def station_view(request, id):
    """View for single station page."""
    station = get_object_or_404(Station, id=id)
    form = StationForm(instance=station)
    antennas = Antenna.objects.all()
    rigs = Rig.objects.all()

    try:
        satellites = Satellite.objects.filter(transmitters__alive=True).distinct()
    except:
        pass  # we won't have any next passes to display

    # Load the station information and invoke ephem so we can
    # calculate upcoming passes for the station
    observer = ephem.Observer()
    observer.lon = str(station.lng)
    observer.lat = str(station.lat)
    observer.elevation = station.alt

    nextpasses = []
    passid = 0

    for satellite in satellites:
        observer.date = ephem.date(datetime.today())

        try:
            sat_ephem = ephem.readtle(str(satellite.latest_tle.tle0),
                                      str(satellite.latest_tle.tle1),
                                      str(satellite.latest_tle.tle2))

            # Here we are going to iterate over each satellite to
            # find its appropriate passes within a given time constraint
            keep_digging = True
            while keep_digging:
                try:
                    tr, azr, tt, altt, ts, azs = observer.next_pass(sat_ephem)

                    if tr is None:
                        break

                    # bug in pyephem causes overhead sats to appear in the result
                    # mixing next-pass data with current pass data, resulting in
                    # satnogs/satnogs-network#199. As a workaround, pyephem does
                    # return set time for current pass while rise time for next
                    # pass so when this happens we want to toss the entry out
                    # not a break as this sat might have another valid pass
                    if ts < tr:
                        pass

                    # using the angles module convert the sexagesimal degree into
                    # something more easily read by a human
                    elevation = format(math.degrees(altt), '.0f')
                    azimuth = format(math.degrees(azr), '.0f')
                    passid += 1

                    # show only if >= configured horizon and in next 6 hours
                    if tr < ephem.date(datetime.today() + timedelta(hours=6)):
                        if float(elevation) >= station.horizon:
                            sat_pass = {'passid': passid,
                                        'mytime': str(observer.date),
                                        'debug': observer.next_pass(sat_ephem),
                                        'name': str(satellite.name),
                                        'id': str(satellite.id),
                                        'norad_cat_id': str(satellite.norad_cat_id),
                                        'tr': tr,           # Rise time
                                        'azr': azimuth,     # Rise Azimuth
                                        'tt': tt,           # Max altitude time
                                        'altt': elevation,  # Max altitude
                                        'ts': ts,           # Set time
                                        'azs': azs}         # Set azimuth
                            nextpasses.append(sat_pass)
                        observer.date = ephem.Date(ts).datetime() + timedelta(minutes=1)
                        continue
                    else:
                        keep_digging = False
                    continue
                except ValueError:
                    break  # there will be sats in our list that fall below horizon, skip
                except TypeError:
                    break  # if there happens to be a non-EarthSatellite object in the list
                except Exception:
                    break
        except (ValueError, AttributeError):
            pass  # TODO: if something does not have a proper TLE line we need to know/fix

    return render(request, 'base/station_view.html',
                  {'station': station, 'form': form, 'antennas': antennas,
                   'mapbox_id': settings.MAPBOX_MAP_ID,
                   'mapbox_token': settings.MAPBOX_TOKEN,
                   'nextpasses': sorted(nextpasses, key=itemgetter('tr')),
                   'rigs': rigs})
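
station_view keeps a pass only when its rise time falls before datetime.today() plus six hours. A small ephem-free sketch of that windowing; the list structure and the 'tr' key are illustrative, not taken from the project:

    from datetime import datetime, timedelta

    def passes_within(passes, hours=6):
        """Keep only the passes whose rise time 'tr' falls before now + `hours`."""
        cutoff = datetime.today() + timedelta(hours=hours)
        return [p for p in passes if p["tr"] < cutoff]

    upcoming = passes_within([{"tr": datetime.today() + timedelta(hours=2)},
                              {"tr": datetime.today() + timedelta(hours=9)}])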

Example 18

Project: stopstalk-deployment Source File: default.py
Function: filters
def filters():
    """
        Apply multiple kind of filters on submissions
    """

    stable = db.submission
    get_vars = request.get_vars
    if len(request.args) == 0:
        page = 1
    else:
        page = int(request.args[0])
        page -= 1

    all_languages = db(stable).select(stable.lang,
                                      distinct=True)
    languages = [x["lang"] for x in all_languages]

    table = None
    global_submissions = False
    if get_vars.has_key("global"):
        if get_vars["global"] == "True":
            global_submissions = True

    # If form is not submitted
    if get_vars == {}:
        return dict(languages=languages,
                    div=DIV(),
                    global_submissions=global_submissions)

    # If nothing is filled in the form
    # these fields should be passed in
    # the URL with empty value
    compulsary_keys = ["pname", "name", "end_date", "start_date"]
    if set(compulsary_keys).issubset(get_vars.keys()) is False:
        session.flash = "Invalid URL parameters"
        redirect(URL("default", "filters"))

    # Form has been submitted
    cftable = db.custom_friend
    atable = db.auth_user
    ftable = db.friends
    duplicates = []

    switch = DIV(LABEL(H6("Friends' Submissions",
                          INPUT(_type="checkbox", _id="submission-switch"),
                          SPAN(_class="lever pink accent-3"),
                          "Global Submissions")),
                 _class="switch")
    table = TABLE()
    div = TAG[""](H4("Recent Submissions"), switch, table)

    if global_submissions is False and not auth.is_logged_in():
        session.flash = "Login to view Friends' submissions"
        new_vars = request.vars
        new_vars["global"] = True
        redirect(URL("default", "filters",
                     vars=new_vars,
                     args=request.args))

    query = True
    username = get_vars["name"]
    if username != "":
        tmplist = username.split()
        for token in tmplist:
            query &= ((cftable.first_name.contains(token)) | \
                      (cftable.last_name.contains(token)) | \
                      (cftable.stopstalk_handle.contains(token)))


    if global_submissions is False:
        # Retrieve all the custom users created by the logged-in user
        query = (cftable.user_id == session.user_id) & query
    cust_friends = db(query).select(cftable.id, cftable.duplicate_cu)

    # The Original IDs of duplicate custom_friends
    custom_friends = []
    for cus_id in cust_friends:
        if cus_id.duplicate_cu:
            duplicates.append((cus_id.id, cus_id.duplicate_cu))
            custom_friends.append(cus_id.duplicate_cu)
        else:
            custom_friends.append(cus_id.id)

    query = True
    # Get the friends of logged in user
    if username != "":
        tmplist = username.split()
        username_query = False
        for token in tmplist:
            username_query |= ((atable.first_name.contains(token)) | \
                               (atable.last_name.contains(token)) | \
                               (atable.stopstalk_handle.contains(token)))
            for site in current.SITES:
                username_query |= (atable[site.lower() + \
                                   "_handle"].contains(token))

        query &= username_query

    # @ToDo: Anyway to use join instead of two such db calls
    possible_users = db(query).select(atable.id)
    possible_users = [x["id"] for x in possible_users]
    friends = possible_users

    if global_submissions is False:
        query = (ftable.user_id == session.user_id) & \
                (ftable.friend_id.belongs(possible_users))
        friend_ids = db(query).select(ftable.friend_id)
        friends = [x["friend_id"] for x in friend_ids]

        if session.user_id in possible_users:
            # Show submissions of user also
            friends.append(session.user_id)

    # User in one of the friends
    query = (stable.user_id.belongs(friends))

    # User in one of the custom friends
    query |= (stable.custom_user_id.belongs(custom_friends))

    start_date = get_vars["start_date"]
    end_date = get_vars["end_date"]

    # Else part ensures that both the dates passed
    # are included in the range
    if start_date == "":
        # If start date is empty start from the INITIAL_DATE
        start_date = current.INITIAL_DATE
    else:
        # Else append starting time for that day
        start_date += " 00:00:00"

    if end_date == "":
        # If end date is empty retrieve all submissions till now(current timestamp)
        # Current date/time
        end_date = str(datetime.datetime.today())
        # Remove the last milliseconds from the timestamp
        end_date = end_date[:-7]
    else:
        # Else append the ending time for that day
        end_date += " 23:59:59"

    start_time = time.strptime(start_date, "%Y-%m-%d %H:%M:%S")
    end_time = time.strptime(end_date, "%Y-%m-%d %H:%M:%S")

    if end_time > start_time:
        # Submissions in the the range start_date to end_date
        query &= (stable.time_stamp >= start_date) & \
                 (stable.time_stamp <= end_date)
    else:
        session.flash = "Start Date greater than End Date"
        redirect(URL("default", "filters"))

    pname = get_vars["pname"]
    # Submissions with problem name containing pname
    if pname != "":
        pname = pname.split()
        for token in pname:
            query &= (stable.problem_name.contains(token))

    # Check if multiple parameters are passed
    def _get_values_list(param_name):

        values_list = None
        if get_vars.has_key(param_name):
            values_list = get_vars[param_name]
            if isinstance(values_list, str):
                values_list = [values_list]
        elif get_vars.has_key(param_name + "[]"):
            values_list = get_vars[param_name + "[]"]
            if isinstance(values_list, str):
                values_list = [values_list]

        return values_list

    # Submissions from this site
    sites = _get_values_list("site")
    if sites:
        query &= (stable.site.belongs(sites))

    # Submissions with this language
    langs = _get_values_list("language")
    if langs:
        query &= (stable.lang.belongs(langs))

    # Submissions with this status
    statuses = _get_values_list("status")
    if statuses:
        query &= (stable.status.belongs(statuses))

    PER_PAGE = current.PER_PAGE
    # Apply the complex query and sort by time_stamp DESC
    filtered = db(query).select(limitby=(page * PER_PAGE,
                                         (page + 1) * PER_PAGE),
                                orderby=~stable.time_stamp)

    total_problems = db(query).count()
    total_pages = total_problems / 100
    if total_problems % 100 == 0:
        total_pages += 1

    table = utilities.render_table(filtered, duplicates)
    switch = DIV(LABEL(H6("Friends' Submissions",
                          INPUT(_type="checkbox", _id="submission-switch"),
                          SPAN(_class="lever pink accent-3"),
                          "Global Submissions")),
                 _class="switch")
    div = TAG[""](switch, table)

    return dict(languages=languages,
                div=div,
                total_pages=total_pages,
                global_submissions=global_submissions)
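
When the end date field is left blank, filters() falls back to the current timestamp and strips the trailing microseconds so the string still matches the '%Y-%m-%d %H:%M:%S' format it parses later. A compact sketch of that defaulting and the range check; INITIAL_DATE here is an assumed placeholder for the project's constant:

    import datetime
    import time

    INITIAL_DATE = "2013-01-01 00:00:00"  # assumed stand-in for current.INITIAL_DATE

    def resolve_range(start_date, end_date):
        start_date = (start_date + " 00:00:00") if start_date else INITIAL_DATE
        # str(datetime.datetime.today()) ends in ".123456"; dropping 7 chars removes the microseconds
        end_date = (end_date + " 23:59:59") if end_date else str(datetime.datetime.today())[:-7]
        fmt = "%Y-%m-%d %H:%M:%S"
        if time.strptime(end_date, fmt) <= time.strptime(start_date, fmt):
            raise ValueError("Start Date greater than End Date")
        return start_date, end_date

    resolve_range("", "")  # (INITIAL_DATE, current timestamp without microseconds)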

Example 19

Project: sponge Source File: repo.py
def rebalance_sync_schedule(errors=None):
    repoapi = RepositoryAPI()
    repos = get_repos()

    # get a list of sync frequencies
    syncgroups = dict()  # dict of sync time -> [groups]
    default = None
    for ckey, sync in config.list(filter=dict(name__startswith="sync_frequency_")).items():
        group = ckey.replace("sync_frequency_", "")
        if sync is None:
            logger.error("Sync frequency for %s is None, skipping" % group)
            continue
        synctime = 60 * 60 * int(sync)
        if "group" == "default":
            default = synctime
        else:
            try:
                syncgroups[synctime].append(group)
            except KeyError:
                syncgroups[synctime] = [group]

    # divide the repos up by sync time and sort them by inheritance,
    # reversed, to ensure that children get synced before parents and
    # a package doesn't just go straight to the final child
    cycles = dict() # dict of repo -> sync time
    for repo in repos.values():
        cycles[repo['id']] = default
        for synctime, groups in syncgroups.items():
            if (set(groups) & set(repo['groupid']) and
                (cycles[repo['id']] is None or
                 synctime > cycles[repo['id']])):
                cycles[repo['id']] = synctime

    # finally, build a dict of sync time -> [repos]
    syncs = dict()
    for repoid, synctime in cycles.items():
        if synctime is None:
            continue
        try:
            syncs[synctime].append(repos[repoid])
        except KeyError:
            syncs[synctime] = [repos[repoid]]

    for synctime, syncrepos in syncs.items():
        syncrepos = sort_repos_by_ancestry(syncrepos)
        syncrepos.reverse()

        # we count the total number of packages in all repos, and
        # divide them evenly amongst the timespan allotted.  It's
        # worth noting that we count clones just the same as we count
        # "regular" repos, because it's createrepo, not the sync, that
        # really takes a lot of time and memory.
        pkgs = 0
        for repo in syncrepos:
            if repo['package_count'] < 10:
                # we still have to run createrepo even if there are
                # very few (or no!) packages, so count very small
                # repos as 10 packages
                pkgs += 10
            else:
                pkgs += repo['package_count']
    
        try:
            pkgtime = float(synctime) / pkgs
        except ZeroDivisionError:
            pkgtime = 1
            logger.debug("Allowing %s seconds per package" % pkgtime)

        # find tomorrow morning at 12:00 am
        tomorrow = datetime.datetime.today() + datetime.timedelta(days=1)
        start = datetime.datetime(tomorrow.year, tomorrow.month, tomorrow.day)

        if errors is None:
            errors = []

        for repo in syncrepos:
            iso8601_start = format_iso8601_datetime(start)
            iso8601_interval = \
                format_iso8601_interval(datetime.timedelta(seconds=synctime))
            logger.debug("Scheduling %s to start at %s, sync every %s" %
                         (repo['id'], iso8601_start, iso8601_interval))
            schedule = parse_interval_schedule(iso8601_interval,
                                               iso8601_start,
                                               None)

            try:
                repoapi.change_sync_schedule(repo['id'],
                                             dict(schedule=schedule,
                                                  options=dict()))
                reload_repo(repo['id'])
            except ServerRequestError, err:
                errors.append("Could not set schedule for %s: %s" %
                              (repo['id'], err[1]))
            
            start += datetime.timedelta(seconds=int(pkgtime *
                                                    repo['package_count']))
    return not errors
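
rebalance_sync_schedule anchors every schedule at the upcoming midnight by adding one day to datetime.datetime.today() and rebuilding the date at 00:00, then staggers repos by an estimated per-package time. A standalone sketch of that calculation with illustrative numbers:

    import datetime

    def next_midnight():
        tomorrow = datetime.datetime.today() + datetime.timedelta(days=1)
        return datetime.datetime(tomorrow.year, tomorrow.month, tomorrow.day)

    start = next_midnight()
    pkgtime = 2.5                            # assumed seconds allotted per package
    for package_count in (120, 40, 600):     # illustrative repo sizes
        print(start.isoformat())
        start += datetime.timedelta(seconds=int(pkgtime * package_count))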

Example 20

Project: NOT_UPDATED_Sick-Beard-Dutch Source File: properFinder.py
    def _getProperList(self):

        propers = {}

        # for each provider get a list of the propers
        for curProvider in providers.sortedProviderList():

            if not curProvider.isActive():
                continue

            search_date = datetime.datetime.today() - datetime.timedelta(days=2)

            logger.log(u"Searching for any new PROPER releases from " + curProvider.name)
            try:
                curPropers = curProvider.findPropers(search_date)
            except exceptions.AuthException, e:
                logger.log(u"Authentication error: " + ex(e), logger.ERROR)
                continue

            # if they haven't been added by a different provider then add the proper to the list
            for x in curPropers:
                name = self._genericName(x.name)

                if not name in propers:
                    logger.log(u"Found new proper: " + x.name, logger.DEBUG)
                    x.provider = curProvider
                    propers[name] = x

        # take the list of unique propers and sort it by date, newest first
        sortedPropers = sorted(propers.values(), key=operator.attrgetter('date'), reverse=True)
        finalPropers = []

        for curProper in sortedPropers:

            # parse the file name
            try:
                myParser = NameParser(False)
                parse_result = myParser.parse(curProper.name)
            except InvalidNameException:
                logger.log(u"Unable to parse the filename " + curProper.name + " into a valid episode", logger.DEBUG)
                continue

            if not parse_result.episode_numbers:
                logger.log(u"Ignoring " + curProper.name + " because it's for a full season rather than specific episode", logger.DEBUG)
                continue

            # populate our Proper instance
            if parse_result.air_by_date:
                curProper.season = -1
                curProper.episode = parse_result.air_date
            else:
                curProper.season = parse_result.season_number if parse_result.season_number != None else 1
                curProper.episode = parse_result.episode_numbers[0]
            curProper.quality = Quality.nameQuality(curProper.name)

            # for each show in our list
            for curShow in sickbeard.showList:

                if not parse_result.series_name:
                    continue

                genericName = self._genericName(parse_result.series_name)

                # get the scene name masks
                sceneNames = set(show_name_helpers.makeSceneShowSearchStrings(curShow))

                # for each scene name mask
                for curSceneName in sceneNames:

                    # if it matches
                    if genericName == self._genericName(curSceneName):
                        logger.log(u"Successful match! Result " + parse_result.series_name + " matched to show " + curShow.name, logger.DEBUG)

                        # set the tvdbid in the db to the show's tvdbid
                        curProper.tvdbid = curShow.tvdbid

                        # since we found it, break out
                        break

                # if we found something in the inner for loop break out of this one
                if curProper.tvdbid != -1:
                    break

            if curProper.tvdbid == -1:
                continue

            if not show_name_helpers.filterBadReleases(curProper.name):
                logger.log(u"Proper " + curProper.name + " isn't a valid scene release that we want, ignoring it", logger.DEBUG)
                continue

            # if we have an air-by-date show then get the real season/episode numbers
            if curProper.season == -1 and curProper.tvdbid:
                showObj = helpers.findCertainShow(sickbeard.showList, curProper.tvdbid)
                if not showObj:
                    logger.log(u"This should never have happened, post a bug about this!", logger.ERROR)
                    raise Exception("BAD STUFF HAPPENED")

                tvdb_lang = showObj.lang
                # There's gotta be a better way of doing this but we don't wanna
                # change the language value elsewhere
                ltvdb_api_parms = sickbeard.TVDB_API_PARMS.copy()

                if tvdb_lang and not tvdb_lang == 'en':
                    ltvdb_api_parms['language'] = tvdb_lang

                try:
                    t = tvdb_api.Tvdb(**ltvdb_api_parms)
                    epObj = t[curProper.tvdbid].airedOn(curProper.episode)[0]
                    curProper.season = int(epObj["seasonnumber"])
                    curProper.episodes = [int(epObj["episodenumber"])]
                except tvdb_exceptions.tvdb_episodenotfound:
                    logger.log(u"Unable to find episode with date " + str(curProper.episode) + " for show " + parse_result.series_name + ", skipping", logger.WARNING)
                    continue

            # check if we actually want this proper (if it's the right quality)
            sqlResults = db.DBConnection().select("SELECT status FROM tv_episodes WHERE showid = ? AND season = ? AND episode = ?", [curProper.tvdbid, curProper.season, curProper.episode])
            if not sqlResults:
                continue
            oldStatus, oldQuality = Quality.splitCompositeStatus(int(sqlResults[0]["status"]))

            # only keep the proper if we have already retrieved the same quality ep (don't get better/worse ones)
            if oldStatus not in (DOWNLOADED, SNATCHED) or oldQuality != curProper.quality:
                continue

            # if the show is in our list and there hasn't been a proper already added for that particular episode then add it to our list of propers
            if curProper.tvdbid != -1 and (curProper.tvdbid, curProper.season, curProper.episode) not in map(operator.attrgetter('tvdbid', 'season', 'episode'), finalPropers):
                logger.log(u"Found a proper that we need: " + str(curProper.name))
                finalPropers.append(curProper)

        return finalPropers

Example 21

Project: goristock Source File: goristock.py
Function: init
  def __init__(self, stock_no, data_num = 75, debug=0):
    """ stock_no: Stock no.
        data_num: Default fetch numbers. (Default is 75)
        debug: For debug to print some info about data solution. (Default is 0)

        stock_no: 股票代碼。
        data_num: 預設抓取的筆數(交易日數,預設為 75 筆)
        debug: 除錯用,列印出相關除錯資訊。0:關閉(預設) 1:開啟

        property:
          self.raw_data = [list]    收盤資訊,[舊→新]
          self.stock_name = str()   該股票名稱
          self.stock_no = str()     該股票代號
          self.data_date = [list]   日期資訊,[舊→新]
          self.stock_range = [list] 漲跌幅
          self.stock_vol = [list]   成交量
          self.stock_open = [list]  開盤價
          self.stock_h = [list]     最高價
          self.stock_l = [list]     最低價
    """
    self.raw_data = []
    self.stock_name = ''
    self.stock_no = stock_no
    self.data_date = []
    self.stock_range = []
    self.stock_vol = []
    self.stock_open = []
    self.stock_h = []
    self.stock_l = []
    starttime = 0
    self.debug = debug

    try:
      while len(self.raw_data) < data_num:
        # start fetch data.
        self.csv_read = self.fetch_data(stock_no, datetime.today() - timedelta(days = 30 * starttime), starttime)
        try:
          result = self.list_data(self.csv_read)
        except:
          # On the first day of the month the fetch may return no data.
          if starttime == 0:
            starttime += 1
            self.csv_read = self.fetch_data(stock_no, datetime.today() - timedelta(days = 30 * starttime), starttime)
            result = self.list_data(self.csv_read)
          logging.info('In first day of months %s' % stock_no)

        self.raw_data = result['stock_price'] + self.raw_data
        self.data_date = result['data_date'] + self.data_date
        self.stock_name = result['stock_name']
        self.stock_range = result['stock_range'] + self.stock_range
        self.stock_vol = result['stock_vol'] + self.stock_vol
        self.stock_open = result['stock_open'] + self.stock_open
        self.stock_h = result['stock_h'] + self.stock_h
        self.stock_l = result['stock_l'] + self.stock_l
        starttime += 1
    except:
      logging.info('Data not enough! %s' % stock_no)

    logging.info('Fetch %s' % stock_no)
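
The constructor above keeps stepping back roughly one month at a time, passing datetime.today() minus 30 * n days to its fetcher until enough rows are collected. A compact sketch of that backwards paging; fetch_month is a stand-in for the project's CSV download:

    from datetime import datetime, timedelta

    def fetch_month(day):
        """Stand-in fetcher: pretend each month yields 20 closing prices."""
        return [day.strftime("%Y-%m")] * 20

    raw_data, step = [], 0
    while len(raw_data) < 75:                       # data_num from the example above
        month = datetime.today() - timedelta(days=30 * step)
        raw_data = fetch_month(month) + raw_data    # older rows are prepended
        step += 1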

Example 22

Project: pybossa Source File: project_stats.py
@memoize(timeout=ONE_DAY)
def stats_dates(project_id, period='15 day'):
    """Return statistics with dates for a project."""
    dates = {}
    dates_anon = {}
    dates_auth = {}

    n_tasks(project_id)

    params = dict(project_id=project_id, period=period)

    # Get all completed tasks
    sql = text('''
               WITH myquery AS (
               SELECT task.id, coalesce(ct, 0) as n_task_runs, task.n_answers
               FROM task LEFT OUTER JOIN
               (SELECT task_id, COUNT(id) AS ct FROM task_run
               WHERE project_id=:project_id AND
               TO_DATE(task_run.finish_time, 'YYYY-MM-DD\THH24:MI:SS.US')
               >= NOW() - :period :: INTERVAL
               GROUP BY task_id) AS log_counts
               ON task.id=log_counts.task_id
               WHERE task.project_id=:project_id ORDER BY id ASC)
               select myquery.id, max(task_run.finish_time) as day
               from task_run, myquery where task_run.task_id=myquery.id
               and
               TO_DATE(task_run.finish_time, 'YYYY-MM-DD\THH24:MI:SS.US')
               >= NOW() - :period :: INTERVAL
               group by myquery.id order by day;
               ''').execution_options(stream=True)

    results = session.execute(sql, params)
    for row in results:
        day = row.day[:10]
        if day in dates.keys():
            dates[day] += 1
        else:
            dates[day] = 1

    # No completed tasks in the last period
    def _fill_empty_days(days, obj):
        if len(days) < convert_period_to_days(period):
            base = datetime.datetime.today()
            for x in range(0, convert_period_to_days(period)):
                tmp_date = base - datetime.timedelta(days=x)
                if tmp_date.strftime('%Y-%m-%d') not in days:
                    obj[tmp_date.strftime('%Y-%m-%d')] = 0
        return obj

    dates = _fill_empty_days(dates.keys(), dates)

    # Get all answers per date for auth
    sql = text('''
                WITH myquery AS (
                    SELECT TO_DATE(finish_time, 'YYYY-MM-DD\THH24:MI:SS.US')
                    as d, COUNT(id)
                    FROM task_run WHERE project_id=:project_id
                    AND user_ip IS NULL AND
                    TO_DATE(task_run.finish_time, 'YYYY-MM-DD\THH24:MI:SS.US')
                    >= NOW() - :period :: INTERVAL
                    GROUP BY d)
                SELECT to_char(d, 'YYYY-MM-DD') as d, count from myquery;
               ''').execution_options(stream=True)

    results = session.execute(sql, params)
    for row in results:
        dates_auth[row.d] = row.count

    dates_auth = _fill_empty_days(dates_auth.keys(), dates_auth)

    # Get all answers per date for anon
    sql = text('''
                WITH myquery AS (
                    SELECT TO_DATE(finish_time, 'YYYY-MM-DD\THH24:MI:SS.US')
                    as d, COUNT(id)
                    FROM task_run WHERE project_id=:project_id
                    AND user_id IS NULL AND
                    TO_DATE(task_run.finish_time, 'YYYY-MM-DD\THH24:MI:SS.US')
                    >= NOW() - :period :: INTERVAL
                    GROUP BY d)
               SELECT to_char(d, 'YYYY-MM-DD') as d, count  from myquery;
               ''').execution_options(stream=True)

    results = session.execute(sql, params)
    for row in results:
        dates_anon[row.d] = row.count

    dates_anon = _fill_empty_days(dates_anon.keys(), dates_anon)

    return dates, dates_anon, dates_auth
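
_fill_empty_days walks backwards from datetime.datetime.today() and inserts a zero for every day of the period that has no counts yet. A self-contained sketch of the same idea (the period length is illustrative):

    import datetime

    def fill_empty_days(counts, days=15):
        """Make sure every one of the last `days` dates has an entry, defaulting to 0."""
        base = datetime.datetime.today()
        for x in range(days):
            day = (base - datetime.timedelta(days=x)).strftime('%Y-%m-%d')
            counts.setdefault(day, 0)
        return counts

    dates = fill_empty_days({})   # 15 keys, all zero, newest is today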

Example 23

Project: khal Source File: event.py
def create_timezone(tz, first_date=None, last_date=None):
    """
    create an icalendar vtimezone from a pytz.tzinfo

    :param tz: the timezone
    :type tz: pytz.tzinfo
    :param first_date: the very first datetime that needs to be included in the
    transition times, typically the DTSTART value of the (first recurring)
    event
    :type first_date: datetime.datetime
    :param last_date: the last datetime that needs to included, typically the
    end of the (very last) event (of a recursion set)
    :returns: timezone information
    :rtype: icalendar.Timezone()

    we currently have a problem here:

       pytz.timezones only carry the absolute dates of time zone transitions,
       not their RRULEs. This will a) make for rather bloated VTIMEZONE
       components, especially for long recurring events, b) we'll need to
       specify for which time range this VTIMEZONE should be generated and c)
       will not be valid for recurring events that go into eternity.

    Possible Solutions:

    As this information is not provided by pytz at all, there is no
    easy solution, we'd really need to ship another version of the OLSON DB.

    """

    # TODO last_date = None, recurring to infinity

    first_date = datetime.today() if not first_date else to_naive_utc(first_date)
    last_date = datetime.today() if not last_date else to_naive_utc(last_date)
    timezone = icalendar.Timezone()
    timezone.add('TZID', tz)

    dst = {
        one[2]: 'DST' in two.__repr__()
        for one, two in iter(tz._tzinfos.items())
    }
    bst = {
        one[2]: 'BST' in two.__repr__()
        for one, two in iter(tz._tzinfos.items())
    }

    # looking for the first and last transition time we need to include
    first_num, last_num = 0, len(tz._utc_transition_times) - 1
    first_tt = tz._utc_transition_times[0]
    last_tt = tz._utc_transition_times[-1]
    for num, dt in enumerate(tz._utc_transition_times):
        if dt > first_tt and dt < first_date:
            first_num = num
            first_tt = dt
        if dt < last_tt and dt > last_date:
            last_num = num
            last_tt = dt

    timezones = dict()
    for num in range(first_num, last_num + 1):
        name = tz._transition_info[num][2]
        if name in timezones:
            ttime = tz.fromutc(tz._utc_transition_times[num]).replace(tzinfo=None)
            if 'RDATE' in timezones[name]:
                timezones[name]['RDATE'].dts.append(
                    icalendar.prop.vDDDTypes(ttime))
            else:
                timezones[name].add('RDATE', ttime)
            continue

        if dst[name] or bst[name]:
            subcomp = icalendar.TimezoneDaylight()
        else:
            subcomp = icalendar.TimezoneStandard()

        subcomp.add('TZNAME', tz._transition_info[num][2])
        subcomp.add(
            'DTSTART',
            tz.fromutc(tz._utc_transition_times[num]).replace(tzinfo=None))
        subcomp.add('TZOFFSETTO', tz._transition_info[num][0])
        subcomp.add('TZOFFSETFROM', tz._transition_info[num - 1][0])
        timezones[name] = subcomp

    for subcomp in timezones.values():
        timezone.add_component(subcomp)

    return timezone
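
When no bounds are supplied, create_timezone substitutes datetime.today() for both the first and last transition times it has to cover. A one-screen sketch of that defaulting pattern; the naive-UTC conversion applied to explicit bounds is omitted here:

    from datetime import datetime

    def clamp_bounds(first_date=None, last_date=None):
        """Fall back to 'now' for either missing bound."""
        first_date = datetime.today() if not first_date else first_date
        last_date = datetime.today() if not last_date else last_date
        return first_date, last_date

    clamp_bounds()                                   # both bounds default to the current moment
    clamp_bounds(first_date=datetime(2020, 1, 1))    # only the missing bound is filled in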

Example 24

Project: pmis Source File: project_time_schedule.py
    def get_critical_activities(self, d_activities):
        # warning = {}

        # Read the activity details
        activities = d_activities.values()

        for start_activity in activities:
            if start_activity.is_start:
                break

        l_successor_date_earliest_start = []
        for successor in start_activity.successors:
            if successor.date_earliest_start:
                l_successor_date_earliest_start.append(
                    successor.date_earliest_start
                )

        if l_successor_date_earliest_start:
            start_activity.date_early_start = min(
                l_successor_date_earliest_start
            )
        else:
            start_activity.date_early_start = network_activity.next_work_day(
                datetime.today()
            )

        network_activity.walk_list_ahead(start_activity)

        for stop_activity in activities:
            if stop_activity.is_stop:
                break

        stop_activity.date_late_finish = stop_activity.date_early_finish

        stop_activity.date_late_start = network_activity.sub_work_days(
            stop_activity.date_late_finish, stop_activity.replan_duration
        )

        network_activity.walk_list_aback(stop_activity)

        start_activity.date_late_finish = start_activity.date_early_finish
        start_activity.date_late_start = network_activity.sub_work_days(
            start_activity.date_late_finish, start_activity.replan_duration
        )

        # Calculate Float
        for act in activities:

            l_successor_date_early_start = []
            for successor in act.successors:
                l_successor_date_early_start.append(successor.date_early_start)
            if l_successor_date_early_start:
                [act.free_float, rr] = network_activity.work_days_diff(
                    act.date_early_finish, min(l_successor_date_early_start)
                )
            [act.total_float, rr] = network_activity.work_days_diff(
                act.date_early_start, act.date_late_start
            )

        # Calculate shortest path
        C_INFINITE = 9999
        d_graph = {}
        for act in d_activities.keys():
            d_neighbours = {}
            for other_act in d_activities.keys():
                if other_act != act:
                    d_neighbours[other_act] = C_INFINITE
                    for pred_act in d_activities[act].predecessors:
                        if other_act == pred_act.activity_id:
                            d_neighbours[other_act] = pred_act.total_float
                    for succ_act in d_activities[act].successors:
                        if other_act == succ_act.activity_id:
                            d_neighbours[other_act] = succ_act.total_float
            d_graph[act] = d_neighbours

        l_spath = []
        try:
            l_spath = shortestPath(d_graph, 'start', 'stop')
        except Exception:
            _logger.warning(
                """Could not calculate the critical path due to existing negative floats
                in one or more of the network activities."""
            )

        for act in activities:
            item = next((i for i in l_spath if i == act.activity_id), None)
            if item is not None:
                act.is_critical_path = True

Example 25

Project: pluss Source File: oauth2.py
@app.route("/oauth2callback")
def oauth2():
    """Google redirects the user back to this endpoint to continue the OAuth2 flow."""

    # Check for errors from the OAuth2 process
    err = flask.request.args.get('error')
    if err == 'access_denied':
        return flask.redirect(flask.url_for('denied'))
    elif err is not None:
        app.logger.warning("OAuth2 callback received error: %s", err)
        # TODO: handle this better (flash message?)
        message = 'Whoops, something went wrong (error=%s). Please try again later.'
        return message % flask.escape(err), 500

    # Okay, no errors, so we should have a valid authorization code.
    # Time to go get our server-side tokens for this user from Google.
    auth_code = flask.request.args['code']
    if auth_code is None:
        return 'Authorization code is missing.', 400 # Bad Request

    data =  {
        'code': auth_code,
        'client_id': Config.get('oauth', 'client-id'),
        'client_secret': Config.get('oauth', 'client-secret'),
        'redirect_uri': full_url_for('oauth2'),
        'grant_type': 'authorization_code',
    }
    try:
        response = session.post(OAUTH2_BASE + '/token', data, timeout=GOOGLE_API_TIMEOUT)
    except requests.exceptions.Timeout:
        app.logger.error('OAuth2 token request timed out.')
        # TODO: handle this better (flash message?)
        message = 'Whoops, Google took too long to respond. Please try again later.'
        return message, 504 # Gateway Timeout

    if response.status_code != 200:
        app.logger.error('OAuth2 token request got HTTP response %s for code "%s".',
            response.status_code, auth_code)
        # TODO: handle this better (flash message?)
        message = ('Whoops, we failed to finish processing your authorization with Google.'
                   ' Please try again later.')
        return message, 401 # Unauthorized

    try:
        result = response.json()
    except ValueError:
        app.logger.error('OAuth2 token request got non-JSON response for code "%s".', auth_code)
        # TODO: handle this better (flash message?)
        message = ('Whoops, we got an invalid response from Google for your authorization.'
                   ' Please try again later.')
        return message, 502 # Bad Gateway

    # Sanity check: we always expect Bearer tokens.
    if result.get('token_type') != 'Bearer':
        app.logger.error('OAuth2 token request got unknown token type "%s" for code "%s".',
            result['token_type'], auth_code)
        # TODO: handle this better (flash message?)
        message = ('Whoops, we got an invalid response from Google for your authorization.'
                   ' Please try again later.')
        return message, 502 # Bad Gateway

    # All non-error responses should have an access token.
    access_token = result['access_token']
    refresh_token = result.get('refresh_token')

    # This is in seconds, but we convert it to an absolute timestamp so that we can
    # account for the potential delay it takes to look up the G+ id we should associate
    # the access tokens with. (Could be up to GOOGLE_API_TIMEOUT seconds later.)
    expiry = datetime.datetime.today() + datetime.timedelta(seconds=result['expires_in'])

    try:
        person = get_person_by_access_token(access_token)
    except UnavailableException as e:
        app.logger.error('Unable to finish OAuth2 flow: %r.' % e)
        message = ('Whoops, we got an invalid response from Google for your authorization.'
                   ' Please try again later.')
        return message, 502 # Bad Gateway

    if refresh_token is not None:
        TokenIdMapping.update_refresh_token(person['id'], refresh_token)

    # Convert the absolute expiry timestamp back into a duration in seconds
    expires_in = int((expiry - datetime.datetime.today()).total_seconds())
    Cache.set(ACCESS_TOKEN_CACHE_KEY_TEMPLATE % person['id'], access_token, time=expires_in)

    # Whew, all done! Set a cookie with the user's G+ id and send them back to the homepage.
    app.logger.info("Successfully authenticated G+ id %s.", person['id'])
    response = flask.make_response(flask.redirect(flask.url_for('main')))
    response.set_cookie('gplus_id', person['id'])
    return response
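
The callback above turns Google's relative expires_in into an absolute timestamp with datetime.datetime.today(), performs the (possibly slow) account lookup, and then converts back to a remaining number of seconds for the cache. A minimal sketch of that round trip:

    import datetime

    def absolute_expiry(expires_in_seconds):
        return datetime.datetime.today() + datetime.timedelta(seconds=expires_in_seconds)

    def seconds_left(expiry):
        # slightly smaller than the original value if time has passed in between
        return int((expiry - datetime.datetime.today()).total_seconds())

    expiry = absolute_expiry(3600)       # token reported to live for an hour
    cache_ttl = seconds_left(expiry)     # what actually gets handed to the cache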

Example 26

Project: king-phisher Source File: campaign.py
	def _set_defaults(self):
		"""
		Set any default values for widgets. Also load settings from the existing
		campaign if one was specified.
		"""
		calendar = self.gobjects['calendar_campaign_expiration']
		default_day = datetime.datetime.today() + datetime.timedelta(days=31)
		gui_utilities.gtk_calendar_set_pydate(calendar, default_day)

		if self.campaign_id is None:
			return
		campaign = self.application.rpc.remote_table_row('campaigns', self.campaign_id, cache=True, refresh=True)

		self.gobjects['entry_campaign_name'].set_text(campaign.name)
		if campaign.description is not None:
			self.gobjects['entry_campaign_description'].set_text(campaign.description)
		if campaign.campaign_type_id is not None:
			combobox = self.gobjects['combobox_campaign_type']
			model = combobox.get_model()
			model_iter = gui_utilities.gtk_list_store_search(model, campaign.campaign_type_id, column=0)
			if model_iter is not None:
				combobox.set_active_iter(model_iter)

		self.gobjects['checkbutton_alert_subscribe'].set_property('active', self.application.rpc('campaign/alerts/is_subscribed', self.campaign_id))
		self.gobjects['checkbutton_reject_after_credentials'].set_property('active', campaign.reject_after_credentials)

		if campaign.company_id is not None:
			self.gobjects['radiobutton_company_existing'].set_active(True)
			combobox = self.gobjects['combobox_company_existing']
			model = combobox.get_model()
			model_iter = gui_utilities.gtk_list_store_search(model, campaign.company_id, column=0)
			if model_iter is not None:
				combobox.set_active_iter(model_iter)

		if campaign.expiration is not None:
			expiration = utilities.datetime_utc_to_local(campaign.expiration)
			self.gobjects['checkbutton_expire_campaign'].set_active(True)
			gui_utilities.gtk_calendar_set_pydate(self.gobjects['calendar_campaign_expiration'], expiration)
			self.gobjects['spinbutton_campaign_expiration_hour'].set_value(expiration.hour)
			self.gobjects['spinbutton_campaign_expiration_minute'].set_value(expiration.minute)

Example 27

Project: passwdmanager Source File: service.py
    def changeRootPwd(self,newRootPwd):
        oldPwd = config.getRootPwd()
        
        conn = self.getConnection()
        masterDao = MasterDao(conn)
        pwdDao = PwdDao(conn)
        
        # 1 re-encrypt all passwords with new root pwd
        accountList = pwdDao.getAllPasswd()
        currentDate = datetime.datetime.today()
        for account in accountList:
            dePassword = util.decrypt(oldPwd, account.pwd)
            enPassword = util.encrypt(newRootPwd, dePassword)

            if account.secret:
                deSecret = util.decrypt(oldPwd, account.secret)
                enSecret = util.encrypt(newRootPwd, deSecret)
            else:
                enSecret = ""

            deUsername = util.decrypt(oldPwd, account.username)
            enUsername = util.encrypt(newRootPwd, deUsername)

            account.pwd = enPassword
            account.username = enUsername
            account.secret = enSecret

            account.lastupdate = currentDate
            pwdDao.updateAccount(account.id,account.title, account.description, account.username, 
                                 account.pwd, account.secret,account.lastupdate)
            
        
        # 2 get md5 of new root pwd, update the rootpassword table
        newMd5String = util.md5Encode(newRootPwd)
        masterDao.updateMasterPwd(newMd5String)
        
        # 3 update master password in config module.
        config.setRootPwd(newRootPwd)
        
        conn.commit()
        conn.close()

Example 28

Project: gazouilleur Source File: stats.py
    @inlineCallbacks
    def print_last(self):
        now = timestamp_hour(datetime.today())
        since = now - timedelta(days=30)
        stats = yield find_stats({'user': self.user, 'timestamp': {'$gte': since}}, filter=sortdesc('timestamp'))
        if not len(stats):
            returnValue()
        stat = stats[0]
        stat["followers"] = yield count_followers(self.user)
        rts = 0
        fols = 0
        twts = 0
        delays = {1: 'hour', 6: '6 hours', 24: 'day', 7*24: 'week', 30*24: 'month'}
        order = delays.keys()
        order.sort()
        olds = {'tweets': {}, 'followers': {}, 'rts': {}}
        for s in stats:
            d = now - s['timestamp']
            delay = d.seconds / 3600 + d.days * 24
            fols = stat['followers'] - s['followers']
            twts = stat['tweets'] - s['tweets']
            for i in order:
                if delay == i:
                    if 'stats%sH' % i not in olds['tweets']:
                        olds['tweets']['stats%sH' % i] = twts if twts not in olds['tweets'].values() else 0
                    if 'stats%sH' % i not in olds['followers']:
                        olds['followers']['stats%sH' % i] = fols if fols not in olds['followers'].values() else 0
                    if 'stats%sH' % i not in olds['rts']:
                        olds['rts']['stats%sH' % i] = rts if rts not in olds['rts'].values() else 0
            rts += s['rts_last_hour']
        olds['rts']['stats1H'] = stat['rts_last_hour']
        for i in order:
            if rts and 'stats%sH' % i not in olds['rts'] and rts not in olds['rts'].values():
                olds['rts']['stats%sH' % i] = rts
                rts = 0
            if fols and 'stats%sH' % i not in olds['followers']  and fols not in olds['followers'].values():
                olds['followers']['stats%sH' % i] = fols
                fols = 0
            if twts and 'stats%sH' % i not in olds['tweets'] and twts not in olds['tweets'].values():
                olds['tweets']['stats%sH' % i] = twts
                twts = 0
        res = []
        if stat['tweets']:
            res.append("Tweets: %d total" % stat['tweets'] + " ; ".join([""]+["%d last %s" %  (olds['tweets']['stats%sH' % i], delays[i]) for i in order if 'stats%sH' % i in olds['tweets'] and olds['tweets']['stats%sH' % i]]))
        textrts = ["%d last %s" % (olds['rts']['stats%sH' % i], delays[i]) for i in order if 'stats%sH' % i in olds['rts'] and olds['rts']['stats%sH' % i]]
        if textrts:
            res.append("RTs: " + " ; ".join(textrts))
        if stat['followers']:
            res.append("Followers: %d total" % stat['followers'] + " ; ".join([""]+["%+d last %s" % (olds['followers']['stats%sH' % i], delays[i]) for i in order if 'stats%sH' % i in olds['followers'] and olds['followers']['stats%sH' % i]]))
            recent = yield find_last_followers(self.user)
            if recent:
                res.append("Recent follower%s: %s" % ("s include" if len(recent) > 1 else "", format_4_followers(recent)))
        if self.url and res:
            res.append("More details: %sstatic_stats_%s.html" % (self.url, self.user))
        returnValue([(True, "[Stats] %s" % m) for m in res])
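
timestamp_hour() is a gazouilleur helper; a plausible standalone equivalent of the now/since pair, assuming the helper simply truncates the current time to the hour:

from datetime import datetime, timedelta

now = datetime.today().replace(minute=0, second=0, microsecond=0)  # assumed equivalent of timestamp_hour()
since = now - timedelta(days=30)                                   # lower bound for the last-30-days query
print(since, "->", now)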

Example 29

Project: app-sales-machine Source File: itcscrape.py
Function: main
def main(args) :
	username, password, verbose = None, None, None
	try :
		opts, args = getopt.getopt(sys.argv[1:], 'vu:p:d:')
	except getopt.GetoptError, err :
		print >> sys.stderr, "Error: %s" % str(err)
		usage(os.path.basename(args[0]))
		sys.exit(2)

	# Default to yesterday's date for the report
	reportDay = datetime.today() - timedelta(1)
	reportDate = reportDay.strftime('%m/%d/%Y')

	for o, a in opts :
		if o == '-u' : 
			username = a
		if o == '-p' :
			password = a
		if o == '-d' :
			reportDate = a
		if o == '-v' :
			verbose = True
	
	if None in (username, password) :
		print >> sys.stderr, "Error: Must set -u and -p options."
		usage(os.path.basename(args[0]))
		sys.exit(3)

	result = None
	if verbose :
		# If the user has specified 'verbose', just let the exception propagate
		# so that we get a stacktrace from python.
		result = getLastDayReport(username, password, reportDate, True)
	else :
		try :
			result = getLastDayReport(username, password, reportDate)
		except Exception, e :
			print >> sys.stderr, "Error: problem processing output. Check your username and password."
			print >> sys.stderr, "Use -v for more detailed information."

	print result
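
A minimal sketch of the default-date logic above, isolated from the option parsing (the format string matches the one in the example):

from datetime import datetime, timedelta

report_day = datetime.today() - timedelta(1)    # yesterday
report_date = report_day.strftime('%m/%d/%Y')   # e.g. '03/14/2016'
print(report_date)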

Example 30

Project: classic.rhizome.org Source File: admin.py
    def membership_overview(self, request):
        # an overview page for membership

        context_instance = RequestContext(request)
        opts = self.model._meta
        admin_site = self.admin_site
        today =  datetime.datetime.today()
        
        all_member_count = RhizomeMembership.objects.values('id').filter(member_tools = True).count()
        paying_members = RhizomeMembership.objects.values('id').filter(member_tools = True).filter(complimentary=False).filter(org_sub=None).count()
        orgsub_member_count = RhizomeMembership.objects.values('id').filter(member_tools = True).exclude(org_sub=None).count()
        complimentary_members = RhizomeMembership.objects.values('id').filter(member_tools = True).filter(complimentary=True).count()
        
        new_users_this_year = User.objects.values('id').filter(date_joined__year = today.year).count()
        
        one_year_ago = today - datetime.timedelta(365)

        one_year_expired = RhizomeMembership.objects.values('id') \
             .filter(member_tools = False) \
             .filter(complimentary = False) \
             .filter(org_sub = None) \
             .filter(member_tools_exp_date__lte = today) \
             .filter(member_tools_exp_date__gte = one_year_ago) \
             .filter(org_sub_admin = False) \
             .count()

        thirty_days_ago = today - datetime.timedelta(30)
        
        recently_expired = RhizomeMembership.objects \
            .filter(member_tools_exp_date__gte = thirty_days_ago) \
            .filter(member_tools_exp_date__lte = today) \
            .filter(member_tools = False) \
            .filter(complimentary = False) \
            .filter(org_sub_admin = False) \
            .filter(org_sub = None) \
            .order_by('-member_tools_exp_date')

        d = {'admin_site': admin_site.name, 
             'title': "Membership Overview", 
             'opts': "Profiles", 
             'app_label': opts.app_label,
             'all_member_count':all_member_count,
             'paying_members':paying_members,
             'orgsub_member_count':orgsub_member_count,
             'recently_expired':recently_expired,
             'complimentary_members':complimentary_members,
             'one_year_expired':one_year_expired,
             'new_users_this_year':new_users_this_year
             }
        return render_to_response('admin/accounts/rhizomeuser/membership_overview.html', d, context_instance)
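
The three boundaries above (today, thirty_days_ago, one_year_ago) feed inclusive range filters. A standalone sketch of the same bounds applied to plain datetimes rather than the Django ORM (the sample expiration list is made up):

from datetime import datetime, timedelta

today = datetime.today()
one_year_ago = today - timedelta(365)
thirty_days_ago = today - timedelta(30)

# stand-ins for member_tools_exp_date values; the ORM filters above express the same bounds
expirations = [today - timedelta(10), today - timedelta(100), today - timedelta(500)]
recently_expired = [d for d in expirations if thirty_days_ago <= d <= today]
one_year_expired = [d for d in expirations if one_year_ago <= d <= today]
print(len(recently_expired), len(one_year_expired))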

Example 31

Project: baruwa2 Source File: convert.py
    def __call__(self):
        "process the files"
        jsentry = None
        kvalue = None
        lvalue = None
        # for index, line in enumerate(message):
        with open(self.headerfile) as handle:
            index = 0
            for line in handle:
                index += 1
                if index == 1:
                    msgid = line.strip().rstrip('-H')
                    continue
                if index == 3:
                    now = datetime.datetime.today()
                    self.append('From %s %s\n' % (line.strip(),
                            now.strftime("%a %b %d %T %Y")))
                    continue
                if EXIMQ_XX_RE.match(line):
                    jsentry = 1
                    continue
                if jsentry and EXIMQ_NUM_RE.match(line):
                    ematch = EXIMQ_NUM_RE.match(line)
                    kvalue = ematch.group()
                    kvalue = int(kvalue)
                    jsentry -= 1
                    continue
                if kvalue:
                    kvalue -= 1
                    self.append('X-BaruwaFW-From: %s' % line)
                    continue
                if EXIMQ_BLANK_RE.match(line):
                    continue
                match = EXIMQ_HEADER_RE.match(line)
                if match:
                    groups = match.groups()
                    if groups[1] == '*':
                        lvalue = 0
                    else:
                        lvalue = int(groups[0]) - len(groups[2]) + 1
                    self.append(groups[2] + '\n')
                    continue
                else:
                    if lvalue:
                        self.append(line)
                        lvalue -= len(line)

        dirname = os.path.dirname(self.headerfile)
        with open('%s/%s-D' % (dirname, msgid)) as handle:
            body = handle.readlines()
        self.append('\n')
        body.pop(0)
        self.extend(body)
        return ''.join(self)
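
The %T directive used above is a POSIX shorthand for %H:%M:%S and is resolved by the platform's C strftime, so it may be missing on some systems. A portable sketch of the same mbox-style "From " separator line (the sender address is a placeholder):

import datetime

now = datetime.datetime.today()
# %T expands to %H:%M:%S on POSIX platforms; the long form is used here for portability
separator = 'From [email protected] %s\n' % now.strftime('%a %b %d %H:%M:%S %Y')
print(separator)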

Example 32

Project: kamaelia_ Source File: TwitterSearch.py
    def main(self):
        twitterurl = "http://api.twitter.com/1/users/search.json"

        if self.proxy:
            proxyhandler = urllib2.ProxyHandler({"http" : self.proxy})
            twitopener = urllib2.build_opener(proxyhandler)
            urllib2.install_opener(twitopener)

        headers = {'User-Agent' : "BBC R&D Grabber"}
        postdata = None

        if self.keypair == False:
            # Perform OAuth authentication - as we don't have the secret key pair we need to request it
            # This will require some user input
            request_token_url = 'http://api.twitter.com/oauth/request_token'
            access_token_url = 'http://api.twitter.com/oauth/access_token'
            authorize_url = 'http://api.twitter.com/oauth/authorize'

            token = None
            consumer = oauth.Consumer(key=self.consumerkeypair[0],secret=self.consumerkeypair[1])

            params = {
                        'oauth_version': "1.0",
                        'oauth_nonce': oauth.generate_nonce(),
                        'oauth_timestamp': int(time.time()),
                    }

            params['oauth_consumer_key'] = consumer.key

            req = oauth.Request(method="GET",url=request_token_url,parameters=params)

            signature_method = oauth.SignatureMethod_HMAC_SHA1()
            req.sign_request(signature_method, consumer, token)

            requestheaders = req.to_header()
            requestheaders['User-Agent'] = "BBC R&D Grabber"

            # Connect to Twitter
            try:
                req = urllib2.Request(request_token_url,None,requestheaders) # Why won't this work?!? Is it trying to POST?
                conn1 = urllib2.urlopen(req)
            except httplib.BadStatusLine, e:
                sys.stderr.write('PeopleSearch BadStatusLine error: ' + str(e) + '\n')
                conn1 = False
            except urllib2.HTTPError, e:
                sys.stderr.write('PeopleSearch HTTP error: ' + str(e.code) + '\n')
                conn1 = False
            except urllib2.URLError, e:
                sys.stderr.write('PeopleSearch URL error: ' + str(e.reason) + '\n')
                conn1 = False

            if conn1:
                content = conn1.read()
                conn1.close()

                request_token = dict(urlparse.parse_qsl(content))

                print "Request Token:"
                print "     - oauth_token        = %s" % request_token['oauth_token']
                print "     - oauth_token_secret = %s" % request_token['oauth_token_secret']
                print

                # The user must confirm authorisation so a URL is printed here
                print "Go to the following link in your browser:"
                print "%s?oauth_token=%s" % (authorize_url, request_token['oauth_token'])
                print

                accepted = 'n'
                # Wait until the user has confirmed authorisation
                while accepted.lower() == 'n':
                    accepted = raw_input('Have you authorized me? (y/n) ')
                oauth_verifier = raw_input('What is the PIN? ')

                token = oauth.Token(request_token['oauth_token'],
                    request_token['oauth_token_secret'])
                token.set_verifier(oauth_verifier)

                params = {
                        'oauth_version': "1.0",
                        'oauth_nonce': oauth.generate_nonce(),
                        'oauth_timestamp': int(time.time()),
                    }

                params['oauth_token'] = token.key
                params['oauth_consumer_key'] = consumer.key

                req = oauth.Request(method="GET",url=access_token_url,parameters=params)

                signature_method = oauth.SignatureMethod_HMAC_SHA1()
                req.sign_request(signature_method, consumer, token)

                requestheaders = req.to_header()
                requestheaders['User-Agent'] = "BBC R&D Grabber"
                # Connect to Twitter
                try:
                    req = urllib2.Request(access_token_url,"oauth_verifier=%s" % oauth_verifier,requestheaders) # Why won't this work?!? Is it trying to POST?
                    conn1 = urllib2.urlopen(req)
                except httplib.BadStatusLine, e:
                    sys.stderr.write('PeopleSearch BadStatusLine error: ' + str(e) + '\n')
                    conn1 = False
                except urllib2.HTTPError, e:
                    sys.stderr.write('PeopleSearch HTTP error: ' + str(e.code) + '\n')
                    conn1 = False
                except urllib2.URLError, e:
                    sys.stderr.write('PeopleSearch URL error: ' + str(e.reason) + '\n')
                    conn1 = False

                if conn1:
                    content = conn1.read()
                    conn1.close()
                    access_token = dict(urlparse.parse_qsl(content))

                    # Access tokens retrieved from Twitter
                    print "Access Token:"
                    print "     - oauth_token        = %s" % access_token['oauth_token']
                    print "     - oauth_token_secret = %s" % access_token['oauth_token_secret']
                    print
                    print "You may now access protected resources using the access tokens above."
                    print

                    save = False
                    # Load config to save OAuth keys
                    try:
                        homedir = os.path.expanduser("~")
                        file = open(homedir + "/twitter-login.conf",'r')
                        save = True
                    except IOError, e:
                        print ("Failed to load config file - not saving oauth keys: " + str(e))

                    if save:
                        raw_config = file.read()

                        file.close()

                        # Read config and add new values
                        config = cjson.decode(raw_config)
                        config['key'] = access_token['oauth_token']

                        config['secret'] = access_token['oauth_token_secret']

                        raw_config = cjson.encode(config)

                        # Write out the new config file
                        try:
                            file = open(homedir + "/twitter-login.conf",'w')
                            file.write(raw_config)
                            file.close()
                        except IOError, e:
                            print ("Failed to save oauth keys: " + str(e))

                    self.keypair = [access_token['oauth_token'], access_token['oauth_token_secret']]
        

        while not self.finished():
            # TODO: Implement backoff algorithm in case of connection failures - watch out for the fact this could delay the requester component
            if self.dataReady("inbox"):
                # Retrieve keywords to look up

                person = self.recv("inbox")

                # Ensure we're not rate limited during the first request - if so we'll wait for 15 mins before our next request
                if (datetime.today() - timedelta(minutes=15)) > self.ratelimited:
                    requesturl = twitterurl + "?q=" + urllib.quote(person) + "&per_page=5"

                    params = {
                        'oauth_version': "1.0",
                        'oauth_nonce': oauth.generate_nonce(),
                        'oauth_timestamp': int(time.time()),
                    }

                    token = oauth.Token(key=self.keypair[0],secret=self.keypair[1])
                    consumer = oauth.Consumer(key=self.consumerkeypair[0],secret=self.consumerkeypair[1])

                    params['oauth_token'] = token.key
                    params['oauth_consumer_key'] = consumer.key

                    req = oauth.Request(method="GET",url=requesturl,parameters=params)

                    signature_method = oauth.SignatureMethod_HMAC_SHA1()
                    req.sign_request(signature_method, consumer, token)

                    requestheaders = req.to_header()
                    requestheaders['User-Agent'] = "BBC R&D Grabber"

                    # Connect to Twitter
                    try:
                        req = urllib2.Request(requesturl,None,requestheaders) # Why won't this work?!? Is it trying to POST?
                        conn1 = urllib2.urlopen(req)
                    except httplib.BadStatusLine, e:
                        sys.stderr.write('PeopleSearch BadStatusLine error: ' + str(e) + '\n')
                        conn1 = False
                    except urllib2.HTTPError, e:
                        sys.stderr.write('PeopleSearch HTTP error: ' + str(e.code) + '\n')
                        conn1 = False
                    except urllib2.URLError, e:
                        sys.stderr.write('PeopleSearch URL error: ' + str(e.reason) + '\n')
                        conn1 = False

                    if conn1:
                        # Check rate limiting here and print current limit
                        headers = conn1.info()
                        headerlist = string.split(str(headers),"\n")
                        for line in headerlist:
                            if line != "":
                                splitheader = line.split()
                                if splitheader[0] == "X-FeatureRateLimit-Remaining:" or splitheader[0] == "X-RateLimit-Remaining:":
                                    print splitheader[0] + " " + str(splitheader[1])
                                    if int(splitheader[1]) < 5:
                                        self.ratelimited = datetime.today()
                        # Grab json format result of people search here
                        try:
                            data = conn1.read()
                            try:
                                content = cjson.decode(data)
                                self.send(content,"outbox")
                            except cjson.DecodeError, e:
                                self.send(dict(),"outbox")
                        except IOError, e:
                            sys.stderr.write('PeopleSearch IO error: ' + str(e) + '\n')
                            self.send(dict(),"outbox")
                        conn1.close()
                    else:
                        self.send(dict(),"outbox")
                else:
                   print "Twitter search paused - rate limited"
                   self.send(dict(),"outbox")
            self.pause()
            yield 1
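
The rate-limit guard above reduces to comparing the current time against a stored datetime plus a 15-minute cushion. A minimal sketch with a made-up ratelimited value:

from datetime import datetime, timedelta

ratelimited = datetime.today() - timedelta(hours=1)       # pretend the limit was last hit an hour ago
if (datetime.today() - timedelta(minutes=15)) > ratelimited:
    print("ok to query")                                   # at least 15 minutes since the last rate limit
else:
    print("still backing off")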

Example 33

Project: stopstalk-deployment Source File: default.py
Function: contests
def contests():
    """
        Show the upcoming contests
    """

    today = datetime.datetime.today()
    today = datetime.datetime.strptime(str(today)[:-7],
                                       "%Y-%m-%d %H:%M:%S")

    start_date = today.date()
    end_date = start_date + datetime.timedelta(90)
    url = "https://contesttrackerapi.herokuapp.com/"
    response = requests.get(url)
    if response.status_code == 200:
        response = response.json()["result"]
    else:
        return dict(retrieved=False)

    ongoing = response["ongoing"]
    upcoming = response["upcoming"]
    contests = []
    cal = pdt.Calendar()

    table = TABLE(_class="centered striped", _id="contests-table")
    thead = THEAD(TR(TH("Contest Name"),
                     TH("Site"),
                     TH("Start"),
                     TH("Duration/Ending"),
                     TH("Link"),
                     TH("Add Reminder")))
    table.append(thead)
    tbody = TBODY()

    button_class = "btn-floating btn-small accent-4 tooltipped"
    view_link_class = button_class + " green view-contest"
    reminder_class = button_class + " orange set-reminder"

    for i in ongoing:

        if i["Platform"] in ("TOPCODER", "OTHER"):
            continue

        try:
            endtime = datetime.datetime.strptime(i["EndTime"],
                                                 "%a, %d %b %Y %H:%M")
        except ValueError:
            continue
        tr = TR()
        span = SPAN(_class="green tooltipped",
                    data={"position": "right",
                          "delay": "50",
                          "tooltip": "Live Contest"},
                    _style="cursor: pointer; " + \
                            "float:right; " + \
                            "height:10px; " + \
                            "width:10px; " + \
                            "border-radius: 50%;")
        tr.append(TD(i["Name"], span))
        tr.append(TD(i["Platform"].capitalize()))
        tr.append(TD("-"))
        tr.append(TD(str(endtime).replace("-", "/"),
                     _class="contest-end-time"))
        tr.append(TD(A(I(_class="fa fa-external-link-square fa-lg"),
                       _class=view_link_class,
                       _href=i["url"],
                       data={"position": "left",
                             "tooltip": "Contest Link",
                             "delay": "50"},
                       _target="_blank")))
        tr.append(TD(BUTTON(I(_class="fa fa-calendar-plus-o"),
                            _class=reminder_class + " disabled",
                            data={"position": "left",
                                  "tooltip": "Already started!",
                                  "delay": "50"})))
        tbody.append(tr)

    for i in upcoming:

        if i["Platform"] in ("TOPCODER", "OTHER"):
            continue

        start_time = datetime.datetime.strptime(i["StartTime"],
                                                "%a, %d %b %Y %H:%M")
        tr = TR()
        tr.append(TD(i["Name"]))
        tr.append(TD(i["Platform"].capitalize()))
        tr.append(TD(str(start_time), _class="stopstalk-timestamp"))

        duration = i["Duration"]
        duration = duration.replace(" days", "d")
        duration = duration.replace(" day", "d")
        tr.append(TD(duration))
        tr.append(TD(A(I(_class="fa fa-external-link-square fa-lg"),
                       _class=view_link_class,
                       _href=i["url"],
                       data={"position": "left",
                             "tooltip": "Contest Link",
                             "delay": "50"},
                       _target="_blank")))
        tr.append(TD(BUTTON(I(_class="fa fa-calendar-plus-o"),
                            _class=reminder_class,
                            data={"position": "left",
                                  "tooltip": "Set Reminder to Google Calendar",
                                  "delay": "50"})))
        tbody.append(tr)

    table.append(tbody)
    return dict(table=table, upcoming=upcoming, retrieved=True)
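
The str()/strptime round-trip at the top of this example is just a way of dropping microseconds from today(); replace(microsecond=0) does the same thing directly. A short sketch of the truncation and the 90-day window:

import datetime

today = datetime.datetime.today().replace(microsecond=0)   # same effect as the str()/strptime round-trip above
start_date = today.date()
end_date = start_date + datetime.timedelta(90)              # 90-day horizon for upcoming contests
print(start_date, end_date)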

Example 34

Project: pmis Source File: analytic_billing_plan_line_make_sale.py
    def make_sales_orders(self, cr, uid, ids, context=None):
        """
             To make sales.

             @param self: The object pointer.
             @param cr: A database cursor
             @param uid: ID of the user currently logged in
             @param ids: the ID or list of IDs
             @param context: A standard dictionary

             @return: A dictionary which of fields with values.

        """

        if context is None:
            context = {}
        record_ids = context and context.get('active_ids', False)
        make_order = self.browse(cr, uid, ids[0], context=context)
        res = []
        if record_ids:
            billing_plan_obj = self.pool.get('analytic.billing.plan.line')
            order_obj = self.pool.get('sale.order')
            order_line_obj = self.pool.get('sale.order.line')
            partner_obj = self.pool.get('res.partner')
            acc_pos_obj = self.pool.get('account.fiscal.position')

            list_line = []

            customer_data = False
            company_id = False
            sale_id = False
            account_id = False

            for line in billing_plan_obj.browse(cr, uid, record_ids,
                                                context=context):

                    uom_id = line.product_uom_id

                    if not line.customer_id:
                        raise osv.except_osv(
                            _('Could not create sale order !'),
                            _('You have to enter a customer.'))

                    if customer_data is not False \
                            and line.customer_id != customer_data:
                        raise osv.except_osv(
                            _('Could not create sale order !'),
                            _('You have to select lines '
                              'from the same customer.'))
                    else:
                        customer_data = line.customer_id

                    partner_addr = partner_obj.address_get(
                        cr, uid, [customer_data.id], ['default',
                                                      'invoice',
                                                      'delivery',
                                                      'contact'])
                    newdate = datetime.today()
                    partner = customer_data
                    pricelist_id = partner.property_product_pricelist \
                        and partner.property_product_pricelist.id \
                        or False
                    price_unit = line.price_unit

                    line_company_id = line.company_id \
                        and line.company_id.id \
                        or False
                    if company_id is not False \
                            and line_company_id != company_id:
                        raise osv.except_osv(
                            _('Could not create sale order !'),
                            _('You have to select lines '
                              'from the same company.'))
                    else:
                        company_id = line_company_id

                    # shop_id = make_order.shop_id \
                    #     and make_order.shop_id.id \
                    #     or False

                    line_account_id = line.account_id \
                        and line.account_id.id \
                        or False
                    if account_id is not False \
                            and line_account_id != account_id:
                        raise osv.except_osv(
                            _('Could not create billing request!'),
                            _('You have to select lines from the '
                              'same analytic account.'))
                    else:
                        account_id = line_account_id

                    sale_order_line = {
                        'name': line.name,
                        'product_uom_qty': line.unit_amount,
                        'product_id': line.product_id.id,
                        'product_uom': uom_id.id,
                        'price_unit': price_unit,
                        'notes': line.notes,
                    }

                    taxes = False
                    if line.product_id:
                        taxes_ids = line.product_id.product_tmpl_id.taxes_id
                        taxes = acc_pos_obj.map_tax(
                            cr, uid, partner.property_account_position,
                            taxes_ids)
                    if taxes:
                        sale_order_line.update({
                            'tax_id': [(6, 0, taxes)]
                        })
                    list_line.append(sale_order_line)

                    if sale_id is False:
                        sale_id = order_obj.create(cr, uid, {
                            'origin': '',
                            # 'shop_id': shop_id,
                            'partner_id': customer_data.id,
                            'pricelist_id': pricelist_id,
                            'partner_invoice_id': partner_addr['invoice'],
                            'partner_order_id': partner_addr['contact'],
                            'partner_shipping_id': partner_addr['delivery'],
                            'date_order':
                                newdate.strftime('%Y-%m-%d %H:%M:%S'),
                            'fiscal_position':
                                partner.property_account_position and
                                partner.property_account_position.id or False,
                            'company_id': company_id,
                            'payment_term':
                                partner.property_payment_term and
                                partner.property_payment_term.id or False,
                            'project_id': account_id,
                            'invoice_quantity': make_order.invoice_quantity,
                            'order_policy': make_order.order_policy,

                        }, context=context)
                        if line.account_id.user_id:
                            order_obj.message_subscribe_users(
                                cr, uid, [sale_id],
                                user_ids=[line.account_id.user_id.id])

                    sale_order_line.update({
                        'order_id': sale_id
                    })

                    order_line_id = order_line_obj.create(cr, uid,
                                                          sale_order_line,
                                                          context=context)

                    values = {
                        'order_line_ids': [(4, order_line_id)]
                    }

                    billing_plan_obj.write(cr, uid, [line.id], values,
                                           context=context)

                    res.append(order_line_id)

        return {
            'domain': "[('id','in', ["+','.join(map(str, res))+"])]",
            'name': _('Billing request lines'),
            'view_type': 'form',
            'view_mode': 'tree,form',
            'res_model': 'sale.order.line',
            'view_id': False,
            'context': False,
            'type': 'ir.actions.act_window'
        }

Example 35

Project: stopstalk-deployment Source File: problems.py
Function: trending
def trending():
    """
        Show trending problems globally and among friends
        @ToDo: Needs lot of comments explaining the code
    """

    stable = db.submission

    today = datetime.datetime.today()
    # Consider submissions only after PAST_DAYS(customizable)
    # for trending problems
    start_date = str(today - datetime.timedelta(days=current.PAST_DAYS))
    query = (stable.time_stamp >= start_date)
    last_submissions = db(query).select(stable.problem_name,
                                        stable.problem_link,
                                        stable.user_id,
                                        stable.custom_user_id)

    if auth.is_logged_in():
        friends, cusfriends = utilities.get_friends(session.user_id)

        # The Original IDs of duplicate custom_friends
        custom_friends = []
        for cus_id in cusfriends:
            if cus_id[1] is None:
                custom_friends.append(cus_id[0])
            else:
                custom_friends.append(cus_id[1])

    problems_dict = {}
    friends_problems_dict = {}
    for submission in last_submissions:
        plink = submission.problem_link
        pname = submission.problem_name
        uid = submission.user_id
        cid = submission.custom_user_id

        # @ToDo: Improve this code
        if problems_dict.has_key(plink):
            problems_dict[plink]["total_submissions"] += 1
        else:
            problems_dict[plink] = {"name": pname,
                                    "total_submissions": 1,
                                    "users": set([]),
                                    "custom_users": set([])}

        if auth.is_logged_in() and \
           ((uid and uid in friends) or \
            (cid and cid in custom_friends)):

            if friends_problems_dict.has_key(plink):
                friends_problems_dict[plink]["total_submissions"] += 1
            else:
                friends_problems_dict[plink] = {"name": pname,
                                                "total_submissions": 1,
                                                "users": set([]),
                                                "custom_users": set([])}
            if uid:
                friends_problems_dict[plink]["users"].add(uid)
            else:
                friends_problems_dict[plink]["custom_users"].add(cid)

        if uid:
            problems_dict[plink]["users"].add(uid)
        else:
            problems_dict[plink]["custom_users"].add(cid)

    # Sort the rows according to the number of users
    # who solved the problem in last PAST_DAYS
    custom_compare = lambda x: (len(x[1]["users"]) + \
                                len(x[1]["custom_users"]),
                                x[1]["total_submissions"])

    global_trending = sorted(problems_dict.items(),
                             key=custom_compare,
                             reverse=True)

    global_table = _render_trending("Trending Globally",
                                    global_trending[:current.PROBLEMS_PER_PAGE],
                                    "Users")
    if auth.is_logged_in():
        friends_trending = sorted(friends_problems_dict.items(),
                                  key=custom_compare,
                                  reverse=True)

        friend_table = _render_trending("Trending among friends",
                                        friends_trending[:current.PROBLEMS_PER_PAGE],
                                        "Friends")

        div = DIV(DIV(friend_table, _class="col s6"),
                  DIV(global_table, _class="col s6"),
                  _class="row col s12")
    else:
        div = DIV(global_table, _class="center")

    return dict(div=div)
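
The query boundary above is simply the string form of today() minus a configurable number of days. A standalone sketch (PAST_DAYS here is a stand-in for current.PAST_DAYS):

import datetime

PAST_DAYS = 31                                                # stand-in for current.PAST_DAYS
today = datetime.datetime.today()
start_date = str(today - datetime.timedelta(days=PAST_DAYS))  # e.g. '2016-05-01 10:20:30.123456'
print(start_date)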

Example 36

Project: sous-chef Source File: models.py
    def auto_create_orders(self, delivery_date, clients):
        """
        Automatically creates orders and order items for the given delivery
        date and given client list.
        Order items will be created based on client's meals default.

        Parameters:
          delivery_date : date on which orders are to be delivered
          clients : a list of one or many client objects

        Returns:
          Number of orders created.
        """
        created = 0
        day = delivery_date.weekday()  # Monday is 0, Sunday is 6
        for client in clients:
            # No main_dish means no delivery this day
            main_dish_quantity, main_dish_size = Client.get_meal_defaults(
                client,
                COMPONENT_GROUP_CHOICES_MAIN_DISH, day)
            if main_dish_quantity == 0:
                continue
            try:
                # If an order is already created, skip order items creation
                # (if want to replace, must be deleted first)
                Order.objects.get(client=client, delivery_date=delivery_date)
                continue
            except Order.DoesNotExist:
                order = Order.objects.create(client=client,
                                             creation_date=datetime.today(),
                                             delivery_date=delivery_date,
                                             status=ORDER_STATUS_ORDERED)
                created += 1

            # TODO Use Parameters Model in member to store unit prices
            prices = self.get_client_prices(client)
            main_price = prices['main']
            side_price = prices['side']

            for component_group, trans in COMPONENT_GROUP_CHOICES:
                item_quantity, item_size = Client.get_meal_defaults(
                    client, component_group, day)
                if item_quantity > 0:
                    # Set the quantity of the current item
                    total_quantity = item_quantity
                    # Set the unit price of the current item
                    if (component_group == COMPONENT_GROUP_CHOICES_MAIN_DISH):
                        unit_price = main_price
                    else:
                        unit_price = side_price
                        while main_dish_quantity > 0 and item_quantity > 0:
                            main_dish_quantity -= 1
                            item_quantity -= 1
                    Order_item.objects.create(
                        order=order,
                        component_group=component_group,
                        price=total_quantity * unit_price,
                        billable_flag=True,
                        size=item_size,
                        order_item_type=ORDER_ITEM_TYPE_CHOICES_COMPONENT,
                        total_quantity=total_quantity)
        return created
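
Two small uses of the API here: the new order is stamped with datetime.today(), and the delivery date's weekday() selects the meal defaults. A minimal sketch with a made-up delivery date:

from datetime import datetime, timedelta

delivery_date = datetime.today().date() + timedelta(days=1)   # stand-in delivery date
day = delivery_date.weekday()                                  # Monday is 0, Sunday is 6
creation_date = datetime.today()                               # timestamp stored on the new order
print(day, creation_date)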

Example 37

Project: SickGear Source File: properFinder.py
def _download_propers(proper_list):

    for cur_proper in proper_list:

        history_limit = datetime.datetime.today() - datetime.timedelta(days=30)

        # make sure the episode has been downloaded before
        my_db = db.DBConnection()
        history_results = my_db.select(
            'SELECT resource FROM history ' +
            'WHERE showid = ? AND season = ? AND episode = ? AND quality = ? AND date >= ? ' +
            'AND action IN (' + ','.join([str(x) for x in Quality.SNATCHED]) + ')',
            [cur_proper.indexerid, cur_proper.season, cur_proper.episode, cur_proper.quality,
             history_limit.strftime(history.dateFormat)])

        # if we didn't download this episode in the first place we don't know what quality to use for the proper, so skip it
        if 0 == len(history_results):
            logger.log(u'Skipping download because cannot find an original history entry for proper ' + cur_proper.name)
            continue

        else:

            # get the show object
            show_obj = helpers.findCertainShow(sickbeard.showList, cur_proper.indexerid)
            if None is show_obj:
                logger.log(u'Unable to find the show with indexerid ' + str(
                    cur_proper.indexerid) + ' so unable to download the proper', logger.ERROR)
                continue

            # make sure that none of the existing history downloads are the same proper we're trying to download
            clean_proper_name = _generic_name(helpers.remove_non_release_groups(cur_proper.name, show_obj.is_anime()))
            is_same = False
            for result in history_results:
                # if the result exists in history already we need to skip it
                if clean_proper_name == _generic_name(helpers.remove_non_release_groups(result['resource'])):
                    is_same = True
                    break
            if is_same:
                logger.log(u'This proper is already in history, skipping it', logger.DEBUG)
                continue

            ep_obj = show_obj.getEpisode(cur_proper.season, cur_proper.episode)

            # make the result object
            result = cur_proper.provider.get_result([ep_obj], cur_proper.url)
            if None is result:
                continue
            result.name = cur_proper.name
            result.quality = cur_proper.quality
            result.version = cur_proper.version

            # snatch it
            search.snatch_episode(result, SNATCHED_PROPER)

Example 38

Project: aws-detailed-billing-parser Source File: config.py
Function: init
    def __init__(self):
        today = datetime.today()

        # elasticsearch default values
        self.es_host = 'search-name-hash.region.es.amazonaws.com'
        self.es_port = 80
        self.es_index = 'billing'
        self.es_doctype = 'billing'
        self.es_year = today.year
        self.es_month = today.month
        self.es_timestamp = 'UsageStartDate'  # fieldname that will be replaced by Timestamp
        self.es_timeout = ES_TIMEOUT

        # aws account id
        self.account_id = '01234567890'

        # encoding (this is the default encoding for most files, but if
        # customer uses Latin/Spanish characters you may need to change it to
        # self.encoding = 'iso-8859-1'
        self.encoding = 'utf-8'

        # update flag (if True, update existing documents in the Elasticsearch index;
        # defaults to False for performance reasons)
        self.update = False

        # check flag (check if current record exists before add new -- for
        # incremental updates)
        self.check = False

        # Use AWS Signed requests to access the Elasticsearch
        self.awsauth = False

        # Run Business Intelligence on the line items
        self.analytics = False

        # Time to wait for the analytics process. Default is 30 minutes
        self.analytics_timeout = 30

        # Run Business Intelligence only
        self.bi_only = False

        # delete index flag indicates whether or not the current elasticsearch
        # should be kept or deleted
        self.delete_index = False

        # debug flag (will force print some extra data even in quiet mode)
        self.debug = False

        # fail fast flag (if True stop parsing on first index error)
        self.fail_fast = False

        # input and output filenames
        self._input_filename = None
        self._output_filename = None

        # other defaults
        self.csv_delimiter = ','
        self._output_type = OUTPUT_TO_FILE
        self._bulk_mode = PROCESS_BY_LINE
        self.bulk_size = BULK_SIZE
        self.bulk_msg = {
            "RecordType": [
                "StatementTotal",
                "InvoiceTotal",
                "Rounding",
                "AccountTotal"]}

Example 39

Project: hamster Source File: overview.py
    def __init__(self, parent = None):
        Controller.__init__(self, parent)

        self.window.set_position(gtk.WindowPosition.CENTER)
        self.window.set_default_icon_name("hamster-time-tracker")
        self.window.set_default_size(700, 500)

        self.storage = hamster.client.Storage()
        self.storage.connect("facts-changed", self.on_facts_changed)
        self.storage.connect("activities-changed", self.on_facts_changed)

        self.header_bar = HeaderBar()
        self.window.set_titlebar(self.header_bar)

        main = gtk.Box(orientation=1)
        self.window.add(main)

        self.report_chooser = None


        self.search_box = gtk.Revealer()

        space = gtk.Box(border_width=5)
        self.search_box.add(space)
        self.filter_entry = gtk.Entry()
        self.filter_entry.set_icon_from_icon_name(gtk.EntryIconPosition.PRIMARY,
                                                  "edit-find-symbolic")
        self.filter_entry.connect("changed", self.on_search_changed)
        self.filter_entry.connect("icon-press", self.on_search_icon_press)

        space.pack_start(self.filter_entry, True, True, 0)
        main.pack_start(self.search_box, False, True, 0)


        window = gtk.ScrolledWindow()
        window.set_policy(gtk.PolicyType.NEVER, gtk.PolicyType.AUTOMATIC)
        self.fact_tree = FactTree()
        self.fact_tree.connect("on-activate-row", self.on_row_activated)
        self.fact_tree.connect("on-delete-called", self.on_row_delete_called)

        window.add(self.fact_tree)
        main.pack_start(window, True, True, 1)

        self.totals = Totals()
        main.pack_start(self.totals, False, True, 1)

        date_range = stuff.week(dt.datetime.today()) # TODO - do the hamster day
        self.header_bar.range_pick.set_range(*date_range)
        self.header_bar.range_pick.connect("range-selected", self.on_range_selected)
        self.header_bar.add_activity_button.connect("clicked", self.on_add_activity_clicked)
        self.header_bar.search_button.connect("toggled", self.on_search_toggled)

        self.header_bar.menu_prefs.connect("activate", self.on_prefs_clicked)
        self.header_bar.menu_export.connect("activate", self.on_export_clicked)


        self.window.connect("key-press-event", self.on_key_press)

        self.facts = []
        self.find_facts()
        self.window.show_all()

Example 40

Project: classic.rhizome.org Source File: tally_user_ratings.py
    def handle(self, *args, **options):
        users = RhizomeUser.objects.filter(is_active=True)
        five_years_ago = datetime.datetime.today() - datetime.timedelta(1800)
        
        for user in users:

            #### announcements worth 2 points
            valid_opps = Opportunity.objects.filter(user = user, status = 1,is_spam=False)
            valid_jobs = Job.objects.filter(user = user, status = 1,is_spam=False)
            valid_events = Event.objects.filter(user = user, status = 1,is_spam=False)                   


            #### spam -10 points
            invalid_opps = int(Opportunity.objects.filter(user = user, is_spam=True).count()*10)
            invalid_jobs = int(Job.objects.filter(user = user, is_spam=True).count()*10)
            invalid_events = int(Event.objects.filter(user = user, is_spam=True).count()*10)   

            #### resources worth 2 points
            festivals = Festival.objects.filter(user = user, visible = 1)
            programs = Program.objects.filter(user = user, visible = 1)
            residencies = Residency.objects.filter(user = user, visible = 1)
            syllabi = Syllabus.objects.filter(user = user, visible = 1)

            two_points = chain(valid_events, valid_opps, valid_jobs,festivals,programs,residencies,syllabi)
            
            for a in two_points:
                points_object,created = UserObjectPoints.objects.get_or_create(
                        user=user, 
                        content_type = a.content_type(), 
                        object_pk = a.id, 
                        points = 2
                )
                if created:
                    points_object.save()

                                        
            #### comments worth 2 points
            valid_comments = ThreadedComment.objects.filter(user = user, is_public = 1)
            for a in valid_comments:
                try:
                    points_object,created = UserObjectPoints.objects.get_or_create(
                            user=user, 
                            content_type = a.content_type, 
                            object_pk = a.id, 
                            points = 2
                    )
                    if created:
                        points_object.save()
                except:
                    pass
                
            #### blog posts = 5 points            
            blog_posts = Post.objects.filter(authors__id = user.id).filter(status = 2)
            for a in blog_posts:
                points_object,created = UserObjectPoints.objects.get_or_create(
                        user=user, 
                        content_type = a.content_type(), 
                        object_pk = a.id, 
                        points = 5
                    )
                if created:
                    points_object.save()            
                
                
            ##### artworks in artbase = 10 points
            artbase_artworks = ArtworkStub.objects.filter(user = user, status = "approved")
            for a in artbase_artworks:
                points_object,created = UserObjectPoints.objects.get_or_create(
                        user=user, 
                        content_type = a.content_type(), 
                        object_pk = a.id,
                        points=10
                    )
                if created:
                    points_object.save()    


            #### portfolio works worth 3 points
            portfolio_artworks = ArtworkStub.objects.filter(user = user).exclude(status = "unsubmitted").exclude(status="deleted") 
            
            #### member exhibitions = 3 points
            exhibitions = MemberExhibition.objects.filter(user = user, live = 1)
            
            exhibitions_and_portfolios = chain(exhibitions, portfolio_artworks)
            for a in exhibitions_and_portfolios:
                points_object,created = UserObjectPoints.objects.get_or_create(
                        user=user, 
                        content_type = a.content_type(), 
                        object_pk = a.id,
                        points=3
                )
                if created:
                    points_object.save()    
                
            #### add up the points
            points = sum([int(obj.points) for obj in UserObjectPoints.objects.filter(user=user)])
#             points = int(len(valid_comments)) + int(len(valid_opps)) + int(len(valid_jobs)) + int(len(valid_events)) \
#                     + int(len(blog_posts)) + int(len(artbase_artworks)) + pint(len(ortfolio_artworks)) \
#                     + int(len(exhibitions)) + int(len(festivals)) + int(len(syllabi)) + int(len(programs)) + int(len    \                
#                     (residencies)) \
            
            negative_points = invalid_opps + invalid_jobs + invalid_events
            points = points - negative_points
            
            #old accounts get extra points
            if user.date_joined <= five_years_ago:
                points = points + 15
            
            #members get points
            if user.is_member():
                points = points + 15
                 
            try:
                rating = UserRating.objects.get(user=user)
            except:
                rating = UserRating(user=user)
            rating.rating = points
            rating.save()

Example 41

Project: SickRage Source File: properFinder.py
    def _getProperList(self):  # pylint: disable=too-many-locals, too-many-branches, too-many-statements
        """
        Walk providers for propers
        """
        propers = {}

        search_date = datetime.datetime.today() - datetime.timedelta(days=2)

        # for each provider get a list of the
        origThreadName = threading.currentThread().name
        providers = [x for x in sickbeard.providers.sortedProviderList(sickbeard.RANDOMIZE_PROVIDERS) if x.is_active()]
        for curProvider in providers:
            threading.currentThread().name = origThreadName + " :: [" + curProvider.name + "]"

            logger.log(u"Searching for any new PROPER releases from " + curProvider.name)

            try:
                curPropers = curProvider.find_propers(search_date)
            except AuthException as e:
                logger.log(u"Authentication error: " + ex(e), logger.DEBUG)
                continue
            except Exception as e:
                logger.log(u"Exception while searching propers in " + curProvider.name + ", skipping: " + ex(e), logger.ERROR)
                logger.log(traceback.format_exc(), logger.DEBUG)
                continue

            # if they haven't been added by a different provider then add the proper to the list
            for x in curPropers:
                if not re.search(r'\b(proper|repack|real)\b', x.name, re.I):
                    logger.log(u'find_propers returned a non-proper, we have caught and skipped it.', logger.DEBUG)
                    continue

                name = self._genericName(x.name)
                if name not in propers:
                    logger.log(u"Found new proper: " + x.name, logger.DEBUG)
                    x.provider = curProvider
                    propers[name] = x

            threading.currentThread().name = origThreadName

        # take the list of unique propers and get it sorted by
        sortedPropers = sorted(propers.values(), key=operator.attrgetter('date'), reverse=True)
        finalPropers = []

        for curProper in sortedPropers:

            try:
                parse_result = NameParser(False).parse(curProper.name)
            except (InvalidNameException, InvalidShowException) as error:
                logger.log(u"{0}".format(error), logger.DEBUG)
                continue

            if not parse_result.series_name:
                continue

            if not parse_result.episode_numbers:
                logger.log(
                    u"Ignoring " + curProper.name + " because it's for a full season rather than specific episode",
                    logger.DEBUG)
                continue

            logger.log(
                u"Successful match! Result " + parse_result.original_name + " matched to show " + parse_result.show.name,
                logger.DEBUG)

            # set the indexerid in the db to the show's indexerid
            curProper.indexerid = parse_result.show.indexerid

            # set the indexer in the db to the show's indexer
            curProper.indexer = parse_result.show.indexer

            # populate our Proper instance
            curProper.show = parse_result.show
            curProper.season = parse_result.season_number if parse_result.season_number is not None else 1
            curProper.episode = parse_result.episode_numbers[0]
            curProper.release_group = parse_result.release_group
            curProper.version = parse_result.version
            curProper.quality = Quality.nameQuality(curProper.name, parse_result.is_anime)
            curProper.content = None

            # filter release
            bestResult = pickBestResult(curProper, parse_result.show)
            if not bestResult:
                logger.log(u"Proper " + curProper.name + " were rejected by our release filters.", logger.DEBUG)
                continue

            # only get anime proper if it has release group and version
            if bestResult.show.is_anime:
                if not bestResult.release_group and bestResult.version == -1:
                    logger.log(u"Proper " + bestResult.name + " doesn't have a release group and version, ignoring it",
                               logger.DEBUG)
                    continue

            # check if we actually want this proper (if it's the right quality)
            main_db_con = db.DBConnection()
            sql_results = main_db_con.select("SELECT status FROM tv_episodes WHERE showid = ? AND season = ? AND episode = ?",
                                             [bestResult.indexerid, bestResult.season, bestResult.episode])
            if not sql_results:
                continue

            # only keep the proper if we have already retrieved the same quality ep (don't get better/worse ones)
            oldStatus, oldQuality = Quality.splitCompositeStatus(int(sql_results[0]["status"]))
            if oldStatus not in (DOWNLOADED, SNATCHED) or oldQuality != bestResult.quality:
                continue

            # check if we actually want this proper (if it's the right release group and a higher version)
            if bestResult.show.is_anime:
                main_db_con = db.DBConnection()
                sql_results = main_db_con.select(
                    "SELECT release_group, version FROM tv_episodes WHERE showid = ? AND season = ? AND episode = ?",
                    [bestResult.indexerid, bestResult.season, bestResult.episode])

                oldVersion = int(sql_results[0]["version"])
                oldRelease_group = (sql_results[0]["release_group"])

                if -1 < oldVersion < bestResult.version:
                    logger.log(u"Found new anime v" + str(bestResult.version) + " to replace existing v" + str(oldVersion))
                else:
                    continue

                if oldRelease_group != bestResult.release_group:
                    logger.log(u"Skipping proper from release group: " + bestResult.release_group + ", does not match existing release group: " + oldRelease_group)
                    continue

            # if the show is in our list and there hasn't been a proper already added for that particular episode then add it to our list of propers
            if bestResult.indexerid != -1 and (bestResult.indexerid, bestResult.season, bestResult.episode) not in {(p.indexerid, p.season, p.episode) for p in finalPropers}:
                logger.log(u"Found a proper that we need: " + str(bestResult.name))
                finalPropers.append(bestResult)

        return finalPropers
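
The dedup step above only queues one proper per (indexerid, season, episode) by checking a set built from the propers already accepted. A minimal standalone sketch of that pattern, using a hypothetical Proper namedtuple in place of SickRage's own result objects:

from collections import namedtuple

Proper = namedtuple("Proper", "name indexerid season episode")

def dedupe_propers(candidates):
    """Keep only the first proper seen for each (indexerid, season, episode)."""
    final, seen = [], set()
    for proper in candidates:
        key = (proper.indexerid, proper.season, proper.episode)
        if proper.indexerid != -1 and key not in seen:
            seen.add(key)
            final.append(proper)
    return final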

Example 42

Project: SickRage Source File: properFinder.py
    def _downloadPropers(self, properList):
        """
        Download proper (snatch it)

        :param properList:
        """

        for curProper in properList:

            historyLimit = datetime.datetime.today() - datetime.timedelta(days=30)

            # make sure the episode has been downloaded before
            main_db_con = db.DBConnection()
            historyResults = main_db_con.select(
                "SELECT resource FROM history " +
                "WHERE showid = ? AND season = ? AND episode = ? AND quality = ? AND date >= ? " +
                "AND action IN (" + ",".join([str(x) for x in Quality.SNATCHED + Quality.DOWNLOADED]) + ")",
                [curProper.indexerid, curProper.season, curProper.episode, curProper.quality,
                 historyLimit.strftime(History.date_format)])

            # if we didn't download this episode in the first place we don't know what quality to use for the proper so we can't do it
            if not historyResults:
                logger.log(
                    u"Unable to find an original history entry for proper " + curProper.name + " so I'm not downloading it.")
                continue

            else:

                # make sure that none of the existing history downloads are the same proper we're trying to download
                clean_proper_name = self._genericName(helpers.remove_non_release_groups(curProper.name))
                isSame = False
                for curResult in historyResults:
                    # if the result exists in history already we need to skip it
                    if self._genericName(helpers.remove_non_release_groups(curResult["resource"])) == clean_proper_name:
                        isSame = True
                        break
                if isSame:
                    logger.log(u"This proper is already in history, skipping it", logger.DEBUG)
                    continue

                # get the episode object
                epObj = curProper.show.getEpisode(curProper.season, curProper.episode)

                # make the result object
                result = curProper.provider.get_result([epObj])
                result.show = curProper.show
                result.url = curProper.url
                result.name = curProper.name
                result.quality = curProper.quality
                result.release_group = curProper.release_group
                result.version = curProper.version
                result.content = curProper.content

                # snatch it
                snatchEpisode(result, SNATCHED_PROPER)
                time.sleep(cpu_presets[sickbeard.CPU_PRESET])
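
The history query above bounds its lookup with a cutoff of datetime.datetime.today() minus 30 days, formatted with History.date_format before being bound as a SQL parameter. A small sketch of that cutoff computation; the '%Y%m%d%H%M%S' format is an assumption about what History.date_format contains:

import datetime

DATE_FORMAT = "%Y%m%d%H%M%S"  # assumption: mirrors History.date_format

def history_cutoff(days=30):
    """Oldest acceptable history date, formatted for the bound SQL parameter."""
    limit = datetime.datetime.today() - datetime.timedelta(days=days)
    return limit.strftime(DATE_FORMAT)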

Example 43

Project: rank-es Source File: feedbot.py
    def get(self):
        """
        1 - Remove expired links
        2 - Get new items from meneame.net and include them in the database
        3 - Update scores for already existing links
        
        """
        
        # Remove expired links
        today = datetime.today()                
        logging.info('Doing database cleanup')
        
        # First, low-score links with no chance of getting to the front page.
        links = [x for x in get_top_links(cfg_links_front_page)]
        if len(links) > 0:  # Just in case we have not populated the database
                            # This is necessary as we are not working directly
                            # on the query cursor but are building a list
            min_score = links[-1].score
            # Set min_score in memcache so we can use it to refresh the front 
            # page only when it's necessary.
            memcache.set('minscore', min_score)     #@UndefinedVariable
            # Everything with more than cfg_link_expiration_seconds/10
            # age and less than min_score/3 will go away.
            time_diff = today - timedelta(seconds = cfg_link_expiration_seconds/10)
            c = db.GqlQuery('SELECT * FROM LinkEnt WHERE date < :1', 
                            time_diff)
            # Cannot run the query with two selectors the way I want, so let's
            # do it another way.
            low = [x for x in c if x.score < min_score/3]
            expired_links = delete_links(low)
            logging.info('%d links removed due to low scores' % expired_links)

        # Now, expired links        
        time_diff = today - timedelta(seconds = cfg_link_expiration_seconds)
        c = db.GqlQuery('SELECT * FROM LinkEnt WHERE date < :1', time_diff)
        expired_links = delete_links(c)
        logging.info('%d links expired. Shiny database!' % expired_links)

        logging.info('Getting meneame.net latest RSS items.')
        # Front page
        l1 = get_meneame('http://www.meneame.net/rss2.php?meta=0')
        # Voting queue
        l2 = get_meneame('http://www.meneame.net/rss2.php?status=queued&meta=0')
        links = l1 + l2
        
        # Filter the links: use only the ones we don't already have
        current_links_db = db.GqlQuery('SELECT * FROM LinkEnt')
        current_links = [x.url for x in current_links_db]
        # Get results into an array to avoid expiration when updating
        # DB queries last for only 30 seconds.
        current_links_ents = [x for x in current_links_db]
        new_links = [x for x in links if x[0] not in current_links]

        # Insert new links into database
        logging.info('%d links in database, %d retrieved, %d to be inserted' 
                     % (len(current_links), len(links), len(new_links)))
        for l in new_links:
            url, title = l
            insert_new_link(url, title, log=False)
        logging.info('Finished fetching new items.')

        # Update the links scores and the top links cached search
        logging.info('Updating %d links...' % len(current_links_ents))
        update_links(current_links_ents, new_links, datetime.today())
        get_top_links(cfg_links_front_page, update=True)
        logging.info('Finished updating links.')
        
        # Update Main Page
        logging.info('Generating main page...')
        generate_main_page()
        logging.info('Main page generated')
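
Both cleanup passes derive their thresholds by subtracting a configured number of seconds from today's date: a softer cutoff at one tenth of the expiration window for low-score links, and the full window for everything else. A minimal sketch of that calculation, with cfg_link_expiration_seconds as a placeholder value:

from datetime import datetime, timedelta

cfg_link_expiration_seconds = 7 * 24 * 3600  # placeholder: one week

def expiration_cutoffs(today=None):
    """Return (low_score_cutoff, hard_cutoff) dates used to expire links."""
    today = today or datetime.today()
    low_score_cutoff = today - timedelta(seconds=cfg_link_expiration_seconds / 10)
    hard_cutoff = today - timedelta(seconds=cfg_link_expiration_seconds)
    return low_score_cutoff, hard_cutoff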

Example 44

Project: SiCKRAGE Source File: tv_cache.py
    def search_cache(self, episode=None, manualSearch=False, downCurQuality=False):
        neededEps = {}

        if not episode:
            dbData = [x['doc'] for x in CacheDB().db.get_many('providers', self.providerID, with_doc=True)]
        else:
            dbData = [x['doc'] for x in CacheDB().db.get_many('providers', self.providerID, with_doc=True)
                      if x['doc']['indexerid'] == episode.show.indexerid
                      and x['doc']['season'] == episode.season
                      and "|" + str(episode.episode) + "|" in x['doc']['episodes']]

        # for each cache entry
        for curResult in dbData:
            # ignored/required words, and non-tv junk
            if not show_names.filterBadReleases(curResult["name"]):
                continue

            # get the show object, or if it's not one of our shows then ignore it
            showObj = findCertainShow(sickrage.srCore.SHOWLIST, int(curResult["indexerid"]))
            if not showObj:
                continue

            # skip if provider is anime only and show is not anime
            if self.provider.anime_only and not showObj.is_anime:
                sickrage.srCore.srLogger.debug("" + str(showObj.name) + " is not an anime, skiping")
                continue

            # get season and ep data (ignoring multi-eps for now)
            curSeason = int(curResult["season"])
            if curSeason == -1:
                continue

            curEp = curResult["episodes"].split("|")[1]
            if not curEp:
                continue

            curEp = int(curEp)

            curQuality = int(curResult["quality"])
            curReleaseGroup = curResult["release_group"]
            curVersion = curResult["version"]

            # if the show says we want that episode then add it to the list
            if not showObj.wantEpisode(curSeason, curEp, curQuality, manualSearch, downCurQuality):
                sickrage.srCore.srLogger.info(
                    "Skipping " + curResult["name"] + " because we don't want an episode that's " +
                    Quality.qualityStrings[curQuality])
                continue

            epObj = showObj.getEpisode(curSeason, curEp)

            # build a result object
            title = curResult["name"]
            url = curResult["url"]

            sickrage.srCore.srLogger.info("Found result " + title + " at " + url)

            result = self.provider.getResult([epObj])
            result.show = showObj
            result.url = url
            result.name = title
            result.quality = curQuality
            result.release_group = curReleaseGroup
            result.version = curVersion
            result.content = None
            result.size = self.provider._get_size(url)
            result.files = self.provider._get_files(url)

            # add it to the list
            if epObj.episode not in neededEps:
                neededEps[epObj.episode] = [result]
            else:
                neededEps[epObj.episode].append(result)

        # datetime stamp this search so cache gets cleared
        self.last_search = datetime.datetime.today()

        return neededEps
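
The final line stamps self.last_search with datetime.datetime.today() so the cache layer can tell how fresh its provider data is. A sketch of the kind of staleness check such a timestamp enables; the 30-minute window is illustrative, not SiCKRAGE's actual setting:

import datetime

class SearchCacheStamp:
    """Hypothetical helper showing a last_search freshness check."""

    def __init__(self):
        self.last_search = None

    def should_refresh(self, max_age_minutes=30):
        # Refresh if we have never searched or the last search is stale.
        if self.last_search is None:
            return True
        age = datetime.datetime.today() - self.last_search
        return age > datetime.timedelta(minutes=max_age_minutes)

    def mark_searched(self):
        self.last_search = datetime.datetime.today()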

Example 45

Project: SiCKRAGE Source File: proper_searcher.py
Function: get_proper_list
    def _getProperList(self):
        """
        Walk providers for propers
        """
        propers = {}

        search_date = datetime.datetime.today() - datetime.timedelta(days=2)

        origThreadName = threading.currentThread().getName()

        # for each provider get a list of propers
        for providerID, providerObj in sickrage.srCore.providersDict.sort(
                randomize=sickrage.srCore.srConfig.RANDOMIZE_PROVIDERS).items():
            # check provider type and provider is enabled
            if not sickrage.srCore.srConfig.USE_NZBS and providerObj.type in [NZBProvider.type, NewznabProvider.type]:
                continue
            elif not sickrage.srCore.srConfig.USE_TORRENTS and providerObj.type in [TorrentProvider.type,
                                                                                    TorrentRssProvider.type]:
                continue
            elif not providerObj.isEnabled:
                continue

            threading.currentThread().setName(origThreadName + " :: [" + providerObj.name + "]")

            sickrage.srCore.srLogger.info("Searching for any new PROPER releases from " + providerObj.name)

            try:
                curPropers = providerObj.findPropers(search_date)
            except AuthException as e:
                sickrage.srCore.srLogger.debug("Authentication error: {}".format(e.message))
                continue
            except Exception as e:
                sickrage.srCore.srLogger.debug(
                    "Error while searching " + providerObj.name + ", skipping: {}".format(e.message))
                sickrage.srCore.srLogger.debug(traceback.format_exc())
                continue

            # if they haven't been added by a different provider then add the proper to the list
            for x in curPropers:
                if not re.search(r'(^|[\. _-])(proper|repack)([\. _-]|$)', x.name, re.I):
                    sickrage.srCore.srLogger.debug('findPropers returned a non-proper, we have caught and skipped it.')
                    continue

                name = self._genericName(x.name)
                if name not in propers:
                    sickrage.srCore.srLogger.debug("Found new proper: " + x.name)
                    x.provider = providerObj
                    propers[name] = x

            threading.currentThread().setName(origThreadName)

        # take the list of unique propers and get it sorted by date
        sortedPropers = sorted(propers.values(), key=operator.attrgetter('date'), reverse=True)
        finalPropers = []

        for curProper in sortedPropers:

            try:
                myParser = NameParser(False)
                parse_result = myParser.parse(curProper.name)
            except InvalidNameException:
                sickrage.srCore.srLogger.debug(
                    "Unable to parse the filename " + curProper.name + " into a valid episode")
                continue
            except InvalidShowException:
                sickrage.srCore.srLogger.debug("Unable to parse the filename " + curProper.name + " into a valid show")
                continue

            if not parse_result.series_name:
                continue

            if not parse_result.episode_numbers:
                sickrage.srCore.srLogger.debug(
                    "Ignoring " + curProper.name + " because it's for a full season rather than specific episode")
                continue

            sickrage.srCore.srLogger.debug(
                "Successful match! Result " + parse_result.original_name + " matched to show " + parse_result.show.name)

            # set the indexerid in the db to the show's indexerid
            curProper.indexerid = parse_result.show.indexerid

            # set the indexer in the db to the show's indexer
            curProper.indexer = parse_result.show.indexer

            # populate our Proper instance
            curProper.show = parse_result.show
            curProper.season = parse_result.season_number if parse_result.season_number is not None else 1
            curProper.episode = parse_result.episode_numbers[0]
            curProper.release_group = parse_result.release_group
            curProper.version = parse_result.version
            curProper.quality = Quality.nameQuality(curProper.name, parse_result.is_anime)
            curProper.content = None

            # filter release
            bestResult = pickBestResult(curProper, parse_result.show)
            if not bestResult:
                sickrage.srCore.srLogger.debug("Proper " + curProper.name + " were rejected by our release filters.")
                continue

            # only get anime proper if it has release group and version
            if bestResult.show.is_anime:
                if not bestResult.release_group and bestResult.version == -1:
                    sickrage.srCore.srLogger.debug(
                        "Proper " + bestResult.name + " doesn't have a release group and version, ignoring it")
                    continue

            # check if we actually want this proper (if it's the right quality)            
            dbData = [x['doc'] for x in MainDB().db.get_many('tv_episodes', bestResult.indexerid, with_doc=True)
                      if x['doc']['season'] == bestResult.season and x['doc']['episode'] == bestResult.episode]
            if not dbData: continue

            # only keep the proper if we have already retrieved the same quality ep (don't get better/worse ones)
            oldStatus, oldQuality = Quality.splitCompositeStatus(int(dbData[0]["status"]))
            if oldStatus not in (DOWNLOADED, SNATCHED) or oldQuality != bestResult.quality:
                continue

            # check if we actually want this proper (if it's the right release group and a higher version)
            if bestResult.show.is_anime:
                dbData = [x['doc'] for x in MainDB().db.get_many('tv_episodes', bestResult.indexerid, with_doc=True)
                          if x['doc']['season'] == bestResult.season and x['doc']['episode'] == bestResult.episode]

                oldVersion = int(dbData[0]["version"])
                oldRelease_group = (dbData[0]["release_group"])

                if -1 < oldVersion < bestResult.version:
                    sickrage.srCore.srLogger.info(
                        "Found new anime v" + str(bestResult.version) + " to replace existing v" + str(oldVersion))
                else:
                    continue

                if oldRelease_group != bestResult.release_group:
                    sickrage.srCore.srLogger.info(
                        "Skipping proper from release group: " + bestResult.release_group + ", does not match existing release group: " + oldRelease_group)
                    continue

            # if the show is in our list and there hasn't been a proper already added for that particular episode then add it to our list of propers
            if bestResult.indexerid != -1 and (bestResult.indexerid, bestResult.season, bestResult.episode) not in map(
                    operator.attrgetter('indexerid', 'season', 'episode'), finalPropers):
                sickrage.srCore.srLogger.info("Found a proper that we need: " + str(bestResult.name))
                finalPropers.append(bestResult)

        return finalPropers
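
Two details worth isolating from this searcher: propers are only requested for the last two days, and anything a provider returns is re-checked against a PROPER/REPACK name pattern. A standalone sketch of both, with the window size as a parameter:

import datetime
import re

PROPER_RE = re.compile(r'(^|[\. _-])(proper|repack)([\. _-]|$)', re.I)

def proper_search_window(days_back=2):
    """Date handed to each provider's findPropers() call."""
    return datetime.datetime.today() - datetime.timedelta(days=days_back)

def looks_like_proper(release_name):
    """Mirror of the name check used to drop stray non-proper results."""
    return bool(PROPER_RE.search(release_name))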

Example 46

Project: STProjectPlanner Source File: ProjectPlanner.py
	def _schedule_task_with_deadline(self, task, available_before_date, available_effort, max_effort, category):
		"""
		The scheduler is only precise to the day,
		but will make sure you never have more than max_effort hours in
		any single day
		"""
		MONDAY=0
		FRIDAY=4
		SATURDAY=5
		SUNDAY=6

		if available_effort <= 0:
			available_effort += max_effort
			available_before_date -= timedelta(days=1)

		# Define end_date
		if task.meta.end_date is not None and task.meta.end_date < available_before_date:
			end_date = task.meta.end_date
			available_effort = max_effort
		else:
			# print('SCHEDULE INFO: Task %s will have to begin earlier due to later tasks taking long' % (task,))
			end_date = available_before_date

		# Skip saturday & sunday
		if end_date.weekday() == SATURDAY:
			end_date -= timedelta(days=1)
		elif end_date.weekday() == SUNDAY: # this should never really happen
			end_date -= timedelta(days=2)

		if end_date < datetime.today():
			self.add_error('Past deadline', '"{}" ({}) should have been completed by {}'.format(task.description, category, end_date.date()))
		
		duration = int(task.category_duration(category))


		slots = []
		cur_dt = end_date
		while duration > 0:
			block_duration = min(available_effort, duration)
			slot = DaySlot(cur_dt, block_duration)
			slots.append(slot)
			available_effort -= block_duration
			duration -= block_duration
			if available_effort == 0:
				cur_dt = next_available_weekday(cur_dt)
				available_effort = max_effort

		task.set_slots_for_category(category, slots)

		return (cur_dt, available_effort)
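
The scheduler walks backwards from the deadline one working day at a time via next_available_weekday(), which is not shown in this excerpt. A plausible sketch of that helper, under the assumption that it simply steps back to the previous Monday-to-Friday date:

from datetime import timedelta

SATURDAY, SUNDAY = 5, 6

def next_available_weekday(current):
    """Assumed behaviour: the previous calendar day, skipping Saturday and Sunday."""
    day = current - timedelta(days=1)
    while day.weekday() in (SATURDAY, SUNDAY):
        day -= timedelta(days=1)
    return day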

Example 47

Project: jmbo Source File: __init__.py
    @override_settings(SITE_ID=3)
    def test_publish_retract(self):
        today = datetime.today()
        yesterday = today - timedelta(days=1)
        tomorrow = today + timedelta(days=1)

        p1 = ModelBase(title='title', state='published')
        p1.save()
        p1.sites.add(self.web_site)
        p1.save()
        jmbo_publish.Command().handle()
        queryset = ModelBase.permitted.all()
        self.failUnless(p1 in queryset)

        p2 = ModelBase(title='title', publish_on=today)
        p2.save()
        p2.sites.add(self.web_site)
        p2.save()
        jmbo_publish.Command().handle()
        queryset = ModelBase.permitted.all()
        self.failUnless(p2 in queryset)

        p4 = ModelBase(title='title', publish_on=today, retract_on=tomorrow)
        p4.save()
        p4.sites.add(self.web_site)
        p4.save()
        jmbo_publish.Command().handle()
        queryset = ModelBase.permitted.all()
        self.failUnless(p4 in queryset)

        p5 = ModelBase(title='title', publish_on=tomorrow)
        p5.save()
        p5.sites.add(self.web_site)
        p5.save()
        jmbo_publish.Command().handle()
        queryset = ModelBase.permitted.all()
        self.failIf(p5 in queryset)

        p6 = ModelBase(title='title', publish_on=yesterday, retract_on=today)
        p6.save()
        p6.sites.add(self.web_site)
        p6.save()
        jmbo_publish.Command().handle()
        queryset = ModelBase.permitted.all()
        self.failIf(p6 in queryset)

        p7 = ModelBase(title='title', publish_on=tomorrow, retract_on=tomorrow)
        p7.save()
        p7.sites.add(self.web_site)
        p7.save()
        jmbo_publish.Command().handle()
        queryset = ModelBase.permitted.all()
        self.failIf(p7 in queryset)

        p8 = ModelBase(
            title='title', publish_on=yesterday, retract_on=yesterday
        )
        p8.save()
        p8.sites.add(self.web_site)
        p8.save()
        jmbo_publish.Command().handle()
        queryset = ModelBase.permitted.all()
        self.failIf(p8 in queryset)
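
The assertions imply a simple window rule for the dated items: a ModelBase is permitted once publish_on has passed and drops out as soon as retract_on is reached. A hedged sketch of that rule; this is the behaviour the test expects, not jmbo's actual implementation, and it ignores the state-only case of p1:

from datetime import datetime

def is_permitted(publish_on, retract_on, now=None):
    """Published when publish_on <= now and retract_on (if set) is still in the future."""
    now = now or datetime.today()
    if publish_on is None or publish_on > now:
        return False
    return retract_on is None or retract_on > now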

Example 48

Project: python-bugzilla Source File: rw_functional.py
    def test04NewBugAllFields(self):
        """
        Create a bug using all 'new' fields, check some values, close it
        """
        bz = self.bzclass(url=self.url)

        summary = ("python-bugzilla test manyfields bug %s" %
                   datetime.datetime.today())
        url = "http://example.com"
        osval = "Windows"
        cc = "[email protected]"
        blocked = "461686,461687"
        dependson = "427301"
        comment = "Test bug from python-bugzilla test suite"
        sub_component = "Command-line tools (RHEL6)"
        alias = "pybz-%s" % datetime.datetime.today().strftime("%s")
        newout = tests.clicomm("bugzilla new "
            "--product 'Red Hat Enterprise Linux 6' --version 6.0 "
            "--component lvm2 --sub-component '%s' "
            "--summary \"%s\" "
            "--comment \"%s\" "
            "--url %s --severity Urgent --priority Low --os %s "
            "--arch ppc --cc %s --blocked %s --dependson %s "
            "--alias %s "
            "--outputformat \"%%{bug_id}\"" %
            (sub_component, summary, comment, url,
             osval, cc, blocked, dependson, alias), bz)

        self.assertTrue(len(newout.splitlines()) == 3)

        bugid = int(newout.splitlines()[2])
        bug = bz.getbug(bugid, extra_fields=["sub_components"])
        print("\nCreated bugid: %s" % bugid)

        self.assertEquals(bug.summary, summary)
        self.assertEquals(bug.bug_file_loc, url)
        self.assertEquals(bug.op_sys, osval)
        self.assertEquals(bug.blocks, _split_int(blocked))
        self.assertEquals(bug.depends_on, _split_int(dependson))
        self.assertTrue(all([e in bug.cc for e in cc.split(",")]))
        self.assertEquals(bug.longdescs[0]["text"], comment)
        self.assertEquals(bug.sub_components, {"lvm2": [sub_component]})
        self.assertEquals(bug.alias, [alias])

        # Close the bug

        # RHBZ makes it difficult to provide consistent semantics for
        # 'alias' update:
        # https://bugzilla.redhat.com/show_bug.cgi?id=1173114
        # alias += "-closed"
        tests.clicomm("bugzilla modify "
            "--close WONTFIX %s " %
            bugid, bz)
        bug.refresh()
        self.assertEquals(bug.status, "CLOSED")
        self.assertEquals(bug.resolution, "WONTFIX")
        self.assertEquals(bug.alias, [alias])

        # Check bug's minimal history
        ret = bug.get_history_raw()
        self.assertTrue(len(ret["bugs"]) == 1)
        self.assertTrue(len(ret["bugs"][0]["history"]) == 1)

Example 49

Project: django-token-auth Source File: __init__.py
    def testVisitURL200Cookie(self):

        url = '/protected/'

        token = Token(url=url)
        token.save()

        client = Client()

        # test that tokens work
        response = client.get(token.use_token())
        self.failUnlessEqual(response.status_code, 302)
        self.failUnlessEqual(client.cookies[TOKEN_COOKIE].value, token.token)

        response = client.get("/protected/")
        self.failUnlessEqual(response.status_code, 200)

        response = client.get("/protected/sub1/")
        self.failUnlessEqual(response.status_code, 200)

        response = client.get("/protected/sub1/sub2/")
        self.failUnlessEqual(response.status_code, 200)

        response = client.get(token.use_token())
        self.failUnlessEqual(response.status_code, 302)

        # test for two tokens
        token2 = Token(url=url)
        token2.save()

        response = client.get(token2.use_token())
        self.failUnlessEqual(response.status_code, 302)
        self.failUnless(client.cookies[TOKEN_COOKIE].value, token.token + '|' + token2.token)

        token.delete()
        token2.delete()

        # test for expired tokens
        token3 = Token(url=url)
        token3.save()

        response = client.get(token3.use_token())
        self.failUnlessEqual(response.status_code, 302)

        response = client.get("/protected/")
        self.failUnlessEqual(response.status_code, 200)

        token3.valid_until = datetime.datetime.today() - datetime.timedelta(days=2)
        token3.save()

        response = client.get("/protected/")
        self.failUnlessEqual(response.status_code, 302)
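
Expiry is simulated by pushing the token's valid_until two days into the past, after which the protected URL redirects again. The check that behaviour implies reduces to one comparison; a hedged sketch, not django-token-auth's actual code:

import datetime

def token_is_valid(valid_until, now=None):
    """A token with no valid_until never expires; otherwise it must still be in the future."""
    now = now or datetime.datetime.today()
    return valid_until is None or valid_until > now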

Example 50

Project: oioioi Source File: views.py
@enforce_condition(is_teacher)
def teacher_dashboard_view(request):
    contest_context = []
    min_date = datetime.today() - timedelta(days=7)

    contests = [contest for contest in visible_contests(request)]
    are_contests_limited = len(contests) > MAX_CONTESTS_ON_PAGE
    visible_contests_count = len(contests)

    contests = [x for x in contests if is_teachers(x)
                                    and can_admin_contest(request.user, x)]
    if len(contests) < visible_contests_count:
        are_contests_limited = True
    contests.sort(key=lambda x: x.creation_date, reverse=True)

    contests = contests[:MAX_CONTESTS_ON_PAGE]

    if 'oioioi.portals' in settings.INSTALLED_APPS:
        has_portal = global_portal_exists(request)
    else:
        has_portal = False

    for contest in contests:

        scores = [result.score.to_int() for result in
                  UserResultForContest.objects.filter(contest=contest).all()
                  if result.score is not None]

        max_score = 0
        for problem_inst in ProblemInstance.objects.filter(contest=contest):
            user_results = \
                UserResultForProblem.objects.filter(
                    problem_instance=problem_inst,
                    submission_report__isnull=False)
            if user_results.count() > 0:
                for result in user_results:
                    score_report = result.submission_report.score_report

                    if score_report_is_valid(score_report):
                        max_score += score_report.max_score.to_int()
                        break

        contest_dict = {
            'id': contest.id,
            'name': contest.name,
            'round_count': Round.objects.filter(contest=contest).count(),
            'task_count': ProblemInstance.objects.filter(
                contest=contest).count(),
            'user_count': User.objects.filter(
                participant__contest=contest).count(),
            'submission_count': Submission.objects.filter(
                problem_instance__contest=contest).count(),
            'recent_submission_count': Submission.objects.filter(
                problem_instance__contest=contest, date__gte=min_date
            ).count(),
            'recent_question_count': Message.objects.filter(
                contest=contest, kind='QUESTION', date__gte=min_date
            ).count(),
            'max_score': max_score,
            'scores': scores,
        }
        contest_context.append(contest_dict)
    context = {
        'contests': contest_context,
        'are_contests_limited': are_contests_limited,
        'has_portal': has_portal
    }
    if has_portal:
        context['portal_path'] = Portal.objects.filter(owner=None)[0] \
            .root.get_path()

    return TemplateResponse(request,
                            'simpleui/main_dashboard/dashboard.html', context)
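
All of the dashboard's "recent" counters share one cutoff, min_date, computed as seven days before today and reused in every date__gte filter. The same windowing in plain Python, for illustration only:

from datetime import datetime, timedelta

def recent_count(dates, days=7, now=None):
    """Count items whose date falls within the last `days` days."""
    now = now or datetime.today()
    min_date = now - timedelta(days=days)
    return sum(1 for d in dates if d >= min_date)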