datetime.timedelta

Here are examples of the Python API datetime.timedelta, collected from open source projects.

132 Examples
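
Before the project examples, a minimal standalone sketch of the core API: a timedelta represents a duration that can be added to or subtracted from datetime objects, compared, and converted to seconds.

import datetime

now = datetime.datetime(2016, 3, 1, 12, 0, 0)

one_day = datetime.timedelta(days=1)
ninety_minutes = datetime.timedelta(hours=1, minutes=30)

print(now + one_day)                          # 2016-03-02 12:00:00
print(now - ninety_minutes)                   # 2016-03-01 10:30:00
print(one_day.total_seconds())                # 86400.0
print(datetime.timedelta(days=7) > one_day)   # True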

Example 1

Project: TARDIS Source File: TARDIS.py
def main(vulnerability,vulnObject,sourceIP,sourceHost):
	#Create results and working directories
	if not os.path.exists('Results'):
		os.makedirs('Results')
	if not os.path.exists('Working'):
		os.makedirs('Working')
	
	#Make sure the vulnerability is valid
	if vulnerability != "":
		vulnCheck=0
		resultCount=0
		logsource=''
		print("Searching for evidence of \"" + vulnerability + "\"")
		print("  Host: " + sourceIP)
		
		try:
			configFile = 'config.xml'
			tree = ET.parse(configFile)
			root = tree.getroot()
		except:
			sys.exit("Not a valid config XML file")
		for settings in root.findall("./log_source"):
			logsource=settings.text
		cnx = getDBConnector()
		
		
		#check if vulnerability/asset combo exists in assetVulnerability Table
		cursor = cnx.cursor()
		query = ("SELECT count(*) as count from assetVulnerabilities where victim_ip = '" + str(ip2long(sourceIP)) + "' and threat_id = '" + vulnerability + "'")
		
		cursor.execute(query)
		for row in cursor:
			vulnCheck=row[0]
		cursor.close()
		
		if vulnCheck==0:
			#No combination exists, write data to DB
			
			cursor = cnx.cursor()
			add_vulnInstance = ("INSERT INTO assetVulnerabilities "
               "(victim_ip, threat_id, active) "
               "VALUES (%s, %s, %s)")
			vulnData = (ip2long(sourceIP), vulnerability, '1')
			
			# Insert new entry
			cursor.execute(add_vulnInstance , vulnData )
			
			cnx.commit()
			cursor.close()
			cnx.close()
		searchStringResults= findStixObservables.run(vulnerability)
		isExploitFound=False
		searchStringCount=0
		operator=searchStringResults[0]
		numResults=0
		if(searchStringResults[1]=="No search file found"):
			searchResults="0"
			print("  No search file found\n")
		elif(searchStringResults[1]=="No supported observables found"):
			searchResults="0"
			print("  No supported observables found\n")
		else:
			#run  search...
			#search should return number of results
			#Insert source host from arguments
			for entry in searchStringResults:
				if logsource=="splunk":
					if (searchStringCount == 1):
						searchString=entry + " AND (host=\"" + sourceHost + "\" OR s_ip=\"" + sourceIP + "\" OR d_host=\"" + sourceHost + "\")  | fields host, c_ip | fields - _bkt, _cd, _indextime, _kv, _serial, _si, _sourcetype | rename _raw as \"Raw Log\" | rename c_ip as clientip"
						numResults=splunk.searchVulnerability(searchString,vulnerability,sourceIP,sourceHost)
						if (numResults != "0"):
							data = json.load(numResults)
					
					if (operator=="AND"):
						if (searchStringCount > 1):
							resultCount=0
							for result in data["results"]:
								startTime =  dateutil.parser.parse(data["results"][resultCount]["_time"]) + datetime.timedelta(days =- 300)
								endTime =  dateutil.parser.parse(data["results"][resultCount]["_time"]) + datetime.timedelta(days = 300)
								searchString=entry + " AND (host=\"" + sourceHost + "\" OR s_ip=\"" + sourceIP + "\" OR d_host=\"" + sourceHost + "\") | fields host, clientip | fields - _bkt, _cd, _indextime, _kv, _serial, _si, _sourcetype | rename _raw as \"Raw Log\""
								newResults=splunk.searchVulnerabilityTimeRange(searchString,vulnerability,sourceIP,sourceHost,startTime.isoformat(),endTime.isoformat())
								if (newResults != "0"):
									#This is the result from search 1
									newData = json.load(newResults)
									newResultCount=0
									for result in newData["results"]:
										try:
											clientip=newData["results"][newResultCount]["clientip"]
										except:
											clientip="0"
										isExploitFound=True
										#These are the results from any further results proving the AND condition
										cnx = getDBConnector()
										cursor = cnx.cursor()
										query = ("SELECT count(*) as count from attackInventory where victim_ip = '" + str(ip2long(sourceIP)) + "' and threat_id = '" + vulnerability + "' and attack_time = '" + data["results"][resultCount]["_time"] + "'")
										cursor.execute(query)
										for row in cursor:
											logCheck=row[0]
										cursor.close()
										if logCheck==0:
											#Write data to DB
											cursor = cnx.cursor()
											add_logInstance = ("INSERT INTO attackInventory "
																"(victim_ip, attacker_ip, attack_time, attack_log, threat_id) "
																"VALUES (%s, %s, %s, %s, %s)")
											
											logData = (ip2long(sourceIP), ip2long(clientip), newData["results"][newResultCount]["_time"], newData["results"][newResultCount]["Raw Log"], vulnerability)
											# Insert new entry
											cursor.execute(add_logInstance , logData )
											cnx.commit()
											cursor.close()
										cnx.close()
										newResultCount=newResultCount+1
								else:
									newResultCount=0
							if (isExploitFound==True):
								try:
									clientip=data["results"][resultCount]["clientip"]
								except:
									clientip="0"
								cnx = getDBConnector()
								cursor = cnx.cursor()
								query = ("SELECT count(*) as count from attackInventory where victim_ip = '" + str(ip2long(sourceIP)) + "' and threat_id = '" + vulnerability + "' and attack_time = '" + data["results"][resultCount]["_time"] + "'")
								cursor.execute(query)
								for row in cursor:
									logCheck=row[0]
								cursor.close()
								if logCheck==0:
									#Write data to DB
									cursor = cnx.cursor()
									add_logInstance = ("INSERT INTO attackInventory "
														"(victim_ip, attacker_ip, attack_time, attack_log, threat_id) "
														"VALUES (%s, %s, %s, %s, %s)")
									
									logData = (ip2long(sourceIP), ip2long(clientip), data["results"][resultCount]["_time"], data["results"][resultCount]["Raw Log"], vulnerability)
									# Insert new entry
									cursor.execute(add_logInstance , logData )
									cnx.commit()
									cursor.close()
								cnx.close()
								resultCount=newResultCount+1
							else:
								resultCount=newResultCount
					elif (operator=="OR"):
						if (searchStringCount > 0):
							#only keep searching if there are more IOCS to look at...
							if len(searchStringResults)>2:
								searchString=entry + " AND (host=\"" + sourceHost + "\" OR s_ip=\"" + sourceIP + "\" OR d_host=\"" + sourceHost + "\")  | fields host, clientip | fields - _bkt, _cd, _indextime, _kv, _serial, _si, _sourcetype | rename _raw as \"Raw Log\""
								numResults=splunk.searchVulnerability(searchString,vulnerability,sourceIP,sourceHost)
								if (numResults != "0"):
									data = json.load(numResults)
									resultCount=0
									for result in data["results"]:
										isExploitFound=True
										cnx = getDBConnector()
										cursor = cnx.cursor()
										query = ("SELECT count(*) as count from attackInventory where victim_ip = '" + str(ip2long(sourceIP)) + "' and threat_id = '" + vulnerability + "' and attack_time = '" + data["results"][resultCount]["_time"] + "'")
										cursor.execute(query)
										for row in cursor:
											logCheck=row[0]
										cursor.close()
										if logCheck==0:
											#Write data to DB
											cursor = cnx.cursor()
											add_logInstance = ("INSERT INTO attackInventory "
																"(victim_ip, attacker_ip, attack_time, attack_log, threat_id) "
																"VALUES (%s, %s, %s, %s, %s)")
											logData = (ip2long(sourceIP), ip2long(data["results"][resultCount]["clientip"]), data["results"][resultCount]["_time"], data["results"][resultCount]["Raw Log"], vulnerability)
											
											# Insert new entry
											cursor.execute(add_logInstance , logData )
											
											cnx.commit()
											cursor.close()
										cnx.close()
										resultCount=resultCount+1
							elif len(searchStringResults)==2:
								searchString=entry + " AND (host=\"" + sourceHost + "\" OR host=\"" + sourceIP + "\" OR s_ip=\"" + sourceIP + "\" OR d_host=\"" + sourceHost + "\")  | fields host, clientip | fields - _bkt, _cd, _indextime, _kv, _serial, _si, _sourcetype | rename _raw as \"Raw Log\""
								numResults=splunk.searchVulnerability(searchString,vulnerability,sourceIP,sourceHost)
								if (numResults != "0"):
									data = json.load(numResults)
									resultCount=0
									for result in data["results"]:
										isExploitFound=True
										cnx = getDBConnector()
										cursor = cnx.cursor()
										query = ("SELECT count(*) as count from attackInventory where victim_ip = '" + str(ip2long(sourceIP)) + "' and threat_id = '" + vulnerability + "' and attack_time = '" + data["results"][resultCount]["_time"] + "'")
										cursor.execute(query)
										for row in cursor:
											logCheck=row[0]
										cursor.close()
										if logCheck==0:
											#Write data to DB
											cursor = cnx.cursor()
											add_logInstance = ("INSERT INTO attackInventory "
																"(victim_ip, attacker_ip, attack_time, attack_log, threat_id) "
																"VALUES (%s, %s, %s, %s, %s)")
											
											logData = (ip2long(sourceIP), ip2long(data["results"][resultCount]["clientip"]), data["results"][resultCount]["_time"], data["results"][resultCount]["Raw Log"], vulnerability)
											
											# Insert new entry
											cursor.execute(add_logInstance , logData )
											
											cnx.commit()
											cursor.close()
										cnx.close()
										resultCount=resultCount+1
					searchStringCount=searchStringCount+1
				elif logsource=="elastic_search":
					numResults=0
					startTime="-90d"
					endTime="now"
					#Insert source host from arguments
					entry = re.sub('\<source_host\>', sourceHost, entry)
					#Insert source IP from arguments
					entry = re.sub('\<source_ip\>', sourceIP, entry)
					if (searchStringCount == 1):
						#Insert startTime
						entry = re.sub('\<startTime\>', startTime, entry)
						#Insert endTime
						entry = re.sub('\<endTime\>', endTime, entry)
						if sourceIP == '*':
							entry = re.sub('\<min_count\>', '1', entry)
						else:
							entry = re.sub('\<min_count\>', '2', entry)
						#print entry
						searchResults = ElasticSearchQuery.searchVulnerability(entry,vulnerability,sourceIP,sourceHost)
						#print searchResults
						numResults = getElasticSearchResults(searchResults)
						#print numResults
					if (operator=="AND"):
						if (searchStringCount > 1):
							resultCount=0
							for hit in searchResults['hits']['hits']:
								startTime =  dateutil.parser.parse(hit["_source"]["@timestamp"]) + datetime.timedelta(days =- 1)
								
								endTime =  dateutil.parser.parse(hit["_source"]["@timestamp"]) + datetime.timedelta(days = 1)
								#Insert start time
								entry = re.sub('\<startTime\>', str(startTime.isoformat()), entry)
								#Insert end time
								entry = re.sub('\<endTime\>', str(endTime.isoformat()), entry)
								newSearchResults = ElasticSearchQuery.searchVulnerability(entry,vulnerability,sourceIP,sourceHost)
								newResults = getElasticSearchResults(newSearchResults)
								if (newResults != "0"):
									#This is the result from search 1
									newResultCount=0
									isExploitFound=True
									for newhit in newSearchResults['hits']['hits']:
										try:
											attackerIP=newhit["_source"]["evt_srcip"]
										except:
											attackerIP="0.0.0.0"
										#These are the results from any further results proving the AND condition
										cnx = getDBConnector()
										cursor = cnx.cursor()
										#Check original log hit
										query = ("SELECT count(*) as count from attackInventory where victim_ip = '" + str(ip2long(sourceIP)) + "' and threat_id = '" + vulnerability + "' and attack_log = '" + newhit["_source"]["message"] + "'")
										cursor.execute(query)
										for row in cursor:
											logCheck=row[0]
										cursor.close()
										if logCheck==0:
											#Write data to DB
											cursor = cnx.cursor()
											add_logInstance = ("INSERT INTO attackInventory "
																"(victim_ip, attacker_ip, attack_time, attack_log, threat_id) "
																"VALUES (%s, %s, %s, %s, %s)")
											
											logData = (ip2long(sourceIP), ip2long(attackerIP),hit["_source"]["@timestamp"], hit["_source"]["message"], vulnerability)
											# Insert new entry
											cursor.execute(add_logInstance , logData )
										cursor = cnx.cursor()
										#check new log hit
										query = ("SELECT count(*) as count from attackInventory where victim_ip = '" + str(ip2long(sourceIP)) + "' and threat_id = '" + vulnerability + "' and attack_log = '" + newhit["_source"]["message"] + "'")
										cursor.execute(query)
										for row in cursor:
											logCheck=row[0]
										cursor.close()
										if logCheck==0:
											#Write data to DB
											cursor = cnx.cursor()
											add_logInstance = ("INSERT INTO attackInventory "
																"(victim_ip, attacker_ip, attack_time, attack_log, threat_id) "
																"VALUES (%s, %s, %s, %s, %s)")
											
											logData = (ip2long(sourceIP), ip2long(attackerIP),newhit["_source"]["@timestamp"], newhit["_source"]["message"], vulnerability)
											# Insert new entry
											cursor.execute(add_logInstance , logData )
											
											cnx.commit()
											cursor.close()
										cnx.close()
										newResultCount=newResultCount+1
								else:
									newResultCount=0
								resultCount=newResultCount+1
								
								
								
					elif (operator=="OR"):
						if (searchStringCount == 1):
							if (int(numResults) > 0):
								resultCount = int(numResults)
								writeElasticSearchResults(searchResults,vulnObject,sourceIP,vulnerability)
								isExploitFound=True
						if (searchStringCount > 1):
							#Insert startTime
							entry = re.sub('\<startTime\>', startTime, entry)
							#Insert endTime
							entry = re.sub('\<endTime\>', endTime, entry)
							if sourceIP == '*':
								entry = re.sub('\<min_count\>', '1', entry)
							else:
								entry = re.sub('\<min_count\>', '2', entry)
							#only keep searching if there are more IOCS to look at...
							if len(searchStringResults)>1:
								searchResults = ElasticSearchQuery.searchVulnerability(entry,vulnerability,sourceIP,sourceHost)
								numResults = getElasticSearchResults(searchResults)
								if int(numResults) > 0:
									writeElasticSearchResults(searchResults,vulnObject,sourceIP,vulnerability)
								resultCount = resultCount + int(numResults)
					searchStringCount=searchStringCount+1
			if (isExploitFound==True):
				print("  Found " + str(resultCount) + " instances of exploitation!")
				print("  Generating attack logs") 
				#Parse through data list to get elastic timestamp for audit log times...
			else:
				print("  No instances of exploitation found.\n")
	else:
		resultCount=0
		print("Invalid vulnerability ID")
	return(resultCount)
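
The timedelta work in this example is the correlation window built around each hit: the result's _time (Splunk) or @timestamp (Elasticsearch) value is parsed with dateutil and widened by plus/minus 300 days (or 1 day) before the follow-up search. A minimal sketch of that pattern in isolation; the window helper below is illustrative, only the timestamp parsing and the timedelta arithmetic come from the example.

import datetime
import dateutil.parser

def correlation_window(timestamp_str, days=300):
    """Return (start, end) ISO-8601 strings spanning +/- `days` around an event time."""
    event_time = dateutil.parser.parse(timestamp_str)
    start = event_time - datetime.timedelta(days=days)
    end = event_time + datetime.timedelta(days=days)
    return start.isoformat(), end.isoformat()

# e.g. widen a Splunk result's "_time" value into a search window
start, end = correlation_window("2016-02-21T14:02:13.000-05:00", days=300)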

Example 2

Project: airmozilla Source File: dashboard.py
@staff_required
@json_view
def dashboard_data(request):
    context = {}
    now = timezone.now()
    today = now.replace(hour=0, minute=0, second=0, microsecond=0)
    tomorrow = today + datetime.timedelta(days=1)
    yesterday = today - datetime.timedelta(days=1)
    this_week = today - datetime.timedelta(days=today.weekday())
    next_week = this_week + datetime.timedelta(days=7)
    last_week = this_week - datetime.timedelta(days=7)
    this_month = today.replace(day=1)
    next_month = this_month
    while next_month.month == this_month.month:
        next_month += datetime.timedelta(days=1)
    last_month = (this_month - datetime.timedelta(days=1)).replace(day=1)
    this_year = this_month.replace(month=1)
    next_year = this_year.replace(year=this_year.year + 1)
    last_year = this_year.replace(year=this_year.year - 1)
    context['groups'] = []

    def make_filter(key, gte=None, lt=None):
        filter = {}
        if gte is not None:
            filter['%s__gte' % key] = gte
        if lt is not None:
            filter['%s__lt' % key] = lt
        return filter

    def get_counts(qs, key):
        counts = {}

        counts['today'] = qs.filter(
            **make_filter(key, gte=today, lt=tomorrow)
        ).count()
        counts['yesterday'] = qs.filter(
            **make_filter(key, gte=yesterday, lt=today)).count()

        counts['this_week'] = qs.filter(
            **make_filter(key, gte=this_week, lt=next_week)).count()
        counts['last_week'] = qs.filter(
            **make_filter(key, gte=last_week, lt=this_week)).count()

        counts['this_month'] = qs.filter(
            **make_filter(key, gte=this_month, lt=next_month)).count()
        counts['last_month'] = qs.filter(
            **make_filter(key, gte=last_month, lt=this_month)).count()

        counts['this_year'] = qs.filter(
            **make_filter(key, gte=this_year, lt=next_year)).count()
        counts['last_year'] = qs.filter(
            **make_filter(key, gte=last_year, lt=this_year)).count()

        counts['ever'] = qs.count()
        return counts

    # Events
    events = Event.objects.exclude(status=Event.STATUS_REMOVED)
    counts = get_counts(events, 'start_time')
    context['groups'].append({
        'name': 'New Events',
        'counts': counts
    })

    # Suggested Events
    counts = get_counts(SuggestedEvent.objects.all(), 'created')
    context['groups'].append({
        'name': 'Requested Events',
        'counts': counts
    })

    # Users
    counts = get_counts(User.objects.all(), 'date_joined')
    context['groups'].append({
        'name': 'New Users',
        'counts': counts
    })

    # Comments
    counts = get_counts(Comment.objects.all(), 'created')
    context['groups'].append({
        'name': 'Comments',
        'counts': counts
    })

    # Event revisions
    counts = get_counts(EventRevision.objects.all(), 'created')
    context['groups'].append({
        'name': 'Event Revisions',
        'counts': counts
    })

    # Pictures
    counts = get_counts(Picture.objects.all(), 'created')
    context['groups'].append({
        'name': 'Pictures',
        'counts': counts
    })

    # Chapters
    counts = get_counts(Chapter.objects.all(), 'created')
    context['groups'].append({
        'name': 'Chapters',
        'counts': counts
    })

    # Starred events
    counts = get_counts(StarredEvent.objects.all(), 'created')
    context['groups'].append({
        'name': 'Starred events',
        'counts': counts
    })

    def get_duration_totals(qs, key='start_time'):

        # def make_filter(gte=None, lt=None):
        #     filter = {}
        #     if gte is not None:
        #         filter['%s__gte' % key] = gte
        #     if lt is not None:
        #         filter['%s__lt' % key] = lt
        #     return filter

        counts = {}

        def sum(elements):
            seconds = elements.aggregate(Sum('duration'))['duration__sum']
            seconds = seconds or 0  # in case it's None
            minutes = seconds / 60
            hours = minutes / 60
            if hours > 1:
                return "%dh" % hours
            elif minutes > 1:
                return "%dm" % minutes
            return "%ds" % seconds

        counts['today'] = sum(qs.filter(
            **make_filter(key, gte=today)))
        counts['yesterday'] = sum(qs.filter(
            **make_filter(key, gte=yesterday, lt=today)))

        counts['this_week'] = sum(qs.filter(
            **make_filter(key, gte=this_week)))
        counts['last_week'] = sum(qs.filter(
            **make_filter(key, gte=last_week, lt=this_week)))

        counts['this_month'] = sum(qs.filter(
            **make_filter(key, gte=this_month)))
        counts['last_month'] = sum(qs.filter(
            **make_filter(key, gte=last_month, lt=this_month)))

        counts['this_year'] = sum(qs.filter(
            **make_filter(key, gte=this_year)))
        counts['last_year'] = sum(qs.filter(
            **make_filter(key, gte=last_year, lt=this_year)))

        counts['ever'] = sum(qs)
        return counts

    def get_size_totals(qs, key='created'):

        counts = {}

        def sum(elements):
            bytes = elements.aggregate(Sum('size'))['size__sum']
            return filesizeformat(bytes)

        counts['today'] = sum(qs.filter(
            **make_filter(key, gte=today)))
        counts['yesterday'] = sum(qs.filter(
            **make_filter(key, gte=yesterday, lt=today)))

        counts['this_week'] = sum(qs.filter(
            **make_filter(key, gte=this_week)))
        counts['last_week'] = sum(qs.filter(
            **make_filter(key, gte=last_week, lt=this_week)))

        counts['this_month'] = sum(qs.filter(
            **make_filter(key, gte=this_month)))
        counts['last_month'] = sum(qs.filter(
            **make_filter(key, gte=last_month, lt=this_month)))

        counts['this_year'] = sum(qs.filter(
            **make_filter(key, gte=this_year)))
        counts['last_year'] = sum(qs.filter(
            **make_filter(key, gte=last_year, lt=this_year)))

        counts['ever'] = sum(qs)
        return counts

    # Exceptional
    counts = get_duration_totals(Event.objects.exclude(duration__isnull=True))
    context['groups'].append({
        'name': 'Total Event Durations',
        'counts': counts
    })

    counts = get_size_totals(Upload.objects.all())
    context['groups'].append({
        'name': 'Uploads',
        'counts': counts,
        'small': True
    })

    return context
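
All the timedelta arithmetic above feeds half-open ranges [gte, lt), which is what keeps adjacent buckets (today vs. tomorrow, this week vs. next week) from double-counting an event that lands exactly on a boundary. A small sketch of the same idea without the Django ORM; the in_bucket helper and sample data are illustrative.

import datetime

def in_bucket(ts, gte=None, lt=None):
    """Half-open range test: gte <= ts < lt, with either bound optional."""
    if gte is not None and ts < gte:
        return False
    if lt is not None and ts >= lt:
        return False
    return True

today = datetime.datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)
tomorrow = today + datetime.timedelta(days=1)
yesterday = today - datetime.timedelta(days=1)

events = [today + datetime.timedelta(hours=h) for h in (-30, -2, 1, 5, 26)]
counts = {
    'today': sum(in_bucket(e, gte=today, lt=tomorrow) for e in events),
    'yesterday': sum(in_bucket(e, gte=yesterday, lt=today) for e in events),
}
print(counts)  # {'today': 2, 'yesterday': 1}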

Example 3

Project: simplecoin_multi Source File: utils.py
def collect_user_stats(user_address):
    """ Accuemulates all aggregate user data for serving via API or rendering
    into main user stats page """
    # store all the raw data of we're gonna grab
    workers = {}

    def check_new(user_address, worker, algo):
        """ Setups up an empty worker template. Since anything that has data on
        a worker can create one then it's useful to abstract. """
        key = (user_address, worker, algo)
        if key not in workers:
            workers[key] = {'total_shares': ShareTracker(algo),
                            'last_10_shares': ShareTracker(algo),
                            'online': False,
                            'servers': {},
                            'algo': algo,
                            'name': worker,
                            'address': user_address}
        return workers[key]

    # Get the lower bound for 10 minutes ago
    lower_10, upper_10 = make_upper_lower(offset=datetime.timedelta(minutes=2))
    lower_day, upper_day = make_upper_lower(span=datetime.timedelta(days=1),
                                            clip=datetime.timedelta(minutes=2))

    newest = datetime.datetime.fromtimestamp(0)
    for slc in ShareSlice.get_span(ret_query=True,
                                   upper=upper_day,
                                   lower=lower_day,
                                   user=(user_address, )):
        if slc.time > newest:
            newest = slc.time

        worker = check_new(slc.user, slc.worker, slc.algo)
        worker['total_shares'].count_slice(slc)
        if slc.time > lower_10:
            worker['last_10_shares'].count_slice(slc)

    hide_hr = newest < datetime.datetime.utcnow() - datetime.timedelta(seconds=current_app.config['worker_hashrate_fold'])

    # pull online status from cached pull direct from powerpool servers
    for worker_name, connection_summary in (cache.get('addr_online_' + user_address) or {}).iteritems():
        for ppid, connections in connection_summary.iteritems():
            try:
                powerpool = powerpools[ppid]
            except KeyError:
                current_app.logger.warn(
                    "Cache said to look for powerpool {} which doesn't exist!"
                    .format(ppid))
                continue

            worker = check_new(user_address, worker_name, powerpool.chain.algo.key)
            worker['online'] = True
            worker['servers'].setdefault(powerpool, 0)
            worker['servers'][powerpool] += 1

    for worker in workers.itervalues():
        worker['status'] = redis_conn.get("status_{address}_{name}".format(**worker))
        if worker['status']:
            worker['status'] = json.loads(worker['status'])
            worker['status_stale'] = False
            worker['status_time'] = datetime.datetime.utcnow()
            try:
                worker['total_hashrate'] = sum([gpu['MHS av'] for gpu in worker['status']['gpus']]) * 1000000
            except Exception:
                worker['total_hashrate'] = -1

            try:
                algo_hps = algos[worker['algo']].hashes_per_share
                worker['wu'] = sum(
                    [((gpu['Difficulty Accepted'] * algo_hps / 2**16) / gpu['Device Elapsed']) * 60
                     for gpu in worker['status']['gpus']])
            except KeyError:
                worker['wu'] = 0

            try:
                worker['wue'] = worker['wu'] / (worker['total_hashrate'] / 1000)
            except ZeroDivisionError:
                worker['wue'] = 0.0

            ver = worker['status'].get('v', '0.2.0').split('.')
            try:
                worker['status_version'] = [int(part) for part in ver]
            except ValueError:
                worker['status_version'] = "Unsupp"

    # Could definitely be better... Makes a list of the dictionary keys sorted
    # by the worker name, then generates a list of dictionaries using the list
    # of keys
    workers = [workers[key] for key in sorted(workers.iterkeys(), key=lambda tpl: tpl[1])]

    settings = UserSettings.query.filter_by(user=user_address).first()

    # Generate payout history and stats for earnings all time
    earning_summary = {}
    def_earnings = dict(
        ready_to_send=dec(0),
        sent=dec(0),
        by_currency=None,
        sold_btc_total=dec(0),
        payable_total=dec(0)
    )
    currency = dict(
        immature=dec(0),
        unconverted=dec(0),
        sold=dec(0),
        btc_converted=dec(0),
        payable=dec(0),
        total_pending=dec(0)
    )

    def lookup_curr(curr):
        if curr not in earning_summary:
            earning_summary[curr] = def_earnings.copy()
            earning_summary[curr]['by_currency'] = {}

        return earning_summary[curr]

    # Go through already grouped aggregates
    payouts = Payout.query.filter_by(user=user_address).order_by(Payout.created_at.desc()).limit(20)

    # Loop through all unaggregated credits to find the rest
    credits = (Credit.query.with_polymorphic(CreditExchange).
               filter_by(user=user_address, payout_id=None).
               filter(Credit.block != None).
               options(db.joinedload('payout'),
                       db.joinedload('block')).
               join(Credit.block).
               filter(
                   ((Block.orphan == True) & (Block.found_at >= lower_day))
                   | (Block.orphan != True)).
               order_by(Credit.id.desc())).all()

    for credit in credits:
        # By desired currency
        summary = lookup_curr(credit.currency_obj)
        # By source currency
        curr = summary['by_currency'].setdefault(credit.block.currency_obj, currency.copy())
        curr['convert'] = credit.block.currency != credit.currency
        if credit.type == 1:  # CreditExchange
            if not credit.payable and not credit.block.orphan:
                if credit.sell_amount is not None:
                    curr['sold'] += credit.amount
                    curr['btc_converted'] += credit.sell_amount
                    summary['sold_btc_total'] += credit.sell_amount
                else:
                    curr['unconverted'] += credit.amount

        if credit.payable:
            curr['payable'] += credit.payable_amount
            summary['payable_total'] += credit.payable_amount
        if not credit.block.mature and not credit.block.orphan:
            curr['immature'] += credit.amount
        if not credit.block.orphan:
            curr['total_pending'] += credit.amount

    for currency, obj in earning_summary.iteritems():
        for currency, curr in obj['by_currency'].iteritems():
            for k, val in curr.iteritems():
                if isinstance(val, dec):
                    curr[k] = val.quantize(current_app.SATOSHI)

    # Show the user approximate next payout and exchange times
    now = datetime.datetime.now()
    next_exchange = now.replace(minute=0, second=0, microsecond=0, hour=((now.hour + 2) % 23))
    next_payout = now.replace(minute=0, second=0, microsecond=0, hour=0)

    f_perc = dec(current_app.config.get('fee_perc', dec('0.02'))) * 100

    return dict(workers=workers,
                credits=credits[:20],
                payouts=payouts[:20],
                settings=settings,
                next_payout=next_payout,
                earning_summary=earning_summary,
                hide_hr=hide_hr,
                next_exchange=next_exchange,
                f_per=f_perc)
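
Two timedelta idioms carry this function: make_upper_lower derives a (lower, upper) time window from span/offset/clip durations, and hide_hr compares the newest share slice against utcnow() minus a configurable fold interval to decide whether hashrate data is stale. make_upper_lower is project-specific, so the version below is only a plausible stand-in showing how the keyword arguments at the call sites could combine.

import datetime

def make_upper_lower(span=datetime.timedelta(minutes=10),
                     offset=datetime.timedelta(0),
                     clip=datetime.timedelta(0)):
    """Illustrative stand-in: a window of length `span` ending `offset` + `clip` before now."""
    upper = datetime.datetime.utcnow() - offset - clip
    lower = upper - span
    return lower, upper

lower_10, upper_10 = make_upper_lower(offset=datetime.timedelta(minutes=2))
lower_day, upper_day = make_upper_lower(span=datetime.timedelta(days=1),
                                        clip=datetime.timedelta(minutes=2))

# Staleness test: hide hashrate if the newest data point is older than the fold window.
worker_hashrate_fold = 300  # seconds; illustrative config value
newest = datetime.datetime.utcnow() - datetime.timedelta(minutes=10)
hide_hr = newest < datetime.datetime.utcnow() - datetime.timedelta(seconds=worker_hashrate_fold)
print(hide_hr)  # True: 10 minutes old exceeds the 5-minute fold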

Example 4

Project: Bluto Source File: output.py
def action_output_wild_false_hunter(brute_results_dict, sub_intrest, google_results, bing_true_results, linkedin_results, check_count, domain, time_spent_email, time_spent_brute, time_spent_total, emailHunter_results, args, report_location, company, data_mine):
    info('Output action_output_wild_false_hunter: Start')
    linkedin_evidence_results = []
    email_evidence_results = []
    email_results = []
    email_seen = []
    url_seen = []
    person_seen = []
    final_emails = []

    if emailHunter_results is not None:
        for email in emailHunter_results:
            email_results.append(email[0])
            email_evidence_results.append((email[0],email[1]))

    for email, url in google_results:
        try:
            e1, e2 = email.split(',')
            if url not in email_seen:
                email_seen.append(url)
                email_evidence_results.append((str(e2).replace(' ',''),url))
                email_evidence_results.append((str(e1).replace(' ',''),url))
                email_results.append((str(e2).replace(' ','')))
                email_results.append((str(e1).replace(' ','')))

        except ValueError:
            if url not in email_seen:
                email_seen.append(url)
                email_evidence_results.append((str(email).replace(' ',''),url))
                email_results.append(str(email).replace(' ',''))

    for e, u in bing_true_results:
        email_results.append(e)
        if u not in url_seen:
            email_evidence_results.append((e, u))

    for url, person, description in linkedin_results:
        if person not in person_seen:
            person_seen.append(person)
            linkedin_evidence_results.append((url, person, description))

    linkedin_evidence_results.sort(key=lambda tup: tup[1])
    sorted_email = set(sorted(email_results))
    for email in sorted_email:
        if email == '[]':
            pass
        elif email == '@' + domain:
            pass
        else:
            final_emails.append(email)
    email_count = len(final_emails)
    staff_count = len(person_seen)
    f_emails = sorted(final_emails)
    pwned_results = action_pwned(f_emails)
    c_accounts = len(pwned_results)

    print '\n\nEmail Addresses:\n'
    write_html(email_evidence_results, linkedin_evidence_results, pwned_results, report_location, company, data_mine)
    if f_emails:

        for email in f_emails:

            print '\t' + str(email).replace("u'","").replace("'","").replace('[','').replace(']','')
    else:
        print '\tNo Data To Be Found'

    print '\nCompromised Accounts:\n'
    if pwned_results:
        sorted_pwned = sorted(pwned_results)
        for account in sorted_pwned:
            print 'Account: \t{}'.format(account[0])
            print ' Domain: \t{}'.format(account[1])
            print '   Date: \t{}\n'.format(account[3])
    else:
        print '\tNo Data To Be Found'

    print '\nLinkedIn Results:\n'

    sorted_person = sorted(person_seen)
    if sorted_person:
        for person in sorted_person:
            print person
    else:
        print '\tNo Data To Be Found'

    if data_mine is not None:
        user_names = data_mine[0]
        software_list = data_mine[1]
        download_count = data_mine[2]
        download_list = data_mine[3]
        username_count = len(user_names)
        software_count = len(software_list)

        print '\nData Found In Document MetaData'
        print '\nPotential Usernames:\n'
        if user_names:
            for user in user_names:
                print '\t' + colored(user, 'red')
        else:
            print '\tNo Data To Be Found'

        print '\nSoftware And Versions Found:\n'
        if software_list:
            for software in software_list:
                print '\t' + colored(software, 'red')
        else:
            print '\tNo Data To Be Found'
    else:
        user_names = []
        software_list = []
        download_count = 0
        username_count = len(user_names)
        software_count = len(software_list)

    sorted_dict = collections.OrderedDict(sorted(brute_results_dict.items()))
    bruted_count = len(sorted_dict)
    print "\nBluto Results: \n"
    for item in sorted_dict:
        if item != '*.' + domain:
            if item != '@.' + domain:
                if item in sub_intrest:
                    print colored(item + "\t", 'red'), colored(sorted_dict[item], 'red')
                else:
                    print item + "\t",sorted_dict[item]

    time_spent_email_f = str(datetime.timedelta(seconds=(time_spent_email))).split('.')[0]
    time_spent_brute_f = str(datetime.timedelta(seconds=(time_spent_brute))).split('.')[0]
    time_spent_total_f = str(datetime.timedelta(seconds=(time_spent_total))).split('.')[0]

    print '\nHosts Identified: {}' .format(str(bruted_count))
    print 'Potential Emails Found: {}' .format(str(email_count))
    print 'Potential Staff Members Found: {}' .format(str(staff_count))
    print 'Compromised Accounts: {}' .format(str(c_accounts))
    print 'Potential Usernames Found: {}'.format(username_count)
    print 'Potential Software Found: {}'.format(software_count)
    print 'Documents Downloaded: {}'.format(download_count)
    print "Email Enumeration:", time_spent_email_f
    print "Requests executed:", str(check_count) + " in ", time_spent_brute_f
    print "Total Time:", time_spent_total_f

    info('Hosts Identified: {}' .format(str(bruted_count)))
    info("Email Enumeration: {}" .format(str(time_spent_email_f)))
    info('Compromised Accounts: {}' .format(str(c_accounts)))
    info('Potential Staff Members Found: {}' .format(str(staff_count)))
    info('Potential Emails Found: {}' .format(str(email_count)))
    info("Total Time:" .format(str(time_spent_total_f)))
    info('Docuements Downloaded: {}'.format(download_count))
    info('DNS No Wild Cards + Email Hunter Run completed')
    info('Output action_output_wild_false_hunter: Completed')

    domain_r = domain.split('.')
    docs = os.path.expanduser('~/Bluto/doc/{}/'.format(domain_r[0]))
    answers = ['no','n','y','yes']
    while True:
        print colored("\nWould you like to keep all local data?\n(Local Logs, Downloded Docuements, HTML Evidence Report)\n\nYes|No:", "red")
        answer = raw_input("").lower()
        if answer in answers:
            if answer == 'y' or answer == 'yes':
                domain
                print '\nThe documents are located here: {}'.format(docs)
                print 'The logs are located here: {}.'.format(LOG_DIR)
                print "\nAn evidence report has been written to {}\n".format(report_location)
                while True:
                    answer = raw_input("Would you like to open this report now? ").lower()
                    if answer in answers:
                        if answer == 'y' or answer == 'yes':
                            print '\nOpening {}' .format(report_location)
                            webbrowser.open('file://' + str(report_location))
                            break
                        else:
                            break
                    else:
                        print 'Your answer needs to be either yes|y|no|n rather than, {}' .format(answer)
                break
            else:
                shutil.rmtree(docs)
                shutil.rmtree(LOG_DIR)
                os.remove(report_location)
                break
        else:
            print '\tYour answer needs to be either yes|y|no|n rather than, {}' .format(answer)
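
The timedelta calls in this example are purely for display: wrapping an elapsed-seconds value in timedelta and taking str() yields an H:MM:SS string, and splitting on '.' drops the microsecond tail. A minimal sketch of that formatting trick:

import datetime

def human_duration(seconds):
    """Render elapsed seconds as [D day(s), ]H:MM:SS, dropping fractional seconds."""
    return str(datetime.timedelta(seconds=seconds)).split('.')[0]

print(human_duration(42.2))        # 0:00:42
print(human_duration(93784.5678))  # 1 day, 2:03:04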

Example 5

Project: eventgen Source File: eventgentoken.py
    def _getReplacement(self, old=None, earliestTime=None, latestTime=None, s=None):
        if self.replacementType == 'static':
            return self.replacement
        elif self.replacementType in ('timestamp', 'replaytimestamp'):
            if s.earliest and s.latest:
                if earliestTime and latestTime:
                    if latestTime>=earliestTime:
                        if s.timestamp == None:
                            minDelta = 0

                            ## Compute timeDelta as total_seconds
                            td = latestTime - earliestTime
                            maxDelta = timeDelta2secs(td)

                            ## Get random timeDelta
                            randomDelta = datetime.timedelta(seconds=random.randint(minDelta, maxDelta), microseconds=random.randint(0, latestTime.microsecond if latestTime.microsecond > 0 else 999999))

                            ## Compute replacmentTime
                            replacementTime = latestTime - randomDelta
                            s.timestamp = replacementTime
                        else:
                            replacementTime = s.timestamp

                        # logger.debug("Generating timestamp for sample '%s' with randomDelta %s, minDelta %s, maxDelta %s, earliestTime %s, latestTime %s, earliest: %s, latest: %s" % (s.name, randomDelta, minDelta, maxDelta, earliestTime, latestTime, s.earliest, s.latest))
                        
                        if self.replacementType == 'replaytimestamp':
                            if old != None and len(old) > 0:
                                # Determine type of timestamp to use for this token
                                # We can either be a string with one strptime format
                                # or we can be a json formatted list of strptime formats
                                currentts = None
                                try:
                                    strptimelist = json.loads(self.replacement)   
                                    # logger.debugv("Replaytimestamp formats: %s" % json.dumps(strptimelist))  
                                    for currentformat in strptimelist:
                                        try:
                                            timeformat = currentformat
                                            if timeformat == "%s":
                                                ts = float(old) if  len(old) < 10 else float(old) / (10**(len(old)-10))
                                                currentts = datetime.datetime.fromtimestamp(ts)
                                            else:
                                                currentts = datetime.datetime.strptime(old, timeformat)
                                            # logger.debug("Old '%s' Timeformat '%s' currentts '%s'" % (old, timeformat, currentts))
                                            if type(currentts) == datetime.datetime:
                                                break
                                        except ValueError:
                                            pass
                                    # logger.debugv("Currentts: %s" % currentts)
                                    if type(currentts) != datetime.datetime:
                                        # Total fail
                                        logger.error("Can't find strptime format for this timestamp '%s' in the list of formats.  Returning original value" % old)
                                        return old
                                except ValueError:
                                    # Not JSON, try to read as text
                                    timeformat = self.replacement
                                    try:
                                        if timeformat == "%s":
                                            ts = float(old) if  len(old) < 10 else float(old) / (10**(len(old)-10))
                                            currentts = datetime.datetime.fromtimestamp(ts)
                                        else:
                                            currentts = datetime.datetime.strptime(old, timeformat)
                                        # logger.debug("Timeformat '%s' currentts '%s'" % (timeformat, currentts))
                                    except ValueError:
                                        # Total fail
                                        logger.error("Can't match strptime format ('%s') to this timestamp '%s'.  Returning original value" % (timeformat, old))
                                        return old
                                    
                                    # Can't parse as strptime, try JSON
                                
                                # Check to make sure we parsed a year
                                if currentts.year == 1900:
                                    currentts = currentts.replace(year=s.now().year)
                                # We should now know the timeformat and currentts associated with this event
                                # If we're the first, save those values        
                                if self._replaytd == None:
                                    self._replaytd = replacementTime - currentts
                                
                                # logger.debug("replaytd %s" % self._replaytd)
                                replacementTime = currentts + self._replaytd
                                
                                # Randomize time a bit between last event and this one
                                # Note that we'll always end up shortening the time between
                                # events because we don't know when the next timestamp is going to be
                                if s.bundlelines:
                                    if self._lastts == None:
                                        self._lastts = replacementTime
                                    oldtd = replacementTime - self._lastts
                                    randomsecs = random.randint(0, oldtd.seconds)
                                    if oldtd.seconds > 0:
                                        randommicrosecs = random.randint(0, 1000000)
                                    else:
                                        randommicrosecs = random.randint(0, oldtd.microseconds)
                                    randomtd = datetime.timedelta(seconds=randomsecs, microseconds=randommicrosecs)
                                    replacementTime -= randomtd
                                else:
                                    randomtd = datetime.timedelta()
                                self._lastts = replacementTime
                                replacement = timeformat.replace('%s', str(round(time.mktime(replacementTime.timetuple()))).rstrip('0').rstrip('.'))
                                replacementTime = replacementTime.strftime(replacement)
                                # logger.debugv("ReplacementTime: %s" % replacementTime)
                                # logger.debug("Old '%s' Timeformat '%s' currentts '%s' replacementTime '%s' replaytd '%s' randomtd '%s'" \
                                #             % (old, timeformat, currentts, replacementTime, self._replaytd, randomtd))
                            else:
                                logger.error("Could not find old value, needed for replaytimestamp")
                                return old
                        else:
                            replacement = self.replacement.replace('%s', str(round(time.mktime(replacementTime.timetuple()))).rstrip('0').rstrip('.'))
                            replacementTime = replacementTime.strftime(replacement)
                        ## replacementTime == replacement for invalid strptime specifiers
                        if replacementTime != self.replacement.replace('%', ''):
                            return replacementTime
                        else:
                            logger.error("Invalid strptime specifier '%s' detected; will not replace" \
                                        % (self.replacement) )
                            return old
                    ## earliestTime/latestTime not proper
                    else:
                        logger.error("Earliest specifier '%s', value '%s' is greater than latest specifier '%s', value '%s' for sample '%s'; will not replace" \
                                    % (s.earliest, earliestTime, s.latest, latestTime, s.name) )
                        return old
            ## earliest/latest not proper
            else:
                logger.error('Earliest or latest specifier were not set; will not replace')
                return old
        elif self.replacementType in ('random', 'rated'):
            ## Validations:
            if self._integerMatch != None:
                integerMatch = self._integerMatch
            else:
                integerRE = re.compile('integer\[([-]?\d+):([-]?\d+)\]', re.I)
                integerMatch = integerRE.match(self.replacement)
                self._integerMatch = integerMatch
            
            if self._floatMatch != None:
                floatMatch = self._floatMatch
            else:
                floatRE = re.compile('float\[(\d+)\.(\d+):(\d+)\.(\d+)\]', re.I)
                floatMatch = floatRE.match(self.replacement)
                self._floatMatch = floatMatch

            if self._stringMatch != None:
                stringMatch = self._stringMatch
            else:
                stringRE = re.compile('string\((\d+)\)', re.I)
                stringMatch = stringRE.match(self.replacement)
                self._stringMatch = stringMatch

            if self._hexMatch != None:
                hexMatch = self._hexMatch
            else:       
                hexRE = re.compile('hex\((\d+)\)', re.I)
                hexMatch = hexRE.match(self.replacement)
                self._hexMatch = hexMatch

            if self._listMatch != None:
                listMatch = self._listMatch
            else:
                listRE = re.compile('list(\[[^\]]+\])', re.I)
                listMatch = listRE.match(self.replacement)
                self._listMatch = listMatch

            ## Valid replacements: ipv4 | ipv6 | integer[<start>:<end>] | string(<i>)
            if self.replacement.lower() == 'ipv4':
                x = 0
                replacement = ''

                while x < 4:
                    replacement += str(random.randint(0, 255)) + '.'
                    x += 1

                replacement = replacement.strip('.')
                return replacement
            elif self.replacement.lower() == 'ipv6':
                x = 0
                replacement = ''

                while x < 8:
                    replacement += hex(random.randint(0, 65535))[2:] + ':'
                    x += 1

                replacement = replacement.strip(':')
                return replacement
            elif self.replacement.lower() == 'mac':
                x = 0
                replacement = ''

                ## Give me 6 blocks of 2 hex
                while x < 6:
                    y = 0
                    while y < 2:
                        replacement += hex(random.randint(0, 15))[2:]
                        y += 1
                    replacement += ':'
                    x += 1

                replacement = replacement.strip(':')
                return replacement
            elif self.replacement.lower() == 'guid':
                return str(uuid.uuid4())
            elif integerMatch:
                startInt = int(integerMatch.group(1))
                endInt = int(integerMatch.group(2))

                if endInt >= startInt:
                    replacementInt = random.randint(startInt, endInt)
                    if self.replacementType == 'rated':
                        rateFactor = 1.0
                        if type(s.hourOfDayRate) == dict:
                            try:
                                rateFactor *= s.hourOfDayRate[str(s.now().hour)]
                            except KeyError:
                                import traceback
                                stack =  traceback.format_exc()
                                logger.error("Hour of day rate failed for token %s.  Stacktrace %s" % stack)
                        if type(s.dayOfWeekRate) == dict:
                            try:
                                weekday = datetime.date.weekday(s.now())
                                if weekday == 6:
                                    weekday = 0
                                else:
                                    weekday += 1
                                rateFactor *= s.dayOfWeekRate[str(weekday)]
                            except KeyError:
                                import traceback
                                stack =  traceback.format_exc()
                                logger.error("Day of week rate failed.  Stacktrace %s" % stack)
                        replacementInt = int(round(replacementInt * rateFactor, 0))
                    replacement = str(replacementInt)
                    return replacement
                else:
                    logger.error("Start integer %s greater than end integer %s; will not replace" % (startInt, endInt) )
                    return old
            elif floatMatch:
                try:
                    startFloat = float(floatMatch.group(1)+'.'+floatMatch.group(2))
                    endFloat = float(floatMatch.group(3)+'.'+floatMatch.group(4))
                    
                    if endFloat >= startFloat:
                        floatret = round(random.uniform(startFloat,endFloat), len(floatMatch.group(2)))
                        if self.replacementType == 'rated':
                            rateFactor = 1.0
                            now = s.now()
                            if type(s.hourOfDayRate) == dict:
                                try:
                                    rateFactor *= s.hourOfDayRate[str(now.hour)]
                                except KeyError:
                                    import traceback
                                    stack =  traceback.format_exc()
                                    logger.error("Hour of day rate failed for token %s.  Stacktrace %s" % stack)
                            if type(s.dayOfWeekRate) == dict:
                                try:
                                    weekday = datetime.date.weekday(now)
                                    if weekday == 6:
                                        weekday = 0
                                    else:
                                        weekday += 1
                                    rateFactor *= s.dayOfWeekRate[str(weekday)]
                                except KeyError:
                                    import traceback
                                    stack =  traceback.format_exc()
                                    logger.error("Day of week rate failed.  Stacktrace %s" % stack)
                            floatret = round(floatret * rateFactor, len(floatMatch.group(2)))
                        floatret = str(floatret)
                        return floatret
                    else:
                        logger.error("Start float %s greater than end float %s; will not replace" % (startFloat, endFloat))
                        return old
                except ValueError:
                    logger.error("Could not parse float[%s.%s:%s.%s]" % (floatMatch.group(1), floatMatch.group(2), \
                                floatMatch.group(3), floatMatch.group(4)))
                    return old
            elif stringMatch:
                strLength = int(stringMatch.group(1))
                if strLength == 0:
                    return ''
                elif strLength > 0:
                    replacement = ''
                    while len(replacement) < strLength:
                        ## Generate a random ASCII between dec 33->126
                        replacement += chr(random.randint(33, 126))
                        ## Practice safe strings
                        replacement = re.sub('%[0-9a-fA-F]+', '', urllib.quote(replacement))
                    
                    return replacement
                else:
                    logger.error("Length specifier %s for string replacement must be greater than 0; will not replace" % (strLength) )
                    return old
            elif hexMatch:
                strLength = int(hexMatch.group(1))

                replacement = ''
                hexList = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F']
                while len(replacement) < strLength:
                    replacement += hexList[random.randint(0, 15)]

                return replacement
            elif listMatch:
                try:
                    value = json.loads(listMatch.group(1))
                except:
                    logger.error("Could not parse json for '%s' in sample '%s'" % (listMatch.group(1), s.name))
                    return old
                return random.choice(value)

            else:
                logger.error("Unknown replacement value '%s' for replacementType '%s'; will not replace" % (self.replacement, self.replacementType) )
                return old
        elif self.replacementType in ('file', 'mvfile'):
            if self._replacementFile != None:
                replacementFile = self._replacementFile
                replacementColumn = self._replacementColumn
            else:
                try:
                    paths = self.replacement.split(':')
                    if(len(paths) == 1):
                        replacementColumn = 0
                    else:
                        try: # When it's not a mvfile, there's no number on the end:
                            replacementColumn = int(paths[-1])
                        except (ValueError):
                            replacementColumn = 0
                    if(replacementColumn > 0):
                        # This supports having a drive-letter colon
                        replacementFile = s.pathParser(":".join(paths[0:-1]))
                    else:
                        replacementFile = s.pathParser(self.replacement)
                except ValueError, e:
                    logger.error("Replacement string '%s' improperly formatted.  Should be /path/to/file or /path/to/file:column" % (self.replacement))
                    return old
                self._replacementFile = replacementFile
                self._replacementColumn = replacementColumn

            # If we've seen this file before, simply return already read results
            # This applies only if we're looking at a multivalue file and we want to
            # return the same random pick on every iteration
            if replacementColumn > 0 and replacementFile in self.mvhash:
                if replacementColumn > len(self.mvhash[replacementFile]):
                    logger.error("Index for column '%s' in replacement file '%s' is out of bounds" % (replacementColumn, replacementFile))
                    return old
                else:
                    # logger.debug("Returning mvhash: %s" % self.mvhash[replacementFile][replacementColumn-1])
                    return self.mvhash[replacementFile][replacementColumn-1]
            else:
                # Adding caching of the token file to avoid reading it every iteration
                if self._tokenfile != None:
                    replacementLines = self._tokenfile
                ## Otherwise, lets read the file and build our cached results, pick a result and return it
                else:
                    # logger.debug("replacementFile: %s replacementColumn: %s" % (replacementFile, replacementColumn))
                    replacementFile = os.path.abspath(replacementFile)
                    logger.debug("Normalized replacement file %s" % replacementFile)
                    if os.path.exists(replacementFile) and os.path.isfile(replacementFile):
                        replacementFH = open(replacementFile, 'rU')
                        replacementLines = replacementFH.readlines()
                        replacementFH.close()

                        if len(replacementLines) == 0:
                            logger.error("Replacement file '%s' is empty; will not replace" % (replacementFile) )
                            return old
                        else:
                            self._tokenfile = replacementLines
                    else:
                        logger.error("File '%s' does not exist" % (replacementFile))
                        return old

                replacement = replacementLines[random.randint(0, len(replacementLines)-1)].strip()

                if replacementColumn > 0:
                    self.mvhash[replacementFile] = replacement.split(',')

                    if replacementColumn > len(self.mvhash[replacementFile]):
                        logger.error("Index for column '%s' in replacement file '%s' is out of bounds" % (replacementColumn, replacementFile))
                        return old
                    else:
                        return self.mvhash[replacementFile][replacementColumn-1]
                else:
                    return replacement
        elif self.replacementType == 'integerid':
            temp = self.replacement
            self.replacement = str(int(self.replacement) + 1)
            return temp

        else:
            logger.error("Unknown replacementType '%s'; will not replace" % (replacementType) )
            return old
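
The file/mvfile branch above reads the replacement file only once, caches its lines, and then serves a random line (or a random column of a line) on every later call. A stripped-down sketch of that read-once, pick-random pattern, using illustrative names rather than the sample generator's own attributes:

import os
import random

_line_cache = {}

def random_replacement(path):
    """Read a replacement file once, cache its lines, then return a random line per call."""
    path = os.path.abspath(path)
    if path not in _line_cache:
        with open(path) as fh:
            lines = [line.strip() for line in fh if line.strip()]
        if not lines:
            raise ValueError("Replacement file '%s' is empty" % path)
        _line_cache[path] = lines
    return random.choice(_line_cache[path])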

Example 6

Project: Bluto Source File: output.py
def action_output_wild_false(brute_results_dict, sub_intrest, google_results, bing_true_results, linkedin_results, check_count, domain, time_spent_email, time_spent_brute, time_spent_total, report_location, company, data_mine):
    info('Output action_output_wild_false: Start')
    linkedin_evidence_results = []
    email_evidence_results = []
    email_results = []
    email_seen = []
    url_seen = []
    person_seen = []
    final_emails = []

    for email, url in google_results:
        try:
            e1, e2 = email.split(',')
            if url not in email_seen:
                email_seen.append(url)
                email_evidence_results.append((str(e2).replace(' ',''),url))
                email_evidence_results.append((str(e1).replace(' ',''),url))
                email_results.append((str(e2).replace(' ','')))
                email_results.append((str(e1).replace(' ','')))

        except ValueError:
            if url not in email_seen:
                email_seen.append(url)
                email_evidence_results.append((str(email).replace(' ',''),url))
                email_results.append(str(email).replace(' ',''))

    for e, u in bing_true_results:
        email_results.append(e)
        if u not in url_seen:
            email_evidence_results.append((e, u))

    for url, person, description in linkedin_results:
        if person not in person_seen:
            person_seen.append(person)
            linkedin_evidence_results.append((url, person, description))

    linkedin_evidence_results.sort(key=lambda tup: tup[1])
    sorted_email = set(sorted(email_results))
    for email in sorted_email:
        if email == '[]':
            pass
        elif email == '@' + domain:
            pass
        else:
            final_emails.append(email)
    email_count = len(final_emails)
    staff_count = len(person_seen)
    f_emails = sorted(final_emails)
    pwned_results = action_pwned(f_emails)
    c_accounts = len(pwned_results)

    print '\n\nEmail Addresses:\n'
    write_html(email_evidence_results, linkedin_evidence_results, pwned_results, report_location, company, data_mine)
    if f_emails:

        for email in f_emails:

            print str(email).replace("u'","").replace("'","").replace('[','').replace(']','')
    else:
        print '\tNo Data To Be Found'

    print '\nCompromised Accounts:\n'
    if pwned_results:
        sorted_pwned = sorted(pwned_results)
        for account in sorted_pwned:
            print 'Account: \t{}'.format(account[0])
            print 'Domain: \t{}'.format(account[1])
            print 'Date: \t{}\n'.format(account[3])
    else:
        print '\tNo Data To Be Found'

    print '\nLinkedIn Results:\n'

    sorted_person = sorted(person_seen)
    if sorted_person:
        for person in sorted_person:
            print person
    else:
        print '\tNo Data To Be Found'

    if data_mine is not None:
        user_names = data_mine[0]
        software_list = data_mine[1]
        download_count = data_mine[2]
        download_list = data_mine[3]
        username_count = len(user_names)
        software_count = len(software_list)

        print '\nData Found In Document MetaData'
        print '\nPotential Usernames:\n'
        if user_names:
            for user in user_names:
                print '\t' + colored(user, 'red')
        else:
            print '\tNo Data To Be Found'

        print '\nSoftware And Versions Found:\n'
        if software_list:
            for software in software_list:
                print '\t' + colored(software, 'red')
        else:
            print '\tNo Data To Be Found'
    else:
        user_names = []
        software_list = []
        download_count = 0
        username_count = len(user_names)
        software_count = len(software_list)

    sorted_dict = collections.OrderedDict(sorted(brute_results_dict.items()))
    bruted_count = len(sorted_dict)
    print "\nBluto Results: \n"
    for item in sorted_dict:
        if item in sub_intrest:
            print colored(item + "\t", 'red'), colored(sorted_dict[item], 'red')
        else:
            print item + "\t",sorted_dict[item]


    time_spent_email_f = str(datetime.timedelta(seconds=(time_spent_email))).split('.')[0]
    time_spent_brute_f = str(datetime.timedelta(seconds=(time_spent_brute))).split('.')[0]
    time_spent_total_f = str(datetime.timedelta(seconds=(time_spent_total))).split('.')[0]

    print '\nHosts Identified: {}' .format(str(bruted_count))
    print 'Potential Emails Found: {}' .format(str(email_count))
    print 'Potential Staff Members Found: {}' .format(str(staff_count))
    print 'Compromised Accounts: {}' .format(str(c_accounts))
    print 'Potential Usernames Found: {}'.format(username_count)
    print 'Potential Software Found: {}'.format(software_count)
    print 'Documents Downloaded: {}'.format(download_count)
    print "Email Enumeration:", time_spent_email_f
    print "Requests executed:", str(check_count) + " in ", time_spent_brute_f
    print "Total Time:", time_spent_total_f

    info('Hosts Identified: {}' .format(str(bruted_count)))
    info("Email Enumeration: {}" .format(str(time_spent_email_f)))
    info('Compromised Accounts: {}' .format(str(c_accounts)))
    info('Potential Staff Members Found: {}' .format(str(staff_count)))
    info('Potential Emails Found: {}' .format(str(email_count)))
    info('Potential Usernames Found: {}'.format(username_count))
    info('Potential Software Found: {}'.format(software_count))
    info('Documents Downloaded: {}'.format(download_count))
    info("Total Time: {}".format(str(time_spent_total_f)))
    info('DNS No Wild Cards + Email Hunter Run completed')
    info('Output action_output_wild_false: Completed')

    domain_r = domain.split('.')
    docs = os.path.expanduser('~/Bluto/doc/{}/'.format(domain_r[0]))
    answers = ['no','n','y','yes']
    while True:
        answer = raw_input("\nWould you like to keep all local data?\n(Local Logs, Downloded Docuements, HTML Evidence Report)\n\nYes|No:").lower()
        if answer in answers:
            if answer == 'y' or answer == 'yes':
                domain
                print '\nThe documents are located here: {}'.format(docs)
                print 'The logs are located here: {}.'.format(LOG_DIR)
                print "\nAn evidence report has been written to {}\n".format(report_location)
                while True:
                    answer = raw_input("Would you like to open this report now? ").lower()
                    if answer in answers:
                        if answer == 'y' or answer == 'yes':
                            print '\nOpening {}' .format(report_location)
                            webbrowser.open('file://' + str(report_location))
                            break
                        else:
                            break
                    else:
                        print 'Your answer needs to be either yes|y|no|n rather than {}'.format(answer)
                break
            else:
                shutil.rmtree(docs)
                shutil.rmtree(LOG_DIR)
                os.remove(report_location)
                break
        else:
            print '\tYour answer needs to be either yes|y|no|n rather than {}'.format(answer)
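
The datetime.timedelta calls in this example turn elapsed times measured in seconds into readable durations: wrapping the float in timedelta and converting it to a string yields 'H:MM:SS.ffffff', and splitting on '.' drops the microseconds. A minimal sketch of that pattern (the timing values here are illustrative, not Bluto's own):

import datetime
import time

start = time.time()
time.sleep(1.25)  # stand-in for the real enumeration work
elapsed = time.time() - start

# str(timedelta) renders as 'H:MM:SS.ffffff'; split('.') keeps only 'H:MM:SS'
readable = str(datetime.timedelta(seconds=elapsed)).split('.')[0]
print('Total Time: ' + readable)  # e.g. '0:00:01'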

Example 7

Project: Medusa Source File: tz.py
    def _read_tzfile(self, fileobj):
        out = _tzfile()

        # From tzfile(5):
        #
        # The time zone information files used by tzset(3)
        # begin with the magic characters "TZif" to identify
        # them as time zone information files, followed by
        # sixteen bytes reserved for future use, followed by
        # six four-byte values of type long, written in a
        # ``standard'' byte order (the high-order  byte
        # of the value is written first).
        if fileobj.read(4).decode() != "TZif":
            raise ValueError("magic not found")

        fileobj.read(16)

        (
            # The number of UTC/local indicators stored in the file.
            ttisgmtcnt,

            # The number of standard/wall indicators stored in the file.
            ttisstdcnt,

            # The number of leap seconds for which data is
            # stored in the file.
            leapcnt,

            # The number of "transition times" for which data
            # is stored in the file.
            timecnt,

            # The number of "local time types" for which data
            # is stored in the file (must not be zero).
            typecnt,

            # The  number  of  characters  of "time zone
            # abbreviation strings" stored in the file.
            charcnt,

        ) = struct.unpack(">6l", fileobj.read(24))

        # The above header is followed by tzh_timecnt four-byte
        # values  of  type long,  sorted  in ascending order.
        # These values are written in ``standard'' byte order.
        # Each is used as a transition time (as  returned  by
        # time(2)) at which the rules for computing local time
        # change.

        if timecnt:
            out.trans_list = list(struct.unpack(">%dl" % timecnt,
                                                  fileobj.read(timecnt*4)))
        else:
            out.trans_list = []

        # Next come tzh_timecnt one-byte values of type unsigned
        # char; each one tells which of the different types of
        # ``local time'' types described in the file is associated
        # with the same-indexed transition time. These values
        # serve as indices into an array of ttinfo structures that
        # appears next in the file.

        if timecnt:
            out.trans_idx = struct.unpack(">%dB" % timecnt,
                                            fileobj.read(timecnt))
        else:
            out.trans_idx = []

        # Each ttinfo structure is written as a four-byte value
        # for tt_gmtoff  of  type long,  in  a  standard  byte
        # order, followed  by a one-byte value for tt_isdst
        # and a one-byte  value  for  tt_abbrind.   In  each
        # structure, tt_gmtoff  gives  the  number  of
        # seconds to be added to UTC, tt_isdst tells whether
        # tm_isdst should be set by  localtime(3),  and
        # tt_abbrind serves  as an index into the array of
        # time zone abbreviation characters that follow the
        # ttinfo structure(s) in the file.

        ttinfo = []

        for i in range(typecnt):
            ttinfo.append(struct.unpack(">lbb", fileobj.read(6)))

        abbr = fileobj.read(charcnt).decode()

        # Then there are tzh_leapcnt pairs of four-byte
        # values, written in  standard byte  order;  the
        # first  value  of  each pair gives the time (as
        # returned by time(2)) at which a leap second
        # occurs;  the  second  gives the  total  number of
        # leap seconds to be applied after the given time.
        # The pairs of values are sorted in ascending order
        # by time.

        # Not used, for now (but read anyway for correct file position)
        if leapcnt:
            leap = struct.unpack(">%dl" % (leapcnt*2),
                                 fileobj.read(leapcnt*8))

        # Then there are tzh_ttisstdcnt standard/wall
        # indicators, each stored as a one-byte value;
        # they tell whether the transition times associated
        # with local time types were specified as standard
        # time or wall clock time, and are used when
        # a time zone file is used in handling POSIX-style
        # time zone environment variables.

        if ttisstdcnt:
            isstd = struct.unpack(">%db" % ttisstdcnt,
                                  fileobj.read(ttisstdcnt))

        # Finally, there are tzh_ttisgmtcnt UTC/local
        # indicators, each stored as a one-byte value;
        # they tell whether the transition times associated
        # with local time types were specified as UTC or
        # local time, and are used when a time zone file
        # is used in handling POSIX-style time zone envi-
        # ronment variables.

        if ttisgmtcnt:
            isgmt = struct.unpack(">%db" % ttisgmtcnt,
                                  fileobj.read(ttisgmtcnt))

        # Build ttinfo list
        out.ttinfo_list = []
        for i in range(typecnt):
            gmtoff, isdst, abbrind = ttinfo[i]
            # Round to full-minutes if that's not the case. Python's
            # datetime doesn't accept sub-minute timezones. Check
            # http://python.org/sf/1447945 for some information.
            gmtoff = 60 * ((gmtoff + 30) // 60)
            tti = _ttinfo()
            tti.offset = gmtoff
            tti.dstoffset = datetime.timedelta(0)
            tti.delta = datetime.timedelta(seconds=gmtoff)
            tti.isdst = isdst
            tti.abbr = abbr[abbrind:abbr.find('\x00', abbrind)]
            tti.isstd = (ttisstdcnt > i and isstd[i] != 0)
            tti.isgmt = (ttisgmtcnt > i and isgmt[i] != 0)
            out.ttinfo_list.append(tti)

        # Replace ttinfo indexes for ttinfo objects.
        out.trans_idx = [out.ttinfo_list[idx] for idx in out.trans_idx]

        # Set standard, dst, and before ttinfos. before will be
        # used when a given time is before any transitions,
        # and will be set to the first non-dst ttinfo, or to
        # the first dst, if all of them are dst.
        out.ttinfo_std = None
        out.ttinfo_dst = None
        out.ttinfo_before = None
        if out.ttinfo_list:
            if not out.trans_list:
                out.ttinfo_std = out.ttinfo_first = out.ttinfo_list[0]
            else:
                for i in range(timecnt-1, -1, -1):
                    tti = out.trans_idx[i]
                    if not out.ttinfo_std and not tti.isdst:
                        out.ttinfo_std = tti
                    elif not out.ttinfo_dst and tti.isdst:
                        out.ttinfo_dst = tti

                    if out.ttinfo_std and out.ttinfo_dst:
                        break
                else:
                    if out.ttinfo_dst and not out.ttinfo_std:
                        out.ttinfo_std = out.ttinfo_dst

                for tti in out.ttinfo_list:
                    if not tti.isdst:
                        out.ttinfo_before = tti
                        break
                else:
                    out.ttinfo_before = out.ttinfo_list[0]

        # Now fix transition times to become relative to wall time.
        #
        # I'm not sure about this. In my tests, the tz source file
        # is setup to wall time, and in the binary file isstd and
        # isgmt are off, so it should be in wall time. OTOH, it's
        # always in gmt time. Let me know if you have comments
        # about this.
        laststdoffset = None
        for i, tti in enumerate(out.trans_idx):
            if not tti.isdst:
                offset = tti.offset
                laststdoffset = offset
            else:
                if laststdoffset is not None:
                    # Store the DST offset as well and update it in the list
                    tti.dstoffset = tti.offset - laststdoffset
                    out.trans_idx[i] = tti

                offset = laststdoffset or 0

            out.trans_list[i] += offset

        # In case we missed any DST offsets on the way in for some reason, make
        # a second pass over the list, looking for the /next/ DST offset.
        laststdoffset = None
        for i in reversed(range(len(out.trans_idx))):
            tti = out.trans_idx[i]
            if tti.isdst:
                if not (tti.dstoffset or laststdoffset is None):
                    tti.dstoffset = tti.offset - laststdoffset
            else:
                laststdoffset = tti.offset

            if not isinstance(tti.dstoffset, datetime.timedelta):
                tti.dstoffset = datetime.timedelta(seconds=tti.dstoffset)
            
            out.trans_idx[i] = tti

        out.trans_idx = tuple(out.trans_idx)
        out.trans_list = tuple(out.trans_list)

        return out
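
In this reader, datetime.timedelta represents UTC offsets taken from the binary tzfile: tti.delta is built from the minute-rounded gmtoff in seconds, and tti.dstoffset starts as timedelta(0) and any integer DST offset computed later is normalized back to timedelta(seconds=...). A short sketch of that conversion; the offset value is made up, and datetime.timezone (Python 3 standard library) appears only to show how such an offset can be used:

import datetime

gmtoff = -18000                               # raw offset in seconds, e.g. UTC-05:00
gmtoff = 60 * ((gmtoff + 30) // 60)           # round to whole minutes, as above
offset = datetime.timedelta(seconds=gmtoff)
dstoffset = datetime.timedelta(0)             # standard time: no DST correction

print(offset)                                 # -1 day, 19:00:00
print(datetime.timezone(offset))              # UTC-05:00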

Example 8

Project: my-weather-indicator Source File: weatherwidget.py
    def parse_data(self):
        if self.skin is not None and\
                os.path.exists(os.path.join(self.skin, 'skin')):
            maindir = self.skin
            ans = self.read_main_data()
            if ans is not None and self.weather_data is not None:
                mainsurface = cairo.ImageSurface(
                    cairo.FORMAT_ARGB32, ans[0], ans[1])
                cr = cairo.Context(mainsurface)
                # try:
                for index, line in enumerate(self.widgetdata.split('\n')):
                    row = line.split('|')
                    cr.save()
                    if row is not None and len(row) > 1:
                        if row[0] == 'CLOCK':
                            print(row)
                            atype, minutesorhours, fileimage, x, y, width,\
                                height, xpos, ypos = row
                            fileimage = os.path.join(maindir, fileimage)
                            x = float(x)
                            y = float(y)
                            width = float(width)
                            height = float(height)
                            surface = get_surface_from_file(fileimage)
                            print(surface.get_width(), surface.get_height())
                            if surface is not None:
                                s_width = surface.get_width()
                                s_height = surface.get_height()
                                if xpos == 'CENTER':
                                    x = x-width/2.0
                                elif xpos == 'RIGHT':
                                    x = x-width
                                if ypos == 'CENTER':
                                    y = y-height/2.0
                                elif ypos == 'BOTTOM':
                                    y = y-height
                                hours = float(self.weather_data['current_conditions']['rawOffset'])
                                now = self.datetime +\
                                    datetime.timedelta(hours=hours)
                                atime = float(now.hour) + float(now.minute)/60.0
                                hours = atime
                                if not self.a24h and hours>12:
                                    hours -= 12.0
                                minutes = (atime -int(atime))*60.0
                                cr.translate(x,y)
                                cr.scale(width/s_width,height/s_height)
                                if minutesorhours == '$HOUR$':
                                    cr.rotate(2.0*math.pi/12.0*hours-math.pi/2.0)
                                elif minutesorhours == '$MINUTES$':
                                    cr.rotate(2.0*math.pi/60.0*minutes-math.pi/2.0)
                                cr.set_source_surface(surface)
                                cr.paint()
                        elif row[0] == 'IMAGE':
                            atype, fileimage, x, y, width, height, xpos, ypos = row
                            if self.weather_data is not None:
                                if fileimage == '$CONDITION$':
                                    fileimage = os.path.join(comun.WIMAGESDIR,self.weather_data['current_conditions']['condition_image'])
                                elif fileimage == '$CONDITION_ICON_LIGHT$':
                                    fileimage = os.path.join(comun.ICONDIR,self.weather_data['current_conditions']['condition_icon_light'])
                                elif fileimage == '$CONDITION_ICON_DARK$':
                                    fileimage = os.path.join(comun.ICONDIR,self.weather_data['current_conditions']['condition_icon_dark'])
                                elif fileimage == '$MOONPHASE$':
                                    fileimage = os.path.join(comun.IMAGESDIR,self.weather_data['current_conditions']['moon_icon'])
                                elif fileimage == '$WIND$':
                                    fileimage = os.path.join(comun.IMAGESDIR,self.weather_data['current_conditions']['wind_icon'])
                                elif fileimage == '$CONDITION_01$' and len(self.weather_data['forecasts'])>0:
                                    fileimage = os.path.join(comun.WIMAGESDIR,self.weather_data['forecasts'][0]['condition_image'])
                                elif fileimage == '$CONDITION_02$' and len(self.weather_data['forecasts'])>1:
                                    fileimage = os.path.join(comun.WIMAGESDIR,self.weather_data['forecasts'][1]['condition_image'])
                                elif fileimage == '$CONDITION_03$' and len(self.weather_data['forecasts'])>2:
                                    fileimage = os.path.join(comun.WIMAGESDIR,self.weather_data['forecasts'][2]['condition_image'])
                                elif fileimage == '$CONDITION_04$' and len(self.weather_data['forecasts'])>3:
                                    fileimage = os.path.join(comun.WIMAGESDIR,self.weather_data['forecasts'][3]['condition_image'])
                                elif fileimage == '$CONDITION_05$' and len(self.weather_data['forecasts'])>4:
                                    fileimage = os.path.join(comun.WIMAGESDIR,self.weather_data['forecasts'][4]['condition_image'])
                                elif fileimage == '$MOONPHASE_01$' and len(self.weather_data['forecasts'])>0:
                                    fileimage = os.path.join(comun.WIMAGESDIR,self.weather_data['forecasts'][0]['moon_phase'])
                                elif fileimage == '$MOONPHASE_02$' and len(self.weather_data['forecasts'])>1:
                                    fileimage = os.path.join(comun.WIMAGESDIR,self.weather_data['forecasts'][1]['moon_phase'])
                                elif fileimage == '$MOONPHASE_03$' and len(self.weather_data['forecasts'])>2:
                                    fileimage = os.path.join(comun.WIMAGESDIR,self.weather_data['forecasts'][2]['moon_phase'])
                                elif fileimage == '$MOONPHASE_04$' and len(self.weather_data['forecasts'])>3:
                                    fileimage = os.path.join(comun.WIMAGESDIR,self.weather_data['forecasts'][3]['moon_phase'])
                                elif fileimage == '$MOONPHASE_05$' and len(self.weather_data['forecasts'])>4:
                                    fileimage = os.path.join(comun.WIMAGESDIR,self.weather_data['forecasts'][4]['moon_phase'])
                                elif fileimage == '$WIND_01$' and len(self.weather_data['forecasts'])>0:
                                    fileimage = os.path.join(comun.WIMAGESDIR,self.weather_data['forecasts'][0]['wind_icon'])
                                elif fileimage == '$WIND_02$' and len(self.weather_data['forecasts'])>1:
                                    fileimage = os.path.join(comun.WIMAGESDIR,self.weather_data['forecasts'][1]['wind_icon'])
                                elif fileimage == '$WIND_03$' and len(self.weather_data['forecasts'])>2:
                                    fileimage = os.path.join(comun.WIMAGESDIR,self.weather_data['forecasts'][2]['wind_icon'])
                                elif fileimage == '$WIND_04$' and len(self.weather_data['forecasts'])>3:
                                    fileimage = os.path.join(comun.WIMAGESDIR,self.weather_data['forecasts'][3]['wind_icon'])
                                elif fileimage == '$WIND_05$' and len(self.weather_data['forecasts'])>4:
                                    fileimage = os.path.join(comun.WIMAGESDIR,self.weather_data['forecasts'][4]['wind_icon'])
                                else:
                                    fileimage = os.path.join(maindir,fileimage)
                            else:
                                fileimage = os.path.join(maindir,fileimage)
                            x = float(x)
                            y = float(y)
                            width = float(width)
                            height = float(height)
                            surface = get_surface_from_file(fileimage)
                            if surface is not None:
                                s_width = surface.get_width()
                                s_height = surface.get_height()
                                if xpos == 'CENTER':
                                    x = x-width/2.0
                                elif xpos == 'RIGHT':
                                    x = x-width
                                if ypos == 'CENTER':
                                    y = y-height/2.0
                                elif ypos == 'BOTTOM':
                                    y = y-height
                                cr.translate(x,y)
                                cr.scale(width/s_width,height/s_height)
                                cr.set_source_surface(surface)
                                cr.paint()
                        elif row[0] == 'TEXT':
                            atype, text, x, y, font, size, color, xpos, ypos = row
                            x = float(x)
                            y = float(y)
                            size = int(size)
                            r,g,b,a = color.split(',')
                            cr.set_source_rgba(float(r),float(g),float(b),float(a))
                            cr.select_font_face(font)
                            cr.set_font_size(size)
                            now = self.datetime + datetime.timedelta(hours=float(self.weather_data['current_conditions']['rawOffset']))
                            if self.parse_time:
                                now = self.datetime + datetime.timedelta(hours=float(self.weather_data['current_conditions']['rawOffset']))
                                hours = now.hour
                                if not self.a24h:
                                    if hours>12:
                                        hours -= 12
                                    if hours < 1:
                                        hours += 12
                                hours = str(hours)
                                hours = '0'*(2-len(hours))+hours
                                minutes = str(now.minute)
                                minutes = '0'*(2-len(minutes))+minutes
                                if text.find('$HOUR$')>-1:
                                    text = text.replace('$HOUR$',hours)
                                if text.find('$MINUTES$')>-1:
                                    text = text.replace('$MINUTES$',minutes)
                            if text.find('$WEEKDAY$')>-1:
                                text = text.replace('$WEEKDAY$',now.strftime('%A'))
                            if text.find('$DAY$')>-1:
                                text = text.replace('$DAY$',now.strftime('%d'))
                            if text.find('$MONTH$')>-1:
                                text = text.replace('$MONTH$',now.strftime('%m'))
                            if text.find('$MONTHNAME$')>-1:
                                text = text.replace('$MONTHNAME$',now.strftime('%B'))
                            if text.find('$YEAR$')>-1:
                                text = text.replace('$YEAR$',now.strftime('%Y'))
                            if text.find('$LOCATION$')>-1 and self.location is not None:
                                text = text.replace('$LOCATION$',self.location)
                            if self.weather_data is not None:
                                if text.find('$TEMPERATURE$')>-1:
                                    text = text.replace('$TEMPERATURE$','{0}{1:c}'.format(self.weather_data['current_conditions']['temperature'],176))
                                if text.find('$MAX_TEMPERATURE$')>-1:
                                    text = text.replace('$MAX_TEMPERATURE$','{0}{1:c}'.format(self.weather_data['forecasts'][0]['high'],176))
                                if text.find('$MIN_TEMPERATURE$')>-1:
                                    text = text.replace('$MIN_TEMPERATURE$','{0}{1:c}'.format(self.weather_data['forecasts'][0]['low'],176))
                                if text.find('$HUMIDITY$')>-1:
                                    text = text.replace('$HUMIDITY$',self.weather_data['current_conditions']['humidity'])
                                if text.find('$PRESSURE$')>-1:
                                    text = text.replace('$PRESSURE$',self.weather_data['current_conditions']['pressure'])
                                if text.find('$WIND$')>-1:
                                    text = text.replace('$WIND$',self.weather_data['current_conditions']['wind_condition'])
                                if text.find('$CONDITION$')>-1:
                                    text = text.replace('$CONDITION$',self.weather_data['current_conditions']['condition_text'])
                                if len(self.weather_data['forecasts'])>0:
                                    if text.find('$MAX_TEMPERATURE_01$')>-1:
                                        text = text.replace('$MAX_TEMPERATURE_01$',self.weather_data['forecasts'][0]['high'])
                                    if text.find('$MIN_TEMPERATURE_01$')>-1:
                                        text = text.replace('$MIN_TEMPERATURE_01$',self.weather_data['forecasts'][0]['low'])
                                    if text.find('$CONDITION_01$')>-1:
                                        text = text.replace('$CONDITION_01$',self.weather_data['forecasts'][0]['condition_text'])
                                    if text.find('$DAY_OF_WEEK_01$')>-1:
                                        text = text.replace('$DAY_OF_WEEK_01$',self.weather_data['forecasts'][0]['day_of_week'])
                                if len(self.weather_data['forecasts'])>1:
                                    if text.find('$MAX_TEMPERATURE_02$')>-1:
                                        text = text.replace('$MAX_TEMPERATURE_02$',self.weather_data['forecasts'][1]['high'])
                                    if text.find('$MIN_TEMPERATURE_02$')>-1:
                                        text = text.replace('$MIN_TEMPERATURE_02$',self.weather_data['forecasts'][1]['low'])
                                    if text.find('$CONDITION_02$')>-1:
                                        text = text.replace('$CONDITION_02$',self.weather_data['forecasts'][1]['condition_text'])
                                    if text.find('$DAY_OF_WEEK_02$')>-1:
                                        text = text.replace('$DAY_OF_WEEK_02$',self.weather_data['forecasts'][1]['day_of_week'])
                                if len(self.weather_data['forecasts'])>2:
                                    if text.find('$MAX_TEMPERATURE_03$')>-1:
                                        text = text.replace('$MAX_TEMPERATURE_03$',self.weather_data['forecasts'][2]['high'])
                                    if text.find('$MIN_TEMPERATURE_03$')>-1:
                                        text = text.replace('$MIN_TEMPERATURE_03$',self.weather_data['forecasts'][2]['low'])
                                    if text.find('$CONDITION_03$')>-1:
                                        text = text.replace('$CONDITION_03$',self.weather_data['forecasts'][2]['condition_text'])
                                    if text.find('$DAY_OF_WEEK_03$')>-1:
                                        text = text.replace('$DAY_OF_WEEK_03$',self.weather_data['forecasts'][2]['day_of_week'])
                                if len(self.weather_data['forecasts'])>3:
                                    if text.find('$MAX_TEMPERATURE_04$')>-1:
                                        text = text.replace('$MAX_TEMPERATURE_04$',self.weather_data['forecasts'][3]['high'])
                                    if text.find('$MIN_TEMPERATURE_04$')>-1:
                                        text = text.replace('$MIN_TEMPERATURE_04$',self.weather_data['forecasts'][3]['low'])
                                    if text.find('$CONDITION_04$')>-1:
                                        text = text.replace('$CONDITION_04$',self.weather_data['forecasts'][3]['condition_text'])
                                    if text.find('$DAY_OF_WEEK_04$')>-1:
                                        text = text.replace('$DAY_OF_WEEK_04$',self.weather_data['forecasts'][3]['day_of_week'])
                                if len(self.weather_data['forecasts'])>4:
                                    if text.find('$MAX_TEMPERATURE_05$')>-1:
                                        text = text.replace('$MAX_TEMPERATURE_05$',self.weather_data['forecasts'][4]['high'])
                                    if text.find('$MIN_TEMPERATURE_05$')>-1:
                                        text = text.replace('$MIN_TEMPERATURE_05$',self.weather_data['forecasts'][4]['low'])
                                    if text.find('$CONDITION_05$')>-1:
                                        text = text.replace('$CONDITION_05$',self.weather_data['forecasts'][4]['condition_text'])
                                    if text.find('$DAY_OF_WEEK_05$')>-1:
                                        text = text.replace('$DAY_OF_WEEK_05$',self.weather_data['forecasts'][4]['day_of_week'])

                            x_bearing, y_bearing, width, height, x_advance, y_advance = cr.text_extents(text)
                            if xpos == 'CENTER':
                                x = x-width/2.0
                            elif xpos == 'RIGHT':
                                x = x-width
                            if ypos == 'CENTER':
                                y = y+height/2.0
                            elif ypos == 'TOP':
                                y = y+height
                            cr.move_to(x, y)
                            cr.show_text(text)
                    cr.restore()
                self.surface = mainsurface
                return
                # except Exception as e:
                #   print('Parsing data error: %s'%e)
        self.surface = None
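
In this widget the timezone shift arrives as a floating-point number of hours (rawOffset), and datetime.timedelta(hours=...) accepts fractional values, so the local clock time is simply the stored datetime plus that delta. A minimal sketch of the shift, with an illustrative offset value:

import datetime

utc_now = datetime.datetime.utcnow()
raw_offset = 5.5                                    # offset in hours, may be fractional
local_now = utc_now + datetime.timedelta(hours=raw_offset)
print(local_now.strftime('%H:%M on %A %d %B'))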

Example 9

Project: EDeN Source File: model.py
    def optimize(self, iterable_pos, iterable_neg,
                 model_name='model',
                 n_active_learning_iterations=0,
                 size_positive=-1,
                 size_negative=-1,
                 lower_bound_threshold_positive=-1,
                 upper_bound_threshold_positive=1,
                 lower_bound_threshold_negative=-1,
                 upper_bound_threshold_negative=1,
                 n_iter=20,
                 n_inner_iter_estimator=5,
                 max_total_time=-1,
                 pre_processor_parameters=dict(),
                 vectorizer_parameters=dict(),
                 estimator_parameters=dict(),
                 cv=10,
                 scoring='roc_auc',
                 score_func=lambda u, s: u - s,
                 two_steps_optimization=True):

        def _get_parameters_range():
            text = []
            text.append('\n\n\tParameters range:')
            text.append('\nPre_processor:')
            text.append(serialize_dict(pre_processor_parameters))
            text.append('\nVectorizer:')
            text.append(serialize_dict(vectorizer_parameters))
            text.append('\nEstimator:')
            text.append(serialize_dict(estimator_parameters))
            return '\n'.join(text)

        logger.debug(_get_parameters_range())
        # init
        n_failures = 0
        best_pre_processor_ = None
        best_vectorizer_ = None
        best_estimator_ = None
        best_pre_processor_args_ = dict()
        best_vectorizer_args_ = dict()
        best_estimator_args_ = dict()
        best_pre_processor_parameters_ = defaultdict(list)
        best_vectorizer_parameters_ = defaultdict(list)
        best_estimator_parameters_ = defaultdict(list)
        best_score_ = best_score_mean_ = best_score_std_ = 0
        start = time.time()
        if n_iter == 1:
            logger.debug('n_iter is 1: switching to default parameters')
            self.fit_default(iterable_pos,
                             iterable_neg,
                             pre_processor_parameters,
                             vectorizer_parameters,
                             estimator_parameters)
        else:
            if len(pre_processor_parameters) == 0:
                mean_len_pre_processor_parameters = 0
            else:
                mean_len_pre_processor_parameters = np.mean([len(pre_processor_parameters[p])
                                                             for p in pre_processor_parameters])
            if len(vectorizer_parameters) == 0:
                mean_len_vectorizer_parameters = 0
            else:
                mean_len_vectorizer_parameters = np.mean([len(vectorizer_parameters[p])
                                                          for p in vectorizer_parameters])
            if (mean_len_pre_processor_parameters == 1 or mean_len_pre_processor_parameters == 0) and\
                    (mean_len_vectorizer_parameters == 1 or mean_len_vectorizer_parameters == 0):
                data_matrix_is_stable = True
            else:
                data_matrix_is_stable = False
            # main iteration
            for i in range(n_iter):
                if max_total_time != -1:
                    if time.time() - start > max_total_time:
                        delta_time = datetime.timedelta(seconds=(time.time() - start))
                        logger.warning('Reached max time: %s' % (str(delta_time)))
                        break

                # after n_iter/2 iterations, replace the parameter lists with only those values that
                # have been found to increase the performance
                if i == int(n_iter / 2) and two_steps_optimization is True:
                    if len(best_pre_processor_parameters_) > 0:
                        pre_processor_parameters = dict(best_pre_processor_parameters_)
                    if len(best_vectorizer_parameters_) > 0:
                        vectorizer_parameters = dict(best_vectorizer_parameters_)
                    if len(best_estimator_parameters_) > 0:
                        estimator_parameters = dict(best_estimator_parameters_)
                    logger.debug(_get_parameters_range())
                    if len(pre_processor_parameters) == 1 and len(vectorizer_parameters) == 1 and \
                            len(estimator_parameters) == 1:
                        logger.debug('Optimal parameters range is singular, bailing out')
                        break

                # build data matrix only the first time or if needed e.g. because
                # there are more choices in the parameter settings for the
                # pre_processor or the vectorizer
                if i == 0 or data_matrix_is_stable is False:
                    if i == 0:
                        # select default parameters
                        self.pre_processor_args = self._default(pre_processor_parameters)
                        self.vectorizer_args = self._default(vectorizer_parameters)
                    else:
                        # sample parameters randomly
                        self.pre_processor_args = self._sample(pre_processor_parameters)
                        self.vectorizer_args = self._sample(vectorizer_parameters)
                    # copy the iterators for later re-use
                    iterable_pos, iterable_pos_ = tee(iterable_pos)
                    iterable_neg, iterable_neg_ = tee(iterable_neg)
                    try:
                        # if no active learning mode, just produce data matrix
                        if n_active_learning_iterations == 0:
                            X, y = self._data_matrices(iterable_pos_,
                                                       iterable_neg_,
                                                       fit_vectorizer=self.fit_vectorizer)
                        else:  # otherwise use the active learning strategy
                            X, y = self._select_data_matrices(
                                iterable_pos_, iterable_neg_,
                                n_active_learning_iterations=n_active_learning_iterations,
                                size_positive=size_positive,
                                size_negative=size_negative,
                                lower_bound_threshold_positive=lower_bound_threshold_positive,
                                upper_bound_threshold_positive=upper_bound_threshold_positive,
                                lower_bound_threshold_negative=lower_bound_threshold_negative,
                                upper_bound_threshold_negative=upper_bound_threshold_negative)
                    except Exception as e:
                        logger.debug('Exception', exc_info=True)
                        delta_time = datetime.timedelta(seconds=(time.time() - start))
                        text = []
                        text.append('\nFailed outer optimization iteration: %d/%d (at %.1f sec; %s)' %
                                    (i + 1, n_iter, time.time() - start, str(delta_time)))
                        text.append(e.__doc__)
                        text.append(e.message)
                        text.append('Failed with the following setting:')
                        text.append(self.get_parameters())
                        text.append('...continuing')
                        logger.debug('\n'.join(text))

                # iterate more frequently across the estimator parameters
                for inner_i in range(n_inner_iter_estimator):
                    try:
                        self.estimator_args = self._sample(estimator_parameters)
                        self.estimator.set_params(**self.estimator_args)
                        scores = cross_validation.cross_val_score(self.estimator, X, y, cv=cv,
                                                                  scoring=scoring, n_jobs=self.n_jobs)
                    except Exception as e:
                        logger.debug('Exception', exc_info=True)
                        delta_time = datetime.timedelta(seconds=(time.time() - start))
                        text = []
                        text.append('\nFailed inner optimization iteration: (%d/%d) %d/%d (at %.1f sec; %s)' %
                                    (inner_i + 1, n_inner_iter_estimator, i + 1,
                                        n_iter, time.time() - start, str(delta_time)))
                        text.append(e.__doc__)
                        text.append(e.message)
                        text.append('Failed with the following setting:')
                        text.append(self.get_parameters())
                        text.append('...continuing')
                        logger.debug('\n'.join(text))
                        n_failures += 1
                    else:
                        # consider as score the mean-std for a robust estimate of predictive performance
                        score_mean = np.mean(scores)
                        score_std = np.std(scores)
                        score = score_func(score_mean, score_std)
                        logger.debug('iteration: (%d/%d) %d/%d score (%s): %.3f (%.3f +- %.3f)' %
                                     (inner_i + 1, n_inner_iter_estimator, i + 1, n_iter,
                                      scoring, score, score_mean, score_std))
                        # update the best configuration
                        if best_score_ < score:
                            # fit the estimator since the cross_validation estimate does not
                            # set the estimator parameters
                            self.estimator.fit(X, y)
                            self.save(model_name)
                            best_score_ = score
                            best_score_mean_ = score_mean
                            best_score_std_ = score_std
                            best_pre_processor_ = copy.deepcopy(self.pre_processor)
                            best_vectorizer_ = copy.deepcopy(self.vectorizer)
                            best_estimator_ = copy.deepcopy(self.estimator)
                            best_pre_processor_args_ = copy.deepcopy(self.pre_processor_args)
                            best_vectorizer_args_ = copy.deepcopy(self.vectorizer_args)
                            best_estimator_args_ = copy.deepcopy(self.estimator_args)
                            # add parameter to list of best parameters
                            for key in self.pre_processor_args:
                                best_pre_processor_parameters_[key].append(self.pre_processor_args[key])
                            for key in self.vectorizer_args:
                                best_vectorizer_parameters_[key].append(self.vectorizer_args[key])
                            for key in self.estimator_args:
                                best_estimator_parameters_[key].append(self.estimator_args[key])
                            delta_time = datetime.timedelta(seconds=(time.time() - start))
                            text = []
                            text.append('\n\n\tIteration: %d/%d (after %.1f sec; %s)' %
                                        (i + 1, n_iter, time.time() - start, str(delta_time)))
                            text.append('Best score (%s): %.3f (%.3f +- %.3f)' % (scoring, best_score_,
                                                                                  best_score_mean_,
                                                                                  best_score_std_))
                            text.append('\nData:')
                            text.append('Instances: %d ; Features: %d with an avg of %d features' %
                                        (X.shape[0], X.shape[1], X.getnnz() / X.shape[0]))
                            text.append(report_base_statistics(y))
                            text.append(self.get_parameters())
                            logger.info('\n'.join(text))
            # store the best hyperparameter configuration
            self.pre_processor_args = copy.deepcopy(best_pre_processor_args_)
            self.vectorizer_args = copy.deepcopy(best_vectorizer_args_)
            self.estimator_args = copy.deepcopy(best_estimator_args_)
            # store the best machines
            self.pre_processor = copy.deepcopy(best_pre_processor_)
            self.vectorizer = copy.deepcopy(best_vectorizer_)
            self.estimator = copy.deepcopy(best_estimator_)

        # save to disk
        if n_failures < n_iter * n_inner_iter_estimator:
            self.save(model_name)
            logger.info('Saved current best model in %s' % model_name)
        else:
            logger.warning('ERROR: no iteration has produced any viable solution.')
            exit(1)
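
Throughout the optimization loop, datetime.timedelta(seconds=time.time() - start) is used purely for reporting: it turns an elapsed float into a human-readable duration for the log messages, and the same elapsed value enforces the max_total_time budget. A compact sketch of that budget check (the budget and the sleep are placeholders for real iterations):

import datetime
import time

max_total_time = 2          # seconds; -1 would mean "no limit", as in the example
start = time.time()
while True:
    time.sleep(0.5)         # stand-in for one optimization iteration
    if max_total_time != -1 and time.time() - start > max_total_time:
        delta_time = datetime.timedelta(seconds=(time.time() - start))
        print('Reached max time: %s' % str(delta_time))
        break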

Example 10

Project: CommunityCellularManager Source File: setup_test_db.py
    def create_data(self, username, password, usernum, kind, prefix,
                    endaga_version):
        # Create a user.
        sys.stdout.write('creating user: %s %s %s..\n' % (
            username, password, usernum))
        user = User(username=username, email="%[email protected]" % username)
        user.set_password(password)
        user.save()

        # Get user profile and add some credit.
        sys.stdout.write('setting user profile..\n')
        user_profile = UserProfile.objects.get(user=user)
        user_profile.save()

        # Add some towers.
        towers_to_add = random.randint(4, 7)
        added_towers = []
        print 'adding %s towers..' % towers_to_add

        for index in range(towers_to_add):
            nickname = None
            if random.random() < 0.5:
                nickname = 'Test Tower %s' % index
            bts = BTS(uuid=str(uuid.uuid4()), nickname=nickname, secret='mhm',
                      inbound_url='http://localhost:8090',
                      network=user_profile.network)
            added_towers.append(bts)
            # Set the last_active time and uptime randomly.
            random_seconds = random.randint(0, 24*60*60)
            random_date = (timezone.now() -
                           datetime.timedelta(seconds=random_seconds))
            bts.last_active = random_date
            bts.uptime = random.randint(24*60*60, 100*24*60*60)
            bts.status = random.choice(['no-data','active','inactive'])
            bts.save()
            # Set the metapackage version.  This has to be done after initially
            # creating the BTS or the post-create hook will override.
            if endaga_version is not None:
                endaga_version = bts.sortable_version(endaga_version)
            versions = {
                'endaga_version': endaga_version,
                'freeswitch_version': None,
                'gsm_version': None,
                'python_endaga_core_version': None,
                'python_gsm_version': None,
            }
            bts.package_versions = json.dumps(versions)
            bts.save()
            # Add some TimeseriesStats for each tower.
            stats_to_add = random.randint(100, 1000)
            print 'adding %s TimeseriesStats..' % stats_to_add
            for _ in range(stats_to_add):
                date = (
                    timezone.now() -
                    datetime.timedelta(seconds=random.randint(0, 7*24*60*60)))
                key = random.choice(stats_app.views.TIMESERIES_STAT_KEYS)
                if key in ('noise_rssi_db', 'noise_ms_rssi_target_db'):
                    value = random.randint(-75, -20)
                elif 'percent' in key:
                    value = random.randint(0, 100)
                elif 'bytes' in key:
                    value = random.randint(0, 10000)
                else:
                    value = random.randint(0, 10)
                stat = TimeseriesStat(key=key, value=value, date=date, bts=bts,
                                      network=user_profile.network)
                stat.save()
            # Add some SystemEvents for each tower (either small or large number)
            number_of_events = [0,1,2,5,18,135,264]
            events_to_add = random.choice(number_of_events)
            print 'adding %s SystemEvents..' % events_to_add
            for _ in range(events_to_add):
                # Actual events should be in order. But we should support
                # out-of-order events just in case
                date = (
                    timezone.now() -
                    datetime.timedelta(seconds=random.randint(0, 7*24*60*60)))
                event = SystemEvent(date=date, bts=bts,
                            type=random.choice(['bts up','bts down']))
                event.save()

        # Make at least one BTS active recently.
        bts.last_active = timezone.now()
        bts.status = 'active'
        bts.save()
        # Make one BTS in the no-data state.
        bts = BTS(uuid=str(uuid.uuid4()), nickname='No-data tower', secret='z',
                  inbound_url='http://localhost:5555',
                  network=user_profile.network,
                  package_versions=json.dumps(versions))
        bts.save()

        # Add some subscribers.
        sys.stdout.write("adding subscribers and numbers..\n")
        added_subscribers = []
        for index in range(random.randint(3, 20)):
            imsi = "IMSI%d999900000000%s" % (usernum, index)
            if random.random() < 0.5:
                name = "test name %s" % index
            else:
                name = ''
            balance = random.randint(40000000, 60000000)
            state = "active"
            bts = BTS.objects.filter(
                network=user_profile.network).order_by('?').first()
            subscriber = Subscriber(network=user_profile.network, imsi=imsi,
                                    name=name, balance=balance, state=state,
                                    bts=bts, last_camped=bts.last_active)
            subscriber.save()
            added_subscribers.append(subscriber)
            # And attach some numbers.
            for _ in range(random.randint(1, 5)):
                msisdn = int(prefix + str(random.randint(1000, 9999)))
                number = Number(
                    number=msisdn, state="inuse", network=user_profile.network,
                    kind=kind, subscriber=subscriber)
                number.save()

        # Add one last subscriber so we have at least one sub with no activity.
        imsi = "IMSI%d8888000000000" % usernum
        name = 'test name (no activity)'
        subscriber = Subscriber(network=user_profile.network, imsi=imsi,
                                bts=bts, name=name, balance=1000,
                                state='active')
        subscriber.save()

        # Add some UsageEvents attached to random subscribers.
        events_to_add = random.randint(100, 4000)
        sys.stdout.write("adding %s usage events..\n" % events_to_add)
        all_destinations = list(Destination.objects.all())
        with transaction.atomic():
            for _ in range(events_to_add):
                random_sub = random.choice(added_subscribers)
                time_delta = datetime.timedelta(
                    minutes=random.randint(0, 60000))
                date = (timezone.now() - time_delta)
                kinds = [
                    ('outside_sms', 10000), ('incoming_sms', 2000),
                    ('local_sms', 4000),
                    ('local_recv_sms', 1000), ('free_sms', 0),
                    ('error_sms', 0),
                    ('outside_call', 8000), ('incoming_call', 3000),
                    ('local_call', 2000),
                    ('local_recv_call', 1000),
                    ('free_call', 0), ('error_call', 0), ('gprs', 5000)]
                (kind, tariff) = random.choice(kinds)
                to_number, billsec, up_bytes, call_duration = 4 * [None]
                from_number, down_bytes, timespan, change = 4 * [None]
                if 'call' in kind:
                    billsec = random.randint(0, 120)
                    change = tariff * billsec
                    call_duration = billsec + random.randint(0, 10)
                    to_number = str(random.randint(1234567890, 9876543210))
                    from_number = str(random.randint(1234567890, 9876543210))
                    reason = '%s sec call to %s (%s)' % (billsec, to_number,
                                                         kind)
                elif 'sms' in kind:
                    change = tariff
                    to_number = str(random.randint(1234567890, 9876543210))
                    from_number = str(random.randint(1234567890, 9876543210))
                    reason = '%s to %s' % (kind, to_number)
                elif kind == 'gprs':
                    up_bytes = random.randint(20000, 400000)
                    down_bytes = random.randint(20000, 400000)
                    change = (down_bytes/1024) * tariff
                    timespan = 60
                    reason = 'gprs_usage, %sB uploaded, %sB downloaded' % (
                        up_bytes, down_bytes)
                old_amount = random_sub.balance
                random_sub.change_balance(change)
                usage_event = UsageEvent(
                    subscriber=random_sub, bts=random.choice(added_towers),
                    date=date, kind=kind,
                    reason=reason, oldamt=old_amount,
                    newamt=random_sub.balance, change=-change, billsec=billsec,
                    call_duration=call_duration, uploaded_bytes=up_bytes,
                    downloaded_bytes=down_bytes,
                    timespan=timespan, to_number=to_number,
                    from_number=from_number,
                    destination=random.choice(all_destinations), tariff=tariff)
                try:
                    usage_event.save()
                except DataError:
                    from django.db import connection
                    print connection.queries[-1]
                random_sub.save()
            # Create one more UE with a negative "oldamt" to test display
            # handling of such events.
            usage_event = UsageEvent(
                subscriber=random_sub, bts=random.choice(added_towers),
                date=date, kind='local_sms',
                reason='negative oldamt', oldamt=-200000,
                newamt=0, change=200000,
                billsec=0, to_number='19195551234',
                destination=random.choice(all_destinations))
            usage_event.save()

        # Add some transaction history.
        sys.stdout.write("adding transactions..\n")
        for _ in range(random.randint(10, 50)):
            time_delta = datetime.timedelta(
                minutes=random.randint(0, 60000))
            date = (timezone.now() - time_delta)
            new_transaction = Transaction(
                ledger=user_profile.network.ledger, kind='credit',
                reason='Automatic Recharge',
                amount=1e3*random.randint(1000, 100000),
                created=date,
            )
            new_transaction.save()

        # And some floating numbers for release testing.
        sys.stdout.write("adding floating phone numbers..\n")
        for num in random.sample(range(10000, 99999), 300):
            # Numbers need to be E.164-formatted; that's what we use.
            msisdn = int('155555%s' % str(num))
            state = random.choice(('available', 'pending'))
            kind = random.choice(('number.nexmo.monthly',
                                  'number.telecom.permanent'))
            number = Number(
                number=msisdn, state=state, kind=kind, country_id='US')
            number.save()
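
A pattern worth noting in this seed script is how it back-dates generated records: it subtracts a random datetime.timedelta from timezone.now() so usage events and transactions are spread over the recent past. The same idea works outside Django with only the standard library; a minimal sketch (the helper name make_backdated_timestamps is made up for illustration):

import datetime
import random

def make_backdated_timestamps(count, max_minutes_back=60000):
    """Return `count` aware datetimes scattered over the recent past.

    Mirrors the seed-data trick above: each timestamp is "now" minus a
    random timedelta of up to `max_minutes_back` minutes (~41 days).
    """
    now = datetime.datetime.now(datetime.timezone.utc)
    return [now - datetime.timedelta(minutes=random.randint(0, max_minutes_back))
            for _ in range(count)]

# Five fake event dates, newest first.
for stamp in sorted(make_backdated_timestamps(5), reverse=True):
    print(stamp.isoformat())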

Example 11

Project: kikola Source File: timedelta.py
Function: timedelta_to_str
def timedelta_to_str(value, format=None):
    """
    Display the timedelta formatted according to the given string.

    You should use global setting ``TIMEDELTA_FORMAT`` to specify default
    format to this function there (like ``DATE_FORMAT`` for builtin ``date``
    template filter).

    Default value for ``TIMEDELTA_FORMAT`` is ``'G:i'``.

    Format uses the same policy as Django ``date`` template filter or
    PHP ``date`` function with several differences.

    Available format strings:

    +------------------+-----------------------------+------------------------+
    | Format character | Description                 | Example output         |
    +==================+=============================+========================+
    | ``a``            | Not implemented.            |                        |
    +------------------+-----------------------------+------------------------+
    | ``A``            | Not implemented.            |                        |
    +------------------+-----------------------------+------------------------+
    | ``b``            | Not implemented.            |                        |
    +------------------+-----------------------------+------------------------+
    | ``B``            | Not implemented.            |                        |
    +------------------+-----------------------------+------------------------+
    | ``c``            | Not implemented.            |                        |
    +------------------+-----------------------------+------------------------+
    | ``d``            | Total days, 2 digits with   | ``'01'``, ``'41'``     |
    |                  | leading zeros. Do not       |                        |
    |                  | combine with ``w`` format.  |                        |
    +------------------+-----------------------------+------------------------+
    | ``D``            | Not implemented.            |                        |
    +------------------+-----------------------------+------------------------+
    | ``f``            | Magic "full" format with    | ``'2w 4d 1:28:07'``    |
    |                  | short labels.               |                        |
    +------------------+-----------------------------+------------------------+
    | ``F``            | Magic "full" format with    | ``'2 weeks, 4 days,    |
    |                  | normal labels.              | 1:28:07'``             |
    +------------------+-----------------------------+------------------------+
    | ``g``            | Day, not total, hours       | ``'0'`` to ``'23'``    |
    |                  | without leading zeros. To   |                        |
    |                  | use with ``d``, ``j``, or   |                        |
    |                  | ``w``.                      |                        |
    +------------------+-----------------------------+------------------------+
    | ``G``            | Total hours without         | ``'1'``, ``'433'``     |
    |                  | leading zeros. Do not       |                        |
    |                  | combine with ``g`` or       |                        |
    |                  | ``h`` formats.              |                        |
    +------------------+-----------------------------+------------------------+
    | ``h``            | Day, not total, hours with  | ``'00'`` to ``'23'``   |
    |                  | leading zeros. To use with  |                        |
    |                  | ``d`` or ``w``.             |                        |
    +------------------+-----------------------------+------------------------+
    | ``H``            | Total hours with leading    | ``'01'``, ``'433'``    |
    |                  | zeros. Do not combine with  |                        |
    |                  | ``g`` or ``h`` formats.     |                        |
    +------------------+-----------------------------+------------------------+
    | ``i``            | Hour, not total, minutes, 2 | ``'00'`` to ``'59'``   |
    |                  | digits with leading zeros.  |                        |
    |                  | To use with ``g``, ``G``,   |                        |
    |                  | ``h`` or ``H`` formats.     |                        |
    +------------------+-----------------------------+------------------------+
    | ``I``            | Total minutes, 2 digits or  | ``'01'``, ``'433'``    |
    |                  | more with leading zeros. Do |                        |
    |                  | not combine with ``i``      |                        |
    |                  | format.                     |                        |
    +------------------+-----------------------------+------------------------+
    | ``j``            | Total days, one or 2 digits | ``'1'``, ``'41'``      |
    |                  | without leading zeros. Do   |                        |
    |                  | not combine with ``w``      |                        |
    |                  | format.                     |                        |
    +------------------+-----------------------------+------------------------+
    | ``J``            | Not implemented.            |                        |
    +------------------+-----------------------------+------------------------+
    | ``l``            | Days long label.            | ``'day'`` or           |
    |                  | Pluralized and localized.   | ``'days'``             |
    +------------------+-----------------------------+------------------------+
    | ``L``            | Weeks long label.           | ``'week'`` or          |
    |                  | Pluralized and localized.   | ``'weeks'``            |
    +------------------+-----------------------------+------------------------+
    | ``m``            | Week days long label.       | ``'day'`` or           |
    |                  | Pluralized and localized.   | ``'days'``             |
    +------------------+-----------------------------+------------------------+
    | ``M``            | Not implemented.            |                        |
    +------------------+-----------------------------+------------------------+
    | ``n``            | Not implemented.            |                        |
    +------------------+-----------------------------+------------------------+
    | ``N``            | Not implemented.            |                        |
    +------------------+-----------------------------+------------------------+
    | ``O``            | Not implemented.            |                        |
    +------------------+-----------------------------+------------------------+
    | ``P``            | Not implemented.            |                        |
    +------------------+-----------------------------+------------------------+
    | ``r``            | Standard Python timedelta   | ``'18 d 1:28:07'``     |
    |                  | representation with short   |                        |
    |                  | labels.                     |                        |
    +------------------+-----------------------------+------------------------+
    | ``R``            | Standard Python timedelta   | ``'18 days, 1:28:07'`` |
    |                  | representation with normal  |                        |
    |                  | labels.                     |                        |
    +------------------+-----------------------------+------------------------+
    | ``s``            | Minute, not total, seconds, | ``'00'`` to ``'59'``   |
    |                  | 2 digits with leading       |                        |
    |                  | zeros. To use with ``i`` or |                        |
    |                  | ``I``.                      |                        |
    +------------------+-----------------------------+------------------------+
    | ``S``            | Total seconds. 2 digits or  | ``'00'``, ``'433'``    |
    |                  | more with leading zeros. Do |                        |
    |                  | not combine with ``s``      |                        |
    |                  | format.                     |                        |
    +------------------+-----------------------------+------------------------+
    | ``t``            | Not implemented.            |                        |
    +------------------+-----------------------------+------------------------+
    | ``T``            | Not implemented.            |                        |
    +------------------+-----------------------------+------------------------+
    | ``u``            | Second, not total,          | ``0`` to ``999999``    |
    |                  | microseconds.               |                        |
    +------------------+-----------------------------+------------------------+
    | ``U``            | Not implemented.            |                        |
    +------------------+-----------------------------+------------------------+
    | ``w``            | Week, not total, days, one  | ``0`` to ``6``         |
    |                  | digit without leading       |                        |
    |                  | zeros. To use with ``W``.   |                        |
    +------------------+-----------------------------+------------------------+
    | ``W``            | Total weeks, one or more    | ``'1'``, ``'41'``      |
    |                  | digits without leading      |                        |
    |                  | zeros.                      |                        |
    +------------------+-----------------------------+------------------------+
    | ``y``            | Not implemented.            |                        |
    +------------------+-----------------------------+------------------------+
    | ``Y``            | Not implemented.            |                        |
    +------------------+-----------------------------+------------------------+
    | ``z``            | Not implemented.            |                        |
    +------------------+-----------------------------+------------------------+
    | ``Z``            | Not implemented.            |                        |
    +------------------+-----------------------------+------------------------+

    For example,

    ::

        >>> import datetime
        >>> from kikola.utils import timedelta_to_str
        >>> delta = datetime.timedelta(seconds=99660)
        >>> timedelta_to_str(delta)
        ... u'27:41'
        >>> timedelta_to_str(delta, 'r')
        ... u'1d 3:41:00'
        >>> timedelta_to_str(delta, 'f')
        ... u'1d 3:41'
        >>> timedelta_to_str(delta, 'W L, w l, H:i:s')
        ... u'0 weeks, 1 day, 03:41:00'

    A couple of words about the magic "full" formats: they show the weeks
    number with its label, the days number with its label, and the seconds
    component only when the corresponding value is greater than zero.

    For example,

    ::

        >>> import datetime
        >>> from kikola.utils import timedelta_to_str
        >>> delta = datetime.timedelta(hours=12)
        >>> timedelta_to_str(delta, 'f')
        ... u'12:00'
        >>> timedelta_to_str(delta, 'F')
        ... u'12:00'
        >>> delta = datetime.timedelta(hours=12, seconds=30)
        >>> timedelta_to_str(delta, 'f')
        ... u'12:00:30'
        >>> timedelta_to_str(delta, 'F')
        ... u'12:00:30'
        >>> delta = datetime.timedelta(hours=168)
        >>> timedelta_to_str(delta, 'f')
        ... u'2w 0:00'
        >>> timedelta_to_str(delta, 'F')
        ... u'2 weeks, 0:00'

    """
    # Only ``datetime.timedelta`` instances allowed for this function
    if not isinstance(value, datetime.timedelta):
        raise ValueError('Only "datetime.timedelta" instances are supported '
                         'by this function, got %s.' % type(value))

    # Generate total data
    days = value.days
    microseconds = value.microseconds
    seconds = timedelta_seconds(value)

    hours = seconds / 3600
    minutes = seconds / 60
    weeks = days / 7

    # Generate collapsed data
    day_hours = hours - days * 24
    hour_minutes = minutes - hours * 60
    minute_seconds = seconds - minutes * 60
    week_days = days - weeks * 7

    days_label = ungettext(u'day', u'days', days)
    short_days_label = ugettext(u'd')
    short_week_days_label = ugettext(u'd')
    short_weeks_label = ugettext(u'w')
    week_days_label = ungettext(u'day', u'days', week_days)
    weeks_label = ungettext(u'week', u'weeks', weeks)

    # Collect data
    data = locals()

    format = format or TIMEDELTA_FORMAT
    processed = u''

    for part in format:
        if part in TIMEDELTA_FORMATS:
            is_full_part = part in ('f', 'F')
            is_repr_part = part in ('r', 'R')

            part = TIMEDELTA_FORMATS[part][0]

            if is_full_part or is_repr_part:
                if is_repr_part and not days:
                    part = part.replace(u'%(days)d', u'')
                    part = part.replace(u'%(days_label)s,', u'')
                    part = part.replace(u'%(short_days_label)s', u'')

                if is_full_part and not minute_seconds:
                    part = part.replace(u':%(minute_seconds)02d', u'')

                if is_full_part and not weeks:
                    part = part.replace(u'%(weeks)d', u'')
                    part = part.replace(u'%(short_weeks_label)s', u'')
                    part = part.replace(u'%(weeks_label)s,', u'')

                if is_full_part and not week_days:
                    part = part.replace(u'%(week_days)d', u'')
                    part = part.replace(u'%(short_week_days_label)s', u'')
                    part = part.replace(u'%(week_days_label)s,', u'')

                part = part.strip()
                part = u' '.join(part.split())

        processed += part

    return processed % data
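
If all you need is the default 'G:i'-style output (total hours, then minutes) rather than the full format machinery above, the same decomposition can be done with divmod on total_seconds(). A minimal sketch, not part of kikola (the name format_total_hours_minutes is invented here):

import datetime

def format_total_hours_minutes(delta):
    """Render a timedelta as total hours and minutes, e.g. '27:41'.

    Roughly the default 'G:i' output described above: hours are the grand
    total (not capped at 23) and minutes are the remainder.
    """
    if not isinstance(delta, datetime.timedelta):
        raise TypeError('expected datetime.timedelta, got %s' % type(delta))
    hours, remainder = divmod(int(delta.total_seconds()), 3600)
    return '%d:%02d' % (hours, remainder // 60)

print(format_total_hours_minutes(datetime.timedelta(seconds=99660)))  # 27:41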

Example 12

Project: socorro Source File: test_crashes.py
    @classmethod
    def setUpClass(cls):
        """Set up this test class by populating the reports table with fake
        data. """
        super(IntegrationTestCrashes, cls).setUpClass()

        cursor = cls.connection.cursor()

        cls.now = datetimeutil.utc_now()
        yesterday = cls.now - datetime.timedelta(days=1)

        build_date = cls.now - datetime.timedelta(days=30)
        sunset_date = cls.now + datetime.timedelta(days=30)

        # Insert data for frequency test
        cursor.execute("""
            INSERT INTO reports
            (
                id,
                uuid,
                build,
                signature,
                os_name,
                date_processed,
                user_comments,
                product,
                version,
                release_channel
            )
            VALUES
            (
                1,
                'abc',
                '2012033116',
                'js',
                'Windows NT',
                '%(now)s',
                null,
                'Firefox',
                '11.0',
                'Nightly'
            ),
            (
                2,
                'def',
                '2012033116',
                'js',
                'Linux',
                '%(now)s',
                'hello',
                'Firefox',
                '11.0',
                'Nightly'
            ),
            (
                3,
                'hij',
                '2012033117',
                'js',
                'Windows NT',
                '%(now)s',
                'hah',
                'Firefox',
                '11.0',
                'Nightly'
            ),
            (
                4,
                'klm',
                '2012033117',
                'blah',
                'Unknown',
                '%(now)s',
                null,
                'Firefox',
                '14.0b1',
                'Beta'
            ),
            (
                5,
                'nop',
                '2012033117',
                'cool_sig',
                'Unknown',
                '%(now)s',
                'hi!',
                'Firefox',
                '14.0b',
                'Beta'
            ),
            (
                6,
                'qrs',
                '2012033117',
                'cool_sig',
                'Linux',
                '%(now)s',
                'meow',
                'WaterWolf',
                '2.0b',
                'Beta'
            )
        """ % {"now": cls.now})

        # Insert data for daily crashes test

        cursor.execute("""
            INSERT INTO products
            (product_name, sort, release_name)
            VALUES
            (
                'Firefox',
                1,
                'firefox'
            ),
            (
                'WaterWolf',
                2,
                'WaterWolf'
            );
        """)

        cursor.execute("""
            INSERT INTO product_versions
            (product_version_id, product_name, major_version, release_version,
             version_string, version_sort, build_date, sunset_date,
             featured_version, build_type, is_rapid_beta, rapid_beta_id)
            VALUES
            (
                1,
                'Firefox',
                '11.0',
                '11.0',
                '11.0',
                '00000011000',
                '%(build_date)s',
                '%(sunset_date)s',
                't',
                'Nightly',
                False,
                NULL
            ),
            (
                2,
                'Firefox',
                '12.0',
                '12.0',
                '12.0',
                '00000012000',
                '%(build_date)s',
                '%(sunset_date)s',
                't',
                'Nightly',
                False,
                NULL
            ),
            (
                3,
                'Firefox',
                '13.0',
                '13.0',
                '13.0',
                '00000013000',
                '%(build_date)s',
                '%(sunset_date)s',
                'f',
                'Nightly',
                False,
                NULL
            ),
            (
                4,
                'Firefox',
                '14.0b321241',
                '14.0b',
                '14.0b',
                '00000013000',
                '%(build_date)s',
                '%(sunset_date)s',
                'f',
                'Beta',
                True,
                3
            ),
            (
                5,
                'Firefox',
                '14.0b1',
                '14.0b',
                '14.0b1',
                '00000013000',
                '%(build_date)s',
                '%(sunset_date)s',
                'f',
                'Beta',
                False,
                4
            ),
            (
                6,
                'WaterWolf',
                '2.0b',
                '2.0b',
                '2.0b',
                '00000013000',
                '%(build_date)s',
                '%(sunset_date)s',
                'f',
                'Nightly',
                True,
                NULL
            );
        """ % {"build_date": build_date, "sunset_date": sunset_date})

        cursor.execute("""
            INSERT INTO release_channels
            (release_channel, sort)
            VALUES
            ('Nightly', 1),
            ('Beta', 2)
        """)

        cursor.execute("""
            INSERT INTO product_release_channels
            (product_name, release_channel, throttle)
            VALUES
            ('Firefox', 'Nightly', 0.1),
            ('Firefox', 'Beta', 1.0)
        """)

        cursor.execute("""
            INSERT INTO os_names
            (os_short_name, os_name)
            VALUES
            ('win', 'Windows'),
            ('mac', 'Mac OS X'),
            ('lin', 'Linux')
        """)

        cursor.execute("""
            INSERT INTO process_types
            (process_type)
            VALUES
            ('crash'),
            ('hang')
        """)

        cursor.execute("""
            INSERT INTO crash_types
            (crash_type_id, crash_type, crash_type_short, process_type,
             old_code, include_agg)
            VALUES
            (1, 'Browser', 'crash', 'crash', 'c', TRUE),
            (2, 'Hang', 'hang', 'hang', 'h', TRUE)
        """)

        cursor.execute("""
            INSERT INTO home_page_graph
            (product_version_id, report_date, report_count, adu, crash_hadu)
            VALUES
            (1, '%(now)s', 5, 20, 0.12),
            (2, '%(yesterday)s', 2, 14, 0.12)
        """ % {"now": cls.now, "yesterday": yesterday})

        cursor.execute("""
            INSERT INTO home_page_graph_build
            (product_version_id, report_date, build_date, report_count, adu)
            VALUES
            (1, '%(now)s', '%(now)s', 5, 200),
            (1, '%(now)s', '%(yesterday)s', 3, 274),
            (2, '%(yesterday)s', '%(now)s', 3, 109)
        """ % {"now": cls.now, "yesterday": yesterday})

        cursor.execute("""
            INSERT INTO crashes_by_user
            (product_version_id, os_short_name, crash_type_id, report_date,
             report_count, adu)
            VALUES
            (1, 'win', 1, '%(now)s', 2, 3000),
            (1, 'win', 2, '%(now)s', 3, 3000),
            (1, 'lin', 2, '%(now)s', 1, 1000),
            (2, 'win', 1, '%(now)s', 5, 2000),
            (3, 'win', 1, '%(now)s', 6, 2000),
            (3, 'win', 2, '%(now)s', 5, 2000),
            (3, 'lin', 1, '%(now)s', 4, 4000),
            (3, 'lin', 2, '%(now)s', 3, 4000),
            (3, 'mac', 1, '%(now)s', 2, 6000),
            (3, 'mac', 2, '%(now)s', 1, 6000)
        """ % {"now": cls.now, "yesterday": yesterday})

        cursor.execute("""
            INSERT INTO crashes_by_user_build
            (product_version_id, os_short_name, crash_type_id, build_date,
             report_date, report_count, adu)
            VALUES
            (1, 'win', 1, '%(now)s', '%(now)s', 1, 2000),
            (1, 'win', 1, '%(yesterday)s', '%(now)s', 2, 3000),
            (1, 'win', 2, '%(yesterday)s', '%(now)s', 3, 1000),
            (1, 'lin', 2, '%(now)s', '%(yesterday)s', 4, 5000),
            (1, 'mac', 1, '%(yesterday)s', '%(now)s', 5, 4000),
            (2, 'lin', 1, '%(yesterday)s', '%(now)s', 1, 1000)
        """ % {"now": cls.now, "yesterday": yesterday})

        cursor.execute("""
            INSERT INTO signatures
            (signature_id, signature, first_build, first_report)
            VALUES
            (1, 'canIhaveYourSignature()', 2008120122, '%(now)s'),
            (2, 'ofCourseYouCan()', 2008120122, '%(now)s')
        """ % {"now": cls.now.date()})

        # Remember your product versions...
        #   1) Firefox:11.0
        #   2) Firefox:12.0
        #   4) Firefox:14.0b
        #   6) WaterWolf:2.0b
        cursor.execute("""
            INSERT INTO exploitability_reports
            (signature_id, product_version_id, signature, report_date,
             null_count, none_count, low_count, medium_count, high_count)
            VALUES
            (1, 1, 'canIhaveYourSignature()', '%(now)s', 0, 1, 2, 3, 4),
            (2, 1, 'ofCourseYouCan()', '%(yesterday)s', 4, 3, 2, 1, 0),
            (2, 4, 'ofCourseYouCan()', '%(now)s', 1, 4, 0, 1, 0),
            (2, 6, 'canIhaveYourSignature()', '%(yesterday)s', 2, 2, 2, 2, 2)
        """ % {"now": cls.now, "yesterday": yesterday})

        cursor.execute("""
            INSERT INTO signatures
            (signature_id, signature)
            VALUES
            (5, 'js')
        """)

        cursor.execute("""
        INSERT INTO
            reports_clean
            (signature_id, date_processed, uuid, release_channel, reason_id,
             process_type, os_version_id, os_name, flash_version_id, domain_id,
             address_id)
        VALUES
            (5, '{now}', 'this-is-suppose-to-be-a-uuid1',
             'Beta', 245, 'Browser', 71, 'Windows', 215, 631719, 11427500),

            (5, '{now}', 'this-is-suppose-to-be-a-uuid2',
             'Beta', 245, 'Browser', 71, 'Windows', 215, 631719, 11427500),

            (5, '{now}', 'this-is-suppose-to-be-a-uuid3',
             'Beta', 245, 'Browser', 71, 'Windows', 215, 631719, 11427500),

            (5, '{yesterday}', 'this-is-suppose-to-be-a-uuid4',
             'Beta', 245, 'Browser', 71, 'Windows', 215, 631719, 11427500),

            (5, '{yesterday}', 'this-is-suppose-to-be-a-uuid5',
             'Beta', 245, 'Browser', 71, 'Windows', 215, 631719, 11427500)
        """.format(now=cls.now, yesterday=yesterday))

        cursor.execute("""
            INSERT INTO crash_adu_by_build_signature
            (signature_id, signature, adu_date, build_date, buildid,
             crash_count, adu_count, os_name, channel, product_name)
            VALUES
            (1, 'canIhaveYourSignature()', '{yesterday}', '2014-03-01',
             '201403010101', 3, 1023, 'Mac OS X', 'release', 'WaterWolf'),
            (1, 'canIhaveYourSignature()', '{yesterday}', '2014-04-01',
             '201404010101', 4, 1024, 'Windows NT', 'release', 'WaterWolf'),
            (1, 'canIhaveYourSignature()', '2014-01-01', '2014-04-01',
             '201404010101', 4, 1024, 'Windows NT', 'release', 'WaterWolf'),
            (2, 'youMayNotHaveMySignature()', '{yesterday}', '2014-04-01',
             '201404010101', 4, 1024, 'Windows NT', 'release', 'WaterWolf'),
            (2, 'youMayNotHaveMySignature()', '{yesterday}', '2014-04-01',
             '201404010101', 4, 1024, 'Windows NT', 'release', 'WaterWolf')
        """.format(yesterday=yesterday))

        cls.connection.commit()
        cursor.close()
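
The fixture dates above are all derived from one "now" anchor, so relations like yesterday < now < sunset_date hold whenever the tests run. A stripped-down sketch of that anchoring with only the standard library (socorro's datetimeutil.utc_now() is replaced here by datetime.datetime.now(datetime.timezone.utc)):

import datetime

now = datetime.datetime.now(datetime.timezone.utc)
yesterday = now - datetime.timedelta(days=1)
build_date = now - datetime.timedelta(days=30)
sunset_date = now + datetime.timedelta(days=30)

# Every fixture row is interpolated from these anchors, so the relative
# ordering stays correct no matter when the suite executes.
assert build_date < yesterday < now < sunset_date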

Example 13

Project: SmartElect Source File: create_test_data.py
def create(center_without_office=False,
           num_copy_centers=DEFAULT_NUM_COPY_CENTERS,
           num_registrations=DEFAULT_NUM_REGISTRATIONS,
           num_registration_dates=DEFAULT_NUM_REGISTRATION_DATES,
           num_daily_reports=DEFAULT_NUM_DAILY_REPORTS,
           num_registration_centers=DEFAULT_NUM_REGISTRATION_CENTERS,
           num_subconstituencies=DEFAULT_NUM_SUBCONSTITUENCIES,
           use_existing_infra=False,
           num_inactive_centers_per_election=DEFAULT_NUM_INACTIVE_PER_ELECTION,
           num_no_reg_centers=DEFAULT_NUM_NO_REG_CENTERS,
           election_dates=()):
    assert settings.ENVIRONMENT not in ('production', 'testing')
    delete(delete_infra=not use_existing_infra)
    empty_report_store()  # Remove any old data from Redis

    # Figure out ~10% of "normal" centers...
    fraction_of_normal_centers = \
        max(1, int(0.1 * num_registration_centers)) if num_registration_centers else 0

    # If numbers of some weird center types weren't specified, use a small
    # fraction of normal centers.
    if num_copy_centers == DEFAULT_NUM_COPY_CENTERS:
        num_copy_centers = fraction_of_normal_centers
    if num_no_reg_centers == DEFAULT_NUM_NO_REG_CENTERS:
        num_no_reg_centers = fraction_of_normal_centers

    carrier = BackendFactory()

    if election_dates:
        elections = [
            ElectionFactory(
                polling_start_time=election_date.replace(hour=8),
                polling_end_time=election_date.replace(hour=20)
            )
            for election_date in election_dates
        ]
    else:
        election_date = PAST_DAY.replace(hour=8, microsecond=123456)
        election = ElectionFactory(
            polling_start_time=election_date,
            polling_end_time=election_date.replace(hour=20)
        )
        elections = (election,)

    if not use_existing_infra:
        OfficeFactory()
        ConstituencyFactory(name_english='first')
        SubConstituencyFactory(name_english='Benghazi')

    offices = Office.objects.all()
    copy_centers = []
    no_reg_centers = []
    staff_phones = []

    if use_existing_infra:
        # Pick centers that support registrations at random.
        centers = RegistrationCenter.objects.filter(reg_open=True)\
            .exclude(center_type=RegistrationCenter.Types.COPY)\
            .order_by('?')[:num_registration_centers]
        if num_copy_centers:  # user wants some, but there might not be any
            copy_centers = RegistrationCenter.objects.\
                filter(reg_open=True, center_type=RegistrationCenter.Types.COPY)\
                .order_by('?')[:num_copy_centers]
        if num_no_reg_centers:  # user wants some, but there might not be any
            no_reg_centers = RegistrationCenter.objects.\
                filter(reg_open=False).order_by('?')[:num_no_reg_centers]
        # Sliced querysets can't be combined directly, so turn each into a list first.
        all_kinds_of_centers = \
            list(centers) + list(copy_centers) + list(no_reg_centers)
    else:
        subconstituencies = SubConstituency.objects.exclude(pk=SPLIT_CENTER_SUBCONSTITUENCY_ID)
        subconstituencies = subconstituencies[:num_subconstituencies]

        centers = []
        for i in range(num_registration_centers):
            constituency = Constituency.objects.filter(name_english='first')[0]
            subconstituency = random.choice(subconstituencies)

            rc = RegistrationCenter(name='polling-center-%d' % i,
                                    center_id=CENTER_ID_MIN_INT_VALUE+i, constituency=constituency,
                                    subconstituency=subconstituency, office=random.choice(offices))
            rc.full_clean()
            rc.save()
            centers.append(rc)

        for i in range(num_copy_centers):
            original = random.choice(centers)
            # XXX This doesn't handle accidentally making too many copies of the same
            #     center, so make sure --num-centers is "big enough" w.r.t. --num-copy-centers.
            new_center_id = CENTER_ID_MIN_INT_VALUE + num_registration_centers + i
            copy = RegistrationCenter(name='Copy of %s' % original.name,
                                      center_id=new_center_id,
                                      constituency=original.constituency,
                                      subconstituency=original.subconstituency,
                                      office=original.office,
                                      center_type=RegistrationCenter.Types.COPY,
                                      copy_of=original)
            copy.full_clean()
            copy.save()
            copy_centers.append(copy)

        for i in range(num_no_reg_centers):
            constituency = Constituency.objects.filter(name_english='first')[0]
            subconstituency = random.choice(subconstituencies)
            center_id = CENTER_ID_MIN_INT_VALUE + num_registration_centers + num_copy_centers + i
            rc = RegistrationCenter(name='no-reg-polling-center-%d' % i,
                                    center_id=center_id,
                                    constituency=constituency,
                                    subconstituency=subconstituency,
                                    office=random.choice(offices),
                                    reg_open=False)
            rc.full_clean()
            rc.save()

        all_kinds_of_centers = centers + copy_centers + no_reg_centers

    if center_without_office:
        try:
            # by not specifying office and other infra, it will be "standalone"
            rc = RegistrationCenter(name='dummy-registration-center',
                                    center_id=UNUSED_CENTER_ID)
            rc.full_clean()
            rc.save()
        except ValidationError:
            pass  # assume that it already exists

    for election in elections:
        num_daily_reports_on_election_day = int(round(0.9 * num_daily_reports))
        centers_reported = set()
        for i in range(num_daily_reports_on_election_day):
            staff_phone_number = STAFF_PHONE_NUMBER_PATTERN % i
            from_center = random.choice(all_kinds_of_centers)
            ensure_staff_phone_exists(staff_phone_number, from_center, staff_phones,
                                      election.work_start_time + datetime.timedelta(minutes=5))

            # split votes between two options
            number_of_votes = (random.randint(1, 100), random.randint(1, 100))
            random_period_number = random.randint(FIRST_PERIOD_NUMBER, LAST_PERIOD_NUMBER)
            pr = PollingReport(election=election,
                               phone_number=staff_phone_number,
                               registration_center=from_center,
                               period_number=random_period_number,
                               num_voters=sum(number_of_votes),
                               creation_date=election.polling_start_time)
            pr.full_clean()
            pr.save()
            s = SMS(from_number=staff_phone_number, to_number=POLLING_REPORT_PHONE_NUMBER,
                    direction=INCOMING, message='my message', msg_type=SMS.POLLING_REPORT,
                    message_code=MESSAGE_1, carrier=carrier,
                    creation_date=election.polling_start_time)
            s.full_clean()
            s.save()

            if from_center in centers_reported:
                continue  # can't send but one PreliminaryVoteCount from a center

            # send a corresponding vote count
            for option, votes_for_option in enumerate(number_of_votes, start=1):
                pvc = PreliminaryVoteCount(election=election,
                                           phone_number=staff_phone_number,
                                           registration_center=from_center,
                                           option=option,
                                           num_votes=votes_for_option,
                                           creation_date=election.polling_start_time)
                pvc.full_clean()
                pvc.save()
                s = SMS(from_number=staff_phone_number,
                        to_number=PRELIMINARY_VOTE_COUNT_PHONE_NUMBER,
                        # XXX no specific message type for PreliminaryVoteCount
                        direction=INCOMING, message='my message', msg_type=SMS.POLLING_REPORT,
                        message_code=MESSAGE_1, carrier=carrier,
                        creation_date=election.polling_start_time)
                s.full_clean()
                s.save()

            centers_reported.add(from_center)

        # some daily reports on the day after
        for i in range(num_daily_reports - num_daily_reports_on_election_day):
            staff_phone_number = STAFF_PHONE_NUMBER_PATTERN % i
            rc = random.choice(all_kinds_of_centers)
            ensure_staff_phone_exists(staff_phone_number, rc, staff_phones,
                                      election.work_start_time + datetime.timedelta(minutes=5))
            report_creation_date = election.polling_start_time + datetime.timedelta(days=1)
            pr = PollingReport(election=election,
                               phone_number=staff_phone_number,
                               registration_center=rc,
                               period_number=LAST_PERIOD_NUMBER,  # day after counts as last period
                               num_voters=random.randint(1, 50),
                               creation_date=report_creation_date)
            pr.full_clean()
            pr.save()
            s = SMS(from_number=staff_phone_number, to_number=POLLING_REPORT_PHONE_NUMBER,
                    direction=INCOMING, message='my message', msg_type=SMS.POLLING_REPORT,
                    message_code=MESSAGE_1, carrier=carrier,
                    creation_date=election.polling_start_time)
            s.full_clean()
            s.save()

        # Tag some centers as inactive for the election.  We may or may not pick some that
        # sent messages as being inactive.
        num_inactive_centers_per_election = \
            min(num_inactive_centers_per_election, len(all_kinds_of_centers))
        if num_inactive_centers_per_election:
            reordered = all_kinds_of_centers
            random.shuffle(reordered)
            for i in range(num_inactive_centers_per_election):
                inactive_on_election = CenterClosedForElection(
                    registration_center=reordered[i], election=election
                )
                inactive_on_election.full_clean()
                inactive_on_election.save()

    tz = timezone(settings.TIME_ZONE)
    # Construct a datetime that is sensitive to timezone discrepancies:
    # 0-2am in Libya falls on a different date than the same instant in UTC or EDT.
    today_fragile = now().astimezone(tz).replace(hour=0, minute=59)

    # tz.normalize fixes up the date arithmetic when crossing DST boundaries
    creation_dates = \
        [tz.normalize((today_fragile -
                       datetime.timedelta(days=DAYS_BETWEEN_REGISTRATIONS * i)).astimezone(tz))
         for i in range(num_registration_dates)]

    citizens = []
    for i in range(num_registrations):
        # about 60% of registrations are for males, just as with actual data
        gender = MALE if random.randint(1, 100) <= 60 else FEMALE
        nat_id = '%d%011d' % (gender, i)

        creation_date = random.choice(creation_dates)
        modification_date = creation_date

        # Select voter ages from 18 years on up.
        voter_age = random.randint(18, 99)
        # If they were a certain age at any time yesterday, they are certainly that age at any time
        # today.
        yesterday = datetime.datetime.now().replace(tzinfo=tz) - datetime.timedelta(days=1)
        birth_date = datetime.date(yesterday.year - voter_age, yesterday.month, yesterday.day)
        civil_registry_id = random.randint(1, 99999999)
        citizen = CitizenFactory(civil_registry_id=civil_registry_id, national_id=nat_id,
                                 gender=gender, birth_date=birth_date)
        citizens.append(citizen)
        s = SMS(from_number=VOTER_PHONE_NUMBER_PATTERN % i, to_number=REGISTRATION_PHONE_NUMBER,
                citizen=citizen, direction=INCOMING,
                message='my reg message', msg_type=SMS.REGISTRATION, message_code=MESSAGE_1,
                carrier=carrier, creation_date=creation_date)
        s.full_clean()
        s.save()

        rc = random.choice(centers)

        confirmed = random.randint(1, 100) <= 80  # most are confirmed
        if confirmed:
            archive_time = None
        else:
            archive_time = random.choice(creation_dates)
        r = Registration(citizen=citizen, registration_center=rc, sms=s,
                         archive_time=archive_time,
                         creation_date=creation_date, modification_date=modification_date)
        r.full_clean()
        r.save()

    if num_registrations:  # if any data being generated
        # generate a variety of sms messages
        for i in range(NUM_RANDOM_SMS_MESSAGES):
            sms_type = random.choice(SMS.MESSAGE_TYPES)[0]
            staff_phone = random.choice(staff_phones)
            s = SMS(from_number=staff_phone.phone_number, to_number=RANDOM_MESSAGE_PHONE_NUMBER,
                    citizen=random.choice(citizens), direction=INCOMING,
                    message='my long random message',
                    msg_type=sms_type, message_code=MESSAGE_1, carrier=carrier,
                    creation_date=random.choice(creation_dates))
            s.full_clean()
            s.save()

        for election in elections:
            for rc in centers:
                i = random.randint(8888, 9999)
                staff_phone_number = STAFF_PHONE_NUMBER_PATTERN % i
                ensure_staff_phone_exists(staff_phone_number, rc, staff_phones,
                                          election.work_start_time + datetime.timedelta(minutes=5))
                center_open = CenterOpen(
                    election=election,
                    phone_number=staff_phone_number, registration_center=rc,
                    creation_date=election.polling_start_time.replace(hour=random.randint(0, 10),
                                                                      minute=23))
                center_open.full_clean()
                center_open.save()
                s = SMS(from_number=staff_phone_number, to_number=ACTIVATE_PHONE_NUMBER,
                        direction=INCOMING, message='my message', msg_type=SMS.ACTIVATE,
                        message_code=MESSAGE_1, carrier=carrier,
                        creation_date=election.polling_start_time)
                s.full_clean()
                s.save()
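
The tz.normalize(...) wrapper above is the pytz idiom for keeping date arithmetic correct across DST transitions: subtracting a timedelta from a pytz-localized datetime does not adjust the UTC offset, so normalize has to repair it afterwards. With the standard-library zoneinfo module (Python 3.9+) the same elapsed-time arithmetic can be done by converting to UTC first; a minimal sketch with illustrative constants, not the project's own:

import datetime
from zoneinfo import ZoneInfo

DAYS_BETWEEN_REGISTRATIONS = 7    # illustrative value
NUM_REGISTRATION_DATES = 4        # illustrative value

tz = ZoneInfo("Africa/Tripoli")
today_fragile = datetime.datetime.now(tz).replace(hour=0, minute=59)

# Subtracting in UTC and converting back gives true elapsed-time steps,
# which is what pytz's normalize() achieves after naive subtraction.
creation_dates = [
    (today_fragile.astimezone(datetime.timezone.utc)
     - datetime.timedelta(days=DAYS_BETWEEN_REGISTRATIONS * i)).astimezone(tz)
    for i in range(NUM_REGISTRATION_DATES)
]

for stamp in creation_dates:
    print(stamp.isoformat())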

Example 14

Project: calibre Source File: epub_mobi.py
    def run(self, path_to_output, opts, db, notification=DummyReporter()):
        from calibre.library.catalogs.epub_mobi_builder import CatalogBuilder
        from calibre.utils.logging import default_log as log
        from calibre.utils.config import JSONConfig

        # If preset specified from the cli, insert stored options from JSON file
        if hasattr(opts, 'preset') and opts.preset:
            available_presets = JSONConfig("catalog_presets")
            if opts.preset not in available_presets:
                if available_presets:
                    print(_('Error: Preset "%s" not found.' % opts.preset))
                    print(_('Stored presets: %s' % ', '.join([p for p in sorted(available_presets.keys())])))
                else:
                    print(_('Error: No stored presets.'))
                return 1

            # Copy the relevant preset values to the opts object
            for item in available_presets[opts.preset]:
                if item not in ['exclusion_rules_tw', 'format', 'prefix_rules_tw']:
                    setattr(opts, item, available_presets[opts.preset][item])

            # Provide an unconnected device
            opts.connected_device = {
                         'is_device_connected': False,
                         'kind': None,
                         'name': None,
                         'save_template': None,
                         'serial': None,
                         'storage': None,
                        }

            # Convert prefix_rules and exclusion_rules from JSON lists to tuples
            prs = []
            for rule in opts.prefix_rules:
                prs.append(tuple(rule))
            opts.prefix_rules = tuple(prs)

            ers = []
            for rule in opts.exclusion_rules:
                ers.append(tuple(rule))
            opts.exclusion_rules = tuple(ers)

        opts.log = log
        opts.fmt = self.fmt = path_to_output.rpartition('.')[2]

        # Add local options
        opts.creator = '%s, %s %s, %s' % (strftime('%A'), strftime('%B'), strftime('%d').lstrip('0'), strftime('%Y'))
        opts.creator_sort_as = '%s %s' % ('calibre', strftime('%Y-%m-%d'))
        opts.connected_kindle = False

        # Finalize output_profile
        op = opts.output_profile
        if op is None:
            op = 'default'

        if opts.connected_device['name'] and 'kindle' in opts.connected_device['name'].lower():
            opts.connected_kindle = True
            if opts.connected_device['serial'] and \
               opts.connected_device['serial'][:4] in ['B004', 'B005']:
                op = "kindle_dx"
            else:
                op = "kindle"

        opts.description_clip = 380 if op.endswith('dx') or 'kindle' not in op else 100
        opts.author_clip = 100 if op.endswith('dx') or 'kindle' not in op else 60
        opts.output_profile = op

        opts.basename = "Catalog"
        opts.cli_environment = not hasattr(opts, 'sync')

        # Hard-wired to always sort descriptions by author, with series after non-series
        opts.sort_descriptions_by_author = True

        build_log = []

        build_log.append(u"%s('%s'): Generating %s %sin %s environment, locale: '%s'" %
            (self.name,
             current_library_name(),
             self.fmt,
             'for %s ' % opts.output_profile if opts.output_profile else '',
             'CLI' if opts.cli_environment else 'GUI',
             calibre_langcode_to_name(canonicalize_lang(get_lang()), localize=False))
             )

        # If exclude_genre is blank, assume user wants all tags as genres
        if opts.exclude_genre.strip() == '':
            # opts.exclude_genre = '\[^.\]'
            # build_log.append(" converting empty exclude_genre to '\[^.\]'")
            opts.exclude_genre = 'a^'
            build_log.append(" converting empty exclude_genre to 'a^'")
        if opts.connected_device['is_device_connected'] and \
           opts.connected_device['kind'] == 'device':
            if opts.connected_device['serial']:
                build_log.append(u" connected_device: '%s' #%s%s " %
                    (opts.connected_device['name'],
                     opts.connected_device['serial'][0:4],
                     'x' * (len(opts.connected_device['serial']) - 4)))
                for storage in opts.connected_device['storage']:
                    if storage:
                        build_log.append(u"  mount point: %s" % storage)
            else:
                build_log.append(u" connected_device: '%s'" % opts.connected_device['name'])
                try:
                    for storage in opts.connected_device['storage']:
                        if storage:
                            build_log.append(u"  mount point: %s" % storage)
                except:
                    build_log.append(u"  (no mount points)")
        else:
            build_log.append(u" connected_device: '%s'" % opts.connected_device['name'])

        opts_dict = vars(opts)
        if opts_dict['ids']:
            build_log.append(" book count: %d" % len(opts_dict['ids']))

        sections_list = []
        if opts.generate_authors:
            sections_list.append('Authors')
        if opts.generate_titles:
            sections_list.append('Titles')
        if opts.generate_series:
            sections_list.append('Series')
        if opts.generate_genres:
            sections_list.append('Genres')
        if opts.generate_recently_added:
            sections_list.append('Recently Added')
        if opts.generate_descriptions:
            sections_list.append('Descriptions')

        if not sections_list:
            if opts.cli_environment:
                opts.log.warn('*** No Section switches specified, enabling all Sections ***')
                opts.generate_authors = True
                opts.generate_titles = True
                opts.generate_series = True
                opts.generate_genres = True
                opts.generate_recently_added = True
                opts.generate_descriptions = True
                sections_list = ['Authors', 'Titles', 'Series', 'Genres', 'Recently Added', 'Descriptions']
            else:
                opts.log.warn('\n*** No enabled Sections, terminating catalog generation ***')
                return ["No Included Sections", "No enabled Sections.\nCheck E-book options tab\n'Included sections'\n"]
        if opts.fmt == 'mobi' and sections_list == ['Descriptions']:
            warning = _("\n*** Adding 'By Authors' Section required for MOBI output ***")
            opts.log.warn(warning)
            sections_list.insert(0, 'Authors')
            opts.generate_authors = True

        opts.log(u" Sections: %s" % ', '.join(sections_list))
        opts.section_list = sections_list

        # Limit thumb_width to 1.0" - 2.0"
        try:
            if float(opts.thumb_width) < float(self.THUMB_SMALLEST):
                log.warning("coercing thumb_width from '%s' to '%s'" % (opts.thumb_width, self.THUMB_SMALLEST))
                opts.thumb_width = self.THUMB_SMALLEST
            if float(opts.thumb_width) > float(self.THUMB_LARGEST):
                log.warning("coercing thumb_width from '%s' to '%s'" % (opts.thumb_width, self.THUMB_LARGEST))
                opts.thumb_width = self.THUMB_LARGEST
            opts.thumb_width = "%.2f" % float(opts.thumb_width)
        except:
            log.error("coercing thumb_width from '%s' to '%s'" % (opts.thumb_width, self.THUMB_SMALLEST))
            opts.thumb_width = "1.0"

        # eval prefix_rules if passed from command line
        if type(opts.prefix_rules) is not tuple:
            try:
                opts.prefix_rules = eval(opts.prefix_rules)
            except:
                log.error("malformed --prefix-rules: %s" % opts.prefix_rules)
                raise
            for rule in opts.prefix_rules:
                if len(rule) != 4:
                    log.error("incorrect number of args for --prefix-rules: %s" % repr(rule))

        # eval exclusion_rules if passed from command line
        if type(opts.exclusion_rules) is not tuple:
            try:
                opts.exclusion_rules = eval(opts.exclusion_rules)
            except:
                log.error("malformed --exclusion-rules: %s" % opts.exclusion_rules)
                raise
            for rule in opts.exclusion_rules:
                if len(rule) != 3:
                    log.error("incorrect number of args for --exclusion-rules: %s" % repr(rule))

        # Display opts
        keys = sorted(opts_dict.keys())
        build_log.append(" opts:")
        for key in keys:
            if key in ['catalog_title', 'author_clip', 'connected_kindle', 'creator',
                       'cross_reference_authors', 'description_clip', 'exclude_book_marker',
                       'exclude_genre', 'exclude_tags', 'exclusion_rules', 'fmt',
                       'genre_source_field', 'header_note_source_field', 'merge_comments_rule',
                       'output_profile', 'prefix_rules', 'preset', 'read_book_marker',
                       'search_text', 'sort_by', 'sort_descriptions_by_author', 'sync',
                       'thumb_width', 'use_existing_cover', 'wishlist_tag']:
                build_log.append("  %s: %s" % (key, repr(opts_dict[key])))
        if opts.verbose:
            log('\n'.join(line for line in build_log))

        # Capture start_time
        opts.start_time = time.time()

        self.opts = opts

        if opts.verbose:
            log.info(" Begin catalog source generation (%s)" %
                     str(datetime.timedelta(seconds=int(time.time() - opts.start_time))))

        # Launch the Catalog builder
        catalog = CatalogBuilder(db, opts, self, report_progress=notification)

        try:
            catalog.build_sources()
            if opts.verbose:
                log.info(" Completed catalog source generation (%s)\n"  %
                         str(datetime.timedelta(seconds=int(time.time() - opts.start_time))))
        except (AuthorSortMismatchException, EmptyCatalogException), e:
            log.error(" *** Terminated catalog generation: %s ***" % e)
        except:
            log.error(" unhandled exception in catalog generator")
            raise

        else:
            recommendations = []
            recommendations.append(('remove_fake_margins', False,
                OptionRecommendation.HIGH))
            recommendations.append(('comments', '', OptionRecommendation.HIGH))

            """
            >>> Use to debug generated catalog code before pipeline conversion <<<
            """
            GENERATE_DEBUG_EPUB = False
            if GENERATE_DEBUG_EPUB:
                catalog_debug_path = os.path.join(os.path.expanduser('~'), 'Desktop', 'Catalog debug')
                setattr(opts, 'debug_pipeline', os.path.expanduser(catalog_debug_path))

            dp = getattr(opts, 'debug_pipeline', None)
            if dp is not None:
                recommendations.append(('debug_pipeline', dp,
                    OptionRecommendation.HIGH))

            if opts.output_profile and opts.output_profile.startswith("kindle"):
                recommendations.append(('output_profile', opts.output_profile,
                    OptionRecommendation.HIGH))
                recommendations.append(('book_producer', opts.output_profile,
                    OptionRecommendation.HIGH))
                if opts.fmt == 'mobi':
                    recommendations.append(('no_inline_toc', True,
                        OptionRecommendation.HIGH))
                    recommendations.append(('verbose', 2,
                        OptionRecommendation.HIGH))

            # Use existing cover or generate new cover
            cpath = None
            existing_cover = False
            try:
                search_text = 'title:"%s" author:%s' % (
                        opts.catalog_title.replace('"', '\\"'), 'calibre')
                matches = db.search(search_text, return_matches=True, sort_results=False)
                if matches:
                    cpath = db.cover(matches[0], index_is_id=True, as_path=True)
                    if cpath and os.path.exists(cpath):
                        existing_cover = True
            except:
                pass

            if self.opts.use_existing_cover and not existing_cover:
                log.warning("no existing catalog cover found")

            if self.opts.use_existing_cover and existing_cover:
                recommendations.append(('cover', cpath, OptionRecommendation.HIGH))
                log.info("using existing catalog cover")
            else:
                from calibre.ebooks.covers import calibre_cover2
                log.info("replacing catalog cover")
                new_cover_path = PersistentTemporaryFile(suffix='.jpg')
                new_cover = calibre_cover2(opts.catalog_title, 'calibre')
                new_cover_path.write(new_cover)
                new_cover_path.close()
                recommendations.append(('cover', new_cover_path.name, OptionRecommendation.HIGH))

            # Run ebook-convert
            from calibre.ebooks.conversion.plumber import Plumber
            plumber = Plumber(os.path.join(catalog.catalog_path, opts.basename + '.opf'),
                            path_to_output, log, report_progress=notification,
                            abort_after_input_dump=False)
            plumber.merge_ui_recommendations(recommendations)
            plumber.run()

            try:
                os.remove(cpath)
            except:
                pass

            if GENERATE_DEBUG_EPUB:
                from calibre.ebooks.epub import initialize_container
                from calibre.ebooks.tweak import zip_rebuilder
                from calibre.utils.zipfile import ZipFile
                input_path = os.path.join(catalog_debug_path, 'input')
                epub_shell = os.path.join(catalog_debug_path, 'epub_shell.zip')
                initialize_container(epub_shell, opf_name='content.opf')
                with ZipFile(epub_shell, 'r') as zf:
                    zf.extractall(path=input_path)
                os.remove(epub_shell)
                zip_rebuilder(input_path, os.path.join(catalog_debug_path, 'input.epub'))

            if opts.verbose:
                log.info(" Catalog creation complete (%s)\n" %
                     str(datetime.timedelta(seconds=int(time.time() - opts.start_time))))

        # returns to gui2.actions.catalog:catalog_generated()
        return catalog.error
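
For reference, the timing pattern this example relies on (wrapping a whole number of elapsed seconds in a timedelta so that str() renders it as H:MM:SS) can be reproduced standalone; this is a minimal sketch with an illustrative workload, not calibre code:

import datetime
import time

start_time = time.time()
time.sleep(1.5)  # stand-in for the real catalog work
elapsed = datetime.timedelta(seconds=int(time.time() - start_time))
print(" Completed catalog source generation (%s)" % elapsed)  # e.g. (0:00:01)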

Example 15

Project: luci-py Source File: task_result_test.py
  def test_integration(self):
    # Creates a TaskRequest, along its TaskResultSummary and TaskToRun. Have a
    # bot reap the task, and complete the task. Ensure the resulting
    # TaskResultSummary and TaskRunResult are properly updated.
    request = mkreq(_gen_request())
    result_summary = task_result.new_result_summary(request)
    to_run = task_to_run.new_task_to_run(request)
    result_summary.modified_ts = utils.utcnow()
    ndb.transaction(lambda: ndb.put_multi([result_summary, to_run]))
    expected = {
      'abandoned_ts': None,
      'bot_dimensions': None,
      'bot_id': None,
      'bot_version': None,
      'cipd_pins': None,
      'children_task_ids': [],
      'completed_ts': None,
      'costs_usd': [],
      'cost_saved_usd': None,
      'created_ts': self.now,
      'deduped_from': None,
      'duration': None,
      'exit_code': None,
      'failure': False,
      'id': '1d69b9f088008810',
      'internal_failure': False,
      'modified_ts': self.now,
      'name': u'Request name',
      'outputs_ref': None,
      'properties_hash': None,
      'server_versions': [],
      'started_ts': None,
      'state': task_result.State.PENDING,
      'try_number': None,
      'tags': [
        u'pool:default',
        u'priority:50',
        u'service_account:none',
        u'tag:1',
        u'user:Jesus',
      ],
      'user': u'Jesus',
    }
    self.assertEqual(expected, result_summary.to_dict())

    # Nothing changed 2 secs later except latency.
    self.mock_now(self.now, 2)
    self.assertEqual(expected, result_summary.to_dict())

    # Task is reaped after 2 seconds (4 secs total).
    reap_ts = self.now + datetime.timedelta(seconds=4)
    self.mock_now(reap_ts)
    to_run.queue_number = None
    to_run.put()
    run_result = task_result.new_run_result(request, 1, 'localhost', 'abc', {})
    run_result.modified_ts = utils.utcnow()
    result_summary.set_from_run_result(run_result, request)
    ndb.transaction(lambda: ndb.put_multi((result_summary, run_result)))
    expected = {
      'abandoned_ts': None,
      'bot_dimensions': {},
      'bot_id': u'localhost',
      'bot_version': u'abc',
      'cipd_pins': None,
      'children_task_ids': [],
      'completed_ts': None,
      'costs_usd': [0.],
      'cost_saved_usd': None,
      'created_ts': self.now,
      'deduped_from': None,
      'duration': None,
      'exit_code': None,
      'failure': False,
      'id': '1d69b9f088008810',
      'internal_failure': False,
      'modified_ts': reap_ts,
      'name': u'Request name',
      'outputs_ref': None,
      'properties_hash': None,
      'server_versions': [u'v1a'],
      'started_ts': reap_ts,
      'state': task_result.State.RUNNING,
      'tags': [
        u'pool:default',
        u'priority:50',
        u'service_account:none',
        u'tag:1',
        u'user:Jesus',
      ],
      'try_number': 1,
      'user': u'Jesus',
    }
    self.assertEqual(expected, result_summary.key.get().to_dict())

    # Task completed after 2 seconds (6 secs total), the task has been running
    # for 2 seconds.
    complete_ts = self.now + datetime.timedelta(seconds=6)
    self.mock_now(complete_ts)
    run_result.completed_ts = complete_ts
    run_result.duration = 0.1
    run_result.exit_code = 0
    run_result.state = task_result.State.COMPLETED
    run_result.modified_ts = utils.utcnow()
    task_result.PerformanceStats(
        key=task_pack.run_result_key_to_performance_stats_key(run_result.key),
        bot_overhead=0.1,
        isolated_download=task_result.OperationStats(
            duration=0.05, initial_number_items=10, initial_size=10000,
            items_cold='foo', items_hot='bar'),
        isolated_upload=task_result.OperationStats(
            duration=0.01, items_cold='foo')).put()
    ndb.transaction(lambda: ndb.put_multi(run_result.append_output('foo', 0)))
    result_summary.set_from_run_result(run_result, request)
    ndb.transaction(lambda: ndb.put_multi((result_summary, run_result)))
    expected = {
      'abandoned_ts': None,
      'bot_dimensions': {},
      'bot_id': u'localhost',
      'bot_version': u'abc',
      'cipd_pins': None,
      'children_task_ids': [],
      'completed_ts': complete_ts,
      'costs_usd': [0.],
      'cost_saved_usd': None,
      'created_ts': self.now,
      'deduped_from': None,
      'duration': 0.1,
      'exit_code': 0,
      'failure': False,
      'id': '1d69b9f088008810',
      'internal_failure': False,
      'modified_ts': complete_ts,
      'name': u'Request name',
      'outputs_ref': None,
      'properties_hash': None,
      'server_versions': [u'v1a'],
      'started_ts': reap_ts,
      'state': task_result.State.COMPLETED,
      'tags': [
        u'pool:default',
        u'priority:50',
        u'service_account:none',
        u'tag:1',
        u'user:Jesus',
      ],
      'try_number': 1,
      'user': u'Jesus',
    }
    self.assertEqual(expected, result_summary.key.get().to_dict())
    expected = {
      'bot_overhead': 0.1,
      'isolated_download': {
        'duration': 0.05,
        'initial_number_items': 10,
        'initial_size': 10000,
        'items_cold': 'foo',
        'items_hot': 'bar',
      },
      'isolated_upload': {
        'duration': 0.01,
        'initial_number_items': None,
        'initial_size': None,
        'items_cold': 'foo',
        'items_hot': None,
      },
      'package_installation': {
        'duration': None,
        'initial_number_items': None,
        'initial_size': None,
        'items_cold': None,
        'items_hot': None,
      },
    }
    self.assertEqual(expected, result_summary.performance_stats.to_dict())
    self.assertEqual('foo', result_summary.get_output())
    self.assertEqual(
        datetime.timedelta(seconds=2),
        result_summary.duration_as_seen_by_server)
    self.assertEqual(
        datetime.timedelta(seconds=0.1),
        result_summary.duration_now(utils.utcnow()))
    self.assertEqual(
        datetime.timedelta(seconds=4), result_summary.pending)
    self.assertEqual(
        datetime.timedelta(seconds=4),
        result_summary.pending_now(utils.utcnow()))

    self.assertEqual(
        task_pack.pack_result_summary_key(result_summary.key),
        result_summary.task_id)
    self.assertEqual(complete_ts, result_summary.ended_ts)
    self.assertEqual(
        task_pack.pack_run_result_key(run_result.key),
        run_result.task_id)
    self.assertEqual(complete_ts, run_result.ended_ts)
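
The timestamp arithmetic in this test reduces to two timedelta behaviours: adding a timedelta to a datetime to produce later mock timestamps, and building sub-second durations from a float number of seconds. A minimal standalone sketch with made-up times (not part of luci-py):

import datetime

now = datetime.datetime(2016, 1, 2, 3, 4, 5)
reap_ts = now + datetime.timedelta(seconds=4)       # task reaped 4s after creation
complete_ts = now + datetime.timedelta(seconds=6)   # task completed 2s after that

assert complete_ts - reap_ts == datetime.timedelta(seconds=2)
assert datetime.timedelta(seconds=0.1) == datetime.timedelta(milliseconds=100)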

Example 16

Project: luci-py Source File: task_request_test.py
  def test_bad_values(self):
    with self.assertRaises(AssertionError):
      mkreq(None)
    with self.assertRaises(AssertionError):
      mkreq({})
    with self.assertRaises(AttributeError):
      mkreq(_gen_request(properties={'foo': 'bar'}))
    mkreq(_gen_request())

    # Command.
    with self.assertRaises(datastore_errors.BadValueError):
      mkreq(_gen_request(properties=dict(command=[])))
    with self.assertRaises(datastore_errors.BadValueError):
      mkreq(_gen_request(properties=dict(command={'a': 'b'})))
    with self.assertRaises(datastore_errors.BadValueError):
      mkreq(_gen_request(properties=dict(command='python')))
    mkreq(_gen_request(properties=dict(command=['python'])))
    mkreq(_gen_request(properties=dict(command=[u'python'])))

    # CIPD.
    def mkcipdreq(idempotent=False, **cipd_input):
      mkreq(_gen_request(
          properties=dict(idempotent=idempotent, cipd_input=cipd_input)))

    with self.assertRaises(datastore_errors.BadValueError):
      mkcipdreq(packages=[{}])
    with self.assertRaises(datastore_errors.BadValueError):
      mkcipdreq(packages=[
        dict(package_name='infra|rm', path='.', version='latest')])
    with self.assertRaises(datastore_errors.BadValueError):
      mkcipdreq(packages=[dict(package_name='rm', path='.')])
    with self.assertRaises(datastore_errors.BadValueError):
      mkcipdreq(packages=[dict(package_name='rm', version='latest')])
    with self.assertRaises(datastore_errors.BadValueError):
      mkcipdreq(packages=[dict(package_name='rm', path='/', version='latest')])
    with self.assertRaises(datastore_errors.BadValueError):
      mkcipdreq(packages=[dict(package_name='rm', path='/a', version='latest')])
    with self.assertRaises(datastore_errors.BadValueError):
      mkcipdreq(packages=[
        dict(package_name='rm', path='a/..', version='latest')])
    with self.assertRaises(datastore_errors.BadValueError):
      mkcipdreq(packages=[
        dict(package_name='rm', path='a/./b', version='latest')])
    with self.assertRaises(datastore_errors.BadValueError):
      mkcipdreq(packages=[
        dict(package_name='rm', path='.', version='latest'),
        dict(package_name='rm', path='.', version='canary'),
      ])
    with self.assertRaises(datastore_errors.BadValueError):
      mkcipdreq(
          idempotent=True,
          packages=[dict(package_name='rm', path='.', version='latest')])
    with self.assertRaises(datastore_errors.BadValueError):
      mkcipdreq(server='abc')
    with self.assertRaises(datastore_errors.BadValueError):
      mkcipdreq(client_package=dict(package_name='--bad package--'))
    mkcipdreq()
    mkcipdreq(packages=[dict(package_name='rm', path='.', version='latest')])
    mkcipdreq(
        client_package=dict(
            package_name='infra/tools/cipd/${platform}',
            version='git_revision:daedbeef',
        ),
        packages=[dict(package_name='rm', path='.', version='latest')],
        server='https://chrome-infra-packages.appspot.com',
    )

    # Named caches.
    mkcachereq = lambda *c: mkreq(_gen_request(properties=dict(caches=c)))
    with self.assertRaises(datastore_errors.BadValueError):
      mkcachereq(dict(name='', path='git_cache'))
    with self.assertRaises(datastore_errors.BadValueError):
      mkcachereq(dict(name='git_chromium', path=''))
    with self.assertRaises(datastore_errors.BadValueError):
      mkcachereq(
          dict(name='git_chromium', path='git_cache'),
          dict(name='git_v8', path='git_cache'),
      )
    with self.assertRaises(datastore_errors.BadValueError):
      mkcachereq(
          dict(name='git_chromium', path='git_cache'),
          dict(name='git_chromium', path='git_cache2'),
      )
    with self.assertRaises(datastore_errors.BadValueError):
      mkcachereq(dict(name='git_chromium', path='/git_cache'))
    with self.assertRaises(datastore_errors.BadValueError):
      mkcachereq(dict(name='git_chromium', path='../git_cache'))
    with self.assertRaises(datastore_errors.BadValueError):
      mkcachereq(dict(name='git_chromium', path='git_cache/../../a'))
    with self.assertRaises(datastore_errors.BadValueError):
      mkcachereq(dict(name='git_chromium', path='../git_cache'))
    with self.assertRaises(datastore_errors.BadValueError):
      mkcachereq(dict(name='git_chromium', path='git_cache//a'))
    with self.assertRaises(datastore_errors.BadValueError):
      mkcachereq(dict(name='git_chromium', path='a/./git_cache'))
    with self.assertRaises(datastore_errors.BadValueError):
      mkcachereq(dict(name='has space', path='git_cache'))
    with self.assertRaises(datastore_errors.BadValueError):
      mkcachereq(dict(name='CAPITAL', path='git_cache'))
    with self.assertRaises(datastore_errors.BadValueError):
      # A CIPD package and named caches cannot be mapped to the same path.
      mkreq(_gen_request(properties=dict(
          caches=[dict(name='git_chromium', path='git_cache')],
          cipd_input=dict(packages=[
            dict(package_name='foo', path='git_cache', version='latest')]))))
    mkcachereq()
    mkcachereq(dict(name='git_chromium', path='git_cache'))
    mkcachereq(
        dict(name='git_chromium', path='git_cache'),
        dict(name='build_chromium', path='out'))

    # Dimensions.
    with self.assertRaises(TypeError):
      mkreq(_gen_request(properties=dict(dimensions=[])))
    with self.assertRaises(datastore_errors.BadValueError):
      mkreq(_gen_request(properties=dict(dimensions={})))
    with self.assertRaises(datastore_errors.BadValueError):
      mkreq(_gen_request(
          properties=dict(dimensions={u'id': u'b', u'a:': u'b'})))
    mkreq(_gen_request(
        properties=dict(dimensions={u'id': u'b', u'a.': u'b'})))

    # Environment.
    with self.assertRaises(TypeError):
      mkreq(_gen_request(properties=dict(env=[])))
    with self.assertRaises(TypeError):
      mkreq(_gen_request(properties=dict(env={u'a': 1})))
    mkreq(_gen_request(properties=dict(env={})))

    # Priority.
    with self.assertRaises(datastore_errors.BadValueError):
      mkreq(_gen_request(priority=task_request.MAXIMUM_PRIORITY+1))
    mkreq(_gen_request(priority=task_request.MAXIMUM_PRIORITY))

    # Execution timeout.
    with self.assertRaises(datastore_errors.BadValueError):
      mkreq(_gen_request(
          properties=dict(execution_timeout_secs=task_request._ONE_DAY_SECS+1)))
    mkreq(_gen_request(
        properties=dict(execution_timeout_secs=task_request._ONE_DAY_SECS)))

    # Expiration.
    now = utils.utcnow()
    with self.assertRaises(datastore_errors.BadValueError):
      mkreq(_gen_request(
          created_ts=now,
          expiration_ts=now + datetime.timedelta(
              seconds=task_request._MIN_TIMEOUT_SECS-1)))
    with self.assertRaises(datastore_errors.BadValueError):
      mkreq(_gen_request(
          created_ts=now,
          expiration_ts=
              now+datetime.timedelta(seconds=task_request._SEVEN_DAYS_SECS+1)))
    mkreq(_gen_request(
        created_ts=now,
        expiration_ts=
            now+datetime.timedelta(seconds=task_request._MIN_TIMEOUT_SECS)))
    mkreq(_gen_request(
        created_ts=now,
        expiration_ts=
            now + datetime.timedelta(seconds=task_request._SEVEN_DAYS_SECS)))

    # Try with isolated/isolatedserver/namespace.
    with self.assertRaises(datastore_errors.BadValueError):
      # Both command and inputs_ref.isolated.
      mkreq(_gen_request(properties=dict(
          command=['see', 'spot', 'run'],
          inputs_ref=task_request.FilesRef(
              isolated='deadbeef',
              isolatedserver='http://localhost:1',
              namespace='default-gzip'))))
    with self.assertRaises(datastore_errors.BadValueError):
      # inputs_ref without server/namespace.
      mkreq(_gen_request(properties=dict(inputs_ref=task_request.FilesRef())))
    with self.assertRaises(datastore_errors.BadValueError):
      mkreq(_gen_request(properties=dict(
          command=[],
          inputs_ref=task_request.FilesRef(
              isolatedserver='https://isolateserver.appspot.com',
              namespace='default-gzip^^^',
          ))))
    mkreq(_gen_request(properties=dict(
        command=[],
        inputs_ref=task_request.FilesRef(
            isolated='deadbeefdeadbeefdeadbeefdeadbeefdeadbeef',
            isolatedserver='http://localhost:1',
            namespace='default-gzip'))))
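
The expiration checks above follow a common validation pattern: express the allowed window as timedeltas and compare the requested offset against the minimum and maximum bounds. A minimal sketch with illustrative constants (the real limits are defined in task_request, not here):

import datetime

MIN_TIMEOUT_SECS = 30              # illustrative values only
SEVEN_DAYS_SECS = 7 * 24 * 60 * 60

def validate_expiration(created_ts, expiration_ts):
    offset = expiration_ts - created_ts
    if not (datetime.timedelta(seconds=MIN_TIMEOUT_SECS) <= offset <=
            datetime.timedelta(seconds=SEVEN_DAYS_SECS)):
        raise ValueError('expiration out of range: %s' % offset)

now = datetime.datetime.utcnow()
validate_expiration(now, now + datetime.timedelta(hours=1))  # passes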

Example 17

Project: pyculiarity Source File: detect_ts.py
def detect_ts(df, max_anoms=0.10, direction='pos',
              alpha=0.05, only_last=None, threshold=None,
              e_value=False, longterm=False,
              piecewise_median_period_weeks=2, plot=False,
              y_log=False, xlabel = '', ylabel = 'count',
              title=None, verbose=False):
    """
    Anomaly Detection Using Seasonal Hybrid ESD Test
    A technique for detecting anomalies in seasonal univariate time series where the input is a
    series of <timestamp, value> pairs.

    Args:

    df: Time series as a two column data frame where the first column consists of the
    timestamps and the second column consists of the observations.

    max_anoms: Maximum number of anomalies that S-H-ESD will detect as a percentage of the
    data.

    direction: Directionality of the anomalies to be detected. Options are: ('pos' | 'neg' | 'both').

    alpha: The level of statistical significance with which to accept or reject anomalies.

    only_last: Find and report anomalies only within the last day or hr in the time series. Options: (None | 'day' | 'hr')

    threshold: Only report positive going anoms above the threshold specified. Options are: (None | 'med_max' | 'p95' | 'p99')

    e_value: Add an additional column to the anoms output containing the expected value.

    longterm: Increase anom detection efficacy for time series that are longer than a month.

    See Details below.
    piecewise_median_period_weeks: The piecewise median time window as described in Vallis, Hochenbaum, and Kejariwal (2014). Defaults to 2.

    plot: (Currently unsupported) A flag indicating if a plot with both the time series and the estimated anoms,
    indicated by circles, should also be returned.

    y_log: Apply log scaling to the y-axis. This helps with viewing plots that have extremely
    large positive anomalies relative to the rest of the data.

    xlabel: X-axis label to be added to the output plot.
    ylabel: Y-axis label to be added to the output plot.

    Details


    'longterm' This option should be set when the input time series is longer than a month.
    The option enables the approach described in Vallis, Hochenbaum, and Kejariwal (2014).
    'threshold' Filter all negative anomalies and those anomalies whose magnitude is smaller
    than one of the specified thresholds which include: the median
    of the daily max values (med_max), the 95th percentile of the daily max values (p95), and the
    99th percentile of the daily max values (p99).
    'title' Title for the output plot.
    'verbose' Enable debug messages

    The returned value is a dictionary with the following components:
      anoms: Data frame containing timestamps, values, and optionally expected values.
      plot: A graphical object if plotting was requested by the user. The plot contains
      the estimated anomalies annotated on the input time series
    """

    if not isinstance(df, DataFrame):
        raise ValueError("data must be a single data frame.")
    else:
        if len(df.columns) != 2 or not df.iloc[:,1].map(np.isreal).all():
            raise ValueError(("data must be a 2 column data.frame, with the"
                              "first column being a set of timestamps, and "
                              "the second coloumn being numeric values."))

        if (not (df.dtypes[0].type is np.datetime64)
            and not (df.dtypes[0].type is np.int64)):
            df = format_timestamp(df)

    if list(df.columns.values) != ["timestamp", "value"]:
        df.columns = ["timestamp", "value"]

    # Sanity check all input parameters
    if max_anoms > 0.49:
        length = len(df.value)
        raise ValueError(
            ("max_anoms must be less than 50% of "
             "the data points (max_anoms =%f data_points =%s).")
                         % (round(max_anoms * length, 0), length))

    if not direction in ['pos', 'neg', 'both']:
        raise ValueError("direction options are: pos | neg | both.")

    if not (0.01 <= alpha <= 0.1):
        if verbose:
            import warnings
            warnings.warn(("alpha is the statistical significance, "
                           "and is usually between 0.01 and 0.1"))

    if only_last and not only_last in ['day', 'hr']:
        raise ValueError("only_last must be either 'day' or 'hr'")

    if not threshold in [None,'med_max','p95','p99']:
        raise ValueError("threshold options are: None | med_max | p95 | p99")

    if not isinstance(e_value, bool):
        raise ValueError("e_value must be a boolean")

    if not isinstance(longterm, bool):
        raise ValueError("longterm must be a boolean")

    if piecewise_median_period_weeks < 2:
        raise ValueError(
            "piecewise_median_period_weeks must be at least 2 weeks")

    if not isinstance(plot, bool):
        raise ValueError("plot must be a boolean")

    if not isinstance(y_log, bool):
        raise ValueError("y_log must be a boolean")

    if not isinstance(xlabel, basestring):
        raise ValueError("xlabel must be a string")

    if not isinstance(ylabel, basestring):
        raise ValueError("ylabel must be a string")

    if title and not isinstance(title, basestring):
        raise ValueError("title must be a string")

    if not title:
        title = ''
    else:
        title = title + " : "

    gran = get_gran(df)

    if gran == "day":
        num_days_per_line = 7
        if isinstance(only_last, basestring) and only_last == 'hr':
            only_last = 'day'
    else:
        num_days_per_line = 1

    if gran == 'sec':
        df.timestamp = date_format(df.timestamp, "%Y-%m-%d %H:%M:00")
        df = format_timestamp(df.groupby('timestamp').aggregate(np.sum))

    # if the data is daily, then we need to bump
    # the period to weekly to get multiple examples
    gran_period = {
        'min': 1440,
        'hr': 24,
        'day': 7
    }
    period = gran_period.get(gran)
    if not period:
        raise ValueError('%s granularity detected. This is currently not supported.' % gran)
    num_obs = len(df.value)

    clamp = (1 / float(num_obs))
    if max_anoms < clamp:
        max_anoms = clamp

    if longterm:
        if gran == "day":
            num_obs_in_period = period * piecewise_median_period_weeks + 1
            num_days_in_period = 7 * piecewise_median_period_weeks + 1
        else:
            num_obs_in_period = period * 7 * piecewise_median_period_weeks
            num_days_in_period = 7 * piecewise_median_period_weeks

        last_date = df.timestamp.iloc[-1]

        all_data = []

        for j in range(0, len(df.timestamp), num_obs_in_period):
            start_date = df.timestamp.iloc[j]
            end_date = min(start_date
                           + datetime.timedelta(days=num_obs_in_period),
                           df.timestamp.iloc[-1])

            # if there is at least 14 days left, subset it,
            # otherwise subset last_date - 14days
            if (end_date - start_date).days == num_days_in_period:
                sub_df = df[(df.timestamp >= start_date)
                            & (df.timestamp < end_date)]
            else:
                sub_df = df[(df.timestamp >
                     (last_date - datetime.timedelta(days=num_days_in_period)))
                    & (df.timestamp <= last_date)]
            all_data.append(sub_df)
    else:
        all_data = [df]

    all_anoms = DataFrame(columns=['timestamp', 'value'])
    seasonal_plus_trend = DataFrame(columns=['timestamp', 'value'])

    # Detect anomalies on all data (either entire data in one-pass,
    # or in 2 week blocks if longterm=TRUE)
    for i in range(len(all_data)):
        directions = {
            'pos': Direction(True, True),
            'neg': Direction(True, False),
            'both': Direction(False, True)
        }
        anomaly_direction = directions[direction]

        # detect_anoms actually performs the anomaly detection and
        # returns the results in a list containing the anomalies
        # as well as the decomposed components of the time series
        # for further analysis.

        s_h_esd_timestamps = detect_anoms(all_data[i], k=max_anoms, alpha=alpha,
                                          num_obs_per_period=period,
                                          use_decomp=True,
                                          one_tail=anomaly_direction.one_tail,
                                          upper_tail=anomaly_direction.upper_tail,
                                          verbose=verbose)

        # store decomposed components in local variable and overwrite
        # s_h_esd_timestamps to contain only the anom timestamps
        data_decomp = s_h_esd_timestamps['stl']
        s_h_esd_timestamps = s_h_esd_timestamps['anoms']

        # -- Step 3: Use detected anomaly timestamps to extract the actual
        # anomalies (timestamp and value) from the data
        if s_h_esd_timestamps:
            anoms = all_data[i][all_data[i].timestamp.isin(s_h_esd_timestamps)]
        else:
            anoms = DataFrame(columns=['timestamp', 'value'])

        # Filter the anomalies using one of the thresholding functions if applicable
        if threshold:
            # Calculate daily max values
            periodic_maxes = df.groupby(
                df.timestamp.map(Timestamp.date)).aggregate(np.max).value

            # Calculate the threshold set by the user
            if threshold == 'med_max':
                thresh = periodic_maxes.median()
            elif threshold == 'p95':
                thresh = periodic_maxes.quantile(.95)
            elif threshold == 'p99':
                thresh = periodic_maxes.quantile(.99)

            # Remove any anoms below the threshold
            anoms = anoms[anoms.value >= thresh]

        all_anoms = all_anoms.append(anoms)
        seasonal_plus_trend = seasonal_plus_trend.append(data_decomp)

    # Cleanup potential duplicates
    try:
        all_anoms.drop_duplicates(subset=['timestamp'])
        seasonal_plus_trend.drop_duplicates(subset=['timestamp'])
    except TypeError:
        all_anoms.drop_duplicates(cols=['timestamp'])
        seasonal_plus_trend.drop_duplicates(cols=['timestamp'])

    # -- If only_last was set by the user,
    # create subset of the data that represent the most recent day
    if only_last:
        start_date = df.timestamp.iloc[-1] - datetime.timedelta(days=7)
        start_anoms = df.timestamp.iloc[-1] - datetime.timedelta(days=1)
        if gran == "day":
            breaks = 3 * 12
            num_days_per_line = 7
        else:
            if only_last == 'day':
                breaks = 12
            else:
                start_date = df.timestamp.iloc[-1] - datetime.timedelta(days=2)
                # truncate to days
                start_date = datetime.date(start_date.year,
                                           start_date.month, start_date.day)
                start_anoms = (df.timestamp.iloc[-1]
                               - datetime.timedelta(hours=1))
                breaks = 3

        # subset the last days worth of data
        x_subset_single_day = df[df.timestamp > start_anoms]
        # When plotting anoms for the last day only
        # we only show the previous weeks data
        x_subset_week = df[(df.timestamp <= start_anoms)
                           & (df.timestamp > start_date)]
        if len(all_anoms) > 0:
            all_anoms = all_anoms[all_anoms.timestamp >=
                                  x_subset_single_day.timestamp.iloc[0]]
        num_obs = len(x_subset_single_day.value)

    # Calculate the number of anomalies as a percentage of the observations
    anom_pct = (len(all_anoms.value) / float(num_obs)) * 100

    if anom_pct == 0:
        return {
            "anoms": None,
            "plot": None
        }

    # The original R implementation handles plotting here.
    # Plotting is currently not implemented in this version.
    # if plot:
    #     plot_something()

    all_anoms.index = all_anoms.timestamp

    if e_value:
        d = {
            'timestamp': all_anoms.timestamp,
            'anoms': all_anoms.value,
            'expected_value': seasonal_plus_trend[
                seasonal_plus_trend.timestamp.isin(
                    all_anoms.timestamp)].value
        }
    else:
        d = {
            'timestamp': all_anoms.timestamp,
            'anoms': all_anoms.value
        }
    anoms = DataFrame(d, index=d['timestamp'].index)

    return {
        'anoms': anoms,
        'plot': None
    }
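
Most of the windowing in detect_ts derives cut-off timestamps by subtracting day- or hour-sized timedeltas from the last observation. A minimal sketch of that idea, independent of pandas and with illustrative timestamps:

import datetime

last_ts = datetime.datetime(2014, 5, 12, 18, 30)
start_date = last_ts - datetime.timedelta(days=7)      # previous week of data
start_anoms = last_ts - datetime.timedelta(days=1)     # anomalies from the last day
hourly_cutoff = last_ts - datetime.timedelta(hours=1)  # anomalies from the last hour

print(start_date)   # 2014-05-05 18:30:00
print(start_anoms)  # 2014-05-11 18:30:00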

Example 18

Project: Watson Source File: cli.py
@cli.command()
@click.option('-c/-C', '--current/--no-current', 'current', default=None,
              help="(Don't) include currently running frame in report.")
@click.option('-f', '--from', 'from_', cls=MutuallyExclusiveOption, type=Date,
              default=arrow.now().replace(days=-7),
              mutually_exclusive=_SHORTCUT_OPTIONS,
              help="The date from when the report should start. Defaults "
              "to seven days ago.")
@click.option('-t', '--to', cls=MutuallyExclusiveOption, type=Date,
              default=arrow.now(),
              mutually_exclusive=_SHORTCUT_OPTIONS,
              help="The date at which the report should stop (inclusive). "
              "Defaults to tomorrow.")
@click.option('-y', '--year', cls=MutuallyExclusiveOption, type=Date,
              flag_value=get_start_time_for_period('year'),
              mutually_exclusive=['day', 'week', 'month'],
              help='Reports activity for the current year.')
@click.option('-m', '--month', cls=MutuallyExclusiveOption, type=Date,
              flag_value=get_start_time_for_period('month'),
              mutually_exclusive=['day', 'week', 'year'],
              help='Reports activity for the current month.')
@click.option('-w', '--week', cls=MutuallyExclusiveOption, type=Date,
              flag_value=get_start_time_for_period('week'),
              mutually_exclusive=['day', 'month', 'year'],
              help='Reports activity for the current week.')
@click.option('-d', '--day', cls=MutuallyExclusiveOption, type=Date,
              flag_value=get_start_time_for_period('day'),
              mutually_exclusive=['week', 'month', 'year'],
              help='Reports activity for the current day.')
@click.option('-p', '--project', 'projects', multiple=True,
              help="Reports activity only for the given project. You can add "
              "other projects by using this option several times.")
@click.option('-T', '--tag', 'tags', multiple=True,
              help="Reports activity only for frames containing the given "
              "tag. You can add several tags by using this option multiple "
              "times")
@click.pass_obj
def report(watson, current, from_, to, projects, tags, year, month, week, day):
    """
    Display a report of the time spent on each project.

    If a project is given, the time spent on this project is printed.
    Else, print the total for each root project.

    By default, the time spent the last 7 days is printed. This timespan
    can be controlled with the `--from` and `--to` arguments. The dates
    must have the format `YEAR-MONTH-DAY`, like: `2014-05-19`.

    You can also use special shortcut options for easier timespan control:
    `--day` sets the report timespan to the current day (beginning at 00:00h)
    and `--year`, `--month` and `--week` to the current year, month or week
    respectively.

    You can limit the report to a project or a tag using the `--project` and
    `--tag` options. They can be specified several times each to add multiple
    projects or tags to the report.

    Example:

    \b
    $ watson report
    Mon 05 May 2014 -> Mon 12 May 2014
    \b
    apollo11 - 13h 22m 20s
            [brakes    7h 53m 18s]
            [module    7h 41m 41s]
            [reactor   8h 35m 50s]
            [steering 10h 33m 37s]
            [wheels   10h 11m 35s]
    \b
    hubble - 8h 54m 46s
            [camera        8h 38m 17s]
            [lens          5h 56m 22s]
            [transmission  6h 27m 07s]
    \b
    voyager1 - 11h 45m 13s
            [antenna     5h 53m 57s]
            [generators  9h 04m 58s]
            [probe      10h 14m 29s]
            [sensors    10h 30m 26s]
    \b
    voyager2 - 16h 16m 09s
            [antenna     7h 05m 50s]
            [generators 12h 20m 29s]
            [probe      12h 20m 29s]
            [sensors    11h 23m 17s]
    \b
    Total: 43h 42m 20s
    \b
    $ watson report --from 2014-04-01 --to 2014-04-30 --project apollo11
    Tue 01 April 2014 -> Wed 30 April 2014
    \b
    apollo11 - 13h 22m 20s
            [brakes    7h 53m 18s]
            [module    7h 41m 41s]
            [reactor   8h 35m 50s]
            [steering 10h 33m 37s]
            [wheels   10h 11m 35s]
    """
    for start_time in (_ for _ in [day, week, month, year]
                       if _ is not None):
        from_ = start_time

    if from_ > to:
        raise click.ClickException("'from' must be anterior to 'to'")

    if watson.current:
        if current or (current is None and
                       watson.config.getboolean('options', 'report_current')):
            cur = watson.current
            watson.frames.add(cur['project'], cur['start'], arrow.utcnow(),
                              cur['tags'], id="current")

    span = watson.frames.span(from_, to)

    frames_by_project = sorted_groupby(
        watson.frames.filter(
            projects=projects or None, tags=tags or None, span=span
        ),
        operator.attrgetter('project')
    )

    total = datetime.timedelta()

    click.echo("{} -> {}\n".format(
        style('date', '{:ddd DD MMMM YYYY}'.format(span.start)),
        style('date', '{:ddd DD MMMM YYYY}'.format(span.stop))
    ))

    for project, frames in frames_by_project:
        frames = tuple(frames)
        delta = reduce(
            operator.add,
            (f.stop - f.start for f in frames),
            datetime.timedelta()
        )
        total += delta

        click.echo("{project} - {time}".format(
            time=style('time', format_timedelta(delta)),
            project=style('project', project)
        ))

        tags_to_print = sorted(
            set(tag for frame in frames for tag in frame.tags
                if tag in tags or not tags)
        )
        if tags_to_print:
            longest_tag = max(len(tag) for tag in tags_to_print or [''])

        for tag in tags_to_print:
            delta = reduce(
                operator.add,
                (f.stop - f.start for f in frames if tag in f.tags),
                datetime.timedelta()
            )

            click.echo("\t[{tag} {time}]".format(
                time=style('time', '{:>11}'.format(format_timedelta(delta))),
                tag=style('tag', '{:<{}}'.format(tag, longest_tag)),
            ))

        click.echo()

    if len(projects) > 1:
        click.echo("Total: {}".format(
            style('time', '{}'.format(format_timedelta(total)))
        ))
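
The report totals above work because timedelta() is an additive identity, so per-frame durations (stop - start) can be folded into a running total with reduce. A minimal standalone sketch with made-up frames (not Watson's data model):

import datetime
import operator
from functools import reduce

frames = [
    (datetime.datetime(2014, 5, 5, 9, 0), datetime.datetime(2014, 5, 5, 12, 30)),
    (datetime.datetime(2014, 5, 6, 14, 0), datetime.datetime(2014, 5, 6, 16, 15)),
]
total = reduce(operator.add,
               (stop - start for start, stop in frames),
               datetime.timedelta())
print(total)  # 5:45:00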

Example 19

Project: tendenci Source File: send_corp_membership_notices.py
    def handle(self, *args, **options):
        verbosity = 1
        if 'verbosity' in options:
            verbosity = options['verbosity']
        # first test if we have notices set up
        from tendenci.apps.corporate_memberships.models import Notice
        if not Notice.objects.filter(status=True,
                                     status_detail='active'
                                    ).exclude(
                                    notice_time='attimeof'
                                    ).exists():
            if verbosity > 1:
                print('No notices set up...exiting...')
            # no active notices to process. stop here
            return

        from tendenci.apps.corporate_memberships.models import (
            CorpMembership, CorpMembershipApp,
            NoticeLog,
            NoticeLogRecord)
        from tendenci.apps.notifications import models as notification
        from tendenci.apps.base.utils import fieldify
        from tendenci.apps.site_settings.utils import get_setting

        site_display_name = get_setting('site', 'global', 'sitedisplayname')
        site_contact_name = get_setting('site', 'global', 'sitecontactname')
        site_contact_email = get_setting('site', 'global', 'sitecontactemail')
        site_url = get_setting('site', 'global', 'siteurl')

        email_context = {
            'sender':get_setting('site', 'global', 'siteemailnoreplyaddress'),
            'sender_display':site_display_name,
            'reply_to':site_contact_email}

        now = datetime.now()
        nowstr = time.strftime("%d-%b-%y %I:%M %p", now.timetuple())

        def email_admins_recap(notices, total_sent):
            """Send admins recap after the notices were processed.
            """
            recap_recipient = get_admin_emails()
            if recap_recipient:
                template_name = "corporate_memberships/notices/email_recap.html"
                try:
                    recap_email_content = render_to_string(
                               template_name,
                               {'notices': notices,
                              'total_sent': total_sent,
                              'site_url': site_url,
                              'site_display_name': site_display_name,
                              'site_contact_name': site_contact_name,
                              'site_contact_email': site_contact_email})
                    recap_subject = '%s Corporate Membership Notices Distributed' % (
                                                    site_display_name)
                    email_context.update({
                        'subject':recap_subject,
                        'content': recap_email_content,
                        'content_type':"html"})

                    notification.send_emails(recap_recipient, 'corp_memb_notice_email',
                                             email_context)
                except TemplateDoesNotExist:
                    pass

        def email_script_errors(err_msg):
            """Send error message to us if any.
            """
            script_recipient = get_script_support_emails()
            if script_recipient:
                email_context.update({
                    'subject':'Error Processing Corporate Membership Notices on %s' % (
                                                            site_url),
                    'content':'%s \n\nTime Submitted: %s\n' % (err_msg, nowstr),
                    'content_type':"text"})

                notification.send_emails(script_recipient, 'corp_memb_notice_email',
                                         email_context)

        def get_script_support_emails():
            admins = getattr(settings, 'ADMINS', None)
            if admins:
                recipients_list = [admin[1] for admin in admins]
                return recipients_list
            return None

        def get_admin_emails():
            admin_emails = get_setting('module', 'corporate_memberships',
                                       'corporatemembershiprecipients').strip()
            if admin_emails:
                admin_emails = admin_emails.split(',')
            if not admin_emails:
                admin_emails = (get_setting('site', 'global',
                                            'admincontactemail'
                                            ).strip()).split(',')
            return admin_emails

        def process_notice(notice):
            notice.members_sent = []
            num_sent = 0
            if notice.notice_time == 'before':
                start_dt = now + timedelta(days=notice.num_days)
            else:
                start_dt = now - timedelta(days=notice.num_days)

            if notice.notice_type == 'disapprove':
                status_detail_list = ['inactive']
            else:
                status_detail_list = ['active', 'expired']

            memberships = CorpMembership.objects.filter(
                                    status=True,
                                    status_detail__in=status_detail_list
                                    )
            if notice.notice_type in ['approve_join', 'disapprove_join',
                                      'approve_renewal', 'disapprove_renewal']:
                filters = {'approved_denied_dt__year': start_dt.year,
                           'approved_denied_dt__month': start_dt.month,
                           'approved_denied_dt__day': start_dt.day,
                           'renewal': False,
                           'approved': True
                           }
                if notice.notice_type in ['approve_renewal',
                                          'disapprove_renewal']:
                    filters.update({'renewal': True})
                if notice.notice_type in ['disapprove_join',
                                          'disapprove_renewal']:
                    filters.update({'approved': False})

                memberships = memberships.filter(**filters)
            else:  # 'expire'
                memberships = memberships.filter(
                    expiration_dt__year=start_dt.year,
                    expiration_dt__month=start_dt.month,
                    expiration_dt__day=start_dt.day)

            # filter by membership type
            if notice.corporate_membership_type:
                memberships = memberships.filter(
                                corporate_membership_type=notice.corporate_membership_type)

            memberships_count = memberships.count()

            if memberships_count > 0:
                email_context.update({'content_type':notice.content_type})

                global_context = {'site_display_name': site_display_name,
                                  'site_contact_name': site_contact_name,
                                  'site_contact_email': site_contact_email,
                                  'time_submitted': nowstr,
                                  }

                # log notice sent
                notice_log = NoticeLog(notice=notice,
                                       num_sent=0)
                notice_log.save()
                notice.log = notice_log
                notice.err = ''

                for membership in memberships:
                    try:
                        num_sent += email_member(notice, membership, global_context)
                        if memberships_count <= 50:
                            notice.members_sent.append(membership)

                        # log record
                        notice_log_record = NoticeLogRecord(
                            notice_log=notice_log,
                            corp_membership=membership)
                        notice_log_record.save()
                    except:
                        # catch the exception and email
                        notice.err += traceback.format_exc()
                        print traceback.format_exc()

                if num_sent > 0:
                    notice_log.num_sent = num_sent
                    notice_log.save()

            return num_sent

        def email_member(notice, membership, global_context):
            corp_profile = membership.corp_profile
            representatives = corp_profile.reps.filter(Q(is_dues_rep=True)|(Q(is_member_rep=True)))
            sent = 0

            corp_app = CorpMembershipApp.objects.current_app()
            authentication_info = render_to_string(
                'notification/corp_memb_notice_email/auth_info.html',
                {'corp_membership': membership,
                 'corp_app': corp_app})
            individuals_join_url = '%s%s' % (site_url,
                                             reverse('membership_default.corp_pre_add',
                                                     args=[membership.id]))
            if membership.expiration_dt:
                expire_dt = time.strftime("%d-%b-%y %I:%M %p",
                                          membership.expiration_dt.timetuple())
            else:
                expire_dt = ''

            if membership.payment_method:
                payment_method = membership.payment_method.human_name
            else:
                payment_method = ''

            if membership.renewal:
                renewed_individuals_list = render_to_string(
                    'notification/corp_memb_notice_email/renew_list.html',
                    {'corp_membership': membership})
                total_individuals_renewed = membership.indivmembershiprenewentry_set.count()
            else:
                renewed_individuals_list = ''
                total_individuals_renewed = ''

            if membership.invoice:
                invoice_link = '%s%s' % (site_url,
                                         membership.invoice.get_absolute_url())
            else:
                invoice_link = ''

            global_context.update({
                'name': corp_profile.name,
                'email': corp_profile.email,
                'expire_dt': expire_dt,
                'payment_method': payment_method,
                'renewed_individuals_list': renewed_individuals_list,
                'total_individuals_renewed': total_individuals_renewed,
                'view_link': "%s%s" % (site_url, membership.get_absolute_url()),
                'renew_link': "%s%s" % (site_url, membership.get_renewal_url()),
                'invoice_link': invoice_link,
                'authentication_info': authentication_info,
                'individuals_join_url': individuals_join_url,
            })

            for recipient in representatives:
                body = notice.email_content
                context = membership.get_field_items()
                context['membership'] = membership
                context.update(global_context)

                context.update({
                    'rep_first_name': recipient.user.first_name,
                })

                body = fieldify(body)

                body = '%s <br /><br />%s' % (body, get_footer())

                context = Context(context)
                template = Template(body)
                body = template.render(context)

                email_recipient = recipient.user.email
                subject = notice.subject.replace('(name)',
                                            corp_profile.name)
                template = Template(subject)
                subject = template.render(context)

                email_context.update({
                    'subject':subject,
                    'content':body})

                if notice.sender:
                    email_context.update({
                        'sender':notice.sender,
                        'reply_to':notice.sender})
                if notice.sender_display:
                    email_context.update({'sender_display':notice.sender_display})

                notification.send_emails([email_recipient], 'corp_memb_notice_email',
                                         email_context)
                sent += 1
                if verbosity > 1:
                    print 'To ', email_recipient, subject
            return sent

        def get_footer():
            return """
                    This e-mail was generated by Tendenci&reg; Software -
                    a web based membership management software solution
                    www.tendenci.com developed by Schipul - The Web
                    Marketing Company
                    """

        exception_str = ""

        notices = Notice.objects.filter(status=True, status_detail='active'
                                    ).exclude(notice_time='attimeof')

        if notices:
            if verbosity > 1:
                print "Start sending out notices to members:"
            total_notices = 0
            total_sent = 0
            for notice in notices:
                total_notices += 1
                total_sent += process_notice(notice)
                if hasattr(notice, 'err'):
                    exception_str += notice.err

            if total_sent > 0:
                processed_notices = [notice for notice in notices if hasattr(
                                        notice, 'log'
                                        ) and notice.log.num_sent > 0]
                email_admins_recap(processed_notices, total_sent)

            # if there is any error, notify us
            if exception_str:
                email_script_errors(exception_str)

            if verbosity > 1:
                print 'Total notice processed: %d' % (total_notices)
                print 'Total email sent: %d' % (total_sent)
                print "Done"
        else:
            if verbosity > 1:
                print "No notices on the site."

Example 20

Project: tendenci Source File: send_membership_notices.py
    def handle(self, *args, **options):
        verbosity = 1
        if 'verbosity' in options:
            verbosity = options['verbosity']

        from django.conf import settings
        from tendenci.apps.memberships.models import (Notice,
                                                        MembershipDefault,
                                                        NoticeLog,
                                                        NoticeDefaultLogRecord)
        from tendenci.apps.base.utils import fieldify
        from tendenci.apps.notifications import models as notification
        from tendenci.apps.site_settings.utils import get_setting

        site_display_name = get_setting('site', 'global', 'sitedisplayname')
        site_contact_name = get_setting('site', 'global', 'sitecontactname')
        site_contact_email = get_setting('site', 'global', 'sitecontactemail')
        site_url = get_setting('site', 'global', 'siteurl')

        corp_replace_str = """
                            <br /><br />
                            <font color="#FF0000">
                            Organizational Members, please contact your company
                            Membership coordinator
                            to ensure that your membership is being renewed.
                            </font>
                            """

        email_context = {
            'sender':get_setting('site', 'global', 'siteemailnoreplyaddress'),
            'sender_display':site_display_name,
            'reply_to':site_contact_email}

        now = datetime.now()
        nowstr = time.strftime("%d-%b-%y %I:%M %p", now.timetuple())

        def email_admins_recap(notices, total_sent):
            """Send admins recap after the notices were processed.
            """
            recap_recipient = get_admin_emails()
            if recap_recipient:
                template_name = "memberships/notices/email_recap.html"
                try:
                    recap_email_content = render_to_string(
                               template_name,
                               {'notices': notices,
                              'total_sent': total_sent,
                              'site_url': site_url,
                              'site_display_name': site_display_name,
                              'site_contact_name': site_contact_name,
                              'site_contact_email': site_contact_email})
                    recap_subject = '%s Membership Notices Distributed' % (
                                                    site_display_name)
                    email_context.update({
                        'subject':recap_subject,
                        'content': recap_email_content,
                        'content_type':"html"})

                    notification.send_emails(recap_recipient, 'membership_notice_email',
                                             email_context)
                except TemplateDoesNotExist:
                    pass

        def email_script_errors(err_msg):
            """Send error message to us if any.
            """
            script_recipient = get_script_support_emails()
            if script_recipient:
                email_context.update({
                    'subject':'Error Processing Membership Notices on %s' % (
                                                            site_url),
                    'content':'%s \n\nTime Submitted: %s\n' % (err_msg, nowstr),
                    'content_type':"text"})

                notification.send_emails(script_recipient, 'membership_notice_email',
                                         email_context)

        def get_script_support_emails():
            admins = getattr(settings, 'ADMINS', None)
            if admins:
                recipients_list = [admin[1] for admin in admins]
                return recipients_list

            return None

        def get_admin_emails():
            admin_emails = get_setting('module', 'memberships',
                                       'membershiprecipients').strip()
            if admin_emails:
                admin_emails = admin_emails.split(',')
            if not admin_emails:
                admin_emails = (get_setting('site', 'global',
                                            'admincontactemail'
                                            ).strip()).split(',')
            return admin_emails

        def process_notice(notice):
            notice.members_sent = []
            num_sent = 0
            if notice.notice_time == 'before':
                start_dt = now + timedelta(days=notice.num_days)
            else:
                start_dt = now - timedelta(days=notice.num_days)

            if notice.notice_type == 'disapprove':
                status_detail_list = ['disapproved']
            else:
                status_detail_list = ['active', 'expired']
            memberships = MembershipDefault.objects.filter(
                                    status=True,
                                    status_detail__in=status_detail_list
                                    )
            if notice.notice_type == 'join':
                memberships = memberships.filter(
                                    join_dt__year=start_dt.year,
                                    join_dt__month=start_dt.month,
                                    join_dt__day=start_dt.day,
                                    renewal=False)
            elif notice.notice_type == 'renewal':
                memberships = memberships.filter(
                                    renew_dt__year=start_dt.year,
                                    renew_dt__month=start_dt.month,
                                    renew_dt__day=start_dt.day,
                                    renewal=True)
            elif notice.notice_type == 'approve':
                memberships = memberships.filter(
                                    application_approved_denied_dt__year=start_dt.year,
                                    application_approved_denied_dt__month=start_dt.month,
                                    application_approved_denied_dt__day=start_dt.day,
                                    application_approved=True)
            elif notice.notice_type == 'disapprove':
                memberships = memberships.filter(
                                    application_approved_denied_dt__year=start_dt.year,
                                    application_approved_denied_dt__month=start_dt.month,
                                    application_approved_denied_dt__day=start_dt.day,
                                    application_approved=False)
            else:  # 'expire'
                memberships = memberships.filter(
                                    expire_dt__year=start_dt.year,
                                    expire_dt__month=start_dt.month,
                                    expire_dt__day=start_dt.day)
                if get_setting('module', 'memberships', 'renewalreminderexcludecorpmembers'):
                    # exclude corp members
                    memberships = memberships.exclude(corporate_membership_id__gt=0)

            # filter by membership type
            if notice.membership_type:
                memberships = memberships.filter(
                                membership_type=notice.membership_type)

            memberships_count = memberships.count()

            if memberships_count > 0:
                email_context.update({'content_type':notice.content_type})

                # password
                passwd_str = """
                        If you've forgotten your password or need to reset
                        the auto-generated one, click <a href="%s%s">here</a>
                        and follow the instructions on the page to
                        reset your password.
                        """ % (site_url, reverse('auth_password_reset'))

                global_context = {'site_display_name': site_display_name,
                                  'site_contact_name': site_contact_name,
                                  'site_contact_email': site_contact_email,
                                  'time_submitted': nowstr,
                                  'sitedisplayname': site_display_name,
                                  'sitecontactname': site_contact_name,
                                  'sitecontactemail': site_contact_email,
                                  'timesubmitted': nowstr,
                                  'password': passwd_str
                                  }

                # log notice sent
                notice_log = NoticeLog(notice=notice,
                                       num_sent=0)
                notice_log.save()
                notice.log = notice_log
                notice.err = ''

                for membership in memberships:
                    try:
                        email_member(notice, membership, global_context)
                        if memberships_count <= 50:
                            notice.members_sent.append(membership)
                        num_sent += 1

                        # log record
                        notice_log_record = NoticeDefaultLogRecord(
                                                notice_log=notice_log,
                                                membership=membership)
                        notice_log_record.save()
                    except:
                        # catch the exception and email
                        notice.err += traceback.format_exc()
                        print traceback.format_exc()

                if num_sent > 0:
                    notice_log.num_sent = num_sent
                    notice_log.save()

            return num_sent

        def email_member(notice, membership, global_context):
            user = membership.user

            body = notice.email_content
            context = membership.get_field_items()
            context['membership'] = membership
            context.update(global_context)

            # corporate member corp_replace_str
            if membership.corporate_membership_id:
                context['corporate_membership_notice'] = corp_replace_str

            if membership.expire_dt:
                context.update({
                    'expire_dt': time.strftime(
                    "%d-%b-%y %I:%M %p",
                    membership.expire_dt.timetuple()),
                })

            if membership.payment_method:
                payment_method_name = membership.payment_method.human_name
            else:
                payment_method_name = ''

            context.update({
                'member_number': membership.member_number,
                'payment_method': payment_method_name,
                'referer_url': '%s%s?next=%s' % (site_url, reverse('auth_login'), membership.referer_url),
                'membership_link': '%s%s' % (site_url, membership.get_absolute_url()),
                'renew_link': '%s%s' % (site_url, membership.get_absolute_url()),
                'mymembershipslink': '%s%s' % (site_url, membership.get_absolute_url()),
                'membershiplink': '%s%s' % (site_url, membership.get_absolute_url()),
                'renewlink': '%s%s' % (site_url, membership.get_absolute_url())
            })

            body = fieldify(body)

            body = '%s <br /><br />%s' % (body, get_footer())

            context = Context(context)
            template = Template(body)
            body = template.render(context)

            email_recipient = user.email
            subject = notice.subject.replace('(name)',
                                        user.get_full_name())
            template = Template(subject)
            subject = template.render(context)

            email_context.update({
                'subject':subject,
                'content':body})
            if notice.sender:
                email_context.update({
                    #'sender':notice.sender,
                    'reply_to':notice.sender})
            if notice.sender_display:
                email_context.update({'sender_display':notice.sender_display})

            notification.send_emails([email_recipient], 'membership_notice_email',
                                     email_context)
            if verbosity > 1:
                print 'To ', email_recipient, subject

        def get_footer():
            return """
                    This e-mail was generated by Tendenci&reg; Software -
                    a web based membership management software solution
                    www.tendenci.com developed by Schipul - The Web
                    Marketing Company
                    """

        exception_str = ""

        notices = Notice.objects.filter(status=True, status_detail='active'
                                    ).exclude(notice_time='attimeof')

        if notices:
            if verbosity > 1:
                print "Start sending out notices to members:"
            total_notices = 0
            total_sent = 0
            for notice in notices:
                total_notices += 1
                total_sent += process_notice(notice)
                if hasattr(notice, 'err'):
                    exception_str += notice.err

            if total_sent > 0:
                processed_notices = [notice for notice in notices if hasattr(
                                        notice, 'log'
                                        ) and notice.log.num_sent > 0]
                email_admins_recap(processed_notices, total_sent)

            # if there is any error, notify us
            if exception_str:
                email_script_errors(exception_str)

            if verbosity > 1:
                print 'Total notice processed: %d' % (total_notices)
                print 'Total email sent: %d' % (total_sent)
                print "Done"
        else:
            if verbosity > 1:
                print "No notices on the site."

Example 21

Project: django-bulbs Source File: test_custom_search.py
    def setUp(self):
        super(BaseCustomSearchFilterTests, self).setUp()
        feature_type_names = (
            "News", "Slideshow", "TV Club", "Video",
        )
        feature_types = []
        for name in feature_type_names:
            feature_types.append(FeatureType.objects.create(name=name))
        tag_names = (
            "Barack Obama", "Joe Biden", "Wow", "Funny", "Politics"
        )
        tags = []
        for name in tag_names:
            tags.append(Tag.objects.create(name=name))
        content_data = (
            dict(
                title="Obama Does It Again",
                feature_type=0,
                tags=[0, 2, 4]
            ),
            dict(
                title="Biden Does It Again",
                feature_type=0,
                tags=[1, 2, 4]
            ),
            dict(
                title="Obama In Slides Is Flawless",
                feature_type=1,
                tags=[0, 2, 4]
            ),
            dict(
                title="Obama On TV",
                feature_type=2,
                tags=[0, 2]
            ),
            dict(
                title="Flawless video here",
                feature_type=3,
                tags=[3, 2]
            ),
            dict(
                title="Both Obama and Biden in One Article",
                feature_type=3,
                tags=[0, 1, 2]
            ),
        )
        time_step = timedelta(hours=12)
        pubtime = timezone.now() + time_step
        content_list = []
        for data in content_data:
            data["published"] = pubtime
            data["feature_type"] = feature_types[data.pop("feature_type")]
            data["tags"] = [tags[tag_idx] for tag_idx in data.pop("tags")]
            content = make_content(**data)
            content_list.append(content)
            content.index()  # reindex for related object updates
            pubtime -= time_step
        self.content_list = content_list
        self.feature_types = feature_types
        self.tags = tags
        Content.search_objects.refresh()

        # NOTE: we updated some field names after I initially typed this up.
        # NOTE: These functions munge the existing data into the new form.
        def makeGroups(groups):
            result = []
            for group in groups:
                if isinstance(group, dict):
                    this_group = group
                else:
                    this_group = dict(conditions=[])
                    for condition in group:
                        this_group["conditions"].append(makeCondition(*condition))
                result.append(this_group)
            return result

        def makeCondition(field, type, values):
            return dict(
                field=field, type=type,
                values=[dict(label=v, value=v) for v in values]
            )

        s_biden = dict(
            label="All Biden, Baby",
            query=dict(
                groups=makeGroups([
                    [
                        ("tag", "all", [self.tags[1].slug]),
                    ],
                ])
            )
        )
        s_obama = dict(
            label="All Obama, Baby",
            query=dict(
                groups=makeGroups([
                    [
                        ("tag", "all", [self.tags[0].slug]),
                    ],
                ])
            )
        )
        # logical and
        s_b_and_b = dict(
            label="Obama and Biden, together!",
            query=dict(
                groups=makeGroups([
                    [
                        ("tag", "all", [
                            self.tags[0].slug,
                            self.tags[1].slug
                        ]),
                    ],
                ])
            )
        )
        # logical or
        s_b_or_b = dict(
            label="Obama or Biden, whatever!",
            query=dict(
                groups=makeGroups([
                    [
                        ("tag", "any", [
                            self.tags[0].slug,
                            self.tags[1].slug
                        ]),
                    ],
                ])
            )
        )
        # excluding some tags
        s_lite_obama = dict(
            label="Obama but not political stuff",
            query=dict(
                groups=makeGroups([
                    [
                        ("tag", "all", [
                            self.tags[0].slug,  # obama
                        ]),
                        ("tag", "none", [
                            self.tags[4].slug,  # politics
                        ]),
                    ],
                ])
            )
        )
        # multiple, disjoint groups
        s_funny_and_slideshows = dict(
            label="Anything funny and also slideshows!",
            query=dict(
                groups=makeGroups([
                    [
                        ("tag", "any", [
                            self.tags[3].slug  # funny tags
                        ]),
                    ],
                    [
                        ("feature-type", "any", [
                            self.feature_types[1].slug  # slideshow
                        ]),
                    ],
                ])
            )
        )
        # this tag is on everything
        s_wow = dict(
            label="Wow!",
            query=dict(
                groups=makeGroups([
                    [
                        ("tag", "all", [
                            self.tags[2].slug  # wow tag
                        ]),
                    ],
                ])
            )
        )
        # filter by content type
        s_doctype = dict(
            label="Doctype",
            query=dict(
                groups=makeGroups([
                    [
                        ("content-type", "all", [
                            TestContentObjTwo.search_objects.mapping.doc_type
                        ])
                    ]
                ])
            )
        )
        # include some ids
        s_one_article = dict(
            label="Just this article",
            query=dict(
                groups=[],
                included_ids=[self.content_list[0].id]
            )
        )
        s_two_articles = dict(
            label="Just two articles",
            query=dict(
                groups=[],
                included_ids=[
                    self.content_list[0].id,
                    self.content_list[3].id
                ]
            )
        )
        # exclude ids
        s_all_but_one_article = dict(
            label="All but one article",
            query=dict(
                groups=[],
                excluded_ids=[
                    self.content_list[0].id
                ]
            )
        )
        # last day of articles
        s_last_day = dict(
            label="Last day",
            query=dict(
                groups=[dict(
                    conditions=[],
                    time="Past day"
                )],
            )
        )
        # pinned
        s_pinned = dict(
            label="Pinned something",
            query=dict(
                pinned_ids=[
                    content_list[-1].id  # last in time
                ]
            )
        )
        # pinned 2
        s_pinned_2 = dict(
            label="Pinned 2 things",
            query=dict(
                pinned_ids=[
                    content_list[-1].id,  # last in time
                    content_list[-2].id  # penultimate
                ]
            )
        )
        # pinned 2 with groups
        s_pinned_2_groups = dict(
            label="Pinned 2 things with other filters",
            query=dict(
                groups=makeGroups([
                    [
                        ("tag", "any", [
                            self.tags[0].slug,
                            self.tags[1].slug,
                            self.tags[2].slug,
                            self.tags[3].slug,
                            self.tags[4].slug
                        ]),
                    ]
                ]),
                pinned_ids=[
                    content_list[-1].id,  # last in time
                    content_list[-2].id  # penultimate
                ]
            )
        )
        # text query
        s_text_query = dict(
            label="Text query",
            query=dict(
                query="again"
            )
        )
        # text query with pinned ids
        s_text_query_pinned = dict(
            label="Text query",
            query=dict(
                groups=makeGroups([
                    [
                        ("tag", "any", [self.tags[2].slug]),
                    ]
                ]),
                pinned_ids=[self.content_list[4].id],
                query="Flawless"
            )
        )
        # saved search and the expected result count
        self.search_expectations = (
            (s_biden, 2),
            (s_obama, 4),
            (s_b_and_b, 1),
            (s_b_or_b, 5),
            (s_lite_obama, 2),
            (s_funny_and_slideshows, 2),
            (s_wow, len(self.content_list)),
            (s_one_article, 1),
            (s_two_articles, 2),
            (s_all_but_one_article, len(self.content_list) - 1),
            (s_last_day, 3),
            (s_pinned, len(self.content_list)),
            (s_pinned_2, len(self.content_list)),
            (s_pinned_2_groups, len(self.content_list)),
            (s_doctype, TestContentObjTwo.objects.count()),
            (s_text_query, 2),
            (s_text_query_pinned, 2),
        )
        self.preview_expectations = (
            (s_biden, 2),
            (s_obama, 4),
            (s_b_and_b, 1),
            (s_b_or_b, 5),
            (s_lite_obama, 2),
            (s_funny_and_slideshows, 2),
            (s_wow, len(self.content_list)),
            (s_one_article, 1),
            (s_two_articles, 2),
            (s_all_but_one_article, len(self.content_list)),  # excluded
            (s_last_day, 3),
            (s_doctype, TestContentObjTwo.objects.count()),
            (s_text_query, 2),
            (s_text_query_pinned, 2),
        )
        self.group_preview_expectations = (
            (s_biden, 2),
            (s_obama, 4),
            (s_b_and_b, 1),
            (s_wow, len(self.content_list)),
            (s_one_article, 1),
            (s_two_articles, 2),
            (s_all_but_one_article, len(self.content_list)),  # excluded
        )
        # is not published and not is_preview
        self.unpublished_expectations = (
            (s_biden, 2),
            (s_obama, 4),
            (s_b_and_b, 1),
            (s_b_or_b, 5),
            (s_lite_obama, 2),
            (s_funny_and_slideshows, 2),
            (s_wow, len(self.content_list)),
            (s_one_article, 1),
            (s_two_articles, 2),
            (s_all_but_one_article, len(self.content_list) - 1),
            (s_last_day, 3),
            (s_pinned, len(self.content_list)),
            (s_pinned_2, len(self.content_list)),
            (s_pinned_2_groups, len(self.content_list)),
            (s_text_query, 2),
            (s_text_query_pinned, 2),
        )
        # is published and not is_preview
        self.published_expectations = (
            (s_biden, 2),
            (s_obama, 3),
            (s_b_and_b, 1),
            (s_b_or_b, 5 - 1),
            (s_lite_obama, 2),
            (s_funny_and_slideshows, 2),
            (s_wow, len(self.content_list) - 1),
            (s_one_article, 1 - 1),
            (s_two_articles, 2 - 1),
            (s_all_but_one_article, len(self.content_list) - 1),
            (s_last_day, 2),
            (s_pinned, len(self.content_list) - 1),
            (s_pinned_2, len(self.content_list) - 1),
            (s_pinned_2_groups, len(self.content_list) - 1),
            (s_text_query, 1),
            (s_text_query_pinned, 2),
        )
        self.published_not_pinned_expectations = (
            (s_biden, 2),
            (s_obama, 3),
            (s_b_and_b, 1),
            (s_b_or_b, 5 - 1),
            (s_lite_obama, 2),
            (s_funny_and_slideshows, 2),
            (s_wow, len(self.content_list) - 1),
            (s_one_article, 1 - 1),
            (s_two_articles, 2 - 1),
            (s_all_but_one_article, len(self.content_list) - 1),
            (s_last_day, 2),
        )
        # (search filter, (list, of, ids, in, order)),
        self.ordered_expectations = (
            (s_all_but_one_article, (content_list[1].id, content_list[2].id, content_list[3].id)),
            (s_text_query_pinned, (content_list[4].id, content_list[2].id)),
        )
        self.pinned_expectations = (
            (s_pinned, (
                content_list[-1].id,
            )),
            (s_pinned_2, (
                content_list[-2].id, content_list[-1].id,
            )),
            (s_pinned_2_groups, (
                content_list[-2].id, content_list[-1].id,
            )),
        )
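
In this setUp the publish dates are spread out by repeatedly subtracting a fixed timedelta(hours=12) step from a starting time, so the test content spans several days for the time-window filters. The spacing pattern in isolation (plain datetime.now() stands in for Django's timezone.now()):

from datetime import datetime, timedelta

time_step = timedelta(hours=12)
pubtime = datetime.now() + time_step
publish_times = []
for _ in range(6):
    publish_times.append(pubtime)
    pubtime -= time_step  # each item is published 12 hours earlier than the previous one
print(publish_times[0] - publish_times[-1])  # 2 days, 12:00:00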

Example 22

Project: termdown Source File: termdown.py
@graceful_ctrlc
def countdown(
    stdscr,
    alt_format=False,
    font=DEFAULT_FONT,
    blink=False,
    critical=3,
    quit_after=None,
    text=None,
    timespec=None,
    title=None,
    voice=None,
    outfile=None,
    no_seconds=False,
    no_text_magic=True,
    no_figlet=False,
    no_window_title=False,
    **kwargs
):
    try:
        sync_start, target = parse_timestr(timespec)
    except ValueError:
        raise click.BadParameter("Unable to parse TIME value '{}'".format(timespec))
    curses_lock, input_queue, quit_event = setup(stdscr)
    figlet = Figlet(font=font)

    if title and not no_figlet:
        try:
            title = figlet.renderText(title)
        except CharNotPrinted:
            title = ""

    input_thread = Thread(
        args=(stdscr, input_queue, quit_event, curses_lock),
        target=input_thread_body,
    )
    input_thread.start()

    seconds_total = seconds_left = int(ceil((target - datetime.now()).total_seconds()))

    try:
        while seconds_left > 0 or blink or text:
            figlet.width = stdscr.getmaxyx()[1]
            if alt_format:
                countdown_text = format_seconds_alt(
                    seconds_left, seconds_total, hide_seconds=no_seconds)
            else:
                countdown_text = format_seconds(seconds_left, hide_seconds=no_seconds)
            if seconds_left > 0:
                with curses_lock:
                    if not no_window_title:
                        os.write(stdout.fileno(), "\033]2;{0}\007".format(countdown_text).encode())
                    if outfile:
                        with open(outfile, 'w') as f:
                            f.write("{}\n{}\n".format(countdown_text, seconds_left))
                    stdscr.erase()
                    try:
                        draw_text(
                            stdscr,
                            countdown_text if no_figlet else figlet.renderText(countdown_text),
                            color=1 if seconds_left <= critical else 0,
                            fallback=countdown_text,
                            title=title,
                        )
                    except CharNotPrinted:
                        draw_text(stdscr, "E")
            if seconds_left <= 10 and voice:
                voice_exec = "echo"
                if os.path.exists("/usr/bin/say"):
                    voice_exec = "/usr/bin/say"
                elif os.path.exists("/usr/bin/espeak"):
                    voice_exec = "/usr/bin/espeak"
                Popen([voice_exec, "-v", voice, str(seconds_left)])

            # We want to sleep until this point of time has been
            # reached:
            sleep_target = sync_start + timedelta(seconds=1)

            # If sync_start has microsecond=0, it might happen that we
            # need to skip one frame (the very first one). This occurs
            # when the program has been started at, say,
            # "2014-05-29 20:27:57.930651". Now suppose rendering the
            # frame took about 0.2 seconds. The real time now is
            # "2014-05-29 20:27:58.130000" and sleep_target is
            # "2014-05-29 20:27:58.000000" which is in the past! We're
            # already too late. We could either skip that frame
            # completely or we can draw it right now. I chose to do the
            # latter: only sleep if we haven't already missed our target.
            now = datetime.now()
            if sleep_target > now and seconds_left > 0:
                try:
                    input_action = input_queue.get(True, (sleep_target - now).total_seconds())
                except Empty:
                    input_action = None
                if input_action == INPUT_PAUSE:
                    pause_start = datetime.now()
                    with curses_lock:
                        stdscr.erase()
                        try:
                            draw_text(
                                stdscr,
                                countdown_text if no_figlet else figlet.renderText(countdown_text),
                                color=3,
                                fallback=countdown_text,
                            )
                        except CharNotPrinted:
                            draw_text(stdscr, "E")
                    input_action = input_queue.get()
                    if input_action == INPUT_PAUSE:
                        sync_start += (datetime.now() - pause_start)
                        target += (datetime.now() - pause_start)
                if input_action == INPUT_EXIT:  # no elif here! input_action may have changed
                    break
                elif input_action == INPUT_RESET:
                    sync_start, target = parse_timestr(timespec)
                    seconds_left = int(ceil((target - datetime.now()).total_seconds()))
                    continue
                elif input_action == INPUT_LAP:
                    continue

            sync_start = sleep_target

            seconds_left = int(ceil((target - datetime.now()).total_seconds()))

            if seconds_left <= 0:
                # we could write this entire block outside the parent while
                # but that would leave us unable to reset everything

                with curses_lock:
                    curses.beep()

                if text and not no_text_magic:
                    text = normalize_text(text)

                if outfile:
                    with open(outfile, 'w') as f:
                        f.write("{}\n{}\n".format(text if text else "DONE", 0))

                rendered_text = text

                if text and not no_figlet:
                    try:
                        rendered_text = figlet.renderText(text)
                    except CharNotPrinted:
                        rendered_text = ""

                if blink or text:
                    base_color = 1 if blink else 0
                    blink_reset = False
                    flip = True
                    slept = 0
                    extra_sleep = 0
                    while True:
                        with curses_lock:
                            os.write(stdout.fileno(), "\033]2;{0}\007".format("/" if flip else "\\").encode())
                            if text:
                                draw_text(
                                    stdscr,
                                    rendered_text,
                                    color=base_color if flip else 4,
                                    fallback=text,
                                )
                            else:
                                draw_text(stdscr, "", color=base_color if flip else 4)
                        if blink:
                            flip = not flip
                        try:
                            sleep_start = datetime.now()
                            input_action = input_queue.get(True, 0.5 + extra_sleep)
                        except Empty:
                            input_action = None
                        finally:
                            extra_sleep = 0
                            sleep_end = datetime.now()
                        if input_action == INPUT_PAUSE:
                            pause_start = datetime.now()
                            input_action = input_queue.get()
                            extra_sleep = (sleep_end - sleep_start).total_seconds()
                        if input_action == INPUT_EXIT:
                            # no elif here! input_action may have changed
                            return
                        elif input_action == INPUT_RESET:
                            sync_start, target = parse_timestr(timespec)
                            seconds_left = int(ceil((target - datetime.now()).total_seconds()))
                            blink_reset = True
                            break
                        slept += (sleep_end - sleep_start).total_seconds()
                        if quit_after and slept >= float(quit_after):
                            return
                    if blink_reset:
                        continue
    finally:
        with curses_lock:
            if not no_window_title:
                os.write(stdout.fileno(), "\033]2;\007".encode())
            if outfile:
                os.remove(outfile)
        quit_event.set()
        input_thread.join()
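
termdown keeps each frame aligned to the wall clock: the next target is sync_start + timedelta(seconds=1), and it sleeps only for the remaining (sleep_target - now).total_seconds(), so rendering time never accumulates as drift. A stripped-down sketch of that tick loop (time.sleep stands in for the input-queue wait used above):

import time
from datetime import datetime, timedelta

sync_start = datetime.now()
for _ in range(3):
    sleep_target = sync_start + timedelta(seconds=1)
    now = datetime.now()
    if sleep_target > now:
        # sleep only for what is left of this second
        time.sleep((sleep_target - now).total_seconds())
    sync_start = sleep_target
    print("tick", datetime.now())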

Example 23

Project: rapidpro Source File: test_models.py
    def test_event_deliveries(self):
        sms = self.create_msg(contact=self.joe, direction='I', status='H', text="I'm gonna pop some tags")

        with patch('requests.Session.send') as mock:
            now = timezone.now()
            mock.return_value = MockResponse(200, "Hello World")

            # trigger an event, shouldn't fire as we don't have a webhook
            WebHookEvent.trigger_sms_event(SMS_RECEIVED, sms, now)
            self.assertFalse(WebHookEvent.objects.all())

        self.setupChannel()

        with patch('requests.Session.send') as mock:
            # clear out which events we listen for, we still shouldn't be notified though we have a webhook
            self.channel.org.webhook_events = 0
            self.channel.org.save()

            now = timezone.now()
            mock.return_value = MockResponse(200, "Hello World")

            # trigger an event, shouldn't fire as we don't have a webhook
            WebHookEvent.trigger_sms_event(SMS_RECEIVED, sms, now)
            self.assertFalse(WebHookEvent.objects.all())

        self.setupChannel()

        with patch('requests.Session.send') as mock:
            # remove all the org users
            self.org.administrators.clear()
            self.org.editors.clear()
            self.org.viewers.clear()

            mock.return_value = MockResponse(200, "Hello World")

            # trigger an event
            WebHookEvent.trigger_sms_event(SMS_RECEIVED, sms, now)
            event = WebHookEvent.objects.get()

            self.assertEquals('F', event.status)
            self.assertEquals(0, event.try_count)
            self.assertFalse(event.next_attempt)

            result = WebHookResult.objects.get()
            self.assertStringContains("No active user", result.message)
            self.assertEquals(0, result.status_code)

            self.assertFalse(mock.called)

            # what if they send weird json back?
            WebHookEvent.objects.all().delete()
            WebHookResult.objects.all().delete()

        # add the admin back in
        self.org.administrators.add(self.admin)
        self.admin.set_org(self.org)

        with patch('requests.Session.send') as mock:
            mock.return_value = MockResponse(200, "Hello World")

            # trigger an event
            WebHookEvent.trigger_sms_event(SMS_RECEIVED, sms, now)
            event = WebHookEvent.objects.get()

            self.assertEquals('C', event.status)
            self.assertEquals(1, event.try_count)
            self.assertFalse(event.next_attempt)

            result = WebHookResult.objects.get()
            self.assertStringContains("Event delivered successfully", result.message)
            self.assertStringContains("not JSON", result.message)
            self.assertEquals(200, result.status_code)

            self.assertTrue(mock.called)

            WebHookEvent.objects.all().delete()
            WebHookResult.objects.all().delete()

        with patch('requests.Session.send') as mock:
            # valid json, but not our format
            bad_json = '{ "thrift_shops": ["Goodwill", "Value Village"] }'
            mock.return_value = MockResponse(200, bad_json)

            WebHookEvent.trigger_sms_event(SMS_RECEIVED, sms, now)
            event = WebHookEvent.objects.get()

            self.assertEquals('C', event.status)
            self.assertEquals(1, event.try_count)
            self.assertFalse(event.next_attempt)

            self.assertTrue(mock.called)

            result = WebHookResult.objects.get()
            self.assertStringContains("Event delivered successfully", result.message)
            self.assertStringContains("ignoring", result.message)
            self.assertEquals(200, result.status_code)
            self.assertEquals(bad_json, result.body)

            WebHookEvent.objects.all().delete()
            WebHookResult.objects.all().delete()

        with patch('requests.Session.send') as mock:
            mock.return_value = MockResponse(200, '{ "phone": "+250788123123", "text": "I am success" }')

            WebHookEvent.trigger_sms_event(SMS_RECEIVED, sms, now)
            event = WebHookEvent.objects.get()

            self.assertEquals('C', event.status)
            self.assertEquals(1, event.try_count)
            self.assertFalse(event.next_attempt)

            result = WebHookResult.objects.get()
            self.assertEquals(200, result.status_code)

            self.assertTrue(mock.called)

            broadcast = Broadcast.objects.get()
            contact = Contact.get_or_create(self.org, self.admin, name=None, urns=["tel:+250788123123"], channel=self.channel)
            self.assertTrue("I am success", broadcast.text)
            self.assertTrue(contact, broadcast.contacts.all())

            self.assertTrue(mock.called)
            args = mock.call_args_list[0][0]
            prepared_request = args[0]
            self.assertEquals(self.org.get_webhook_url(), prepared_request.url)

            data = parse_qs(prepared_request.body)
            self.assertEquals(self.joe.get_urn(TEL_SCHEME).path, data['phone'][0])
            self.assertEquals(unicode(self.joe.get_urn(TEL_SCHEME)), data['urn'][0])
            self.assertEquals(self.joe.uuid, data['contact'][0])
            self.assertEquals(sms.pk, int(data['sms'][0]))
            self.assertEquals(self.channel.pk, int(data['channel'][0]))
            self.assertEquals(SMS_RECEIVED, data['event'][0])
            self.assertEquals("I'm gonna pop some tags", data['text'][0])
            self.assertTrue('time' in data)

            WebHookEvent.objects.all().delete()
            WebHookResult.objects.all().delete()

        with patch('requests.Session.send') as mock:
            mock.return_value = MockResponse(500, "I am error")

            next_attempt_earliest = timezone.now() + timedelta(minutes=4)
            next_attempt_latest = timezone.now() + timedelta(minutes=6)

            WebHookEvent.trigger_sms_event(SMS_RECEIVED, sms, now)
            event = WebHookEvent.objects.get()

            self.assertEquals('E', event.status)
            self.assertEquals(1, event.try_count)
            self.assertTrue(event.next_attempt)
            self.assertTrue(next_attempt_earliest < event.next_attempt and next_attempt_latest > event.next_attempt)

            result = WebHookResult.objects.get()
            self.assertStringContains("Error", result.message)
            self.assertEquals(500, result.status_code)
            self.assertEquals("I am error", result.body)

            # make sure things become failures after three retries
            event.try_count = 2
            event.deliver()
            event.save()

            self.assertTrue(mock.called)

            self.assertEquals('F', event.status)
            self.assertEquals(3, event.try_count)
            self.assertFalse(event.next_attempt)

            result = WebHookResult.objects.get()
            self.assertStringContains("Error", result.message)
            self.assertEquals(500, result.status_code)
            self.assertEquals("I am error", result.body)
            self.assertEquals("http://fake.com/webhook.php", result.url)
            self.assertTrue(result.data.find("pop+some+tags") > 0)

            # check out our api log
            response = self.client.get(reverse('api.log'))
            self.assertRedirect(response, reverse('users.user_login'))

            response = self.client.get(reverse('api.log_read', args=[event.pk]))
            self.assertRedirect(response, reverse('users.user_login'))

            WebHookEvent.objects.all().delete()
            WebHookResult.objects.all().delete()

        # add a webhook header to the org
        self.channel.org.webhook = u'{"url": "http://fake.com/webhook.php", "headers": {"X-My-Header": "foobar", "Authorization": "Authorization: Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ=="}, "method": "POST"}'
        self.channel.org.save()

        # check that our webhook settings have saved
        self.assertEquals('http://fake.com/webhook.php', self.channel.org.get_webhook_url())
        self.assertDictEqual({'X-My-Header': 'foobar', 'Authorization': 'Authorization: Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ=='}, self.channel.org.get_webhook_headers())

        with patch('requests.Session.send') as mock:
            mock.return_value = MockResponse(200, "Boom")
            WebHookEvent.trigger_sms_event(SMS_RECEIVED, sms, now)
            event = WebHookEvent.objects.get()

            result = WebHookResult.objects.get()
            # both headers should be in the json-encoded url string
            self.assertStringContains('X-My-Header: foobar', result.request)
            self.assertStringContains('Authorization: Authorization: Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==', result.request)
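
The retry test above brackets the webhook's next_attempt between now + timedelta(minutes=4) and now + timedelta(minutes=6), asserting "roughly five minutes from now" without being sensitive to a few seconds of test runtime. The same idea with plain datetimes (next_attempt is a stand-in for the value the code under test would set):

from datetime import datetime, timedelta

now = datetime.now()
next_attempt = now + timedelta(minutes=5)  # stand-in for the scheduled retry time
earliest = now + timedelta(minutes=4)
latest = now + timedelta(minutes=6)
assert earliest < next_attempt < latest    # tolerant time-window assertion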

Example 24

Project: smarthome Source File: tz.py
    def __init__(self, fileobj):
        if isinstance(fileobj, str):
            self._filename = fileobj
            fileobj = open(fileobj, 'rb')
        elif hasattr(fileobj, "name"):
            self._filename = fileobj.name
        else:
            self._filename = repr(fileobj)

        # From tzfile(5):
        #
        # The time zone information files used by tzset(3)
        # begin with the magic characters "TZif" to identify
        # them as time zone information files, followed by
        # sixteen bytes reserved for future use, followed by
        # six four-byte values of type long, written in a
        # ``standard'' byte order (the high-order  byte
        # of the value is written first).

        if fileobj.read(4).decode() != "TZif":
            raise ValueError("magic not found")

        fileobj.read(16)

        (
         # The number of UTC/local indicators stored in the file.
         ttisgmtcnt,

         # The number of standard/wall indicators stored in the file.
         ttisstdcnt,
         
         # The number of leap seconds for which data is
         # stored in the file.
         leapcnt,

         # The number of "transition times" for which data
         # is stored in the file.
         timecnt,

         # The number of "local time types" for which data
         # is stored in the file (must not be zero).
         typecnt,

         # The  number  of  characters  of "time zone
         # abbreviation strings" stored in the file.
         charcnt,

        ) = struct.unpack(">6l", fileobj.read(24))

        # The above header is followed by tzh_timecnt four-byte
        # values  of  type long,  sorted  in ascending order.
        # These values are written in ``standard'' byte order.
        # Each is used as a transition time (as  returned  by
        # time(2)) at which the rules for computing local time
        # change.

        if timecnt:
            self._trans_list = struct.unpack(">%dl" % timecnt,
                                             fileobj.read(timecnt*4))
        else:
            self._trans_list = []

        # Next come tzh_timecnt one-byte values of type unsigned
        # char; each one tells which of the different types of
        # ``local time'' types described in the file is associated
        # with the same-indexed transition time. These values
        # serve as indices into an array of ttinfo structures that
        # appears next in the file.
        
        if timecnt:
            self._trans_idx = struct.unpack(">%dB" % timecnt,
                                            fileobj.read(timecnt))
        else:
            self._trans_idx = []
        
        # Each ttinfo structure is written as a four-byte value
        # for tt_gmtoff  of  type long,  in  a  standard  byte
        # order, followed  by a one-byte value for tt_isdst
        # and a one-byte  value  for  tt_abbrind.   In  each
        # structure, tt_gmtoff  gives  the  number  of
        # seconds to be added to UTC, tt_isdst tells whether
        # tm_isdst should be set by  localtime(3),  and
        # tt_abbrind serves  as an index into the array of
        # time zone abbreviation characters that follow the
        # ttinfo structure(s) in the file.

        ttinfo = []

        for i in range(typecnt):
            ttinfo.append(struct.unpack(">lbb", fileobj.read(6)))

        abbr = fileobj.read(charcnt).decode()

        # Then there are tzh_leapcnt pairs of four-byte
        # values, written in  standard byte  order;  the
        # first  value  of  each pair gives the time (as
        # returned by time(2)) at which a leap second
        # occurs;  the  second  gives the  total  number of
        # leap seconds to be applied after the given time.
        # The pairs of values are sorted in ascending order
        # by time.

        # Not used, for now
        if leapcnt:
            leap = struct.unpack(">%dl" % (leapcnt*2),
                                 fileobj.read(leapcnt*8))

        # Then there are tzh_ttisstdcnt standard/wall
        # indicators, each stored as a one-byte value;
        # they tell whether the transition times associated
        # with local time types were specified as standard
        # time or wall clock time, and are used when
        # a time zone file is used in handling POSIX-style
        # time zone environment variables.

        if ttisstdcnt:
            isstd = struct.unpack(">%db" % ttisstdcnt,
                                  fileobj.read(ttisstdcnt))

        # Finally, there are tzh_ttisgmtcnt UTC/local
        # indicators, each stored as a one-byte value;
        # they tell whether the transition times associated
        # with local time types were specified as UTC or
        # local time, and are used when a time zone file
        # is used in handling POSIX-style time zone envi-
        # ronment variables.

        if ttisgmtcnt:
            isgmt = struct.unpack(">%db" % ttisgmtcnt,
                                  fileobj.read(ttisgmtcnt))

        # ** Everything has been read **

        # Build ttinfo list
        self._ttinfo_list = []
        for i in range(typecnt):
            gmtoff, isdst, abbrind =  ttinfo[i]
            # Round to full-minutes if that's not the case. Python's
            # datetime doesn't accept sub-minute timezones. Check
            # http://python.org/sf/1447945 for some information.
            gmtoff = (gmtoff+30)//60*60
            tti = _ttinfo()
            tti.offset = gmtoff
            tti.delta = datetime.timedelta(seconds=gmtoff)
            tti.isdst = isdst
            tti.abbr = abbr[abbrind:abbr.find('\x00', abbrind)]
            tti.isstd = (ttisstdcnt > i and isstd[i] != 0)
            tti.isgmt = (ttisgmtcnt > i and isgmt[i] != 0)
            self._ttinfo_list.append(tti)

        # Replace ttinfo indexes for ttinfo objects.
        trans_idx = []
        for idx in self._trans_idx:
            trans_idx.append(self._ttinfo_list[idx])
        self._trans_idx = tuple(trans_idx)

        # Set standard, dst, and before ttinfos. before will be
        # used when a given time is before any transitions,
        # and will be set to the first non-dst ttinfo, or to
        # the first dst, if all of them are dst.
        self._ttinfo_std = None
        self._ttinfo_dst = None
        self._ttinfo_before = None
        if self._ttinfo_list:
            if not self._trans_list:
                self._ttinfo_std = self._ttinfo_first = self._ttinfo_list[0]
            else:
                for i in range(timecnt-1, -1, -1):
                    tti = self._trans_idx[i]
                    if not self._ttinfo_std and not tti.isdst:
                        self._ttinfo_std = tti
                    elif not self._ttinfo_dst and tti.isdst:
                        self._ttinfo_dst = tti
                    if self._ttinfo_std and self._ttinfo_dst:
                        break
                else:
                    if self._ttinfo_dst and not self._ttinfo_std:
                        self._ttinfo_std = self._ttinfo_dst

                for tti in self._ttinfo_list:
                    if not tti.isdst:
                        self._ttinfo_before = tti
                        break
                else:
                    self._ttinfo_before = self._ttinfo_list[0]

        # Now fix transition times to become relative to wall time.
        #
        # I'm not sure about this. In my tests, the tz source file
        # is set up to wall time, and in the binary file isstd and
        # isgmt are off, so it should be in wall time. OTOH, it's
        # always in gmt time. Let me know if you have comments
        # about this.
        laststdoffset = 0
        self._trans_list = list(self._trans_list)
        for i in range(len(self._trans_list)):
            tti = self._trans_idx[i]
            if not tti.isdst:
                # This is std time.
                self._trans_list[i] += tti.offset
                laststdoffset = tti.offset
            else:
                # This is dst time. Convert to std.
                self._trans_list[i] += laststdoffset
        self._trans_list = tuple(self._trans_list)
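
Because Python's datetime does not accept timezone offsets with sub-minute precision, the reader above rounds each raw gmtoff to whole minutes before wrapping it in datetime.timedelta(seconds=gmtoff). The rounding and conversion in isolation (the offset value is illustrative):

import datetime

gmtoff = 19066                      # raw UTC offset in seconds, i.e. 5:17:46
gmtoff = (gmtoff + 30) // 60 * 60   # round to the nearest full minute -> 19080
delta = datetime.timedelta(seconds=gmtoff)
print(delta)                        # 5:18:00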

Example 25

Project: termtrack Source File: cli.py
Function: render
@graceful_ctrlc
def render(
        stdscr,
        apsides=False,
        body="earth",
        coverage=False,
        crosshair=False,
        footprint=False,
        fps=1,
        grid=False,
        info=False,
        me=False,
        night=False,
        observer=None,
        orbit_ascdesc=False,
        orbit_res="/70",
        orbits=0,
        paused=False,
        planets="",
        satellite=None,
        tle=None,
        topo=False,
        **kwargs
):
    curses_lock, input_queue, quit_event = setup(stdscr)
    input_thread = Thread(
        args=(stdscr, input_queue, quit_event, curses_lock),
        target=input_thread_body,
    )
    input_thread.start()
    try:
        body = BODY_MAP[body.lower()](1, 1)
        if body.NAME != "Earth":
            night = False
            topo = False
            me = False
            satellite = None

        observer_latitude = None
        observer_longitude = None
        if me and observer is None:
            location_data = get("http://ip-api.com/json").json()
            observer_latitude = location_data['lat']
            observer_longitude = location_data['lon']
        if observer is not None:
            obs_latlon = observer.split()
            observer_latitude = float(obs_latlon[0])
            observer_longitude = float(obs_latlon[1])

        time_offset = timedelta(0)
        time = datetime.utcnow() + time_offset
        force_redraw = False

        if paused is True:
            paused = datetime.utcnow()
            force_redraw = True

        if satellite is None and tle is None:
            satellite_obj = None
        else:
            satellite_obj = EarthSatellite(
                satellite,
                time,
                observer_latitude=observer_latitude,
                observer_longitude=observer_longitude,
                tle_file=tle,
            )

        apsides_layer = Layer(draw_apsides, update_timeout=8)
        apsides_layer.hidden = not apsides
        coverage_layer = Layer(draw_coverage, update_timeout=10)
        coverage_layer.hidden = not coverage
        crosshair_layer = Layer(draw_crosshair)
        crosshair_layer.hidden = not crosshair
        footprint_layer = Layer(draw_footprint)
        footprint_layer.hidden = not footprint
        grid_layer = Layer(draw_grid, update_timeout=None)
        grid_layer.hidden = not grid
        info_layer = Layer(draw_info)
        info_layer.hidden = not info
        map_layer = Layer(draw_map, update_timeout=None)
        observer_layer = Layer(draw_location, update_timeout=None)
        orbit_layer = Layer(draw_orbits)
        planet_layer = Layer(draw_planets)
        satellite_layer = Layer(draw_satellite)
        satellite_layer.hidden = satellite_obj is None

        layers = [
            info_layer,
            satellite_layer,
            apsides_layer,
            observer_layer,
            footprint_layer,
            orbit_layer,
            planet_layer,
            coverage_layer,
            map_layer,
            crosshair_layer,
            grid_layer,
        ]

        while True:
            with curses_lock:
                body, did_resize = check_for_resize(stdscr, body)
            if did_resize:
                for layer in layers:
                    layer.last_updated = None

            draw_start = datetime.now()
            if not paused:
                time = datetime.utcnow() + time_offset
            if force_redraw:
                for layer in layers:
                    layer.last_updated = None
            if not paused or force_redraw:
                grid_layer.update(body)
                info_layer.update(
                    body,
                    time,
                    observer_latitude=observer_latitude,
                    observer_longitude=observer_longitude,
                    satellite=satellite_obj,
                )
                map_layer.update(body, time, night=night, topo=topo)
                observer_layer.update(body, observer_latitude, observer_longitude)
                planet_layer.update(body, time, planets)

                if satellite_obj is not None:
                    apsides_layer.update(body, satellite_obj)
                    coverage_layer.update(body, satellite_obj, time)
                    crosshair_layer.update(body, satellite_obj)
                    footprint_layer.update(body, satellite_obj)
                    orbit_layer.update(
                        body,
                        satellite_obj,
                        time,
                        orbits=orbits,
                        orbit_ascdesc=orbit_ascdesc,
                        orbit_resolution=orbit_res,
                    )
                    satellite_layer.update(body, satellite_obj)
                    satellite_obj.compute(time)

                with curses_lock:
                    redraw(stdscr, body, layers)

            draw_time = (datetime.now() - draw_start).total_seconds()

            # get input
            try:
                input_action = input_queue.get(True, max(0, 1/fps - draw_time))
                # we just received an input that probably modified how
                # our screen is supposed to look, ergo we need to redraw
                force_redraw = True
            except Empty:
                input_action = None
                force_redraw = False

            # react to input
            if input_action == INPUT_CYCLE_ORBITS:
                orbits += 1
                orbits = orbits % 4
            elif input_action == INPUT_EXIT:
                break
            elif input_action == INPUT_TIME_MINUS_SHORT:
                if satellite_obj is None:
                    time_offset -= timedelta(minutes=30)
                else:
                    time_offset -= satellite_obj.orbital_period / 20
            elif input_action == INPUT_TIME_MINUS_LONG:
                if satellite_obj is None:
                    time_offset -= timedelta(hours=6)
                else:
                    time_offset -= satellite_obj.orbital_period / 2
            elif input_action == INPUT_TIME_PAUSE:
                if paused:
                    time_offset -= datetime.utcnow() - paused
                    paused = False
                else:
                    paused = datetime.utcnow()
            elif input_action == INPUT_TIME_PLUS_SHORT:
                if satellite_obj is None:
                    time_offset += timedelta(minutes=30)
                else:
                    time_offset += satellite_obj.orbital_period / 20
            elif input_action == INPUT_TIME_PLUS_LONG:
                if satellite_obj is None:
                    time_offset += timedelta(hours=6)
                else:
                    time_offset += satellite_obj.orbital_period / 2
            elif input_action == INPUT_TIME_RESET:
                time_offset = timedelta(0)
                if paused:
                    paused = time = datetime.utcnow()
            elif input_action == INPUT_TOGGLE_COVERAGE:
                coverage_layer.hidden = not coverage_layer.hidden
                coverage_layer.last_updated = None
            elif input_action == INPUT_TOGGLE_CROSSHAIR:
                crosshair_layer.hidden = not crosshair_layer.hidden
            elif input_action == INPUT_TOGGLE_FOOTPRINT:
                footprint_layer.hidden = not footprint_layer.hidden
            elif input_action == INPUT_TOGGLE_GRID:
                grid_layer.hidden = not grid_layer.hidden
            elif input_action == INPUT_TOGGLE_INFO:
                info_layer.hidden = not info_layer.hidden
            elif input_action == INPUT_TOGGLE_NIGHT:
                night = not night
                map_layer.last_updated = None
                if night:
                    map_layer.update_timeout = timedelta(seconds=60)
                else:
                    map_layer.update_timeout = None
            elif input_action == INPUT_TOGGLE_ORBIT_APSIDES:
                apsides_layer.hidden = not apsides_layer.hidden
                apsides_layer.last_updated = None
            elif input_action == INPUT_TOGGLE_ORBIT_ASCDESC:
                orbit_ascdesc = not orbit_ascdesc
                orbit_layer.last_updated = None
            elif input_action == INPUT_TOGGLE_TOPO:
                topo = not topo
                map_layer.last_updated = None
    finally:
        quit_event.set()
        input_thread.join()
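
termtrack models time travel as a running timedelta offset: key presses grow or shrink time_offset by fixed amounts or by fractions of the satellite's orbital-period timedelta (timedelta supports division), and the simulated time is always datetime.utcnow() + time_offset. A compact sketch of that bookkeeping (the orbital period is an illustrative value):

from datetime import datetime, timedelta

time_offset = timedelta(0)
orbital_period = timedelta(minutes=92)  # illustrative, roughly one ISS orbit

time_offset += timedelta(minutes=30)    # short jump forward without a satellite
time_offset += orbital_period / 20      # short jump forward with a satellite
time_offset -= timedelta(hours=6)       # long jump backward

simulated_time = datetime.utcnow() + time_offset
print(time_offset, simulated_time)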

Example 26

Project: kamaelia_ Source File: LiveAnalysis.py
Function: main
    def main(self):
        # Calculate running total and mean etc
            self.dbConnect()
            while not self.finished():
                # The below does LIVE and FINAL analysis - do NOT run DataAnalyser at the same time

                Print("Analysis component: Checking for new data...")

                # Stage 1: Live analysis - could do with a better way to do the first query (indexed field 'analysed' to speed up for now)
                # Could move this into the main app to take a copy of tweets on arrival, but would rather solve separately if poss
                self.db_select("""SELECT tid,pid,timestamp,text,tweet_id,programme_position FROM rawdata WHERE analysed = 0 ORDER BY tid LIMIT 5000""")
                data = self.db_fetchall()

                # Cycle through all the as yet unanalysed tweets
                for result in data:
                    tid = result[0]
                    pid = result[1]
                    tweettime = result[2] # Timestamp based on the tweet's created_at field
                    tweettext = result[3]
                    tweetid = result[4] # This is the real tweet ID, tid just makes a unique identifier as each tweet can be stored against several pids
                    progpos = result[5] # Position through the programme that the tweet was made
                    dbtime = datetime.utcfromtimestamp(tweettime)
                    # Each tweet will be grouped into chunks of one minute to make display better, so set the seconds to zero
                    # This particular time is only used for console display now as a more accurate one calculated from programme position is found later
                    dbtime = dbtime.replace(second=0)
                    Print("Analysis component: Analysing new tweet for pid", pid, "(" , dbtime ,"):")
                    try:
                        Print("Analysis component: '" , tweettext , "'")
                    except UnicodeEncodeError:
                        e = sys.exc_info()[1]
                        Print ("UnicodeEncodeError", e)
                    self.db_select("""SELECT duration FROM programmes_unique WHERE pid = %s""",(pid))
                    progdata = self.db_fetchone()
                    duration = progdata[0]
                    self.db_select("""SELECT totaltweets,meantweets,mediantweets,modetweets,stdevtweets,timediff,timestamp,utcoffset FROM programmes WHERE pid = %s ORDER BY timestamp DESC""",(pid))
                    progdata2 = self.db_fetchone()
                    totaltweets = progdata2[0]
                    # Increment the total tweets recorded for this programme's broadcast
                    totaltweets += 1
                    meantweets = progdata2[1]
                    mediantweets = progdata2[2]
                    modetweets = progdata2[3]
                    stdevtweets = progdata2[4]
                    timediff = progdata2[5]
                    timestamp = progdata2[6]
                    utcoffset = progdata2[7]

                    # Need to work out the timestamp to assign to the entry in analysed data
                    progstart = timestamp - timediff
                    progmins = int(progpos / 60)
                    analysedstamp = int(progstart + (progmins * 60))
                    # Ensure that this tweet occurs within the length of the programme, otherwise for the purposes of this program it's useless

                    if progpos > 0 and progpos <= duration:
                        self.db_select("""SELECT did,totaltweets,wordfreqexpected,wordfrequnexpected FROM analyseddata WHERE pid = %s AND timestamp = %s""",(pid,analysedstamp))
                        analyseddata = self.db_fetchone()
                        # Just in case of a missing raw json object (ie. programme terminated before it was stored - allow it to be skipped if not found after 30 secs)
                        #failcounter = 0
                        # Pass this tweet to the NLTK analysis component
                        self.send([pid,tweetid],"nltk")
#                        print "BUM", 1
                        while not self.dataReady("nltk"):
                        #    if failcounter >= 3000:
                        #        nltkdata = list()
                        #        break
                            time.sleep(0.01)
                        #    failcounter += 1
                        #if failcounter < 3000:
#                        print "BUM", 2
                        if 1:
                            # Receive back a list of words and their frequency for this tweet, including whether or not they are common, an entity etc
                            nltkdata = self.recv("nltk")
                        if analyseddata == None: # No tweets yet recorded for this minute
                            minutetweets = 1
                            self.db_insert("""INSERT INTO analyseddata (pid,totaltweets,timestamp) VALUES (%s,%s,%s)""", (pid,minutetweets,analysedstamp))
                            for word in nltkdata:
                                # Check if we're storing a word or phrase here
                                if nltkdata[word][0] == 1:
                                    self.db_insert("""INSERT INTO wordanalysis (pid,timestamp,phrase,count,is_keyword,is_entity,is_common) VALUES (%s,%s,%s,%s,%s,%s,%s)""", (pid,analysedstamp,word,nltkdata[word][1],nltkdata[word][2],nltkdata[word][3],nltkdata[word][4]))
                                else:
                                    self.db_insert("""INSERT INTO wordanalysis (pid,timestamp,word,count,is_keyword,is_entity,is_common) VALUES (%s,%s,%s,%s,%s,%s,%s)""", (pid,analysedstamp,word,nltkdata[word][1],nltkdata[word][2],nltkdata[word][3],nltkdata[word][4]))
                        else:
                            did = analyseddata[0]
                            minutetweets = analyseddata[1] # Get current number of tweets for this minute
                            minutetweets += 1 # Add one to it for this tweet

                            self.db_update("""UPDATE analyseddata SET totaltweets = %s WHERE did = %s""",(minutetweets,did))

                            for word in nltkdata:
                                # Check if we're storing a word or phrase
                                if nltkdata[word][0] == 1:
                                    self.db_select("""SELECT wid,count FROM wordanalysis WHERE pid = %s AND timestamp = %s AND phrase LIKE %s""",(pid,analysedstamp,word))
                                    # Check if this phrase has already been stored for this minute - if so, increment the count
                                    wordcheck = self.db_fetchone()
                                    if wordcheck == None:
                                        self.db_insert("""INSERT INTO wordanalysis (pid,timestamp,phrase,count,is_keyword,is_entity,is_common) VALUES (%s,%s,%s,%s,%s,%s,%s)""", (pid,analysedstamp,word,nltkdata[word][1],nltkdata[word][2],nltkdata[word][3],nltkdata[word][4]))
                                    else:
                                        self.db_update("""UPDATE wordanalysis SET count = %s WHERE wid = %s""",(nltkdata[word][1] + wordcheck[1],wordcheck[0]))
                                else:
                                    self.db_select("""SELECT wid,count FROM wordanalysis WHERE pid = %s AND timestamp = %s AND word LIKE %s""",(pid,analysedstamp,word))
                                    # Check if this word has already been stored for this minute - if so, increment the count
                                    wordcheck = self.db_fetchone()
                                    if wordcheck == None:
                                        self.db_insert("""INSERT INTO wordanalysis (pid,timestamp,word,count,is_keyword,is_entity,is_common) VALUES (%s,%s,%s,%s,%s,%s,%s)""", (pid,analysedstamp,word,nltkdata[word][1],nltkdata[word][2],nltkdata[word][3],nltkdata[word][4]))
                                    else:
                                        self.db_update("""UPDATE wordanalysis SET count = %s WHERE wid = %s""",(nltkdata[word][1] + wordcheck[1],wordcheck[0]))
                        # Averages / stdev are calculated roughly based on the programme's running time at this point
                        progdate = datetime.utcfromtimestamp(timestamp) + timedelta(seconds=utcoffset)
                        actualstart = progdate - timedelta(seconds=timediff)
                        actualtweettime = datetime.utcfromtimestamp(tweettime + utcoffset)

                        # Calculate how far through the programme this tweet occurred
                        runningtime = actualtweettime - actualstart
                        runningtime = runningtime.seconds

                        if runningtime < 0:
                            runningtime = 0
                        else:
                            runningtime = float(runningtime) / 60

                        try:
                            meantweets = totaltweets / runningtime
                        except ZeroDivisionError:
                            meantweets = 0

                        self.db_select("""SELECT totaltweets FROM analyseddata WHERE pid = %s AND timestamp >= %s AND timestamp < %s""",(pid,progstart,analysedstamp+duration))
                        analyseddata = self.db_fetchall()

                        runningtime = int(runningtime)

                        tweetlist = list()
                        for result in analyseddata:
                            totaltweetsmin = result[0]
                            # Create a list of each minute and the total tweets for that minute in the programme
                            tweetlist.append(int(totaltweetsmin))

                        # Ensure tweetlist has enough entries
                        # If a minute has no tweets, it won't have a database record, so this has to be added
                        if len(tweetlist) < runningtime:
                            additions = runningtime - len(tweetlist)
                            while additions > 0:
                                tweetlist.append(0)
                                additions -= 1

                        # Order by programme position 0,1,2, mins etc
                        tweetlist.sort()

                        mediantweets = tweetlist[int(len(tweetlist)/2)]

                        modes = dict()
                        stdevlist = list()
                        for tweet in tweetlist:
                            modes[tweet] = tweetlist.count(tweet)
                            stdevlist.append((tweet - meantweets)*(tweet - meantweets))

                        modeitems = [[v, k] for k, v in modes.items()]
                        modeitems.sort(reverse=True)
                        modetweets = int(modeitems[0][1])

                        stdevtweets = 0
                        for val in stdevlist:
                            stdevtweets += val

                        try:
                            stdevtweets = math.sqrt(stdevtweets / runningtime)
                        except ZeroDivisionError:
                            stdevtweets = 0

                        # Finished analysis - update DB
                        self.db_update("""UPDATE programmes SET totaltweets = %s, meantweets = %s, mediantweets = %s, modetweets = %s, stdevtweets = %s WHERE pid = %s AND timestamp = %s""",(totaltweets,meantweets,mediantweets,modetweets,stdevtweets,pid,timestamp))

                    else:
                        pass
                        # Print("Analysis component: Skipping tweet - falls outside the programme's running time")

                    # Mark the tweet as analysed
                    self.db_update("""UPDATE rawdata SET analysed = 1 WHERE tid = %s""",(tid))
                    Print("Analysis component: Done!")

                # Stage 2: If all raw tweets analysed and imported = 1 (all data for this programme stored and programme finished), finalise the analysis - could do bookmark identification here too?
                self.db_select("""SELECT pid,totaltweets,meantweets,mediantweets,modetweets,stdevtweets,timestamp,timediff FROM programmes WHERE imported = 1 AND analysed = 0 LIMIT 5000""")
                data = self.db_fetchall()
                # Cycle through each programme that's ready for final analysis
                for result in data:
                    pid = result[0]
                    self.db_select("""SELECT duration,title FROM programmes_unique WHERE pid = %s""",(pid))
                    data2 = self.db_fetchone()
                    if not data2:
                        Print("Getting data for duration,title, etc failed - pid", pid)
                        Print("Let's try skipping this pid")
                        continue
                    duration = data2[0]
                    totaltweets = result[1]
                    meantweets = result[2]
                    mediantweets = result[3]
                    modetweets = result[4]
                    stdevtweets = result[5]
                    title = data2[1]
                    timestamp = result[6]
                    timediff = result[7]
                    # Cycle through checking if all tweets for this programme have been analysed - if so finalise the stats
                    self.db_select("""SELECT tid FROM rawdata WHERE analysed = 0 AND pid = %s""", (pid))
                    if self.db_fetchone() == None:
                        # OK to finalise stats here
                        Print("Analysis component: Finalising stats for pid:", pid, "(" , title , ")")
                        meantweets = float(totaltweets) / (duration / 60) # Mean tweets per minute
                        self.db_select("""SELECT totaltweets FROM analyseddata WHERE pid = %s AND timestamp >= %s AND timestamp < %s""",(pid,timestamp-timediff,timestamp+duration-timediff))
                        analyseddata = self.db_fetchall()

                        runningtime = duration / 60

                        tweetlist = list()
                        for result in analyseddata:
                            totaltweetsmin = result[0]
                            tweetlist.append(int(totaltweetsmin))

                        # Ensure tweetlist has enough entries - as above, if no tweets are recorded for a minute it won't be present in the DB
                        if len(tweetlist) < runningtime:
                            additions = runningtime - len(tweetlist)
                            while additions > 0:
                                tweetlist.append(0)
                                additions -= 1

                        tweetlist.sort()

                        mediantweets = tweetlist[int(len(tweetlist)/2)]

                        modes = dict()
                        stdevlist = list()
                        for tweet in tweetlist:
                            modes[tweet] = tweetlist.count(tweet)
                            stdevlist.append((tweet - meantweets)*(tweet - meantweets))

                        modeitems = [[v, k] for k, v in modes.items()]
                        modeitems.sort(reverse=True)
                        modetweets = int(modeitems[0][1])

                        stdevtweets = 0
                        for val in stdevlist:
                            stdevtweets += val
                        try:
                            stdevtweets = math.sqrt(stdevtweets / runningtime)
                        except ZeroDivisionError:
                            stdevtweets = 0

                        if 1: # This data is purely a readout to the terminal at the moment associated with word and phrase frequency, and retweets
                            sqltimestamp1 = timestamp - timediff
                            sqltimestamp2 = timestamp + duration - timediff
                            self.db_select("""SELECT tweet_id FROM rawdata WHERE pid = %s AND timestamp >= %s AND timestamp < %s""", (pid,sqltimestamp1,sqltimestamp2))
                            rawtweetids = self.db_fetchall()
                            tweetids = list()
                            for tweet in rawtweetids:
                                tweetids.append(tweet[0])

                            if len(tweetids) > 0:
                                # Just in case of a missing raw json object (ie. programme terminated before it was stored - allow it to be skipped if not found after 10 secs)
                                failcounter = 0
                                self.send([pid,tweetids],"nltkfinal")
                                while not self.dataReady("nltkfinal"):
                                    if failcounter >= 1000:
                                        Print("Timed out waiting for NTLKFINAL")
                                        nltkdata = list()
                                        break
                                    time.sleep(0.01)

                                    failcounter += 1
                                    if failcounter % 100 == 0:
                                        Print( "Hanging waiting for NLTKFINAL" )

                                Print("failcounter (<1000 is success)", failcounter)
                                if failcounter < 1000:
#                                if 1:
                                    nltkdata = self.recv("nltkfinal")

                        self.db_update("""UPDATE programmes SET meantweets = %s, mediantweets = %s, modetweets = %s, stdevtweets = %s, analysed = 1 WHERE pid = %s AND timestamp = %s""",(meantweets,mediantweets,modetweets,stdevtweets,pid,timestamp))
                        Print("Analysis component: Done!")

                # Sleep here until more data is available to analyse
                Print("Analysis component: Sleeping for 10 seconds...")
                time.sleep(10)
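
The timestamp arithmetic in this example converts epoch seconds into datetimes and shifts them by the stored UTC offset and start-time correction before measuring how far into the programme a tweet fell. A minimal, self-contained sketch of the same calculation, with made-up values standing in for the database fields:

from datetime import datetime, timedelta

# Hypothetical values standing in for the fields read from the database
timestamp = 1500000000      # scheduled broadcast start, seconds since the epoch (UTC)
timediff = 120              # seconds between the stored timestamp and the true start
utcoffset = 3600            # local offset from UTC, in seconds
tweettime = 1500000300      # tweet's created_at, seconds since the epoch (UTC)

progdate = datetime.utcfromtimestamp(timestamp) + timedelta(seconds=utcoffset)
actualstart = progdate - timedelta(seconds=timediff)
actualtweettime = datetime.utcfromtimestamp(tweettime + utcoffset)

# Minutes into the programme at which the tweet was posted
runningtime = (actualtweettime - actualstart).seconds / 60.0
print(round(runningtime, 1))  # 7.0 minutes with the values above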

Example 27

Project: aurproxy Source File: manager.py
  def test_source_manager(self):
    # Validation Functions
    def val_len(manager, source_endpoint_groups, sources, source_cb_scopes,
                overflow_endpoint_groups, overflow_sources,
                overflow_source_cb_scopes, overflow_threshold,
                weight_adj_start):
      '''
      Validate that the expected number of endpoints are returned.
      '''
      eps = list(itertools.chain(*source_endpoint_groups))
      oeps = list(itertools.chain(*overflow_endpoint_groups))
      self.assertEqual(len(manager.endpoints), len(eps) + len(oeps))

    def val_eps(manager, source_endpoint_groups, sources, source_cb_scopes,
                overflow_endpoint_groups, overflow_sources,
                overflow_source_cb_scopes, overflow_threshold,
                weight_adj_start):
      '''
      Validate that the expected endpoints are returned.
      '''
      eps = list(itertools.chain(*source_endpoint_groups))
      oeps = list(itertools.chain(*overflow_endpoint_groups))
      for ep in eps:
        self.assertIn(ep, manager.endpoints)
      for oep in oeps:
        self.assertIn(oep, manager.endpoints)

    def val_weights_all_healthy(manager, source_endpoint_groups, sources,
                            source_cb_scopes, overflow_endpoint_groups,
                            overflow_sources, overflow_source_cb_scopes,
                            overflow_threshold, weight_adj_start):
      '''
      Validate that when the service is healthy, all endpoints are present and
      weighted correctly.
      '''
      eps = list(itertools.chain(*source_endpoint_groups))
      oeps = list(itertools.chain(*overflow_endpoint_groups))
      for ep in manager.endpoints:
        if ep in eps:
          self.assertEqual(ep.weight, SIGNIFICANCE)
        elif ep in oeps:
          self.assertEqual(ep.weight, 0)
        else:
          raise Exception('Unknown endpoint.')

    def val_weights_overflow_m_src(manager, source_endpoint_groups, sources,
                                   source_cb_scopes, overflow_endpoint_groups,
                                   overflow_sources, overflow_source_cb_scopes,
                                   overflow_threshold, weight_adj_start):
      '''
      Validate that when passing the overflow threshold, all endpoints are
      present and weighted correctly.
      '''
      eps = list(itertools.chain(*source_endpoint_groups))
      oeps = list(itertools.chain(*overflow_endpoint_groups))
      min_healthy = int(len(eps) * float(overflow_threshold) / float(100))
      min_unhealthy = len(eps) - min_healthy

      # Set share to 0 for enough endpoints to reach or almost reach the
      # unhealthy threshold
      for i in range(min_unhealthy):
        ith_ep = source_endpoint_groups[0][i]
        sh_calcs = manager._share_calcs[sources[0]]
        sh_calcs[ith_ep]._share_adjusters[0].set_share(0.0)

      # Overflow shouldn't be on yet.
      for ep in manager.endpoints:
        if ep in oeps:
          self.assertEqual(ep.weight, 0)

      # Regular endpoints serving
      num_reg_serving = len([ep for ep in manager.endpoints
                             if ep.weight > 0 and ep in eps])
      if weight_adj_start == now:
        # Not all regular endpoints should be serving
        self.assertEqual(num_reg_serving, min_healthy)
      else:
        # Regular endpoints should all be serving because weight adjustment hasn't started.
        self.assertEqual(num_reg_serving, len(eps))

      # Overflow endpoints serving
      num_o_serving = len([ep for ep in manager.endpoints
                             if ep.weight > 0 and ep in oeps])
      # No overflow endpoints should be serving
      self.assertEqual(num_o_serving, 0)

      # Turn off one more regular endpoint
      sh_calcs = manager._share_calcs[sources[0]]
      sh_calc = sh_calcs[source_endpoint_groups[0][min_unhealthy+1]]
      sh_calc._share_adjusters[0].set_share(0.0)

      # Regular endpoints serving
      num_reg_serving = len([ep for ep in manager.endpoints
                             if ep.weight > 0 and ep in eps])
      if weight_adj_start == now:
        # Not all regular endpoints should be serving
        self.assertEqual(num_reg_serving, min_healthy-1)
        sum_overflow_weight = 0
        for ep in manager.endpoints:
          if ep in oeps:
            sum_overflow_weight += ep.weight
        self.assertGreater(sum_overflow_weight, 0)
      else:
        # Regular endpoints should all be serving because weight adjustment hasn't started.
        self.assertEqual(num_reg_serving, len(eps))

      # Overflow endpoints serving
      num_o_serving = len([ep for ep in manager.endpoints
                             if ep.weight > 0 and ep in oeps])
      if weight_adj_start == now:
        # Overflow endpoints should be serving
        self.assertGreater(num_o_serving, 0)
      else:
        self.assertEqual(num_o_serving, 0)

    def val_shares_m_src(manager, source_endpoint_groups, sources,
                         source_cb_scopes, overflow_endpoint_groups,
                         overflow_sources, overflow_source_cb_scopes,
                         overflow_threshold, weight_adj_start):
      '''
      Validate that when multiple share adjusters are applied, all endpoints
      are present and weighted correctly.
      '''
      if len(sources[0].share_adjuster_factories) < 2:
        raise Exception('Validator must be run on source with at least 2 share '
                        'adjuster factories registered.')
      if len(sources[0].endpoints) == 0:
        raise Exception('Validator must be run on source with at least one '
                        'endpoint.')
      eps = list(itertools.chain(*source_endpoint_groups))
      # Share calculator for one endpoint
      sh_calc = manager._share_calcs[sources[0]][source_endpoint_groups[0][0]]
      # Set share to 0.5 for 2 sibling adjusters - .5 * .5 -> expect .25
      sh_calc._share_adjusters[0].set_share(0.5)
      sh_calc._share_adjusters[1].set_share(0.5)
      if weight_adj_start == now:
        sorted_eps = sorted(manager.endpoints, key=lambda x: x.weight)
        lowest = sorted_eps[0]
        rest = sorted_eps[1:]
        for ep in rest:
          self.assertTrue(float(lowest.weight)/float(ep.weight) == 0.25)
      else:
        num_reg_serving = len([ep for ep in manager.endpoints
                              if ep.weight > 0 and ep in eps])
        self.assertEqual(num_reg_serving, len(eps))

    # Group standard validation functions
    val_fns = [
      val_len,
      val_eps,
      val_weights_all_healthy
    ]
    # Overflow validation functions
    o_val_fns = [
      val_weights_overflow_m_src
    ]
    # Share validation functions
    share_val_fns = [
      val_shares_m_src
    ]

    # Parameter group parts
    # Source endpoints
    s_1_eps = [SourceEndpoint('127.0.0.1', i) for i in range(8000, 8005)]
    s_2_eps = [SourceEndpoint('127.0.0.1', i) for i in range(9000, 9005)]
    # Overflow source endpoints
    os_1_eps = [SourceEndpoint('127.0.0.1', i) for i in range(10000, 10005)]

    # Share adjuster factory groups
    tst_adjuster = 'tellapart.aurproxytest.share.adjuster.TstShareAdjuster'
    tst_sh_adj_fact = load_klass_factory(tst_adjuster)

    # Source Builders
    s_src_no_sh_adj = [TstSourceBuilder(s_1_eps, [])]
    s_src_m_sh_adj = [TstSourceBuilder(s_1_eps, [tst_sh_adj_fact,
                                                 tst_sh_adj_fact])]
    m_src_no_sh_adj = [TstSourceBuilder(s_1_eps, []),
                       TstSourceBuilder(s_2_eps, [])]
    no_osrc_no_sh_adj = []
    s_osrc_no_sh_adj = [TstSourceBuilder(os_1_eps, [])]
    m_src_s_sh_adj = [TstSourceBuilder(s_1_eps, [tst_sh_adj_fact]),
                      TstSourceBuilder(s_2_eps, [])]

    # Start times
    now = datetime.now()
    future = now + timedelta(days=1)

    # Overflow Threshold Percentages
    thr_none = None
    thr_80 = 80

    # Parameter groups
    #  source_builders
    #  overflow_source_builders
    #  overflow_threshold_pct,
    #  weight_adjustment_start time,
    #  validation_fns
    pgroups = [
      (s_src_no_sh_adj, no_osrc_no_sh_adj, thr_none, now, val_fns),
      (s_src_no_sh_adj, no_osrc_no_sh_adj, thr_none, future, val_fns),
      (s_src_m_sh_adj, no_osrc_no_sh_adj, thr_none, now,
       val_fns+share_val_fns),
      (s_src_m_sh_adj, no_osrc_no_sh_adj, thr_none, future,
       val_fns+share_val_fns),
      (m_src_no_sh_adj, no_osrc_no_sh_adj, thr_none, now, val_fns),
      (m_src_no_sh_adj, no_osrc_no_sh_adj, thr_none, future, val_fns),
      (m_src_s_sh_adj, s_osrc_no_sh_adj, thr_80, now, val_fns+o_val_fns),
      (m_src_s_sh_adj, s_osrc_no_sh_adj, thr_80, future, val_fns+o_val_fns),
    ]

    # Helper to build source and related validation items.
    def build_sources(builders):
      srcs, cb_scopes, ep_groups = [], [], []
      for builder in builders:
        src, cb_scope, eps = builder.build()
        srcs.append(src)
        cb_scopes.append(cb_scope)
        ep_groups.append(eps)
      return srcs, cb_scopes, ep_groups

    # Run validators against parameter groups
    for src_builders, o_src_builders, o_thresh, w_adj_start, v_fns in pgroups:
      for validation_fn in v_fns:
        manager_cb_scope = SourceManagerCallbackScope()
        srcs, src_cb_scopes, src_ep_groups = build_sources(src_builders)
        o_srcs, o_src_cbs, o_src_ep_groups = build_sources(o_src_builders)

        signal_update_fn = manager_cb_scope.signal_update_fn
        manager = SourceGroupManager(sources=srcs,
                                     overflow_threshold_pct=o_thresh,
                                     overflow_sources=o_srcs,
                                     signal_update_fn=signal_update_fn)
        manager.start(weight_adjustment_start=w_adj_start)
        validation_fn(manager, src_ep_groups, srcs, src_cb_scopes,
                      o_src_ep_groups, o_srcs, o_src_ep_groups, o_thresh,
                      w_adj_start)
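
The only timedelta use in this test is building the two candidate start times for weight adjustment: "now" and one day in the future. A minimal sketch of that idea follows; adjustment_active is a hypothetical helper for illustration, not part of aurproxy.

from datetime import datetime, timedelta

now = datetime.now()
future = now + timedelta(days=1)

def adjustment_active(start, at=None):
    # Weight adjustment only takes effect once its start time has been reached
    at = at or datetime.now()
    return at >= start

print(adjustment_active(now))      # True: adjustment starts immediately
print(adjustment_active(future))   # False: adjustment starts a day from now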

Example 28

Project: rapidpro Source File: tests.py
    def test_trigger_schedule(self):
        self.login(self.admin)
        flow = self.create_flow()

        chester = self.create_contact("Chester", "+250788987654")
        shinoda = self.create_contact("Shinoda", "+250234213455")
        linkin_park = self.create_group("Linkin Park", [chester, shinoda])
        stromae = self.create_contact("Stromae", "+250788645323")

        now = timezone.now()
        now_stamp = time.mktime(now.timetuple())

        tomorrow = now + timedelta(days=1)
        tomorrow_stamp = time.mktime(tomorrow.timetuple())

        post_data = dict()
        post_data['omnibox'] = "g-%s,c-%s" % (linkin_park.uuid, stromae.uuid)
        post_data['repeat_period'] = 'D'
        post_data['start'] = 'later'
        post_data['start_datetime_value'] = "%d" % tomorrow_stamp

        response = self.client.post(reverse("triggers.trigger_schedule"), post_data)
        self.assertEquals(response.context['form'].errors.keys(), ['flow'])
        self.assertFalse(Trigger.objects.all())
        self.assertFalse(Schedule.objects.all())

        # survey flows should not be an option
        flow.flow_type = Flow.SURVEY
        flow.save()
        response = self.client.get(reverse("triggers.trigger_schedule"))
        self.assertEqual(0, response.context['form'].fields['flow'].queryset.all().count())

        # back to normal flow type
        flow.flow_type = Flow.FLOW
        flow.save()
        self.assertEqual(1, response.context['form'].fields['flow'].queryset.all().count())

        post_data = dict()
        post_data['flow'] = flow.pk
        post_data['omnibox'] = "g-%s,c-%s" % (linkin_park.uuid, stromae.uuid)
        post_data['start'] = 'never'
        post_data['repeat_period'] = 'O'

        response = self.client.post(reverse("triggers.trigger_schedule"), post_data)
        self.assertEquals(1, Trigger.objects.all().count())

        trigger = Trigger.objects.all().order_by('-pk')[0]
        self.assertTrue(trigger.schedule)
        self.assertEquals(trigger.schedule.status, 'U')
        self.assertEquals(trigger.groups.all()[0].pk, linkin_park.pk)
        self.assertEquals(trigger.contacts.all()[0].pk, stromae.pk)

        post_data = dict()
        post_data['flow'] = flow.pk
        post_data['omnibox'] = "g-%s,c-%s" % (linkin_park.uuid, stromae.uuid)
        post_data['start'] = 'stop'
        post_data['repeat_period'] = 'O'

        response = self.client.post(reverse("triggers.trigger_schedule"), post_data)
        self.assertEquals(2, Trigger.objects.all().count())

        trigger = Trigger.objects.all().order_by('-pk')[0]
        self.assertTrue(trigger.schedule)
        self.assertEquals(trigger.schedule.status, 'U')
        self.assertEquals(trigger.groups.all()[0].pk, linkin_park.pk)
        self.assertEquals(trigger.contacts.all()[0].pk, stromae.pk)

        post_data = dict()
        post_data['flow'] = flow.pk
        post_data['omnibox'] = "g-%s,c-%s" % (linkin_park.uuid, stromae.uuid)
        post_data['repeat_period'] = 'O'
        post_data['start'] = 'now'
        post_data['start_datetime_value'] = "%d" % now_stamp

        response = self.client.post(reverse("triggers.trigger_schedule"), post_data)
        self.assertEquals(3, Trigger.objects.all().count())

        trigger = Trigger.objects.all().order_by('-pk')[0]
        self.assertTrue(trigger.schedule)
        self.assertFalse(trigger.schedule.next_fire)
        self.assertEquals(trigger.schedule.repeat_period, 'O')
        self.assertEquals(trigger.schedule.repeat_days, 0)
        self.assertEquals(trigger.groups.all()[0].pk, linkin_park.pk)
        self.assertEquals(trigger.contacts.all()[0].pk, stromae.pk)

        post_data = dict()
        post_data['flow'] = flow.pk
        post_data['omnibox'] = "g-%s,c-%s" % (linkin_park.uuid, stromae.uuid)
        post_data['repeat_period'] = 'D'
        post_data['start'] = 'later'
        post_data['start_datetime_value'] = "%d" % tomorrow_stamp

        response = self.client.post(reverse("triggers.trigger_schedule"), post_data)
        self.assertEquals(4, Trigger.objects.all().count())

        trigger = Trigger.objects.all().order_by('-pk')[0]
        self.assertTrue(trigger.schedule)
        self.assertEquals(trigger.schedule.repeat_period, 'D')
        self.assertEquals(trigger.groups.all()[0].pk, linkin_park.pk)
        self.assertEquals(trigger.contacts.all()[0].pk, stromae.pk)

        update_url = reverse('triggers.trigger_update', args=[trigger.pk])

        post_data = dict()
        post_data['omnibox'] = "g-%s,c-%s" % (linkin_park.uuid, stromae.uuid)
        post_data['repeat_period'] = 'O'
        post_data['start'] = 'now'
        post_data['start_datetime_value'] = "%d" % now_stamp

        response = self.client.post(update_url, post_data)
        self.assertEquals(response.context['form'].errors.keys(), ['flow'])

        post_data = dict()
        post_data['flow'] = flow.pk
        post_data['omnibox'] = "g-%s" % linkin_park.uuid
        post_data['repeat_period'] = 'O'
        post_data['start'] = 'now'
        post_data['start_datetime_value'] = "%d" % now_stamp

        response = self.client.post(update_url, post_data)

        trigger = Trigger.objects.get(pk=trigger.pk)
        self.assertTrue(trigger.schedule)
        self.assertEquals(trigger.schedule.repeat_period, 'O')
        self.assertFalse(trigger.schedule.next_fire)
        self.assertEquals(trigger.groups.all()[0].pk, linkin_park.pk)
        self.assertFalse(trigger.contacts.all())

        post_data = dict()
        post_data['flow'] = flow.pk
        post_data['omnibox'] = "g-%s,c-%s" % (linkin_park.uuid, stromae.uuid)
        post_data['start'] = 'never'
        post_data['repeat_period'] = 'O'

        response = self.client.post(update_url, post_data)

        trigger = Trigger.objects.get(pk=trigger.pk)
        self.assertTrue(trigger.schedule)
        self.assertEquals(trigger.schedule.status, 'U')
        self.assertEquals(trigger.groups.all()[0].pk, linkin_park.pk)
        self.assertEquals(trigger.contacts.all()[0].pk, stromae.pk)

        post_data = dict()
        post_data['flow'] = flow.pk
        post_data['omnibox'] = "g-%s,c-%s" % (linkin_park.uuid, stromae.uuid)
        post_data['start'] = 'stop'
        post_data['repeat_period'] = 'O'

        response = self.client.post(update_url, post_data)

        trigger = Trigger.objects.get(pk=trigger.pk)
        self.assertTrue(trigger.schedule)
        self.assertEquals(trigger.schedule.status, 'U')
        self.assertEquals(trigger.groups.all()[0].pk, linkin_park.pk)
        self.assertEquals(trigger.contacts.all()[0].pk, stromae.pk)

        post_data = dict()
        post_data['flow'] = flow.pk
        post_data['omnibox'] = "g-%s,c-%s" % (linkin_park.uuid, stromae.uuid)
        post_data['repeat_period'] = 'D'
        post_data['start'] = 'later'
        post_data['start_datetime_value'] = "%d" % tomorrow_stamp

        response = self.client.post(update_url, post_data)

        trigger = Trigger.objects.get(pk=trigger.pk)

        self.assertTrue(trigger.schedule)
        self.assertEquals(trigger.schedule.repeat_period, 'D')
        self.assertEquals(trigger.groups.all()[0].pk, linkin_park.pk)
        self.assertEquals(trigger.contacts.all()[0].pk, stromae.pk)
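
The schedule test builds its "later" start time as now plus one day and then converts both datetimes to POSIX timestamps for the form fields. A minimal sketch of just that conversion:

import time
from datetime import datetime, timedelta

now = datetime.now()
tomorrow = now + timedelta(days=1)

# The scheduling form expects numeric POSIX timestamps
now_stamp = time.mktime(now.timetuple())
tomorrow_stamp = time.mktime(tomorrow.timetuple())

# Roughly one day apart (a DST transition can shift this by an hour)
print(int(tomorrow_stamp - now_stamp))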

Example 29

Project: kamaelia_ Source File: TwitterSearch.py
Function: main
    def main(self):
        twitterurl = "http://api.twitter.com/1/users/search.json"

        if self.proxy:
            proxyhandler = urllib2.ProxyHandler({"http" : self.proxy})
            twitopener = urllib2.build_opener(proxyhandler)
            urllib2.install_opener(twitopener)

        headers = {'User-Agent' : "BBC R&D Grabber"}
        postdata = None

        if self.keypair == False:
            # Perform OAuth authentication - as we don't have the secret key pair we need to request it
            # This will require some user input
            request_token_url = 'http://api.twitter.com/oauth/request_token'
            access_token_url = 'http://api.twitter.com/oauth/access_token'
            authorize_url = 'http://api.twitter.com/oauth/authorize'

            token = None
            consumer = oauth.Consumer(key=self.consumerkeypair[0],secret=self.consumerkeypair[1])

            params = {
                        'oauth_version': "1.0",
                        'oauth_nonce': oauth.generate_nonce(),
                        'oauth_timestamp': int(time.time()),
                    }

            params['oauth_consumer_key'] = consumer.key

            req = oauth.Request(method="GET",url=request_token_url,parameters=params)

            signature_method = oauth.SignatureMethod_HMAC_SHA1()
            req.sign_request(signature_method, consumer, token)

            requestheaders = req.to_header()
            requestheaders['User-Agent'] = "BBC R&D Grabber"

            # Connect to Twitter
            try:
                req = urllib2.Request(request_token_url,None,requestheaders) # Why won't this work?!? Is it trying to POST?
                conn1 = urllib2.urlopen(req)
            except httplib.BadStatusLine:
                e = sys.exc_info()[1]
                Print("PeopleSearch BadStatusLine error:", e )
                conn1 = False
            except urllib2.HTTPError:
                e = sys.exc_info()[1]
                Print("PeopleSearch HTTP error:", e.code)
#                sys.stderr.write('PeopleSearch HTTP error: ' + str(e.code) + '\n')
                conn1 = False
            except urllib2.URLError:
                e = sys.exc_info()[1]
                Print("PeopleSearch URL error: ", e.reason)
#                sys.stderr.write('PeopleSearch URL error: ' + str(e.reason) + '\n')
                conn1 = False

            if conn1:
                content = conn1.read()
                conn1.close()

                request_token = dict(urlparse.parse_qsl(content))

                Print( "Request Token:")
                Print("     - oauth_token        = " , request_token['oauth_token'])
                Print("     - oauth_token_secret = " , request_token['oauth_token_secret'])
                Print("")

                # The user must confirm authorisation so a URL is Printed here
                Print("Go to the following link in your browser:")
                Print("%s?oauth_token=%s" % (authorize_url, request_token['oauth_token']) )
                Print("")

                accepted = 'n'
                # Wait until the user has confirmed authorisation
                while accepted.lower() == 'n':
                    accepted = raw_input('Have you authorized me? (y/n) ')
                oauth_verifier = raw_input('What is the PIN? ')

                token = oauth.Token(request_token['oauth_token'],
                    request_token['oauth_token_secret'])
                token.set_verifier(oauth_verifier)

                params = {
                        'oauth_version': "1.0",
                        'oauth_nonce': oauth.generate_nonce(),
                        'oauth_timestamp': int(time.time()),
                    }

                params['oauth_token'] = token.key
                params['oauth_consumer_key'] = consumer.key

                req = oauth.Request(method="GET",url=access_token_url,parameters=params)

                signature_method = oauth.SignatureMethod_HMAC_SHA1()
                req.sign_request(signature_method, consumer, token)

                requestheaders = req.to_header()
                requestheaders['User-Agent'] = "BBC R&D Grabber"
                # Connect to Twitter
                try:
                    req = urllib2.Request(access_token_url,"oauth_verifier=%s" % oauth_verifier,requestheaders) # Why won't this work?!? Is it trying to POST?
                    conn1 = urllib2.urlopen(req)
                except httplib.BadStatusLine:
                    e = sys.exc_info()[1]
#                    sys.stderr.write('PeopleSearch BadStatusLine error: ' + str(e) + '\n')
                    Print('PeopleSearch BadStatusLine error: ', e)
                    conn1 = False
                except urllib2.HTTPError:
                    e = sys.exc_info()[1]
                    Print('PeopleSearch HTTP error: ', e.code)
                    conn1 = False
                except urllib2.URLError:
                    e = sys.exc_info()[1]
#                    sys.stderr.write('PeopleSearch URL error: ' + str(e.reason) + '\n')
                    Print('PeopleSearch URL error: ', e.reason)
                    conn1 = False

                if conn1:
                    content = conn1.read()
                    conn1.close()
                    access_token = dict(urlparse.parse_qsl(content))

                    # Access tokens retrieved from Twitter
                    Print("Access Token:")
                    Print("     - oauth_token        = " , access_token['oauth_token'])
                    Print("     - oauth_token_secret = " , access_token['oauth_token_secret'])
                    Print("")
                    Print("You may now access protected resources using the access tokens above.")
                    Print("")

                    save = False
                    # Load config to save OAuth keys
                    try:
                        homedir = os.path.expanduser("~")
                        file = open(homedir + "/twitter-login.conf",'r')
                        save = True
                    except IOError:
                        e = sys.exc_info()[1]
                        Print ("Failed to load config file - not saving oauth keys: " , e)

                    if save:
                        raw_config = file.read()

                        file.close()

                        # Read config and add new values
                        config = cjson.decode(raw_config)
                        config['key'] = access_token['oauth_token']

                        config['secret'] = access_token['oauth_token_secret']

                        raw_config = cjson.encode(config)

                        # Write out the new config file
                        try:
                            file = open(homedir + "/twitter-login.conf",'w')
                            file.write(raw_config)
                            file.close()
                        except IOError:
                            e = sys.exc_info()[1]
                            Print ("Failed to save oauth keys: " , e)

                    self.keypair = [access_token['oauth_token'], access_token['oauth_token_secret']]
        

        while not self.finished():
            # TODO: Implement backoff algorithm in case of connection failures - watch out for the fact this could delay the requester component
            if self.dataReady("inbox"):
                # Retrieve keywords to look up
                person = self.recv("inbox")

                # Ensure we're not rate limited during the first request - if so we'll wait for 15 mins before our next request
                if (datetime.today() - timedelta(minutes=15)) > self.ratelimited:
                    requesturl = twitterurl + "?q=" + urllib.quote(person) + "&per_page=5"

                    params = {
                        'oauth_version': "1.0",
                        'oauth_nonce': oauth.generate_nonce(),
                        'oauth_timestamp': int(time.time()),
                    }

                    token = oauth.Token(key=self.keypair[0],secret=self.keypair[1])
                    consumer = oauth.Consumer(key=self.consumerkeypair[0],secret=self.consumerkeypair[1])

                    params['oauth_token'] = token.key
                    params['oauth_consumer_key'] = consumer.key

                    req = oauth.Request(method="GET",url=requesturl,parameters=params)

                    signature_method = oauth.SignatureMethod_HMAC_SHA1()
                    req.sign_request(signature_method, consumer, token)

                    requestheaders = req.to_header()
                    requestheaders['User-Agent'] = "BBC R&D Grabber"

                    # Connect to Twitter
                    try:
                        req = urllib2.Request(requesturl,None,requestheaders) # Why won't this work?!? Is it trying to POST?
                        conn1 = urllib2.urlopen(req)
                    except httplib.BadStatusLine:
                        e = sys.exc_info()[1]
#                        sys.stderr.write('PeopleSearch BadStatusLine error: ' + str(e) + '\n')
                        Print('PeopleSearch BadStatusLine error: ', e)
                        conn1 = False
                    except urllib2.HTTPError:
                        e = sys.exc_info()[1]
#                        sys.stderr.write('PeopleSearch HTTP error: ' + str(e.code) + '\n')
                        Print('PeopleSearch HTTP error: ', e.code)
                        conn1 = False
                    except urllib2.URLError:
                        e = sys.exc_info()[1]
#                        sys.stderr.write('PeopleSearch URL error: ' + str(e.reason) + '\n')
                        Print('PeopleSearch URL error: ', e.reason)
                        conn1 = False

                    if conn1:
                        # Check rate limiting here and Print current limit
                        headers = conn1.info()
                        try:
                            headerlist = string.split(str(headers),"\n")
                        except UnicodeEncodeError: # str may fail...
                            headerlist = []
                        for line in headerlist:
                            if line != "":
                                splitheader = line.split()
                                if splitheader[0] == "X-FeatureRateLimit-Remaining:" or splitheader[0] == "X-RateLimit-Remaining:":
                                    Print(splitheader[0] , " " , splitheader[1] )
                                    if int(splitheader[1]) < 5:
                                        self.ratelimited = datetime.today()
                        # Grab json format result of people search here
                        try:
                            data = conn1.read()
                            try:
                                content = cjson.decode(data)
                                self.send(content,"outbox")
                            except cjson.DecodeError:
                                self.send(dict(),"outbox")
                        except IOError:
                            e = sys.exc_info()[1]
#                            sys.stderr.write('PeopleSearch IO error: ' + str(e) + '\n')
                            Print('PeopleSearch IO error: ', e)
                            self.send(dict(),"outbox")
                        conn1.close()
                    else:
                        self.send(dict(),"outbox")
                else:
                   Print("Twitter search paused - rate limited")
                   self.send(dict(),"outbox")
            self.pause()
            yield 1
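
The rate-limit check in this example allows a new request only when the last recorded rate-limit event is more than 15 minutes old. A minimal sketch of that back-off test; RateLimitedClient is a hypothetical stand-in for the component, not its real class name.

from datetime import datetime, timedelta

class RateLimitedClient:
    def __init__(self):
        # Start far enough in the past that the first request is allowed
        self.ratelimited = datetime.min

    def can_request(self):
        # Allowed only if the last rate-limit event was more than 15 minutes ago
        return (datetime.today() - timedelta(minutes=15)) > self.ratelimited

    def mark_rate_limited(self):
        self.ratelimited = datetime.today()

client = RateLimitedClient()
print(client.can_request())   # True
client.mark_rate_limited()
print(client.can_request())   # False for the next 15 minutes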

Example 30

Project: stacktester Source File: test_servers.py
    def test_build_server(self):
        """Build and manipulate a server"""

        # Don't block for the server until later
        expected_server = {
            'name': 'stacktester1',
            'imageRef': self.image_ref,
            'flavorRef': self.flavor_ref,
            'metadata': {'testEntry': 'testValue'},
        }
        post_body = json.dumps({'server': expected_server})
        response, body = self.os.nova.request('POST',
                                              '/servers',
                                              body=post_body)

        # Ensure attributes were returned
        self.assertEqual(response.status, 202)
        _body = json.loads(body)
        self.assertEqual(_body.keys(), ['server'])
        created_server = _body['server']
        admin_pass = created_server.pop('adminPass')
        self._assert_server_entity(created_server)
        self.assertEqual(expected_server['name'], created_server['name'])
        self.assertEqual(created_server['accessIPv4'], '')
        self.assertEqual(created_server['accessIPv6'], '')
        self.assertEqual(expected_server['metadata'],
                         created_server['metadata'])
        server_id = created_server['id']

        # Get server again and ensure attributes stuck
        server = self.os.nova.get_server(server_id)
        self._assert_server_entity(server)
        self.assertEqual(server['name'], expected_server['name'])
        self.assertEqual(server['accessIPv4'], '')
        self.assertEqual(server['accessIPv6'], '')
        self.assertEqual(server['metadata'], created_server['metadata'])

        # Parse last-updated time
        update_time = utils.load_isotime(server['updated'])

        # Ensure server not returned with future changes-since
        future_time = utils.dump_isotime(update_time + datetime.timedelta(1))
        params = 'changes-since?%s' % future_time
        response, body = self.os.nova.request('GET', '/servers?%s' % params)
        servers = json.loads(body)['servers']
        self.assertTrue(len(servers) == 0)

        # Ensure server is returned with past changes-since
        future_time = utils.dump_isotime(update_time - datetime.timedelta(1))
        params = 'changes-since?%s' % future_time
        response, body = self.os.nova.request('GET', '/servers?%s' % params)
        servers = json.loads(body)['servers']
        server_ids = map(lambda x: x['id'], servers)
        self.assertTrue(server_id in server_ids)

        # Update name
        new_server = {'name': 'stacktester2'}
        put_body = json.dumps({'server': new_server})
        url = '/servers/%s' % server_id
        resp, body = self.os.nova.request('PUT', url, body=put_body)

        # Output from update should be a full server
        self.assertEqual(resp.status, 200)
        data = json.loads(body)
        self.assertEqual(data.keys(), ['server'])
        self._assert_server_entity(data['server'])
        self.assertEqual('stacktester2', data['server']['name'])

        # Check that name was changed
        updated_server = self.os.nova.get_server(server_id)
        self._assert_server_entity(updated_server)
        self.assertEqual('stacktester2', updated_server['name'])

        # Update accessIPv4
        new_server = {'accessIPv4': '192.168.0.200'}
        put_body = json.dumps({'server': new_server})
        url = '/servers/%s' % server_id
        resp, body = self.os.nova.request('PUT', url, body=put_body)

        # Output from update should be a full server
        self.assertEqual(resp.status, 200)
        data = json.loads(body)
        self.assertEqual(data.keys(), ['server'])
        self._assert_server_entity(data['server'])
        self.assertEqual('192.168.0.200', data['server']['accessIPv4'])

        # Check that accessIPv4 was changed
        updated_server = self.os.nova.get_server(server_id)
        self._assert_server_entity(updated_server)
        self.assertEqual('192.168.0.200', updated_server['accessIPv4'])

        # Update accessIPv6
        new_server = {'accessIPv6': 'feed::beef'}
        put_body = json.dumps({'server': new_server})
        url = '/servers/%s' % server_id
        resp, body = self.os.nova.request('PUT', url, body=put_body)

        # Output from update should be a full server
        self.assertEqual(resp.status, 200)
        data = json.loads(body)
        self.assertEqual(data.keys(), ['server'])
        self._assert_server_entity(data['server'])
        self.assertEqual('feed::beef', data['server']['accessIPv6'])

        # Check that accessIPv6 was changed
        updated_server = self.os.nova.get_server(server_id)
        self._assert_server_entity(updated_server)
        self.assertEqual('feed::beef', updated_server['accessIPv6'])

        # Check metadata subresource
        url = '/servers/%s/metadata' % server_id
        response, body = self.os.nova.request('GET', url)
        self.assertEqual(200, response.status)

        result = json.loads(body)
        expected = {'metadata': {'testEntry': 'testValue'}}
        self.assertEqual(expected, result)

        # Ensure metadata container can be modified
        expected = {
            'metadata': {
                'new_meta1': 'new_value1',
                'new_meta2': 'new_value2',
            },
        }
        post_body = json.dumps(expected)
        url = '/servers/%s/metadata' % server_id
        response, body = self.os.nova.request('POST', url, body=post_body)
        self.assertEqual(200, response.status)
        result = json.loads(body)
        expected['metadata']['testEntry'] = 'testValue'
        self.assertEqual(expected, result)

        # Ensure values stick
        url = '/servers/%s/metadata' % server_id
        response, body = self.os.nova.request('GET', url)
        self.assertEqual(200, response.status)
        result = json.loads(body)
        self.assertEqual(expected, result)

        # Ensure metadata container can be overwritten
        expected = {
            'metadata': {
                'new_meta3': 'new_value3',
                'new_meta4': 'new_value4',
            },
        }
        url = '/servers/%s/metadata' % server_id
        post_body = json.dumps(expected)
        response, body = self.os.nova.request('PUT', url, body=post_body)
        self.assertEqual(200, response.status)
        result = json.loads(body)
        self.assertEqual(expected, result)

        # Ensure values stick
        url = '/servers/%s/metadata' % server_id
        response, body = self.os.nova.request('GET', url)
        self.assertEqual(200, response.status)
        result = json.loads(body)
        self.assertEqual(expected, result)

        # Set specific key
        expected_meta = {'meta': {'new_meta5': 'new_value5'}}
        put_body = json.dumps(expected_meta)
        url = '/servers/%s/metadata/new_meta5' % server_id
        response, body = self.os.nova.request('PUT', url, body=put_body)
        self.assertEqual(200, response.status)
        result = json.loads(body)
        self.assertDictEqual(expected_meta, result)

        # Ensure value sticks
        expected_metadata = {
            'metadata': {
                'new_meta3': 'new_value3',
                'new_meta4': 'new_value4',
                'new_meta5': 'new_value5',
            },
        }
        url = '/servers/%s/metadata' % server_id
        response, body = self.os.nova.request('GET', url)
        result = json.loads(body)
        self.assertDictEqual(expected_metadata, result)

        # Update existing key
        expected_meta = {'meta': {'new_meta4': 'new_value6'}}
        put_body = json.dumps(expected_meta)
        url = '/servers/%s/metadata/new_meta4' % server_id
        response, body = self.os.nova.request('PUT', url, body=put_body)
        self.assertEqual(200, response.status)
        result = json.loads(body)
        self.assertEqual(expected_meta, result)

        # Ensure value sticks
        expected_metadata = {
            'metadata': {
                'new_meta3': 'new_value3',
                'new_meta4': 'new_value6',
                'new_meta5': 'new_value5',
            },
        }
        url = '/servers/%s/metadata' % server_id
        response, body = self.os.nova.request('GET', url)
        result = json.loads(body)
        self.assertDictEqual(expected_metadata, result)

        # Delete a certain key
        url = '/servers/%s/metadata/new_meta3' % server_id
        response, body = self.os.nova.request('DELETE', url)
        self.assertEquals(204, response.status)

        # Make sure the key is gone
        url = '/servers/%s/metadata/new_meta3' % server_id
        response, body = self.os.nova.request('GET', url)
        self.assertEquals(404, response.status)

        # Delete a nonexistent key
        url = '/servers/%s/metadata/new_meta3' % server_id
        response, body = self.os.nova.request('DELETE', url)
        self.assertEquals(404, response.status)

        # Wait for instance to boot
        server_id = created_server['id']
        self.os.nova.wait_for_server_status(server_id,
                                            'ACTIVE',
                                            timeout=self.build_timeout)

        # Look for 'addresses' attribute on server
        url = '/servers/%s' % server_id
        response, body = self.os.nova.request('GET', url)
        self.assertEqual(response.status, 200)
        body = json.loads(body)
        self.assertTrue('addresses' in body['server'].keys())
        server_addresses = body['server']['addresses']

        # Addresses should be available from subresource
        url = '/servers/%s/ips' % server_id
        response, body = self.os.nova.request('GET', url)
        self.assertEqual(response.status, 200)
        body = json.loads(body)
        self.assertEqual(body.keys(), ['addresses'])
        ips_addresses = body['addresses']

        # Ensure both resources return identical information
        self.assertEqual(server_addresses, ips_addresses)

        # Validate entities within network containers
        for (network, network_data) in ips_addresses.items():
            url = '/servers/%s/ips/%s' % (server_id, network)
            response, body = self.os.nova.request('GET', url)
            self.assertEqual(response.status, 200)
            body = json.loads(body)
            self.assertEqual(body.keys(), [network])
            self.assertEqual(body[network], network_data)

            # Check each IP entity
            for ip_data in network_data:
                self.assertEqual(set(ip_data.keys()), set(['addr', 'version']))

        # Find IP of server
        try:
            (_, network) = server_addresses.items()[0]
            ip = network[0]['addr']
        except KeyError:
            self.fail("Failed to retrieve IP address from server entity")

        # Assert password works
        client = ssh.Client(ip, 'root', admin_pass, self.ssh_timeout)
        self.assertTrue(client.test_connection_auth())

        # Delete server
        url = '/servers/%s' % server_id
        response, body = self.os.nova.request('DELETE', url)
        self.assertEqual(response.status, 204)

        # Poll server until deleted
        try:
            url = '/servers/%s' % server_id
            self.os.nova.poll_request_status('GET', url, 404)
        except exceptions.TimeoutException:
            self.fail("Server deletion timed out")

Example 31

Project: django-bulbs Source File: test_spec_cov_query.py
    def setUp(self):
        super(BaseCustomSearchFilterTests, self).setUp()
        feature_type_names = (
            "News", "Slideshow", "TV Club", "Video",
        )
        feature_types = []
        for name in feature_type_names:
            feature_types.append(FeatureType.objects.create(name=name))
        tag_names = (
            "Barack Obama", "Joe Biden", "Wow", "Funny", "Politics"
        )
        tags = []
        for name in tag_names:
            tags.append(Tag.objects.create(name=name))
        content_data = (
            dict(
                title="Obama Does It Again",
                feature_type=0,
                tags=[0, 2, 4]
            ),
            dict(
                title="Biden Does It Again",
                feature_type=0,
                tags=[1, 2, 4]
            ),
            dict(
                title="Obama In Slides Is Flawless",
                feature_type=1,
                tags=[0, 2, 4]
            ),
            dict(
                title="Obama On TV",
                feature_type=2,
                tags=[0, 2]
            ),
            dict(
                title="Flawless video here",
                feature_type=3,
                tags=[3, 2]
            ),
            dict(
                title="Both Obama and Biden in One Article",
                feature_type=3,
                tags=[0, 1, 2]
            ),
        )
        time_step = timedelta(hours=12)
        pubtime = timezone.now() + time_step
        content_list = []
        for data in content_data:
            data["published"] = pubtime
            data["feature_type"] = feature_types[data.pop("feature_type")]
            data["tags"] = [tags[tag_idx] for tag_idx in data.pop("tags")]
            content = make_content(**data)
            content_list.append(content)
            content.index()  # reindex for related object updates
            pubtime -= time_step
        self.content_list = content_list
        self.feature_types = feature_types
        self.tags = tags
        Content.search_objects.refresh()

        # NOTE: we updated some field names after I initially typed this up.
        # NOTE: These functions munge the existing data into the new form.
        def makeGroups(groups):
            result = []
            for group in groups:
                if isinstance(group, dict):
                    this_group = group
                else:
                    this_group = dict(conditions=[])
                    for condition in group:
                        this_group["conditions"].append(makeCondition(*condition))
                result.append(this_group)
            return result

        def makeCondition(field, type, values):
            return dict(
                field=field, type=type,
                values=[dict(label=v, value=v) for v in values]
            )

        s_biden = dict(
            label="All Biden, Baby",
            query=dict(
                groups=makeGroups([
                    [
                        ("tag", "all", [self.tags[1].slug]),
                    ],
                ])
            )
        )
        s_obama = dict(
            label="All Obama, Baby",
            query=dict(
                groups=makeGroups([
                    [
                        ("tag", "all", [self.tags[0].slug]),
                    ],
                ])
            )
        )
        # logical and
        s_b_and_b = dict(
            label="Obama and Biden, together!",
            query=dict(
                groups=makeGroups([
                    [
                        ("tag", "all", [
                            self.tags[0].slug,
                            self.tags[1].slug
                        ]),
                    ],
                ])
            )
        )
        # logical or
        s_b_or_b = dict(
            label="Obama or Biden, whatever!",
            query=dict(
                groups=makeGroups([
                    [
                        ("tag", "any", [
                            self.tags[0].slug,
                            self.tags[1].slug
                        ]),
                    ],
                ])
            )
        )
        # excluding some tags
        s_lite_obama = dict(
            label="Obama but not political stuff",
            query=dict(
                groups=makeGroups([
                    [
                        ("tag", "all", [
                            self.tags[0].slug,  # obama
                        ]),
                        ("tag", "none", [
                            self.tags[4].slug,  # politics
                        ]),
                    ],
                ])
            )
        )
        # multiple, disjoint groups
        s_funny_and_slideshows = dict(
            label="Anything funny and also slideshows!",
            query=dict(
                groups=makeGroups([
                    [
                        ("tag", "any", [
                            self.tags[3].slug  # funny tags
                        ]),
                    ],
                    [
                        ("feature-type", "any", [
                            self.feature_types[1].slug  # slideshow
                        ]),
                    ],
                ])
            )
        )
        # this tag is on everything
        s_wow = dict(
            label="Wow!",
            query=dict(
                groups=makeGroups([
                    [
                        ("tag", "all", [
                            self.tags[2].slug  # the "Wow" tag, present on everything
                        ]),
                    ],
                ])
            )
        )
        # filter by content type
        s_doctype = dict(
            label="Doctype",
            query=dict(
                groups=makeGroups([
                    [
                        ("content-type", "all", [
                            TestContentObjTwo.search_objects.mapping.doc_type
                        ])
                    ]
                ])
            )
        )
        # include some ids
        s_one_article = dict(
            label="Just this article",
            query=dict(
                groups=[],
                included_ids=[self.content_list[0].id]
            )
        )
        s_two_articles = dict(
            label="Just two articles",
            query=dict(
                groups=[],
                included_ids=[
                    self.content_list[0].id,
                    self.content_list[3].id
                ]
            )
        )
        # exclude ids
        s_all_but_one_article = dict(
            label="All but one article",
            query=dict(
                groups=[],
                excluded_ids=[
                    self.content_list[0].id
                ]
            )
        )
        # last day of articles
        s_last_day = dict(
            label="Last day",
            query=dict(
                groups=[dict(
                    conditions=[],
                    time="1 day"
                )],
            )
        )
        # pinned
        s_pinned = dict(
            label="Pinned something",
            query=dict(
                pinned_ids=[
                    content_list[-1].id  # last in time
                ]
            )
        )
        # pinned 2
        s_pinned_2 = dict(
            label="Pinned 2 things",
            query=dict(
                pinned_ids=[
                    content_list[-1].id,  # last in time
                    content_list[-2].id  # penultimate
                ]
            )
        )
        # pinned 2 with groups
        s_pinned_2_groups = dict(
            label="Pinned 2 things with other filters",
            query=dict(
                groups=makeGroups([
                    [
                        ("tag", "any", [
                            self.tags[0].slug,
                            self.tags[1].slug,
                            self.tags[2].slug,
                            self.tags[3].slug,
                            self.tags[4].slug
                        ]),
                    ]
                ]),
                pinned_ids=[
                    content_list[-1].id,  # last in time
                    content_list[-2].id  # penultimate
                ]
            )
        )
        # text query
        s_text_query = dict(
            label="Text query",
            query=dict(
                query="again"
            )
        )
        # text query with pinned ids
        s_text_query_pinned = dict(
            label="Text query",
            query=dict(
                groups=makeGroups([
                    [
                        ("tag", "any", [self.tags[2].slug]),
                    ]
                ]),
                pinned_ids=[self.content_list[4].id],
                query="Flawless"
            )
        )
        # saved search and the expected result count
        self.search_expectations = (
            (s_biden, 2),
            (s_obama, 4),
            (s_b_and_b, 1),
            (s_b_or_b, 5),
            (s_lite_obama, 2),
            (s_funny_and_slideshows, 2),
            (s_wow, len(self.content_list)),
            (s_one_article, 1),
            (s_two_articles, 2),
            (s_all_but_one_article, len(self.content_list) - 1),
            (s_last_day, 3),
            (s_pinned, len(self.content_list)),
            (s_pinned_2, len(self.content_list)),
            (s_pinned_2_groups, len(self.content_list)),
            (s_doctype, TestContentObjTwo.objects.count()),
            (s_text_query, 2),
            (s_text_query_pinned, 2),
        )
        self.preview_expectations = (
            (s_biden, 2),
            (s_obama, 4),
            (s_b_and_b, 1),
            (s_b_or_b, 5),
            (s_lite_obama, 2),
            (s_funny_and_slideshows, 2),
            (s_wow, len(self.content_list)),
            (s_one_article, 1),
            (s_two_articles, 2),
            (s_all_but_one_article, len(self.content_list)),  # excluded
            (s_last_day, 3),
            (s_doctype, TestContentObjTwo.objects.count()),
            (s_text_query, 2),
            (s_text_query_pinned, 2),
        )
        self.group_preview_expectations = (
            (s_biden, 2),
            (s_obama, 4),
            (s_b_and_b, 1),
            (s_wow, len(self.content_list)),
            (s_one_article, 1),
            (s_two_articles, 2),
            (s_all_but_one_article, len(self.content_list)),  # excluded
        )
        # is not published and not is_preview
        self.unpublished_expectations = (
            (s_biden, 2),
            (s_obama, 4),
            (s_b_and_b, 1),
            (s_b_or_b, 5),
            (s_lite_obama, 2),
            (s_funny_and_slideshows, 2),
            (s_wow, len(self.content_list)),
            (s_one_article, 1),
            (s_two_articles, 2),
            (s_all_but_one_article, len(self.content_list) - 1),
            (s_last_day, 3),
            (s_pinned, len(self.content_list)),
            (s_pinned_2, len(self.content_list)),
            (s_pinned_2_groups, len(self.content_list)),
            (s_text_query, 2),
            (s_text_query_pinned, 2),
        )
        # is published and not is_preview
        self.published_expectations = (
            (s_biden, 2),
            (s_obama, 3),
            (s_b_and_b, 1),
            (s_b_or_b, 5 - 1),
            (s_lite_obama, 2),
            (s_funny_and_slideshows, 2),
            (s_wow, len(self.content_list) - 1),
            (s_one_article, 1 - 1),
            (s_two_articles, 2 - 1),
            (s_all_but_one_article, len(self.content_list) - 1),
            (s_last_day, 2),
            (s_pinned, len(self.content_list) - 1),
            (s_pinned_2, len(self.content_list) - 1),
            (s_pinned_2_groups, len(self.content_list) - 1),
            (s_text_query, 1),
            (s_text_query_pinned, 2),
        )
        self.published_not_pinned_expectations = (
            (s_biden, 2),
            (s_obama, 3),
            (s_b_and_b, 1),
            (s_b_or_b, 5 - 1),
            (s_lite_obama, 2),
            (s_funny_and_slideshows, 2),
            (s_wow, len(self.content_list) - 1),
            (s_one_article, 1 - 1),
            (s_two_articles, 2 - 1),
            (s_all_but_one_article, len(self.content_list) - 1),
            (s_last_day, 2),
        )
        # (search filter, (list, of, ids, in, order)),
        self.ordered_expectations = (
            (s_all_but_one_article, (2, 3, 4)),
            (s_text_query_pinned, (content_list[4].id, content_list[2].id)),
        )
        self.pinned_expectations = (
            (s_pinned, (
                content_list[-1].id,
                content_list[0].id, content_list[1].id,
            )),
            (s_pinned_2, (
                content_list[-2].id, content_list[-1].id,
                content_list[0].id, content_list[1].id,
            )),
            (s_pinned_2_groups, (
                content_list[-2].id, content_list[-1].id,
                content_list[0].id, content_list[1].id,
            )),
        )
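
The setUp above spaces the published timestamps a fixed timedelta apart, starting one step in the future and walking backwards through the content list. Here is a stand-alone sketch of the same pattern; it uses datetime.utcnow() where the test uses Django's timezone.now(), and the function name is made up for illustration.

from datetime import datetime, timedelta

def spaced_pub_times(count, step=timedelta(hours=12), start=None):
    """Yield publish times that begin one step in the future and then walk
    backwards, mirroring the pubtime loop in the setUp above."""
    current = (start or datetime.utcnow()) + step
    for _ in range(count):
        yield current
        current -= step

# e.g. six descending timestamps half a day apart:
# times = list(spaced_pub_times(6))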

Example 32

Project: SmartElect Source File: test_end_to_end.py
    def _create_election_day_data(self, expected_stats):
        """Create various types of election data for testing of the election
        day dashboard."""

        # Pick open times that could vary by date based on time zone.
        rc_1_open_time = self.election_day_dt.replace(hour=1, minute=23)
        rc_2_open_time = self.election_day_dt.replace(hour=10, minute=23)
        # This center open time is before the election time really starts,
        # so it will be reported under the corresponding office as an
        # unopened center.
        open_time_3 = self.election.start_time - timedelta(hours=6)

        # configure election day activities by registration center
        center_activities = []

        center_activities.append({
            'center': self.rc_1,
            'open_time': rc_1_open_time,
            'phone_number': STAFF_PHONE_NUMBER_PATTERN % 1,
        })

        center_activities.append({
            'center': self.rc_2,
            'open_time': rc_2_open_time,
            'phone_number': STAFF_PHONE_NUMBER_PATTERN % 1,
            'prelim_time': self.election_day_dt,
            'prelim_option': 9,
            'prelim_votes': 7312,  # four digits to test intcomma formatting
            'period_4_time': rc_2_open_time + timedelta(hours=6),
            'period_4_count': 79,
            # period "5" is a report for period 4 sent on following day
            'period_5_time': self.election_day_dt + timedelta(days=1),
            'period_5_count': 82,
        })

        center_activities.append({
            'center': self.rc_3,
            'open_time': open_time_3,
            'phone_number': STAFF_PHONE_NUMBER_PATTERN % 2,
        })

        center_activities.append({
            'center': self.rc_4,
            # DOES NOT SEND CenterOpen or anything else
        })

        center_activities.append({
            'center': self.copy_of_rc_1,
            # The copy center opened, coincidentally at the same time as the copied center.
            'open_time': rc_1_open_time,
            'phone_number': STAFF_PHONE_NUMBER_PATTERN % 3,
            # vote report for period 2
            'period_2_time': self.election_day_dt,
            'period_2_count': 4321,  # four digits to test intcomma formatting
        })

        center_activities.append({
            'center': self.rc_5,
            # DOES NOT SEND CenterOpen or anything else
            # This shares an office id with rc_1, and is also marked as
            # inactive for this particular election.
        })

        # shortcuts into dictionaries
        expected_center_stats = expected_stats['by_center']
        expected_office_stats = expected_stats['by_office']
        expected_summary_stats = expected_stats['summary']

        # Clear office-level summaries
        # (Some offices will be repeated, but it doesn't matter.)
        for activity in center_activities:
            office_id = activity['center'].office_id
            for key in ('opened', 'closed', 'not_reported_1', 'not_reported_2', 'not_reported_3',
                        'not_reported_4', 'unopened'):
                expected_office_stats[office_id][key] = 0
            expected_office_stats[office_id]['summary'] = deepcopy(EMPTY_SUMMARY)

        # Create the messages, increment/set counters/fields to represent
        # expected dashboard data.
        for activity in center_activities:
            # shortcuts specific to this center
            expected_for_this_center = expected_center_stats[activity['center'].center_id]
            expected_for_this_office = expected_office_stats[activity['center'].office_id]
            expected_summary_for_this_office = expected_for_this_office['summary']

            last_report_dt = None  # track the last report from this center

            open_time = activity.get('open_time', None)
            if open_time:
                open_msg = CenterOpen(election=self.election,
                                      phone_number=activity['phone_number'],
                                      registration_center=activity['center'],
                                      creation_date=activity['open_time'])
                open_msg.full_clean()
                open_msg.save()
                last_report_dt = self._max_report_time(last_report_dt, activity['open_time'])

            # It does not count as an open if it happened too early
            if open_time and open_time >= self.election.start_time:
                expected_for_this_center['ed_open'] = open_time.strftime('%d/%m %H:%M')
                expected_for_this_center['opened_hm'] = open_time.strftime('%H:%M')
                expected_for_this_office['opened'] += 1
                expected_summary_stats['opened'] += 1
                expected_summary_for_this_office['opened'] += 1
            else:
                expected_for_this_center['ed_open'] = None
                expected_for_this_center['opened_hm'] = None
                expected_for_this_office['unopened'] += 1
                expected_summary_stats['unopened'] += 1
                expected_summary_for_this_office['unopened'] += 1

            for period in ('1', '2', '3', '4'):
                report_time, report_count = \
                    activity.get('period_' + period + '_time', None), \
                    activity.get('period_' + period + '_count', None)

                if report_time:
                    r = PollingReport(election=self.election,
                                      phone_number=activity['phone_number'],
                                      registration_center=activity['center'],
                                      period_number=int(period),
                                      num_voters=report_count,
                                      creation_date=report_time)
                    r.full_clean()
                    r.save()
                    last_report_dt = self._max_report_time(last_report_dt, report_time)

                    expected_for_this_center['votes_reported_' + period] = report_count
                    expected_for_this_center['reported_period_' + period] = 'has_reported'
                    expected_for_this_center['reported_period_' + period + '_count'] = report_count
                    expected_for_this_office['votes_reported_' + period] = report_count
                    expected_summary_stats['votes_reported_' + period] += report_count
                    expected_summary_for_this_office['votes_reported_' + period] += report_count
                    if period == '4':  # got a period 4 report, so the center closed
                        expected_for_this_center['is_closed'] = 'Yes'
                        expected_for_this_office['closed'] += 1
                else:
                    if open_time and open_time >= self.election.start_time:
                        # The effective time of the reports was just after period 2, so
                        # if this is the period 1 or 2 report then it is overdue, and
                        # if this is the period 3 or 4 report then it is not due yet.
                        flag = 'has_not_reported' if period in ('1', '2') else 'not_due'
                        expected_for_this_center['reported_period_' + period] = flag
                    else:
                        expected_for_this_center['reported_period_' + period] = 'no_data'
                    expected_for_this_center['reported_period_' + period + '_count'] = 0
                    expected_for_this_office['not_reported_' + period] += 1
                    if period == '4':  # no period 4 report, so didn't close
                        expected_for_this_center['is_closed'] = 'No'

            # Very basic support for sending period 4 report on day after election
            #
            # It assumes that a period 4 report was also sent on election day, which
            # simplifies handling of votes_reported_4 counters and information on
            # closing.
            #
            # Period "5" is period 4 on the following day.
            period_5_time = activity.get('period_5_time', None)
            if period_5_time:
                period_5_count = activity['period_5_count']
                period_4_count = activity['period_4_count']

                r = PollingReport(election=self.election,
                                  phone_number=activity['phone_number'],
                                  registration_center=activity['center'],
                                  period_number=4,
                                  num_voters=period_5_count,
                                  creation_date=period_5_time)
                r.full_clean()
                r.save()
                last_report_dt = self._max_report_time(last_report_dt, period_5_time)

                # Add in delta to prior period 4 report
                delta = period_5_count - period_4_count
                expected_for_this_center['votes_reported_4'] += delta
                expected_for_this_center['reported_period_4_count'] += delta
                expected_for_this_office['votes_reported_4'] += delta
                expected_summary_stats['votes_reported_4'] += delta
                expected_summary_for_this_office['votes_reported_4'] += delta

            prelim_time = activity.get('prelim_time', None)
            if prelim_time:
                prelim = PreliminaryVoteCount(election=self.election,
                                              phone_number=activity['phone_number'],
                                              registration_center=activity['center'],
                                              option=activity['prelim_option'],
                                              num_votes=activity['prelim_votes'],
                                              creation_date=prelim_time)
                prelim.full_clean()
                prelim.save()
                last_report_dt = self._max_report_time(last_report_dt, prelim_time)

                expected_for_this_office['prelim'] = {
                    str(activity['prelim_option']): intcomma(activity['prelim_votes'])
                }

            expected_for_this_center['last_report'] = \
                'Not Reported' if not last_report_dt else \
                last_report_dt.strftime('%d/%m %H:%M')

        # rc_5 is inactive for this election
        # (CenterClosedForElection created when center was created)
        # Now that the office 'summary' has been set up, note where inactive should show up.
        expected_center_stats[self.rc_5.center_id]['inactive'] = True
        expected_office_stats[self.rc_5.office.id]['summary']['inactive'] += 1
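
All of the election-day event times above are derived from a single anchor datetime by adding or subtracting timedelta offsets (an open time before the election start, a period 4 report six hours after opening, a follow-up report the next day). A small sketch of that pattern, assuming illustrative key names rather than the exact fixture fields:

from datetime import datetime, timedelta

def schedule_center_activity(election_day, open_offset=timedelta(hours=0)):
    """Return a dict of event times derived from one anchor datetime, in the
    spirit of the center_activities fixtures above."""
    open_time = election_day + open_offset
    return {
        'open_time': open_time,
        'period_4_time': open_time + timedelta(hours=6),    # same-day report
        'period_5_time': election_day + timedelta(days=1),  # next-day follow-up
    }

# e.g. schedule_center_activity(datetime(2014, 2, 20, 8, 0), timedelta(hours=2))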

Example 33

Project: kamaelia_ Source File: LiveAnalysis.py
Function: main
    def main(self):
        # Calculate running total and mean etc

        cursor = self.dbConnect(self.dbuser,self.dbpass)
        while not self.finished():
            # The below does LIVE and FINAL analysis - do NOT run DataAnalyser at the same time

            print "Analysis component: Checking for new data..."

            # Stage 1: Live analysis - could do with a better way to do the first query (indexed field 'analysed' to speed up for now)
            # Could move this into the main app to take a copy of tweets on arrival, but would rather solve separately if poss
            cursor.execute("""SELECT tid,pid,timestamp,text,tweet_id,programme_position FROM rawdata WHERE analysed = 0 ORDER BY tid LIMIT 5000""")
            data = cursor.fetchall()

            # Cycle through all the as yet unanalysed tweets
            for result in data:
                tid = result[0]
                pid = result[1]
                tweettime = result[2] # Timestamp based on the tweet's created_at field
                tweettext = result[3]
                tweetid = result[4] # This is the real tweet ID, tid just makes a unique identifier as each tweet can be stored against several pids
                progpos = result[5] # Position through the programme that the tweet was made
                dbtime = datetime.utcfromtimestamp(tweettime)
                # Each tweet will be grouped into chunks of one minute to make display better, so set the seconds to zero
                # This particular time is only used for console display now as a more accurate one calculated from programme position is found later
                dbtime = dbtime.replace(second=0)
                print "Analysis component: Analysing new tweet for pid", pid, "(" + str(dbtime) + "):"
                print "Analysis component: '" + tweettext + "'"
                cursor.execute("""SELECT duration FROM programmes_unique WHERE pid = %s""",(pid))
                progdata = cursor.fetchone()
                duration = progdata[0]
                cursor.execute("""SELECT totaltweets,meantweets,mediantweets,modetweets,stdevtweets,timediff,timestamp,utcoffset FROM programmes WHERE pid = %s ORDER BY timestamp DESC""",(pid))
                progdata2 = cursor.fetchone()
                totaltweets = progdata2[0]
                # Increment the total tweets recorded for this programme's broadcast
                totaltweets += 1
                meantweets = progdata2[1]
                mediantweets = progdata2[2]
                modetweets = progdata2[3]
                stdevtweets = progdata2[4]
                timediff = progdata2[5]
                timestamp = progdata2[6]
                utcoffset = progdata2[7]

                # Need to work out the timestamp to assign to the entry in analysed data
                progstart = timestamp - timediff
                progmins = int(progpos / 60)
                analysedstamp = int(progstart + (progmins * 60))
                # Ensure that this tweet occurs within the length of the programme, otherwise for the purposes of this program it's useless
                if progpos > 0 and progpos <= duration:
                    cursor.execute("""SELECT did,totaltweets,wordfreqexpected,wordfrequnexpected FROM analyseddata WHERE pid = %s AND timestamp = %s""",(pid,analysedstamp))
                    analyseddata = cursor.fetchone()
                    # Just in case of a missing raw json object (ie. programme terminated before it was stored - allow it to be skipped if not found after 30 secs)
                    failcounter = 0
                    # Pass this tweet to the NLTK analysis component
                    self.send([pid,tweetid],"nltk")
                    while not self.dataReady("nltk"):
                    #    if failcounter >= 3000:
                    #        nltkdata = list()
                    #        break
                        time.sleep(0.01)
                    #    failcounter += 1
                    #if failcounter < 3000:
                        # Receive back a list of words and their frequency for this tweet, including whether or not they are common, an entity etc
                    if 1:
                        nltkdata = self.recv("nltk")
                    if analyseddata == None: # No tweets yet recorded for this minute
                        minutetweets = 1
                        cursor.execute("""INSERT INTO analyseddata (pid,totaltweets,timestamp) VALUES (%s,%s,%s)""", (pid,minutetweets,analysedstamp))
                        for word in nltkdata:
                            # Check if we're storing a word or phrase here
                            if nltkdata[word][0] == 1:
                                cursor.execute("""INSERT INTO wordanalysis (pid,timestamp,phrase,count,is_keyword,is_entity,is_common) VALUES (%s,%s,%s,%s,%s,%s,%s)""", (pid,analysedstamp,word,nltkdata[word][1],nltkdata[word][2],nltkdata[word][3],nltkdata[word][4]))
                            else:
                                cursor.execute("""INSERT INTO wordanalysis (pid,timestamp,word,count,is_keyword,is_entity,is_common) VALUES (%s,%s,%s,%s,%s,%s,%s)""", (pid,analysedstamp,word,nltkdata[word][1],nltkdata[word][2],nltkdata[word][3],nltkdata[word][4]))
                    else:
                        did = analyseddata[0]
                        minutetweets = analyseddata[1] # Get current number of tweets for this minute
                        minutetweets += 1 # Add one to it for this tweet

                        cursor.execute("""UPDATE analyseddata SET totaltweets = %s WHERE did = %s""",(minutetweets,did))

                        for word in nltkdata:
                            # Check if we're storing a word or phrase
                            if nltkdata[word][0] == 1:
                                cursor.execute("""SELECT wid,count FROM wordanalysis WHERE pid = %s AND timestamp = %s AND phrase LIKE %s""",(pid,analysedstamp,word))
                                # Check if this phrase has already been stored for this minute - if so, increment the count
                                wordcheck = cursor.fetchone()
                                if wordcheck == None:
                                    cursor.execute("""INSERT INTO wordanalysis (pid,timestamp,phrase,count,is_keyword,is_entity,is_common) VALUES (%s,%s,%s,%s,%s,%s,%s)""", (pid,analysedstamp,word,nltkdata[word][1],nltkdata[word][2],nltkdata[word][3],nltkdata[word][4]))
                                else:
                                    cursor.execute("""UPDATE wordanalysis SET count = %s WHERE wid = %s""",(nltkdata[word][1] + wordcheck[1],wordcheck[0]))
                            else:
                                cursor.execute("""SELECT wid,count FROM wordanalysis WHERE pid = %s AND timestamp = %s AND word LIKE %s""",(pid,analysedstamp,word))
                                # Check if this word has already been stored for this minute - if so, increment the count
                                wordcheck = cursor.fetchone()
                                if wordcheck == None:
                                    cursor.execute("""INSERT INTO wordanalysis (pid,timestamp,word,count,is_keyword,is_entity,is_common) VALUES (%s,%s,%s,%s,%s,%s,%s)""", (pid,analysedstamp,word,nltkdata[word][1],nltkdata[word][2],nltkdata[word][3],nltkdata[word][4]))
                                else:
                                    cursor.execute("""UPDATE wordanalysis SET count = %s WHERE wid = %s""",(nltkdata[word][1] + wordcheck[1],wordcheck[0]))
                    # Averages / stdev are calculated roughly based on the programme's running time at this point
                    progdate = datetime.utcfromtimestamp(timestamp) + timedelta(seconds=utcoffset)
                    actualstart = progdate - timedelta(seconds=timediff)
                    actualtweettime = datetime.utcfromtimestamp(tweettime + utcoffset)

                    # Calculate how far through the programme this tweet occurred
                    runningtime = actualtweettime - actualstart
                    runningtime = runningtime.seconds

                    if runningtime < 0:
                        runningtime = 0
                    else:
                        runningtime = float(runningtime) / 60

                    try:
                        meantweets = totaltweets / runningtime
                    except ZeroDivisionError, e:
                        meantweets = 0

                    cursor.execute("""SELECT totaltweets FROM analyseddata WHERE pid = %s AND timestamp >= %s AND timestamp < %s""",(pid,progstart,analysedstamp+duration))
                    analyseddata = cursor.fetchall()

                    runningtime = int(runningtime)

                    tweetlist = list()
                    for result in analyseddata:
                        totaltweetsmin = result[0]
                        # Create a list of each minute and the total tweets for that minute in the programme
                        tweetlist.append(int(totaltweetsmin))

                    # Ensure tweetlist has enough entries
                    # If a minute has no tweets, it won't have a database record, so this has to be added
                    if len(tweetlist) < runningtime:
                        additions = runningtime - len(tweetlist)
                        while additions > 0:
                            tweetlist.append(0)
                            additions -= 1

                    # Order by programme position 0,1,2, mins etc
                    tweetlist.sort()

                    mediantweets = tweetlist[int(len(tweetlist)/2)]

                    modes = dict()
                    stdevlist = list()
                    for tweet in tweetlist:
                        modes[tweet] = tweetlist.count(tweet)
                        stdevlist.append((tweet - meantweets)*(tweet - meantweets))

                    modeitems = [[v, k] for k, v in modes.items()]
                    modeitems.sort(reverse=True)
                    modetweets = int(modeitems[0][1])

                    stdevtweets = 0
                    for val in stdevlist:
                        stdevtweets += val

                    try:
                        stdevtweets = math.sqrt(stdevtweets / runningtime)
                    except ZeroDivisionError, e:
                        stdevtweets = 0

                    # Finished analysis - update DB
                    cursor.execute("""UPDATE programmes SET totaltweets = %s, meantweets = %s, mediantweets = %s, modetweets = %s, stdevtweets = %s WHERE pid = %s AND timestamp = %s""",(totaltweets,meantweets,mediantweets,modetweets,stdevtweets,pid,timestamp))

                else:
                    print "Analysis component: Skipping tweet - falls outside the programme's running time"

                # Mark the tweet as analysed
                cursor.execute("""UPDATE rawdata SET analysed = 1 WHERE tid = %s""",(tid))
                print "Analysis component: Done!"

            # Stage 2: If all raw tweets analysed and imported = 1 (all data for this programme stored and programme finished), finalise the analysis - could do bookmark identification here too?
            cursor.execute("""SELECT pid,totaltweets,meantweets,mediantweets,modetweets,stdevtweets,timestamp,timediff FROM programmes WHERE imported = 1 AND analysed = 0 LIMIT 5000""")
            data = cursor.fetchall()
            # Cycle through each programme that's ready for final analysis
            for result in data:
                pid = result[0]
                cursor.execute("""SELECT duration,title FROM programmes_unique WHERE pid = %s""",(pid))
                data2 = cursor.fetchone()
                duration = data2[0]
                totaltweets = result[1]
                meantweets = result[2]
                mediantweets = result[3]
                modetweets = result[4]
                stdevtweets = result[5]
                title = data2[1]
                timestamp = result[6]
                timediff = result[7]
                # Cycle through checking if all tweets for this programme have been analysed - if so finalise the stats
                cursor.execute("""SELECT tid FROM rawdata WHERE analysed = 0 AND pid = %s""", (pid))
                if cursor.fetchone() == None:
                    # OK to finalise stats here
                    print "Analysis component: Finalising stats for pid:", pid, "(" + title + ")"

                    meantweets = float(totaltweets) / (duration / 60) # Mean tweets per minute

                    cursor.execute("""SELECT totaltweets FROM analyseddata WHERE pid = %s AND timestamp >= %s AND timestamp < %s""",(pid,timestamp-timediff,timestamp+duration-timediff))
                    analyseddata = cursor.fetchall()

                    runningtime = duration / 60

                    tweetlist = list()
                    for result in analyseddata:
                        totaltweetsmin = result[0]
                        tweetlist.append(int(totaltweetsmin))

                    # Ensure tweetlist has enough entries - as above, if no tweets are recorded for a minute it won't be present in the DB
                    if len(tweetlist) < runningtime:
                        additions = runningtime - len(tweetlist)
                        while additions > 0:
                            tweetlist.append(0)
                            additions -= 1

                    tweetlist.sort()

                    mediantweets = tweetlist[int(len(tweetlist)/2)]

                    modes = dict()
                    stdevlist = list()
                    for tweet in tweetlist:
                        modes[tweet] = tweetlist.count(tweet)
                        stdevlist.append((tweet - meantweets)*(tweet - meantweets))

                    modeitems = [[v, k] for k, v in modes.items()]
                    modeitems.sort(reverse=True)
                    modetweets = int(modeitems[0][1])

                    stdevtweets = 0
                    for val in stdevlist:
                        stdevtweets += val
                    try:
                        stdevtweets = math.sqrt(stdevtweets / runningtime)
                    except ZeroDivisionError, e:
                        stdevtweets = 0

                    if 1: # This data is purely a readout to the terminal at the moment associated with word and phrase frequency, and retweets
                        sqltimestamp1 = timestamp - timediff
                        sqltimestamp2 = timestamp + duration - timediff
                        cursor.execute("""SELECT tweet_id FROM rawdata WHERE pid = %s AND timestamp >= %s AND timestamp < %s""", (pid,sqltimestamp1,sqltimestamp2))
                        rawtweetids = cursor.fetchall()
                        tweetids = list()
                        for tweet in rawtweetids:
                            tweetids.append(tweet[0])

                        if len(tweetids) > 0:
                            # Just in case of a missing raw json object (ie. programme terminated before it was stored - allow it to be skipped if not found after 30 secs)
                            failcounter = 0
                            self.send([pid,tweetids],"nltkfinal")
                            while not self.dataReady("nltkfinal"):
                            #    if failcounter >= 3000:
                            #        nltkdata = list()
                            #        break
                                time.sleep(0.01)
                            #    failcounter += 1
                            #if failcounter < 3000:
                            if 1:
                                nltkdata = self.recv("nltkfinal")

                    cursor.execute("""UPDATE programmes SET meantweets = %s, mediantweets = %s, modetweets = %s, stdevtweets = %s, analysed = 1 WHERE pid = %s AND timestamp = %s""",(meantweets,mediantweets,modetweets,stdevtweets,pid,timestamp))
                    print "Analysis component: Done!"

            # Sleep here until more data is available to analyse
            print "Analysis component: Sleeping for 10 seconds..."
            time.sleep(10)
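
The analysis loop above rebuilds local times from epoch seconds by adding timedelta(seconds=utcoffset), then measures how far into the programme each tweet falls. A condensed sketch of those two steps follows; the function names are illustrative, and it uses total_seconds() where the original uses .seconds, so spans longer than a day would still be handled.

from datetime import datetime, timedelta

def programme_start(timestamp, timediff, utcoffset):
    """Local start time of the broadcast, rebuilt from epoch seconds as in the
    loop above: shift to local time with the UTC offset, then step back by the
    stored time difference."""
    progdate = datetime.utcfromtimestamp(timestamp) + timedelta(seconds=utcoffset)
    return progdate - timedelta(seconds=timediff)

def minutes_into_programme(tweettime, utcoffset, actualstart):
    """How many minutes into the programme a tweet falls, clamped at zero."""
    actualtweettime = datetime.utcfromtimestamp(tweettime + utcoffset)
    elapsed = (actualtweettime - actualstart).total_seconds()
    return max(elapsed, 0) / 60.0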

Example 34

Project: mpop Source File: msg_seviri_hdf.py
def load(satscene, calibrate=True, area_extent=None, **kwargs):
    """Load MSG SEVIRI data from hdf5 format.
    """

    # Read config file content
    conf = ConfigParser()
    conf.read(os.path.join(CONFIG_PATH, satscene.fullname + ".cfg"))
    values = {"orbit": satscene.orbit,
    "satname": satscene.satname,
    "number": satscene.number,
    "instrument": satscene.instrument_name,
    "satellite": satscene.fullname
    }

    LOG.info("assume seviri-level4")
    print "... assume seviri-level4"

    satscene.add_to_history("hdf5 data read by mpop/msg_seviri_hdf.py")


    if "reader_level" in kwargs.keys():
        reader_level = kwargs["reader_level"]
    else:
        reader_level = "seviri-level4"

    if "RSS" in kwargs.keys():
        if kwargs["RSS"]:
            dt_end =  4
        else:
            dt_end = 12
    else:
        from my_msg_module import check_RSS
        RSS = check_RSS(satscene.sat_nr(), satscene.time_slot)
        if RSS == None:
            print "*** Error in mpop/satin/msg_seviri_hdf.py"
            print "    satellite MSG", satscene.sat_nr() ," is not active yet"
            quit()
        else:
            if RSS:
                dt_end =  4
            else:
                dt_end = 12

    print "... hdf file name is specified by observation end time"
    print "    assume ", dt_end, " min between start and end time of observation"

    # end of scan time is dt_end minutes (4 or 12) after the slot start time
    end_time = satscene.time_slot + datetime.timedelta(minutes=dt_end)

    filename = os.path.join( end_time.strftime(conf.get(reader_level, "dir", raw=True)),
                             end_time.strftime(conf.get(reader_level, "filename", raw=True)) % values )
    
    print "... search for file: ", filename
    filenames=glob(str(filename))
    if len(filenames) == 0:
        print "*** Error, no file found"
        return # just return without exit the program 
    elif len(filenames) > 1:
        print "*** Warning, more than 1 datafile found: ", filenames 
    filename = filenames[0]
    print("... read data from %s" % str(filename))

    # read data from hdf5 file 
    data_folder='U-MARF/MSG/Level1.5/'

    # Load data from hdf file
    with h5py.File(filename,'r') as hf:

        subset_info=hf.get(data_folder+'METADATA/SUBSET')
        for i in range(subset_info.len()):
            #print subset_info[i]['EntryName'], subset_info[i]['Value']
            if subset_info[i]['EntryName'] == "VIS_IRSouthLineSelectedRectangle":
                VIS_IRSouthLine = int(subset_info[i]['Value'])
            if subset_info[i]['EntryName'] == "VIS_IRNorthLineSelectedRectangle":
                VIS_IRNorthLine = int(subset_info[i]['Value'])
            if subset_info[i]['EntryName'] == "VIS_IREastColumnSelectedRectangle":
                VIS_IREastColumn = int(subset_info[i]['Value'])
            if subset_info[i]['EntryName'] == "VIS_IRWestColumnSelectedRectangle":
                VIS_IRWestColumn = int(subset_info[i]['Value'])
            if subset_info[i]['EntryName'] == "HRVLowerNorthLineSelectedRectangle":
                HRVLowerNorthLine = int(subset_info[i]['Value'])
            if subset_info[i]['EntryName'] == "HRVLowerSouthLineSelectedRectangle":
                HRVLowerSouthLine = int(subset_info[i]['Value'])
            if subset_info[i]['EntryName'] == "HRVLowerEastColumnSelectedRectangle":
                HRVLowerEastColumn = int(subset_info[i]['Value'])
            if subset_info[i]['EntryName'] == "HRVLowerWestColumnSelectedRectangle":
                HRVLowerWestColumn = int(subset_info[i]['Value'])
            if subset_info[i]['EntryName'] == "HRVUpperSouthLineSelectedRectangle":
                HRVUpperSouthLine = int(subset_info[i]['Value'])  # 0
            if subset_info[i]['EntryName'] == "HRVUpperNorthLineSelectedRectangle":
                HRVUpperNorthLine = int(subset_info[i]['Value'])  # 0
            if subset_info[i]['EntryName'] == "HRVUpperEastColumnSelectedRectangle":
                HRVUpperEastColumn = int(subset_info[i]['Value']) # 0
            if subset_info[i]['EntryName'] == "HRVUpperWestColumnSelectedRectangle":
                HRVUpperWestColumn = int(subset_info[i]['Value']) # 0

        sat_status=hf.get(data_folder+'METADATA/HEADER/SatelliteStatus/SatelliteStatus_DESCR')
        for i in range(sat_status.len()):
            if sat_status[i]['EntryName']=="SatelliteDefinition-NominalLongitude":
                sat_lon = sat_status[i]['Value']
                break

        #print 'VIS_IRSouthLine', VIS_IRSouthLine
        #print 'VIS_IRNorthLine', VIS_IRNorthLine
        #print 'VIS_IREastColumn', VIS_IREastColumn
        #print 'VIS_IRWestColumn', VIS_IRWestColumn
        #print 'sat_longitude', sat_lon, type(sat_lon), 'GEOS<'+'{:+06.1f}'.format(sat_lon)+'>' 

        if 1 == 0:
            # works only if all pixels are on the disk 
            from msg_pixcoord2area import msg_pixcoord2area
            print "VIS_IRNorthLine, VIS_IRWestColumn, VIS_IRSouthLine, VIS_IREastColumn: ", VIS_IRNorthLine, VIS_IRWestColumn, VIS_IRSouthLine, VIS_IREastColumn
            area_def = msg_pixcoord2area ( VIS_IRNorthLine, VIS_IRWestColumn, VIS_IRSouthLine, VIS_IREastColumn, "vis", sat_lon )
        else:
            # works also for pixels outside of the disk 
            pname = 'GEOS<'+'{:+06.1f}'.format(sat_lon)+'>'  # "GEOS<+009.5>"
            proj = {'proj': 'geos', 'a': '6378169.0', 'b': '6356583.8', 'h': '35785831.0', 'lon_0': str(sat_lon)}
            aex=(-5570248.4773392612, -5567248.074173444, 5567248.074173444, 5570248.4773392612)

            # define full disk projection 
            from pyresample.geometry import AreaDefinition
            full_disk_def = AreaDefinition('full_disk',
                                           'full_disk',
                                           pname,
                                           proj,
                                           3712,
                                           3712,
                                           aex )

            # define name and calculate area for sub-domain
            area_name= 'MSG_'+'{:04d}'.format(VIS_IRNorthLine)+'_'+'{:04d}'.format(VIS_IRWestColumn)+'_'+'{:04d}'.format(VIS_IRSouthLine)+'_'+'{:04d}'.format(VIS_IREastColumn)
            aex = full_disk_def.get_area_extent_for_subset(3712-VIS_IRSouthLine,3712-VIS_IRWestColumn,3712-VIS_IRNorthLine,3712-VIS_IREastColumn)

            area_def = AreaDefinition(area_name,
                                      area_name,
                                      pname,
                                      proj,
                                      (VIS_IRWestColumn-VIS_IREastColumn)+1,
                                      (VIS_IRNorthLine-VIS_IRSouthLine)+1,
                                      aex )

        #print area_def
        #print "REGION:", area_def.area_id, "{"
        #print "\tNAME:\t", area_def.name
        #print "\tPCS_ID:\t", area_def.proj_id
        #print ("\tPCS_DEF:\tproj="+area_def.proj_dict['proj']+", lon_0=" + area_def.proj_dict['lon_0'] + ", a="+area_def.proj_dict['a']+", b="+area_def.proj_dict['b']+", h="+area_def.proj_dict['h'])
        #print "\tXSIZE:\t", area_def.x_size
        #print "\tYSIZE:\t", area_def.y_size
        #print "\tAREA_EXTENT:\t", area_def.area_extent
        #print "};"

        # copy area to satscene 
        satscene.area = area_def

        # write information used by mipp.xrit.MSG._Calibrator in a fake header file
        hdr = dict()

        # satellite ID number 
        hdr["SatelliteDefinition"] = dict()
        hdr["SatelliteDefinition"]["SatelliteId"] = SatelliteIds[str(satscene.sat_nr())]
        
        # processing 
        hdr["Level 1_5 ImageProduction"] = dict()
        hdr["Level 1_5 ImageProduction"]["PlannedChanProcessing"] = np_array([2,2,2,2,2,2,2,2,2,2,2,2], int)
        
        # calibration factors  
        Level15ImageCalibration = hf.get(data_folder+'METADATA/HEADER/RadiometricProcessing/Level15ImageCalibration_ARRAY')
        hdr["Level1_5ImageCalibration"] = dict()

        for chn_name in channel_numbers.keys():
            chn_nb = channel_numbers[chn_name]-1
            hdr["Level1_5ImageCalibration"][chn_nb] = dict()
            #print chn_name, chn_nb, Level15ImageCalibration[chn_nb]['Cal_Slope'], Level15ImageCalibration[chn_nb]['Cal_Offset']
            hdr["Level1_5ImageCalibration"][chn_nb]['Cal_Slope']  = Level15ImageCalibration[chn_nb]['Cal_Slope']
            hdr["Level1_5ImageCalibration"][chn_nb]['Cal_Offset'] = Level15ImageCalibration[chn_nb]['Cal_Offset']

        # loop over channels to load 
        for chn_name in satscene.channels_to_load:

            dataset_name = data_folder+'DATA/'+dict_channel[chn_name]+'/IMAGE_DATA'
            if dataset_name in hf:
                data_tmp = hf.get(data_folder+'DATA/'+dict_channel[chn_name]+'/IMAGE_DATA')

                LOG.info('hdr["SatelliteDefinition"]["SatelliteId"]: '+str(hdr["SatelliteDefinition"]["SatelliteId"]))
                #LOG.info('hdr["Level 1_5 ImageProduction"]["PlannedChanProcessing"]', hdr["Level 1_5 ImageProduction"]["PlannedChanProcessing"])
                chn_nb = channel_numbers[chn_name]-1
                LOG.info('hdr["Level1_5ImageCalibration"][chn_nb]["Cal_Slope"]:  '+str(hdr["Level1_5ImageCalibration"][chn_nb]["Cal_Slope"]))
                LOG.info('hdr["Level1_5ImageCalibration"][chn_nb]["Cal_Offset"]: '+str(hdr["Level1_5ImageCalibration"][chn_nb]["Cal_Offset"]))

                if calibrate:
                    #Calibrator = _Calibrator(hdr, chn_name)
                    bits_per_pixel = 10   ### !!! I have no idea if this is correct !!!
                    Calibrator = _Calibrator(hdr, chn_name, bits_per_pixel) ## changed call in mipp/xrit/MSG.py
                    data, calibration_unit = Calibrator (data_tmp, calibrate=1)
                else:
                    data = data_tmp
                    calibration_unit = "counts"

                LOG.info(chn_name+ " min/max: "+str(data.min())+","+str(data.max())+" "+calibration_unit )

                satscene[chn_name] = ma.asarray(data)

                satscene[chn_name].info['units'] = calibration_unit
                satscene[chn_name].info['satname'] = satscene.satname
                satscene[chn_name].info['satnumber'] = satscene.number
                satscene[chn_name].info['instrument_name'] = satscene.instrument_name
                satscene[chn_name].info['time'] = satscene.time_slot
                satscene[chn_name].info['is_calibrated'] = True

            else: 
                print "*** Warning, no data for channel "+ chn_name+ " in file "+ filename
                data = np_nan
                calibration_unit = ""
                LOG.info("*** Warning, no data for channel "+ chn_name+" in file "+filename)

Example 35

Project: orchestra Source File: orchestra_settings.py
def setup_orchestra(settings_module_name):
    settings = sys.modules[settings_module_name]
    if not hasattr(settings, 'INSTALLED_APPS'):
        settings.INSTALLED_APPS = ()
    if not hasattr(settings, 'STATICFILES_FINDERS'):
        settings.STATICFILES_FINDERS = ()

    # General
    ##########

    # URL at which Orchestra is publicly accessible
    settings.ORCHESTRA_URL = 'http://127.0.0.1:8000'

    # Production environment
    environment = os.environ.get('ENVIRONMENT')
    settings.PRODUCTION = False
    settings.STAGING = False
    if environment == 'production':
        settings.PRODUCTION = True
    elif environment == 'staging':
        settings.STAGING = True

    # Required Django apps
    settings.INSTALLED_APPS += (
        'orchestra',
        'beanstalk_dispatch',
        'registration',
        'widget_tweaks',
        'ajax_select',
    )

    settings.STATICFILES_FINDERS += (
        'django.contrib.staticfiles.finders.FileSystemFinder',
        'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    )

    # Add the Django admin and the Django CMS admin style to make it pretty.
    # The CMS style must be listed before the admin, so we do some processing
    # of the INSTALLED_APPS list to preserve that property.
    settings.INSTALLED_APPS = install_admin(settings.INSTALLED_APPS)

    settings.INSTALLED_APPS += (
        'django_nose',
    )
    settings.TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'

    # Tasks and Workflows
    ######################

    # Installed orchestra workflows to be included as Django apps
    settings.ORCHESTRA_WORKFLOWS = (
        # 'workflows.workflow_app_1',
    )
    settings.INSTALLED_APPS += settings.ORCHESTRA_WORKFLOWS

    # The maximum number of tasks an expert can pick up at a time.
    # Currently disabled.
    settings.ORCHESTRA_MAX_IN_PROGRESS_TASKS = 3

    # S3 bucket name to upload images to
    settings.EDITOR_IMAGE_BUCKET_NAME = 'CHANGEME'

    # Registration
    ###############

    # Orchestra registration urls: must match urls.py
    settings.LOGIN_REDIRECT_URL = '/orchestra/app/'
    settings.LOGIN_URL = '/orchestra/accounts/login/'

    # Orchestra Registration settings
    settings.ACCOUNT_ACTIVATION_DAYS = 7  # One-week activation window
    settings.REGISTRATION_AUTO_LOGIN = True  # Automatically log the user in.
    settings.INCLUDE_REGISTER_URL = False

    # API Authentication
    #####################

    # Orchestra project API client credentials: CHANGE THE SECRET.
    settings.ORCHESTRA_PROJECT_API_KEY = 'orchestra-user'
    settings.ORCHESTRA_PROJECT_API_SECRET = 'CHANGEME'

    # Orchestra project API server authentication via httpsignature.
    settings.INSTALLED_APPS += ('rest_framework_httpsignature',)

    # A dictionary of allowed project API keys and secrets.
    settings.ORCHESTRA_PROJECT_API_CREDENTIALS = {
        'orchestra-user': 'CHANGEME'
    }

    # Django REST framework
    settings.INSTALLED_APPS += ('rest_framework',)

    # Don't authenticate users without a view explicitly calling for it
    settings.REST_FRAMEWORK = {
        'DEFAULT_AUTHENTICATION_CLASSES': (
            'rest_framework.authentication.BasicAuthentication',
            'rest_framework.authentication.SessionAuthentication',
        ),
    }

    # Hijack settings
    settings.INSTALLED_APPS += (
        'hijack',
        'compat',
        'hijack_admin',
        'related_admin',
    )

    # Optionally toggle this to enable user hijack functionality.
    settings.HIJACK_ALLOW_GET_REQUESTS = True

    # Machine Step Scheduling
    ##########################

    # Scheduler for machine steps
    settings.MACHINE_STEP_SCHEDULER = {
        'path': ('orchestra.utils.machine_step_scheduler.'
                 'SynchronousMachineStepScheduler')
    }

    # Beanstalk dispatcher
    # Add keys to use AsynchronousMachineStepScheduler
    settings.BEANSTALK_DISPATCH_SQS_KEY = ''
    settings.BEANSTALK_DISPATCH_SQS_SECRET = ''
    settings.WORK_QUEUE = ''
    if os.environ.get('BEANSTALK_WORKER') == 'True':
        settings.BEANSTALK_DISPATCH_TABLE = {
            'machine_task_executor': ('orchestra.machine_tasks.execute')
        }

    # Email and Notifications
    #########################

    # For registration to work, an email backend must be configured.
    # This file defaults to printing emails to the console if there is no email
    # backend configured already, but that should be changed in production.
    settings.EMAIL_BACKEND = getattr(
        settings,
        'EMAIL_BACKEND',
        'django.core.mail.backends.console.EmailBackend')
    settings.DEFAULT_FROM_EMAIL = getattr(
        settings,
        'DEFAULT_FROM_EMAIL',
        'Orchestra <[email protected]>')

    # Notification-specific email for message bundling and searching
    settings.ORCHESTRA_NOTIFICATIONS_FROM_EMAIL = settings.DEFAULT_FROM_EMAIL
    # Feature flag for mocking emails during staffing. Should be disabled for
    # production but enabled otherwise.
    settings.ORCHESTRA_MOCK_EMAILS = False
    # Used to test email sending in development/staging environments
    settings.ORCHESTRA_MOCK_TO_EMAIL = ''

    # 3rd Party Integrations
    #########################

    # AWS Credentials
    settings.AWS_S3_KEY = ''  # FILL IN
    settings.AWS_S3_SECRET = ''  # FILL IN

    # Feature flag for toggling optional Google Apps integration. If a
    # service email and secret key are provided, Google Apps is used to
    # structure project data in Drive folders and can be used for
    # customizing workflow steps as well.
    settings.GOOGLE_APPS = False

    # Optional Google API related service email and path to a secret key.
    settings.GOOGLE_SERVICE_EMAIL = ''
    settings.GOOGLE_P12_PATH = ''

    # Google Drive root folder id in which we create projects.
    settings.GOOGLE_PROJECT_ROOT_ID = ''

    # Feature flags for toggling optional slack integration
    settings.ORCHESTRA_SLACK_INTERNAL_ENABLED = False
    settings.ORCHESTRA_SLACK_EXPERTS_ENABLED = False

    # Settings for slack notifications. Notifications are shared internally
    # upon task status change; the experts team organizes project
    # communication.
    settings.SLACK_EXPERTS_BASE_URL = ''
    settings.SLACK_INTERNAL_API_KEY = ''
    settings.SLACK_EXPERTS_API_KEY = ''
    settings.SLACK_INTERNAL_NOTIFICATION_CHANNEL = '#orchestra-tasks'

    # Settings for orchestra bots. Each bot needs a slack token to
    # authenticate the command.
    settings.ORCHESTRA_SLACK_STAFFBOT_TOKEN = ''
    settings.ORCHESTRA_STAFFBOT_WORKER_BATCH_SIZE = 5
    settings.ORCHESTRA_STAFFBOT_BATCH_FREQUENCY = timedelta(minutes=2)

    # Optionally add a path for a template to support third party scripts
    # (such as Google Analytics)
    settings.ORCHESTRA_THIRD_PARTY_SCRIPTS_TEMPLATE = (
        'orchestra/third_party_scripts.html')

    # Optionally configure a google analytics key to learn about your users.
    settings.GOOGLE_ANALYTICS_KEY = ''
    # Pass the Google Analytics key to templates with a context processor.
    install_context_processors(settings)

    # Set to True if you want to block Workers from picking up new
    # tasks while existing ones are returned by reviewers.
    settings.ORCHESTRA_ENFORCE_NO_NEW_TASKS_DURING_REVIEW = True

    # Set to True if you want to enable buttons to pull new
    # delivery/review tasks in the dashboard.
    settings.ORCHESTRA_ENABLE_NEW_TASK_BUTTONS = True

Example 36

Project: kamaelia_ Source File: LiveAnalysis.py
Function: main
    def main(self):
        # Calculate running total and mean etc

        cursor = self.dbConnect(self.dbuser,self.dbpass)
        while not self.finished():
            # The below does LIVE and FINAL analysis - do NOT run DataAnalyser at the same time

            print "Analysis component: Checking for new data..."

            # Stage 1: Live analysis - could do with a better way to do the first query (indexed field 'analysed' to speed up for now)
            # Could move this into the main app to take a copy of tweets on arrival, but would rather solve separately if poss
            cursor.execute("""SELECT tid,pid,timestamp,text,tweet_id,programme_position FROM rawdata WHERE analysed = 0 ORDER BY tid LIMIT 5000""")
            data = cursor.fetchall()

            # Cycle through all the as yet unanalysed tweets
            for result in data:
                tid = result[0]
                pid = result[1]
                tweettime = result[2] # Timestamp based on the tweet's created_at field
                tweettext = result[3]
                tweetid = result[4] # This is the real tweet ID, tid just makes a unique identifier as each tweet can be stored against several pids
                progpos = result[5] # Position through the programme that the tweet was made
                dbtime = datetime.utcfromtimestamp(tweettime)
                # Each tweet will be grouped into chunks of one minute to make display better, so set the seconds to zero
                # This particular time is only used for console display now as a more accurate one calculated from programme position is found later
                dbtime = dbtime.replace(second=0)
                print "Analysis component: Analysing new tweet for pid", pid, "(" + str(dbtime) + "):"
                print "Analysis component: '" + tweettext + "'"
                cursor.execute("""SELECT duration FROM programmes_unique WHERE pid = %s""",(pid))
                progdata = cursor.fetchone()
                duration = progdata[0]
                cursor.execute("""SELECT totaltweets,meantweets,mediantweets,modetweets,stdevtweets,timediff,timestamp,utcoffset FROM programmes WHERE pid = %s ORDER BY timestamp DESC""",(pid))
                progdata2 = cursor.fetchone()
                totaltweets = progdata2[0]
                # Increment the total tweets recorded for this programme's broadcast
                totaltweets += 1
                meantweets = progdata2[1]
                mediantweets = progdata2[2]
                modetweets = progdata2[3]
                stdevtweets = progdata2[4]
                timediff = progdata2[5]
                timestamp = progdata2[6]
                utcoffset = progdata2[7]

                # Need to work out the timestamp to assign to the entry in analysed data
                progstart = timestamp - timediff
                progmins = int(progpos / 60)
                analysedstamp = int(progstart + (progmins * 60))
                # Ensure that this tweet occurs within the length of the programme, otherwise for the purposes of this program it's useless
                if progpos > 0 and progpos <= duration:
                    cursor.execute("""SELECT did,totaltweets,wordfreqexpected,wordfrequnexpected FROM analyseddata WHERE pid = %s AND timestamp = %s""",(pid,analysedstamp))
                    analyseddata = cursor.fetchone()
                    # Just in case of a missing raw json object (ie. programme terminated before it was stored - allow it to be skipped if not found after 30 secs)
                    #failcounter = 0
                    # Pass this tweet to the NLTK analysis component
                    self.send([pid,tweetid],"nltk")
                    while not self.dataReady("nltk"):
                    #    if failcounter >= 3000:
                    #        nltkdata = list()
                    #        break
                        time.sleep(0.01)
                    #    failcounter += 1
                    #if failcounter < 3000:
                    if 1:
                        # Receive back a list of words and their frequency for this tweet, including whether or not they are common, an entity etc
                        nltkdata = self.recv("nltk")
                    if analyseddata == None: # No tweets yet recorded for this minute
                        minutetweets = 1
                        cursor.execute("""INSERT INTO analyseddata (pid,totaltweets,timestamp) VALUES (%s,%s,%s)""", (pid,minutetweets,analysedstamp))
                        for word in nltkdata:
                            # Check if we're storing a word or phrase here
                            if nltkdata[word][0] == 1:
                                cursor.execute("""INSERT INTO wordanalysis (pid,timestamp,phrase,count,is_keyword,is_entity,is_common) VALUES (%s,%s,%s,%s,%s,%s,%s)""", (pid,analysedstamp,word,nltkdata[word][1],nltkdata[word][2],nltkdata[word][3],nltkdata[word][4]))
                            else:
                                cursor.execute("""INSERT INTO wordanalysis (pid,timestamp,word,count,is_keyword,is_entity,is_common) VALUES (%s,%s,%s,%s,%s,%s,%s)""", (pid,analysedstamp,word,nltkdata[word][1],nltkdata[word][2],nltkdata[word][3],nltkdata[word][4]))
                    else:
                        did = analyseddata[0]
                        minutetweets = analyseddata[1] # Get current number of tweets for this minute
                        minutetweets += 1 # Add one to it for this tweet

                        cursor.execute("""UPDATE analyseddata SET totaltweets = %s WHERE did = %s""",(minutetweets,did))

                        for word in nltkdata:
                            # Check if we're storing a word or phrase
                            if nltkdata[word][0] == 1:
                                cursor.execute("""SELECT wid,count FROM wordanalysis WHERE pid = %s AND timestamp = %s AND phrase LIKE %s""",(pid,analysedstamp,word))
                                # Check if this phrase has already been stored for this minute - if so, increment the count
                                wordcheck = cursor.fetchone()
                                if wordcheck == None:
                                    cursor.execute("""INSERT INTO wordanalysis (pid,timestamp,phrase,count,is_keyword,is_entity,is_common) VALUES (%s,%s,%s,%s,%s,%s,%s)""", (pid,analysedstamp,word,nltkdata[word][1],nltkdata[word][2],nltkdata[word][3],nltkdata[word][4]))
                                else:
                                    cursor.execute("""UPDATE wordanalysis SET count = %s WHERE wid = %s""",(nltkdata[word][1] + wordcheck[1],wordcheck[0]))
                            else:
                                cursor.execute("""SELECT wid,count FROM wordanalysis WHERE pid = %s AND timestamp = %s AND word LIKE %s""",(pid,analysedstamp,word))
                                # Check if this word has already been stored for this minute - if so, increment the count
                                wordcheck = cursor.fetchone()
                                if wordcheck == None:
                                    cursor.execute("""INSERT INTO wordanalysis (pid,timestamp,word,count,is_keyword,is_entity,is_common) VALUES (%s,%s,%s,%s,%s,%s,%s)""", (pid,analysedstamp,word,nltkdata[word][1],nltkdata[word][2],nltkdata[word][3],nltkdata[word][4]))
                                else:
                                    cursor.execute("""UPDATE wordanalysis SET count = %s WHERE wid = %s""",(nltkdata[word][1] + wordcheck[1],wordcheck[0]))
                    # Averages / stdev are calculated roughly based on the programme's running time at this point
                    progdate = datetime.utcfromtimestamp(timestamp) + timedelta(seconds=utcoffset)
                    actualstart = progdate - timedelta(seconds=timediff)
                    actualtweettime = datetime.utcfromtimestamp(tweettime + utcoffset)

                    # Calculate how far through the programme this tweet occurred
                    runningtime = actualtweettime - actualstart
                    runningtime = runningtime.seconds

                    if runningtime < 0:
                        runningtime = 0
                    else:
                        runningtime = float(runningtime) / 60

                    try:
                        meantweets = totaltweets / runningtime
                    except ZeroDivisionError, e:
                        meantweets = 0

                    cursor.execute("""SELECT totaltweets FROM analyseddata WHERE pid = %s AND timestamp >= %s AND timestamp < %s""",(pid,progstart,analysedstamp+duration))
                    analyseddata = cursor.fetchall()

                    runningtime = int(runningtime)

                    tweetlist = list()
                    for result in analyseddata:
                        totaltweetsmin = result[0]
                        # Create a list of each minute and the total tweets for that minute in the programme
                        tweetlist.append(int(totaltweetsmin))

                    # Ensure tweetlist has enough entries
                    # If a minute has no tweets, it won't have a database record, so this has to be added
                    if len(tweetlist) < runningtime:
                        additions = runningtime - len(tweetlist)
                        while additions > 0:
                            tweetlist.append(0)
                            additions -= 1

                    # Order by programme position 0,1,2, mins etc
                    tweetlist.sort()

                    mediantweets = tweetlist[int(len(tweetlist)/2)]

                    modes = dict()
                    stdevlist = list()
                    for tweet in tweetlist:
                        modes[tweet] = tweetlist.count(tweet)
                        stdevlist.append((tweet - meantweets)*(tweet - meantweets))

                    modeitems = [[v, k] for k, v in modes.items()]
                    modeitems.sort(reverse=True)
                    modetweets = int(modeitems[0][1])

                    stdevtweets = 0
                    for val in stdevlist:
                        stdevtweets += val

                    try:
                        stdevtweets = math.sqrt(stdevtweets / runningtime)
                    except ZeroDivisionError, e:
                        stdevtweets = 0

                    # Finished analysis - update DB
                    cursor.execute("""UPDATE programmes SET totaltweets = %s, meantweets = %s, mediantweets = %s, modetweets = %s, stdevtweets = %s WHERE pid = %s AND timestamp = %s""",(totaltweets,meantweets,mediantweets,modetweets,stdevtweets,pid,timestamp))

                else:
                    print "Analysis component: Skipping tweet - falls outside the programme's running time"

                # Mark the tweet as analysed
                cursor.execute("""UPDATE rawdata SET analysed = 1 WHERE tid = %s""",(tid))
                print "Analysis component: Done!"

            # Stage 2: If all raw tweets analysed and imported = 1 (all data for this programme stored and programme finished), finalise the analysis - could do bookmark identification here too?
            cursor.execute("""SELECT pid,totaltweets,meantweets,mediantweets,modetweets,stdevtweets,timestamp,timediff FROM programmes WHERE imported = 1 AND analysed = 0 LIMIT 5000""")
            data = cursor.fetchall()
            # Cycle through each programme that's ready for final analysis
            for result in data:
                pid = result[0]
                cursor.execute("""SELECT duration,title FROM programmes_unique WHERE pid = %s""",(pid))
                data2 = cursor.fetchone()
                duration = data2[0]
                totaltweets = result[1]
                meantweets = result[2]
                mediantweets = result[3]
                modetweets = result[4]
                stdevtweets = result[5]
                title = data2[1]
                timestamp = result[6]
                timediff = result[7]
                # Cycle through checking if all tweets for this programme have been analysed - if so finalise the stats
                cursor.execute("""SELECT tid FROM rawdata WHERE analysed = 0 AND pid = %s""", (pid))
                if cursor.fetchone() == None:
                    # OK to finalise stats here
                    print "Analysis component: Finalising stats for pid:", pid, "(" + title + ")"

                    meantweets = float(totaltweets) / (duration / 60) # Mean tweets per minute

                    cursor.execute("""SELECT totaltweets FROM analyseddata WHERE pid = %s AND timestamp >= %s AND timestamp < %s""",(pid,timestamp-timediff,timestamp+duration-timediff))
                    analyseddata = cursor.fetchall()

                    runningtime = duration / 60

                    tweetlist = list()
                    for result in analyseddata:
                        totaltweetsmin = result[0]
                        tweetlist.append(int(totaltweetsmin))

                    # Ensure tweetlist has enough entries - as above, if no tweets are recorded for a minute it won't be present in the DB
                    if len(tweetlist) < runningtime:
                        additions = runningtime - len(tweetlist)
                        while additions > 0:
                            tweetlist.append(0)
                            additions -= 1

                    tweetlist.sort()

                    mediantweets = tweetlist[int(len(tweetlist)/2)]

                    modes = dict()
                    stdevlist = list()
                    for tweet in tweetlist:
                        modes[tweet] = tweetlist.count(tweet)
                        stdevlist.append((tweet - meantweets)*(tweet - meantweets))

                    modeitems = [[v, k] for k, v in modes.items()]
                    modeitems.sort(reverse=True)
                    modetweets = int(modeitems[0][1])

                    stdevtweets = 0
                    for val in stdevlist:
                        stdevtweets += val
                    try:
                        stdevtweets = math.sqrt(stdevtweets / runningtime)
                    except ZeroDivisionError, e:
                        stdevtweets = 0

                    if 1: # At the moment this data is purely a terminal readout, associated with word and phrase frequency and retweets
                        sqltimestamp1 = timestamp - timediff
                        sqltimestamp2 = timestamp + duration - timediff
                        cursor.execute("""SELECT tweet_id FROM rawdata WHERE pid = %s AND timestamp >= %s AND timestamp < %s""", (pid,sqltimestamp1,sqltimestamp2))
                        rawtweetids = cursor.fetchall()
                        tweetids = list()
                        for tweet in rawtweetids:
                            tweetids.append(tweet[0])

                        if len(tweetids) > 0:
                            # Just in case of a missing raw json object (ie. programme terminated before it was stored - allow it to be skipped if not found after 30 secs)
                            #failcounter = 0
                            self.send([pid,tweetids],"nltkfinal")
                            while not self.dataReady("nltkfinal"):
                            #    if failcounter >= 3000:
                            #        nltkdata = list()
                            #        break
                                time.sleep(0.01)
                            #    failcounter += 1
                            #if failcounter < 3000:
                            if 1:
                                nltkdata = self.recv("nltkfinal")

                    cursor.execute("""UPDATE programmes SET meantweets = %s, mediantweets = %s, modetweets = %s, stdevtweets = %s, analysed = 1 WHERE pid = %s AND timestamp = %s""",(meantweets,mediantweets,modetweets,stdevtweets,pid,timestamp))
                    print "Analysis component: Done!"

            # Sleep here until more data is available to analyse
            print "Analysis component: Sleeping for 10 seconds..."
            time.sleep(10)
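
Note on the running-time calculation above: subtracting two datetimes yields a timedelta, but the .seconds attribute is always in the range 0..86399 even for negative differences (the sign is carried by .days), so the "runningtime < 0" guard can never fire once .seconds has been read. A short sketch, independent of the kamaelia source, using total_seconds() for a signed duration:

from datetime import datetime, timedelta

prog_start = datetime(2011, 6, 1, 20, 0, 0)
tweet_time = datetime(2011, 6, 1, 20, 35, 30)

elapsed = tweet_time - prog_start                     # timedelta of 35 min 30 sec
minutes_in = max(0.0, elapsed.total_seconds() / 60)   # 35.5; early tweets clamp to 0

# Negative differences are normalised into days/seconds, e.g.
# datetime(2011, 6, 1) - datetime(2011, 6, 1, 0, 1) == timedelta(days=-1, seconds=86340)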

Example 37

Project: kamaelia_ Source File: TwitterSearch.py
    def main(self):
        twitterurl = "http://api.twitter.com/1/users/search.json"

        if self.proxy:
            proxyhandler = urllib2.ProxyHandler({"http" : self.proxy})
            twitopener = urllib2.build_opener(proxyhandler)
            urllib2.install_opener(twitopener)

        headers = {'User-Agent' : "BBC R&D Grabber"}
        postdata = None

        if self.keypair == False:
            # Perform OAuth authentication - as we don't have the secret key pair we need to request it
            # This will require some user input
            request_token_url = 'http://api.twitter.com/oauth/request_token'
            access_token_url = 'http://api.twitter.com/oauth/access_token'
            authorize_url = 'http://api.twitter.com/oauth/authorize'

            token = None
            consumer = oauth.Consumer(key=self.consumerkeypair[0],secret=self.consumerkeypair[1])

            params = {
                        'oauth_version': "1.0",
                        'oauth_nonce': oauth.generate_nonce(),
                        'oauth_timestamp': int(time.time()),
                    }

            params['oauth_consumer_key'] = consumer.key

            req = oauth.Request(method="GET",url=request_token_url,parameters=params)

            signature_method = oauth.SignatureMethod_HMAC_SHA1()
            req.sign_request(signature_method, consumer, token)

            requestheaders = req.to_header()
            requestheaders['User-Agent'] = "BBC R&D Grabber"

            # Connect to Twitter
            try:
                req = urllib2.Request(request_token_url,None,requestheaders) # Why won't this work?!? Is it trying to POST?
                conn1 = urllib2.urlopen(req)
            except httplib.BadStatusLine, e:
                sys.stderr.write('PeopleSearch BadStatusLine error: ' + str(e) + '\n')
                conn1 = False
            except urllib2.HTTPError, e:
                sys.stderr.write('PeopleSearch HTTP error: ' + str(e.code) + '\n')
                conn1 = False
            except urllib2.URLError, e:
                sys.stderr.write('PeopleSearch URL error: ' + str(e.reason) + '\n')
                conn1 = False

            if conn1:
                content = conn1.read()
                conn1.close()

                request_token = dict(urlparse.parse_qsl(content))

                print "Request Token:"
                print "     - oauth_token        = %s" % request_token['oauth_token']
                print "     - oauth_token_secret = %s" % request_token['oauth_token_secret']
                print

                # The user must confirm authorisation so a URL is printed here
                print "Go to the following link in your browser:"
                print "%s?oauth_token=%s" % (authorize_url, request_token['oauth_token'])
                print

                accepted = 'n'
                # Wait until the user has confirmed authorisation
                while accepted.lower() == 'n':
                    accepted = raw_input('Have you authorized me? (y/n) ')
                oauth_verifier = raw_input('What is the PIN? ')

                token = oauth.Token(request_token['oauth_token'],
                    request_token['oauth_token_secret'])
                token.set_verifier(oauth_verifier)

                params = {
                        'oauth_version': "1.0",
                        'oauth_nonce': oauth.generate_nonce(),
                        'oauth_timestamp': int(time.time()),
                    }

                params['oauth_token'] = token.key
                params['oauth_consumer_key'] = consumer.key

                req = oauth.Request(method="GET",url=access_token_url,parameters=params)

                signature_method = oauth.SignatureMethod_HMAC_SHA1()
                req.sign_request(signature_method, consumer, token)

                requestheaders = req.to_header()
                requestheaders['User-Agent'] = "BBC R&D Grabber"
                # Connect to Twitter
                try:
                    req = urllib2.Request(access_token_url,"oauth_verifier=%s" % oauth_verifier,requestheaders) # Why won't this work?!? Is it trying to POST?
                    conn1 = urllib2.urlopen(req)
                except httplib.BadStatusLine, e:
                    sys.stderr.write('PeopleSearch BadStatusLine error: ' + str(e) + '\n')
                    conn1 = False
                except urllib2.HTTPError, e:
                    sys.stderr.write('PeopleSearch HTTP error: ' + str(e.code) + '\n')
                    conn1 = False
                except urllib2.URLError, e:
                    sys.stderr.write('PeopleSearch URL error: ' + str(e.reason) + '\n')
                    conn1 = False

                if conn1:
                    content = conn1.read()
                    conn1.close()
                    access_token = dict(urlparse.parse_qsl(content))

                    # Access tokens retrieved from Twitter
                    print "Access Token:"
                    print "     - oauth_token        = %s" % access_token['oauth_token']
                    print "     - oauth_token_secret = %s" % access_token['oauth_token_secret']
                    print
                    print "You may now access protected resources using the access tokens above."
                    print

                    save = False
                    # Load config to save OAuth keys
                    try:
                        homedir = os.path.expanduser("~")
                        file = open(homedir + "/twitter-login.conf",'r')
                        save = True
                    except IOError, e:
                        print ("Failed to load config file - not saving oauth keys: " + str(e))

                    if save:
                        raw_config = file.read()

                        file.close()

                        # Read config and add new values
                        config = cjson.decode(raw_config)
                        config['key'] = access_token['oauth_token']

                        config['secret'] = access_token['oauth_token_secret']

                        raw_config = cjson.encode(config)

                        # Write out the new config file
                        try:
                            file = open(homedir + "/twitter-login.conf",'w')
                            file.write(raw_config)
                            file.close()
                        except IOError, e:
                            print ("Failed to save oauth keys: " + str(e))

                    self.keypair = [access_token['oauth_token'], access_token['oauth_token_secret']]
        

        while not self.finished():
            # TODO: Implement backoff algorithm in case of connection failures - watch out for the fact this could delay the requester component
            if self.dataReady("inbox"):
                # Retrieve keywords to look up
                person = self.recv("inbox")

                # Ensure we're not rate limited during the first request - if so we'll wait for 15 mins before our next request
                if (datetime.today() - timedelta(minutes=15)) > self.ratelimited:
                    requesturl = twitterurl + "?q=" + urllib.quote(person) + "&per_page=5"

                    params = {
                        'oauth_version': "1.0",
                        'oauth_nonce': oauth.generate_nonce(),
                        'oauth_timestamp': int(time.time()),
                    }

                    token = oauth.Token(key=self.keypair[0],secret=self.keypair[1])
                    consumer = oauth.Consumer(key=self.consumerkeypair[0],secret=self.consumerkeypair[1])

                    params['oauth_token'] = token.key
                    params['oauth_consumer_key'] = consumer.key

                    req = oauth.Request(method="GET",url=requesturl,parameters=params)

                    signature_method = oauth.SignatureMethod_HMAC_SHA1()
                    req.sign_request(signature_method, consumer, token)

                    requestheaders = req.to_header()
                    requestheaders['User-Agent'] = "BBC R&D Grabber"

                    # Connect to Twitter
                    try:
                        req = urllib2.Request(requesturl,None,requestheaders) # Why won't this work?!? Is it trying to POST?
                        conn1 = urllib2.urlopen(req)
                    except httplib.BadStatusLine, e:
                        sys.stderr.write('PeopleSearch BadStatusLine error: ' + str(e) + '\n')
                        conn1 = False
                    except urllib2.HTTPError, e:
                        sys.stderr.write('PeopleSearch HTTP error: ' + str(e.code) + '\n')
                        conn1 = False
                    except urllib2.URLError, e:
                        sys.stderr.write('PeopleSearch URL error: ' + str(e.reason) + '\n')
                        conn1 = False

                    if conn1:
                        # Check rate limiting here and print current limit
                        headers = conn1.info()
                        headerlist = string.split(str(headers),"\n")
                        for line in headerlist:
                            if line != "":
                                splitheader = line.split()
                                if splitheader[0] == "X-FeatureRateLimit-Remaining:" or splitheader[0] == "X-RateLimit-Remaining:":
                                    print splitheader[0] + " " + str(splitheader[1])
                                    if int(splitheader[1]) < 5:
                                        self.ratelimited = datetime.today()
                        # Grab json format result of people search here
                        try:
                            data = conn1.read()
                            try:
                                content = cjson.decode(data)
                                self.send(content,"outbox")
                            except cjson.DecodeError, e:
                                self.send(dict(),"outbox")
                        except IOError, e:
                            sys.stderr.write('PeopleSearch IO error: ' + str(e) + '\n')
                            self.send(dict(),"outbox")
                        conn1.close()
                    else:
                        self.send(dict(),"outbox")
                else:
                    print "Twitter search paused - rate limited"
                    self.send(dict(),"outbox")
            self.pause()
            yield 1
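
The rate-limit handling above boils down to a single timedelta comparison: a new request is only attempted once more than 15 minutes have passed since self.ratelimited was last set. A minimal sketch of the same pattern outside the Kamaelia component model (class and method names here are illustrative, not from the original source):

from datetime import datetime, timedelta

class RateLimitGuard(object):
    # Allow requests only once a cooldown has elapsed since the limit was
    # last hit, mirroring the 15-minute check in the component above.
    def __init__(self, cooldown=timedelta(minutes=15)):
        self.cooldown = cooldown
        self.limited_at = datetime.min  # i.e. never rate limited so far

    def mark_limited(self):
        self.limited_at = datetime.today()

    def allowed(self):
        return (datetime.today() - self.cooldown) > self.limited_at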

Example 38

Project: eden Source File: config.py
def config(settings):
    """
        New Zealand Red Cross
        - designed to be used with locations.NZ & VM templates
    """

    T = current.T

    #settings.base.system_name = T("Sahana Skeleton")
    #settings.base.system_name_short = T("Sahana")

    # PrePopulate data
    settings.base.prepopulate += ("custom/NZRC", "custom/NZRC/Demo")

    # Theme (folder to use for views/layout.html)
    settings.base.theme = "custom.NZRC"

    # Authentication settings
    # Do new users need to verify their email address?
    settings.auth.registration_requires_verification = True
    # Do new users need to be approved by an administrator prior to being able to login?
    settings.auth.registration_requires_approval = True
    # Need an Org in order to link to Volunteer/Member records
    settings.auth.registration_requests_organisation = True
    settings.auth.registration_organisation_required = True

    settings.auth.registration_link_user_to = {"staff": T("Staff"),
                                               "volunteer": T("Volunteer"),
                                               "member": T("Member")
                                               }

    # Approval emails get sent to all admins
    settings.mail.approver = "ADMIN"

    # Uncomment to display the Map Legend as a floating DIV
    settings.gis.legend = "float"
    # Uncomment to show the Print control:
    # http://eden.sahanafoundation.org/wiki/UserGuidelines/Admin/MapPrinting
    #settings.gis.print_button = True

    # Number formats (defaults to ISO 31-0)
    # Decimal separator for numbers (defaults to ,)
    settings.L10n.decimal_separator = "."
    # Thousands separator for numbers (defaults to space)
    settings.L10n.thousands_separator = ","

    # Security Policy
    # http://eden.sahanafoundation.org/wiki/S3AAA#System-widePolicy
    # 1: Simple (default): Global as Reader, Authenticated as Editor
    # 2: Editor role required for Update/Delete, unless record owned by session
    # 3: Apply Controller ACLs
    # 4: Apply both Controller & Function ACLs
    # 5: Apply Controller, Function & Table ACLs
    # 6: Apply Controller, Function, Table ACLs and Entity Realm
    # 7: Apply Controller, Function, Table ACLs and Entity Realm + Hierarchy
    # 8: Apply Controller, Function, Table ACLs, Entity Realm + Hierarchy and Delegations

    settings.security.policy = 7 # Organisation-ACLs + Hierarchy

    # -------------------------------------------------------------------------
    # Org
    settings.org.branches = True

    # -------------------------------------------------------------------------
    # PR
    # Uncomment to do a search for duplicates in AddPersonWidget2
    settings.pr.lookup_duplicates = True

    # -------------------------------------------------------------------------
    # HRM
    settings.hrm.email_required = False

    settings.hrm.use_credentials = False

    # -------------------------------------------------------------------------
    # Projects
    settings.project.mode_task = True

    # -------------------------------------------------------------------------
    # Req
    settings.req.req_type = ["People"]

    # Uncomment to Commit Named People rather than simply Anonymous Skills
    settings.req.commit_people = True

    # Disable Inline Forms, unless we enable separate controllers
    # (otherwise Create form cannot redirect to next tab correctly)
    settings.req.inline_forms = False

    settings.req.show_quantity_transit = False

    # -------------------------------------------------------------------------
    def ns_only(tablename,
                fieldname = "organisation_id",
                required = True,
                branches = True,
                updateable = True,
                limit_filter_opts = True,
                hierarchy = True,
                ):
        """
            Function to configure an organisation_id field to be restricted to just
            NS/Branch

            @param required: Field is mandatory
            @param branches: Include Branches
            @param updateable: Limit to Orgs which the user can update
            @param limit_filter_opts: Also limit the Filter options
            @param hierarchy: Use the hierarchy widget (unsuitable for use in Inline Components)

            NB If limit_filter_opts=True, apply in customise_xx_controller inside prep,
               after standard_prep is run
        """

        # Lookup organisation_type_id for Red Cross
        db = current.db
        s3db = current.s3db
        ttable = s3db.org_organisation_type
        try:
            type_id = db(ttable.name == "Red Cross / Red Crescent").select(ttable.id,
                                                                           limitby=(0, 1),
                                                                           cache = s3db.cache,
                                                                           ).first().id
        except:
            # No IFRC prepop done - skip (e.g. testing impacts of CSS changes in this theme)
            return

        # Load standard model
        f = s3db[tablename][fieldname]

        if limit_filter_opts:
            # Find the relevant filter widget & limit its options
            filter_widgets = s3db.get_config(tablename, "filter_widgets")
            filter_widget = None
            if filter_widgets:
                from s3 import FS, S3HierarchyFilter
                for w in filter_widgets:
                    if isinstance(w, S3HierarchyFilter) and \
                       w.field == "organisation_id":
                        filter_widget = w
                        break
            if filter_widget is not None:
                selector = FS("organisation_organisation_type.organisation_type_id")
                filter_widget.opts["filter"] = (selector == type_id)

        # Label
        if branches:
            f.label = T("National Society / Branch")
            #f.label = T("Branch")
        else:
            f.label = T("National Society")

        # Requires

        # Filter by type
        ltable = db.org_organisation_organisation_type
        rows = db(ltable.organisation_type_id == type_id).select(ltable.organisation_id)
        filter_opts = [row.organisation_id for row in rows]

        auth = current.auth
        s3_has_role = auth.s3_has_role
        Admin = s3_has_role("ADMIN")
        if branches:
            if Admin:
                parent = True
            else:
                # @ToDo: Set the represent according to whether the user can see resources of just a single NS or multiple
                # @ToDo: Consider porting this into core
                user = auth.user
                if user:
                    realms = user.realms
                    #delegations = user.delegations
                    if realms:
                        parent = True
                    else:
                        parent = False
                else:
                    parent = True

        else:
            # Keep the represent function as simple as possible
            parent = False
            # Exclude branches
            btable = s3db.org_organisation_branch
            rows = db((btable.deleted != True) &
                      (btable.branch_id.belongs(filter_opts))).select(btable.branch_id)
            filter_opts = list(set(filter_opts) - set(row.branch_id for row in rows))

        organisation_represent = s3db.org_OrganisationRepresent
        represent = organisation_represent(parent=parent)
        f.represent = represent

        from s3 import IS_ONE_OF
        requires = IS_ONE_OF(db, "org_organisation.id",
                             represent,
                             filterby = "id",
                             filter_opts = filter_opts,
                             updateable = updateable,
                             orderby = "org_organisation.name",
                             sort = True)
        if not required:
            from gluon import IS_EMPTY_OR
            requires = IS_EMPTY_OR(requires)
        f.requires = requires

        if parent and hierarchy:
            # Use hierarchy-widget
            from s3 import FS, S3HierarchyWidget
            # No need for parent in represent (it's a hierarchy view)
            node_represent = organisation_represent(parent=False)
            # Filter by type
            # (no need to exclude branches - we wouldn't be here if we didn't use branches)
            selector = FS("organisation_organisation_type.organisation_type_id")
            f.widget = S3HierarchyWidget(lookup="org_organisation",
                                         filter=(selector == type_id),
                                         represent=node_represent,
                                         multiple=False,
                                         leafonly=False,
                                         )
        else:
            # Dropdown not Autocomplete
            f.widget = None

        # Comment
        if (Admin or s3_has_role("ORG_ADMIN")):
            # Need to do import after setting Theme
            from s3layouts import S3PopupLink
            from s3 import S3ScriptItem
            add_link = S3PopupLink(c = "org",
                                   f = "organisation",
                                   vars = {"organisation_type.name":"Red Cross / Red Crescent"},
                                   label = T("Create National Society"),
                                   title = T("National Society"),
                                   )
            comment = f.comment
            if not comment or isinstance(comment, S3PopupLink):
                f.comment = add_link
            elif isinstance(comment[1], S3ScriptItem):
                # Don't overwrite scripts
                f.comment[0] = add_link
            else:
                f.comment = add_link
        else:
            # Not allowed to add NS/Branch
            f.comment = ""

    # -----------------------------------------------------------------------------
    def customise_auth_user_controller(**attr):
        """
            Customise admin/user() and default/user() controllers
        """

        if current.auth.is_logged_in():
            # Organisation needs to be an NS/Branch
            ns_only("auth_user",
                    required = True,
                    branches = True,
                    updateable = False, # Need to see all Orgs in Registration screens
                    )
        else:
            # Registration page
            db = current.db
            s3db = current.s3db
            field = db.auth_user.organisation_id

            # Limit to just Branches
            field.label = T("Branch")
            # Simplify Represent
            field.represent = represent = s3db.org_OrganisationRepresent(parent=False)
            otable = s3db.org_organisation
            btable = s3db.org_organisation_branch
            query = (btable.deleted != True) & \
                    (btable.organisation_id == otable.id) & \
                    (otable.name == "New Zealand Red Cross")
            rows = db(query).select(btable.branch_id)
            filter_opts = [row.branch_id for row in rows]
            from s3 import IS_ONE_OF
            field.requires = IS_ONE_OF(db, "org_organisation.id",
                                       represent,
                                       filterby = "id",
                                       filter_opts = filter_opts,
                                       orderby = "org_organisation.name",
                                       sort = True,
                                       )
            # Don't create new branches here
            field.comment = None

        return attr

    settings.customise_auth_user_controller = customise_auth_user_controller

    # -----------------------------------------------------------------------------
    def customise_deploy_alert_resource(r, tablename):

        current.s3db.deploy_alert_recipient.human_resource_id.label = T("Member")

    settings.customise_deploy_alert_resource = customise_deploy_alert_resource

    # -----------------------------------------------------------------------------
    def customise_deploy_application_resource(r, tablename):

        r.table.human_resource_id.label = T("Member")

    settings.customise_deploy_application_resource = customise_deploy_application_resource

    # -----------------------------------------------------------------------------
    def _customise_assignment_fields(**attr):

        MEMBER = T("Member")
        from gluon.html import DIV
        hr_comment =  \
            DIV(_class="tooltip",
                _title="%s|%s" % (MEMBER,
                                  current.messages.AUTOCOMPLETE_HELP))

        from s3 import IS_ONE_OF
        s3db = current.s3db
        atable = s3db.deploy_assignment
        atable.human_resource_id.label = MEMBER
        atable.human_resource_id.comment = hr_comment
        field = atable.job_title_id
        field.comment = None
        field.label = T("Sector")
        field.requires = IS_ONE_OF(current.db, "hrm_job_title.id",
                                   field.represent,
                                   filterby = "type",
                                   filter_opts = (4,),
                                   )

        # Default activity_type when creating experience records from assignments
        activity_type = s3db.hrm_experience.activity_type
        activity_type.default = activity_type.update = "rdrt"

        return

    # -----------------------------------------------------------------------------
    def customise_deploy_assignment_controller(**attr):

        s3db = current.s3db
        table = s3db.deploy_assignment

        # Labels
        table.job_title_id.label = T("RDRT Type")
        table.start_date.label = T("Deployment Date")
        #table.end_date.label = T("EOM")

        # List fields
        list_fields = [(T("Mission"), "mission_id$name"),
                       (T("Appeal Code"), "mission_id$code"),
                       (T("Country"), "mission_id$location_id"),
                       (T("Disaster Type"), "mission_id$event_type_id"),
                       # @todo: replace by date of first alert?
                       (T("Date"), "mission_id$created_on"),
                       "job_title_id",
                       (T("Member"), "human_resource_id$person_id"),
                       (T("Deploying NS"), "human_resource_id$organisation_id"),
                       "start_date",
                       "end_date",
                       "appraisal.rating",
                       # @todo: Comments of the mission (=>XLS only)
                      ]

        # Report options
        report_fact = [(T("Number of Deployments"), "count(human_resource_id)"),
                       (T("Average Rating"), "avg(appraisal.rating)"),
                       ]
        report_axis = [(T("Appeal Code"), "mission_id$code"),
                       (T("Country"), "mission_id$location_id"),
                       (T("Disaster Type"), "mission_id$event_type_id"),
                       (T("RDRT Type"), "job_title_id"),
                       (T("Deploying NS"), "human_resource_id$organisation_id"),
                      ]
        report_options = Storage(
            rows=report_axis,
            cols=report_axis,
            fact=report_fact,
            defaults=Storage(rows="mission_id$location_id",
                             cols="mission_id$event_type_id",
                             fact="count(human_resource_id)",
                             totals=True
                             )
            )

        s3db.configure("deploy_assignment",
                       list_fields = list_fields,
                       report_options = report_options,
                       )


        # CRUD Strings
        current.response.s3.crud_strings["deploy_assignment"] = Storage(
            label_create = T("Add Deployment"),
            title_display = T("Deployment Details"),
            title_list = T("Deployments"),
            title_update = T("Edit Deployment Details"),
            title_upload = T("Import Deployments"),
            label_list_button = T("List Deployments"),
            label_delete_button = T("Delete Deployment"),
            msg_record_created = T("Deployment added"),
            msg_record_modified = T("Deployment Details updated"),
            msg_record_deleted = T("Deployment deleted"),
            msg_list_empty = T("No Deployments currently registered"))

        _customise_assignment_fields()

        # Restrict Location to just Countries
        from s3 import S3Represent
        field = s3db.deploy_mission.location_id
        field.represent = S3Represent(lookup="gis_location", translate=True)

        return attr

    settings.customise_deploy_assignment_controller = customise_deploy_assignment_controller

    # -----------------------------------------------------------------------------
    def customise_deploy_mission_controller(**attr):

        db = current.db
        s3db = current.s3db
        s3 = current.response.s3
        MEMBER = T("Member")
        from gluon.html import DIV
        hr_comment =  \
            DIV(_class="tooltip",
                _title="%s|%s" % (MEMBER,
                                  current.messages.AUTOCOMPLETE_HELP))

        table = s3db.deploy_mission
        table.code.label = T("Appeal Code")
        table.event_type_id.label = T("Disaster Type")
        table.organisation_id.readable = table.organisation_id.writable = False

        # Restrict Location to just Countries
        from s3 import S3Represent, S3MultiSelectWidget
        field = table.location_id
        field.label = current.messages.COUNTRY
        field.requires = s3db.gis_country_requires
        field.widget = S3MultiSelectWidget(multiple=False)
        field.represent = S3Represent(lookup="gis_location", translate=True)

        rtable = s3db.deploy_response
        rtable.human_resource_id.label = MEMBER
        rtable.human_resource_id.comment = hr_comment

        _customise_assignment_fields()

        # Report options
        report_fact = [(T("Number of Missions"), "count(id)"),
                       (T("Number of Countries"), "count(location_id)"),
                       (T("Number of Disaster Types"), "count(event_type_id)"),
                       (T("Number of Responses"), "sum(response_count)"),
                       (T("Number of Deployments"), "sum(hrquantity)"),
                      ]
        report_axis = ["code",
                       "location_id",
                       "event_type_id",
                       "status",
                       ]
        report_options = Storage(rows = report_axis,
                                 cols = report_axis,
                                 fact = report_fact,
                                 defaults = Storage(rows = "location_id",
                                                    cols = "event_type_id",
                                                    fact = "sum(hrquantity)",
                                                    totals = True,
                                                    ),
                                 )

        s3db.configure("deploy_mission",
                       report_options = report_options,
                       )

        # CRUD Strings
        s3.crud_strings["deploy_assignment"] = Storage(
            label_create = T("New Deployment"),
            title_display = T("Deployment Details"),
            title_list = T("Deployments"),
            title_update = T("Edit Deployment Details"),
            title_upload = T("Import Deployments"),
            label_list_button = T("List Deployments"),
            label_delete_button = T("Delete Deployment"),
            msg_record_created = T("Deployment added"),
            msg_record_modified = T("Deployment Details updated"),
            msg_record_deleted = T("Deployment deleted"),
            msg_list_empty = T("No Deployments currently registered"))

        # Custom prep
        standard_prep = s3.prep
        def custom_prep(r):
            # Call standard prep
            if callable(standard_prep):
                result = standard_prep(r)
            else:
                result = True

            if r.interactive and not current.auth.s3_has_role("RDRT_ADMIN"):
                # Limit write-access to these fields to RDRT Admins:
                fields = ("name",
                          "event_type_id",
                          "location_id",
                          "code",
                          "status",
                          )
                table = r.resource.table
                for f in fields:
                    if f in table:
                        table[f].writable = False

            #if not r.component and r.method == "create":
            #    # Org is always IFRC
            #    otable = s3db.org_organisation
            #    query = (otable.name == "International Federation of Red Cross and Red Crescent Societies")
            #    organisation = db(query).select(otable.id,
            #                                    limitby = (0, 1),
            #                                    ).first()
            #    if organisation:
            #        r.table.organisation_id.default = organisation.id

            return result
        s3.prep = custom_prep

        return attr

    settings.customise_deploy_mission_controller = customise_deploy_mission_controller

    # -----------------------------------------------------------------------------
    def customise_hrm_certificate_resource(r, tablename):

        field = current.s3db.hrm_certificate.organisation_id
        field.readable = field.writable = False

    settings.customise_hrm_certificate_resource = customise_hrm_certificate_resource

    # -----------------------------------------------------------------------------
    def customise_hrm_course_resource(r, tablename):

        field = current.s3db.hrm_course.organisation_id
        field.readable = field.writable = False

    settings.customise_hrm_course_resource = customise_hrm_course_resource

    # -----------------------------------------------------------------------------
    def customise_hrm_credential_controller(**attr):

        # Currently just used by RDRT
        table = current.s3db.hrm_credential
        field = table.job_title_id
        field.comment = None
        field.label = T("Sector")
        from s3 import IS_ONE_OF
        field.requires = IS_ONE_OF(current.db, "hrm_job_title.id",
                                   field.represent,
                                   filterby = "type",
                                   filter_opts = (4,),
                                   )
        table.organisation_id.readable = table.organisation_id.writable = False
        table.performance_rating.readable = table.performance_rating.writable = False
        table.start_date.readable = table.start_date.writable = False
        table.end_date.readable = table.end_date.writable = False

        return attr

    settings.customise_hrm_credential_controller = customise_hrm_credential_controller

    # -----------------------------------------------------------------------------
    def customise_hrm_department_resource(r, tablename):

        field = current.s3db.hrm_department.organisation_id
        field.readable = field.writable = False

    settings.customise_hrm_department_resource = customise_hrm_department_resource

    # -----------------------------------------------------------------------------
    def hrm_human_resource_create_onaccept(form):

        s3db = current.s3db

        # Make Volunteer deployable
        s3db.deploy_application.insert(human_resource_id=form.vars.id)

        # Fire normal onaccept
        s3db.hrm_human_resource_onaccept(form)


    # -----------------------------------------------------------------------------
    def customise_hrm_human_resource_resource(r, tablename):

        db = current.db
        s3db = current.s3db
        field = s3db.hrm_human_resource.organisation_id

        # Limit to just Branches
        field.label = T("Branch")
        # Simplify Represent
        field.represent = represent = s3db.org_OrganisationRepresent(parent=False)
        otable = s3db.org_organisation
        btable = s3db.org_organisation_branch
        query = (btable.deleted != True) & \
                (btable.organisation_id == otable.id) & \
                (otable.name == "New Zealand Red Cross")
        rows = db(query).select(btable.branch_id)
        filter_opts = [row.branch_id for row in rows]
        from s3 import IS_ONE_OF
        field.requires = IS_ONE_OF(db, "org_organisation.id",
                                   represent,
                                   filterby = "id",
                                   filter_opts = filter_opts,
                                   updateable = True,
                                   orderby = "org_organisation.name",
                                   sort = True,
                                   )
        # Don't create new branches here
        field.comment = None

        s3db.configure("hrm_human_resource",
                       create_onaccept = hrm_human_resource_create_onaccept,
                       )

    settings.customise_hrm_human_resource_resource = customise_hrm_human_resource_resource

    # -----------------------------------------------------------------------------
    def customise_hrm_experience_controller(**attr):

        s3 = current.response.s3

        standard_prep = s3.prep
        def custom_prep(r):
            # Call standard prep
            if callable(standard_prep):
                if not standard_prep(r):
                    return False

            if r.controller == "deploy":
                # Popups in RDRT Member Profile

                table = r.table

                job_title_id = table.job_title_id
                job_title_id.label = T("Sector / Area of Expertise")
                job_title_id.comment = None
                jtable = current.s3db.hrm_job_title
                query = (jtable.type == 4)
                if r.method == "update" and r.record.job_title_id:
                    # Allow to keep the current value
                    query |= (jtable.id == r.record.job_title_id)
                from s3 import IS_ONE_OF
                job_title_id.requires = IS_ONE_OF(current.db(query),
                                                  "hrm_job_title.id",
                                                  job_title_id.represent,
                                                  )
                job_title = table.job_title
                job_title.readable = job_title.writable = True
            return True
        s3.prep = custom_prep

        return attr

    settings.customise_hrm_experience_controller = customise_hrm_experience_controller

    # =============================================================================
    def vol_programme_active(person_id):
        """
            Whether a Volunteer counts as 'Active' based on the number of hours
            they've done (both Trainings & Programmes) per month, averaged over
            the last year.
            If nothing is recorded for the last 3 months, don't penalise, as we
            assume that data entry hasn't yet been done.

            @ToDo: This should be based on the HRM record, not Person record
                   - could be active with Org1 but not with Org2
            @ToDo: allow to be calculated differently per-Org
        """

        now = current.request.utcnow

        # Time spent on Programme work
        htable = current.s3db.hrm_programme_hours
        query = (htable.deleted == False) & \
                (htable.person_id == person_id) & \
                (htable.date != None)
        programmes = current.db(query).select(htable.hours,
                                              htable.date,
                                              orderby=htable.date)
        if programmes:
            # Ignore up to 3 months of records
            import datetime
            three_months_prior = (now - datetime.timedelta(days=92))
            end = max(programmes.last().date, three_months_prior.date())
            last_year = end - datetime.timedelta(days=365)
            # Is this the Volunteer's first year?
            if programmes.first().date > last_year:
                # Only start counting from their first month
                start = programmes.first().date
            else:
                # Start from a year before the latest record
                start = last_year

            # Total hours between start and end
            programme_hours = 0
            for programme in programmes:
                if programme.date >= start and programme.date <= end and programme.hours:
                    programme_hours += programme.hours

            # Average hours per month
            months = max(1, (end - start).days / 30.5)
            average = programme_hours / months

            # Active?
            if average >= 8:
                return True

        return False

    def hrm_vol_active(default):
        """ Whether & How to track Volunteers as Active """

        # Use formula based on hrm_programme
        return vol_programme_active

    settings.hrm.vol_active = hrm_vol_active

    # -----------------------------------------------------------------------------
    def rdrt_member_profile_header(r):
        """ Custom profile header to allow update of RDRT roster status """

        record = r.record
        if not record:
            return ""

        person_id = record.person_id
        from s3 import s3_fullname, s3_avatar_represent
        name = s3_fullname(person_id)

        table = r.table

        # Organisation
        comments = table.organisation_id.represent(record.organisation_id)

        from s3 import s3_unicode
        from gluon.html import A, DIV, H2, LABEL, P, SPAN

        # Add job title if present
        job_title_id = record.job_title_id
        if job_title_id:
            comments = (SPAN("%s, " % \
                             s3_unicode(table.job_title_id.represent(job_title_id))),
                             comments)

        # Determine the current roster membership status (active/inactive)
        atable = current.s3db.deploy_application
        status = atable.active
        query = atable.human_resource_id == r.id
        row = current.db(query).select(atable.id,
                                       atable.active,
                                       limitby=(0, 1)).first()
        if row:
            active = 1 if row.active else 0
            status_id = row.id
            roster_status = status.represent(row.active)
        else:
            active = None
            status_id = None
            roster_status = current.messages.UNKNOWN_OPT

        if status_id and \
           current.auth.s3_has_permission("update",
                                          "deploy_application",
                                          record_id=status_id):
            # Make inline-editable
            roster_status = A(roster_status,
                              data = {"status": active},
                              _id = "rdrt-roster-status",
                              _title = T("Click to edit"),
                              )
            s3 = current.response.s3
            script = "/%s/static/themes/IFRC/js/rdrt.js" % r.application
            if script not in s3.scripts:
                s3.scripts.append(script)
            script = '''$.rdrtStatus('%(url)s','%(active)s','%(inactive)s','%(submit)s')'''
            from gluon import URL
            options = {"url": URL(c="deploy", f="application",
                                  args=["%s.s3json" % status_id]),
                       "active": status.represent(True),
                       "inactive": status.represent(False),
                       "submit": T("Save"),
                       }
            s3.jquery_ready.append(script % options)
        else:
            # Read-only
            roster_status = SPAN(roster_status)

        # Render profile header
        return DIV(A(s3_avatar_represent(person_id,
                                         tablename="pr_person",
                                         _class="media-object",
                                         ),
                     _class="pull-left",
                     ),
                   H2(name),
                   P(comments),
                   DIV(LABEL(status.label + ": "), roster_status),
                   _class="profile-header",
                   )

    # -----------------------------------------------------------------------------
    def emergency_contact_represent(row):
        """
            Representation of Emergency Contacts (S3Represent label renderer)

            @param row: the row
        """

        items = [row["pr_contact_emergency.name"]]
        relationship = row["pr_contact_emergency.relationship"]
        if relationship:
            items.append(" (%s)" % relationship)
        phone_number = row["pr_contact_emergency.phone"]
        if phone_number:
            items.append(": %s" % phone_number)
        return "".join(items)

    # -----------------------------------------------------------------------------
    def customise_member_membership_resource(r, tablename):

        db = current.db
        s3db = current.s3db
        field = s3db.member_membership.organisation_id

        # Limit to just Branches
        field.label = T("Branch")
        # Simplify Represent
        field.represent = represent = s3db.org_OrganisationRepresent(parent=False)
        otable = s3db.org_organisation
        btable = s3db.org_organisation_branch
        query = (btable.deleted != True) & \
                (btable.organisation_id == otable.id) & \
                (otable.name == "New Zealand Red Cross")
        rows = db(query).select(btable.branch_id)
        filter_opts = [row.branch_id for row in rows]
        from s3 import IS_ONE_OF
        field.requires = IS_ONE_OF(db, "org_organisation.id",
                                   represent,
                                   filterby = "id",
                                   filter_opts = filter_opts,
                                   updateable = True,
                                   orderby = "org_organisation.name",
                                   sort = True,
                                   )
        # Don't create new branches here
        field.comment = None

    settings.customise_member_membership_resource = customise_member_membership_resource

    # -----------------------------------------------------------------------------
    def customise_member_membership_type_resource(r, tablename):

        field = current.s3db.member_membership_type.organisation_id
        field.readable = field.writable = False

    settings.customise_member_membership_type_resource = customise_member_membership_type_resource

    # -----------------------------------------------------------------------------
    def customise_vol_award_resource(r, tablename):

        field = current.s3db.vol_award.organisation_id
        field.readable = field.writable = False

    settings.customise_vol_award_resource = customise_vol_award_resource

    # -----------------------------------------------------------------------------
    def customise_dvr_case_resource(r, tablename):

        db = current.db
        s3db = current.s3db
        field = s3db.dvr_case.organisation_id

        # Limit to just Branches
        field.label = T("Branch")
        # Simplify Represent
        field.represent = represent = s3db.org_OrganisationRepresent(parent=False)
        otable = s3db.org_organisation
        btable = s3db.org_organisation_branch
        query = (btable.deleted != True) & \
                (btable.organisation_id == otable.id) & \
                (otable.name == "New Zealand Red Cross")
        rows = db(query).select(btable.branch_id)
        filter_opts = [row.branch_id for row in rows]
        from s3 import IS_ONE_OF
        field.requires = IS_ONE_OF(db, "org_organisation.id",
                                   represent,
                                   filterby = "id",
                                   filter_opts = filter_opts,
                                   updateable = True,
                                   orderby = "org_organisation.name",
                                   sort = True,
                                   )
        # Don't create new branches here
        field.comment = None

    settings.customise_dvr_case_resource = customise_dvr_case_resource

    # -------------------------------------------------------------------------
    # Comment/uncomment modules here to disable/enable them
    # Modules menu is defined in modules/eden/menu.py
    settings.modules = OrderedDict([
        # Core modules which shouldn't be disabled
        ("default", Storage(
            name_nice = T("Home"),
            restricted = False, # Use ACLs to control access to this module
            access = None,      # All Users (inc Anonymous) can see this module in the default menu & access the controller
        )),
        ("admin", Storage(
            name_nice = T("Administration"),
            #description = "Site Administration",
            restricted = True,
            access = "|1|",     # Only Administrators can see this module in the default menu & access the controller
        )),
        ("appadmin", Storage(
            name_nice = T("Administration"),
            #description = "Site Administration",
            restricted = True,
        )),
        ("errors", Storage(
            name_nice = T("Ticket Viewer"),
            #description = "Needed for Breadcrumbs",
            restricted = False,
        )),
        ("gis", Storage(
            name_nice = T("Map"),
            #description = "Situation Awareness & Geospatial Analysis",
            restricted = True,
        )),
        ("pr", Storage(
            name_nice = T("Person Registry"),
            #description = "Central point to record details on People",
            restricted = True,
            access = "|1|",     # Only Administrators can see this module in the default menu (access to controller is possible to all still)
        )),
        ("org", Storage(
            name_nice = T("Organizations"),
            #description = 'Lists "who is doing what & where". Allows relief agencies to coordinate their activities',
            restricted = True,
        )),
        ("hrm", Storage(
            name_nice = T("Staff"),
            #description = "Human Resources Management",
            restricted = True,
        )),
        ("vol", Storage(
            name_nice = T("Volunteers"),
            #description = "Human Resources Management",
            restricted = True,
        )),
        ("cms", Storage(
          name_nice = T("Content Management"),
          #description = "Content Management System",
          restricted = True,
        )),
        ("doc", Storage(
            name_nice = T("Docuements"),
            #description = "A library of digital resources, such as photos, docuements and reports",
            restricted = True,
        )),
        ("msg", Storage(
            name_nice = T("Messaging"),
            #description = "Sends & Receives Alerts via Email & SMS",
            restricted = True,
            # The user-visible functionality of this module isn't normally required. Rather, its main purpose is to be accessed from other modules.
        )),
        ("member", Storage(
               name_nice = T("Members"),
               #description = "Membership Management System",
               restricted = True,
        )),
        ("deploy", Storage(
               name_nice = T("Delegates"),
               #description = "Alerting and Deployment of Disaster Response Teams",
               restricted = True,
        )),
        ("project", Storage(
               name_nice = T("Tasks"),
               restricted = True,
        )),
        ("req", Storage(
           name_nice = T("Opportunities"),
           restricted = True,
        )),
        #("dvr", Storage(
        #   name_nice = T("Case Management"),
        #   #description = "Allow affected individuals & households to register to receive compensation and distributions",
        #   restricted = True,
        #)),
        ("po", Storage(
           name_nice = T("Project Outreach"),
           #description = "Allow affected individuals & households to register to receive compensation and distributions",
           restricted = True,
        )),
    ])

# END =========================================================================
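
Note on the timedelta usage in this example: vol_programme_active() above anchors two windows with datetime.timedelta, a 92-day grace period for late data entry and a rolling 365-day averaging window. The following standalone sketch shows the same windowing idea outside the Eden framework; the hour_records structure and the is_active() name are illustrative assumptions, not part of the Sahana Eden API.

import datetime

def is_active(hour_records, now=None, threshold=8):
    """hour_records: list of (date, hours) tuples, sorted by date."""
    if not hour_records:
        return False
    now = now or datetime.datetime.utcnow()
    # Ignore up to 3 months of missing data entry
    three_months_prior = now - datetime.timedelta(days=92)
    end = max(hour_records[-1][0], three_months_prior.date())
    # Average over at most the last year, starting no earlier than the first record
    start = max(hour_records[0][0], end - datetime.timedelta(days=365))
    total = sum(hours for date, hours in hour_records
                if start <= date <= end and hours)
    months = max(1, (end - start).days / 30.5)
    return (total / months) >= threshold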

Example 39

Project: kamaelia_ Source File: LiveAnalysis.py
Function: main
    def main(self):
        # Calculate running total and mean etc
            self.dbConnect()
            while not self.finished():
                # The below does LIVE and FINAL analysis - do NOT run DataAnalyser at the same time

                Print("Analysis component: Checking for new data...")

                # Stage 1: Live analysis - could do with a better way to do the first query (indexed field 'analysed' to speed up for now)
                # Could move this into the main app to take a copy of tweets on arrival, but would rather solve separately if poss
                self.db_select("""SELECT tid,pid,timestamp,text,tweet_id,programme_position FROM rawdata WHERE analysed = 0 ORDER BY tid LIMIT 5000""")
                data = self.db_fetchall()

                # Cycle through all the as yet unanalysed tweets
                for result in data:
                    tid = result[0]
                    pid = result[1]
                    tweettime = result[2] # Timestamp based on the tweet's created_at field
                    tweettext = result[3]
                    tweetid = result[4] # This is the real tweet ID, tid just makes a unique identifier as each tweet can be stored against several pids
                    progpos = result[5] # Position through the programme that the tweet was made
                    dbtime = datetime.utcfromtimestamp(tweettime)
                    # Each tweet will be grouped into chunks of one minute to make display better, so set the seconds to zero
                    # This particular time is only used for console display now as a more accurate one calculated from programme position is found later
                    dbtime = dbtime.replace(second=0)
                    Print("Analysis component: Analysing new tweet for pid", pid, "(" , dbtime ,"):")
                    try:
                        Print("Analysis component: '" , tweettext , "'")
                    except UnicodeEncodeError, e:
                        Print ("UnicodeEncodeError", e)
                    self.db_select("""SELECT duration FROM programmes_unique WHERE pid = %s""",(pid))
                    progdata = self.db_fetchone()
                    duration = progdata[0]
                    self.db_select("""SELECT totaltweets,meantweets,mediantweets,modetweets,stdevtweets,timediff,timestamp,utcoffset FROM programmes WHERE pid = %s ORDER BY timestamp DESC""",(pid))
                    progdata2 = self.db_fetchone()
                    totaltweets = progdata2[0]
                    # Increment the total tweets recorded for this programme's broadcast
                    totaltweets += 1
                    meantweets = progdata2[1]
                    mediantweets = progdata2[2]
                    modetweets = progdata2[3]
                    stdevtweets = progdata2[4]
                    timediff = progdata2[5]
                    timestamp = progdata2[6]
                    utcoffset = progdata2[7]

                    # Need to work out the timestamp to assign to the entry in analysed data
                    progstart = timestamp - timediff
                    progmins = int(progpos / 60)
                    analysedstamp = int(progstart + (progmins * 60))
                    # Ensure that this tweet occurs within the length of the programme, otherwise for the purposes of this program it's useless

                    if progpos > 0 and progpos <= duration:
                        self.db_select("""SELECT did,totaltweets,wordfreqexpected,wordfrequnexpected FROM analyseddata WHERE pid = %s AND timestamp = %s""",(pid,analysedstamp))
                        analyseddata = self.db_fetchone()
                        # Just in case of a missing raw json object (ie. programme terminated before it was stored - allow it to be skipped if not found after 30 secs)
                        #failcounter = 0
                        # Pass this tweet to the NLTK analysis component
                        self.send([pid,tweetid],"nltk")
#                        print "BUM", 1
                        while not self.dataReady("nltk"):
                        #    if failcounter >= 3000:
                        #        nltkdata = list()
                        #        break
                            time.sleep(0.01)
                        #    failcounter += 1
                        #if failcounter < 3000:
#                        print "BUM", 2
                        if 1:
                            # Receive back a list of words and their frequency for this tweet, including whether or not they are common, an entity etc
                            nltkdata = self.recv("nltk")
                        if analyseddata == None: # No tweets yet recorded for this minute
                            minutetweets = 1
                            self.db_insert("""INSERT INTO analyseddata (pid,totaltweets,timestamp) VALUES (%s,%s,%s)""", (pid,minutetweets,analysedstamp))
                            for word in nltkdata:
                                # Check if we're storing a word or phrase here
                                if nltkdata[word][0] == 1:
                                    self.db_insert("""INSERT INTO wordanalysis (pid,timestamp,phrase,count,is_keyword,is_entity,is_common) VALUES (%s,%s,%s,%s,%s,%s,%s)""", (pid,analysedstamp,word,nltkdata[word][1],nltkdata[word][2],nltkdata[word][3],nltkdata[word][4]))
                                else:
                                    self.db_insert("""INSERT INTO wordanalysis (pid,timestamp,word,count,is_keyword,is_entity,is_common) VALUES (%s,%s,%s,%s,%s,%s,%s)""", (pid,analysedstamp,word,nltkdata[word][1],nltkdata[word][2],nltkdata[word][3],nltkdata[word][4]))
                        else:
                            did = analyseddata[0]
                            minutetweets = analyseddata[1] # Get current number of tweets for this minute
                            minutetweets += 1 # Add one to it for this tweet

                            self.db_update("""UPDATE analyseddata SET totaltweets = %s WHERE did = %s""",(minutetweets,did))

                            for word in nltkdata:
                                # Check if we're storing a word or phrase
                                if nltkdata[word][0] == 1:
                                    self.db_select("""SELECT wid,count FROM wordanalysis WHERE pid = %s AND timestamp = %s AND phrase LIKE %s""",(pid,analysedstamp,word))
                                    # Check if this phrase has already been stored for this minute - if so, increment the count
                                    wordcheck = self.db_fetchone()
                                    if wordcheck == None:
                                        self.db_insert("""INSERT INTO wordanalysis (pid,timestamp,phrase,count,is_keyword,is_entity,is_common) VALUES (%s,%s,%s,%s,%s,%s,%s)""", (pid,analysedstamp,word,nltkdata[word][1],nltkdata[word][2],nltkdata[word][3],nltkdata[word][4]))
                                    else:
                                        self.db_update("""UPDATE wordanalysis SET count = %s WHERE wid = %s""",(nltkdata[word][1] + wordcheck[1],wordcheck[0]))
                                else:
                                    self.db_select("""SELECT wid,count FROM wordanalysis WHERE pid = %s AND timestamp = %s AND word LIKE %s""",(pid,analysedstamp,word))
                                    # Check if this word has already been stored for this minute - if so, increment the count
                                    wordcheck = self.db_fetchone()
                                    if wordcheck == None:
                                        self.db_insert("""INSERT INTO wordanalysis (pid,timestamp,word,count,is_keyword,is_entity,is_common) VALUES (%s,%s,%s,%s,%s,%s,%s)""", (pid,analysedstamp,word,nltkdata[word][1],nltkdata[word][2],nltkdata[word][3],nltkdata[word][4]))
                                    else:
                                        self.db_update("""UPDATE wordanalysis SET count = %s WHERE wid = %s""",(nltkdata[word][1] + wordcheck[1],wordcheck[0]))
                        # Averages / stdev are calculated roughly based on the programme's running time at this point
                        progdate = datetime.utcfromtimestamp(timestamp) + timedelta(seconds=utcoffset)
                        actualstart = progdate - timedelta(seconds=timediff)
                        actualtweettime = datetime.utcfromtimestamp(tweettime + utcoffset)

                        # Calculate how far through the programme this tweet occurred
                        runningtime = actualtweettime - actualstart
                        runningtime = runningtime.seconds

                        if runningtime < 0:
                            runningtime = 0
                        else:
                            runningtime = float(runningtime) / 60

                        try:
                            meantweets = totaltweets / runningtime
                        except ZeroDivisionError, e:
                            meantweets = 0

                        self.db_select("""SELECT totaltweets FROM analyseddata WHERE pid = %s AND timestamp >= %s AND timestamp < %s""",(pid,progstart,analysedstamp+duration))
                        analyseddata = self.db_fetchall()

                        runningtime = int(runningtime)

                        tweetlist = list()
                        for result in analyseddata:
                            totaltweetsmin = result[0]
                            # Create a list of each minute and the total tweets for that minute in the programme
                            tweetlist.append(int(totaltweetsmin))

                        # Ensure tweetlist has enough entries
                        # If a minute has no tweets, it won't have a database record, so this has to be added
                        if len(tweetlist) < runningtime:
                            additions = runningtime - len(tweetlist)
                            while additions > 0:
                                tweetlist.append(0)
                                additions -= 1

                        # Order by programme position 0,1,2, mins etc
                        tweetlist.sort()

                        mediantweets = tweetlist[int(len(tweetlist)/2)]

                        modes = dict()
                        stdevlist = list()
                        for tweet in tweetlist:
                            modes[tweet] = tweetlist.count(tweet)
                            stdevlist.append((tweet - meantweets)*(tweet - meantweets))

                        modeitems = [[v, k] for k, v in modes.items()]
                        modeitems.sort(reverse=True)
                        modetweets = int(modeitems[0][1])

                        stdevtweets = 0
                        for val in stdevlist:
                            stdevtweets += val

                        try:
                            stdevtweets = math.sqrt(stdevtweets / runningtime)
                        except ZeroDivisionError, e:
                            stdevtweets = 0

                        # Finished analysis - update DB
                        self.db_update("""UPDATE programmes SET totaltweets = %s, meantweets = %s, mediantweets = %s, modetweets = %s, stdevtweets = %s WHERE pid = %s AND timestamp = %s""",(totaltweets,meantweets,mediantweets,modetweets,stdevtweets,pid,timestamp))

                    else:
                        pass
                        # Print("Analysis component: Skipping tweet - falls outside the programme's running time")

                    # Mark the tweet as analysed
                    self.db_update("""UPDATE rawdata SET analysed = 1 WHERE tid = %s""",(tid))
                    Print("Analysis component: Done!")

                # Stage 2: If all raw tweets analysed and imported = 1 (all data for this programme stored and programme finished), finalise the analysis - could do bookmark identification here too?
                self.db_select("""SELECT pid,totaltweets,meantweets,mediantweets,modetweets,stdevtweets,timestamp,timediff FROM programmes WHERE imported = 1 AND analysed = 0 LIMIT 5000""")
                data = self.db_fetchall()
                # Cycle through each programme that's ready for final analysis
                for result in data:
                    pid = result[0]
                    self.db_select("""SELECT duration,title FROM programmes_unique WHERE pid = %s""",(pid))
                    data2 = self.db_fetchone()
                    if not data2:
                        Print("Getting data for duration,title, etc failed - pid", pid)
                        Print("Let's try skipping this pid")
                        continue
                    duration = data2[0]
                    totaltweets = result[1]
                    meantweets = result[2]
                    mediantweets = result[3]
                    modetweets = result[4]
                    stdevtweets = result[5]
                    title = data2[1]
                    timestamp = result[6]
                    timediff = result[7]
                    # Cycle through checking if all tweets for this programme have been analysed - if so finalise the stats
                    self.db_select("""SELECT tid FROM rawdata WHERE analysed = 0 AND pid = %s""", (pid))
                    if self.db_fetchone() == None:
                        # OK to finalise stats here
                        Print("Analysis component: Finalising stats for pid:", pid, "(" , title , ")")
                        meantweets = float(totaltweets) / (duration / 60) # Mean tweets per minute
                        self.db_select("""SELECT totaltweets FROM analyseddata WHERE pid = %s AND timestamp >= %s AND timestamp < %s""",(pid,timestamp-timediff,timestamp+duration-timediff))
                        analyseddata = self.db_fetchall()

                        runningtime = duration / 60

                        tweetlist = list()
                        for result in analyseddata:
                            totaltweetsmin = result[0]
                            tweetlist.append(int(totaltweetsmin))

                        # Ensure tweetlist has enough entries - as above, if no tweets are recorded for a minute it won't be present in the DB
                        if len(tweetlist) < runningtime:
                            additions = runningtime - len(tweetlist)
                            while additions > 0:
                                tweetlist.append(0)
                                additions -= 1

                        tweetlist.sort()

                        mediantweets = tweetlist[int(len(tweetlist)/2)]

                        modes = dict()
                        stdevlist = list()
                        for tweet in tweetlist:
                            modes[tweet] = tweetlist.count(tweet)
                            stdevlist.append((tweet - meantweets)*(tweet - meantweets))

                        modeitems = [[v, k] for k, v in modes.items()]
                        modeitems.sort(reverse=True)
                        modetweets = int(modeitems[0][1])

                        stdevtweets = 0
                        for val in stdevlist:
                            stdevtweets += val
                        try:
                            stdevtweets = math.sqrt(stdevtweets / runningtime)
                        except ZeroDivisionError, e:
                            stdevtweets = 0

                        if 1: # This data is purely a readout to the terminal at the moment associated with word and phrase frequency, and retweets
                            sqltimestamp1 = timestamp - timediff
                            sqltimestamp2 = timestamp + duration - timediff
                            self.db_select("""SELECT tweet_id FROM rawdata WHERE pid = %s AND timestamp >= %s AND timestamp < %s""", (pid,sqltimestamp1,sqltimestamp2))
                            rawtweetids = self.db_fetchall()
                            tweetids = list()
                            for tweet in rawtweetids:
                                tweetids.append(tweet[0])

                            if len(tweetids) > 0:
                                # Just in case of a missing raw json object (ie. programme terminated before it was stored - allow it to be skipped if not found after 10 secs)
                                failcounter = 0
                                self.send([pid,tweetids],"nltkfinal")
                                while not self.dataReady("nltkfinal"):
                                    if failcounter >= 1000:
                                        Print("Timed out waiting for NTLKFINAL")
                                        nltkdata = list()
                                        break
                                    time.sleep(0.01)

                                    failcounter += 1
                                    if failcounter %100 == 0:
                                        Print( "Hanging waiting for NLTKFINAL" )

                                Print("failcounter (<1000 is success)", failcounter)
                                if failcounter < 1000:
#                                if 1:
                                    nltkdata = self.recv("nltkfinal")

                        self.db_update("""UPDATE programmes SET meantweets = %s, mediantweets = %s, modetweets = %s, stdevtweets = %s, analysed = 1 WHERE pid = %s AND timestamp = %s""",(meantweets,mediantweets,modetweets,stdevtweets,pid,timestamp))
                        Print("Analysis component: Done!")

                # Sleep here until more data is available to analyse
                Print("Analysis component: Sleeping for 10 seconds...")
                time.sleep(10)
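
The timedelta arithmetic in this example appears where the programme start and the tweet time are shifted by the broadcaster's UTC offset (timedelta(seconds=utcoffset), timedelta(seconds=timediff)). A minimal standalone sketch of that step follows; the function name and parameters are illustrative, not part of the Kamaelia component. Note that the original reads runningtime.seconds, which only covers the 0-86399 second range within a day and ignores the days component of a timedelta; total_seconds() is used here to avoid that pitfall.

from datetime import datetime, timedelta

def minutes_into_programme(prog_start_epoch, timediff, utc_offset_secs, tweet_epoch):
    # Shift the recorded timestamp back to the actual programme start,
    # then express both times in the broadcaster's local time.
    progdate = datetime.utcfromtimestamp(prog_start_epoch) + timedelta(seconds=utc_offset_secs)
    actual_start = progdate - timedelta(seconds=timediff)
    tweet_time = datetime.utcfromtimestamp(tweet_epoch + utc_offset_secs)
    elapsed = tweet_time - actual_start          # a timedelta
    return max(0.0, elapsed.total_seconds() / 60.0)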

Example 40

Project: mpop Source File: viirs_compact.py
def load(satscene, *args, **kwargs):
    del args

    files_to_load = []
    files_to_delete = []

    try:
        filename = kwargs.get("filename")
        logger.debug("reading %s", str(filename))
        if filename is not None:
            if isinstance(filename, (list, set, tuple)):
                files = filename
            else:
                files = [filename]
            files_to_load = []
            for filename in files:
                pathname, ext = os.path.splitext(filename)
                if ext == ".bz2":
                    zipfile = bz2.BZ2File(filename)
                    newname = os.path.join("/tmp", os.path.basename(pathname))
                    if not os.path.exists(newname):
                        with open(newname, "wb") as fp_:
                            fp_.write(zipfile.read())
                    zipfile.close()
                    files_to_load.append(newname)
                    files_to_delete.append(newname)
                else:
                    files_to_load.append(filename)
        else:
            time_start, time_end = kwargs.get("time_interval",
                                              (satscene.time_slot, None))

            conf = ConfigParser()
            conf.read(os.path.join(CONFIG_PATH, satscene.fullname + ".cfg"))
            options = {}
            for option, value in conf.items(satscene.instrument_name + "-level2",
                                            raw=True):
                options[option] = value

            template = os.path.join(options["dir"], options["filename"])

            second = timedelta(seconds=1)
            files_to_load = []

            if time_end is not None:
                time = time_start - second * 85
                files_to_load = []
                while time <= time_end:
                    fname = time.strftime(template)
                    flist = glob.glob(fname)
                    try:
                        files_to_load.append(flist[0])
                        time += second * 80
                    except IndexError:
                        pass
                    time += second

            else:
                files_to_load = glob.glob(time_start.strftime(template))

        chan_dict = {"M01": "M1",
                     "M02": "M2",
                     "M03": "M3",
                     "M04": "M4",
                     "M05": "M5",
                     "M06": "M6",
                     "M07": "M7",
                     "M08": "M8",
                     "M09": "M9",
                     "M10": "M10",
                     "M11": "M11",
                     "M12": "M12",
                     "M13": "M13",
                     "M14": "M14",
                     "M15": "M15",
                     "M16": "M16",
                     "DNB": "DNB"}

        channels = [(chn, chan_dict[chn])
                    for chn in satscene.channels_to_load
                    if chn in chan_dict]
        try:
            channels_to_load, chans = zip(*channels)
        except ValueError:
            return

        m_chans = []
        dnb_chan = []
        for chn in chans:
            if chn.startswith('M'):
                m_chans.append(chn)
            elif chn.startswith('DNB'):
                dnb_chan.append(chn)
            else:
                raise ValueError("Reading of channel %s not implemented", chn)

        m_datas = []
        m_lonlats = []
        dnb_datas = []
        dnb_lonlats = []

        for fname in files_to_load:
            is_dnb = os.path.basename(fname).startswith('SVDNBC')
            logger.debug("Reading %s", fname)
            if is_dnb:
                if tables:
                    h5f = tables.open_file(fname, "r")
                else:
                    logger.warning("DNB data could not be read from %s, "
                                   "PyTables not available.", fname)
                    continue
            else:
                h5f = h5py.File(fname, "r")
            if m_chans and not is_dnb:
                try:
                    arr, m_units = read_m(h5f, m_chans)
                    m_datas.append(arr)
                    m_lonlats.append(navigate_m(h5f, m_chans[0]))
                except KeyError:
                    pass
            if dnb_chan and is_dnb and tables:
                try:
                    arr, dnb_units = read_dnb(h5f)
                    dnb_datas.append(arr)
                    dnb_lonlats.append(navigate_dnb(h5f))
                except KeyError:
                    pass
            h5f.close()

        if len(m_lonlats) > 0:
            m_lons = np.ma.vstack([lonlat[0] for lonlat in m_lonlats])
            m_lats = np.ma.vstack([lonlat[1] for lonlat in m_lonlats])
        if len(dnb_lonlats) > 0:
            dnb_lons = np.ma.vstack([lonlat[0] for lonlat in dnb_lonlats])
            dnb_lats = np.ma.vstack([lonlat[1] for lonlat in dnb_lonlats])

        m_i = 0
        dnb_i = 0
        for chn in channels_to_load:
            if m_datas and chn.startswith('M'):
                m_data = np.ma.vstack([dat[m_i] for dat in m_datas])
                satscene[chn] = m_data
                satscene[chn].info["units"] = m_units[m_i]
                m_i += 1
            if dnb_datas and chn.startswith('DNB'):
                dnb_data = np.ma.vstack([dat[dnb_i] for dat in dnb_datas])
                satscene[chn] = dnb_data
                satscene[chn].info["units"] = dnb_units[dnb_i]
                dnb_i += 1

        if m_datas:
            m_area_def = SwathDefinition(np.ma.masked_where(m_data.mask, m_lons),
                                         np.ma.masked_where(m_data.mask, m_lats))
        else:
            logger.warning("No M channel data available.")

        if dnb_datas:
            dnb_area_def = SwathDefinition(np.ma.masked_where(dnb_data.mask,
                                                              dnb_lons),
                                           np.ma.masked_where(dnb_data.mask,
                                                              dnb_lats))
        else:
            logger.warning("No DNB data available.")

        for chn in channels_to_load:
            if "DNB" not in chn and m_datas:
                satscene[chn].area = m_area_def

        if dnb_datas:
            for chn in dnb_chan:
                satscene[chn].area = dnb_area_def

    finally:
        for fname in files_to_delete:
            if os.path.exists(fname):
                os.remove(fname)
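
Here datetime.timedelta drives the file search: second = timedelta(seconds=1) is used both to step through the requested interval one second at a time and to jump ahead roughly one granule length (second * 80, after starting second * 85 before the interval). A self-contained sketch of that scan follows; time_start and time_end are assumed to be datetime objects, and the template path is a made-up example, not the mpop configuration format.

import glob
from datetime import timedelta

def find_granules(time_start, time_end, template="/data/SVM05C_%Y%m%d_%H%M%S.h5"):
    second = timedelta(seconds=1)
    found = []
    time = time_start - second * 85       # look slightly before the interval
    while time <= time_end:
        flist = glob.glob(time.strftime(template))
        if flist:
            found.append(flist[0])
            time += second * 80           # a granule covers ~85 s, so skip ahead
        time += second
    return found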

Example 41

Project: NOT_UPDATED_Sick-Beard-Dutch Source File: tz.py
    def __init__(self, fileobj):
        if isinstance(fileobj, basestring):
            self._filename = fileobj
            fileobj = open(fileobj)
        elif hasattr(fileobj, "name"):
            self._filename = fileobj.name
        else:
            self._filename = `fileobj`

        # From tzfile(5):
        #
        # The time zone information files used by tzset(3)
        # begin with the magic characters "TZif" to identify
        # them as time zone information files, followed by
        # sixteen bytes reserved for future use, followed by
        # six four-byte values of type long, written in a
        # ``standard'' byte order (the high-order  byte
        # of the value is written first).

        if fileobj.read(4) != "TZif":
            raise ValueError, "magic not found"

        fileobj.read(16)

        (
         # The number of UTC/local indicators stored in the file.
         ttisgmtcnt,

         # The number of standard/wall indicators stored in the file.
         ttisstdcnt,
         
         # The number of leap seconds for which data is
         # stored in the file.
         leapcnt,

         # The number of "transition times" for which data
         # is stored in the file.
         timecnt,

         # The number of "local time types" for which data
         # is stored in the file (must not be zero).
         typecnt,

         # The  number  of  characters  of "time zone
         # abbreviation strings" stored in the file.
         charcnt,

        ) = struct.unpack(">6l", fileobj.read(24))

        # The above header is followed by tzh_timecnt four-byte
        # values  of  type long,  sorted  in ascending order.
        # These values are written in ``standard'' byte order.
        # Each is used as a transition time (as  returned  by
        # time(2)) at which the rules for computing local time
        # change.

        if timecnt:
            self._trans_list = struct.unpack(">%dl" % timecnt,
                                             fileobj.read(timecnt*4))
        else:
            self._trans_list = []

        # Next come tzh_timecnt one-byte values of type unsigned
        # char; each one tells which of the different types of
        # ``local time'' types described in the file is associated
        # with the same-indexed transition time. These values
        # serve as indices into an array of ttinfo structures that
        # appears next in the file.
        
        if timecnt:
            self._trans_idx = struct.unpack(">%dB" % timecnt,
                                            fileobj.read(timecnt))
        else:
            self._trans_idx = []
        
        # Each ttinfo structure is written as a four-byte value
        # for tt_gmtoff  of  type long,  in  a  standard  byte
        # order, followed  by a one-byte value for tt_isdst
        # and a one-byte  value  for  tt_abbrind.   In  each
        # structure, tt_gmtoff  gives  the  number  of
        # seconds to be added to UTC, tt_isdst tells whether
        # tm_isdst should be set by  localtime(3),  and
        # tt_abbrind serves  as an index into the array of
        # time zone abbreviation characters that follow the
        # ttinfo structure(s) in the file.

        ttinfo = []

        for i in range(typecnt):
            ttinfo.append(struct.unpack(">lbb", fileobj.read(6)))

        abbr = fileobj.read(charcnt)

        # Then there are tzh_leapcnt pairs of four-byte
        # values, written in  standard byte  order;  the
        # first  value  of  each pair gives the time (as
        # returned by time(2)) at which a leap second
        # occurs;  the  second  gives the  total  number of
        # leap seconds to be applied after the given time.
        # The pairs of values are sorted in ascending order
        # by time.

        # Not used, for now
        if leapcnt:
            leap = struct.unpack(">%dl" % (leapcnt*2),
                                 fileobj.read(leapcnt*8))

        # Then there are tzh_ttisstdcnt standard/wall
        # indicators, each stored as a one-byte value;
        # they tell whether the transition times associated
        # with local time types were specified as standard
        # time or wall clock time, and are used when
        # a time zone file is used in handling POSIX-style
        # time zone environment variables.

        if ttisstdcnt:
            isstd = struct.unpack(">%db" % ttisstdcnt,
                                  fileobj.read(ttisstdcnt))

        # Finally, there are tzh_ttisgmtcnt UTC/local
        # indicators, each stored as a one-byte value;
        # they tell whether the transition times associated
        # with local time types were specified as UTC or
        # local time, and are used when a time zone file
        # is used in handling POSIX-style time zone envi-
        # ronment variables.

        if ttisgmtcnt:
            isgmt = struct.unpack(">%db" % ttisgmtcnt,
                                  fileobj.read(ttisgmtcnt))

        # ** Everything has been read **

        # Build ttinfo list
        self._ttinfo_list = []
        for i in range(typecnt):
            gmtoff, isdst, abbrind =  ttinfo[i]
            # Round to full-minutes if that's not the case. Python's
            # datetime doesn't accept sub-minute timezones. Check
            # http://python.org/sf/1447945 for some information.
            gmtoff = (gmtoff+30)//60*60
            tti = _ttinfo()
            tti.offset = gmtoff
            tti.delta = datetime.timedelta(seconds=gmtoff)
            tti.isdst = isdst
            tti.abbr = abbr[abbrind:abbr.find('\x00', abbrind)]
            tti.isstd = (ttisstdcnt > i and isstd[i] != 0)
            tti.isgmt = (ttisgmtcnt > i and isgmt[i] != 0)
            self._ttinfo_list.append(tti)

        # Replace ttinfo indexes for ttinfo objects.
        trans_idx = []
        for idx in self._trans_idx:
            trans_idx.append(self._ttinfo_list[idx])
        self._trans_idx = tuple(trans_idx)

        # Set standard, dst, and before ttinfos. before will be
        # used when a given time is before any transitions,
        # and will be set to the first non-dst ttinfo, or to
        # the first dst, if all of them are dst.
        self._ttinfo_std = None
        self._ttinfo_dst = None
        self._ttinfo_before = None
        if self._ttinfo_list:
            if not self._trans_list:
                self._ttinfo_std = self._ttinfo_first = self._ttinfo_list[0]
            else:
                for i in range(timecnt-1,-1,-1):
                    tti = self._trans_idx[i]
                    if not self._ttinfo_std and not tti.isdst:
                        self._ttinfo_std = tti
                    elif not self._ttinfo_dst and tti.isdst:
                        self._ttinfo_dst = tti
                    if self._ttinfo_std and self._ttinfo_dst:
                        break
                else:
                    if self._ttinfo_dst and not self._ttinfo_std:
                        self._ttinfo_std = self._ttinfo_dst

                for tti in self._ttinfo_list:
                    if not tti.isdst:
                        self._ttinfo_before = tti
                        break
                else:
                    self._ttinfo_before = self._ttinfo_list[0]

        # Now fix transition times to become relative to wall time.
        #
        # I'm not sure about this. In my tests, the tz source file
        # is setup to wall time, and in the binary file isstd and
        # isgmt are off, so it should be in wall time. OTOH, it's
        # always in gmt time. Let me know if you have comments
        # about this.
        laststdoffset = 0
        self._trans_list = list(self._trans_list)
        for i in range(len(self._trans_list)):
            tti = self._trans_idx[i]
            if not tti.isdst:
                # This is std time.
                self._trans_list[i] += tti.offset
                laststdoffset = tti.offset
            else:
                # This is dst time. Convert to std.
                self._trans_list[i] += laststdoffset
        self._trans_list = tuple(self._trans_list)
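
The single timedelta usage in this example is tti.delta = datetime.timedelta(seconds=gmtoff), where the raw tt_gmtoff value from the tzfile is first rounded to a whole minute because Python's datetime does not accept sub-minute UTC offsets. A tiny standalone illustration of that rounding (not part of the dateutil tz module itself):

import datetime

def offset_to_delta(gmtoff_seconds):
    # Round to the nearest full minute, as in the ttinfo loop above
    rounded = (gmtoff_seconds + 30) // 60 * 60
    return datetime.timedelta(seconds=rounded)

# e.g. offset_to_delta(19800) == datetime.timedelta(hours=5, minutes=30)   # UTC+05:30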

Example 42

Project: kamaelia_ Source File: TwitterSearch.py
Function: main
    def main(self):
        twitterurl = "http://api.twitter.com/1/users/search.json"

        if self.proxy:
            proxyhandler = urllib2.ProxyHandler({"http" : self.proxy})
            twitopener = urllib2.build_opener(proxyhandler)
            urllib2.install_opener(twitopener)

        headers = {'User-Agent' : "BBC R&D Grabber"}
        postdata = None

        if self.keypair == False:
            # Perform OAuth authentication - as we don't have the secret key pair we need to request it
            # This will require some user input
            request_token_url = 'http://api.twitter.com/oauth/request_token'
            access_token_url = 'http://api.twitter.com/oauth/access_token'
            authorize_url = 'http://api.twitter.com/oauth/authorize'

            token = None
            consumer = oauth.Consumer(key=self.consumerkeypair[0],secret=self.consumerkeypair[1])

            params = {
                        'oauth_version': "1.0",
                        'oauth_nonce': oauth.generate_nonce(),
                        'oauth_timestamp': int(time.time()),
                    }

            params['oauth_consumer_key'] = consumer.key

            req = oauth.Request(method="GET",url=request_token_url,parameters=params)

            signature_method = oauth.SignatureMethod_HMAC_SHA1()
            req.sign_request(signature_method, consumer, token)

            requestheaders = req.to_header()
            requestheaders['User-Agent'] = "BBC R&D Grabber"

            # Connect to Twitter
            try:
                req = urllib2.Request(request_token_url,None,requestheaders) # Why won't this work?!? Is it trying to POST?
                conn1 = urllib2.urlopen(req)
            except httplib.BadStatusLine, e:
                Print("PeopleSearch BadStatusLine error:", e )
                conn1 = False
            except urllib2.HTTPError, e:
                Print("PeopleSearch HTTP error:", e.code)
#                sys.stderr.write('PeopleSearch HTTP error: ' + str(e.code) + '\n')
                conn1 = False
            except urllib2.URLError, e:
                Print("PeopleSearch URL error: ", e.reason)
#                sys.stderr.write('PeopleSearch URL error: ' + str(e.reason) + '\n')
                conn1 = False

            if conn1:
                content = conn1.read()
                conn1.close()

                request_token = dict(urlparse.parse_qsl(content))

                Print( "Request Token:")
                Print("     - oauth_token        = " , request_token['oauth_token'])
                Print("     - oauth_token_secret = " , request_token['oauth_token_secret'])
                Print("")

                # The user must confirm authorisation so a URL is Printed here
                Print("Go to the following link in your browser:")
                Print("%s?oauth_token=%s" % (authorize_url, request_token['oauth_token']) )
                Print("")

                accepted = 'n'
                # Wait until the user has confirmed authorisation
                while accepted.lower() == 'n':
                    accepted = raw_input('Have you authorized me? (y/n) ')
                oauth_verifier = raw_input('What is the PIN? ')

                token = oauth.Token(request_token['oauth_token'],
                    request_token['oauth_token_secret'])
                token.set_verifier(oauth_verifier)

                params = {
                        'oauth_version': "1.0",
                        'oauth_nonce': oauth.generate_nonce(),
                        'oauth_timestamp': int(time.time()),
                    }

                params['oauth_token'] = token.key
                params['oauth_consumer_key'] = consumer.key

                req = oauth.Request(method="GET",url=access_token_url,parameters=params)

                signature_method = oauth.SignatureMethod_HMAC_SHA1()
                req.sign_request(signature_method, consumer, token)

                requestheaders = req.to_header()
                requestheaders['User-Agent'] = "BBC R&D Grabber"
                # Connect to Twitter
                try:
                    req = urllib2.Request(access_token_url,"oauth_verifier=%s" % oauth_verifier,requestheaders) # Why won't this work?!? Is it trying to POST?
                    conn1 = urllib2.urlopen(req)
                except httplib.BadStatusLine, e:
#                    sys.stderr.write('PeopleSearch BadStatusLine error: ' + str(e) + '\n')
                    Print('PeopleSearch BadStatusLine error: ', e)
                    conn1 = False
                except urllib2.HTTPError, e:
                    Print('PeopleSearch HTTP error: ', e.code)
                    conn1 = False
                except urllib2.URLError, e:
#                    sys.stderr.write('PeopleSearch URL error: ' + str(e.reason) + '\n')
                    Print('PeopleSearch URL error: ', e.reason)
                    conn1 = False

                if conn1:
                    content = conn1.read()
                    conn1.close()
                    access_token = dict(urlparse.parse_qsl(content))

                    # Access tokens retrieved from Twitter
                    Print("Access Token:")
                    Print("     - oauth_token        = " , access_token['oauth_token'])
                    Print("     - oauth_token_secret = " , access_token['oauth_token_secret'])
                    Print("")
                    Print("You may now access protected resources using the access tokens above.")
                    Print("")

                    save = False
                    # Load config to save OAuth keys
                    try:
                        homedir = os.path.expanduser("~")
                        file = open(homedir + "/twitter-login.conf",'r')
                        save = True
                    except IOError, e:
                        Print ("Failed to load config file - not saving oauth keys: " , e)

                    if save:
                        raw_config = file.read()

                        file.close()

                        # Read config and add new values
                        config = cjson.decode(raw_config)
                        config['key'] = access_token['oauth_token']

                        config['secret'] = access_token['oauth_token_secret']

                        raw_config = cjson.encode(config)

                        # Write out the new config file
                        try:
                            file = open(homedir + "/twitter-login.conf",'w')
                            file.write(raw_config)
                            file.close()
                        except IOError, e:
                            Print ("Failed to save oauth keys: " , e)

                    self.keypair = [access_token['oauth_token'], access_token['oauth_token_secret']]
        

        while not self.finished():
            # TODO: Implement backoff algorithm in case of connection failures - watch out for the fact this could delay the requester component
            if self.dataReady("inbox"):
                # Retrieve keywords to look up
                person = self.recv("inbox")

                # Ensure we're not rate limited during the first request - if so we'll wait for 15 mins before our next request
                if (datetime.today() - timedelta(minutes=15)) > self.ratelimited:
                    requesturl = twitterurl + "?q=" + urllib.quote(person) + "&per_page=5"

                    params = {
                        'oauth_version': "1.0",
                        'oauth_nonce': oauth.generate_nonce(),
                        'oauth_timestamp': int(time.time()),
                    }

                    token = oauth.Token(key=self.keypair[0],secret=self.keypair[1])
                    consumer = oauth.Consumer(key=self.consumerkeypair[0],secret=self.consumerkeypair[1])

                    params['oauth_token'] = token.key
                    params['oauth_consumer_key'] = consumer.key

                    req = oauth.Request(method="GET",url=requesturl,parameters=params)

                    signature_method = oauth.SignatureMethod_HMAC_SHA1()
                    req.sign_request(signature_method, consumer, token)

                    requestheaders = req.to_header()
                    requestheaders['User-Agent'] = "BBC R&D Grabber"

                    # Connect to Twitter
                    try:
                        req = urllib2.Request(requesturl,None,requestheaders) # Why won't this work?!? Is it trying to POST?
                        conn1 = urllib2.urlopen(req)
                    except httplib.BadStatusLine, e:
#                        sys.stderr.write('PeopleSearch BadStatusLine error: ' + str(e) + '\n')
                        Print('PeopleSearch BadStatusLine error: ', e)
                        conn1 = False
                    except urllib2.HTTPError, e:
#                        sys.stderr.write('PeopleSearch HTTP error: ' + str(e.code) + '\n')
                        Print('PeopleSearch HTTP error: ', e.code)
                        conn1 = False
                    except urllib2.URLError, e:
#                        sys.stderr.write('PeopleSearch URL error: ' + str(e.reason) + '\n')
                        Print('PeopleSearch URL error: ', e.reason)
                        conn1 = False

                    if conn1:
                        # Check rate limiting here and Print current limit
                        headers = conn1.info()
                        try:
                            headerlist = string.split(str(headers),"\n")
                        except UnicodeEncodeError: # str may fail...
                            headerlist = []
                        for line in headerlist:
                            if line != "":
                                splitheader = line.split()
                                if splitheader[0] == "X-FeatureRateLimit-Remaining:" or splitheader[0] == "X-RateLimit-Remaining:":
                                    Print(splitheader[0] , " " , splitheader[1] )
                                    if int(splitheader[1]) < 5:
                                        self.ratelimited = datetime.today()
                        # Grab json format result of people search here
                        try:
                            data = conn1.read()
                            try:
                                content = cjson.decode(data)
                                self.send(content,"outbox")
                            except cjson.DecodeError, e:
                                self.send(dict(),"outbox")
                        except IOError, e:
#                            sys.stderr.write('PeopleSearch IO error: ' + str(e) + '\n')
                            Print('PeopleSearch IO error: ', e)
                            self.send(dict(),"outbox")
                        conn1.close()
                    else:
                        self.send(dict(),"outbox")
                else:
                   Print("Twitter search paused - rate limited")
                   self.send(dict(),"outbox")
            self.pause()
            yield 1
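
In this example timedelta implements a cool-down: the component records when it was last rate limited and only issues a new request once (datetime.today() - timedelta(minutes=15)) has moved past that mark. A stripped-down sketch of the same pattern, with illustrative class and method names rather than the Kamaelia component API:

from datetime import datetime, timedelta

class RateLimitedClient(object):
    COOLDOWN = timedelta(minutes=15)

    def __init__(self):
        # Start far enough in the past that the first request is always allowed
        self.ratelimited = datetime.today() - timedelta(minutes=20)

    def can_request(self):
        return (datetime.today() - self.COOLDOWN) > self.ratelimited

    def mark_rate_limited(self):
        self.ratelimited = datetime.today()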

Example 43

Project: timestring Source File: Date.py
    def __init__(self, date, offset=None, start_of_week=None, tz=None, verbose=False):
        if isinstance(date, Date):
            self.date = copy(date.date)
            return

        # The original request
        self._original = date
        if tz:
            tz = pytz.timezone(str(tz))

        if date == 'infinity':
            self.date = 'infinity'

        elif date == 'now':
            self.date = datetime.now()

        elif type(date) in (str, unicode) and re.match(r"\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d+-\d{2}", date):
            self.date = datetime.strptime(date[:-3], "%Y-%m-%d %H:%M:%S.%f") - timedelta(hours=int(date[-3:]))

        else:
            # Determine the starting date.
            if type(date) in (str, unicode):
                """The date is a string and needs to be converted into a <dict> for processesing
                """
                _date = date.lower()
                res = TIMESTRING_RE.search(_date.strip())
                if res:
                    date = res.groupdict()
                    if verbose:
                        print("Matches:\n", ''.join(["\t%s: %s\n" % (k, v) for k, v in date.items() if v]))
                else:
                    raise TimestringInvalid('Invalid date string >> %s' % date)

                date = dict((k, v if type(v) is str else v) for k, v in date.items() if v)
                #print(_date, dict(map(lambda a: (a, date.get(a)), filter(lambda a: date.get(a), date))))

            if isinstance(date, dict):
                # Initial date.
                new_date = datetime(*time.localtime()[:3])
                if tz and tz.zone != "UTC":
                    #
                    # The purpose here is to adjust what day it is based on the timezone
                    #
                    ts = datetime.now()
                    # Daylight savings === second Sunday in March and reverts to standard time on the first Sunday in November
                    # Monday is 0 and Sunday is 6.
                    # 14 days - dst_start.weekday()
                    dst_start = datetime(ts.year, 3, 1, 2, 0, 0) + timedelta(13 - datetime(ts.year, 3, 1).weekday())
                    dst_end = datetime(ts.year, 11, 1, 2, 0, 0) + timedelta(6 - datetime(ts.year, 11, 1).weekday())

                    ts = ts + tz.utcoffset(new_date, is_dst=(dst_start < ts < dst_end))
                    new_date = datetime(ts.year, ts.month, ts.day)

                if date.get('unixtime'):
                    new_date = datetime.fromtimestamp(int(date.get('unixtime')))

                # !number of (days|...) (ago)?
                elif date.get('num') and (date.get('delta') or date.get('delta_2')):
                    if date.get('num', '').find('couple') > -1:
                        i = 2 * int(1 if date.get('ago', True) or date.get('ref') == 'last' else -1)
                    else:
                        i = int(text2num(date.get('num', 'one'))) * int(1 if date.get('ago') or (date.get('ref', '') or '') == 'last' else -1)

                    delta = (date.get('delta') or date.get('delta_2')).lower()
                    if delta.startswith('y'):
                        try:
                            new_date = new_date.replace(year=(new_date.year - i))
                        # day is out of range for month
                        except ValueError:
                            new_date = new_date - timedelta(days=(365*i))
                    elif delta.startswith('month'):
                        try:
                            new_date = new_date.replace(month=(new_date.month - i))
                        # day is out of range for month
                        except ValueError:
                            new_date = new_date - timedelta(days=(30*i))

                    elif delta.startswith('q'):
                        '''
                        This section is not working...
                        Most likely need a generator that will take me to the right quarter.
                        '''
                        q1, q2, q3, q4 = datetime(new_date.year, 1, 1), datetime(new_date.year, 4, 1), datetime(new_date.year, 7, 1), datetime(new_date.year, 10, 1)
                        if q1 <= new_date < q2:
                            # We are in Q1
                            if i == -1:
                                new_date = datetime(new_date.year-1, 10, 1)
                            else:
                                new_date = q2
                        elif q2 <= new_date < q3:
                            # We are in Q2
                            pass
                        elif q3 <= new_date < q4:
                            # We are in Q3
                            pass
                        else:
                            # We are in Q4
                            pass
                        new_date = new_date - timedelta(days=(91*i))

                    elif delta.startswith('w'):
                        new_date = new_date - timedelta(days=(i * 7))

                    else:
                        new_date = new_date - timedelta(**{('days' if delta.startswith('d') else 'hours' if delta.startswith('h') else 'minutes' if delta.startswith('m') else 'seconds'): i})

                # !dow
                if [date.get(key) for key in ('day', 'day_2', 'day_3') if date.get(key)]:
                    dow = max([date.get(key) for key in ('day', 'day_2', 'day_3') if date.get(key)])
                    iso = dict(monday=1, tuesday=2, wednesday=3, thursday=4, friday=5, saturday=6, sunday=7, mon=1, tue=2, tues=2, wed=3, wedn=3, thu=4, thur=4, fri=5, sat=6, sun=7).get(dow)
                    if iso:
                        # determine which direction
                        if date.get('ref') not in ('this', 'next'):
                            days = iso - new_date.isoweekday() - (7 if iso >= new_date.isoweekday() else 0)
                        else:
                            days = iso - new_date.isoweekday() + (7 if iso < new_date.isoweekday() else 0)

                        new_date = new_date + timedelta(days=days)

                    elif dow == 'yesterday':
                        new_date = new_date - timedelta(days=1)
                    elif dow == 'tomorrow':
                        new_date = new_date + timedelta(days=1)

                # !year
                year = [int(CLEAN_NUMBER.sub('', date[key])) for key in ('year', 'year_2', 'year_3', 'year_4', 'year_5', 'year_6') if date.get(key)]
                if year:
                    year = max(year)
                    if len(str(year)) != 4:
                        year += 2000 if year <= 40 else 1900
                    new_date = new_date.replace(year=year)

                # !month
                month = [date.get(key) for key in ('month', 'month_1', 'month_2', 'month_3', 'month_4') if date.get(key)]
                if month:
                    new_date = new_date.replace(day=1)
                    new_date = new_date.replace(month=int(max(month)) if re.match('^\d+$', max(month)) else dict(january=1, february=2, march=3, april=4, june=6, july=7, august=8, september=9, october=10, november=11, december=12, jan=1, feb=2, mar=3, apr=4, may=5, jun=6, jul=7, aug=8, sep=9, sept=9, oct=10, nov=11, dec=12).get(max(month),  new_date.month))

                # !day
                day = [date.get(key) for key in ('date', 'date_2', 'date_3') if date.get(key)]
                if day:
                    new_date = new_date.replace(day=int(max(day)))

                # !daytime
                if date.get('daytime'):
                    if date['daytime'].find('this time') >= 1:
                        new_date = new_date.replace(hour=datetime(*time.localtime()[:5]).hour,
                                                    minute=datetime(*time.localtime()[:5]).minute)
                    else:
                        new_date = new_date.replace(hour=dict(morning=9, noon=12, afternoon=15, evening=18, night=21, nighttime=21, midnight=24).get(date.get('daytime'), 12))
                    # No offset because the hour was set.
                    offset = False

                # !hour
                hour = [date.get(key) for key in ('hour', 'hour_2', 'hour_3') if date.get(key)]
                if hour:
                    new_date = new_date.replace(hour=int(max(hour)))
                    am = [date.get(key) for key in ('am', 'am_1') if date.get(key)]
                    if am and max(am) in ('p', 'pm'):
                        h = int(max(hour))
                        if h < 12:
                            new_date = new_date.replace(hour=h+12)
                    # No offset because the hour was set.
                    offset = False

                    #minute
                    minute = [date.get(key) for key in ('minute', 'minute_2') if date.get(key)]
                    if minute:
                        new_date = new_date.replace(minute=int(max(minute)))

                    #second
                    seconds = date.get('seconds', 0)
                    if seconds:
                        new_date = new_date.replace(second=int(seconds))

                self.date = new_date

            elif type(date) in (int, long, float) and re.match('^\d{10}$', str(date)):
                self.date = datetime.fromtimestamp(int(date))

            elif isinstance(date, datetime):
                self.date = date

            elif date is None:
                self.date = datetime.now()

            else:
                # Set to the current date Y, M, D, H0, M0, S0
                self.date = datetime(*time.localtime()[:3])

            if tz:
                self.date = self.date.replace(tzinfo=tz)

            # end if type(date) is types.DictType: and self.date.hour == 0:
            if offset and isinstance(offset, dict):
                self.date = self.date.replace(**offset)
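
The branches above subtract or add timedelta objects built from the parsed unit ('2 weeks ago', 'in 3 hours', and so on). A minimal, standalone sketch of that keyword-expansion pattern, with illustrative names only:

# Minimal sketch of the relative-offset idea above: map a parsed unit to a
# timedelta keyword and subtract it from (or add it to) "now".
from datetime import datetime, timedelta

def relative(num, unit, ago=True):
    """Return a datetime `num` units in the past (ago=True) or future."""
    # Like the parser above, a bare 'm' is treated as minutes, not months.
    key = {'d': 'days', 'h': 'hours', 'm': 'minutes',
           's': 'seconds', 'w': 'weeks'}[unit[0]]
    delta = timedelta(**{key: num})
    return datetime.now() - delta if ago else datetime.now() + delta

print(relative(2, 'weeks'))                # roughly 14 days ago
print(relative(3, 'hours', ago=False))     # 3 hours from now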

Example 44

Project: baruwa Source File: sendpdfreports.py
    def handle(self, *args, **options):
        if len(args) != 0:
            raise CommandError(_("Command doesn't accept any arguments"))

        by_domain = options.get('by_domain')
        domain_name = options.get('domain_name')
        copy_admin = options.get('copy_admin')
        period = options.get('period')
        include_daily = options.get('include_daily')
        startdate =  options.get('startdate')
        end_date =  options.get('enddate')
        enddate = None

        if startdate and end_date:
            if not checkdate(startdate) or not checkdate(end_date):
                raise CommandError(_("The startdate, enddate specified is invalid"))
            daterange = (startdate, end_date)
        else:
            daterange = None

        period_re = re.compile(r"(?P<num>(\d+))\s+(?P<period>(day|week|month))(?:s)?")
        if period:
            match = period_re.match(period)
            if not match:
                raise CommandError(_("The period you specified is invalid"))
            num = match.group('num')
            ptype = match.group('period')
            if not ptype.endswith('s'):
                ptype = ptype + 's'
            delta = datetime.timedelta(**{ptype: int(num)})
            enddate = datetime.date.today() - delta

        table_style = TableStyle([
            ('FONT', (0, 0), (-1, -1), 'Helvetica'),
            ('FONT', (0, 0), (-1, 0), 'Helvetica-Bold'),
            ('FONTSIZE', (0, 0), (-1, -1), 8),
            ('GRID', (0, 0), (-1, -1), 0.15, colors.black),
            ('ALIGN', (0, 0), (-1, 0), 'CENTER'),
            ('ALIGN', (4, 1), (-1, -1), 'CENTER'),
            ('ALIGN', (0, 0), (0, -1), 'CENTER'),
            ('VALIGN', (4, 1), (-1, -1), 'MIDDLE'),
            ('SPAN', (4, 1), (-1, -1)),
        ])

        styles = getSampleStyleSheet()

        reports = [
            [
                'from_address', {'from_address__exact': ""}, 'num_count',
                'Top senders by quantity'],
            [
                'from_address', {'from_address__exact': ""}, 'total_size',
                'Top senders by volume'],
            [
                'from_domain', {'from_domain__exact': ""}, 'num_count',
                'Top sender domains by quantity'],
            [
                'from_domain', {'from_domain__exact': ""}, 'total_size',
                'Top sender domains by volume'],
            [
                'to_address', {'to_address__exact': ""}, 'num_count',
                'Top recipients by quantity'],
            [
                'to_address', {'to_address__exact': ""}, 'total_size',
                'Top recipients by volume'],
            [
                'to_domain', {'to_domain__exact': "",
                'to_domain__isnull': False}, 'num_count',
                'Top recipient domains by quantity'],
            [
                'to_domain', {'to_domain__exact': "",
                'to_domain__isnull': False}, 'total_size',
                'Top recipient domains by volume'],
        ]

        emails = []
        admin_addrs = []
        if copy_admin:
            mails = User.objects.values('email').filter(is_superuser=True)
            admin_addrs = [mail['email'] for mail in mails]

        from_email = getattr(settings, 'DEFAULT_FROM_EMAIL',
            'postmaster@localhost')
        url = getattr(settings, 'QUARANTINE_REPORT_HOSTURL', '')
        logo_dir = getattr(settings, 'MEDIA_ROOT', '')
        img = Image(logo_dir + '/imgs/css/logo.jpg')

        def build_chart(data, column, order, title):
            "build chart"
            headings = [('', _('Address'), _('Count'), _('Volume'), '')]
            rows = [[draw_square(PIE_CHART_COLORS[index]),
            tds_trunc(row[column], 45), row['num_count'],
            filesizeformat(row['total_size']), '']
            for index, row in enumerate(data)]

            if len(rows) != 10:
                missing = 10 - len(rows)
                add_rows = [
                    ('', '', '', '', '') for ind in range(missing)
                    ]
                rows.extend(add_rows)

            headings.extend(rows)
            dat = [row[order] for row in data]
            total = sum(dat)
            labels = [
                    ("%.1f%%" % ((1.0 * row[order] / total) * 100))
                    for row in data
                ]

            pie = PieChart()
            pie.chart.labels = labels
            pie.chart.data = dat
            headings[1][4] = pie

            table_with_style = Table(headings, [0.2 * inch,
                2.8 * inch, 0.5 * inch, 0.7 * inch, 3.2 * inch])
            table_with_style.setStyle(table_style)

            paragraph = Paragraph(title, styles['Heading1'])

            return [paragraph, table_with_style]

        def build_parts(account, enddate, isdom=None, daterange=None):
            "build parts"
            parts = []
            sentry = 0
            for report in reports:
                column = report[0]
                exclude_kwargs = report[1]
                order_by = "-%s" % report[2]
                order = report[2]
                title = report[3]

                if isdom:
                    #dom
                    data = Message.objects.values(column).\
                    filter(Q(from_domain=account.address) | \
                    Q(to_domain=account.address)).\
                    exclude(**exclude_kwargs).annotate(
                        num_count=Count(column), total_size=Sum('size')
                    ).order_by(order_by)
                    if daterange:
                        data.filter(date__range=(daterange[0], daterange[1]))
                    elif enddate:
                        data.filter(date__gt=enddate)
                    data = data[:10]
                else:
                    #all users
                    data = Message.report.all(user, enddate, daterange).values(
                            column).exclude(**exclude_kwargs).annotate(
                            num_count=Count(column), total_size=Sum('size')
                            ).order_by(order_by)
                    data = data[:10]

                if data:
                    sentry += 1
                    pgraphs = build_chart(data, column, order, title)
                    parts.extend(pgraphs)
                    parts.append(Spacer(1, 70))
                    if (sentry % 2) == 0:
                        parts.append(PageBreak())
            parts.append(Paragraph(_('Message Totals'), styles['Heading1']))
            if isdom:
                #doms
                msg_totals = MessageTotals.objects.doms(account.address, enddate)
            else:
                #norm
                filters = []
                addrs = [
                    addr.address for addr in UserAddresses.objects.filter(
                        user=account
                    ).exclude(enabled__exact=0)]
                if enddate:
                    efilter = {
                                'filter': 3,
                                'field': 'date',
                                'value': str(enddate)
                               }
                    filters.append(efilter)
                msg_totals = MessageTotals.objects.all(
                                account, filters, addrs,
                                profile.account_type,
                                daterange)

            mail_total = []
            spam_total = []
            virus_total = []
            dates = []
            if include_daily:
                rows = [(
                Table([[draw_square(colors.white),
                Paragraph('Date', styles["Heading6"])]],
                [0.35 * inch, 1.50 * inch, ]),
                Table([[draw_square(colors.green),
                Paragraph('Mail totals', styles["Heading6"])]],
                [0.35 * inch, 1.50 * inch, ]),
                Table([[draw_square(colors.pink),
                Paragraph('Spam totals', styles["Heading6"])]],
                [0.35 * inch, 1.50 * inch, ]),
                Table([[draw_square(colors.red),
                Paragraph('Virus totals', styles["Heading6"])]],
                [0.35 * inch, 1.50 * inch, ]),
                )]
            for ind, msgt in enumerate(msg_totals):
                if ind % 10:
                    dates.append('')
                else:
                    dates.append(str(msgt.date))

                mail_total.append(int(msgt.mail_total))
                spam_total.append(int(msgt.spam_total))
                virus_total.append(int(msgt.virus_total))
                if include_daily:
                    rows.append((str(msgt.date), msgt.mail_total,
                    msgt.spam_total, msgt.virus_total))

            graph = BarChart()
            graph.chart.data = [
                    tuple(mail_total), tuple(spam_total),
                    tuple(virus_total)
                ]
            graph.chart.categoryAxis.categoryNames = dates
            graph_table = Table([[graph]], [7.4 * inch])
            parts.append(graph_table)
            if include_daily:
                rows.append(('Totals', sum(mail_total), sum(spam_total),
                sum(virus_total)))
                parts.append(Spacer(1, 20))
                graph_table = Table(rows, [1.85 * inch, 1.85 * inch,
                1.85 * inch, 1.85 * inch, ])
                graph_table.setStyle(TableStyle([
                ('FONTSIZE', (0, 0), (-1, -1), 8),
                ('FONT', (0, 0), (-1, -1), 'Helvetica'),
                ('FONT', (0, 0), (-1, 0), 'Helvetica-Bold'),
                ('GRID', (0, 0), (-1, -1), 0.15, colors.black),
                ('FONT', (0, -1), (-1, -1), 'Helvetica-Bold'),
                #('BACKGROUND', (0, -1), (-1, -1), colors.green),
                ]))
                parts.append(graph_table)
            return parts

        def build_pdf(charts):
            "Build a PDF"
            pdf = StringIO()
            doc = SimpleDocTemplate(pdf, topMargin=50, bottomMargin=18)
            logo = [(img, _('Baruwa mail report'))]
            logo_table = Table(logo, [2.0 * inch, 5.4 * inch])
            logo_table.setStyle(TableStyle([
            ('FONT', (0, 0), (-1, 0), 'Helvetica-Bold'),
            ('ALIGN', (0, 0), (-1, 0), 'LEFT'),
            ('ALIGN', (1, 0), (-1, 0), 'RIGHT'),
            ('FONTSIZE', (1, 0), (-1, 0), 10),
            ('LINEBELOW', (0, 0), (-1, -1), 0.15, colors.black),
            ]))
            parts = [logo_table]
            parts.append(Spacer(1, 20))
            parts.extend(charts)
            try:
                doc.build(parts)
            except IndexError:
                pass
            return pdf

        def gen_email(pdf, user, owner):
            "generate and return email"
            text_content = render_to_string('reports/pdf_report.txt',
                {'user': user, 'url': url})
            subject = _('Baruwa usage report for: %(user)s') % {
                        'user': owner}
            if email_re.match(user.username):
                toaddr = user.username
            if email_re.match(user.email):
                toaddr = user.email

            if admin_addrs:
                msg = EmailMessage(subject, text_content, from_email, [toaddr], admin_addrs)
            else:
                msg = EmailMessage(subject, text_content, from_email, [toaddr])
            msg.attach('baruwa.pdf', pdf.getvalue(), "application/pdf")
            print _("* Queue %(user)s's report to: %(addr)s") % {
                'user': owner, 'addr': toaddr}
            pdf.close()
            return msg

        print _("=================== Processing reports ======================")
        if by_domain:
            #do domain query
            #print "camacamlilone"
            domains = UserAddresses.objects.filter(Q(enabled=1), Q(address_type=1))
            if domain_name != 'all':
                domains = domains.filter(address=domain_name)
                if not domains:
                    print _("========== domain name %(dom)s does not exist ==========") % {
                    'dom': domain_name
                    }
            for domain in domains:
                if email_re.match(domain.user.email):
                    parts = build_parts(domain, enddate, True, daterange)
                    if parts:
                        pdf = build_pdf(parts)
                        email = gen_email(pdf, domain.user, domain.address)
                        emails.append(email)
        else:
            #do normal query
            profiles = UserProfile.objects.filter(send_report=1)
            for profile in profiles:
                try:
                    user = profile.user
                    if email_re.match(user.email) or email_re.match(user.username):
                        parts = build_parts(user, enddate, False, daterange)
                        if parts:
                            pdf = build_pdf(parts)
                            email = gen_email(pdf, user, user.username)
                            emails.append(email)
                except User.DoesNotExist:
                    pass

        if emails:
            try:
                conn = SMTPConnection()
                conn.send_messages(emails)
                print _("====== sending %(num)s messages =======") % {
                        'num': str(len(emails))}
            except Exception, exception:
                print _("Sending failed ERROR: %(error)s") % {'error': str(exception)}

Example 45

Project: syndicate Source File: SMDS_auth.py
Function: init
   def __init__(self, api):
      """
      auth=Auth(globals(), db)

      - environment is there for legacy but unused (awful)
      - db has to be the database where to create tables for authentication

      """
      controller = 'default'
      cas_provider = None
      
      self.db = None
      self.environment = current
      request = current.request
      session = current.session
      auth = session.auth
      if auth and auth.last_visit and auth.last_visit + \
               datetime.timedelta(days=0, seconds=auth.expiration) > request.now:
         self.user = auth.user
         # this is a trick to speed up sessions
         if (request.now - auth.last_visit).seconds > (auth.expiration/10):
               auth.last_visit = request.now
      else:
         self.user = None
         session.auth = None
      settings = self.settings = Settings()

      # ## what happens after login?

      # ## what happens after registration?

      settings.hideerror = False
      settings.cas_domains = [request.env.http_host]
      settings.cas_provider = cas_provider
      settings.extra_fields = {}
      settings.actions_disabled = []
      settings.reset_password_requires_verification = False
      settings.registration_requires_verification = False
      settings.registration_requires_approval = True
      settings.alternate_requires_registration = False
      settings.create_user_groups = False

      settings.controller = controller
      settings.login_url = self.url('user', args='login')
      settings.logged_url = self.url('user', args='profile')
      settings.download_url = self.url('download')
      settings.mailer = None
      settings.login_captcha = None
      settings.register_captcha = None
      settings.retrieve_username_captcha = None
      settings.retrieve_password_captcha = None
      settings.captcha = None
      settings.expiration = 3600            # one hour
      settings.long_expiration = 3600*30*24 # one month
      settings.remember_me_form = False
      settings.allow_basic_login = False
      settings.allow_basic_login_only = False
      settings.on_failed_authorization = \
         self.url('user',args='not_authorized')

      settings.on_failed_authentication = lambda x: redirect(x)

      settings.formstyle = 'table3cols'
      settings.label_separator = ': '

      # ## table names to be used

      settings.password_field = 'password'
      settings.table_user_name = 'auth_user'
      settings.table_group_name = 'auth_group'
      settings.table_membership_name = 'auth_membership'
      settings.table_permission_name = 'auth_permission'
      settings.table_event_name = 'auth_event'
      settings.table_cas_name = 'auth_cas'

      # ## if none, they will be created

      settings.table_user = None
      settings.table_group = None
      settings.table_membership = None
      settings.table_permission = None
      settings.table_event = None
      settings.table_cas = None

      # ##

      settings.showid = False

      # ## these should be functions or lambdas

      settings.login_next = self.url('index')
      settings.login_onvalidation = []
      settings.login_onaccept = []
      settings.login_methods = [self]
      settings.login_form = self
      settings.login_email_validate = True
      settings.login_userfield = "username"

      settings.logout_next = self.url('index')
      settings.logout_onlogout = lambda x: None

      settings.register_next = self.url('index')
      settings.register_onvalidation = []
      settings.register_onaccept = []
      settings.register_fields = None

      settings.verify_email_next = self.url('user', args='login')
      settings.verify_email_onaccept = []

      settings.profile_next = self.url('index')
      settings.profile_onvalidation = []
      settings.profile_onaccept = []
      settings.profile_fields = None
      settings.retrieve_username_next = self.url('index')
      settings.retrieve_password_next = self.url('index')
      settings.request_reset_password_next = self.url('user', args='login')
      settings.reset_password_next = self.url('user', args='login')

      settings.change_password_next = self.url('index')
      settings.change_password_onvalidation = []
      settings.change_password_onaccept = []

      settings.retrieve_password_onvalidation = []
      settings.reset_password_onvalidation = []

      settings.hmac_key = None
      settings.lock_keys = True


      # ## these are messages that can be customized
      messages = self.messages = Messages(current.T)
      messages.login_button = 'Login'
      messages.register_button = 'Register'
      messages.password_reset_button = 'Request reset password'
      messages.password_change_button = 'Change password'
      messages.profile_save_button = 'Save profile'
      messages.submit_button = 'Submit'
      messages.verify_password = 'Verify Password'
      messages.delete_label = 'Check to delete:'
      messages.function_disabled = 'Function disabled'
      messages.access_denied = 'Insufficient privileges'
      messages.registration_verifying = 'Registration needs verification'
      messages.registration_pending = 'Registration is pending approval'
      messages.login_disabled = 'Login disabled by administrator'
      messages.logged_in = 'Logged in'
      messages.email_sent = 'Email sent'
      messages.unable_to_send_email = 'Unable to send email'
      messages.email_verified = 'Email verified'
      messages.logged_out = 'Logged out'
      messages.registration_successful = 'Registration successful'
      messages.invalid_email = 'Invalid email'
      messages.unable_send_email = 'Unable to send email'
      messages.invalid_login = 'Invalid login'
      messages.invalid_user = 'Invalid user'
      messages.invalid_password = 'Invalid password'
      messages.is_empty = "Cannot be empty"
      messages.mismatched_password = "Password fields don't match"
      messages.verify_email = 'A user wishes to join Syndicate.\nDetails:\n   Username: %(username)s\n   Email: %(email)s'
      messages.verify_email_subject = 'Email verification'
      messages.username_sent = 'Your username was emailed to you'
      messages.new_password_sent = 'A new password was emailed to you'
      messages.password_changed = 'Password changed'
      messages.retrieve_username = 'Your username is: %(username)s'
      messages.retrieve_username_subject = 'Username retrieve'
      messages.retrieve_password = 'Your password is: %(password)s'
      messages.retrieve_password_subject = 'Password retrieve'
      messages.reset_password = \
         'Click on the link http://...reset_password/%(key)s to reset your password'
      messages.reset_password_subject = 'Password reset'
      messages.invalid_reset_password = 'Invalid reset password'
      messages.profile_updated = 'Profile updated'
      messages.new_password = 'New password'
      messages.old_password = 'Old password'
      messages.group_description = \
         'Group uniquely assigned to user %(id)s'

      messages.register_log = 'User %(id)s Registered'
      messages.login_log = 'User %(id)s Logged-in'
      messages.login_failed_log = None
      messages.logout_log = 'User %(id)s Logged-out'
      messages.profile_log = 'User %(id)s Profile updated'
      messages.verify_email_log = 'User %(id)s Verification email sent'
      messages.retrieve_username_log = 'User %(id)s Username retrieved'
      messages.retrieve_password_log = 'User %(id)s Password retrieved'
      messages.reset_password_log = 'User %(id)s Password reset'
      messages.change_password_log = 'User %(id)s Password changed'
      messages.add_group_log = 'Group %(group_id)s created'
      messages.del_group_log = 'Group %(group_id)s deleted'
      messages.add_membership_log = None
      messages.del_membership_log = None
      messages.has_membership_log = None
      messages.add_permission_log = None
      messages.del_permission_log = None
      messages.has_permission_log = None
      messages.impersonate_log = 'User %(id)s is impersonating %(other_id)s'

      messages.label_first_name = 'First name'
      messages.label_last_name = 'Last name'
      messages.label_username = 'Username'
      messages.label_email = 'E-mail'
      messages.label_password = 'Password'
      messages.label_registration_key = 'Registration key'
      messages.label_reset_password_key = 'Reset Password key'
      messages.label_registration_id = 'Registration identifier'
      messages.label_role = 'Role'
      messages.label_description = 'Description'
      messages.label_user_id = 'User ID'
      messages.label_group_id = 'Group ID'
      messages.label_name = 'Name'
      messages.label_table_name = 'Table name'
      messages.label_record_id = 'Record ID'
      messages.label_time_stamp = 'Timestamp'
      messages.label_client_ip = 'Client IP'
      messages.label_origin = 'Origin'
      messages.label_remember_me = "Remember me (for 30 days)"
      messages['T'] = current.T
      messages.verify_password_comment = 'please input your password again'
      messages.lock_keys = True

      self.user = None
      self.api = api
      self.maint_email = api.config.MD_MAIL_SUPPORT_ADDRESS
      
      # disable stuff for now
      settings.actions_disabled.append('retrieve_username')
      settings.actions_disabled.append('retrieve_password')
      settings.actions_disabled.append('request_reset_password')
      settings.actions_disabled.append('profile')
      settings.actions_disabled.append('change_password')
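
The session check near the top of __init__ treats a login as valid while last_visit plus the expiration window (expressed as a timedelta in seconds) is still in the future. A minimal, standalone sketch with illustrative values:

# Minimal sketch of the session-expiry test above.
from datetime import datetime, timedelta

def session_valid(last_visit, expiration_seconds, now=None):
    """True while last_visit + expiration window is still ahead of `now`."""
    now = now or datetime.now()
    return last_visit + timedelta(days=0, seconds=expiration_seconds) > now

last = datetime.now() - timedelta(minutes=30)
print(session_valid(last, 3600))   # True: 30 minutes into a one-hour session
print(session_valid(last, 900))    # False: the 15-minute window has expired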

Example 46

Project: cpppo Source File: times.py
    @classmethod
    @mutexmethod( '_cls_lock' )
    def support_abbreviations( cls, region, exclude=None, at=None, reach=None, reset=False ):
        """Add all the DST and non-DST abbreviations for the specified region.  If a country code
        (eg. 'CA') is specified, we'll get all its timezones from pytz.country_timezones.
        Otherwise, we'll get all the matching '<region>[/<city>]' zone(s) from pytz's
        common_timezones.  Multiple invocations may be made, to include abbreviations covering
        multiple regions.

        We'll look for the first time transition within 'at' +/- 'reach' in pytz's
        _utc_transition_times list, and see if the timezone yields different timezone names and
        is_dst designations for the days surrounding that time.  If both are different, we'll use
        the abbreviations as DST and non-DST specific abbreviations.  There are instants when a
        timezone changes times, which are *NOT* DST changes; eg 'America/Eirunepe' (west Amazonas)
        on November 10, 2013, where the timezone switched from AMT (UTC-4) to ACT (UTC-5).  Neither
        of these are DST times; the timezone just permanently changed its offset.

        Once we find a set of DST-specific abbreviations for a timezone, we must ensure that they
        are consistent with the abbreviations that already appear in the abbreviations table.  For
        example, the 'IST' (Irish Summer Time) abbreviation presented by 'Europe/Dublin' timezone is
        different from the 'IST' (Israel Standard Time) presented by the 'Asia/Jerusalem' timezone.
        You cannot load them both at once.  If multiple timezones produce the same abbreviation,
        they must have the same DST transitions between 'at' +/- 'reach', or AmbiguousTimeZoneError
        will be raised -- the timezone abbreviations have ambiguous meaning, and the zones cannot be
        identified via abbreviation at the same time.

        Returns all the timezone abbreviations added to the class's _tzabbrev; you may want to check:

            region		= 'CA'
            abbrevs		= timestamp.support_abbreviations( region )
            assert abbrevs, "Invalid region %r: Matches no timezones" % region

        Timezone definitions change over time.  A 'reach' timedelta (default: 1 year) on either side
        of the 'at' (a naive UTC datetime, default: current time) is required, in order for multiple
        zones to use the same abbreviation with guaranteed consistent definitions.

        """
        if reset and cls._tzabbrev:
            log.detail( "Resetting %d timezone abbreviations: %r", len( cls._tzabbrev), cls._tzabbrev.keys() )
            cls._tzabbrev	= {}

        def format_dst( dst ):
            return "dst" if dst else "n/a" if dst is None else "   "

        # Check consistency during relevant time periods for all timezones using the same
        # DST-specific abbreviations.  It is problematic to have multiple timezones with the same
        # abbreviation but with different DST change times or UTC offsets.
        if reach is None:# 1 year on either side by default
            reach		= datetime.timedelta( 365 )
        if at is None:	 # around the current time by default (naive, UTC)
            at			= datetime.datetime.utcnow()
        oneday			= datetime.timedelta( 1 )

        # Take a (shallow) copy to update; only when we complete integrating all the target
        # timezones successfully do we commit the updated abbrev dict.
        abbrev			= cls._tzabbrev.copy()
        incompatible		= []
        exclusions		= set( zone_names( exclude ))
        log.info( "Excluding: %r", exclusions )
        for tz in zone_names( region ): # eg 'America/Vancouver', 'America/Dawson_Creek', ...
            if tz in exclusions:
                log.detail( "%-30s: Ignoring; excluded", tz )
                continue

            tzinfo		= pytz.timezone( tz )
            tzdetails		= []

            # Find the nearest future transition time (> at - reach), or the list length if all are
            # <=. This will compute the index where 'at - reach' should be inserted to maintain the
            # list order, so it may index one beyond the end of the list.  Pick the index (next
            # higher than 'at - reach') time, and make sure it is in the future (not beyond the end
            # of the list).  Get the list of the zones' (time,abbrev,DST) settings (one if purely
            # non-DST, or two if a DST zone or a zone that has changed its UTC offset) in tzdetails.
            nxt			= bisect.bisect( tzinfo._utc_transition_times, at - reach )
            lst			= bisect.bisect( tzinfo._utc_transition_times, at + reach )
            if nxt == len( tzinfo._utc_transition_times ) or nxt == lst:
                # This is (in the at +/- reach time span, anyway) probably a non-DST timezone.
                loc		= tzinfo.normalize( pytz.UTC.localize( at ).astimezone( tzinfo ))
                abb		= loc.strftime( "%Z" )
                dst		= bool( loc.dst() )
                off		= loc.utcoffset()
                log.detail( "%-30s: %-5s %s %s: no time change in %s to %s",
                            tzinfo, abb, format_offset( timedelta_total_seconds( off ), ms=False ), format_dst( dst ),
                            at - reach, at + reach )
                tzdetails	= [ (at,abb,dst,off) ]
            else:
                # A DST zone?; found 1 or more time change.  Uses times 1 day before/after to get
                # appropriate abbreviations.  NOTE: This may be a time change, but isn't necessarily
                # a DST/non-DST change!  So, insdst/outdst may be the same (False) for both.  All
                # _utc_transition_times are naive UTC; probe the tzinfo at +/- one day around then
                # change, interpreting the localized UTC time as a time in the 'tzinfo' zone, and
                # pytz.normalize it to correct the DST information.
                ins,out		= ( tzinfo._utc_transition_times[nxt] - oneday,
                                    tzinfo._utc_transition_times[nxt] + oneday )
                insloc,outloc	= ( tzinfo.normalize( pytz.UTC.localize( dt ).astimezone( tzinfo ))
                                    			for dt in ( ins, out ))
                insoff,outoff	= ( dt.utcoffset()	for dt in ( insloc, outloc ))	# The net UTC offset
                insabb,outabb	= ( dt.strftime( "%Z" ) for dt in ( insloc, outloc ))	# The timezone abbrev.
                insdst,outdst	= ( bool( dt.dst() )    for dt in ( insloc, outloc ))	# Is there a DST offset?
                msg		= "%-5s %s %s / %-5s %s %s" % (
                        insabb, format_offset( timedelta_total_seconds( insoff ), ms=False ), format_dst( insdst ),
                        outabb, format_offset( timedelta_total_seconds( outoff ), ms=False ), format_dst( outdst ))
                if insabb == outabb:
                    # This timezone has the same name for DST/non-DST (eg. 'Australia/Adelaide' CST
                    # Australian Central Standard Time ).  Thus, 'is_dst' will be None, and times
                    # during the DST transition will still be ambiguous.
                    msg		+= ": abbreviations are the same; will be ambiguous during DST overlap"
                    log.detail( "%-30s: %s", tzinfo, msg )
                    tzdetails	= [ (ins,insabb,None,insoff) ]
                else:
                    # A regular DST/non-DST time change (eg. 'Canada/Mountain'), or a zone offset
                    # change (eg. 'America/Eirunepe', both DST false).
                    if insdst == outdst == True:
                        # This timezone has different names, but both are DST.  Strange, but possible.
                        msg       += ": both zones indicate DST; allowing..."

                    log.detail( "%-30s: %s", tzinfo, msg )
                    tzdetails	= [ (ins,insabb,insdst,insoff), (out,outabb,outdst,outoff) ]

            # Save the non-DST (eg. 'MST', 'GMT') and DST (eg. 'MDT', 'IST', 'CEST') timezones.  If
            # either timezone abbreviation is already in the abbreviations list, make certain it is
            # the exact same timezone; same UTC offset, same transition times if a DST zone
            # (different name ok), during the relevant period of time.  For the non-DST zones, the
            # transition times are not as important -- all non-DST times are unambiguous, so long as
            # the overall UTC offset is the same.  The 'dt' here is always a naive UTC datetime.
            for dt,abb,dst,off in tzdetails:
                if abb in exclusions:
                    log.detail( "%-30s: Ignoring %s; excluded", tzinfo, abb )
                msg		= "%-5s %s %s" % (
                    abb, format_offset( timedelta_total_seconds( off ), ms=False ), format_dst( dst ))
                dup		= abb in abbrev
                if dup and not dst:
                    # A duplicate; non-DST or ambiguous, must have consistent UTC offset and DST
                    # designation.  We'll allow replacement of a dst=None (still ambiguous) zone with a dst=False zone

                    abbtzi,abbdst,abboff= abbrev[abb]
                    if abboff != off:
                        msg    += " x %-5s %s %s in %s; incompatible" % (
                            abb, format_offset( timedelta_total_seconds( abboff ), ms=False ), format_dst( abbdst ), abbtzi )
                        incompatible.append( "%s: %s" % ( tzinfo, msg ))
                        log.warning( "%-30s: %s", tzinfo, msg )
                    elif abbdst is None:
                        msg    += " ~ %-5s %s %s in %s; replacing ambiguous w/ concrete non-DST zone" % (
                            abb, format_offset( timedelta_total_seconds( abboff ), ms=False ), format_dst( abbdst ), abbtzi )
                        dup	= False

                if dup and dst:
                    # A duplicate; DST-specific, must be consistently specified; if not, just the
                    # main UTC offset must be consistent.
                    abbtzi	= abbrev[abb][0]
                    abbtzinxt	= bisect.bisect( abbtzi._utc_transition_times, at - reach )
                    abbtzilst	= bisect.bisect( abbtzi._utc_transition_times, at + reach )
                    if abbtzilst - abbtzinxt != lst - nxt:
                        msg	= "%s has %d time changes vs. %d in %s" % (
                            abb, lst-nxt, abbtzilst-abbtzinxt, abbtzi )
                        incompatible.append( "%s: %s" % ( tzinfo, msg ))
                        log.warning( "%-30s: %s", tzinfo, msg )
                        continue
                    chg		= zip( tzinfo._utc_transition_times[nxt:lst], tzinfo._transition_info[nxt:lst] )
                    abbchg	= zip( abbtzi._utc_transition_times[abbtzinxt:abbtzilst], abbtzi._transition_info[abbtzinxt:abbtzilst] )

                    def transition_consistent( zt1, zt2 ):
                        dt1,(off1,dst1,_)	= zt1
                        dt2,(off2,dst2,_)	= zt2
                        return off1 == off2 and dt1 == dt2 and dst1 == dst2

                    difs	= [ (a,b) for a,b in zip( chg, abbchg ) if not transition_consistent( a, b ) ]
                    if difs:
                        msg	= "%s time changes differ vs. %s" % ( abb, abbtzi )
                        incompatible.append( "%s: %s" % ( tzinfo, msg ))
                        desc	= " vs. ".join( "on %s, offset %s, dst %s" % ( dt, format_offset( timedelta_total_seconds( off ), ms=False ),
                                                                               format_offset( timedelta_total_seconds( dst ), ms=False ))
                                                for dt,(off,dst,_) in ( difs[0][0], difs[0][1] ))
                        log.warning( "%-30s: %s; %d differences: %s", tzinfo, msg, len( difs ), desc )
                        continue
                ( log.detail if dup else log.normal )( "%-30s: %-5s %s %s at %s UTC%s",
                    tzinfo, abb, format_offset( timedelta_total_seconds( off ), ms=False ), format_dst( dst ),
                                                       dt.strftime( cls._fmt ), "; Ignoring duplicate" if dup else "" )
                if not dup:
                    abbrev[abb]	= tzinfo,dst,off
        if incompatible:
            raise AmbiguousTimeZoneError( "%-30s region(s) incompatible: %s" % ( region, ", ".join( incompatible )))
        added			= list( set( abbrev ) - set( cls._tzabbrev ))
        cls._tzabbrev		= abbrev
        return added
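
The method above probes each zone inside an at +/- reach window (timedelta(365) by default) and looks one day (timedelta(1)) on either side of a transition to compare abbreviations and DST flags. A minimal, standalone sketch of that probing idea, assuming pytz is installed; the zone and dates are examples only:

# Minimal sketch: probe a zone at several naive-UTC instants and report the
# abbreviation and DST flag at each, the way the method above compares the
# days around a transition.
import datetime
import pytz

tz = pytz.timezone('Canada/Mountain')     # example zone, not from the source
at = datetime.datetime(2024, 6, 1)        # naive UTC probe time
reach = datetime.timedelta(365)           # look +/- one year
oneday = datetime.timedelta(1)            # one day around a change

for probe in (at - reach, at - oneday, at, at + oneday, at + reach):
    loc = tz.normalize(pytz.UTC.localize(probe).astimezone(tz))
    print(probe, loc.strftime('%Z'), bool(loc.dst()))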

Example 47

Project: plone.app.event Source File: importer.py
def ical_import(container, ics_resource, event_type,
                sync_strategy=base.SYNC_KEEP_NEWER):
    cal = icalendar.Calendar.from_ical(ics_resource)
    events = cal.walk('VEVENT')

    cat = getToolByName(container, 'portal_catalog')
    container_path = '/'.join(container.getPhysicalPath())

    def _get_by_sync_uid(uid):
        return cat(
            sync_uid=uid,
            path={'query': container_path, 'depth': 1}
        )

    def _get_prop(prop, item, default=None):
        ret = default
        if prop in item:
            ret = safe_unicode(item.decoded(prop))
        return ret

    def _from_list(ical, prop):
        """For EXDATE and RDATE recurrence component properties, the dates can
        be defined within one EXDATE/RDATE line or for each date an individual
        line.
        In the latter case, icalendar creates a list.
        This method handles this case.

        TODO: component property parameters like TZID are not used here.
        """
        val = ical[prop] if prop in ical else []
        if not isinstance(val, list):
            val = [val]
        #ret = ''
        #for item in val:
        #    ret = '%s\n' % ret if ret else ret  # insert linebreak
        #    ret = '%s%s:%s' % (ret, prop, item.to_ical())
        #return ret

        # Zip multiple lines into one, since jquery.recurrenceinput.js does
        # not support multiple lines here
        # https://github.com/collective/jquery.recurrenceinput.js/issues/15
        ret = ''
        for item in val:
            ret = '%s,' % ret if ret else ret  # insert linebreak
            ret = '%s%s' % (ret, item.to_ical())
        return '%s:%s' % (prop, ret) if ret else None

    count = 0
    for item in events:
        start = _get_prop('DTSTART', item)
        end = _get_prop('DTEND', item)
        if not end:
            duration = _get_prop('DURATION', item)
            if duration:
                end = start + duration
            # else: whole day or open end

        whole_day = False
        open_end = False
        if is_date(start) and (is_date(end) or end is None):
            # All day / whole day events
            # End must be same type as start (RFC5545, 3.8.2.2)
            whole_day = True
            if end is None:
                end = start
            if start < end:
                # RFC5545 doesn't clearly define whether all-day events should
                # have an end date one day after the start day at 0:00.
                # Internally, we handle all-day events with start=0:00 and
                # end=23:59:59, so we subtract one day here.
                end = end - datetime.timedelta(days=1)
            start = base.dt_start_of_day(date_to_datetime(start))
            end = base.dt_end_of_day(date_to_datetime(end))
        elif is_datetime(start) and end is None:
            # Open end event, see RFC 5545, 3.6.1
            open_end = True
            end = base.dt_end_of_day(date_to_datetime(start))
        assert(is_datetime(start))
        assert(is_datetime(end))

        # Set timezone, if not already set
        tz = base.default_timezone(container, as_tzinfo=True)
        if not getattr(start, 'tzinfo', False):
            start = tz.localize(start)
        if not getattr(end, 'tzinfo', False):
            end = tz.localize(end)

        title = _get_prop('SUMMARY', item)
        description = _get_prop('DESCRIPTION', item)
        location = _get_prop('LOCATION', item)

        url = _get_prop('URL', item)

        rrule = _get_prop('RRULE', item)
        rrule = 'RRULE:%s' % rrule.to_ical() if rrule else ''
        rdates = _from_list(item, 'RDATE')
        exdates = _from_list(item, 'EXDATE')
        rrule = '\n'.join([it for it in [rrule, rdates, exdates] if it])

        # TODO: attendee-lists are not decoded properly and contain only
        # vCalAddress values
        attendees = item.get('ATTENDEE', ())

        contact = _get_prop('CONTACT', item)
        categories = item.get('CATEGORIES', ())
        if getattr(categories, '__iter__', False):
            categories = tuple([safe_unicode(it) for it in categories])

        ext_modified = utc(_get_prop('LAST-MODIFIED', item))

        # TODO: better use plone.api for content creation, from which some of
        # the code here is copied

        content = None
        new_content_id = None
        existing_event = None
        sync_uid = _get_prop('UID', item)
        if sync_strategy != base.SYNC_NONE and sync_uid:
            existing_event = _get_by_sync_uid(sync_uid)
        if existing_event:
            if sync_strategy == base.SYNC_KEEP_MINE:
                # On conflict, keep mine
                continue

            exist_event = existing_event[0].getObject()
            acc = IEventAccessor(exist_event)

            if sync_strategy == base.SYNC_KEEP_NEWER and\
                    (not ext_modified or acc.last_modified > ext_modified):
                # Update only if modified date was passed in and it is not
                # older than the current modified date.  The client is not
                # expected to update the "last-modified" property, it is the
                # job of the server (calendar store) to keep it up to date.
                # This makes sure the client did the change on an up-to-date
                # version of the object.  See
                # http://tools.ietf.org/search/rfc5545#section-3.8.7.3
                continue

            # Else: update
            content = exist_event
        else:
            # TODO: if AT had the same attrs like IDXEventBase, we could set
            # everything within this invokeFactory call.
            new_content_id = str(random.randint(0, 99999999))
            container.invokeFactory(event_type,
                                    id=new_content_id,
                                    title=title,
                                    description=description)
            content = container[new_content_id]

        assert(content)  # At this point, a content must be available.

        event = IEventAccessor(content)
        event.title = title
        event.description = description
        event.start = start
        event.end = end
        event.whole_day = whole_day
        event.open_end = open_end
        event.location = location
        event.event_url = url
        event.recurrence = rrule
        event.attendees = attendees
        event.contact_name = contact
        event.subjects = categories
        if sync_strategy != base.SYNC_NONE:
            # Don't import the sync_uid, if no sync strategy is chosen. Let the
            # sync_uid be autogenerated then.
            event.sync_uid = sync_uid
        notify(ObjectModifiedEvent(content))

        # Use commits instead of savepoints to avoid "FileStorageError:
        # description too long" on large imports.
        transaction.get().commit()  # Commit before rename

        if new_content_id and new_content_id in container:
            # Rename with new id from title, if processForm didn't do it.
            chooser = INameChooser(container)
            new_id = chooser.chooseName(title, content)
            content.aq_parent.manage_renameObject(new_content_id, new_id)

        # Do this at the end, otherwise it's overwritten
        if ext_modified:
            event.last_modified = ext_modified

        count += 1

    return {'count': count}
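
The all-day handling above relies on an iCalendar DTEND being exclusive for all-day events (it names the following day), so one day is subtracted before the range is expanded to 00:00 and 23:59:59. A minimal, standalone sketch; the helper name is a stand-in, not the plone.app.event API:

# Minimal sketch of the all-day adjustment above.
import datetime

def all_day_range(start, end):
    """Turn inclusive/exclusive all-day dates into a 00:00..23:59:59 range."""
    if start < end:
        end = end - datetime.timedelta(days=1)   # make the exclusive DTEND inclusive
    start_dt = datetime.datetime.combine(start, datetime.time(0, 0, 0))
    end_dt = datetime.datetime.combine(end, datetime.time(23, 59, 59))
    return start_dt, end_dt

print(all_day_range(datetime.date(2024, 5, 1), datetime.date(2024, 5, 3)))
# start 2024-05-01 00:00:00, end 2024-05-02 23:59:59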

Example 48

Project: zipline Source File: test_finance.py
    def transaction_sim(self, **params):
        """This is a utility method that asserts expected
        results for conversion of orders to transactions given a
        trade history
        """
        trade_count = params['trade_count']
        trade_interval = params['trade_interval']
        order_count = params['order_count']
        order_amount = params['order_amount']
        order_interval = params['order_interval']
        expected_txn_count = params['expected_txn_count']
        expected_txn_volume = params['expected_txn_volume']

        # optional parameters
        # ---------------------
        # if present, alternate between long and short sales
        alternate = params.get('alternate')

        # if present, expect transaction amounts to match orders exactly.
        complete_fill = params.get('complete_fill')

        sid = 1
        metadata = make_simple_equity_info([sid], self.start, self.end)
        with TempDirectory() as tempdir, \
                tmp_trading_env(equities=metadata) as env:

            if trade_interval < timedelta(days=1):
                sim_params = factory.create_simulation_parameters(
                    start=self.start,
                    end=self.end,
                    data_frequency="minute"
                )

                minutes = self.trading_calendar.minutes_window(
                    sim_params.first_open,
                    int((trade_interval.total_seconds() / 60) * trade_count)
                    + 100)

                price_data = np.array([10.1] * len(minutes))
                assets = {
                    sid: pd.DataFrame({
                        "open": price_data,
                        "high": price_data,
                        "low": price_data,
                        "close": price_data,
                        "volume": np.array([100] * len(minutes)),
                        "dt": minutes
                    }).set_index("dt")
                }

                write_bcolz_minute_data(
                    self.trading_calendar,
                    self.trading_calendar.sessions_in_range(
                        self.trading_calendar.minute_to_session_label(
                            minutes[0]
                        ),
                        self.trading_calendar.minute_to_session_label(
                            minutes[-1]
                        )
                    ),
                    tempdir.path,
                    iteritems(assets),
                )

                equity_minute_reader = BcolzMinuteBarReader(tempdir.path)

                data_portal = DataPortal(
                    env.asset_finder, self.trading_calendar,
                    first_trading_day=equity_minute_reader.first_trading_day,
                    equity_minute_reader=equity_minute_reader,
                )
            else:
                sim_params = factory.create_simulation_parameters(
                    data_frequency="daily"
                )

                days = sim_params.sessions

                assets = {
                    1: pd.DataFrame({
                        "open": [10.1] * len(days),
                        "high": [10.1] * len(days),
                        "low": [10.1] * len(days),
                        "close": [10.1] * len(days),
                        "volume": [100] * len(days),
                        "day": [day.value for day in days]
                    }, index=days)
                }

                path = os.path.join(tempdir.path, "testdata.bcolz")
                BcolzDailyBarWriter(path, self.trading_calendar, days[0],
                                    days[-1]).write(
                    assets.items()
                )

                equity_daily_reader = BcolzDailyBarReader(path)

                data_portal = DataPortal(
                    env.asset_finder, self.trading_calendar,
                    first_trading_day=equity_daily_reader.first_trading_day,
                    equity_daily_reader=equity_daily_reader,
                )

            if "default_slippage" not in params or \
               not params["default_slippage"]:
                slippage_func = FixedSlippage()
            else:
                slippage_func = None

            blotter = Blotter(sim_params.data_frequency, self.env.asset_finder,
                              slippage_func)

            start_date = sim_params.first_open

            if alternate:
                alternator = -1
            else:
                alternator = 1

            tracker = PerformanceTracker(sim_params, self.trading_calendar,
                                         self.env)

            # replicate what tradesim does by going through every minute or day
            # of the simulation and processing open orders each time
            if sim_params.data_frequency == "minute":
                ticks = minutes
            else:
                ticks = days

            transactions = []

            order_list = []
            order_date = start_date
            for tick in ticks:
                blotter.current_dt = tick
                if tick >= order_date and len(order_list) < order_count:
                    # place an order
                    direction = alternator ** len(order_list)
                    order_id = blotter.order(
                        blotter.asset_finder.retrieve_asset(sid),
                        order_amount * direction,
                        MarketOrder())
                    order_list.append(blotter.orders[order_id])
                    order_date = order_date + order_interval
                    # move after-market orders to just after the next
                    # market open.
                    if order_date.hour >= 21:
                        if order_date.minute >= 00:
                            order_date = order_date + timedelta(days=1)
                            order_date = order_date.replace(hour=14, minute=30)
                else:
                    bar_data = BarData(
                        data_portal=data_portal,
                        simulation_dt_func=lambda: tick,
                        data_frequency=sim_params.data_frequency,
                        trading_calendar=self.trading_calendar,
                        restrictions=NoRestrictions(),
                    )
                    txns, _, closed_orders = blotter.get_transactions(bar_data)
                    for txn in txns:
                        tracker.process_transaction(txn)
                        transactions.append(txn)

                    blotter.prune_orders(closed_orders)

            for i in range(order_count):
                order = order_list[i]
                self.assertEqual(order.sid, sid)
                self.assertEqual(order.amount, order_amount * alternator ** i)

            if complete_fill:
                self.assertEqual(len(transactions), len(order_list))

            total_volume = 0
            for i in range(len(transactions)):
                txn = transactions[i]
                total_volume += txn.amount
                if complete_fill:
                    order = order_list[i]
                    self.assertEqual(order.amount, txn.amount)

            self.assertEqual(total_volume, expected_txn_volume)

            self.assertEqual(len(transactions), expected_txn_count)

            cumulative_pos = tracker.position_tracker.positions[sid]
            if total_volume == 0:
                self.assertIsNone(cumulative_pos)
            else:
                self.assertEqual(total_volume, cumulative_pos.amount)

            # the open orders should not contain sid.
            oo = blotter.open_orders
            self.assertNotIn(sid, oo, "Entry is removed when no open orders")
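
The timedelta usage to notice in this example is the order-scheduling loop: order_date is advanced by order_interval after each placed order, and any date landing at or after 21:00 UTC is pushed forward by timedelta(days=1) and reset to the 14:30 UTC open. A minimal standalone sketch of that rollover, with a hypothetical helper name and the same hard-coded open/close hours, might look like:

from datetime import datetime, timedelta

def next_order_date(order_date, order_interval):
    # Advance by the interval; push after-hours dates to the next 14:30 UTC open.
    order_date = order_date + order_interval
    if order_date.hour >= 21:
        order_date = (order_date + timedelta(days=1)).replace(hour=14, minute=30)
    return order_date

# An order falling at 21:05 UTC rolls to 14:30 UTC the next day.
print(next_order_date(datetime(2006, 1, 3, 20, 35), timedelta(minutes=30)))
# 2006-01-04 14:30:00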

Example 49

Project: SickGear Source File: tz.py
    def __init__(self, fileobj, filename=None):
        file_opened_here = False
        if isinstance(fileobj, string_types):
            self._filename = fileobj
            fileobj = open(fileobj, 'rb')
            file_opened_here = True
        elif filename is not None:
            self._filename = filename
        elif hasattr(fileobj, "name"):
            self._filename = fileobj.name
        else:
            self._filename = repr(fileobj)

        # From tzfile(5):
        #
        # The time zone information files used by tzset(3)
        # begin with the magic characters "TZif" to identify
        # them as time zone information files, followed by
        # sixteen bytes reserved for future use, followed by
        # six four-byte values of type long, written in a
        # ``standard'' byte order (the high-order  byte
        # of the value is written first).
        try:
            if fileobj.read(4).decode() != "TZif":
                raise ValueError("magic not found")

            fileobj.read(16)

            (
                # The number of UTC/local indicators stored in the file.
                ttisgmtcnt,

                # The number of standard/wall indicators stored in the file.
                ttisstdcnt,

                # The number of leap seconds for which data is
                # stored in the file.
                leapcnt,

                # The number of "transition times" for which data
                # is stored in the file.
                timecnt,

                # The number of "local time types" for which data
                # is stored in the file (must not be zero).
                typecnt,

                # The  number  of  characters  of "time zone
                # abbreviation strings" stored in the file.
                charcnt,

            ) = struct.unpack(">6l", fileobj.read(24))

            # The above header is followed by tzh_timecnt four-byte
            # values  of  type long,  sorted  in ascending order.
            # These values are written in ``standard'' byte order.
            # Each is used as a transition time (as  returned  by
            # time(2)) at which the rules for computing local time
            # change.

            if timecnt:
                self._trans_list = struct.unpack(">%dl" % timecnt,
                                                 fileobj.read(timecnt*4))
            else:
                self._trans_list = []

            # Next come tzh_timecnt one-byte values of type unsigned
            # char; each one tells which of the different types of
            # ``local time'' types described in the file is associated
            # with the same-indexed transition time. These values
            # serve as indices into an array of ttinfo structures that
            # appears next in the file.

            if timecnt:
                self._trans_idx = struct.unpack(">%dB" % timecnt,
                                                fileobj.read(timecnt))
            else:
                self._trans_idx = []

            # Each ttinfo structure is written as a four-byte value
            # for tt_gmtoff  of  type long,  in  a  standard  byte
            # order, followed  by a one-byte value for tt_isdst
            # and a one-byte  value  for  tt_abbrind.   In  each
            # structure, tt_gmtoff  gives  the  number  of
            # seconds to be added to UTC, tt_isdst tells whether
            # tm_isdst should be set by  localtime(3),  and
            # tt_abbrind serves  as an index into the array of
            # time zone abbreviation characters that follow the
            # ttinfo structure(s) in the file.

            ttinfo = []

            for i in range(typecnt):
                ttinfo.append(struct.unpack(">lbb", fileobj.read(6)))

            abbr = fileobj.read(charcnt).decode()

            # Then there are tzh_leapcnt pairs of four-byte
            # values, written in  standard byte  order;  the
            # first  value  of  each pair gives the time (as
            # returned by time(2)) at which a leap second
            # occurs;  the  second  gives the  total  number of
            # leap seconds to be applied after the given time.
            # The pairs of values are sorted in ascending order
            # by time.

            # Not used, for now
            # if leapcnt:
            #    leap = struct.unpack(">%dl" % (leapcnt*2),
            #                         fileobj.read(leapcnt*8))

            # Then there are tzh_ttisstdcnt standard/wall
            # indicators, each stored as a one-byte value;
            # they tell whether the transition times associated
            # with local time types were specified as standard
            # time or wall clock time, and are used when
            # a time zone file is used in handling POSIX-style
            # time zone environment variables.

            if ttisstdcnt:
                isstd = struct.unpack(">%db" % ttisstdcnt,
                                      fileobj.read(ttisstdcnt))

            # Finally, there are tzh_ttisgmtcnt UTC/local
            # indicators, each stored as a one-byte value;
            # they tell whether the transition times associated
            # with local time types were specified as UTC or
            # local time, and are used when a time zone file
            # is used in handling POSIX-style time zone envi-
            # ronment variables.

            if ttisgmtcnt:
                isgmt = struct.unpack(">%db" % ttisgmtcnt,
                                      fileobj.read(ttisgmtcnt))

            # ** Everything has been read **
        finally:
            if file_opened_here:
                fileobj.close()

        # Build ttinfo list
        self._ttinfo_list = []
        for i in range(typecnt):
            gmtoff, isdst, abbrind = ttinfo[i]
            # Round to full-minutes if that's not the case. Python's
            # datetime doesn't accept sub-minute timezones. Check
            # http://python.org/sf/1447945 for some information.
            gmtoff = (gmtoff+30)//60*60
            tti = _ttinfo()
            tti.offset = gmtoff
            tti.delta = datetime.timedelta(seconds=gmtoff)
            tti.isdst = isdst
            tti.abbr = abbr[abbrind:abbr.find('\x00', abbrind)]
            tti.isstd = (ttisstdcnt > i and isstd[i] != 0)
            tti.isgmt = (ttisgmtcnt > i and isgmt[i] != 0)
            self._ttinfo_list.append(tti)

        # Replace ttinfo indexes for ttinfo objects.
        trans_idx = []
        for idx in self._trans_idx:
            trans_idx.append(self._ttinfo_list[idx])
        self._trans_idx = tuple(trans_idx)

        # Set standard, dst, and before ttinfos. before will be
        # used when a given time is before any transitions,
        # and will be set to the first non-dst ttinfo, or to
        # the first dst, if all of them are dst.
        self._ttinfo_std = None
        self._ttinfo_dst = None
        self._ttinfo_before = None
        if self._ttinfo_list:
            if not self._trans_list:
                self._ttinfo_std = self._ttinfo_first = self._ttinfo_list[0]
            else:
                for i in range(timecnt-1, -1, -1):
                    tti = self._trans_idx[i]
                    if not self._ttinfo_std and not tti.isdst:
                        self._ttinfo_std = tti
                    elif not self._ttinfo_dst and tti.isdst:
                        self._ttinfo_dst = tti
                    if self._ttinfo_std and self._ttinfo_dst:
                        break
                else:
                    if self._ttinfo_dst and not self._ttinfo_std:
                        self._ttinfo_std = self._ttinfo_dst

                for tti in self._ttinfo_list:
                    if not tti.isdst:
                        self._ttinfo_before = tti
                        break
                else:
                    self._ttinfo_before = self._ttinfo_list[0]

        # Now fix transition times to become relative to wall time.
        #
        # I'm not sure about this. In my tests, the tz source file
        # is setup to wall time, and in the binary file isstd and
        # isgmt are off, so it should be in wall time. OTOH, it's
        # always in gmt time. Let me know if you have comments
        # about this.
        laststdoffset = 0
        self._trans_list = list(self._trans_list)
        for i in range(len(self._trans_list)):
            tti = self._trans_idx[i]
            if not tti.isdst:
                # This is std time.
                self._trans_list[i] += tti.offset
                laststdoffset = tti.offset
            else:
                # This is dst time. Convert to std.
                self._trans_list[i] += laststdoffset
        self._trans_list = tuple(self._trans_list)
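
The timedelta call in this example appears when each parsed ttinfo record is built: the raw tt_gmtoff (seconds east of UTC) is rounded to whole minutes, because Python's datetime rejects sub-minute tzinfo offsets, and then wrapped in datetime.timedelta. A minimal sketch of just that step, with illustrative offset values:

import datetime

def offset_to_delta(gmtoff):
    # Round the raw offset in seconds to a full minute, as the parser above does,
    # then express it as a timedelta suitable for tzinfo.utcoffset().
    gmtoff = (gmtoff + 30) // 60 * 60
    return datetime.timedelta(seconds=gmtoff)

print(offset_to_delta(3600))     # 1:00:00 (e.g. CET)
print(offset_to_delta(-17762))   # -1 day, 19:04:00 (an old LMT-style offset)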

Example 50

Project: taiga-back Source File: test_emails.py
Function: handle
    def handle(self, *args, **options):
        if len(args) != 1:
            print("Usage: ./manage.py test_emails <email-address>")
            return

        locale = options.get('locale')
        test_email = args[0]

        # Register email
        context = {"lang": locale,
                    "user": get_user_model().objects.all().order_by("?").first(),
                    "cancel_token": "cancel-token"}

        email = mail_builder.registered_user(test_email, context)
        email.send()

        # Membership invitation
        membership = Membership.objects.order_by("?").filter(user__isnull=True).first()
        membership.invited_by = get_user_model().objects.all().order_by("?").first()
        membership.invitation_extra_text = "Text example, Text example,\nText example,\n\nText example"

        context = {"lang": locale, "membership": membership}
        email = mail_builder.membership_invitation(test_email, context)
        email.send()

        # Membership notification
        context = {"lang": locale,
                   "membership": Membership.objects.order_by("?").filter(user__isnull=False).first()}
        email = mail_builder.membership_notification(test_email, context)
        email.send()

        # Feedback
        context = {
            "lang": locale,
            "feedback_entry": {
                "full_name": "Test full name",
                "email": "[email protected]",
                "comment": "Test comment",
            },
            "extra": {
                "key1": "value1",
                "key2": "value2",
            },
        }
        email = mail_builder.feedback_notification(test_email, context)
        email.send()

        # Password recovery
        context = {"lang": locale, "user": get_user_model().objects.all().order_by("?").first()}
        email = mail_builder.password_recovery(test_email, context)
        email.send()

        # Change email
        context = {"lang": locale, "user": get_user_model().objects.all().order_by("?").first()}
        email = mail_builder.change_email(test_email, context)
        email.send()

        # Export/Import emails
        context = {
            "lang": locale,
            "user": get_user_model().objects.all().order_by("?").first(),
            "project": Project.objects.all().order_by("?").first(),
            "error_subject": "Error generating project dump",
            "error_message": "Error generating project dump",
        }
        email = mail_builder.export_error(test_email, context)
        email.send()
        context = {
            "lang": locale,
            "user": get_user_model().objects.all().order_by("?").first(),
            "error_subject": "Error importing project dump",
            "error_message": "Error importing project dump",
        }
        email = mail_builder.import_error(test_email, context)
        email.send()

        deletion_date = timezone.now() + datetime.timedelta(seconds=60*60*24)
        context = {
            "lang": locale,
            "url": "http://dummyurl.com",
            "user": get_user_model().objects.all().order_by("?").first(),
            "project": Project.objects.all().order_by("?").first(),
            "deletion_date": deletion_date,
        }
        email = mail_builder.dump_project(test_email, context)
        email.send()

        context = {
            "lang": locale,
            "user": get_user_model().objects.all().order_by("?").first(),
            "project": Project.objects.all().order_by("?").first(),
        }
        email = mail_builder.load_dump(test_email, context)
        email.send()

        # Notification emails
        notification_emails = [
            ("issues.Issue", "issues/issue-change"),
            ("issues.Issue", "issues/issue-create"),
            ("issues.Issue", "issues/issue-delete"),
            ("tasks.Task", "tasks/task-change"),
            ("tasks.Task", "tasks/task-create"),
            ("tasks.Task", "tasks/task-delete"),
            ("userstories.UserStory", "userstories/userstory-change"),
            ("userstories.UserStory", "userstories/userstory-create"),
            ("userstories.UserStory", "userstories/userstory-delete"),
            ("milestones.Milestone", "milestones/milestone-change"),
            ("milestones.Milestone", "milestones/milestone-create"),
            ("milestones.Milestone", "milestones/milestone-delete"),
            ("wiki.WikiPage", "wiki/wikipage-change"),
            ("wiki.WikiPage", "wiki/wikipage-create"),
            ("wiki.WikiPage", "wiki/wikipage-delete"),
        ]

        context = {
            "lang": locale,
            "project": Project.objects.all().order_by("?").first(),
            "changer": get_user_model().objects.all().order_by("?").first(),
            "history_entries": HistoryEntry.objects.all().order_by("?")[0:5],
            "user": get_user_model().objects.all().order_by("?").first(),
        }

        for notification_email in notification_emails:
            model = apps.get_model(*notification_email[0].split("."))
            snapshot = {
                "subject": "Tests subject",
                "ref": 123123,
                "name": "Tests name",
                "slug": "test-slug"
            }
            queryset = model.objects.all().order_by("?")
            for obj in queryset:
                end = False
                entries = get_history_queryset_by_model_instance(obj).filter(is_snapshot=True).order_by("?")

                for entry in entries:
                    if entry.snapshot:
                        snapshot = entry.snapshot
                        end = True
                        break
                if end:
                    break
            context["snapshot"] = snapshot

            cls = type("InlineCSSTemplateMail", (InlineCSSTemplateMail,), {"name": notification_email[1]})
            email = cls()
            email.send(test_email, context)


        # Transfer Emails
        context = {
            "project": Project.objects.all().order_by("?").first(),
            "requester": User.objects.all().order_by("?").first(),
        }
        email = mail_builder.transfer_request(test_email, context)
        email.send()

        context = {
            "project": Project.objects.all().order_by("?").first(),
            "receiver": User.objects.all().order_by("?").first(),
            "token": "test-token",
            "reason": "Test reason"
        }
        email = mail_builder.transfer_start(test_email, context)
        email.send()

        context = {
            "project": Project.objects.all().order_by("?").first(),
            "old_owner": User.objects.all().order_by("?").first(),
            "new_owner": User.objects.all().order_by("?").first(),
            "reason": "Test reason"
        }
        email = mail_builder.transfer_accept(test_email, context)
        email.send()

        context = {
            "project": Project.objects.all().order_by("?").first(),
            "rejecter": User.objects.all().order_by("?").first(),
            "reason": "Test reason"
        }
        email = mail_builder.transfer_reject(test_email, context)
        email.send()
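
The timedelta call in this example builds the dump deletion date one day ahead of timezone.now() (60*60*24 seconds) before it is passed to the dump_project email context. A small sketch of that arithmetic, using a fixed datetime in place of Django's timezone.now() so it runs standalone:

import datetime

now = datetime.datetime(2016, 5, 1, 12, 0)   # stand-in for timezone.now()
deletion_date = now + datetime.timedelta(seconds=60 * 60 * 24)
assert deletion_date == now + datetime.timedelta(days=1)
print(deletion_date)   # 2016-05-02 12:00:00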